perf trace: Sample timestamp and cpu when using record flag
[linux-2.6/linux-acpi-2.6/ibm-acpi-2.6.git] / tools/perf/util/parse-events.c
blob a9bdcab8c0702c5e0fd2134955b43bf8aa1b7d8e
#include "util.h"
#include "../perf.h"
#include "parse-options.h"
#include "parse-events.h"
#include "exec_cmd.h"
#include "string.h"
#include "cache.h"
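/*
 * Added note: global event state.  parse_events() below fills attrs[]
 * with one perf_counter_attr per parsed event specification and bumps
 * nr_counters accordingly, capped at MAX_COUNTERS.
 */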
int			nr_counters;

struct perf_counter_attr	attrs[MAX_COUNTERS];
struct event_symbol {
	u8		type;
	u64		config;
	const char	*symbol;
	const char	*alias;
};
enum event_result {
	EVT_FAILED,
	EVT_HANDLED,
	EVT_HANDLED_ALL
};
char debugfs_path[MAXPATHLEN];
#define CHW(x) .type = PERF_TYPE_HARDWARE, .config = PERF_COUNT_HW_##x
#define CSW(x) .type = PERF_TYPE_SOFTWARE, .config = PERF_COUNT_SW_##x
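/*
 * Added note: symbolic event table.  Each entry maps an event name and
 * an optional alias to a { type, config } pair, so e.g. "-e cycles" and
 * "-e cpu-cycles" both select PERF_COUNT_HW_CPU_CYCLES.
 */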
static struct event_symbol event_symbols[] = {
  { CHW(CPU_CYCLES),		"cpu-cycles",		"cycles"	},
  { CHW(INSTRUCTIONS),		"instructions",		""		},
  { CHW(CACHE_REFERENCES),	"cache-references",	""		},
  { CHW(CACHE_MISSES),		"cache-misses",		""		},
  { CHW(BRANCH_INSTRUCTIONS),	"branch-instructions",	"branches"	},
  { CHW(BRANCH_MISSES),		"branch-misses",	""		},
  { CHW(BUS_CYCLES),		"bus-cycles",		""		},

  { CSW(CPU_CLOCK),		"cpu-clock",		""		},
  { CSW(TASK_CLOCK),		"task-clock",		""		},
  { CSW(PAGE_FAULTS),		"page-faults",		"faults"	},
  { CSW(PAGE_FAULTS_MIN),	"minor-faults",		""		},
  { CSW(PAGE_FAULTS_MAJ),	"major-faults",		""		},
  { CSW(CONTEXT_SWITCHES),	"context-switches",	"cs"		},
  { CSW(CPU_MIGRATIONS),	"cpu-migrations",	"migrations"	},
};
#define __PERF_COUNTER_FIELD(config, name) \
	((config & PERF_COUNTER_##name##_MASK) >> PERF_COUNTER_##name##_SHIFT)

#define PERF_COUNTER_RAW(config)	__PERF_COUNTER_FIELD(config, RAW)
#define PERF_COUNTER_CONFIG(config)	__PERF_COUNTER_FIELD(config, CONFIG)
#define PERF_COUNTER_TYPE(config)	__PERF_COUNTER_FIELD(config, TYPE)
#define PERF_COUNTER_ID(config)		__PERF_COUNTER_FIELD(config, EVENT)
static const char *hw_event_names[] = {
	"cycles",
	"instructions",
	"cache-references",
	"cache-misses",
	"branches",
	"branch-misses",
	"bus-cycles",
};
static const char *sw_event_names[] = {
	"cpu-clock-msecs",
	"task-clock-msecs",
	"page-faults",
	"context-switches",
	"CPU-migrations",
	"minor-faults",
	"major-faults",
};
#define MAX_ALIASES 8
static const char *hw_cache[][MAX_ALIASES] = {
 { "L1-dcache",	"l1-d",		"l1d",		"L1-data",		},
 { "L1-icache",	"l1-i",		"l1i",		"L1-instruction",	},
 { "LLC",	"L2",							},
 { "dTLB",	"d-tlb",	"Data-TLB",				},
 { "iTLB",	"i-tlb",	"Instruction-TLB",			},
 { "branch",	"branches",	"bpu",		"btb",		"bpc",	},
};
static const char *hw_cache_op[][MAX_ALIASES] = {
 { "load",	"loads",	"read",					},
 { "store",	"stores",	"write",				},
 { "prefetch",	"prefetches",	"speculative-read", "speculative-load",	},
};
static const char *hw_cache_result[][MAX_ALIASES] = {
 { "refs",	"Reference",	"ops",		"access",		},
 { "misses",	"miss",							},
};
#define C(x)		PERF_COUNT_HW_CACHE_##x
#define CACHE_READ	(1 << C(OP_READ))
#define CACHE_WRITE	(1 << C(OP_WRITE))
#define CACHE_PREFETCH	(1 << C(OP_PREFETCH))
#define COP(x)		(1 << x)
/*
 * cache operation stat
 * L1I : Read and prefetch only
 * ITLB and BPU : Read-only
 */
static unsigned long hw_cache_stat[C(MAX)] = {
 [C(L1D)]	= (CACHE_READ | CACHE_WRITE | CACHE_PREFETCH),
 [C(L1I)]	= (CACHE_READ | CACHE_PREFETCH),
 [C(LL)]	= (CACHE_READ | CACHE_WRITE | CACHE_PREFETCH),
 [C(DTLB)]	= (CACHE_READ | CACHE_WRITE | CACHE_PREFETCH),
 [C(ITLB)]	= (CACHE_READ),
 [C(BPU)]	= (CACHE_READ),
};
#define for_each_subsystem(sys_dir, sys_dirent, sys_next)		       \
	while (!readdir_r(sys_dir, &sys_dirent, &sys_next) && sys_next)	       \
	if (sys_dirent.d_type == DT_DIR &&				       \
		(strcmp(sys_dirent.d_name, ".")) &&			       \
		(strcmp(sys_dirent.d_name, "..")))
static int tp_event_has_id(struct dirent *sys_dir, struct dirent *evt_dir)
{
	char evt_path[MAXPATHLEN];
	int fd;

	snprintf(evt_path, MAXPATHLEN, "%s/%s/%s/id", debugfs_path,
		 sys_dir->d_name, evt_dir->d_name);
	fd = open(evt_path, O_RDONLY);
	if (fd < 0)
		return -EINVAL;
	close(fd);

	return 0;
}
#define for_each_event(sys_dirent, evt_dir, evt_dirent, evt_next)	       \
	while (!readdir_r(evt_dir, &evt_dirent, &evt_next) && evt_next)        \
	if (evt_dirent.d_type == DT_DIR &&				       \
		(strcmp(evt_dirent.d_name, ".")) &&			       \
		(strcmp(evt_dirent.d_name, "..")) &&			       \
		(!tp_event_has_id(&sys_dirent, &evt_dirent)))
#define MAX_EVENT_LENGTH 512
int valid_debugfs_mount(const char *debugfs)
{
	struct statfs st_fs;

	if (statfs(debugfs, &st_fs) < 0)
		return -ENOENT;
	else if (st_fs.f_type != (long) DEBUGFS_MAGIC)
		return -ENOENT;
	return 0;
}
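/*
 * Added note: tracepoint_id_to_path() walks the debugfs events tree
 * looking for a tracepoint whose "id" file matches @config.  The layout
 * it expects is <debugfs_path>/<subsystem>/<event>/id, e.g. a
 * hypothetical <debugfs_path>/sched/sched_switch/id.
 */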
struct tracepoint_path *tracepoint_id_to_path(u64 config)
{
	struct tracepoint_path *path = NULL;
	DIR *sys_dir, *evt_dir;
	struct dirent *sys_next, *evt_next, sys_dirent, evt_dirent;
	char id_buf[4];
	int sys_dir_fd, fd;
	u64 id;
	char evt_path[MAXPATHLEN];

	if (valid_debugfs_mount(debugfs_path))
		return NULL;

	sys_dir = opendir(debugfs_path);
	if (!sys_dir)
		goto cleanup;
	sys_dir_fd = dirfd(sys_dir);

	for_each_subsystem(sys_dir, sys_dirent, sys_next) {
		int dfd = openat(sys_dir_fd, sys_dirent.d_name,
				 O_RDONLY|O_DIRECTORY), evt_dir_fd;
		if (dfd == -1)
			continue;
		evt_dir = fdopendir(dfd);
		if (!evt_dir) {
			close(dfd);
			continue;
		}
		evt_dir_fd = dirfd(evt_dir);
		for_each_event(sys_dirent, evt_dir, evt_dirent, evt_next) {
			snprintf(evt_path, MAXPATHLEN, "%s/id",
				 evt_dirent.d_name);
			fd = openat(evt_dir_fd, evt_path, O_RDONLY);
			if (fd < 0)
				continue;
			if (read(fd, id_buf, sizeof(id_buf)) < 0) {
				close(fd);
				continue;
			}
			close(fd);
			id = atoll(id_buf);
			if (id == config) {
				closedir(evt_dir);
				closedir(sys_dir);
				/*
				 * Allocate the struct itself, not just a
				 * pointer's worth of memory, and bail out
				 * if the allocation fails.
				 */
				path = calloc(1, sizeof(*path));
				if (!path)
					return NULL;
				path->system = malloc(MAX_EVENT_LENGTH);
				if (!path->system) {
					free(path);
					return NULL;
				}
				path->name = malloc(MAX_EVENT_LENGTH);
				if (!path->name) {
					free(path->system);
					free(path);
					return NULL;
				}
				strncpy(path->system, sys_dirent.d_name,
					MAX_EVENT_LENGTH);
				strncpy(path->name, evt_dirent.d_name,
					MAX_EVENT_LENGTH);
				return path;
			}
		}
		closedir(evt_dir);
	}

cleanup:
	closedir(sys_dir);
	return NULL;
}
#define TP_PATH_LEN (MAX_EVENT_LENGTH * 2 + 1)
static const char *tracepoint_id_to_name(u64 config)
{
	static char buf[TP_PATH_LEN];
	struct tracepoint_path *path;

	path = tracepoint_id_to_path(config);
	if (path) {
		snprintf(buf, TP_PATH_LEN, "%s:%s", path->system, path->name);
		free(path->name);
		free(path->system);
		free(path);
	} else
		snprintf(buf, TP_PATH_LEN, "%s:%s", "unknown", "unknown");

	return buf;
}
static int is_cache_op_valid(u8 cache_type, u8 cache_op)
{
	if (hw_cache_stat[cache_type] & COP(cache_op))
		return 1;	/* valid */
	else
		return 0;	/* invalid */
}
static char *event_cache_name(u8 cache_type, u8 cache_op, u8 cache_result)
{
	static char name[50];

	if (cache_result) {
		sprintf(name, "%s-%s-%s", hw_cache[cache_type][0],
			hw_cache_op[cache_op][0],
			hw_cache_result[cache_result][0]);
	} else {
		sprintf(name, "%s-%s", hw_cache[cache_type][0],
			hw_cache_op[cache_op][1]);
	}

	return name;
}
const char *event_name(int counter)
{
	u64 config = attrs[counter].config;
	int type = attrs[counter].type;

	return __event_name(type, config);
}
const char *__event_name(int type, u64 config)
{
	static char buf[32];

	if (type == PERF_TYPE_RAW) {
		sprintf(buf, "raw 0x%llx", config);
		return buf;
	}

	switch (type) {
	case PERF_TYPE_HARDWARE:
		if (config < PERF_COUNT_HW_MAX)
			return hw_event_names[config];
		return "unknown-hardware";

	case PERF_TYPE_HW_CACHE: {
		u8 cache_type, cache_op, cache_result;

		cache_type = (config >> 0) & 0xff;
		if (cache_type > PERF_COUNT_HW_CACHE_MAX)
			return "unknown-ext-hardware-cache-type";

		cache_op = (config >> 8) & 0xff;
		if (cache_op > PERF_COUNT_HW_CACHE_OP_MAX)
			return "unknown-ext-hardware-cache-op";

		cache_result = (config >> 16) & 0xff;
		if (cache_result > PERF_COUNT_HW_CACHE_RESULT_MAX)
			return "unknown-ext-hardware-cache-result";

		if (!is_cache_op_valid(cache_type, cache_op))
			return "invalid-cache";

		return event_cache_name(cache_type, cache_op, cache_result);
	}

	case PERF_TYPE_SOFTWARE:
		if (config < PERF_COUNT_SW_MAX)
			return sw_event_names[config];
		return "unknown-software";

	case PERF_TYPE_TRACEPOINT:
		return tracepoint_id_to_name(config);

	default:
		break;
	}

	return "unknown";
}
static int parse_aliases(const char **str, const char *names[][MAX_ALIASES], int size)
{
	int i, j;
	int n, longest = -1;

	for (i = 0; i < size; i++) {
		for (j = 0; j < MAX_ALIASES && names[i][j]; j++) {
			n = strlen(names[i][j]);
			if (n > longest && !strncasecmp(*str, names[i][j], n))
				longest = n;
		}
		if (longest > 0) {
			*str += longest;
			return i;
		}
	}

	return -1;
}
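/*
 * Added note: generic cache events are spelled <cache>-<op>-<result>
 * using the alias tables above, e.g. "L1-dcache-load-misses" or
 * "LLC-loads"; missing pieces fall back to "read" ops and "access"
 * results in the parser below.
 */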
static enum event_result
parse_generic_hw_event(const char **str, struct perf_counter_attr *attr)
{
	const char *s = *str;
	int cache_type = -1, cache_op = -1, cache_result = -1;

	cache_type = parse_aliases(&s, hw_cache, PERF_COUNT_HW_CACHE_MAX);
	/*
	 * No fallback - if we cannot get a clear cache type
	 * then bail out:
	 */
	if (cache_type == -1)
		return EVT_FAILED;

	while ((cache_op == -1 || cache_result == -1) && *s == '-') {
		++s;

		if (cache_op == -1) {
			cache_op = parse_aliases(&s, hw_cache_op,
						 PERF_COUNT_HW_CACHE_OP_MAX);
			if (cache_op >= 0) {
				if (!is_cache_op_valid(cache_type, cache_op))
					return 0;
				continue;
			}
		}

		if (cache_result == -1) {
			cache_result = parse_aliases(&s, hw_cache_result,
						     PERF_COUNT_HW_CACHE_RESULT_MAX);
			if (cache_result >= 0)
				continue;
		}

		/*
		 * Can't parse this as a cache op or result, so back up
		 * to the '-'.
		 */
		--s;
		break;
	}

	/*
	 * Fall back to reads:
	 */
	if (cache_op == -1)
		cache_op = PERF_COUNT_HW_CACHE_OP_READ;

	/*
	 * Fall back to accesses:
	 */
	if (cache_result == -1)
		cache_result = PERF_COUNT_HW_CACHE_RESULT_ACCESS;

	attr->config = cache_type | (cache_op << 8) | (cache_result << 16);
	attr->type = PERF_TYPE_HW_CACHE;

	*str = s;
	return EVT_HANDLED;
}
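/*
 * Added note: single tracepoint "sys:event[:flags]" handling.  The only
 * flag recognized below is "record": it makes every sample carry the raw
 * tracepoint payload plus its timestamp and CPU
 * (PERF_SAMPLE_RAW | PERF_SAMPLE_TIME | PERF_SAMPLE_CPU), which is the
 * behaviour the patch title at the top of this page refers to.
 */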
static enum event_result
parse_single_tracepoint_event(char *sys_name,
			      const char *evt_name,
			      unsigned int evt_length,
			      char *flags,
			      struct perf_counter_attr *attr,
			      const char **strp)
{
	char evt_path[MAXPATHLEN];
	char id_buf[4];
	u64 id;
	int fd;

	if (flags) {
		if (!strncmp(flags, "record", strlen(flags))) {
			attr->sample_type |= PERF_SAMPLE_RAW;
			attr->sample_type |= PERF_SAMPLE_TIME;
			attr->sample_type |= PERF_SAMPLE_CPU;
		}
	}

	snprintf(evt_path, MAXPATHLEN, "%s/%s/%s/id", debugfs_path,
		 sys_name, evt_name);

	fd = open(evt_path, O_RDONLY);
	if (fd < 0)
		return EVT_FAILED;

	if (read(fd, id_buf, sizeof(id_buf)) < 0) {
		close(fd);
		return EVT_FAILED;
	}

	close(fd);
	id = atoll(id_buf);
	attr->config = id;
	attr->type = PERF_TYPE_TRACEPOINT;
	*strp = evt_name + evt_length;

	return EVT_HANDLED;
}
/* sys + ':' + event + ':' + flags */
#define MAX_EVOPT_LEN	(MAX_EVENT_LENGTH * 2 + 2 + 128)
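/*
 * Added note: the "sys:*" wildcard case.  The subsystem directory is
 * opened and every event in it (skipping "enable" and "filter") is fed
 * back through parse_events() as "sys:event[:flags]".
 */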
static enum event_result
parse_subsystem_tracepoint_event(char *sys_name, char *flags)
{
	char evt_path[MAXPATHLEN];
	struct dirent *evt_ent;
	DIR *evt_dir;

	snprintf(evt_path, MAXPATHLEN, "%s/%s", debugfs_path, sys_name);
	evt_dir = opendir(evt_path);

	if (!evt_dir) {
		perror("Can't open event dir");
		return EVT_FAILED;
	}

	while ((evt_ent = readdir(evt_dir))) {
		char event_opt[MAX_EVOPT_LEN + 1];
		int len;
		unsigned int rem = MAX_EVOPT_LEN;

		if (!strcmp(evt_ent->d_name, ".")
		    || !strcmp(evt_ent->d_name, "..")
		    || !strcmp(evt_ent->d_name, "enable")
		    || !strcmp(evt_ent->d_name, "filter"))
			continue;

		len = snprintf(event_opt, MAX_EVOPT_LEN, "%s:%s", sys_name,
			       evt_ent->d_name);
		if (len < 0)
			return EVT_FAILED;

		rem -= len;
		if (flags) {
			if (rem < strlen(flags) + 1)
				return EVT_FAILED;

			strcat(event_opt, ":");
			strcat(event_opt, flags);
		}

		if (parse_events(NULL, event_opt, 0))
			return EVT_FAILED;
	}

	return EVT_HANDLED_ALL;
}
static enum event_result parse_tracepoint_event(const char **strp,
						struct perf_counter_attr *attr)
{
	const char *evt_name;
	char *flags;
	char sys_name[MAX_EVENT_LENGTH];
	unsigned int sys_length, evt_length;

	if (valid_debugfs_mount(debugfs_path))
		return 0;

	evt_name = strchr(*strp, ':');
	if (!evt_name)
		return EVT_FAILED;

	sys_length = evt_name - *strp;
	if (sys_length >= MAX_EVENT_LENGTH)
		return 0;

	strncpy(sys_name, *strp, sys_length);
	sys_name[sys_length] = '\0';
	evt_name = evt_name + 1;

	flags = strchr(evt_name, ':');
	if (flags) {
		/* split it out: */
		evt_name = strndup(evt_name, flags - evt_name);
		flags++;
	}

	evt_length = strlen(evt_name);
	if (evt_length >= MAX_EVENT_LENGTH)
		return EVT_FAILED;

	if (!strcmp(evt_name, "*")) {
		*strp = evt_name + evt_length;
		return parse_subsystem_tracepoint_event(sys_name, flags);
	} else
		return parse_single_tracepoint_event(sys_name, evt_name,
						     evt_length, flags,
						     attr, strp);
}
static int check_events(const char *str, unsigned int i)
{
	int n;

	n = strlen(event_symbols[i].symbol);
	if (!strncmp(str, event_symbols[i].symbol, n))
		return n;

	n = strlen(event_symbols[i].alias);
	if (n)
		if (!strncmp(str, event_symbols[i].alias, n))
			return n;
	return 0;
}
static enum event_result
parse_symbolic_event(const char **strp, struct perf_counter_attr *attr)
{
	const char *str = *strp;
	unsigned int i;
	int n;

	for (i = 0; i < ARRAY_SIZE(event_symbols); i++) {
		n = check_events(str, i);
		if (n > 0) {
			attr->type = event_symbols[i].type;
			attr->config = event_symbols[i].config;
			*strp = str + n;
			return EVT_HANDLED;
		}
	}
	return EVT_FAILED;
}
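/*
 * Added note: raw PMU events are written "rNNN" with NNN a hexadecimal
 * event descriptor, e.g. a hypothetical "-e r1a8"; everything after the
 * 'r' becomes attr->config with type PERF_TYPE_RAW.
 */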
static enum event_result
parse_raw_event(const char **strp, struct perf_counter_attr *attr)
{
	const char *str = *strp;
	u64 config;
	int n;

	if (*str != 'r')
		return EVT_FAILED;
	n = hex2u64(str + 1, &config);
	if (n > 0) {
		*strp = str + n + 1;
		attr->type = PERF_TYPE_RAW;
		attr->config = config;
		return EVT_HANDLED;
	}
	return EVT_FAILED;
}
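/*
 * Added note: numeric events are written "<type>:<config>"; both parts
 * go through strtoul() with base 0, so plain decimal and 0x-prefixed hex
 * both work (e.g. "0:1" or "1:0x3" as purely illustrative values).
 */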
static enum event_result
parse_numeric_event(const char **strp, struct perf_counter_attr *attr)
{
	const char *str = *strp;
	char *endp;
	unsigned long type;
	u64 config;

	type = strtoul(str, &endp, 0);
	if (endp > str && type < PERF_TYPE_MAX && *endp == ':') {
		str = endp + 1;
		config = strtoul(str, &endp, 0);
		if (endp > str) {
			attr->type = type;
			attr->config = config;
			*strp = endp;
			return EVT_HANDLED;
		}
	}
	return EVT_FAILED;
}
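/*
 * Added note: trailing ":u", ":k" and ":h" modifiers restrict counting.
 * Each letter present clears the corresponding exclude_* bit, so e.g.
 * "cycles:u" counts user space only (kernel and hypervisor excluded).
 */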
static enum event_result
parse_event_modifier(const char **strp, struct perf_counter_attr *attr)
{
	const char *str = *strp;
	int eu = 1, ek = 1, eh = 1;

	if (*str++ != ':')
		return 0;
	while (*str) {
		if (*str == 'u')
			eu = 0;
		else if (*str == 'k')
			ek = 0;
		else if (*str == 'h')
			eh = 0;
		else
			break;
		++str;
	}
	if (str >= *strp + 2) {
		*strp = str;
		attr->exclude_user = eu;
		attr->exclude_kernel = ek;
		attr->exclude_hv = eh;
		return 1;
	}
	return 0;
}
/*
 * Each event can have multiple symbolic names.
 * Symbolic names are (almost) exactly matched.
 */
static enum event_result
parse_event_symbols(const char **str, struct perf_counter_attr *attr)
{
	enum event_result ret;

	ret = parse_tracepoint_event(str, attr);
	if (ret != EVT_FAILED)
		goto modifier;

	ret = parse_raw_event(str, attr);
	if (ret != EVT_FAILED)
		goto modifier;

	ret = parse_numeric_event(str, attr);
	if (ret != EVT_FAILED)
		goto modifier;

	ret = parse_symbolic_event(str, attr);
	if (ret != EVT_FAILED)
		goto modifier;

	ret = parse_generic_hw_event(str, attr);
	if (ret != EVT_FAILED)
		goto modifier;

	return EVT_FAILED;

modifier:
	parse_event_modifier(str, attr);

	return ret;
}
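/*
 * Added note: parse_events() has the parse-options callback signature
 * and accepts a comma or whitespace separated list of event
 * specifications, e.g. a hypothetical "cycles,instructions", appending
 * one perf_counter_attr per event to attrs[].
 */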
int parse_events(const struct option *opt __used, const char *str, int unset __used)
{
	struct perf_counter_attr attr;
	enum event_result ret;

	for (;;) {
		if (nr_counters == MAX_COUNTERS)
			return -1;

		memset(&attr, 0, sizeof(attr));
		ret = parse_event_symbols(&str, &attr);
		if (ret == EVT_FAILED)
			return -1;

		if (!(*str == 0 || *str == ',' || isspace(*str)))
			return -1;

		if (ret != EVT_HANDLED_ALL) {
			attrs[nr_counters] = attr;
			nr_counters++;
		}

		if (*str == 0)
			break;
		if (*str == ',')
			++str;
		while (isspace(*str))
			++str;
	}

	return 0;
}
static const char * const event_type_descriptors[] = {
	"",
	"Hardware event",
	"Software event",
	"Tracepoint event",
	"Hardware cache event",
};
/*
 * Print the events from <debugfs_mount_point>/tracing/events
 */

static void print_tracepoint_events(void)
{
	DIR *sys_dir, *evt_dir;
	struct dirent *sys_next, *evt_next, sys_dirent, evt_dirent;
	int sys_dir_fd;
	char evt_path[MAXPATHLEN];

	if (valid_debugfs_mount(debugfs_path))
		return;

	sys_dir = opendir(debugfs_path);
	if (!sys_dir)
		goto cleanup;
	sys_dir_fd = dirfd(sys_dir);

	for_each_subsystem(sys_dir, sys_dirent, sys_next) {
		int dfd = openat(sys_dir_fd, sys_dirent.d_name,
				 O_RDONLY|O_DIRECTORY), evt_dir_fd;
		if (dfd == -1)
			continue;
		evt_dir = fdopendir(dfd);
		if (!evt_dir) {
			close(dfd);
			continue;
		}
		evt_dir_fd = dirfd(evt_dir);
		for_each_event(sys_dirent, evt_dir, evt_dirent, evt_next) {
			snprintf(evt_path, MAXPATHLEN, "%s:%s",
				 sys_dirent.d_name, evt_dirent.d_name);
			fprintf(stderr, "  %-42s [%s]\n", evt_path,
				event_type_descriptors[PERF_TYPE_TRACEPOINT+1]);
		}
		closedir(evt_dir);
	}

cleanup:
	closedir(sys_dir);
}
/*
 * Print the help text for the event symbols:
 */
void print_events(void)
{
	struct event_symbol *syms = event_symbols;
	unsigned int i, type, op, prev_type = -1;
	char name[40];

	fprintf(stderr, "\n");
	fprintf(stderr, "List of pre-defined events (to be used in -e):\n");

	for (i = 0; i < ARRAY_SIZE(event_symbols); i++, syms++) {
		type = syms->type + 1;
		if (type >= ARRAY_SIZE(event_type_descriptors))
			type = 0;

		if (type != prev_type)
			fprintf(stderr, "\n");

		if (strlen(syms->alias))
			sprintf(name, "%s OR %s", syms->symbol, syms->alias);
		else
			strcpy(name, syms->symbol);
		fprintf(stderr, "  %-42s [%s]\n", name,
			event_type_descriptors[type]);

		prev_type = type;
	}

	fprintf(stderr, "\n");
	for (type = 0; type < PERF_COUNT_HW_CACHE_MAX; type++) {
		for (op = 0; op < PERF_COUNT_HW_CACHE_OP_MAX; op++) {
			/* skip invalid cache type */
			if (!is_cache_op_valid(type, op))
				continue;

			for (i = 0; i < PERF_COUNT_HW_CACHE_RESULT_MAX; i++) {
				fprintf(stderr, "  %-42s [%s]\n",
					event_cache_name(type, op, i),
					event_type_descriptors[4]);
			}
		}
	}

	fprintf(stderr, "\n");
	fprintf(stderr, "  %-42s [raw hardware event descriptor]\n",
		"rNNN");
	fprintf(stderr, "\n");

	print_tracepoint_events();

	exit(129);
}