perf list: Add cache events
linux-2.6/linux-2.6-openrd.git: tools/perf/util/parse-events.c
blob 5184959e0615b7e79e38058cac0594a55a3b6fc4

#include "../perf.h"
#include "util.h"
#include "parse-options.h"
#include "parse-events.h"
#include "exec_cmd.h"
#include "string.h"

extern char *strcasestr(const char *haystack, const char *needle);

int nr_counters;

struct perf_counter_attr attrs[MAX_COUNTERS];

struct event_symbol {
	u8	type;
	u64	config;
	char	*symbol;
	char	*alias;
};

#define CHW(x) .type = PERF_TYPE_HARDWARE, .config = PERF_COUNT_HW_##x
#define CSW(x) .type = PERF_TYPE_SOFTWARE, .config = PERF_COUNT_SW_##x

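/*
 * Built-in symbolic events: each entry maps a primary name and an
 * optional alias to a (type, config) pair via the CHW()/CSW() helpers.
 */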
static struct event_symbol event_symbols[] = {
	{ CHW(CPU_CYCLES),		"cpu-cycles",		"cycles"	},
	{ CHW(INSTRUCTIONS),		"instructions",		""		},
	{ CHW(CACHE_REFERENCES),	"cache-references",	""		},
	{ CHW(CACHE_MISSES),		"cache-misses",		""		},
	{ CHW(BRANCH_INSTRUCTIONS),	"branch-instructions",	"branches"	},
	{ CHW(BRANCH_MISSES),		"branch-misses",	""		},
	{ CHW(BUS_CYCLES),		"bus-cycles",		""		},

	{ CSW(CPU_CLOCK),		"cpu-clock",		""		},
	{ CSW(TASK_CLOCK),		"task-clock",		""		},
	{ CSW(PAGE_FAULTS),		"page-faults",		"faults"	},
	{ CSW(PAGE_FAULTS_MIN),		"minor-faults",		""		},
	{ CSW(PAGE_FAULTS_MAJ),		"major-faults",		""		},
	{ CSW(CONTEXT_SWITCHES),	"context-switches",	"cs"		},
	{ CSW(CPU_MIGRATIONS),		"cpu-migrations",	"migrations"	},
};

#define __PERF_COUNTER_FIELD(config, name) \
	((config & PERF_COUNTER_##name##_MASK) >> PERF_COUNTER_##name##_SHIFT)

#define PERF_COUNTER_RAW(config)	__PERF_COUNTER_FIELD(config, RAW)
#define PERF_COUNTER_CONFIG(config)	__PERF_COUNTER_FIELD(config, CONFIG)
#define PERF_COUNTER_TYPE(config)	__PERF_COUNTER_FIELD(config, TYPE)
#define PERF_COUNTER_ID(config)		__PERF_COUNTER_FIELD(config, EVENT)

static char *hw_event_names[] = {
	"cycles",
	"instructions",
	"cache-references",
	"cache-misses",
	"branches",
	"branch-misses",
	"bus-cycles",
};

static char *sw_event_names[] = {
	"cpu-clock-msecs",
	"task-clock-msecs",
	"page-faults",
	"context-switches",
	"CPU-migrations",
	"minor-faults",
	"major-faults",
};

#define MAX_ALIASES 8

static char *hw_cache[][MAX_ALIASES] = {
	{ "L1-d$",	"l1-d",		"l1d",		"L1-data",		},
	{ "L1-i$",	"l1-i",		"l1i",		"L1-instruction",	},
	{ "LLC",	"L2",							},
	{ "dTLB",	"d-tlb",	"Data-TLB",				},
	{ "iTLB",	"i-tlb",	"Instruction-TLB",			},
	{ "branch",	"branches",	"bpu",		"btb",	"bpc",		},
};

static char *hw_cache_op[][MAX_ALIASES] = {
	{ "load",	"loads",	"read",					},
	{ "store",	"stores",	"write",				},
	{ "prefetch",	"prefetches",	"speculative-read", "speculative-load",	},
};

static char *hw_cache_result[][MAX_ALIASES] = {
	{ "refs",	"Reference",	"ops",		"access",		},
	{ "misses",	"miss",							},
};

#define C(x)		PERF_COUNT_HW_CACHE_##x
#define CACHE_READ	(1 << C(OP_READ))
#define CACHE_WRITE	(1 << C(OP_WRITE))
#define CACHE_PREFETCH	(1 << C(OP_PREFETCH))
#define COP(x)		(1 << x)

/*
 * cache operation stat
 * L1I : Read and prefetch only
 * ITLB and BPU : Read-only
 */
static unsigned long hw_cache_stat[C(MAX)] = {
	[C(L1D)]	= (CACHE_READ | CACHE_WRITE | CACHE_PREFETCH),
	[C(L1I)]	= (CACHE_READ | CACHE_PREFETCH),
	[C(LL)]		= (CACHE_READ | CACHE_WRITE | CACHE_PREFETCH),
	[C(DTLB)]	= (CACHE_READ | CACHE_WRITE | CACHE_PREFETCH),
	[C(ITLB)]	= (CACHE_READ),
	[C(BPU)]	= (CACHE_READ),
};

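/* Returns 1 if the (cache_type, cache_op) combination is supported per hw_cache_stat, 0 otherwise. */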
static int is_cache_op_valid(u8 cache_type, u8 cache_op)
{
	if (hw_cache_stat[cache_type] & COP(cache_op))
		return 1;	/* valid */
	else
		return 0;	/* invalid */
}

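/*
 * Compose a printable "<cache>-<op>[-<result>]" name in a static buffer.
 * The buffer is shared across calls, so the result is only valid until
 * the next call.
 */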
static char *event_cache_name(u8 cache_type, u8 cache_op, u8 cache_result)
{
	static char name[50];

	if (cache_result) {
		sprintf(name, "%s-%s-%s", hw_cache[cache_type][0],
			hw_cache_op[cache_op][0],
			hw_cache_result[cache_result][0]);
	} else {
		sprintf(name, "%s-%s", hw_cache[cache_type][0],
			hw_cache_op[cache_op][1]);
	}

	return name;
}

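/*
 * Map attrs[counter] back to a human-readable event name: raw events are
 * printed as "raw 0x<config>", hardware/software events are looked up in
 * the name tables, and hardware cache events are decoded from the packed
 * (type, op, result) byte fields of attr.config.
 */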
char *event_name(int counter)
{
	u64 config = attrs[counter].config;
	int type = attrs[counter].type;
	static char buf[32];

	if (attrs[counter].type == PERF_TYPE_RAW) {
		sprintf(buf, "raw 0x%llx", config);
		return buf;
	}

	switch (type) {
	case PERF_TYPE_HARDWARE:
		if (config < PERF_COUNT_HW_MAX)
			return hw_event_names[config];
		return "unknown-hardware";

	case PERF_TYPE_HW_CACHE: {
		u8 cache_type, cache_op, cache_result;

		cache_type = (config >> 0) & 0xff;
		if (cache_type > PERF_COUNT_HW_CACHE_MAX)
			return "unknown-ext-hardware-cache-type";

		cache_op = (config >> 8) & 0xff;
		if (cache_op > PERF_COUNT_HW_CACHE_OP_MAX)
			return "unknown-ext-hardware-cache-op";

		cache_result = (config >> 16) & 0xff;
		if (cache_result > PERF_COUNT_HW_CACHE_RESULT_MAX)
			return "unknown-ext-hardware-cache-result";

		if (!is_cache_op_valid(cache_type, cache_op))
			return "invalid-cache";

		return event_cache_name(cache_type, cache_op, cache_result);
	}

	case PERF_TYPE_SOFTWARE:
		if (config < PERF_COUNT_SW_MAX)
			return sw_event_names[config];
		return "unknown-software";

	default:
		break;
	}

	return "unknown";
}

static int parse_aliases(const char **str, char *names[][MAX_ALIASES], int size)
{
	int i, j;
	int n, longest = -1;

	for (i = 0; i < size; i++) {
		for (j = 0; j < MAX_ALIASES && names[i][j]; j++) {
			n = strlen(names[i][j]);
			if (n > longest && !strncasecmp(*str, names[i][j], n))
				longest = n;
		}
		if (longest > 0) {
			*str += longest;
			return i;
		}
	}

	return -1;
}

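/*
 * Parse a generic hardware cache event of the form
 *	<cache-type>[-<op>][-<result>]
 * e.g. "L1-d$-load-misses" or "LLC-misses". A missing op falls back to
 * "read" and a missing result falls back to "access"; an unrecognized
 * cache type means this is not a cache event and 0 is returned.
 */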
static int
parse_generic_hw_event(const char **str, struct perf_counter_attr *attr)
{
	const char *s = *str;
	int cache_type = -1, cache_op = -1, cache_result = -1;

	cache_type = parse_aliases(&s, hw_cache, PERF_COUNT_HW_CACHE_MAX);
	/*
	 * No fallback - if we cannot get a clear cache type
	 * then bail out:
	 */
	if (cache_type == -1)
		return 0;

	while ((cache_op == -1 || cache_result == -1) && *s == '-') {
		++s;

		if (cache_op == -1) {
			cache_op = parse_aliases(&s, hw_cache_op,
						PERF_COUNT_HW_CACHE_OP_MAX);
			if (cache_op >= 0) {
				if (!is_cache_op_valid(cache_type, cache_op))
					return 0;
				continue;
			}
		}

		if (cache_result == -1) {
			cache_result = parse_aliases(&s, hw_cache_result,
						PERF_COUNT_HW_CACHE_RESULT_MAX);
			if (cache_result >= 0)
				continue;
		}

		/*
		 * Can't parse this as a cache op or result, so back up
		 * to the '-'.
		 */
		--s;
		break;
	}

	/*
	 * Fall back to reads:
	 */
	if (cache_op == -1)
		cache_op = PERF_COUNT_HW_CACHE_OP_READ;

	/*
	 * Fall back to accesses:
	 */
	if (cache_result == -1)
		cache_result = PERF_COUNT_HW_CACHE_RESULT_ACCESS;

	attr->config = cache_type | (cache_op << 8) | (cache_result << 16);
	attr->type = PERF_TYPE_HW_CACHE;

	*str = s;
	return 1;
}

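/*
 * Return the number of characters of str consumed if it starts with
 * event_symbols[i]'s symbol or (non-empty) alias, 0 otherwise.
 */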
static int check_events(const char *str, unsigned int i)
{
	int n;

	n = strlen(event_symbols[i].symbol);
	if (!strncmp(str, event_symbols[i].symbol, n))
		return n;

	n = strlen(event_symbols[i].alias);
	if (n)
		if (!strncmp(str, event_symbols[i].alias, n))
			return n;
	return 0;
}

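/* Match *strp against the event_symbols table; on success fill attr and advance *strp. */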
static int
parse_symbolic_event(const char **strp, struct perf_counter_attr *attr)
{
	const char *str = *strp;
	unsigned int i;
	int n;

	for (i = 0; i < ARRAY_SIZE(event_symbols); i++) {
		n = check_events(str, i);
		if (n > 0) {
			attr->type = event_symbols[i].type;
			attr->config = event_symbols[i].config;
			*strp = str + n;
			return 1;
		}
	}
	return 0;
}

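/* Raw events are written as "rNNN", where NNN is a hex config value, e.g. "r1a8". */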
static int parse_raw_event(const char **strp, struct perf_counter_attr *attr)
{
	const char *str = *strp;
	u64 config;
	int n;

	if (*str != 'r')
		return 0;
	n = hex2u64(str + 1, &config);
	if (n > 0) {
		*strp = str + n + 1;
		attr->type = PERF_TYPE_RAW;
		attr->config = config;
		return 1;
	}
	return 0;
}

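/* Numeric events are written as "<type>:<config>", both fields parsed with strtoul(). */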
static int
parse_numeric_event(const char **strp, struct perf_counter_attr *attr)
{
	const char *str = *strp;
	char *endp;
	unsigned long type;
	u64 config;

	type = strtoul(str, &endp, 0);
	if (endp > str && type < PERF_TYPE_MAX && *endp == ':') {
		str = endp + 1;
		config = strtoul(str, &endp, 0);
		if (endp > str) {
			attr->type = type;
			attr->config = config;
			*strp = endp;
			return 1;
		}
	}
	return 0;
}

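/*
 * An optional ":u"/":k"/":h" suffix (in any combination) restricts the
 * event to user, kernel or hypervisor context: each letter that appears
 * clears the corresponding exclude_* flag, while levels whose letter is
 * absent remain excluded.
 */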
static int
parse_event_modifier(const char **strp, struct perf_counter_attr *attr)
{
	const char *str = *strp;
	int eu = 1, ek = 1, eh = 1;

	if (*str++ != ':')
		return 0;
	while (*str) {
		if (*str == 'u')
			eu = 0;
		else if (*str == 'k')
			ek = 0;
		else if (*str == 'h')
			eh = 0;
		else
			break;
		++str;
	}
	if (str >= *strp + 2) {
		*strp = str;
		attr->exclude_user = eu;
		attr->exclude_kernel = ek;
		attr->exclude_hv = eh;
		return 1;
	}
	return 0;
}

/*
 * Each event can have multiple symbolic names.
 * Symbolic names are (almost) exactly matched.
 */
static int parse_event_symbols(const char **str, struct perf_counter_attr *attr)
{
	if (!(parse_raw_event(str, attr) ||
	      parse_numeric_event(str, attr) ||
	      parse_symbolic_event(str, attr) ||
	      parse_generic_hw_event(str, attr)))
		return 0;

	parse_event_modifier(str, attr);

	return 1;
}

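/*
 * Option callback: parse a comma/whitespace separated list of event
 * specifications and append them to the global attrs[] array.
 *
 * Illustrative only (not part of this file): a builtin command would
 * typically hook this parser up through parse-options, roughly like
 *
 *	OPT_CALLBACK('e', "event", NULL, "event",
 *		     "event selector. use 'perf list' to list available events",
 *		     parse_events),
 */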
int parse_events(const struct option *opt __used, const char *str, int unset __used)
{
	struct perf_counter_attr attr;

	for (;;) {
		if (nr_counters == MAX_COUNTERS)
			return -1;

		memset(&attr, 0, sizeof(attr));
		if (!parse_event_symbols(&str, &attr))
			return -1;

		if (!(*str == 0 || *str == ',' || isspace(*str)))
			return -1;

		attrs[nr_counters] = attr;
		nr_counters++;

		if (*str == 0)
			break;
		if (*str == ',')
			++str;
		while (isspace(*str))
			++str;
	}

	return 0;
}

static const char * const event_type_descriptors[] = {
	"",
	"Hardware event",
	"Software event",
	"Tracepoint event",
	"Hardware cache event",
};

/*
 * Print the help text for the event symbols:
 */
void print_events(void)
{
	struct event_symbol *syms = event_symbols;
	unsigned int i, type, op, prev_type = -1;
	char name[40];

	fprintf(stderr, "\n");
	fprintf(stderr, "List of pre-defined events (to be used in -e):\n");

	for (i = 0; i < ARRAY_SIZE(event_symbols); i++, syms++) {
		type = syms->type + 1;
		if (type > ARRAY_SIZE(event_type_descriptors))
			type = 0;

		if (type != prev_type)
			fprintf(stderr, "\n");

		if (strlen(syms->alias))
			sprintf(name, "%s OR %s", syms->symbol, syms->alias);
		else
			strcpy(name, syms->symbol);
		fprintf(stderr, "  %-40s [%s]\n", name,
			event_type_descriptors[type]);

		prev_type = type;
	}

	fprintf(stderr, "\n");
	for (type = 0; type < PERF_COUNT_HW_CACHE_MAX; type++) {
		for (op = 0; op < PERF_COUNT_HW_CACHE_OP_MAX; op++) {
			/* skip invalid cache type */
			if (!is_cache_op_valid(type, op))
				continue;

			for (i = 0; i < PERF_COUNT_HW_CACHE_RESULT_MAX; i++) {
				fprintf(stderr, "  %-40s [%s]\n",
					event_cache_name(type, op, i),
					event_type_descriptors[4]);
			}
		}
	}

	fprintf(stderr, "\n");
	fprintf(stderr, "  %-40s [raw hardware event descriptor]\n",
		"rNNN");
	fprintf(stderr, "\n");

	exit(129);
}