perf, x86: Add new stalled cycles events for Intel and AMD CPUs
arch/x86/kernel/cpu/perf_event_amd.c
#ifdef CONFIG_CPU_SUP_AMD

static __initconst const u64 amd_hw_cache_event_ids
                                [PERF_COUNT_HW_CACHE_MAX]
                                [PERF_COUNT_HW_CACHE_OP_MAX]
                                [PERF_COUNT_HW_CACHE_RESULT_MAX] =
{
 [ C(L1D) ] = {
        [ C(OP_READ) ] = {
                [ C(RESULT_ACCESS) ] = 0x0040, /* Data Cache Accesses         */
                [ C(RESULT_MISS)   ] = 0x0141, /* Data Cache Misses           */
        },
        [ C(OP_WRITE) ] = {
                [ C(RESULT_ACCESS) ] = 0x0142, /* Data Cache Refills :system  */
                [ C(RESULT_MISS)   ] = 0,
        },
        [ C(OP_PREFETCH) ] = {
                [ C(RESULT_ACCESS) ] = 0x0267, /* Data Prefetcher :attempts   */
                [ C(RESULT_MISS)   ] = 0x0167, /* Data Prefetcher :cancelled  */
        },
 },
 [ C(L1I ) ] = {
        [ C(OP_READ) ] = {
                [ C(RESULT_ACCESS) ] = 0x0080, /* Instruction cache fetches   */
                [ C(RESULT_MISS)   ] = 0x0081, /* Instruction cache misses    */
        },
        [ C(OP_WRITE) ] = {
                [ C(RESULT_ACCESS) ] = -1,
                [ C(RESULT_MISS)   ] = -1,
        },
        [ C(OP_PREFETCH) ] = {
                [ C(RESULT_ACCESS) ] = 0x014B, /* Prefetch Instructions :Load */
                [ C(RESULT_MISS)   ] = 0,
        },
 },
 [ C(LL  ) ] = {
        [ C(OP_READ) ] = {
                [ C(RESULT_ACCESS) ] = 0x037D, /* Requests to L2 Cache :IC+DC */
                [ C(RESULT_MISS)   ] = 0x037E, /* L2 Cache Misses :IC+DC      */
        },
        [ C(OP_WRITE) ] = {
                [ C(RESULT_ACCESS) ] = 0x017F, /* L2 Fill/Writeback           */
                [ C(RESULT_MISS)   ] = 0,
        },
        [ C(OP_PREFETCH) ] = {
                [ C(RESULT_ACCESS) ] = 0,
                [ C(RESULT_MISS)   ] = 0,
        },
 },
 [ C(DTLB) ] = {
        [ C(OP_READ) ] = {
                [ C(RESULT_ACCESS) ] = 0x0040, /* Data Cache Accesses          */
                [ C(RESULT_MISS)   ] = 0x0746, /* L1_DTLB_AND_L2_DTLB_MISS.ALL */
        },
        [ C(OP_WRITE) ] = {
                [ C(RESULT_ACCESS) ] = 0,
                [ C(RESULT_MISS)   ] = 0,
        },
        [ C(OP_PREFETCH) ] = {
                [ C(RESULT_ACCESS) ] = 0,
                [ C(RESULT_MISS)   ] = 0,
        },
 },
 [ C(ITLB) ] = {
        [ C(OP_READ) ] = {
                [ C(RESULT_ACCESS) ] = 0x0080, /* Instruction fetches          */
                [ C(RESULT_MISS)   ] = 0x0385, /* L1_ITLB_AND_L2_ITLB_MISS.ALL */
        },
        [ C(OP_WRITE) ] = {
                [ C(RESULT_ACCESS) ] = -1,
                [ C(RESULT_MISS)   ] = -1,
        },
        [ C(OP_PREFETCH) ] = {
                [ C(RESULT_ACCESS) ] = -1,
                [ C(RESULT_MISS)   ] = -1,
        },
 },
 [ C(BPU ) ] = {
        [ C(OP_READ) ] = {
                [ C(RESULT_ACCESS) ] = 0x00c2, /* Retired Branch Instr.       */
                [ C(RESULT_MISS)   ] = 0x00c3, /* Retired Mispredicted BI     */
        },
        [ C(OP_WRITE) ] = {
                [ C(RESULT_ACCESS) ] = -1,
                [ C(RESULT_MISS)   ] = -1,
        },
        [ C(OP_PREFETCH) ] = {
                [ C(RESULT_ACCESS) ] = -1,
                [ C(RESULT_MISS)   ] = -1,
        },
 },
};

/*
 * AMD Performance Monitor K7 and later.
 */
static const u64 amd_perfmon_event_map[] =
{
  [PERF_COUNT_HW_CPU_CYCLES]              = 0x0076,
  [PERF_COUNT_HW_INSTRUCTIONS]            = 0x00c0,
  [PERF_COUNT_HW_CACHE_REFERENCES]        = 0x0080,
  [PERF_COUNT_HW_CACHE_MISSES]            = 0x0081,
  [PERF_COUNT_HW_BRANCH_INSTRUCTIONS]     = 0x00c2,
  [PERF_COUNT_HW_BRANCH_MISSES]           = 0x00c3,
  [PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] = 0x00d0, /* "Decoder empty" event */
  [PERF_COUNT_HW_STALLED_CYCLES_BACKEND]  = 0x00d1, /* "Dispatch stalls" event */
};

static u64 amd_pmu_event_map(int hw_event)
{
        return amd_perfmon_event_map[hw_event];
}

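/*
 * Set up the hardware configuration for an event: run the generic x86
 * checks first, then (for raw events only) merge in the user-supplied
 * config bits that the AMD64 event-select format actually supports.
 */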
static int amd_pmu_hw_config(struct perf_event *event)
{
        int ret = x86_pmu_hw_config(event);

        if (ret)
                return ret;

        if (event->attr.type != PERF_TYPE_RAW)
                return 0;

        event->hw.config |= event->attr.config & AMD64_RAW_EVENT_MASK;

        return 0;
}

/*
 * AMD64 events are detected based on their event codes.
 */
static inline unsigned int amd_get_event_code(struct hw_perf_event *hwc)
{
        return ((hwc->config >> 24) & 0x0f00) | (hwc->config & 0x00ff);
}

static inline int amd_is_nb_event(struct hw_perf_event *hwc)
{
        return (hwc->config & 0xe0) == 0xe0;
}

static inline int amd_has_nb(struct cpu_hw_events *cpuc)
{
        struct amd_nb *nb = cpuc->amd_nb;

        return nb && nb->nb_id != -1;
}

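/*
 * Release the NB counter slot owned by this event, if any. The PMU is
 * disabled when this runs, so clearing the owners[] entry with cmpxchg()
 * is enough to give the slot back.
 */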
static void amd_put_event_constraints(struct cpu_hw_events *cpuc,
                                      struct perf_event *event)
{
        struct hw_perf_event *hwc = &event->hw;
        struct amd_nb *nb = cpuc->amd_nb;
        int i;

        /*
         * only care about NB events
         */
        if (!(amd_has_nb(cpuc) && amd_is_nb_event(hwc)))
                return;

        /*
         * need to scan whole list because event may not have
         * been assigned during scheduling
         *
         * no race condition possible because event can only
         * be removed on one CPU at a time AND PMU is disabled
         * when we come here
         */
        for (i = 0; i < x86_pmu.num_counters; i++) {
                if (nb->owners[i] == event) {
                        cmpxchg(nb->owners+i, event, NULL);
                        break;
                }
        }
}

/*
 * AMD64 NorthBridge events need special treatment because
 * counter access needs to be synchronized across all cores
 * of a package. Refer to BKDG section 3.12.
 *
 * NB events are events measuring L3 cache and HyperTransport
 * traffic. They are identified by an event code >= 0xe00.
 * They measure events on the NorthBridge, which is shared
 * by all cores on a package. NB events are counted on a
 * shared set of counters. When an NB event is programmed
 * in a counter, the data actually comes from a shared
 * counter. Thus, access to those counters needs to be
 * synchronized.
 *
 * We implement the synchronization such that no two cores
 * can be measuring NB events using the same counters. Thus,
 * we maintain a per-NB allocation table. The available slot
 * is propagated using the event_constraint structure.
 *
 * We provide only one choice for each NB event based on
 * the fact that only NB events have restrictions. Consequently,
 * if a counter is available, there is a guarantee the NB event
 * will be assigned to it. If no slot is available, an empty
 * constraint is returned and scheduling will eventually fail
 * for this event.
 *
 * Note that all cores attached to the same NB compete for the same
 * counters to host NB events; this is why we use atomic ops. Some
 * multi-chip CPUs may have more than one NB.
 *
 * Given that resources are allocated (cmpxchg), they must be
 * eventually freed for others to use. This is accomplished by
 * calling amd_put_event_constraints().
 *
 * Non-NB events are not impacted by this restriction.
 */
static struct event_constraint *
amd_get_event_constraints(struct cpu_hw_events *cpuc, struct perf_event *event)
{
        struct hw_perf_event *hwc = &event->hw;
        struct amd_nb *nb = cpuc->amd_nb;
        struct perf_event *old = NULL;
        int max = x86_pmu.num_counters;
        int i, j, k = -1;

        /*
         * if not NB event or no NB, then no constraints
         */
        if (!(amd_has_nb(cpuc) && amd_is_nb_event(hwc)))
                return &unconstrained;

        /*
         * detect if already present, if so reuse
         *
         * cannot merge with actual allocation
         * because of possible holes
         *
         * event can already be present yet not assigned (in hwc->idx)
         * because of successive calls to x86_schedule_events() from
         * hw_perf_group_sched_in() without hw_perf_enable()
         */
        for (i = 0; i < max; i++) {
                /*
                 * keep track of first free slot
                 */
                if (k == -1 && !nb->owners[i])
                        k = i;

                /* already present, reuse */
                if (nb->owners[i] == event)
                        goto done;
        }
        /*
         * not present, so grab a new slot
         * starting either at:
         */
        if (hwc->idx != -1) {
                /* previous assignment */
                i = hwc->idx;
        } else if (k != -1) {
                /* start from free slot found */
                i = k;
        } else {
                /*
                 * event not found, no slot found in
                 * first pass, try again from the
                 * beginning
                 */
                i = 0;
        }
        j = i;
        do {
                old = cmpxchg(nb->owners+i, NULL, event);
                if (!old)
                        break;
                if (++i == max)
                        i = 0;
        } while (i != j);
done:
        if (!old)
                return &nb->event_constraints[i];

        return &emptyconstraint;
}

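/*
 * Allocate the per-NorthBridge structure used to track shared-counter
 * ownership, with one single-counter constraint per programmable PMC.
 */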
static struct amd_nb *amd_alloc_nb(int cpu)
{
        struct amd_nb *nb;
        int i;

        nb = kmalloc_node(sizeof(struct amd_nb), GFP_KERNEL | __GFP_ZERO,
                          cpu_to_node(cpu));
        if (!nb)
                return NULL;

        nb->nb_id = -1;

        /*
         * initialize all possible NB constraints
         */
        for (i = 0; i < x86_pmu.num_counters; i++) {
                __set_bit(i, nb->event_constraints[i].idxmsk);
                nb->event_constraints[i].weight = 1;
        }
        return nb;
}

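/*
 * CPU hotplug "prepare" callback: pre-allocate the NB tracking structure
 * for this CPU. Single-core parts have no shared NorthBridge counters
 * and skip the allocation.
 */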
static int amd_pmu_cpu_prepare(int cpu)
{
        struct cpu_hw_events *cpuc = &per_cpu(cpu_hw_events, cpu);

        WARN_ON_ONCE(cpuc->amd_nb);

        if (boot_cpu_data.x86_max_cores < 2)
                return NOTIFY_OK;

        cpuc->amd_nb = amd_alloc_nb(cpu);
        if (!cpuc->amd_nb)
                return NOTIFY_BAD;

        return NOTIFY_OK;
}

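/*
 * CPU hotplug "starting" callback: if another online CPU already holds an
 * amd_nb structure for this node, adopt it (freeing our pre-allocated
 * copy) and bump its refcount; otherwise keep the one allocated in
 * amd_pmu_cpu_prepare().
 */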
static void amd_pmu_cpu_starting(int cpu)
{
        struct cpu_hw_events *cpuc = &per_cpu(cpu_hw_events, cpu);
        struct amd_nb *nb;
        int i, nb_id;

        if (boot_cpu_data.x86_max_cores < 2)
                return;

        nb_id = amd_get_nb_id(cpu);
        WARN_ON_ONCE(nb_id == BAD_APICID);

        for_each_online_cpu(i) {
                nb = per_cpu(cpu_hw_events, i).amd_nb;
                if (WARN_ON_ONCE(!nb))
                        continue;

                if (nb->nb_id == nb_id) {
                        kfree(cpuc->amd_nb);
                        cpuc->amd_nb = nb;
                        break;
                }
        }

        cpuc->amd_nb->nb_id = nb_id;
        cpuc->amd_nb->refcnt++;
}

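/*
 * CPU hotplug "dead" callback: drop this CPU's reference on the shared
 * amd_nb structure and free it once the last CPU of the node is gone.
 */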
static void amd_pmu_cpu_dead(int cpu)
{
        struct cpu_hw_events *cpuhw;

        if (boot_cpu_data.x86_max_cores < 2)
                return;

        cpuhw = &per_cpu(cpu_hw_events, cpu);

        if (cpuhw->amd_nb) {
                struct amd_nb *nb = cpuhw->amd_nb;

                if (nb->nb_id == -1 || --nb->refcnt == 0)
                        kfree(nb);

                cpuhw->amd_nb = NULL;
        }
}

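/*
 * PMU description for K7 and later (pre-family-15h) AMD CPUs: four
 * counters programmed through the legacy MSR_K7_EVNTSEL/MSR_K7_PERFCTR
 * MSRs, with NorthBridge event scheduling handled by the constraint
 * callbacks above.
 */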
static __initconst const struct x86_pmu amd_pmu = {
        .name                   = "AMD",
        .handle_irq             = x86_pmu_handle_irq,
        .disable_all            = x86_pmu_disable_all,
        .enable_all             = x86_pmu_enable_all,
        .enable                 = x86_pmu_enable_event,
        .disable                = x86_pmu_disable_event,
        .hw_config              = amd_pmu_hw_config,
        .schedule_events        = x86_schedule_events,
        .eventsel               = MSR_K7_EVNTSEL0,
        .perfctr                = MSR_K7_PERFCTR0,
        .event_map              = amd_pmu_event_map,
        .max_events             = ARRAY_SIZE(amd_perfmon_event_map),
        .num_counters           = 4,
        .cntval_bits            = 48,
        .cntval_mask            = (1ULL << 48) - 1,
        .apic                   = 1,
        /* use highest bit to detect overflow */
        .max_period             = (1ULL << 47) - 1,
        .get_event_constraints  = amd_get_event_constraints,
        .put_event_constraints  = amd_put_event_constraints,

        .cpu_prepare            = amd_pmu_cpu_prepare,
        .cpu_starting           = amd_pmu_cpu_starting,
        .cpu_dead               = amd_pmu_cpu_dead,
};

/* AMD Family 15h */

#define AMD_EVENT_TYPE_MASK     0x000000F0ULL

#define AMD_EVENT_FP            0x00000000ULL ... 0x00000010ULL
#define AMD_EVENT_LS            0x00000020ULL ... 0x00000030ULL
#define AMD_EVENT_DC            0x00000040ULL ... 0x00000050ULL
#define AMD_EVENT_CU            0x00000060ULL ... 0x00000070ULL
#define AMD_EVENT_IC_DE         0x00000080ULL ... 0x00000090ULL
#define AMD_EVENT_EX_LS         0x000000C0ULL
#define AMD_EVENT_DE            0x000000D0ULL
#define AMD_EVENT_NB            0x000000E0ULL ... 0x000000F0ULL

/*
 * AMD family 15h event code/PMC mappings:
 *
 * type = event_code & 0x0F0:
 *
 * 0x000        FP      PERF_CTL[5:3]
 * 0x010        FP      PERF_CTL[5:3]
 * 0x020        LS      PERF_CTL[5:0]
 * 0x030        LS      PERF_CTL[5:0]
 * 0x040        DC      PERF_CTL[5:0]
 * 0x050        DC      PERF_CTL[5:0]
 * 0x060        CU      PERF_CTL[2:0]
 * 0x070        CU      PERF_CTL[2:0]
 * 0x080        IC/DE   PERF_CTL[2:0]
 * 0x090        IC/DE   PERF_CTL[2:0]
 * 0x0A0        ---
 * 0x0B0        ---
 * 0x0C0        EX/LS   PERF_CTL[5:0]
 * 0x0D0        DE      PERF_CTL[2:0]
 * 0x0E0        NB      NB_PERF_CTL[3:0]
 * 0x0F0        NB      NB_PERF_CTL[3:0]
 *
 * Exceptions:
 *
 * 0x000        FP      PERF_CTL[3], PERF_CTL[5:3] (*)
 * 0x003        FP      PERF_CTL[3]
 * 0x004        FP      PERF_CTL[3], PERF_CTL[5:3] (*)
 * 0x00B        FP      PERF_CTL[3]
 * 0x00D        FP      PERF_CTL[3]
 * 0x023        DE      PERF_CTL[2:0]
 * 0x02D        LS      PERF_CTL[3]
 * 0x02E        LS      PERF_CTL[3,0]
 * 0x043        CU      PERF_CTL[2:0]
 * 0x045        CU      PERF_CTL[2:0]
 * 0x046        CU      PERF_CTL[2:0]
 * 0x054        CU      PERF_CTL[2:0]
 * 0x055        CU      PERF_CTL[2:0]
 * 0x08F        IC      PERF_CTL[0]
 * 0x187        DE      PERF_CTL[0]
 * 0x188        DE      PERF_CTL[0]
 * 0x0DB        EX      PERF_CTL[5:0]
 * 0x0DC        LS      PERF_CTL[5:0]
 * 0x0DD        LS      PERF_CTL[5:0]
 * 0x0DE        LS      PERF_CTL[5:0]
 * 0x0DF        LS      PERF_CTL[5:0]
 * 0x1D6        EX      PERF_CTL[5:0]
 * 0x1D8        EX      PERF_CTL[5:0]
 *
 * (*) depending on the umask all FPU counters may be used
 */

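/*
 * The constraints below carry no event code or mask, only the bitmask of
 * usable PERF_CTL counters: 0x01 = PMC0, 0x07 = PMC[2:0], 0x08 = PMC3,
 * 0x09 = PMC[3,0], 0x3F = PMC[5:0], 0x38 = PMC[5:3].
 */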
static struct event_constraint amd_f15_PMC0  = EVENT_CONSTRAINT(0, 0x01, 0);
static struct event_constraint amd_f15_PMC20 = EVENT_CONSTRAINT(0, 0x07, 0);
static struct event_constraint amd_f15_PMC3  = EVENT_CONSTRAINT(0, 0x08, 0);
static struct event_constraint amd_f15_PMC30 = EVENT_CONSTRAINT(0, 0x09, 0);
static struct event_constraint amd_f15_PMC50 = EVENT_CONSTRAINT(0, 0x3F, 0);
static struct event_constraint amd_f15_PMC53 = EVENT_CONSTRAINT(0, 0x38, 0);

static struct event_constraint *
amd_get_event_constraints_f15h(struct cpu_hw_events *cpuc, struct perf_event *event)
{
        struct hw_perf_event *hwc = &event->hw;
        unsigned int event_code = amd_get_event_code(hwc);

        switch (event_code & AMD_EVENT_TYPE_MASK) {
        case AMD_EVENT_FP:
                switch (event_code) {
                case 0x000:
                        if (!(hwc->config & 0x0000F000ULL))
                                break;
                        if (!(hwc->config & 0x00000F00ULL))
                                break;
                        return &amd_f15_PMC3;
                case 0x004:
                        if (hweight_long(hwc->config & ARCH_PERFMON_EVENTSEL_UMASK) <= 1)
                                break;
                        return &amd_f15_PMC3;
                case 0x003:
                case 0x00B:
                case 0x00D:
                        return &amd_f15_PMC3;
                }
                return &amd_f15_PMC53;
        case AMD_EVENT_LS:
        case AMD_EVENT_DC:
        case AMD_EVENT_EX_LS:
                switch (event_code) {
                case 0x023:
                case 0x043:
                case 0x045:
                case 0x046:
                case 0x054:
                case 0x055:
                        return &amd_f15_PMC20;
                case 0x02D:
                        return &amd_f15_PMC3;
                case 0x02E:
                        return &amd_f15_PMC30;
                default:
                        return &amd_f15_PMC50;
                }
        case AMD_EVENT_CU:
        case AMD_EVENT_IC_DE:
        case AMD_EVENT_DE:
                switch (event_code) {
                case 0x08F:
                case 0x187:
                case 0x188:
                        return &amd_f15_PMC0;
                case 0x0DB ... 0x0DF:
                case 0x1D6:
                case 0x1D8:
                        return &amd_f15_PMC50;
                default:
                        return &amd_f15_PMC20;
                }
        case AMD_EVENT_NB:
                /* not yet implemented */
                return &emptyconstraint;
        default:
                return &emptyconstraint;
        }
}

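/*
 * Family 15h core PMU: six counters programmed through the
 * MSR_F15H_PERF_CTL/MSR_F15H_PERF_CTR pairs, with per-event counter
 * restrictions handled by amd_get_event_constraints_f15h() above.
 */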
static __initconst const struct x86_pmu amd_pmu_f15h = {
        .name                   = "AMD Family 15h",
        .handle_irq             = x86_pmu_handle_irq,
        .disable_all            = x86_pmu_disable_all,
        .enable_all             = x86_pmu_enable_all,
        .enable                 = x86_pmu_enable_event,
        .disable                = x86_pmu_disable_event,
        .hw_config              = amd_pmu_hw_config,
        .schedule_events        = x86_schedule_events,
        .eventsel               = MSR_F15H_PERF_CTL,
        .perfctr                = MSR_F15H_PERF_CTR,
        .event_map              = amd_pmu_event_map,
        .max_events             = ARRAY_SIZE(amd_perfmon_event_map),
        .num_counters           = 6,
        .cntval_bits            = 48,
        .cntval_mask            = (1ULL << 48) - 1,
        .apic                   = 1,
        /* use highest bit to detect overflow */
        .max_period             = (1ULL << 47) - 1,
        .get_event_constraints  = amd_get_event_constraints_f15h,
        /* northbridge counters not yet implemented: */
#if 0
        .put_event_constraints  = amd_put_event_constraints,

        .cpu_prepare            = amd_pmu_cpu_prepare,
        .cpu_starting           = amd_pmu_cpu_starting,
        .cpu_dead               = amd_pmu_cpu_dead,
#endif
};

static __init int amd_pmu_init(void)
{
        /* Performance-monitoring supported from K7 and later: */
        if (boot_cpu_data.x86 < 6)
                return -ENODEV;

        /*
         * If core performance counter extensions exist, the CPU must be
         * family 15h, otherwise fail. See x86_pmu_addr_offset().
         */
        switch (boot_cpu_data.x86) {
        case 0x15:
                if (!cpu_has_perfctr_core)
                        return -ENODEV;
                x86_pmu = amd_pmu_f15h;
                break;
        default:
                if (cpu_has_perfctr_core)
                        return -ENODEV;
                x86_pmu = amd_pmu;
                break;
        }

        /* Events are common for all AMDs */
        memcpy(hw_cache_event_ids, amd_hw_cache_event_ids,
               sizeof(hw_cache_event_ids));

        return 0;
}

#else /* CONFIG_CPU_SUP_AMD */

static int amd_pmu_init(void)
{
        return 0;
}

#endif