#ifdef CONFIG_CPU_SUP_AMD

static __initconst const u64 amd_hw_cache_event_ids
				[PERF_COUNT_HW_CACHE_MAX]
				[PERF_COUNT_HW_CACHE_OP_MAX]
				[PERF_COUNT_HW_CACHE_RESULT_MAX] =
{
 [ C(L1D) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x0040, /* Data Cache Accesses        */
		[ C(RESULT_MISS)   ] = 0x0141, /* Data Cache Misses          */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x0142, /* Data Cache Refills :system */
		[ C(RESULT_MISS)   ] = 0,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x0267, /* Data Prefetcher :attempts  */
		[ C(RESULT_MISS)   ] = 0x0167, /* Data Prefetcher :cancelled */
	},
 },
 [ C(L1I) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x0080, /* Instruction cache fetches  */
		[ C(RESULT_MISS)   ] = 0x0081, /* Instruction cache misses   */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x014B, /* Prefetch Instructions :Load */
		[ C(RESULT_MISS)   ] = 0,
	},
 },
 [ C(LL) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x037D, /* Requests to L2 Cache :IC+DC */
		[ C(RESULT_MISS)   ] = 0x037E, /* L2 Cache Misses : IC+DC     */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x017F, /* L2 Fill/Writeback           */
		[ C(RESULT_MISS)   ] = 0,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0,
		[ C(RESULT_MISS)   ] = 0,
	},
 },
 [ C(DTLB) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x0040, /* Data Cache Accesses          */
		[ C(RESULT_MISS)   ] = 0x0746, /* L1_DTLB_AND_L2_DTLB_MISS.ALL */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0,
		[ C(RESULT_MISS)   ] = 0,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0,
		[ C(RESULT_MISS)   ] = 0,
	},
 },
 [ C(ITLB) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x0080, /* Instruction fetches          */
		[ C(RESULT_MISS)   ] = 0x0385, /* L1_ITLB_AND_L2_ITLB_MISS.ALL */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
 },
 [ C(BPU) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x00c2, /* Retired Branch Instr.      */
		[ C(RESULT_MISS)   ] = 0x00c3, /* Retired Mispredicted BI    */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
 },
};
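
/*
 * Illustrative example (a sketch of the user-space side, not part of this
 * driver): a generic cache event is selected by packing the cache id, the
 * operation and the result into perf_event_attr::config, one byte each:
 *
 *	attr.type   = PERF_TYPE_HW_CACHE;
 *	attr.config = PERF_COUNT_HW_CACHE_L1D |
 *		      (PERF_COUNT_HW_CACHE_OP_READ     <<  8) |
 *		      (PERF_COUNT_HW_CACHE_RESULT_MISS << 16);
 *
 * The generic x86 code resolves that triple through the table copied from
 * amd_hw_cache_event_ids above, here yielding raw event 0x0141
 * (Data Cache Misses).
 */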
/*
 * AMD Performance Monitor K7 and later.
 */
static const u64 amd_perfmon_event_map[] =
{
  [PERF_COUNT_HW_CPU_CYCLES]			= 0x0076,
  [PERF_COUNT_HW_INSTRUCTIONS]			= 0x00c0,
  [PERF_COUNT_HW_CACHE_REFERENCES]		= 0x0080,
  [PERF_COUNT_HW_CACHE_MISSES]			= 0x0081,
  [PERF_COUNT_HW_BRANCH_INSTRUCTIONS]		= 0x00c2,
  [PERF_COUNT_HW_BRANCH_MISSES]			= 0x00c3,
  [PERF_COUNT_HW_STALLED_CYCLES_FRONTEND]	= 0x00d0, /* "Decoder empty" event */
  [PERF_COUNT_HW_STALLED_CYCLES_BACKEND]	= 0x00d1, /* "Dispatch stalls" event */
};
static u64 amd_pmu_event_map(int hw_event)
{
	return amd_perfmon_event_map[hw_event];
}
static int amd_pmu_hw_config(struct perf_event *event)
{
	int ret = x86_pmu_hw_config(event);

	if (ret)
		return ret;

	if (event->attr.type != PERF_TYPE_RAW)
		return 0;

	event->hw.config |= event->attr.config & AMD64_RAW_EVENT_MASK;

	return 0;
}
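
/*
 * Illustrative note (assumed usage, not part of this driver): a raw event
 * such as "perf stat -e r0141" reaches amd_pmu_hw_config() with
 * attr.config = 0x0141, i.e. unit mask 0x01 in bits 15:8 and event select
 * 0x41 in bits 7:0.  Only the bits covered by AMD64_RAW_EVENT_MASK (event
 * select, unit mask, edge/invert/counter-mask) are copied into hw.config;
 * control bits such as enable and interrupt-enable stay under kernel control.
 */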
/*
 * AMD64 events are detected based on their event codes.
 */
static inline unsigned int amd_get_event_code(struct hw_perf_event *hwc)
{
	return ((hwc->config >> 24) & 0x0f00) | (hwc->config & 0x00ff);
}
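
/*
 * Clarifying note: AMD64 event selects are 12 bits wide; bits 7:0 sit in
 * EVNTSEL[7:0] and bits 11:8 in EVNTSEL[35:32].  The expression above folds
 * the two fields back together, e.g. a config with bit 32 set and 0xD6 in
 * the low byte decodes to event code 0x1D6.
 */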
static inline int amd_is_nb_event(struct hw_perf_event *hwc)
{
	return (hwc->config & 0xe0) == 0xe0;
}
static inline int amd_has_nb(struct cpu_hw_events *cpuc)
{
	struct amd_nb *nb = cpuc->amd_nb;

	return nb && nb->nb_id != -1;
}
static void amd_put_event_constraints(struct cpu_hw_events *cpuc,
				      struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;
	struct amd_nb *nb = cpuc->amd_nb;
	int i;

	/*
	 * only care about NB events
	 */
	if (!(amd_has_nb(cpuc) && amd_is_nb_event(hwc)))
		return;

	/*
	 * need to scan whole list because event may not have
	 * been assigned during scheduling
	 *
	 * no race condition possible because event can only
	 * be removed on one CPU at a time AND PMU is disabled
	 * when we come here
	 */
	for (i = 0; i < x86_pmu.num_counters; i++) {
		if (nb->owners[i] == event) {
			cmpxchg(nb->owners + i, event, NULL);
			break;
		}
	}
}
/*
 * AMD64 NorthBridge events need special treatment because
 * counter access needs to be synchronized across all cores
 * of a package. Refer to BKDG section 3.12.
 *
 * NB events are events measuring L3 cache and Hypertransport
 * traffic. They are identified by an event code >= 0xe00.
 * They measure events on the NorthBridge, which is shared
 * by all cores on a package. NB events are counted on a
 * shared set of counters. When a NB event is programmed
 * in a counter, the data actually comes from a shared
 * counter. Thus, access to those counters needs to be
 * synchronized.
 *
 * We implement the synchronization such that no two cores
 * can be measuring NB events using the same counters. Thus,
 * we maintain a per-NB allocation table. The available slot
 * is propagated using the event_constraint structure.
 *
 * We provide only one choice for each NB event based on
 * the fact that only NB events have restrictions. Consequently,
 * if a counter is available, there is a guarantee the NB event
 * will be assigned to it. If no slot is available, an empty
 * constraint is returned and scheduling will eventually fail
 * for this event.
 *
 * Note that all cores attached to the same NB compete for the same
 * counters to host NB events; this is why we use atomic ops. Some
 * multi-chip CPUs may have more than one NB.
 *
 * Given that resources are allocated (cmpxchg), they must be
 * eventually freed for others to use. This is accomplished by
 * calling amd_put_event_constraints().
 *
 * Non NB events are not impacted by this restriction.
 */
static struct event_constraint *
amd_get_event_constraints(struct cpu_hw_events *cpuc, struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;
	struct amd_nb *nb = cpuc->amd_nb;
	struct perf_event *old = NULL;
	int max = x86_pmu.num_counters;
	int i, j, k = -1;

	/*
	 * if not NB event or no NB, then no constraints
	 */
	if (!(amd_has_nb(cpuc) && amd_is_nb_event(hwc)))
		return &unconstrained;

	/*
	 * detect if already present, if so reuse
	 *
	 * cannot merge with actual allocation
	 * because of possible holes
	 *
	 * event can already be present yet not assigned (in hwc->idx)
	 * because of successive calls to x86_schedule_events() from
	 * hw_perf_group_sched_in() without hw_perf_enable()
	 */
	for (i = 0; i < max; i++) {
		/*
		 * keep track of first free slot
		 */
		if (k == -1 && !nb->owners[i])
			k = i;

		/* already present, reuse */
		if (nb->owners[i] == event)
			goto done;
	}
	/*
	 * not present, so grab a new slot
	 * starting either at:
	 */
	if (hwc->idx != -1) {
		/* previous assignment */
		i = hwc->idx;
	} else if (k != -1) {
		/* start from free slot found */
		i = k;
	} else {
		/*
		 * event not found, no slot found in
		 * first pass, try again from the
		 * beginning
		 */
		i = 0;
	}
	j = i;
	do {
		old = cmpxchg(nb->owners + i, NULL, event);
		if (!old)
			break;
		if (++i == max)
			i = 0;
	} while (i != j);
done:
	if (!old)
		return &nb->event_constraints[i];

	return &emptyconstraint;
}
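
/*
 * Clarifying note: each nb->event_constraints[i] has exactly one bit set in
 * its idxmsk (see amd_alloc_nb() below), so returning it forces
 * x86_schedule_events() to place the event on the single counter slot this
 * core just claimed in nb->owners[].
 */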
static struct amd_nb *amd_alloc_nb(int cpu)
{
	struct amd_nb *nb;
	int i;

	nb = kmalloc_node(sizeof(struct amd_nb), GFP_KERNEL | __GFP_ZERO,
			  cpu_to_node(cpu));
	if (!nb)
		return NULL;

	nb->nb_id = -1;

	/*
	 * initialize all possible NB constraints
	 */
	for (i = 0; i < x86_pmu.num_counters; i++) {
		__set_bit(i, nb->event_constraints[i].idxmsk);
		nb->event_constraints[i].weight = 1;
	}
	return nb;
}
static int amd_pmu_cpu_prepare(int cpu)
{
	struct cpu_hw_events *cpuc = &per_cpu(cpu_hw_events, cpu);

	WARN_ON_ONCE(cpuc->amd_nb);

	if (boot_cpu_data.x86_max_cores < 2)
		return NOTIFY_OK;

	cpuc->amd_nb = amd_alloc_nb(cpu);
	if (!cpuc->amd_nb)
		return NOTIFY_BAD;

	return NOTIFY_OK;
}
static void amd_pmu_cpu_starting(int cpu)
{
	struct cpu_hw_events *cpuc = &per_cpu(cpu_hw_events, cpu);
	struct amd_nb *nb;
	int i, nb_id;

	if (boot_cpu_data.x86_max_cores < 2)
		return;

	nb_id = amd_get_nb_id(cpu);
	WARN_ON_ONCE(nb_id == BAD_APICID);

	for_each_online_cpu(i) {
		nb = per_cpu(cpu_hw_events, i).amd_nb;
		if (WARN_ON_ONCE(!nb))
			continue;

		if (nb->nb_id == nb_id) {
			/* another core already set up this node's amd_nb: share it */
			kfree(cpuc->amd_nb);
			cpuc->amd_nb = nb;
			break;
		}
	}

	cpuc->amd_nb->nb_id = nb_id;
	cpuc->amd_nb->refcnt++;
}
static void amd_pmu_cpu_dead(int cpu)
{
	struct cpu_hw_events *cpuhw;

	if (boot_cpu_data.x86_max_cores < 2)
		return;

	cpuhw = &per_cpu(cpu_hw_events, cpu);

	if (cpuhw->amd_nb) {
		struct amd_nb *nb = cpuhw->amd_nb;

		if (nb->nb_id == -1 || --nb->refcnt == 0)
			kfree(nb);

		cpuhw->amd_nb = NULL;
	}
}
static __initconst const struct x86_pmu amd_pmu = {
	.name			= "AMD",
	.handle_irq		= x86_pmu_handle_irq,
	.disable_all		= x86_pmu_disable_all,
	.enable_all		= x86_pmu_enable_all,
	.enable			= x86_pmu_enable_event,
	.disable		= x86_pmu_disable_event,
	.hw_config		= amd_pmu_hw_config,
	.schedule_events	= x86_schedule_events,
	.eventsel		= MSR_K7_EVNTSEL0,
	.perfctr		= MSR_K7_PERFCTR0,
	.event_map		= amd_pmu_event_map,
	.max_events		= ARRAY_SIZE(amd_perfmon_event_map),
	.cntval_bits		= 48,
	.cntval_mask		= (1ULL << 48) - 1,
	/* use highest bit to detect overflow */
	.max_period		= (1ULL << 47) - 1,
	.get_event_constraints	= amd_get_event_constraints,
	.put_event_constraints	= amd_put_event_constraints,

	.cpu_prepare		= amd_pmu_cpu_prepare,
	.cpu_starting		= amd_pmu_cpu_starting,
	.cpu_dead		= amd_pmu_cpu_dead,
};
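
/*
 * Clarifying note: MSR_K7_EVNTSEL0/MSR_K7_PERFCTR0 are the legacy counter
 * MSRs used by all AMD families since the K7.  The family 15h PMU below
 * switches to MSR_F15H_PERF_CTL/MSR_F15H_PERF_CTR, the core performance
 * counter extension whose control and count registers are interleaved
 * (see the x86_pmu_addr_offset() reference in amd_pmu_init()).
 */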
#define AMD_EVENT_TYPE_MASK	0x000000F0ULL

#define AMD_EVENT_FP		0x00000000ULL ... 0x00000010ULL
#define AMD_EVENT_LS		0x00000020ULL ... 0x00000030ULL
#define AMD_EVENT_DC		0x00000040ULL ... 0x00000050ULL
#define AMD_EVENT_CU		0x00000060ULL ... 0x00000070ULL
#define AMD_EVENT_IC_DE		0x00000080ULL ... 0x00000090ULL
#define AMD_EVENT_EX_LS		0x000000C0ULL
#define AMD_EVENT_DE		0x000000D0ULL
#define AMD_EVENT_NB		0x000000E0ULL ... 0x000000F0ULL
/*
 * AMD family 15h event code/PMC mappings:
 *
 * type = event_code & 0x0F0:
 *
 * 0x000	FP	PERF_CTL[5:3]
 * 0x010	FP	PERF_CTL[5:3]
 * 0x020	LS	PERF_CTL[5:0]
 * 0x030	LS	PERF_CTL[5:0]
 * 0x040	DC	PERF_CTL[5:0]
 * 0x050	DC	PERF_CTL[5:0]
 * 0x060	CU	PERF_CTL[2:0]
 * 0x070	CU	PERF_CTL[2:0]
 * 0x080	IC/DE	PERF_CTL[2:0]
 * 0x090	IC/DE	PERF_CTL[2:0]
 * 0x0A0	---
 * 0x0B0	---
 * 0x0C0	EX/LS	PERF_CTL[5:0]
 * 0x0D0	DE	PERF_CTL[2:0]
 * 0x0E0	NB	NB_PERF_CTL[3:0]
 * 0x0F0	NB	NB_PERF_CTL[3:0]
 *
 * Exceptions (by full event code):
 *
 * 0x000	FP	PERF_CTL[3], PERF_CTL[5:3] (*)
 * 0x003	FP	PERF_CTL[3]
 * 0x004	FP	PERF_CTL[3], PERF_CTL[5:3] (*)
 * 0x00B	FP	PERF_CTL[3]
 * 0x00D	FP	PERF_CTL[3]
 * 0x023	DE	PERF_CTL[2:0]
 * 0x02D	LS	PERF_CTL[3]
 * 0x02E	LS	PERF_CTL[3,0]
 * 0x043	CU	PERF_CTL[2:0]
 * 0x045	CU	PERF_CTL[2:0]
 * 0x046	CU	PERF_CTL[2:0]
 * 0x054	CU	PERF_CTL[2:0]
 * 0x055	CU	PERF_CTL[2:0]
 * 0x08F	IC	PERF_CTL[0]
 * 0x187	DE	PERF_CTL[0]
 * 0x188	DE	PERF_CTL[0]
 * 0x0DB	EX	PERF_CTL[5:0]
 * 0x0DC	LS	PERF_CTL[5:0]
 * 0x0DD	LS	PERF_CTL[5:0]
 * 0x0DE	LS	PERF_CTL[5:0]
 * 0x0DF	LS	PERF_CTL[5:0]
 * 0x1D6	EX	PERF_CTL[5:0]
 * 0x1D8	EX	PERF_CTL[5:0]
 *
 * (*) depending on the umask all FPU counters may be used
 */
static struct event_constraint amd_f15_PMC0  = EVENT_CONSTRAINT(0, 0x01, 0); /* PERF_CTL[0] only  */
static struct event_constraint amd_f15_PMC20 = EVENT_CONSTRAINT(0, 0x07, 0); /* PERF_CTL[2:0]     */
static struct event_constraint amd_f15_PMC3  = EVENT_CONSTRAINT(0, 0x08, 0); /* PERF_CTL[3] only  */
static struct event_constraint amd_f15_PMC30 = EVENT_CONSTRAINT(0, 0x09, 0); /* PERF_CTL[3,0]     */
static struct event_constraint amd_f15_PMC50 = EVENT_CONSTRAINT(0, 0x3F, 0); /* PERF_CTL[5:0]     */
static struct event_constraint amd_f15_PMC53 = EVENT_CONSTRAINT(0, 0x38, 0); /* PERF_CTL[5:3]     */
static struct event_constraint *
amd_get_event_constraints_f15h(struct cpu_hw_events *cpuc, struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;
	unsigned int event_code = amd_get_event_code(hwc);

	switch (event_code & AMD_EVENT_TYPE_MASK) {
	case AMD_EVENT_FP:
		switch (event_code) {
		case 0x000:
			if (!(hwc->config & 0x0000F000ULL))
				break;
			if (!(hwc->config & 0x00000F00ULL))
				break;
			return &amd_f15_PMC3;
		case 0x004:
			if (hweight_long(hwc->config & ARCH_PERFMON_EVENTSEL_UMASK) <= 1)
				break;
			return &amd_f15_PMC3;
		case 0x003:
		case 0x00B:
		case 0x00D:
			return &amd_f15_PMC3;
		}
		return &amd_f15_PMC53;
	case AMD_EVENT_LS:
	case AMD_EVENT_DC:
	case AMD_EVENT_EX_LS:
		switch (event_code) {
		case 0x023:
		case 0x043:
		case 0x045:
		case 0x046:
		case 0x054:
		case 0x055:
			return &amd_f15_PMC20;
		case 0x02D:
			return &amd_f15_PMC3;
		case 0x02E:
			return &amd_f15_PMC30;
		default:
			return &amd_f15_PMC50;
		}
	case AMD_EVENT_CU:
	case AMD_EVENT_IC_DE:
	case AMD_EVENT_DE:
		switch (event_code) {
		case 0x08F:
		case 0x187:
		case 0x188:
			return &amd_f15_PMC0;
		case 0x0DB ... 0x0DF:
		case 0x1D6:
		case 0x1D8:
			return &amd_f15_PMC50;
		default:
			return &amd_f15_PMC20;
		}
	case AMD_EVENT_NB:
		/* not yet implemented */
		return &emptyconstraint;
	default:
		return &emptyconstraint;
	}
}
static __initconst const struct x86_pmu amd_pmu_f15h = {
	.name			= "AMD Family 15h",
	.handle_irq		= x86_pmu_handle_irq,
	.disable_all		= x86_pmu_disable_all,
	.enable_all		= x86_pmu_enable_all,
	.enable			= x86_pmu_enable_event,
	.disable		= x86_pmu_disable_event,
	.hw_config		= amd_pmu_hw_config,
	.schedule_events	= x86_schedule_events,
	.eventsel		= MSR_F15H_PERF_CTL,
	.perfctr		= MSR_F15H_PERF_CTR,
	.event_map		= amd_pmu_event_map,
	.max_events		= ARRAY_SIZE(amd_perfmon_event_map),
	.cntval_bits		= 48,
	.cntval_mask		= (1ULL << 48) - 1,
	/* use highest bit to detect overflow */
	.max_period		= (1ULL << 47) - 1,
	.get_event_constraints	= amd_get_event_constraints_f15h,
	/* northbridge counters not yet implemented: */
#if 0
	.put_event_constraints	= amd_put_event_constraints,

	.cpu_prepare		= amd_pmu_cpu_prepare,
	.cpu_starting		= amd_pmu_cpu_starting,
	.cpu_dead		= amd_pmu_cpu_dead,
#endif
};
static __init int amd_pmu_init(void)
{
	/* Performance-monitoring supported from K7 and later: */
	if (boot_cpu_data.x86 < 6)
		return -ENODEV;

	/*
	 * If the core performance counter extensions exist, it must be
	 * family 15h, otherwise fail. See x86_pmu_addr_offset().
	 */
	switch (boot_cpu_data.x86) {
	case 0x15:
		if (!cpu_has_perfctr_core)
			return -ENODEV;
		x86_pmu = amd_pmu_f15h;
		break;
	default:
		if (cpu_has_perfctr_core)
			return -ENODEV;
		x86_pmu = amd_pmu;
		break;
	}

	/* Events are common for all AMDs */
	memcpy(hw_cache_event_ids, amd_hw_cache_event_ids,
	       sizeof(hw_cache_event_ids));

	return 0;
}
#else /* CONFIG_CPU_SUP_AMD */

static int amd_pmu_init(void)
{
	return 0;
}

#endif /* CONFIG_CPU_SUP_AMD */