/*
 * Copyright (C) 2013 Advanced Micro Devices, Inc.
 *
 * Author: Jacob Shin <jacob.shin@amd.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/perf_event.h>
#include <linux/percpu.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/cpu.h>
#include <linux/cpumask.h>

#include <asm/cpufeature.h>
#include <asm/perf_event.h>
#include <asm/msr.h>

#define NUM_COUNTERS_NB		4
#define NUM_COUNTERS_L2		4
#define MAX_COUNTERS		NUM_COUNTERS_NB

#define RDPMC_BASE_NB		6
#define RDPMC_BASE_L2		10

#define COUNTER_SHIFT		16
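
/*
 * The NB and L2I blocks each expose NUM_COUNTERS_* control/counter MSR
 * pairs.  RDPMC_BASE_NB and RDPMC_BASE_L2 are the fixed rdpmc indices of
 * the first NB and L2I counter (amd_uncore_add() adds the counter slot to
 * this base).  COUNTER_SHIFT matches the 48-bit counter width: values are
 * shifted up by 16 bits so that deltas sign-extend in amd_uncore_read().
 */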

struct amd_uncore {
	int id;
	int refcnt;
	int cpu;
	int num_counters;
	int rdpmc_base;
	u32 msr_base;
	cpumask_t *active_mask;
	struct pmu *pmu;
	struct perf_event *events[MAX_COUNTERS];
	struct amd_uncore *free_when_cpu_online;
};
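
/*
 * One amd_uncore instance is shared by every CPU in the same NB or L2I
 * domain: the per-cpu pointers below end up referencing the same object
 * once amd_uncore_cpu_starting() has merged duplicates.  ->cpu is the CPU
 * that hosts the domain's events, ->refcnt counts the CPUs sharing the
 * instance, and ->free_when_cpu_online parks a redundant allocation until
 * uncore_online() can safely kfree() it.
 */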

static struct amd_uncore * __percpu *amd_uncore_nb;
static struct amd_uncore * __percpu *amd_uncore_l2;

static struct pmu amd_nb_pmu;
static struct pmu amd_l2_pmu;

static cpumask_t amd_nb_active_mask;
static cpumask_t amd_l2_active_mask;

static bool is_nb_event(struct perf_event *event)
{
	return event->pmu->type == amd_nb_pmu.type;
}

static bool is_l2_event(struct perf_event *event)
{
	return event->pmu->type == amd_l2_pmu.type;
}

static struct amd_uncore *event_to_amd_uncore(struct perf_event *event)
{
	if (is_nb_event(event) && amd_uncore_nb)
		return *per_cpu_ptr(amd_uncore_nb, event->cpu);
	else if (is_l2_event(event) && amd_uncore_l2)
		return *per_cpu_ptr(amd_uncore_l2, event->cpu);

	return NULL;
}
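
/*
 * amd_uncore_read() below accumulates counts without using overflow
 * interrupts.  The counters are 48 bits wide, so prev and new are shifted
 * up by COUNTER_SHIFT before subtracting and the signed delta is shifted
 * back down; this keeps the delta correct across a counter wrap.
 */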

static void amd_uncore_read(struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;
	u64 prev, new;
	s64 delta;

	/*
	 * since we do not enable counter overflow interrupts,
	 * we do not have to worry about prev_count changing on us
	 */

	prev = local64_read(&hwc->prev_count);
	rdpmcl(hwc->event_base_rdpmc, new);
	local64_set(&hwc->prev_count, new);
	delta = (new << COUNTER_SHIFT) - (prev << COUNTER_SHIFT);
	delta >>= COUNTER_SHIFT;
	local64_add(delta, &event->count);
}

static void amd_uncore_start(struct perf_event *event, int flags)
{
	struct hw_perf_event *hwc = &event->hw;

	if (flags & PERF_EF_RELOAD)
		wrmsrl(hwc->event_base, (u64)local64_read(&hwc->prev_count));

	hwc->state = 0;
	wrmsrl(hwc->config_base, (hwc->config | ARCH_PERFMON_EVENTSEL_ENABLE));
	perf_event_update_userpage(event);
}

static void amd_uncore_stop(struct perf_event *event, int flags)
{
	struct hw_perf_event *hwc = &event->hw;

	wrmsrl(hwc->config_base, hwc->config);
	hwc->state |= PERF_HES_STOPPED;

	if ((flags & PERF_EF_UPDATE) && !(hwc->state & PERF_HES_UPTODATE)) {
		amd_uncore_read(event);
		hwc->state |= PERF_HES_UPTODATE;
	}
}
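
/*
 * Counter slots in uncore->events[] are shared by all CPUs of the domain,
 * so amd_uncore_add() claims a free slot with cmpxchg() instead of taking
 * a lock, and amd_uncore_del() releases it the same way.
 */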

static int amd_uncore_add(struct perf_event *event, int flags)
{
	int i;
	struct amd_uncore *uncore = event_to_amd_uncore(event);
	struct hw_perf_event *hwc = &event->hw;

	/* are we already assigned? */
	if (hwc->idx != -1 && uncore->events[hwc->idx] == event)
		goto out;

	for (i = 0; i < uncore->num_counters; i++) {
		if (uncore->events[i] == event) {
			hwc->idx = i;
			goto out;
		}
	}

	/* if not, take the first available counter */
	hwc->idx = -1;
	for (i = 0; i < uncore->num_counters; i++) {
		if (cmpxchg(&uncore->events[i], NULL, event) == NULL) {
			hwc->idx = i;
			break;
		}
	}

out:
	if (hwc->idx == -1)
		return -EBUSY;

	hwc->config_base = uncore->msr_base + (2 * hwc->idx);
	hwc->event_base = uncore->msr_base + 1 + (2 * hwc->idx);
	hwc->event_base_rdpmc = uncore->rdpmc_base + hwc->idx;
	hwc->state = PERF_HES_UPTODATE | PERF_HES_STOPPED;

	if (flags & PERF_EF_START)
		amd_uncore_start(event, PERF_EF_RELOAD);

	return 0;
}

static void amd_uncore_del(struct perf_event *event, int flags)
{
	int i;
	struct amd_uncore *uncore = event_to_amd_uncore(event);
	struct hw_perf_event *hwc = &event->hw;

	amd_uncore_stop(event, PERF_EF_UPDATE);

	for (i = 0; i < uncore->num_counters; i++) {
		if (cmpxchg(&uncore->events[i], event, NULL) == event)
			break;
	}

	hwc->idx = -1;
}

static int amd_uncore_event_init(struct perf_event *event)
{
	struct amd_uncore *uncore;
	struct hw_perf_event *hwc = &event->hw;

	if (event->attr.type != event->pmu->type)
		return -ENOENT;

	/*
	 * NB and L2 counters (MSRs) are shared across all cores that share the
	 * same NB / L2 cache. Interrupts can be directed to a single target
	 * core, however, event counts generated by processes running on other
	 * cores cannot be masked out. So we do not support sampling and
	 * per-thread events.
	 */
	if (is_sampling_event(event) || event->attach_state & PERF_ATTACH_TASK)
		return -EINVAL;

	/* NB and L2 counters do not have usr/os/guest/host bits */
	if (event->attr.exclude_user || event->attr.exclude_kernel ||
	    event->attr.exclude_host || event->attr.exclude_guest)
		return -EINVAL;

	/* and we do not enable counter overflow interrupts */
	hwc->config = event->attr.config & AMD64_RAW_EVENT_MASK_NB;
	hwc->idx = -1;

	if (event->cpu < 0)
		return -EINVAL;

	uncore = event_to_amd_uncore(event);
	if (!uncore)
		return -ENODEV;

	/*
	 * since request can come in to any of the shared cores, we will remap
	 * to a single common cpu.
	 */
	event->cpu = uncore->cpu;

	return 0;
}

static ssize_t amd_uncore_attr_show_cpumask(struct device *dev,
					    struct device_attribute *attr,
					    char *buf)
{
	int n;
	cpumask_t *active_mask;
	struct pmu *pmu = dev_get_drvdata(dev);

	if (pmu->type == amd_nb_pmu.type)
		active_mask = &amd_nb_active_mask;
	else if (pmu->type == amd_l2_pmu.type)
		active_mask = &amd_l2_active_mask;
	else
		return 0;

	n = cpulist_scnprintf(buf, PAGE_SIZE - 2, active_mask);
	buf[n++] = '\n';
	buf[n] = '\0';
	return n;
}
static DEVICE_ATTR(cpumask, S_IRUGO, amd_uncore_attr_show_cpumask, NULL);

static struct attribute *amd_uncore_attrs[] = {
	&dev_attr_cpumask.attr,
	NULL,
};

static struct attribute_group amd_uncore_attr_group = {
	.attrs = amd_uncore_attrs,
};

PMU_FORMAT_ATTR(event, "config:0-7,32-35");
PMU_FORMAT_ATTR(umask, "config:8-15");

static struct attribute *amd_uncore_format_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask.attr,
	NULL,
};

static struct attribute_group amd_uncore_format_group = {
	.name = "format",
	.attrs = amd_uncore_format_attr,
};

static const struct attribute_group *amd_uncore_attr_groups[] = {
	&amd_uncore_attr_group,
	&amd_uncore_format_group,
	NULL,
};

static struct pmu amd_nb_pmu = {
	.attr_groups	= amd_uncore_attr_groups,
	.name		= "amd_nb",
	.event_init	= amd_uncore_event_init,
	.add		= amd_uncore_add,
	.del		= amd_uncore_del,
	.start		= amd_uncore_start,
	.stop		= amd_uncore_stop,
	.read		= amd_uncore_read,
};

static struct pmu amd_l2_pmu = {
	.attr_groups	= amd_uncore_attr_groups,
	.name		= "amd_l2",
	.event_init	= amd_uncore_event_init,
	.add		= amd_uncore_add,
	.del		= amd_uncore_del,
	.start		= amd_uncore_start,
	.stop		= amd_uncore_stop,
	.read		= amd_uncore_read,
};
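
/*
 * Illustrative only, not part of this driver: a minimal sketch of how a
 * raw NB event could be counted from user space once "amd_nb" above is
 * registered.  The PMU type must be read from sysfs at runtime, and the
 * event/umask values here are placeholders rather than real event codes.
 *
 *	unsigned int type;	// from /sys/bus/event_source/devices/amd_nb/type
 *	struct perf_event_attr attr = {};
 *
 *	attr.size = sizeof(attr);
 *	attr.type = type;
 *	attr.config = (0xAB << 8) | 0xCD;	// umask in config:8-15, event in config:0-7(,32-35)
 *	// counting only: sampling and per-task events are rejected by
 *	// amd_uncore_event_init(), so pid is -1 and cpu must be >= 0
 *	fd = syscall(__NR_perf_event_open, &attr, -1, cpu, -1, 0);
 *
 * The equivalent with the perf tool would be something like:
 *
 *	perf stat -a -e amd_nb/event=0xCD,umask=0xAB/ -- sleep 1
 */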

static struct amd_uncore * __cpuinit amd_uncore_alloc(unsigned int cpu)
{
	return kzalloc_node(sizeof(struct amd_uncore), GFP_KERNEL,
			cpu_to_node(cpu));
}

static void __cpuinit amd_uncore_cpu_up_prepare(unsigned int cpu)
{
	struct amd_uncore *uncore;

	if (amd_uncore_nb) {
		uncore = amd_uncore_alloc(cpu);
		uncore->cpu = cpu;
		uncore->num_counters = NUM_COUNTERS_NB;
		uncore->rdpmc_base = RDPMC_BASE_NB;
		uncore->msr_base = MSR_F15H_NB_PERF_CTL;
		uncore->active_mask = &amd_nb_active_mask;
		uncore->pmu = &amd_nb_pmu;
		*per_cpu_ptr(amd_uncore_nb, cpu) = uncore;
	}

	if (amd_uncore_l2) {
		uncore = amd_uncore_alloc(cpu);
		uncore->cpu = cpu;
		uncore->num_counters = NUM_COUNTERS_L2;
		uncore->rdpmc_base = RDPMC_BASE_L2;
		uncore->msr_base = MSR_F16H_L2I_PERF_CTL;
		uncore->active_mask = &amd_l2_active_mask;
		uncore->pmu = &amd_l2_pmu;
		*per_cpu_ptr(amd_uncore_l2, cpu) = uncore;
	}
}
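
/*
 * Every CPU initially gets its own amd_uncore allocations above.  At
 * CPU_STARTING time the domain id is known (node id from CPUID
 * 0x8000001e ECX for NB, the APIC id rounded down to the L2 sharing
 * group for L2I), and the helper below merges the fresh allocation with
 * an already-online sibling that carries the same id, deferring the
 * kfree() of the duplicate to uncore_online().
 */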

static struct amd_uncore *
__cpuinit amd_uncore_find_online_sibling(struct amd_uncore *this,
					 struct amd_uncore * __percpu *uncores)
{
	unsigned int cpu;
	struct amd_uncore *that;

	for_each_online_cpu(cpu) {
		that = *per_cpu_ptr(uncores, cpu);

		if (!that)
			continue;

		if (this == that)
			continue;

		if (this->id == that->id) {
			that->free_when_cpu_online = this;
			this = that;
			break;
		}
	}

	this->refcnt++;
	return this;
}

static void __cpuinit amd_uncore_cpu_starting(unsigned int cpu)
{
	unsigned int eax, ebx, ecx, edx;
	struct amd_uncore *uncore;

	if (amd_uncore_nb) {
		uncore = *per_cpu_ptr(amd_uncore_nb, cpu);
		cpuid(0x8000001e, &eax, &ebx, &ecx, &edx);
		uncore->id = ecx & 0xff;

		uncore = amd_uncore_find_online_sibling(uncore, amd_uncore_nb);
		*per_cpu_ptr(amd_uncore_nb, cpu) = uncore;
	}

	if (amd_uncore_l2) {
		unsigned int apicid = cpu_data(cpu).apicid;
		unsigned int nshared;

		uncore = *per_cpu_ptr(amd_uncore_l2, cpu);
		cpuid_count(0x8000001d, 2, &eax, &ebx, &ecx, &edx);
		nshared = ((eax >> 14) & 0xfff) + 1;
		uncore->id = apicid - (apicid % nshared);

		uncore = amd_uncore_find_online_sibling(uncore, amd_uncore_l2);
		*per_cpu_ptr(amd_uncore_l2, cpu) = uncore;
	}
}
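
/*
 * uncore_online() runs once the CPU is fully up: the duplicate allocation
 * parked in ->free_when_cpu_online can now be freed, and the domain's
 * owning CPU is published in the active cpumask exposed through the
 * "cpumask" sysfs attribute.
 */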

static void __cpuinit uncore_online(unsigned int cpu,
				    struct amd_uncore * __percpu *uncores)
{
	struct amd_uncore *uncore = *per_cpu_ptr(uncores, cpu);

	kfree(uncore->free_when_cpu_online);
	uncore->free_when_cpu_online = NULL;

	if (cpu == uncore->cpu)
		cpumask_set_cpu(cpu, uncore->active_mask);
}

static void __cpuinit amd_uncore_cpu_online(unsigned int cpu)
{
	if (amd_uncore_nb)
		uncore_online(cpu, amd_uncore_nb);

	if (amd_uncore_l2)
		uncore_online(cpu, amd_uncore_l2);
}

static void __cpuinit uncore_down_prepare(unsigned int cpu,
					  struct amd_uncore * __percpu *uncores)
{
	unsigned int i;
	struct amd_uncore *this = *per_cpu_ptr(uncores, cpu);

	if (this->cpu != cpu)
		return;

	/* this cpu is going down, migrate to a shared sibling if possible */
	for_each_online_cpu(i) {
		struct amd_uncore *that = *per_cpu_ptr(uncores, i);

		if (cpu == i)
			continue;

		if (this == that) {
			perf_pmu_migrate_context(this->pmu, cpu, i);
			cpumask_clear_cpu(cpu, that->active_mask);
			cpumask_set_cpu(i, that->active_mask);
			that->cpu = i;
			break;
		}
	}
}

static void __cpuinit amd_uncore_cpu_down_prepare(unsigned int cpu)
{
	if (amd_uncore_nb)
		uncore_down_prepare(cpu, amd_uncore_nb);

	if (amd_uncore_l2)
		uncore_down_prepare(cpu, amd_uncore_l2);
}

static void __cpuinit uncore_dead(unsigned int cpu,
				  struct amd_uncore * __percpu *uncores)
{
	struct amd_uncore *uncore = *per_cpu_ptr(uncores, cpu);

	if (cpu == uncore->cpu)
		cpumask_clear_cpu(cpu, uncore->active_mask);

	if (!--uncore->refcnt)
		kfree(uncore);

	*per_cpu_ptr(uncores, cpu) = NULL;
}

static void __cpuinit amd_uncore_cpu_dead(unsigned int cpu)
{
	if (amd_uncore_nb)
		uncore_dead(cpu, amd_uncore_nb);

	if (amd_uncore_l2)
		uncore_dead(cpu, amd_uncore_l2);
}

static int __cpuinit
amd_uncore_cpu_notifier(struct notifier_block *self, unsigned long action,
			void *hcpu)
{
	unsigned int cpu = (long)hcpu;

	switch (action & ~CPU_TASKS_FROZEN) {
	case CPU_UP_PREPARE:
		amd_uncore_cpu_up_prepare(cpu);
		break;

	case CPU_STARTING:
		amd_uncore_cpu_starting(cpu);
		break;

	case CPU_ONLINE:
		amd_uncore_cpu_online(cpu);
		break;

	case CPU_DOWN_PREPARE:
		amd_uncore_cpu_down_prepare(cpu);
		break;

	case CPU_UP_CANCELED:
	case CPU_DEAD:
		amd_uncore_cpu_dead(cpu);
		break;

	default:
		break;
	}

	return NOTIFY_OK;
}

static struct notifier_block amd_uncore_cpu_notifier_block __cpuinitdata = {
	.notifier_call	= amd_uncore_cpu_notifier,
	.priority	= CPU_PRI_PERF + 1,
};

static void __init init_cpu_already_online(void *dummy)
{
	unsigned int cpu = smp_processor_id();

	amd_uncore_cpu_starting(cpu);
	amd_uncore_cpu_online(cpu);
}
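
/*
 * Init order: check for an AMD CPU with topology extensions, register the
 * NB and/or L2I PMUs when the matching perfctr feature bits are set, then
 * bring up per-cpu state for CPUs that are already online (via
 * smp_call_function_single(), since amd_uncore_cpu_starting() must read
 * CPUID on the target CPU) before registering the hotplug notifier.
 */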

static int __init amd_uncore_init(void)
{
	unsigned int cpu;
	int ret = -ENODEV;

	if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD)
		return -ENODEV;

	if (!cpu_has_topoext)
		return -ENODEV;

	if (cpu_has_perfctr_nb) {
		amd_uncore_nb = alloc_percpu(struct amd_uncore *);
		perf_pmu_register(&amd_nb_pmu, amd_nb_pmu.name, -1);

		printk(KERN_INFO "perf: AMD NB counters detected\n");
		ret = 0;
	}

	if (cpu_has_perfctr_l2) {
		amd_uncore_l2 = alloc_percpu(struct amd_uncore *);
		perf_pmu_register(&amd_l2_pmu, amd_l2_pmu.name, -1);

		printk(KERN_INFO "perf: AMD L2I counters detected\n");
		ret = 0;
	}

	if (ret)
		return -ENODEV;

	get_online_cpus();
	/* init cpus already online before registering for hotplug notifier */
	for_each_online_cpu(cpu) {
		amd_uncore_cpu_up_prepare(cpu);
		smp_call_function_single(cpu, init_cpu_already_online, NULL, 1);
	}

	register_cpu_notifier(&amd_uncore_cpu_notifier_block);
	put_online_cpus();

	return 0;
}
device_initcall(amd_uncore_init);