GUI: Fix Tomato RAF theme for all builds. Compilation typo.
[tomato.git] / release/src-rt-6.x.4708/linux/linux-2.6.36/arch/x86/oprofile/op_model_amd.c
/*
 * @file op_model_amd.c
 * athlon / K7 / K8 / Family 10h model-specific MSR operations
 *
 * @remark Copyright 2002-2009 OProfile authors
 * @remark Read the file COPYING
 *
 * @author John Levon
 * @author Philippe Elie
 * @author Graydon Hoare
 * @author Robert Richter <robert.richter@amd.com>
 * @author Barry Kasindorf <barry.kasindorf@amd.com>
 * @author Jason Yeh <jason.yeh@amd.com>
 * @author Suravee Suthikulpanit <suravee.suthikulpanit@amd.com>
 */
#include <linux/oprofile.h>
#include <linux/device.h>
#include <linux/pci.h>
#include <linux/percpu.h>

#include <asm/ptrace.h>
#include <asm/msr.h>
#include <asm/nmi.h>
#include <asm/apic.h>
#include <asm/processor.h>
#include <asm/cpufeature.h>

#include "op_x86_model.h"
#include "op_counter.h"
#define NUM_COUNTERS		4
#ifdef CONFIG_OPROFILE_EVENT_MULTIPLEX
#define NUM_VIRT_COUNTERS	32
#else
#define NUM_VIRT_COUNTERS	NUM_COUNTERS
#endif

#define OP_EVENT_MASK		0x0FFF
#define OP_CTR_OVERFLOW		(1ULL<<31)

#define MSR_AMD_EVENTSEL_RESERVED	((0xFFFFFCF0ULL<<32)|(1ULL<<21))

static unsigned long reset_value[NUM_VIRT_COUNTERS];

#define IBS_FETCH_SIZE	6
#define IBS_OP_SIZE	12

static u32 ibs_caps;
struct op_ibs_config {
	unsigned long op_enabled;
	unsigned long fetch_enabled;
	unsigned long max_cnt_fetch;
	unsigned long max_cnt_op;
	unsigned long rand_en;
	unsigned long dispatched_ops;
};

static struct op_ibs_config ibs_config;
static u64 ibs_op_ctl;
/*
 * IBS cpuid feature detection
 */
#define IBS_CPUID_FEATURES	0x8000001b

/*
 * Same bit mask as for IBS cpuid feature flags (Fn8000_001B_EAX), but
 * bit 0 is used to indicate the existence of IBS.
 */
#define IBS_CAPS_AVAIL		(1LL<<0)
#define IBS_CAPS_RDWROPCNT	(1LL<<3)
#define IBS_CAPS_OPCNT		(1LL<<4)

/*
 * IBS randomization macros
 */
#define IBS_RANDOM_BITS			12
#define IBS_RANDOM_MASK			((1ULL << IBS_RANDOM_BITS) - 1)
#define IBS_RANDOM_MAXCNT_OFFSET	(1ULL << (IBS_RANDOM_BITS - 5))
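
/*
 * get_ibs_caps() below returns 0 when the CPU lacks IBS entirely,
 * IBS_CAPS_AVAIL alone when IBS is present but the Fn8000_001B
 * capability leaf is unavailable or its valid bit is clear, and
 * otherwise the raw EAX feature flags of that leaf.
 */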
static u32 get_ibs_caps(void)
{
	u32 ibs_caps;
	unsigned int max_level;

	if (!boot_cpu_has(X86_FEATURE_IBS))
		return 0;

	/* check IBS cpuid feature flags */
	max_level = cpuid_eax(0x80000000);
	if (max_level < IBS_CPUID_FEATURES)
		return IBS_CAPS_AVAIL;

	ibs_caps = cpuid_eax(IBS_CPUID_FEATURES);
	if (!(ibs_caps & IBS_CAPS_AVAIL))
		/* cpuid flags not valid */
		return IBS_CAPS_AVAIL;

	return ibs_caps;
}
/*
 * 16-bit Linear Feedback Shift Register (LFSR)
 *
 * Feedback polynomial = x^16 + x^14 + x^13 + x^11 + 1
 */
static unsigned int lfsr_random(void)
{
	static unsigned int lfsr_value = 0xF00D;
	unsigned int bit;

	/* Compute next bit to shift in */
	bit = ((lfsr_value >> 0) ^
	       (lfsr_value >> 2) ^
	       (lfsr_value >> 3) ^
	       (lfsr_value >> 5)) & 0x0001;

	/* Advance to next register value */
	lfsr_value = (lfsr_value >> 1) | (bit << 15);

	return lfsr_value;
}
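
/*
 * The taps at bits 0, 2, 3 and 5 of the right-shifting register above
 * correspond to the x^16, x^14, x^13 and x^11 terms of the feedback
 * polynomial. Worked example: from the 0xF00D seed,
 * bit 0 ^ bit 2 ^ bit 3 ^ bit 5 = 1 ^ 1 ^ 1 ^ 0 = 1, so the first
 * call returns (0xF00D >> 1) | (1 << 15) = 0xF806.
 */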
/*
 * IBS software randomization
 *
 * The IBS periodic op counter is randomized in software. The lower 12
 * bits of the 20 bit counter are randomized. IbsOpCurCnt is
 * initialized with a 12 bit random value.
 */
static inline u64 op_amd_randomize_ibs_op(u64 val)
{
	unsigned int random = lfsr_random();

	if (!(ibs_caps & IBS_CAPS_RDWROPCNT))
		val += (s8)(random >> 4);
	else
		val |= (u64)(random & IBS_RANDOM_MASK) << 32;

	return val;
}
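
/*
 * Two strategies above: when IbsOpCurCnt is writable
 * (IBS_CAPS_RDWROPCNT), a 12-bit random value is ORed into bits 43:32
 * of IBSOPCTL (the low bits of IbsOpCurCnt), so the counter starts
 * partway through its period; otherwise a signed offset in
 * [-128, 127] is added to the max-count value instead.
 */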
static inline void
op_amd_handle_ibs(struct pt_regs * const regs,
		  struct op_msrs const * const msrs)
{
	u64 val, ctl;
	struct op_entry entry;

	if (!ibs_caps)
		return;

	if (ibs_config.fetch_enabled) {
		rdmsrl(MSR_AMD64_IBSFETCHCTL, ctl);
		if (ctl & IBS_FETCH_VAL) {
			rdmsrl(MSR_AMD64_IBSFETCHLINAD, val);
			oprofile_write_reserve(&entry, regs, val,
					       IBS_FETCH_CODE, IBS_FETCH_SIZE);
			oprofile_add_data64(&entry, val);
			oprofile_add_data64(&entry, ctl);
			rdmsrl(MSR_AMD64_IBSFETCHPHYSAD, val);
			oprofile_add_data64(&entry, val);
			oprofile_write_commit(&entry);

			/* reenable the IRQ */
			ctl &= ~(IBS_FETCH_VAL | IBS_FETCH_CNT);
			ctl |= IBS_FETCH_ENABLE;
			wrmsrl(MSR_AMD64_IBSFETCHCTL, ctl);
		}
	}

	if (ibs_config.op_enabled) {
		rdmsrl(MSR_AMD64_IBSOPCTL, ctl);
		if (ctl & IBS_OP_VAL) {
			rdmsrl(MSR_AMD64_IBSOPRIP, val);
			oprofile_write_reserve(&entry, regs, val,
					       IBS_OP_CODE, IBS_OP_SIZE);
			oprofile_add_data64(&entry, val);
			rdmsrl(MSR_AMD64_IBSOPDATA, val);
			oprofile_add_data64(&entry, val);
			rdmsrl(MSR_AMD64_IBSOPDATA2, val);
			oprofile_add_data64(&entry, val);
			rdmsrl(MSR_AMD64_IBSOPDATA3, val);
			oprofile_add_data64(&entry, val);
			rdmsrl(MSR_AMD64_IBSDCLINAD, val);
			oprofile_add_data64(&entry, val);
			rdmsrl(MSR_AMD64_IBSDCPHYSAD, val);
			oprofile_add_data64(&entry, val);
			oprofile_write_commit(&entry);

			/* reenable the IRQ */
			ctl = op_amd_randomize_ibs_op(ibs_op_ctl);
			wrmsrl(MSR_AMD64_IBSOPCTL, ctl);
		}
	}
}
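
/*
 * The hardware max-count fields (IbsFetchMaxCnt, IbsOpMaxCnt) hold
 * the count in units of 16 events, which is why op_amd_start_ibs()
 * below shifts the user-supplied max_cnt values right by 4.
 */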
static inline void op_amd_start_ibs(void)
{
	u64 val;

	if (!ibs_caps)
		return;

	if (ibs_config.fetch_enabled) {
		val = (ibs_config.max_cnt_fetch >> 4) & IBS_FETCH_MAX_CNT;
		val |= ibs_config.rand_en ? IBS_FETCH_RAND_EN : 0;
		val |= IBS_FETCH_ENABLE;
		wrmsrl(MSR_AMD64_IBSFETCHCTL, val);
	}

	if (ibs_config.op_enabled) {
		ibs_op_ctl = ibs_config.max_cnt_op >> 4;
		if (!(ibs_caps & IBS_CAPS_RDWROPCNT)) {
			/*
			 * IbsOpCurCnt not supported.  See
			 * op_amd_randomize_ibs_op() for details.
			 */
			ibs_op_ctl = clamp(ibs_op_ctl, 0x0081ULL, 0xFF80ULL);
		} else {
			/*
			 * The start value is randomized with a
			 * positive offset, we need to compensate it
			 * with the half of the randomized range. Also
			 * avoid underflows.
			 */
			ibs_op_ctl = min(ibs_op_ctl + IBS_RANDOM_MAXCNT_OFFSET,
					 IBS_OP_MAX_CNT);
		}
		if (ibs_caps & IBS_CAPS_OPCNT && ibs_config.dispatched_ops)
			ibs_op_ctl |= IBS_OP_CNT_CTL;
		ibs_op_ctl |= IBS_OP_ENABLE;
		val = op_amd_randomize_ibs_op(ibs_op_ctl);
		wrmsrl(MSR_AMD64_IBSOPCTL, val);
	}
}
static void op_amd_stop_ibs(void)
{
	if (!ibs_caps)
		return;

	if (ibs_config.fetch_enabled)
		/* clear max count and enable */
		wrmsrl(MSR_AMD64_IBSFETCHCTL, 0);

	if (ibs_config.op_enabled)
		/* clear max count and enable */
		wrmsrl(MSR_AMD64_IBSOPCTL, 0);
}
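
/*
 * With CONFIG_OPROFILE_EVENT_MULTIPLEX, up to NUM_VIRT_COUNTERS (32)
 * configured events share the NUM_COUNTERS (4) hardware counters. The
 * oprofile core rotates the mapping; op_mux_switch_ctrl() reprograms
 * the event-select MSRs for whichever virtual counters
 * op_x86_phys_to_virt() currently maps onto the physical ones.
 */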
#ifdef CONFIG_OPROFILE_EVENT_MULTIPLEX

static void op_mux_switch_ctrl(struct op_x86_model_spec const *model,
			       struct op_msrs const * const msrs)
{
	u64 val;
	int i;

	/* enable active counters */
	for (i = 0; i < NUM_COUNTERS; ++i) {
		int virt = op_x86_phys_to_virt(i);
		if (!reset_value[virt])
			continue;
		rdmsrl(msrs->controls[i].addr, val);
		val &= model->reserved;
		val |= op_x86_get_ctrl(model, &counter_config[virt]);
		wrmsrl(msrs->controls[i].addr, val);
	}
}

#endif
/* functions for op_amd_spec */

static void op_amd_shutdown(struct op_msrs const * const msrs)
{
	int i;

	for (i = 0; i < NUM_COUNTERS; ++i) {
		if (!msrs->counters[i].addr)
			continue;
		release_perfctr_nmi(MSR_K7_PERFCTR0 + i);
		release_evntsel_nmi(MSR_K7_EVNTSEL0 + i);
	}
}

static int op_amd_fill_in_addresses(struct op_msrs * const msrs)
{
	int i;

	for (i = 0; i < NUM_COUNTERS; i++) {
		if (!reserve_perfctr_nmi(MSR_K7_PERFCTR0 + i))
			goto fail;
		if (!reserve_evntsel_nmi(MSR_K7_EVNTSEL0 + i)) {
			release_perfctr_nmi(MSR_K7_PERFCTR0 + i);
			goto fail;
		}
		/* both registers must be reserved */
		msrs->counters[i].addr = MSR_K7_PERFCTR0 + i;
		msrs->controls[i].addr = MSR_K7_EVNTSEL0 + i;
		continue;
	fail:
		if (!counter_config[i].enabled)
			continue;
		op_x86_warn_reserved(i);
		op_amd_shutdown(msrs);
		return -EBUSY;
	}

	return 0;
}
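
/*
 * The counters count upward and raise an NMI when they wrap past
 * zero, so op_amd_setup_ctrs() below preloads each active counter
 * with -(u64)reset_value to get one interrupt every reset_value
 * events.
 */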
static void op_amd_setup_ctrs(struct op_x86_model_spec const *model,
			      struct op_msrs const * const msrs)
{
	u64 val;
	int i;

	/* setup reset_value */
	for (i = 0; i < NUM_VIRT_COUNTERS; ++i) {
		if (counter_config[i].enabled
		    && msrs->counters[op_x86_virt_to_phys(i)].addr)
			reset_value[i] = counter_config[i].count;
		else
			reset_value[i] = 0;
	}

	/* clear all counters */
	for (i = 0; i < NUM_COUNTERS; ++i) {
		if (!msrs->controls[i].addr)
			continue;
		rdmsrl(msrs->controls[i].addr, val);
		if (val & ARCH_PERFMON_EVENTSEL_ENABLE)
			op_x86_warn_in_use(i);
		val &= model->reserved;
		wrmsrl(msrs->controls[i].addr, val);
		/*
		 * avoid a false detection of ctr overflows in NMI
		 * handler
		 */
		wrmsrl(msrs->counters[i].addr, -1LL);
	}

	/* enable active counters */
	for (i = 0; i < NUM_COUNTERS; ++i) {
		int virt = op_x86_phys_to_virt(i);
		if (!reset_value[virt])
			continue;

		/* setup counter registers */
		wrmsrl(msrs->counters[i].addr, -(u64)reset_value[virt]);

		/* setup control registers */
		rdmsrl(msrs->controls[i].addr, val);
		val &= model->reserved;
		val |= op_x86_get_ctrl(model, &counter_config[virt]);
		wrmsrl(msrs->controls[i].addr, val);
	}

	if (ibs_caps)
		setup_APIC_eilvt_ibs(0, APIC_EILVT_MSG_NMI, 0);
}
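
/*
 * setup_APIC_eilvt_ibs() programs the APIC extended-interrupt LVT
 * entry used by IBS: NMI delivery and unmasked while profiling
 * (above), restored to fixed delivery and masked on cpu_down (below).
 */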
static void op_amd_cpu_shutdown(void)
{
	if (ibs_caps)
		setup_APIC_eilvt_ibs(0, APIC_EILVT_MSG_FIX, 1);
}

static int op_amd_check_ctrs(struct pt_regs * const regs,
			     struct op_msrs const * const msrs)
{
	u64 val;
	int i;

	for (i = 0; i < NUM_COUNTERS; ++i) {
		int virt = op_x86_phys_to_virt(i);
		if (!reset_value[virt])
			continue;
		rdmsrl(msrs->counters[i].addr, val);
		/* bit is clear if overflowed: */
		if (val & OP_CTR_OVERFLOW)
			continue;
		oprofile_add_sample(regs, virt);
		wrmsrl(msrs->counters[i].addr, -(u64)reset_value[virt]);
	}

	op_amd_handle_ibs(regs, msrs);

	/* See op_model_ppro.c */
	return 1;
}
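
/*
 * The OP_CTR_OVERFLOW test above works because a counter preloaded
 * with -(u64)reset_value (reset_value < 2^31) has bit 31 set until it
 * wraps past zero to a small non-negative value, clearing bit 31.
 */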
static void op_amd_start(struct op_msrs const * const msrs)
{
	u64 val;
	int i;

	for (i = 0; i < NUM_COUNTERS; ++i) {
		if (!reset_value[op_x86_phys_to_virt(i)])
			continue;
		rdmsrl(msrs->controls[i].addr, val);
		val |= ARCH_PERFMON_EVENTSEL_ENABLE;
		wrmsrl(msrs->controls[i].addr, val);
	}

	op_amd_start_ibs();
}

static void op_amd_stop(struct op_msrs const * const msrs)
{
	u64 val;
	int i;

	/*
	 * Subtle: stop on all counters to avoid race with setting our
	 * pm callback
	 */
	for (i = 0; i < NUM_COUNTERS; ++i) {
		if (!reset_value[op_x86_phys_to_virt(i)])
			continue;
		rdmsrl(msrs->controls[i].addr, val);
		val &= ~ARCH_PERFMON_EVENTSEL_ENABLE;
		wrmsrl(msrs->controls[i].addr, val);
	}

	op_amd_stop_ibs();
}
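
/*
 * IBSCTL lives at PCI config offset 0x1cc of each Family 10h
 * northbridge "misc" function; __init_ibs_nmi() must program the
 * extended-LVT offset into it on every node and read it back to
 * verify the value stuck.
 */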
static int __init_ibs_nmi(void)
{
#define IBSCTL_LVTOFFSETVAL		(1 << 8)
#define IBSCTL				0x1cc
	struct pci_dev *cpu_cfg;
	int nodes;
	u32 value = 0;
	u8 ibs_eilvt_off;

	ibs_eilvt_off = setup_APIC_eilvt_ibs(0, APIC_EILVT_MSG_FIX, 1);

	nodes = 0;
	cpu_cfg = NULL;
	do {
		cpu_cfg = pci_get_device(PCI_VENDOR_ID_AMD,
					 PCI_DEVICE_ID_AMD_10H_NB_MISC,
					 cpu_cfg);
		if (!cpu_cfg)
			break;
		++nodes;
		pci_write_config_dword(cpu_cfg, IBSCTL, ibs_eilvt_off
				       | IBSCTL_LVTOFFSETVAL);
		pci_read_config_dword(cpu_cfg, IBSCTL, &value);
		if (value != (ibs_eilvt_off | IBSCTL_LVTOFFSETVAL)) {
			pci_dev_put(cpu_cfg);
			printk(KERN_DEBUG "Failed to setup IBS LVT offset, "
			       "IBSCTL = 0x%08x\n", value);
			return -EINVAL;
		}
	} while (1);

	if (!nodes) {
		printk(KERN_DEBUG "No CPU node configured for IBS\n");
		return -ENODEV;
	}

	return 0;
}
/*
 * check and reserve APIC extended interrupt LVT offset for IBS if
 * available
 *
 * init_ibs() performs implicitly cpu-local operations, so pin this
 * thread to its current CPU
 */
static void init_ibs(void)
{
	preempt_disable();

	ibs_caps = get_ibs_caps();
	if (!ibs_caps)
		goto out;

	if (__init_ibs_nmi() < 0)
		ibs_caps = 0;
	else
		printk(KERN_INFO "oprofile: AMD IBS detected (0x%08x)\n", ibs_caps);

out:
	preempt_enable();
}
static int (*create_arch_files)(struct super_block *sb, struct dentry *root);

static int setup_ibs_files(struct super_block *sb, struct dentry *root)
{
	struct dentry *dir;
	int ret = 0;

	/* architecture specific files */
	if (create_arch_files)
		ret = create_arch_files(sb, root);

	if (ret)
		return ret;

	if (!ibs_caps)
		return ret;

	/* model specific files */

	/* setup some reasonable defaults */
	ibs_config.max_cnt_fetch = 250000;
	ibs_config.fetch_enabled = 0;
	ibs_config.max_cnt_op = 250000;
	ibs_config.op_enabled = 0;
	ibs_config.dispatched_ops = 0;

	dir = oprofilefs_mkdir(sb, root, "ibs_fetch");
	oprofilefs_create_ulong(sb, dir, "enable",
				&ibs_config.fetch_enabled);
	oprofilefs_create_ulong(sb, dir, "max_count",
				&ibs_config.max_cnt_fetch);
	oprofilefs_create_ulong(sb, dir, "rand_enable",
				&ibs_config.rand_en);

	dir = oprofilefs_mkdir(sb, root, "ibs_op");
	oprofilefs_create_ulong(sb, dir, "enable",
				&ibs_config.op_enabled);
	oprofilefs_create_ulong(sb, dir, "max_count",
				&ibs_config.max_cnt_op);
	if (ibs_caps & IBS_CAPS_OPCNT)
		oprofilefs_create_ulong(sb, dir, "dispatched_ops",
					&ibs_config.dispatched_ops);

	return 0;
}
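
/*
 * op_amd_init() interposes setup_ibs_files() in front of the generic
 * create_files callback, so the IBS control files above appear in
 * oprofilefs alongside the standard counter files.
 */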
static int op_amd_init(struct oprofile_operations *ops)
{
	init_ibs();
	create_arch_files = ops->create_files;
	ops->create_files = setup_ibs_files;
	return 0;
}

struct op_x86_model_spec op_amd_spec = {
	.num_counters		= NUM_COUNTERS,
	.num_controls		= NUM_COUNTERS,
	.num_virt_counters	= NUM_VIRT_COUNTERS,
	.reserved		= MSR_AMD_EVENTSEL_RESERVED,
	.event_mask		= OP_EVENT_MASK,
	.init			= op_amd_init,
	.fill_in_addresses	= &op_amd_fill_in_addresses,
	.setup_ctrs		= &op_amd_setup_ctrs,
	.cpu_down		= &op_amd_cpu_shutdown,
	.check_ctrs		= &op_amd_check_ctrs,
	.start			= &op_amd_start,
	.stop			= &op_amd_stop,
	.shutdown		= &op_amd_shutdown,
#ifdef CONFIG_OPROFILE_EVENT_MULTIPLEX
	.switch_ctrl		= &op_mux_switch_ctrl,
#endif
};