x86, mce: Fix compile warning in case of CONFIG_SMP=n
arch/x86/kernel/cpu/mcheck/mce_amd.c

/*
 *  (c) 2005, 2006 Advanced Micro Devices, Inc.
 *  Your use of this code is subject to the terms and conditions of the
 *  GNU general public license version 2. See "COPYING" or
 *  http://www.gnu.org/licenses/gpl.html
 *
 *  Written by Jacob Shin - AMD, Inc.
 *
 *  Support : jacob.shin@amd.com
 *
 *  April 2006
 *     - added support for AMD Family 0x10 processors
 *
 *  All MC4_MISCi registers are shared between multi-cores
 */

#include <linux/interrupt.h>
#include <linux/notifier.h>
#include <linux/kobject.h>
#include <linux/percpu.h>
#include <linux/sysdev.h>
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/sysfs.h>
#include <linux/init.h>
#include <linux/cpu.h>
#include <linux/smp.h>

#include <asm/apic.h>
#include <asm/idle.h>
#include <asm/mce.h>
#include <asm/msr.h>

#define PFX               "mce_threshold: "
#define VERSION           "version 1.1.1"
#define NR_BANKS          6
#define NR_BLOCKS         9
#define THRESHOLD_MAX     0xFFF
#define INT_TYPE_APIC     0x00020000
#define MASK_VALID_HI     0x80000000
#define MASK_CNTP_HI      0x40000000
#define MASK_LOCKED_HI    0x20000000
#define MASK_LVTOFF_HI    0x00F00000
#define MASK_COUNT_EN_HI  0x00080000
#define MASK_INT_TYPE_HI  0x00060000
#define MASK_OVERFLOW_HI  0x00010000
#define MASK_ERR_COUNT_HI 0x00000FFF
#define MASK_BLKPTR_LO    0xFF000000
#define MCG_XBLK_ADDR     0xC0000400
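
/*
 * The MASK_*_HI bits above describe the high half of an MCi_MISC
 * threshold register: valid, counter-present and locked flags, the LVT
 * offset used to deliver the threshold interrupt, the count-enable and
 * interrupt-type fields, the overflow bit, and the 12-bit error count
 * itself.
 */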

struct threshold_block {
        unsigned int            block;
        unsigned int            bank;
        unsigned int            cpu;
        u32                     address;
        u16                     interrupt_enable;
        u16                     threshold_limit;
        struct kobject          kobj;
        struct list_head        miscj;
};

/* defaults used early on boot */
static struct threshold_block threshold_defaults = {
        .interrupt_enable       = 0,
        .threshold_limit        = THRESHOLD_MAX,
};

struct threshold_bank {
        struct kobject          *kobj;
        struct threshold_block  *blocks;
        cpumask_var_t           cpus;
};
static DEFINE_PER_CPU(struct threshold_bank * [NR_BANKS], threshold_banks);

#ifdef CONFIG_SMP
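/* Bank 4 (MC4, the northbridge bank) is the only bank shared between cores. */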
static unsigned char shared_bank[NR_BANKS] = {
        0, 0, 0, 0, 1
};
#endif

static DEFINE_PER_CPU(unsigned char, bank_map); /* see which banks are on */

static void amd_threshold_interrupt(void);

/*
 * CPU Initialization
 */

struct thresh_restart {
        struct threshold_block  *b;
        int                     reset;
        u16                     old_limit;
};

/* must be called with correct cpu affinity */
/* Called via smp_call_function_single() */
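/*
 * Note: the hardware counter counts up from (THRESHOLD_MAX - threshold_limit),
 * so the threshold interrupt fires after threshold_limit further errors;
 * show_error_count() below subtracts the same bias when reporting.
 */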
static void threshold_restart_bank(void *_tr)
{
        struct thresh_restart *tr = _tr;
        u32 mci_misc_hi, mci_misc_lo;

        rdmsr(tr->b->address, mci_misc_lo, mci_misc_hi);

        if (tr->b->threshold_limit < (mci_misc_hi & THRESHOLD_MAX))
                tr->reset = 1;  /* limit cannot be lower than err count */

        if (tr->reset) {                /* reset err count and overflow bit */
                mci_misc_hi =
                    (mci_misc_hi & ~(MASK_ERR_COUNT_HI | MASK_OVERFLOW_HI)) |
                    (THRESHOLD_MAX - tr->b->threshold_limit);
        } else if (tr->old_limit) {     /* change limit w/o reset */
                int new_count = (mci_misc_hi & THRESHOLD_MAX) +
                    (tr->old_limit - tr->b->threshold_limit);

                mci_misc_hi = (mci_misc_hi & ~MASK_ERR_COUNT_HI) |
                    (new_count & THRESHOLD_MAX);
        }

        tr->b->interrupt_enable ?
            (mci_misc_hi = (mci_misc_hi & ~MASK_INT_TYPE_HI) | INT_TYPE_APIC) :
            (mci_misc_hi &= ~MASK_INT_TYPE_HI);

        mci_misc_hi |= MASK_COUNT_EN_HI;
        wrmsr(tr->b->address, mci_misc_lo, mci_misc_hi);
}

/* cpu init entry point, called from mce.c with preempt off */
void mce_amd_feature_init(struct cpuinfo_x86 *c)
{
        unsigned int cpu = smp_processor_id();
        u32 low = 0, high = 0, address = 0;
        unsigned int bank, block;
        struct thresh_restart tr;
        u8 lvt_off;

        for (bank = 0; bank < NR_BANKS; ++bank) {
                for (block = 0; block < NR_BLOCKS; ++block) {
                        if (block == 0)
                                address = MSR_IA32_MC0_MISC + bank * 4;
                        else if (block == 1) {
                                address = (low & MASK_BLKPTR_LO) >> 21;
                                if (!address)
                                        break;
                                address += MCG_XBLK_ADDR;
                        } else
                                ++address;

                        if (rdmsr_safe(address, &low, &high))
                                break;

                        if (!(high & MASK_VALID_HI)) {
                                if (block)
                                        continue;
                                else
                                        break;
                        }

                        if (!(high & MASK_CNTP_HI)  ||
                             (high & MASK_LOCKED_HI))
                                continue;

                        if (!block)
                                per_cpu(bank_map, cpu) |= (1 << bank);
#ifdef CONFIG_SMP
                        if (shared_bank[bank] && c->cpu_core_id)
                                break;
#endif
                        lvt_off = setup_APIC_eilvt_mce(THRESHOLD_APIC_VECTOR,
                                                       APIC_EILVT_MSG_FIX, 0);

                        high &= ~MASK_LVTOFF_HI;
                        high |= lvt_off << 20;
                        wrmsr(address, low, high);

                        threshold_defaults.address = address;
                        tr.b = &threshold_defaults;
                        tr.reset = 0;
                        tr.old_limit = 0;
                        threshold_restart_bank(&tr);

                        mce_threshold_vector = amd_threshold_interrupt;
                }
        }
}

/*
 * APIC Interrupt Handler
 */

/*
 * The threshold interrupt handler services THRESHOLD_APIC_VECTOR.
 * The interrupt fires when error_count reaches threshold_limit.
 * The handler simply logs an MCE with a software-defined bank number.
 */
static void amd_threshold_interrupt(void)
{
        u32 low = 0, high = 0, address = 0;
        unsigned int bank, block;
        struct mce m;

        mce_setup(&m);

        /* assume first bank caused it */
        for (bank = 0; bank < NR_BANKS; ++bank) {
                if (!(per_cpu(bank_map, m.cpu) & (1 << bank)))
                        continue;
                for (block = 0; block < NR_BLOCKS; ++block) {
                        if (block == 0) {
                                address = MSR_IA32_MC0_MISC + bank * 4;
                        } else if (block == 1) {
                                address = (low & MASK_BLKPTR_LO) >> 21;
                                if (!address)
                                        break;
                                address += MCG_XBLK_ADDR;
                        } else {
                                ++address;
                        }

                        if (rdmsr_safe(address, &low, &high))
                                break;

                        if (!(high & MASK_VALID_HI)) {
                                if (block)
                                        continue;
                                else
                                        break;
                        }

                        if (!(high & MASK_CNTP_HI)  ||
                             (high & MASK_LOCKED_HI))
                                continue;

                        /*
                         * Log the machine check that caused the threshold
                         * event.
                         */
                        machine_check_poll(MCP_TIMESTAMP,
                                        &__get_cpu_var(mce_poll_banks));

                        if (high & MASK_OVERFLOW_HI) {
                                rdmsrl(address, m.misc);
                                rdmsrl(MSR_IA32_MC0_STATUS + bank * 4,
                                       m.status);
                                m.bank = K8_MCE_THRESHOLD_BASE
                                       + bank * NR_BLOCKS
                                       + block;
                                mce_log(&m);
                                return;
                        }
                }
        }
}

/*
 * Sysfs Interface
 */
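
/*
 * Each threshold block appears as a misc<block> directory under its
 * bank's threshold_bank<bank> directory in sysfs (typically rooted at
 * the per-CPU machinecheck device; the exact path depends on how
 * mce_dev is registered), holding the files interrupt_enable,
 * threshold_limit and error_count.
 */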

struct threshold_attr {
        struct attribute attr;
        ssize_t (*show) (struct threshold_block *, char *);
        ssize_t (*store) (struct threshold_block *, const char *, size_t count);
};

#define SHOW_FIELDS(name)                                               \
static ssize_t show_ ## name(struct threshold_block *b, char *buf)     \
{                                                                       \
        return sprintf(buf, "%lx\n", (unsigned long) b->name);         \
}
SHOW_FIELDS(interrupt_enable)
SHOW_FIELDS(threshold_limit)

static ssize_t
store_interrupt_enable(struct threshold_block *b, const char *buf, size_t size)
{
        struct thresh_restart tr;
        unsigned long new;

        if (strict_strtoul(buf, 0, &new) < 0)
                return -EINVAL;

        b->interrupt_enable = !!new;

        tr.b            = b;
        tr.reset        = 0;
        tr.old_limit    = 0;

        smp_call_function_single(b->cpu, threshold_restart_bank, &tr, 1);

        return size;
}

static ssize_t
store_threshold_limit(struct threshold_block *b, const char *buf, size_t size)
{
        struct thresh_restart tr;
        unsigned long new;

        if (strict_strtoul(buf, 0, &new) < 0)
                return -EINVAL;

        if (new > THRESHOLD_MAX)
                new = THRESHOLD_MAX;
        if (new < 1)
                new = 1;

        tr.old_limit = b->threshold_limit;
        b->threshold_limit = new;
        tr.b = b;
        tr.reset = 0;

        smp_call_function_single(b->cpu, threshold_restart_bank, &tr, 1);

        return size;
}

struct threshold_block_cross_cpu {
        struct threshold_block  *tb;
        long                    retval;
};
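
/*
 * The raw counter starts at (THRESHOLD_MAX - threshold_limit); subtract
 * that bias so error_count reports the number of errors seen so far.
 */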
static void local_error_count_handler(void *_tbcc)
{
        struct threshold_block_cross_cpu *tbcc = _tbcc;
        struct threshold_block *b = tbcc->tb;
        u32 low, high;

        rdmsr(b->address, low, high);
        tbcc->retval = (high & 0xFFF) - (THRESHOLD_MAX - b->threshold_limit);
}

static ssize_t show_error_count(struct threshold_block *b, char *buf)
{
        struct threshold_block_cross_cpu tbcc = { .tb = b, };

        smp_call_function_single(b->cpu, local_error_count_handler, &tbcc, 1);
        return sprintf(buf, "%lx\n", tbcc.retval);
}
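
/* Any write to error_count resets the counter and the overflow bit. */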
static ssize_t store_error_count(struct threshold_block *b,
                                 const char *buf, size_t count)
{
        struct thresh_restart tr = { .b = b, .reset = 1, .old_limit = 0 };

        smp_call_function_single(b->cpu, threshold_restart_bank, &tr, 1);
        return 1;
}

#define RW_ATTR(val)                                                    \
static struct threshold_attr val = {                                    \
        .attr   = {.name = __stringify(val), .mode = 0644 },            \
        .show   = show_## val,                                           \
        .store  = store_## val,                                          \
};

RW_ATTR(interrupt_enable);
RW_ATTR(threshold_limit);
RW_ATTR(error_count);

static struct attribute *default_attrs[] = {
        &interrupt_enable.attr,
        &threshold_limit.attr,
        &error_count.attr,
        NULL
};

#define to_block(k)     container_of(k, struct threshold_block, kobj)
#define to_attr(a)      container_of(a, struct threshold_attr, attr)

static ssize_t show(struct kobject *kobj, struct attribute *attr, char *buf)
{
        struct threshold_block *b = to_block(kobj);
        struct threshold_attr *a = to_attr(attr);
        ssize_t ret;

        ret = a->show ? a->show(b, buf) : -EIO;

        return ret;
}

static ssize_t store(struct kobject *kobj, struct attribute *attr,
                     const char *buf, size_t count)
{
        struct threshold_block *b = to_block(kobj);
        struct threshold_attr *a = to_attr(attr);
        ssize_t ret;

        ret = a->store ? a->store(b, buf, count) : -EIO;

        return ret;
}

static struct sysfs_ops threshold_ops = {
        .show                   = show,
        .store                  = store,
};

static struct kobj_type threshold_ktype = {
        .sysfs_ops              = &threshold_ops,
        .default_attrs          = default_attrs,
};
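
/*
 * Walk the chain of MISC blocks for a bank: block 0 lives at
 * MSR_IA32_MC0_MISC + bank * 4, block 1 at the address named by the
 * BLKPTR field, and later blocks at consecutive MSRs after that,
 * allocating a threshold_block and sysfs object for each valid one.
 */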
static __cpuinit int allocate_threshold_blocks(unsigned int cpu,
                                               unsigned int bank,
                                               unsigned int block,
                                               u32 address)
{
        struct threshold_block *b = NULL;
        u32 low, high;
        int err;

        if ((bank >= NR_BANKS) || (block >= NR_BLOCKS))
                return 0;

        if (rdmsr_safe_on_cpu(cpu, address, &low, &high))
                return 0;

        if (!(high & MASK_VALID_HI)) {
                if (block)
                        goto recurse;
                else
                        return 0;
        }

        if (!(high & MASK_CNTP_HI)  ||
             (high & MASK_LOCKED_HI))
                goto recurse;

        b = kzalloc(sizeof(struct threshold_block), GFP_KERNEL);
        if (!b)
                return -ENOMEM;

        b->block                = block;
        b->bank                 = bank;
        b->cpu                  = cpu;
        b->address              = address;
        b->interrupt_enable     = 0;
        b->threshold_limit      = THRESHOLD_MAX;

        INIT_LIST_HEAD(&b->miscj);

        if (per_cpu(threshold_banks, cpu)[bank]->blocks) {
                list_add(&b->miscj,
                         &per_cpu(threshold_banks, cpu)[bank]->blocks->miscj);
        } else {
                per_cpu(threshold_banks, cpu)[bank]->blocks = b;
        }

        err = kobject_init_and_add(&b->kobj, &threshold_ktype,
                                   per_cpu(threshold_banks, cpu)[bank]->kobj,
                                   "misc%i", block);
        if (err)
                goto out_free;
recurse:
        if (!block) {
                address = (low & MASK_BLKPTR_LO) >> 21;
                if (!address)
                        return 0;
                address += MCG_XBLK_ADDR;
        } else {
                ++address;
        }

        err = allocate_threshold_blocks(cpu, bank, ++block, address);
        if (err)
                goto out_free;

        if (b)
                kobject_uevent(&b->kobj, KOBJ_ADD);

        return err;

out_free:
        if (b) {
                kobject_put(&b->kobj);
                kfree(b);
        }
        return err;
}
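
/* Seed the recursion above at block 0 of the bank's first MISC MSR. */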
static __cpuinit long
local_allocate_threshold_blocks(int cpu, unsigned int bank)
{
        return allocate_threshold_blocks(cpu, bank, 0,
                                         MSR_IA32_MC0_MISC + bank * 4);
}

/* symlinks sibling shared banks to first core.  first core owns dir/files. */
static __cpuinit int threshold_create_bank(unsigned int cpu, unsigned int bank)
{
        int i, err = 0;
        struct threshold_bank *b = NULL;
        char name[32];
#ifdef CONFIG_SMP
        struct cpuinfo_x86 *c = &cpu_data(cpu);
#endif

        sprintf(name, "threshold_bank%i", bank);

#ifdef CONFIG_SMP
        if (cpu_data(cpu).cpu_core_id && shared_bank[bank]) {   /* symlink */
                i = cpumask_first(c->llc_shared_map);

                /* first core not up yet */
                if (cpu_data(i).cpu_core_id)
                        goto out;

                /* already linked */
                if (per_cpu(threshold_banks, cpu)[bank])
                        goto out;

                b = per_cpu(threshold_banks, i)[bank];

                if (!b)
                        goto out;

                err = sysfs_create_link(&per_cpu(mce_dev, cpu).kobj,
                                        b->kobj, name);
                if (err)
                        goto out;

                cpumask_copy(b->cpus, c->llc_shared_map);
                per_cpu(threshold_banks, cpu)[bank] = b;

                goto out;
        }
#endif

        b = kzalloc(sizeof(struct threshold_bank), GFP_KERNEL);
        if (!b) {
                err = -ENOMEM;
                goto out;
        }
        if (!alloc_cpumask_var(&b->cpus, GFP_KERNEL)) {
                kfree(b);
                err = -ENOMEM;
                goto out;
        }

        b->kobj = kobject_create_and_add(name, &per_cpu(mce_dev, cpu).kobj);
        if (!b->kobj)
                goto out_free;

#ifndef CONFIG_SMP
        cpumask_setall(b->cpus);
#else
        cpumask_copy(b->cpus, c->llc_shared_map);
#endif

        per_cpu(threshold_banks, cpu)[bank] = b;

        err = local_allocate_threshold_blocks(cpu, bank);
        if (err)
                goto out_free;

        for_each_cpu(i, b->cpus) {
                if (i == cpu)
                        continue;

                err = sysfs_create_link(&per_cpu(mce_dev, i).kobj,
                                        b->kobj, name);
                if (err)
                        goto out;

                per_cpu(threshold_banks, i)[bank] = b;
        }

        goto out;

out_free:
        per_cpu(threshold_banks, cpu)[bank] = NULL;
        free_cpumask_var(b->cpus);
        kfree(b);
out:
        return err;
}

/* create dir/files for all valid threshold banks */
static __cpuinit int threshold_create_device(unsigned int cpu)
{
        unsigned int bank;
        int err = 0;

        for (bank = 0; bank < NR_BANKS; ++bank) {
                if (!(per_cpu(bank_map, cpu) & (1 << bank)))
                        continue;
                err = threshold_create_bank(cpu, bank);
                if (err)
                        goto out;
        }
out:
        return err;
}

/*
 * Let's be hotplug friendly: on multi-core processors the first core
 * always takes ownership of the shared sysfs dir/files, and the rest
 * of the cores are symlinked to it.
 */

static void deallocate_threshold_block(unsigned int cpu,
                                       unsigned int bank)
{
        struct threshold_block *pos = NULL;
        struct threshold_block *tmp = NULL;
        struct threshold_bank *head = per_cpu(threshold_banks, cpu)[bank];

        if (!head)
                return;

        list_for_each_entry_safe(pos, tmp, &head->blocks->miscj, miscj) {
                kobject_put(&pos->kobj);
                list_del(&pos->miscj);
                kfree(pos);
        }

        kfree(per_cpu(threshold_banks, cpu)[bank]->blocks);
        per_cpu(threshold_banks, cpu)[bank]->blocks = NULL;
}

static void threshold_remove_bank(unsigned int cpu, int bank)
{
        struct threshold_bank *b;
        char name[32];
        int i = 0;

        b = per_cpu(threshold_banks, cpu)[bank];
        if (!b)
                return;
        if (!b->blocks)
                goto free_out;

        sprintf(name, "threshold_bank%i", bank);

#ifdef CONFIG_SMP
        /* sibling symlink */
        if (shared_bank[bank] && b->blocks->cpu != cpu) {
                sysfs_remove_link(&per_cpu(mce_dev, cpu).kobj, name);
                per_cpu(threshold_banks, cpu)[bank] = NULL;

                return;
        }
#endif

        /* remove all sibling symlinks before unregistering */
        for_each_cpu(i, b->cpus) {
                if (i == cpu)
                        continue;

                sysfs_remove_link(&per_cpu(mce_dev, i).kobj, name);
                per_cpu(threshold_banks, i)[bank] = NULL;
        }

        deallocate_threshold_block(cpu, bank);

free_out:
        kobject_del(b->kobj);
        kobject_put(b->kobj);
        free_cpumask_var(b->cpus);
        kfree(b);
        per_cpu(threshold_banks, cpu)[bank] = NULL;
}

static void threshold_remove_device(unsigned int cpu)
{
        unsigned int bank;

        for (bank = 0; bank < NR_BANKS; ++bank) {
                if (!(per_cpu(bank_map, cpu) & (1 << bank)))
                        continue;
                threshold_remove_bank(cpu, bank);
        }
}

/* get notified when a cpu comes on/off */
static void __cpuinit
amd_64_threshold_cpu_callback(unsigned long action, unsigned int cpu)
{
        switch (action) {
        case CPU_ONLINE:
        case CPU_ONLINE_FROZEN:
                threshold_create_device(cpu);
                break;
        case CPU_DEAD:
        case CPU_DEAD_FROZEN:
                threshold_remove_device(cpu);
                break;
        default:
                break;
        }
}

static __init int threshold_init_device(void)
{
        unsigned lcpu = 0;

        /* to hit CPUs online before the notifier is up */
        for_each_online_cpu(lcpu) {
                int err = threshold_create_device(lcpu);

                if (err)
                        return err;
        }
        threshold_cpu_callback = amd_64_threshold_cpu_callback;

        return 0;
}
device_initcall(threshold_init_device);
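
/*
 * Example interaction from user space, assuming the usual machinecheck
 * sysfs root (paths are illustrative, not guaranteed):
 *
 *   # echo 100 > /sys/devices/system/machinecheck/machinecheck0/threshold_bank4/misc0/threshold_limit
 *   # echo 1 > /sys/devices/system/machinecheck/machinecheck0/threshold_bank4/misc0/interrupt_enable
 *   # cat /sys/devices/system/machinecheck/machinecheck0/threshold_bank4/misc0/error_count
 */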