x86 MCE: Fix CPU hotplug problem with multiple multicore AMD CPUs
[linux-2.6/mini2440.git] arch/x86/kernel/cpu/mcheck/mce_64.c
blob 726a5fcdf34113efe492ecdaa324308aa1a61fbd
/*
 * Machine check handler.
 * K8 parts Copyright 2002,2003 Andi Kleen, SuSE Labs.
 * Rest from unknown author(s).
 * 2004 Andi Kleen. Rewrote most of it.
 */
#include <linux/init.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/smp_lock.h>
#include <linux/string.h>
#include <linux/rcupdate.h>
#include <linux/kallsyms.h>
#include <linux/sysdev.h>
#include <linux/miscdevice.h>
#include <linux/fs.h>
#include <linux/capability.h>
#include <linux/cpu.h>
#include <linux/percpu.h>
#include <linux/poll.h>
#include <linux/thread_info.h>
#include <linux/ctype.h>
#include <linux/kmod.h>
#include <linux/kdebug.h>
#include <asm/processor.h>
#include <asm/msr.h>
#include <asm/mce.h>
#include <asm/uaccess.h>
#include <asm/smp.h>
#include <asm/idle.h>
#define MISC_MCELOG_MINOR 227
#define NR_SYSFS_BANKS 6

atomic_t mce_entry;

static int mce_dont_init;

/*
 * Tolerant levels:
 * 0: always panic on uncorrected errors, log corrected errors
 * 1: panic or SIGBUS on uncorrected errors, log corrected errors
 * 2: SIGBUS or log uncorrected errors (if possible), log corrected errors
 * 3: never panic or SIGBUS, log all errors (for testing only)
 */
static int tolerant = 1;
static int banks;
static unsigned long bank[NR_SYSFS_BANKS] = { [0 ... NR_SYSFS_BANKS-1] = ~0UL };
static unsigned long notify_user;
static int rip_msr;
static int mce_bootlog = -1;
static atomic_t mce_events;

static char trigger[128];
static char *trigger_argv[2] = { trigger, NULL };

static DECLARE_WAIT_QUEUE_HEAD(mce_wait);
/*
 * Lockless MCE logging infrastructure.
 * This avoids deadlocks on printk locks without having to break locks. Also
 * separate MCEs from kernel messages to avoid bogus bug reports.
 */

static struct mce_log mcelog = {
	MCE_LOG_SIGNATURE,
	MCE_LOG_LEN,
};
void mce_log(struct mce *mce)
{
	unsigned next, entry;

	atomic_inc(&mce_events);
	mce->finished = 0;
	wmb();
	for (;;) {
		entry = rcu_dereference(mcelog.next);
		for (;;) {
			/* When the buffer fills up discard new entries. Assume
			   that the earlier errors are the more interesting. */
			if (entry >= MCE_LOG_LEN) {
				set_bit(MCE_OVERFLOW, (unsigned long *)&mcelog.flags);
				return;
			}
			/* Old left over entry. Skip. */
			if (mcelog.entry[entry].finished) {
				entry++;
				continue;
			}
			break;
		}
		smp_rmb();
		next = entry + 1;
		if (cmpxchg(&mcelog.next, entry, next) == entry)
			break;
	}
	memcpy(mcelog.entry + entry, mce, sizeof(struct mce));
	wmb();
	mcelog.entry[entry].finished = 1;
	wmb();

	set_bit(0, &notify_user);
}
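
For readers unfamiliar with the reservation pattern above: each writer races with cmpxchg() to claim the next free slot, and flags the slot "finished" only after the payload is fully written, so readers never see a half-copied record. A minimal userspace sketch of the same core idea using C11 atomics follows; the names, buffer sizes, and atomic types are illustrative, not the kernel's, and the skip-over-stale-entries step and explicit barriers are omitted for brevity.

#include <stdatomic.h>
#include <stdio.h>
#include <string.h>

#define LOG_LEN 32

struct record {
	atomic_int finished;		/* set only after msg is complete */
	char msg[40];
};

static struct record log_buf[LOG_LEN];
static atomic_uint log_next;

static int log_record(const char *msg)
{
	unsigned entry, next;

	for (;;) {
		entry = atomic_load(&log_next);
		if (entry >= LOG_LEN)
			return -1;	/* full: drop new entries, keep old ones */
		next = entry + 1;
		/* Claim slot 'entry'; retry if another writer got there first. */
		if (atomic_compare_exchange_weak(&log_next, &entry, next))
			break;
	}
	strncpy(log_buf[entry].msg, msg, sizeof(log_buf[entry].msg) - 1);
	/* Publish only after the payload is fully written. */
	atomic_store(&log_buf[entry].finished, 1);
	return 0;
}

int main(void)
{
	log_record("example event");
	printf("slot 0: %s\n", log_buf[0].msg);
	return 0;
}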
static void print_mce(struct mce *m)
{
	printk(KERN_EMERG "\n"
	       KERN_EMERG "HARDWARE ERROR\n"
	       KERN_EMERG
	       "CPU %d: Machine Check Exception: %16Lx Bank %d: %016Lx\n",
	       m->cpu, m->mcgstatus, m->bank, m->status);
	if (m->ip) {
		printk(KERN_EMERG "RIP%s %02x:<%016Lx> ",
		       !(m->mcgstatus & MCG_STATUS_EIPV) ? " !INEXACT!" : "",
		       m->cs, m->ip);
		if (m->cs == __KERNEL_CS)
			print_symbol("{%s}", m->ip);
		printk("\n");
	}
	printk(KERN_EMERG "TSC %Lx ", m->tsc);
	if (m->addr)
		printk("ADDR %Lx ", m->addr);
	if (m->misc)
		printk("MISC %Lx ", m->misc);
	printk("\n");
	printk(KERN_EMERG "This is not a software problem!\n");
	printk(KERN_EMERG "Run through mcelog --ascii to decode "
	       "and contact your hardware vendor\n");
}
static void mce_panic(char *msg, struct mce *backup, unsigned long start)
{
	int i;

	oops_begin();
	for (i = 0; i < MCE_LOG_LEN; i++) {
		unsigned long tsc = mcelog.entry[i].tsc;

		if (time_before(tsc, start))
			continue;
		print_mce(&mcelog.entry[i]);
		if (backup && mcelog.entry[i].tsc == backup->tsc)
			backup = NULL;
	}
	if (backup)
		print_mce(backup);
	panic(msg);
}
static int mce_available(struct cpuinfo_x86 *c)
{
	return cpu_has(c, X86_FEATURE_MCE) && cpu_has(c, X86_FEATURE_MCA);
}

static inline void mce_get_rip(struct mce *m, struct pt_regs *regs)
{
	if (regs && (m->mcgstatus & MCG_STATUS_RIPV)) {
		m->ip = regs->ip;
		m->cs = regs->cs;
	} else {
		m->ip = 0;
		m->cs = 0;
	}
	if (rip_msr) {
		/* Assume the RIP in the MSR is exact. Is this true? */
		m->mcgstatus |= MCG_STATUS_EIPV;
		rdmsrl(rip_msr, m->ip);
		m->cs = 0;
	}
}
/*
 * The actual machine check handler
 */
void do_machine_check(struct pt_regs * regs, long error_code)
{
	struct mce m, panicm;
	u64 mcestart = 0;
	int i;
	int panicm_found = 0;
	/*
	 * If no_way_out gets set, there is no safe way to recover from this
	 * MCE. If tolerant is cranked up, we'll try anyway.
	 */
	int no_way_out = 0;
	/*
	 * If kill_it gets set, there might be a way to recover from this
	 * error.
	 */
	int kill_it = 0;

	atomic_inc(&mce_entry);

	if ((regs
	     && notify_die(DIE_NMI, "machine check", regs, error_code,
			   18, SIGKILL) == NOTIFY_STOP)
	    || !banks)
		goto out2;

	memset(&m, 0, sizeof(struct mce));
	m.cpu = smp_processor_id();
	rdmsrl(MSR_IA32_MCG_STATUS, m.mcgstatus);
	/* if the restart IP is not valid, we're done for */
	if (!(m.mcgstatus & MCG_STATUS_RIPV))
		no_way_out = 1;

	rdtscll(mcestart);
	barrier();

	for (i = 0; i < banks; i++) {
		if (i < NR_SYSFS_BANKS && !bank[i])
			continue;

		m.misc = 0;
		m.addr = 0;
		m.bank = i;
		m.tsc = 0;

		rdmsrl(MSR_IA32_MC0_STATUS + i*4, m.status);
		if ((m.status & MCI_STATUS_VAL) == 0)
			continue;

		if (m.status & MCI_STATUS_EN) {
			/* if PCC was set, there's no way out */
			no_way_out |= !!(m.status & MCI_STATUS_PCC);
			/*
			 * If this error was uncorrectable and there was
			 * an overflow, we're in trouble. If no overflow,
			 * we might get away with just killing a task.
			 */
			if (m.status & MCI_STATUS_UC) {
				if (tolerant < 1 || m.status & MCI_STATUS_OVER)
					no_way_out = 1;
				kill_it = 1;
			}
		}

		if (m.status & MCI_STATUS_MISCV)
			rdmsrl(MSR_IA32_MC0_MISC + i*4, m.misc);
		if (m.status & MCI_STATUS_ADDRV)
			rdmsrl(MSR_IA32_MC0_ADDR + i*4, m.addr);

		mce_get_rip(&m, regs);
		if (error_code >= 0)
			rdtscll(m.tsc);
		if (error_code != -2)
			mce_log(&m);

		/* Did this bank cause the exception? */
		/* Assume that the bank with uncorrectable errors did it,
		   and that there is only a single one. */
		if ((m.status & MCI_STATUS_UC) && (m.status & MCI_STATUS_EN)) {
			panicm = m;
			panicm_found = 1;
		}

		add_taint(TAINT_MACHINE_CHECK);
	}

	/* Never do anything final in the polling timer */
	if (!regs)
		goto out;

	/* If we didn't find an uncorrectable error, pick
	   the last one (shouldn't happen, just being safe). */
	if (!panicm_found)
		panicm = m;

	/*
	 * If we have decided that we just CAN'T continue, and the user
	 * has not set tolerant to an insane level, give up and die.
	 */
	if (no_way_out && tolerant < 3)
		mce_panic("Machine check", &panicm, mcestart);

	/*
	 * If the error seems to be unrecoverable, something should be
	 * done. Try to kill as little as possible. If we can kill just
	 * one task, do that. If the user has set the tolerance very
	 * high, don't try to do anything at all.
	 */
	if (kill_it && tolerant < 3) {
		int user_space = 0;

		/*
		 * If the EIPV bit is set, it means the saved IP is the
		 * instruction which caused the MCE.
		 */
		if (m.mcgstatus & MCG_STATUS_EIPV)
			user_space = panicm.ip && (panicm.cs & 3);

		/*
		 * If we know that the error was in user space, send a
		 * SIGBUS. Otherwise, panic if tolerance is low.
		 *
		 * do_exit() takes an awful lot of locks and has a slight
		 * risk of deadlocking.
		 */
		if (user_space) {
			do_exit(SIGBUS);
		} else if (panic_on_oops || tolerant < 2) {
			mce_panic("Uncorrected machine check",
				  &panicm, mcestart);
		}
	}

	/* notify userspace ASAP */
	set_thread_flag(TIF_MCE_NOTIFY);

 out:
	/* the last thing we do is clear state */
	for (i = 0; i < banks; i++)
		wrmsrl(MSR_IA32_MC0_STATUS+4*i, 0);
	wrmsrl(MSR_IA32_MCG_STATUS, 0);
 out2:
	atomic_dec(&mce_entry);
}
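
The "MSR_IA32_MC0_* + 4*i" arithmetic used throughout the handler works because each machine check bank occupies four consecutive MSRs (CTL, STATUS, ADDR, MISC). A compilable sketch of that layout follows; the base values are the architectural x86 MSR numbers, while the bank count of 6 is just an example matching NR_SYSFS_BANKS.

#include <stdio.h>

#define MSR_IA32_MC0_CTL	0x400
#define MSR_IA32_MC0_STATUS	0x401
#define MSR_IA32_MC0_ADDR	0x402
#define MSR_IA32_MC0_MISC	0x403

int main(void)
{
	int bank;

	for (bank = 0; bank < 6; bank++)	/* assume 6 banks */
		printf("bank %d: CTL=%#x STATUS=%#x ADDR=%#x MISC=%#x\n",
		       bank,
		       MSR_IA32_MC0_CTL + 4 * bank,
		       MSR_IA32_MC0_STATUS + 4 * bank,
		       MSR_IA32_MC0_ADDR + 4 * bank,
		       MSR_IA32_MC0_MISC + 4 * bank);
	return 0;
}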
#ifdef CONFIG_X86_MCE_INTEL
/***
 * mce_log_therm_throt_event - Logs the thermal throttling event to mcelog
 * @cpu: The CPU on which the event occurred.
 * @status: Event status information
 *
 * This function should be called by the thermal interrupt after the
 * event has been processed and the decision was made to log the event
 * further.
 *
 * The status parameter will be saved to the 'status' field of 'struct mce'
 * and historically has been the register value of the
 * MSR_IA32_THERMAL_STATUS (Intel) msr.
 */
void mce_log_therm_throt_event(unsigned int cpu, __u64 status)
{
	struct mce m;

	memset(&m, 0, sizeof(m));
	m.cpu = cpu;
	m.bank = MCE_THERMAL_BANK;
	m.status = status;
	rdtscll(m.tsc);
	mce_log(&m);
}
#endif /* CONFIG_X86_MCE_INTEL */
/*
 * Periodic polling timer for "silent" machine check errors. If the
 * poller finds an MCE, poll 2x faster. When the poller finds no more
 * errors, poll 2x slower (up to check_interval seconds).
 */

static int check_interval = 5 * 60; /* 5 minutes */
static int next_interval; /* in jiffies */
static void mcheck_timer(struct work_struct *work);
static DECLARE_DELAYED_WORK(mcheck_work, mcheck_timer);

static void mcheck_check_cpu(void *info)
{
	if (mce_available(&current_cpu_data))
		do_machine_check(NULL, 0);
}

static void mcheck_timer(struct work_struct *work)
{
	on_each_cpu(mcheck_check_cpu, NULL, 1);

	/*
	 * Alert userspace if needed. If we logged an MCE, reduce the
	 * polling interval, otherwise increase the polling interval.
	 */
	if (mce_notify_user()) {
		next_interval = max(next_interval/2, HZ/100);
	} else {
		next_interval = min(next_interval * 2,
				(int)round_jiffies_relative(check_interval*HZ));
	}

	schedule_delayed_work(&mcheck_work, next_interval);
}
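
The backoff above is a plain exponential scheme: halve the interval whenever events were found (floored at HZ/100 ticks), double it when the machine is quiet (capped at check_interval). A toy userspace model follows; the HZ value of 100 is an assumption for the example, and the constants otherwise mirror the code above.

#include <stdio.h>

#define HZ 100
#define CHECK_INTERVAL (5 * 60)

static int next_interval = CHECK_INTERVAL * HZ;

static int max_int(int a, int b) { return a > b ? a : b; }
static int min_int(int a, int b) { return a < b ? a : b; }

static void tick(int found_mce)
{
	if (found_mce)
		next_interval = max_int(next_interval / 2, HZ / 100);
	else
		next_interval = min_int(next_interval * 2, CHECK_INTERVAL * HZ);
	printf("next poll in %d ticks\n", next_interval);
}

int main(void)
{
	tick(1);	/* error seen: poll sooner */
	tick(1);
	tick(0);	/* quiet: back off */
	return 0;
}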
/*
 * This is only called from process context. This is where we do
 * anything we need to alert userspace about new MCEs. This is called
 * directly from the poller and also from entry.S and idle, thanks to
 * TIF_MCE_NOTIFY.
 */
int mce_notify_user(void)
{
	clear_thread_flag(TIF_MCE_NOTIFY);
	if (test_and_clear_bit(0, &notify_user)) {
		static unsigned long last_print;
		unsigned long now = jiffies;

		wake_up_interruptible(&mce_wait);
		if (trigger[0])
			call_usermodehelper(trigger, trigger_argv, NULL,
						UMH_NO_WAIT);

		if (time_after_eq(now, last_print + (check_interval*HZ))) {
			last_print = now;
			printk(KERN_INFO "Machine check events logged\n");
		}

		return 1;
	}
	return 0;
}
/* see if the idle task needs to notify userspace */
static int
mce_idle_callback(struct notifier_block *nfb, unsigned long action, void *junk)
{
	/* IDLE_END should be safe - interrupts are back on */
	if (action == IDLE_END && test_thread_flag(TIF_MCE_NOTIFY))
		mce_notify_user();

	return NOTIFY_OK;
}

static struct notifier_block mce_idle_notifier = {
	.notifier_call = mce_idle_callback,
};

static __init int periodic_mcheck_init(void)
{
	next_interval = check_interval * HZ;
	if (next_interval)
		schedule_delayed_work(&mcheck_work,
				      round_jiffies_relative(next_interval));
	idle_notifier_register(&mce_idle_notifier);
	return 0;
}
__initcall(periodic_mcheck_init);
/*
 * Initialize Machine Checks for a CPU.
 */
static void mce_init(void *dummy)
{
	u64 cap;
	int i;

	rdmsrl(MSR_IA32_MCG_CAP, cap);
	banks = cap & 0xff;
	if (banks > MCE_EXTENDED_BANK) {
		banks = MCE_EXTENDED_BANK;
		printk(KERN_INFO "MCE: warning: using only %d banks\n",
		       MCE_EXTENDED_BANK);
	}
	/* Use accurate RIP reporting if available. */
	if ((cap & (1<<9)) && ((cap >> 16) & 0xff) >= 9)
		rip_msr = MSR_IA32_MCG_EIP;

	/* Log the machine checks left over from the previous reset.
	   This also clears all registers */
	do_machine_check(NULL, mce_bootlog ? -1 : -2);

	set_in_cr4(X86_CR4_MCE);

	if (cap & MCG_CTL_P)
		wrmsr(MSR_IA32_MCG_CTL, 0xffffffff, 0xffffffff);

	for (i = 0; i < banks; i++) {
		if (i < NR_SYSFS_BANKS)
			wrmsrl(MSR_IA32_MC0_CTL+4*i, bank[i]);
		else
			wrmsrl(MSR_IA32_MC0_CTL+4*i, ~0UL);

		wrmsrl(MSR_IA32_MC0_STATUS+4*i, 0);
	}
}
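
mce_init() unpacks MCG_CAP exactly as the tests above read: the low byte is the bank count, bit 9 advertises the extended register set, and bits 23:16 carry the extended register count that gates rip_msr. A standalone sketch follows; the capability value is made up for illustration.

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint64_t cap = 0x90206ULL;		/* hypothetical MCG_CAP value */
	unsigned banks = cap & 0xff;		/* -> 6 banks */
	int has_ext = !!(cap & (1 << 9));	/* extended registers present? */
	unsigned ext_cnt = (cap >> 16) & 0xff;	/* -> 9, enough for MCG_EIP */

	printf("banks=%u ext_p=%d ext_count=%u\n", banks, has_ext, ext_cnt);
	return 0;
}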
/* Add per CPU specific workarounds here */
static void __cpuinit mce_cpu_quirks(struct cpuinfo_x86 *c)
{
	/* This should be disabled by the BIOS, but isn't always */
	if (c->x86_vendor == X86_VENDOR_AMD) {
		if (c->x86 == 15)
			/* disable GART TBL walk error reporting, which trips off
			   incorrectly with the IOMMU & 3ware & Cerberus. */
			clear_bit(10, &bank[4]);
		if (c->x86 <= 17 && mce_bootlog < 0)
			/* Lots of broken BIOSes around that don't clear them
			   by default and leave crap in there. Don't log. */
			mce_bootlog = 0;
	}
}
static void __cpuinit mce_cpu_features(struct cpuinfo_x86 *c)
{
	switch (c->x86_vendor) {
	case X86_VENDOR_INTEL:
		mce_intel_feature_init(c);
		break;
	case X86_VENDOR_AMD:
		mce_amd_feature_init(c);
		break;
	default:
		break;
	}
}

/*
 * Called for each booted CPU to set up machine checks.
 * Must be called with preempt off.
 */
void __cpuinit mcheck_init(struct cpuinfo_x86 *c)
{
	static cpumask_t mce_cpus = CPU_MASK_NONE;

	mce_cpu_quirks(c);

	if (mce_dont_init ||
	    cpu_test_and_set(smp_processor_id(), mce_cpus) ||
	    !mce_available(c))
		return;

	mce_init(NULL);
	mce_cpu_features(c);
}
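
The cpu_test_and_set() guard above is what makes mcheck_init() idempotent per CPU: the first call for a given CPU initializes it and sets its bit, repeat calls see the bit already set and return early. A toy (non-atomic, single-threaded) model of that guard:

#include <stdio.h>

static unsigned long mce_cpus;	/* one bit per CPU, like the cpumask above */

static int toy_test_and_set(int cpu, unsigned long *mask)
{
	int was_set = (*mask >> cpu) & 1;

	*mask |= 1UL << cpu;
	return was_set;
}

int main(void)
{
	/* First call initializes; the second is skipped, as in mcheck_init(). */
	printf("cpu0 first:  %d\n", toy_test_and_set(0, &mce_cpus));
	printf("cpu0 second: %d\n", toy_test_and_set(0, &mce_cpus));
	return 0;
}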
/*
 * Character device to read and clear the MCE log.
 */

static DEFINE_SPINLOCK(mce_state_lock);
static int open_count;	/* #times opened */
static int open_exclu;	/* already open exclusive? */

static int mce_open(struct inode *inode, struct file *file)
{
	lock_kernel();
	spin_lock(&mce_state_lock);

	if (open_exclu || (open_count && (file->f_flags & O_EXCL))) {
		spin_unlock(&mce_state_lock);
		unlock_kernel();
		return -EBUSY;
	}

	if (file->f_flags & O_EXCL)
		open_exclu = 1;
	open_count++;

	spin_unlock(&mce_state_lock);
	unlock_kernel();

	return nonseekable_open(inode, file);
}

static int mce_release(struct inode *inode, struct file *file)
{
	spin_lock(&mce_state_lock);

	open_count--;
	open_exclu = 0;

	spin_unlock(&mce_state_lock);

	return 0;
}
static void collect_tscs(void *data)
{
	unsigned long *cpu_tsc = (unsigned long *)data;

	rdtscll(cpu_tsc[smp_processor_id()]);
}

static ssize_t mce_read(struct file *filp, char __user *ubuf, size_t usize,
			loff_t *off)
{
	unsigned long *cpu_tsc;
	static DEFINE_MUTEX(mce_read_mutex);
	unsigned next;
	char __user *buf = ubuf;
	int i, err;

	cpu_tsc = kmalloc(nr_cpu_ids * sizeof(long), GFP_KERNEL);
	if (!cpu_tsc)
		return -ENOMEM;

	mutex_lock(&mce_read_mutex);
	next = rcu_dereference(mcelog.next);

	/* Only supports full reads right now */
	if (*off != 0 || usize < MCE_LOG_LEN*sizeof(struct mce)) {
		mutex_unlock(&mce_read_mutex);
		kfree(cpu_tsc);
		return -EINVAL;
	}

	err = 0;
	for (i = 0; i < next; i++) {
		unsigned long start = jiffies;

		while (!mcelog.entry[i].finished) {
			if (time_after_eq(jiffies, start + 2)) {
				memset(mcelog.entry + i, 0, sizeof(struct mce));
				goto timeout;
			}
			cpu_relax();
		}
		smp_rmb();
		err |= copy_to_user(buf, mcelog.entry + i, sizeof(struct mce));
		buf += sizeof(struct mce);
 timeout:
		;
	}

	memset(mcelog.entry, 0, next * sizeof(struct mce));
	mcelog.next = 0;

	synchronize_sched();

	/*
	 * Collect entries that were still getting written before the
	 * synchronize.
	 */
	on_each_cpu(collect_tscs, cpu_tsc, 1);
	for (i = next; i < MCE_LOG_LEN; i++) {
		if (mcelog.entry[i].finished &&
		    mcelog.entry[i].tsc < cpu_tsc[mcelog.entry[i].cpu]) {
			err |= copy_to_user(buf, mcelog.entry+i,
					    sizeof(struct mce));
			smp_rmb();
			buf += sizeof(struct mce);
			memset(&mcelog.entry[i], 0, sizeof(struct mce));
		}
	}
	mutex_unlock(&mce_read_mutex);
	kfree(cpu_tsc);
	return err ? -EFAULT : buf - ubuf;
}
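
Note that mce_read() rejects anything but a full-log read from offset 0, so a userspace consumer has to size its buffer from the MCE_GET_RECORD_LEN and MCE_GET_LOG_LEN ioctls defined further down in this file. A minimal sketch of that call sequence follows; it assumes a <asm/mce.h> that exports the ioctl numbers and does not attempt to decode the records, since struct mce's layout is kernel-dependent.

#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <asm/mce.h>	/* MCE_GET_RECORD_LEN etc.; availability is assumed */

int main(void)
{
	int fd = open("/dev/mcelog", O_RDONLY);
	int rec_len, log_len;
	char *buf;
	ssize_t n;

	if (fd < 0) {
		perror("open");
		return 1;
	}
	if (ioctl(fd, MCE_GET_RECORD_LEN, &rec_len) < 0 ||
	    ioctl(fd, MCE_GET_LOG_LEN, &log_len) < 0) {
		perror("ioctl");
		return 1;
	}
	/* mce_read() returns -EINVAL for anything smaller than the whole log */
	buf = malloc((size_t)rec_len * log_len);
	if (!buf)
		return 1;
	n = read(fd, buf, (size_t)rec_len * log_len);
	if (n < 0) {
		perror("read");
		return 1;
	}
	printf("got %zd bytes (%zd records)\n", n, n / rec_len);
	free(buf);
	close(fd);
	return 0;
}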
static unsigned int mce_poll(struct file *file, poll_table *wait)
{
	poll_wait(file, &mce_wait, wait);
	if (rcu_dereference(mcelog.next))
		return POLLIN | POLLRDNORM;
	return 0;
}
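
Since mce_notify_user() wakes mce_wait, a daemon can block in poll() instead of re-reading on a timer. A sketch of that waiter follows; note that mce_poll() reports POLLIN for as long as mcelog.next is non-zero, so a real consumer must drain the log with read() after each wakeup, which the print below merely stands in for.

#include <fcntl.h>
#include <poll.h>
#include <stdio.h>

int main(void)
{
	struct pollfd pfd;

	pfd.fd = open("/dev/mcelog", O_RDONLY);
	if (pfd.fd < 0) {
		perror("open");
		return 1;
	}
	pfd.events = POLLIN;
	for (;;) {
		if (poll(&pfd, 1, -1) < 0) {
			perror("poll");
			return 1;
		}
		if (pfd.revents & POLLIN)
			printf("machine check events pending\n");
		/* a real consumer would read() the full log here */
	}
}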
static long mce_ioctl(struct file *f, unsigned int cmd, unsigned long arg)
{
	int __user *p = (int __user *)arg;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;
	switch (cmd) {
	case MCE_GET_RECORD_LEN:
		return put_user(sizeof(struct mce), p);
	case MCE_GET_LOG_LEN:
		return put_user(MCE_LOG_LEN, p);
	case MCE_GETCLEAR_FLAGS: {
		unsigned flags;

		do {
			flags = mcelog.flags;
		} while (cmpxchg(&mcelog.flags, flags, 0) != flags);
		return put_user(flags, p);
	}
	default:
		return -ENOTTY;
	}
}

static const struct file_operations mce_chrdev_ops = {
	.open = mce_open,
	.release = mce_release,
	.read = mce_read,
	.poll = mce_poll,
	.unlocked_ioctl = mce_ioctl,
};

static struct miscdevice mce_log_device = {
	MISC_MCELOG_MINOR,
	"mcelog",
	&mce_chrdev_ops,
};
static unsigned long old_cr4 __initdata;

void __init stop_mce(void)
{
	old_cr4 = read_cr4();
	clear_in_cr4(X86_CR4_MCE);
}

void __init restart_mce(void)
{
	if (old_cr4 & X86_CR4_MCE)
		set_in_cr4(X86_CR4_MCE);
}

/*
 * Old style boot options parsing. Only for compatibility.
 */
static int __init mcheck_disable(char *str)
{
	mce_dont_init = 1;
	return 1;
}

/* mce=off disables machine check. Note you can re-enable it later
   using sysfs.
   mce=TOLERANCELEVEL (number, see above)
   mce=bootlog Log MCEs from before booting. Disabled by default on AMD.
   mce=nobootlog Don't log MCEs from before booting. */
static int __init mcheck_enable(char *str)
{
	if (!strcmp(str, "off"))
		mce_dont_init = 1;
	else if (!strcmp(str, "bootlog") || !strcmp(str, "nobootlog"))
		mce_bootlog = str[0] == 'b';
	else if (isdigit(str[0]))
		get_option(&str, &tolerant);
	else
		printk("mce= argument %s ignored. Please use /sys\n", str);
	return 1;
}

__setup("nomce", mcheck_disable);
__setup("mce=", mcheck_enable);
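
The mce= classification above falls into three cases: the literal "off", the bootlog pair distinguished by their first character, or a leading digit taken as a tolerance level. A toy userspace re-implementation, for illustration only:

#include <ctype.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static void parse_mce(const char *str)
{
	if (!strcmp(str, "off"))
		printf("machine check disabled\n");
	else if (!strcmp(str, "bootlog") || !strcmp(str, "nobootlog"))
		printf("bootlog = %d\n", str[0] == 'b');	/* 'b' vs 'n' */
	else if (isdigit((unsigned char)str[0]))
		printf("tolerant = %d\n", atoi(str));
	else
		printf("mce= argument %s ignored\n", str);
}

int main(void)
{
	parse_mce("off");
	parse_mce("bootlog");
	parse_mce("2");
	return 0;
}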
/*
 * Sysfs support
 */

/* On resume clear all MCE state. Don't want to see leftovers from the BIOS.
   Only one CPU is active at this time, the others get re-added later using
   CPU hotplug. */
static int mce_resume(struct sys_device *dev)
{
	mce_init(NULL);
	return 0;
}

/* Reinit MCEs after user configuration changes */
static void mce_restart(void)
{
	if (next_interval)
		cancel_delayed_work(&mcheck_work);
	/* Timer race is harmless here */
	on_each_cpu(mce_init, NULL, 1);
	next_interval = check_interval * HZ;
	if (next_interval)
		schedule_delayed_work(&mcheck_work,
				      round_jiffies_relative(next_interval));
}

static struct sysdev_class mce_sysclass = {
	.resume = mce_resume,
	.name = "machinecheck",
};

DEFINE_PER_CPU(struct sys_device, device_mce);
void (*threshold_cpu_callback)(unsigned long action, unsigned int cpu) __cpuinitdata;
/* Why are there no generic functions for this? */
#define ACCESSOR(name, var, start) \
	static ssize_t show_ ## name(struct sys_device *s,		\
				     struct sysdev_attribute *attr,	\
				     char *buf) {			\
		return sprintf(buf, "%lx\n", (unsigned long)var);	\
	}								\
	static ssize_t set_ ## name(struct sys_device *s,		\
				    struct sysdev_attribute *attr,	\
				    const char *buf, size_t siz) {	\
		char *end;						\
		unsigned long new = simple_strtoul(buf, &end, 0);	\
		if (end == buf) return -EINVAL;				\
		var = new;						\
		start;							\
		return end-buf;						\
	}								\
	static SYSDEV_ATTR(name, 0644, show_ ## name, set_ ## name);

/*
 * TBD should generate these dynamically based on number of available banks.
 * Have only 6 control banks in /sysfs until then.
 */
ACCESSOR(bank0ctl, bank[0], mce_restart())
ACCESSOR(bank1ctl, bank[1], mce_restart())
ACCESSOR(bank2ctl, bank[2], mce_restart())
ACCESSOR(bank3ctl, bank[3], mce_restart())
ACCESSOR(bank4ctl, bank[4], mce_restart())
ACCESSOR(bank5ctl, bank[5], mce_restart())
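
The ACCESSOR() macro stamps out a show/set pair around a variable plus a hook that reinitializes the hardware after every store. A userspace analogue with hypothetical names, to make the expansion concrete (the printf stands in for mce_restart()):

#include <stdio.h>
#include <stdlib.h>

#define ACCESSOR(name, var, hook)					\
static int show_##name(char *buf, size_t n)				\
{									\
	return snprintf(buf, n, "%lx\n", (unsigned long)(var));	\
}									\
static int set_##name(const char *buf)					\
{									\
	char *end;							\
	unsigned long new = strtoul(buf, &end, 0);			\
	if (end == buf)							\
		return -1;						\
	(var) = new;							\
	hook;								\
	return (int)(end - buf);					\
}

static unsigned long bank0 = ~0UL;
ACCESSOR(bank0ctl, bank0, printf("restart\n"))

int main(void)
{
	char buf[32];

	set_bank0ctl("0xfe");
	show_bank0ctl(buf, sizeof(buf));
	printf("%s", buf);
	return 0;
}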
static ssize_t show_trigger(struct sys_device *s, struct sysdev_attribute *attr,
			    char *buf)
{
	strcpy(buf, trigger);
	strcat(buf, "\n");
	return strlen(trigger) + 1;
}

static ssize_t set_trigger(struct sys_device *s, struct sysdev_attribute *attr,
			   const char *buf, size_t siz)
{
	char *p;
	int len;

	strncpy(trigger, buf, sizeof(trigger));
	trigger[sizeof(trigger)-1] = 0;
	len = strlen(trigger);
	p = strchr(trigger, '\n');
	if (p)
		*p = 0;
	return len;
}

static SYSDEV_ATTR(trigger, 0644, show_trigger, set_trigger);
static SYSDEV_INT_ATTR(tolerant, 0644, tolerant);
ACCESSOR(check_interval, check_interval, mce_restart())
static struct sysdev_attribute *mce_attributes[] = {
	&attr_bank0ctl, &attr_bank1ctl, &attr_bank2ctl,
	&attr_bank3ctl, &attr_bank4ctl, &attr_bank5ctl,
	&attr_tolerant.attr, &attr_check_interval, &attr_trigger,
	NULL
};
static cpumask_t mce_device_initialized = CPU_MASK_NONE;

/* Per cpu sysdev init. All of the cpus still share the same ctl bank */
static __cpuinit int mce_create_device(unsigned int cpu)
{
	int err;
	int i;

	if (!mce_available(&boot_cpu_data))
		return -EIO;

	memset(&per_cpu(device_mce, cpu).kobj, 0, sizeof(struct kobject));
	per_cpu(device_mce, cpu).id = cpu;
	per_cpu(device_mce, cpu).cls = &mce_sysclass;

	err = sysdev_register(&per_cpu(device_mce, cpu));
	if (err)
		return err;

	for (i = 0; mce_attributes[i]; i++) {
		err = sysdev_create_file(&per_cpu(device_mce, cpu),
					 mce_attributes[i]);
		if (err)
			goto error;
	}
	cpu_set(cpu, mce_device_initialized);

	return 0;
error:
	while (i--) {
		sysdev_remove_file(&per_cpu(device_mce, cpu),
				   mce_attributes[i]);
	}
	sysdev_unregister(&per_cpu(device_mce, cpu));

	return err;
}

static void mce_remove_device(unsigned int cpu)
{
	int i;

	if (!cpu_isset(cpu, mce_device_initialized))
		return;

	for (i = 0; mce_attributes[i]; i++)
		sysdev_remove_file(&per_cpu(device_mce, cpu),
				   mce_attributes[i]);
	sysdev_unregister(&per_cpu(device_mce, cpu));
	cpu_clear(cpu, mce_device_initialized);
}
/* Get notified when a cpu comes on/off. Be hotplug friendly. */
static int __cpuinit mce_cpu_callback(struct notifier_block *nfb,
				      unsigned long action, void *hcpu)
{
	unsigned int cpu = (unsigned long)hcpu;

	switch (action) {
	case CPU_ONLINE:
	case CPU_ONLINE_FROZEN:
		mce_create_device(cpu);
		if (threshold_cpu_callback)
			threshold_cpu_callback(action, cpu);
		break;
	case CPU_DEAD:
	case CPU_DEAD_FROZEN:
		if (threshold_cpu_callback)
			threshold_cpu_callback(action, cpu);
		mce_remove_device(cpu);
		break;
	}
	return NOTIFY_OK;
}

static struct notifier_block mce_cpu_notifier __cpuinitdata = {
	.notifier_call = mce_cpu_callback,
};
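
The ordering in mce_cpu_callback() is deliberately asymmetric: on CPU_ONLINE the base sysdev is created before the AMD threshold callback runs, and on CPU_DEAD the threshold callback runs while the base device still exists, so teardown mirrors setup. A toy userspace model of just that ordering (all names hypothetical):

#include <stdio.h>

enum action { CPU_ONLINE_EV, CPU_DEAD_EV };

static void create_device(int cpu)  { printf("cpu%d: create base device\n", cpu); }
static void remove_device(int cpu)  { printf("cpu%d: remove base device\n", cpu); }
static void threshold_hook(int cpu) { printf("cpu%d: threshold callback\n", cpu); }

static void cpu_callback(enum action a, int cpu)
{
	switch (a) {
	case CPU_ONLINE_EV:
		create_device(cpu);
		threshold_hook(cpu);	/* runs after the base device exists */
		break;
	case CPU_DEAD_EV:
		threshold_hook(cpu);	/* runs while the base device still exists */
		remove_device(cpu);
		break;
	}
}

int main(void)
{
	cpu_callback(CPU_ONLINE_EV, 1);
	cpu_callback(CPU_DEAD_EV, 1);
	return 0;
}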
static __init int mce_init_device(void)
{
	int err;
	int i = 0;

	if (!mce_available(&boot_cpu_data))
		return -EIO;
	err = sysdev_class_register(&mce_sysclass);
	if (err)
		return err;

	for_each_online_cpu(i) {
		err = mce_create_device(i);
		if (err)
			return err;
	}

	register_hotcpu_notifier(&mce_cpu_notifier);
	misc_register(&mce_log_device);
	return err;
}

device_initcall(mce_init_device);