arch/x86/kernel/mce_64.c
/*
 * Machine check handler.
 * K8 parts Copyright 2002,2003 Andi Kleen, SuSE Labs.
 * Rest from unknown author(s).
 * 2004 Andi Kleen. Rewrote most of it.
 */
#include <linux/init.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/string.h>
#include <linux/rcupdate.h>
#include <linux/kallsyms.h>
#include <linux/sysdev.h>
#include <linux/miscdevice.h>
#include <linux/fs.h>
#include <linux/capability.h>
#include <linux/cpu.h>
#include <linux/percpu.h>
#include <linux/poll.h>
#include <linux/thread_info.h>
#include <linux/ctype.h>
#include <linux/kmod.h>
#include <linux/kdebug.h>
#include <asm/processor.h>
#include <asm/msr.h>
#include <asm/mce.h>
#include <asm/uaccess.h>
#include <asm/smp.h>
#include <asm/idle.h>

#define MISC_MCELOG_MINOR 227
#define NR_BANKS 6

atomic_t mce_entry;

static int mce_dont_init;

/*
 * Tolerant levels:
 * 0: always panic on uncorrected errors, log corrected errors
 * 1: panic or SIGBUS on uncorrected errors, log corrected errors
 * 2: SIGBUS or log uncorrected errors (if possible), log corrected errors
 * 3: never panic or SIGBUS, log all errors (for testing only)
 */
static int tolerant = 1;
static int banks;
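/*
 * Per-bank MCi_CTL enable mask: mce_init() writes bank[i] into
 * MSR_IA32_MC0_CTL + 4*i, so clearing a bit disables reporting for that error
 * source (see the K8 GART TLB walk quirk in mce_cpu_quirks()).
 */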
static unsigned long bank[NR_BANKS] = { [0 ... NR_BANKS-1] = ~0UL };
static unsigned long notify_user;
static int rip_msr;
static int mce_bootlog = 1;
static atomic_t mce_events;
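/*
 * Optional path of a user space helper run (via call_usermodehelper, without
 * waiting) from mce_notify_user() whenever new events are logged; it is set
 * through the sysfs 'trigger' attribute further down.
 */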
static char trigger[128];
static char *trigger_argv[2] = { trigger, NULL };

static DECLARE_WAIT_QUEUE_HEAD(mce_wait);

/*
 * Lockless MCE logging infrastructure.
 * This avoids deadlocks on printk locks without having to break locks. Also
 * separate MCEs from kernel messages to avoid bogus bug reports.
 */
struct mce_log mcelog = {
        MCE_LOG_SIGNATURE,
        MCE_LOG_LEN,
};
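/*
 * mce_log() reserves a slot by advancing mcelog.next with cmpxchg() and only
 * sets the entry's 'finished' flag (behind a wmb()) once the record has been
 * copied in, so concurrent readers never see a half-written entry.
 */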
void mce_log(struct mce *mce)
{
        unsigned next, entry;
        atomic_inc(&mce_events);
        mce->finished = 0;
        wmb();
        for (;;) {
                entry = rcu_dereference(mcelog.next);
                for (;;) {
                        /* When the buffer fills up discard new entries. Assume
                           that the earlier errors are the more interesting. */
                        if (entry >= MCE_LOG_LEN) {
                                set_bit(MCE_OVERFLOW, &mcelog.flags);
                                return;
                        }
                        /* Old left over entry. Skip. */
                        if (mcelog.entry[entry].finished) {
                                entry++;
                                continue;
                        }
                        break;
                }
                smp_rmb();
                next = entry + 1;
                if (cmpxchg(&mcelog.next, entry, next) == entry)
                        break;
        }
        memcpy(mcelog.entry + entry, mce, sizeof(struct mce));
        wmb();
        mcelog.entry[entry].finished = 1;
        wmb();

        set_bit(0, &notify_user);
}

static void print_mce(struct mce *m)
{
        printk(KERN_EMERG "\n"
               KERN_EMERG "HARDWARE ERROR\n"
               KERN_EMERG
               "CPU %d: Machine Check Exception: %16Lx Bank %d: %016Lx\n",
               m->cpu, m->mcgstatus, m->bank, m->status);
        if (m->rip) {
                printk(KERN_EMERG
                       "RIP%s %02x:<%016Lx> ",
                       !(m->mcgstatus & MCG_STATUS_EIPV) ? " !INEXACT!" : "",
                       m->cs, m->rip);
                if (m->cs == __KERNEL_CS)
                        print_symbol("{%s}", m->rip);
                printk("\n");
        }
        printk(KERN_EMERG "TSC %Lx ", m->tsc);
        if (m->addr)
                printk("ADDR %Lx ", m->addr);
        if (m->misc)
                printk("MISC %Lx ", m->misc);
        printk("\n");
        printk(KERN_EMERG "This is not a software problem!\n");
        printk(KERN_EMERG
               "Run through mcelog --ascii to decode and contact your hardware vendor\n");
}

static void mce_panic(char *msg, struct mce *backup, unsigned long start)
{
        int i;

        oops_begin();
        for (i = 0; i < MCE_LOG_LEN; i++) {
                unsigned long tsc = mcelog.entry[i].tsc;
                if (time_before(tsc, start))
                        continue;
                print_mce(&mcelog.entry[i]);
                if (backup && mcelog.entry[i].tsc == backup->tsc)
                        backup = NULL;
        }
        if (backup)
                print_mce(backup);
        panic(msg);
}

static int mce_available(struct cpuinfo_x86 *c)
{
        return cpu_has(c, X86_FEATURE_MCE) && cpu_has(c, X86_FEATURE_MCA);
}

static inline void mce_get_rip(struct mce *m, struct pt_regs *regs)
{
        if (regs && (m->mcgstatus & MCG_STATUS_RIPV)) {
                m->rip = regs->rip;
                m->cs = regs->cs;
        } else {
                m->rip = 0;
                m->cs = 0;
        }
        if (rip_msr) {
                /* Assume the RIP in the MSR is exact. Is this true? */
                m->mcgstatus |= MCG_STATUS_EIPV;
                rdmsrl(rip_msr, m->rip);
                m->cs = 0;
        }
}
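/*
 * do_machine_check() is entered both from the #MC exception (regs != NULL) and
 * from the periodic poller / mce_init() (regs == NULL). The error_code
 * convention: -1 logs boot-leftover errors, -2 clears them without logging,
 * and values >= 0 additionally record a TSC timestamp for each event.
 */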
/*
 * The actual machine check handler
 */
void do_machine_check(struct pt_regs *regs, long error_code)
{
        struct mce m, panicm;
        u64 mcestart = 0;
        int i;
        int panicm_found = 0;
        /*
         * If no_way_out gets set, there is no safe way to recover from this
         * MCE. If tolerant is cranked up, we'll try anyway.
         */
        int no_way_out = 0;
        /*
         * If kill_it gets set, there might be a way to recover from this
         * error.
         */
        int kill_it = 0;

        atomic_inc(&mce_entry);

        if (regs)
                notify_die(DIE_NMI, "machine check", regs, error_code, 18, SIGKILL);
        if (!banks)
                goto out2;

        memset(&m, 0, sizeof(struct mce));
        m.cpu = smp_processor_id();
        rdmsrl(MSR_IA32_MCG_STATUS, m.mcgstatus);
        /* if the restart IP is not valid, we're done for */
        if (!(m.mcgstatus & MCG_STATUS_RIPV))
                no_way_out = 1;

        rdtscll(mcestart);
        barrier();

        for (i = 0; i < banks; i++) {
                if (!bank[i])
                        continue;

                m.misc = 0;
                m.addr = 0;
                m.bank = i;
                m.tsc = 0;

                rdmsrl(MSR_IA32_MC0_STATUS + i*4, m.status);
                if ((m.status & MCI_STATUS_VAL) == 0)
                        continue;

                if (m.status & MCI_STATUS_EN) {
                        /* if PCC was set, there's no way out */
                        no_way_out |= !!(m.status & MCI_STATUS_PCC);
                        /*
                         * If this error was uncorrectable and there was
                         * an overflow, we're in trouble. If no overflow,
                         * we might get away with just killing a task.
                         */
                        if (m.status & MCI_STATUS_UC) {
                                if (tolerant < 1 || m.status & MCI_STATUS_OVER)
                                        no_way_out = 1;
                                kill_it = 1;
                        }
                }

                if (m.status & MCI_STATUS_MISCV)
                        rdmsrl(MSR_IA32_MC0_MISC + i*4, m.misc);
                if (m.status & MCI_STATUS_ADDRV)
                        rdmsrl(MSR_IA32_MC0_ADDR + i*4, m.addr);

                mce_get_rip(&m, regs);
                if (error_code >= 0)
                        rdtscll(m.tsc);
                if (error_code != -2)
                        mce_log(&m);

                /* Did this bank cause the exception? */
                /* Assume that the bank with uncorrectable errors did it,
                   and that there is only a single one. */
                if ((m.status & MCI_STATUS_UC) && (m.status & MCI_STATUS_EN)) {
                        panicm = m;
                        panicm_found = 1;
                }

                add_taint(TAINT_MACHINE_CHECK);
        }

        /* Never do anything final in the polling timer */
        if (!regs)
                goto out;

        /* If we didn't find an uncorrectable error, pick
           the last one (shouldn't happen, just being safe). */
        if (!panicm_found)
                panicm = m;

        /*
         * If we have decided that we just CAN'T continue, and the user
         * has not set tolerant to an insane level, give up and die.
         */
        if (no_way_out && tolerant < 3)
                mce_panic("Machine check", &panicm, mcestart);

        /*
         * If the error seems to be unrecoverable, something should be
         * done. Try to kill as little as possible. If we can kill just
         * one task, do that. If the user has set the tolerance very
         * high, don't try to do anything at all.
         */
        if (kill_it && tolerant < 3) {
                int user_space = 0;

                /*
                 * If the EIPV bit is set, it means the saved IP is the
                 * instruction which caused the MCE.
                 */
                if (m.mcgstatus & MCG_STATUS_EIPV)
                        user_space = panicm.rip && (panicm.cs & 3);

                /*
                 * If we know that the error was in user space, send a
                 * SIGBUS. Otherwise, panic if tolerance is low.
                 *
                 * do_exit() takes an awful lot of locks and has a slight
                 * risk of deadlocking.
                 */
                if (user_space) {
                        do_exit(SIGBUS);
                } else if (panic_on_oops || tolerant < 2) {
                        mce_panic("Uncorrected machine check",
                                  &panicm, mcestart);
                }
        }

        /* notify userspace ASAP */
        set_thread_flag(TIF_MCE_NOTIFY);

 out:
        /* the last thing we do is clear state */
        for (i = 0; i < banks; i++)
                wrmsrl(MSR_IA32_MC0_STATUS+4*i, 0);
        wrmsrl(MSR_IA32_MCG_STATUS, 0);
 out2:
        atomic_dec(&mce_entry);
}
#ifdef CONFIG_X86_MCE_INTEL
/***
 * mce_log_therm_throt_event - Logs the thermal throttling event to mcelog
 * @cpu: The CPU on which the event occurred.
 * @status: Event status information
 *
 * This function should be called by the thermal interrupt after the
 * event has been processed and the decision was made to log the event
 * further.
 *
 * The status parameter will be saved to the 'status' field of 'struct mce'
 * and historically has been the register value of the
 * MSR_IA32_THERMAL_STATUS (Intel) msr.
 */
void mce_log_therm_throt_event(unsigned int cpu, __u64 status)
{
        struct mce m;

        memset(&m, 0, sizeof(m));
        m.cpu = cpu;
        m.bank = MCE_THERMAL_BANK;
        m.status = status;
        rdtscll(m.tsc);
        mce_log(&m);
}
#endif /* CONFIG_X86_MCE_INTEL */
/*
 * Periodic polling timer for "silent" machine check errors. If the
 * poller finds an MCE, poll 2x faster. When the poller finds no more
 * errors, poll 2x slower (up to check_interval seconds).
 */

static int check_interval = 5 * 60; /* 5 minutes */
static int next_interval; /* in jiffies */
static void mcheck_timer(struct work_struct *work);
static DECLARE_DELAYED_WORK(mcheck_work, mcheck_timer);

static void mcheck_check_cpu(void *info)
{
        if (mce_available(&current_cpu_data))
                do_machine_check(NULL, 0);
}

static void mcheck_timer(struct work_struct *work)
{
        on_each_cpu(mcheck_check_cpu, NULL, 1, 1);

        /*
         * Alert userspace if needed. If we logged an MCE, reduce the
         * polling interval, otherwise increase the polling interval.
         */
        if (mce_notify_user()) {
                next_interval = max(next_interval/2, HZ/100);
        } else {
                next_interval = min(next_interval*2,
                                (int)round_jiffies_relative(check_interval*HZ));
        }

        schedule_delayed_work(&mcheck_work, next_interval);
}

/*
 * This is only called from process context. This is where we do
 * anything we need to alert userspace about new MCEs. This is called
 * directly from the poller and also from entry.S and idle, thanks to
 * TIF_MCE_NOTIFY.
 */
int mce_notify_user(void)
{
        clear_thread_flag(TIF_MCE_NOTIFY);
        if (test_and_clear_bit(0, &notify_user)) {
                static unsigned long last_print;
                unsigned long now = jiffies;

                wake_up_interruptible(&mce_wait);
                if (trigger[0])
                        call_usermodehelper(trigger, trigger_argv, NULL,
                                            UMH_NO_WAIT);

                if (time_after_eq(now, last_print + (check_interval*HZ))) {
                        last_print = now;
                        printk(KERN_INFO "Machine check events logged\n");
                }

                return 1;
        }
        return 0;
}

/* see if the idle task needs to notify userspace */
static int
mce_idle_callback(struct notifier_block *nfb, unsigned long action, void *junk)
{
        /* IDLE_END should be safe - interrupts are back on */
        if (action == IDLE_END && test_thread_flag(TIF_MCE_NOTIFY))
                mce_notify_user();

        return NOTIFY_OK;
}

static struct notifier_block mce_idle_notifier = {
        .notifier_call = mce_idle_callback,
};

static __init int periodic_mcheck_init(void)
{
        next_interval = check_interval * HZ;
        if (next_interval)
                schedule_delayed_work(&mcheck_work,
                                      round_jiffies_relative(next_interval));
        idle_notifier_register(&mce_idle_notifier);
        return 0;
}
__initcall(periodic_mcheck_init);
/*
 * Initialize Machine Checks for a CPU.
 */
static void mce_init(void *dummy)
{
        u64 cap;
        int i;

        rdmsrl(MSR_IA32_MCG_CAP, cap);
        banks = cap & 0xff;
        if (banks > NR_BANKS) {
                printk(KERN_INFO "MCE: warning: using only %d banks\n", banks);
                banks = NR_BANKS;
        }
        /* Use accurate RIP reporting if available. */
        if ((cap & (1<<9)) && ((cap >> 16) & 0xff) >= 9)
                rip_msr = MSR_IA32_MCG_EIP;

        /* Log the machine checks left over from the previous reset.
           This also clears all registers */
        do_machine_check(NULL, mce_bootlog ? -1 : -2);

        set_in_cr4(X86_CR4_MCE);

        if (cap & MCG_CTL_P)
                wrmsr(MSR_IA32_MCG_CTL, 0xffffffff, 0xffffffff);

        for (i = 0; i < banks; i++) {
                wrmsrl(MSR_IA32_MC0_CTL+4*i, bank[i]);
                wrmsrl(MSR_IA32_MC0_STATUS+4*i, 0);
        }
}

/* Add per CPU specific workarounds here */
static void __cpuinit mce_cpu_quirks(struct cpuinfo_x86 *c)
{
        /* This should be disabled by the BIOS, but isn't always */
        if (c->x86_vendor == X86_VENDOR_AMD && c->x86 == 15) {
                /* disable GART TBL walk error reporting, which trips off
                   incorrectly with the IOMMU & 3ware & Cerberus. */
                clear_bit(10, &bank[4]);
                /* Lots of broken BIOS around that don't clear them
                   by default and leave crap in there. Don't log. */
                mce_bootlog = 0;
        }
}

static void __cpuinit mce_cpu_features(struct cpuinfo_x86 *c)
{
        switch (c->x86_vendor) {
        case X86_VENDOR_INTEL:
                mce_intel_feature_init(c);
                break;
        case X86_VENDOR_AMD:
                mce_amd_feature_init(c);
                break;
        default:
                break;
        }
}

/*
 * Called for each booted CPU to set up machine checks.
 * Must be called with preempt off.
 */
void __cpuinit mcheck_init(struct cpuinfo_x86 *c)
{
        static cpumask_t mce_cpus = CPU_MASK_NONE;

        mce_cpu_quirks(c);

        if (mce_dont_init ||
            cpu_test_and_set(smp_processor_id(), mce_cpus) ||
            !mce_available(c))
                return;

        mce_init(NULL);
        mce_cpu_features(c);
}
/*
 * Character device to read and clear the MCE log.
 */

static DEFINE_SPINLOCK(mce_state_lock);
static int open_count;  /* #times opened */
static int open_exclu;  /* already open exclusive? */

static int mce_open(struct inode *inode, struct file *file)
{
        spin_lock(&mce_state_lock);

        if (open_exclu || (open_count && (file->f_flags & O_EXCL))) {
                spin_unlock(&mce_state_lock);
                return -EBUSY;
        }

        if (file->f_flags & O_EXCL)
                open_exclu = 1;
        open_count++;

        spin_unlock(&mce_state_lock);

        return nonseekable_open(inode, file);
}

static int mce_release(struct inode *inode, struct file *file)
{
        spin_lock(&mce_state_lock);

        open_count--;
        open_exclu = 0;

        spin_unlock(&mce_state_lock);

        return 0;
}

static void collect_tscs(void *data)
{
        unsigned long *cpu_tsc = (unsigned long *)data;
        rdtscll(cpu_tsc[smp_processor_id()]);
}
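/*
 * mce_read() below copies out everything logged so far and resets the buffer,
 * then waits (synchronize_sched) and uses the per-CPU TSC snapshots gathered
 * by collect_tscs() to pick up records that were still being written during
 * the first pass.
 */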
static ssize_t mce_read(struct file *filp, char __user *ubuf, size_t usize, loff_t *off)
{
        unsigned long *cpu_tsc;
        static DECLARE_MUTEX(mce_read_sem);
        unsigned next;
        char __user *buf = ubuf;
        int i, err;

        cpu_tsc = kmalloc(NR_CPUS * sizeof(long), GFP_KERNEL);
        if (!cpu_tsc)
                return -ENOMEM;

        down(&mce_read_sem);
        next = rcu_dereference(mcelog.next);

        /* Only supports full reads right now */
        if (*off != 0 || usize < MCE_LOG_LEN*sizeof(struct mce)) {
                up(&mce_read_sem);
                kfree(cpu_tsc);
                return -EINVAL;
        }

        err = 0;
        for (i = 0; i < next; i++) {
                unsigned long start = jiffies;
                while (!mcelog.entry[i].finished) {
                        if (time_after_eq(jiffies, start + 2)) {
                                memset(mcelog.entry + i, 0, sizeof(struct mce));
                                goto timeout;
                        }
                        cpu_relax();
                }
                smp_rmb();
                err |= copy_to_user(buf, mcelog.entry + i, sizeof(struct mce));
                buf += sizeof(struct mce);
 timeout:
                ;
        }

        memset(mcelog.entry, 0, next * sizeof(struct mce));
        mcelog.next = 0;

        synchronize_sched();

        /* Collect entries that were still getting written before the synchronize. */
        on_each_cpu(collect_tscs, cpu_tsc, 1, 1);
        for (i = next; i < MCE_LOG_LEN; i++) {
                if (mcelog.entry[i].finished &&
                    mcelog.entry[i].tsc < cpu_tsc[mcelog.entry[i].cpu]) {
                        err |= copy_to_user(buf, mcelog.entry+i, sizeof(struct mce));
                        smp_rmb();
                        buf += sizeof(struct mce);
                        memset(&mcelog.entry[i], 0, sizeof(struct mce));
                }
        }
        up(&mce_read_sem);
        kfree(cpu_tsc);
        return err ? -EFAULT : buf - ubuf;
}

static unsigned int mce_poll(struct file *file, poll_table *wait)
{
        poll_wait(file, &mce_wait, wait);
        if (rcu_dereference(mcelog.next))
                return POLLIN | POLLRDNORM;
        return 0;
}
static int mce_ioctl(struct inode *i, struct file *f, unsigned int cmd, unsigned long arg)
{
        int __user *p = (int __user *)arg;

        if (!capable(CAP_SYS_ADMIN))
                return -EPERM;
        switch (cmd) {
        case MCE_GET_RECORD_LEN:
                return put_user(sizeof(struct mce), p);
        case MCE_GET_LOG_LEN:
                return put_user(MCE_LOG_LEN, p);
        case MCE_GETCLEAR_FLAGS: {
                unsigned flags;
                do {
                        flags = mcelog.flags;
                } while (cmpxchg(&mcelog.flags, flags, 0) != flags);
                return put_user(flags, p);
        }
        default:
                return -ENOTTY;
        }
}

static const struct file_operations mce_chrdev_ops = {
        .open = mce_open,
        .release = mce_release,
        .read = mce_read,
        .poll = mce_poll,
        .ioctl = mce_ioctl,
};

static struct miscdevice mce_log_device = {
        MISC_MCELOG_MINOR,
        "mcelog",
        &mce_chrdev_ops,
};
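/*
 * The misc device above registers as /dev/mcelog (minor 227). Reads must
 * request the full buffer (MCE_LOG_LEN records) and clear it as a side
 * effect; opening with O_EXCL gives the reader exclusive access.
 */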
static unsigned long old_cr4 __initdata;

void __init stop_mce(void)
{
        old_cr4 = read_cr4();
        clear_in_cr4(X86_CR4_MCE);
}

void __init restart_mce(void)
{
        if (old_cr4 & X86_CR4_MCE)
                set_in_cr4(X86_CR4_MCE);
}

/*
 * Old style boot options parsing. Only for compatibility.
 */
static int __init mcheck_disable(char *str)
{
        mce_dont_init = 1;
        return 1;
}

/* mce=off disables machine check. Note you can re-enable it later
   using sysfs.
   mce=TOLERANCELEVEL (number, see above)
   mce=bootlog Log MCEs from before booting. Disabled by default on AMD.
   mce=nobootlog Don't log MCEs from before booting. */
static int __init mcheck_enable(char *str)
{
        if (*str == '=')
                str++;
        if (!strcmp(str, "off"))
                mce_dont_init = 1;
        else if (!strcmp(str, "bootlog") || !strcmp(str, "nobootlog"))
                mce_bootlog = str[0] == 'b';
        else if (isdigit(str[0]))
                get_option(&str, &tolerant);
        else
                printk("mce= argument %s ignored. Please use /sys\n", str);
        return 1;
}

__setup("nomce", mcheck_disable);
__setup("mce", mcheck_enable);
/*
 * Sysfs support
 */

/* On resume clear all MCE state. Don't want to see leftovers from the BIOS.
   Only one CPU is active at this time, the others get re-added later using
   CPU hotplug. */
static int mce_resume(struct sys_device *dev)
{
        mce_init(NULL);
        return 0;
}

/* Reinit MCEs after user configuration changes */
static void mce_restart(void)
{
        if (next_interval)
                cancel_delayed_work(&mcheck_work);
        /* Timer race is harmless here */
        on_each_cpu(mce_init, NULL, 1, 1);
        next_interval = check_interval * HZ;
        if (next_interval)
                schedule_delayed_work(&mcheck_work,
                                      round_jiffies_relative(next_interval));
}

static struct sysdev_class mce_sysclass = {
        .resume = mce_resume,
        set_kset_name("machinecheck"),
};
DEFINE_PER_CPU(struct sys_device, device_mce);

/* Why are there no generic functions for this? */
#define ACCESSOR(name, var, start) \
        static ssize_t show_ ## name(struct sys_device *s, char *buf) { \
                return sprintf(buf, "%lx\n", (unsigned long)var); \
        } \
        static ssize_t set_ ## name(struct sys_device *s, const char *buf, size_t siz) { \
                char *end; \
                unsigned long new = simple_strtoul(buf, &end, 0); \
                if (end == buf) return -EINVAL; \
                var = new; \
                start; \
                return end-buf; \
        } \
        static SYSDEV_ATTR(name, 0644, show_ ## name, set_ ## name);

/* TBD should generate these dynamically based on number of available banks */
ACCESSOR(bank0ctl, bank[0], mce_restart())
ACCESSOR(bank1ctl, bank[1], mce_restart())
ACCESSOR(bank2ctl, bank[2], mce_restart())
ACCESSOR(bank3ctl, bank[3], mce_restart())
ACCESSOR(bank4ctl, bank[4], mce_restart())
ACCESSOR(bank5ctl, bank[5], mce_restart())

static ssize_t show_trigger(struct sys_device *s, char *buf)
{
        strcpy(buf, trigger);
        strcat(buf, "\n");
        return strlen(trigger) + 1;
}
static ssize_t set_trigger(struct sys_device *s, const char *buf, size_t siz)
{
        char *p;
        int len;

        strncpy(trigger, buf, sizeof(trigger));
        trigger[sizeof(trigger)-1] = 0;
        len = strlen(trigger);
        p = strchr(trigger, '\n');
        /* strchr() returns NULL when there is no newline to strip */
        if (p)
                *p = 0;
        return len;
}

static SYSDEV_ATTR(trigger, 0644, show_trigger, set_trigger);
ACCESSOR(tolerant, tolerant, )
ACCESSOR(check_interval, check_interval, mce_restart())

static struct sysdev_attribute *mce_attributes[] = {
        &attr_bank0ctl, &attr_bank1ctl, &attr_bank2ctl,
        &attr_bank3ctl, &attr_bank4ctl, &attr_bank5ctl,
        &attr_tolerant, &attr_check_interval, &attr_trigger,
        NULL
};
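/*
 * With the "machinecheck" sysdev class above, these attributes typically
 * appear as per-CPU files such as
 * /sys/devices/system/machinecheck/machinecheck0/tolerant; writing a bankNctl
 * or check_interval file triggers mce_restart().
 */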
/* Per cpu sysdev init. All of the cpus still share the same ctl bank */
static __cpuinit int mce_create_device(unsigned int cpu)
{
        int err;
        int i;

        if (!mce_available(&cpu_data[cpu]))
                return -EIO;

        per_cpu(device_mce,cpu).id = cpu;
        per_cpu(device_mce,cpu).cls = &mce_sysclass;

        err = sysdev_register(&per_cpu(device_mce,cpu));

        if (!err) {
                for (i = 0; mce_attributes[i]; i++)
                        sysdev_create_file(&per_cpu(device_mce,cpu),
                                           mce_attributes[i]);
        }
        return err;
}

static void mce_remove_device(unsigned int cpu)
{
        int i;

        for (i = 0; mce_attributes[i]; i++)
                sysdev_remove_file(&per_cpu(device_mce,cpu),
                                   mce_attributes[i]);
        sysdev_unregister(&per_cpu(device_mce,cpu));
        memset(&per_cpu(device_mce, cpu).kobj, 0, sizeof(struct kobject));
}

/* Get notified when a cpu comes on/off. Be hotplug friendly. */
static int
mce_cpu_callback(struct notifier_block *nfb, unsigned long action, void *hcpu)
{
        unsigned int cpu = (unsigned long)hcpu;

        switch (action) {
        case CPU_ONLINE:
        case CPU_ONLINE_FROZEN:
                mce_create_device(cpu);
                break;
        case CPU_DEAD:
        case CPU_DEAD_FROZEN:
                mce_remove_device(cpu);
                break;
        }
        return NOTIFY_OK;
}

static struct notifier_block mce_cpu_notifier = {
        .notifier_call = mce_cpu_callback,
};

static __init int mce_init_device(void)
{
        int err;
        int i = 0;

        if (!mce_available(&boot_cpu_data))
                return -EIO;

        err = sysdev_class_register(&mce_sysclass);

        for_each_online_cpu(i) {
                mce_create_device(i);
        }

        register_hotcpu_notifier(&mce_cpu_notifier);
        misc_register(&mce_log_device);
        return err;
}

device_initcall(mce_init_device);