x86: round_jiffies() for i386 and x86-64 non-critical/corrected MCE polling
[linux-2.6/mini2440.git] / arch/x86_64/kernel/mce.c
blob 4d8450ee3635e8ee587f5f639f7862b5a5155a74
1 /*
2 * Machine check handler.
3 * K8 parts Copyright 2002,2003 Andi Kleen, SuSE Labs.
4 * Rest from unknown author(s).
5 * 2004 Andi Kleen. Rewrote most of it.
6 */
8 #include <linux/init.h>
9 #include <linux/types.h>
10 #include <linux/kernel.h>
11 #include <linux/sched.h>
12 #include <linux/string.h>
13 #include <linux/rcupdate.h>
14 #include <linux/kallsyms.h>
15 #include <linux/sysdev.h>
16 #include <linux/miscdevice.h>
17 #include <linux/fs.h>
18 #include <linux/capability.h>
19 #include <linux/cpu.h>
20 #include <linux/percpu.h>
21 #include <linux/poll.h>
22 #include <linux/thread_info.h>
23 #include <linux/ctype.h>
24 #include <linux/kmod.h>
25 #include <linux/kdebug.h>
26 #include <asm/processor.h>
27 #include <asm/msr.h>
28 #include <asm/mce.h>
29 #include <asm/uaccess.h>
30 #include <asm/smp.h>
31 #include <asm/idle.h>
33 #define MISC_MCELOG_MINOR 227
34 #define NR_BANKS 6
36 atomic_t mce_entry;
38 static int mce_dont_init;
41 * Tolerant levels:
42 * 0: always panic on uncorrected errors, log corrected errors
43 * 1: panic or SIGBUS on uncorrected errors, log corrected errors
44 * 2: SIGBUS or log uncorrected errors (if possible), log corrected errors
45 * 3: never panic or SIGBUS, log all errors (for testing only)
47 static int tolerant = 1;
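/*
 * Illustrative userspace sketch (not part of mce.c): raising the tolerant
 * level documented above through the sysfs attribute registered further down
 * in this file.  The path assumes the "machinecheck" sysdev class with one
 * machinecheckN node per CPU; pick the CPU of interest.
 */
#include <stdio.h>

int main(void)
{
	FILE *f = fopen("/sys/devices/system/machinecheck/machinecheck0/tolerant", "w");

	if (!f) {
		perror("tolerant");
		return 1;
	}
	/* 2: SIGBUS or log uncorrected errors (if possible), log corrected errors */
	fputs("2\n", f);
	fclose(f);
	return 0;
}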
48 static int banks;
49 static unsigned long bank[NR_BANKS] = { [0 ... NR_BANKS-1] = ~0UL };
50 static unsigned long notify_user;
51 static int rip_msr;
52 static int mce_bootlog = 1;
53 static atomic_t mce_events;
55 static char trigger[128];
56 static char *trigger_argv[2] = { trigger, NULL };
58 static DECLARE_WAIT_QUEUE_HEAD(mce_wait);
61 * Lockless MCE logging infrastructure.
62 * This avoids deadlocks on printk locks without having to break locks. Also
63 * separate MCEs from kernel messages to avoid bogus bug reports.
66 struct mce_log mcelog = {
67 MCE_LOG_SIGNATURE,
68 MCE_LOG_LEN,
69 };
71 void mce_log(struct mce *mce)
73 unsigned next, entry;
74 atomic_inc(&mce_events);
75 mce->finished = 0;
76 wmb();
77 for (;;) {
78 entry = rcu_dereference(mcelog.next);
79 /* The rmb forces the compiler to reload next in each
80 iteration */
81 rmb();
82 for (;;) {
83 /* When the buffer fills up discard new entries. Assume
84 that the earlier errors are the more interesting. */
85 if (entry >= MCE_LOG_LEN) {
86 set_bit(MCE_OVERFLOW, &mcelog.flags);
87 return;
89 /* Old left over entry. Skip. */
90 if (mcelog.entry[entry].finished) {
91 entry++;
92 continue;
94 break;
96 smp_rmb();
97 next = entry + 1;
98 if (cmpxchg(&mcelog.next, entry, next) == entry)
99 break;
101 memcpy(mcelog.entry + entry, mce, sizeof(struct mce));
102 wmb();
103 mcelog.entry[entry].finished = 1;
104 wmb();
106 set_bit(0, &notify_user);
109 static void print_mce(struct mce *m)
111 printk(KERN_EMERG "\n"
112 KERN_EMERG "HARDWARE ERROR\n"
113 KERN_EMERG
114 "CPU %d: Machine Check Exception: %16Lx Bank %d: %016Lx\n",
115 m->cpu, m->mcgstatus, m->bank, m->status);
116 if (m->rip) {
117 printk(KERN_EMERG
118 "RIP%s %02x:<%016Lx> ",
119 !(m->mcgstatus & MCG_STATUS_EIPV) ? " !INEXACT!" : "",
120 m->cs, m->rip);
121 if (m->cs == __KERNEL_CS)
122 print_symbol("{%s}", m->rip);
123 printk("\n");
125 printk(KERN_EMERG "TSC %Lx ", m->tsc);
126 if (m->addr)
127 printk("ADDR %Lx ", m->addr);
128 if (m->misc)
129 printk("MISC %Lx ", m->misc);
130 printk("\n");
131 printk(KERN_EMERG "This is not a software problem!\n");
132 printk(KERN_EMERG
133 "Run through mcelog --ascii to decode and contact your hardware vendor\n");
136 static void mce_panic(char *msg, struct mce *backup, unsigned long start)
138 int i;
140 oops_begin();
141 for (i = 0; i < MCE_LOG_LEN; i++) {
142 unsigned long tsc = mcelog.entry[i].tsc;
143 if (time_before(tsc, start))
144 continue;
145 print_mce(&mcelog.entry[i]);
146 if (backup && mcelog.entry[i].tsc == backup->tsc)
147 backup = NULL;
149 if (backup)
150 print_mce(backup);
151 panic(msg);
154 static int mce_available(struct cpuinfo_x86 *c)
156 return cpu_has(c, X86_FEATURE_MCE) && cpu_has(c, X86_FEATURE_MCA);
159 static inline void mce_get_rip(struct mce *m, struct pt_regs *regs)
161 if (regs && (m->mcgstatus & MCG_STATUS_RIPV)) {
162 m->rip = regs->rip;
163 m->cs = regs->cs;
164 } else {
165 m->rip = 0;
166 m->cs = 0;
168 if (rip_msr) {
169 /* Assume the RIP in the MSR is exact. Is this true? */
170 m->mcgstatus |= MCG_STATUS_EIPV;
171 rdmsrl(rip_msr, m->rip);
172 m->cs = 0;
177 * The actual machine check handler
180 void do_machine_check(struct pt_regs * regs, long error_code)
182 struct mce m, panicm;
183 u64 mcestart = 0;
184 int i;
185 int panicm_found = 0;
187 * If no_way_out gets set, there is no safe way to recover from this
188 * MCE. If tolerant is cranked up, we'll try anyway.
190 int no_way_out = 0;
192 * If kill_it gets set, there might be a way to recover from this
193 * error.
195 int kill_it = 0;
197 atomic_inc(&mce_entry);
199 if (regs)
200 notify_die(DIE_NMI, "machine check", regs, error_code, 18, SIGKILL);
201 if (!banks)
202 goto out2;
204 memset(&m, 0, sizeof(struct mce));
205 m.cpu = smp_processor_id();
206 rdmsrl(MSR_IA32_MCG_STATUS, m.mcgstatus);
207 /* if the restart IP is not valid, we're done for */
208 if (!(m.mcgstatus & MCG_STATUS_RIPV))
209 no_way_out = 1;
211 rdtscll(mcestart);
212 barrier();
214 for (i = 0; i < banks; i++) {
215 if (!bank[i])
216 continue;
218 m.misc = 0;
219 m.addr = 0;
220 m.bank = i;
221 m.tsc = 0;
223 rdmsrl(MSR_IA32_MC0_STATUS + i*4, m.status);
224 if ((m.status & MCI_STATUS_VAL) == 0)
225 continue;
227 if (m.status & MCI_STATUS_EN) {
228 /* if PCC was set, there's no way out */
229 no_way_out |= !!(m.status & MCI_STATUS_PCC);
231 * If this error was uncorrectable and there was
232 * an overflow, we're in trouble. If no overflow,
233 * we might get away with just killing a task.
235 if (m.status & MCI_STATUS_UC) {
236 if (tolerant < 1 || m.status & MCI_STATUS_OVER)
237 no_way_out = 1;
238 kill_it = 1;
242 if (m.status & MCI_STATUS_MISCV)
243 rdmsrl(MSR_IA32_MC0_MISC + i*4, m.misc);
244 if (m.status & MCI_STATUS_ADDRV)
245 rdmsrl(MSR_IA32_MC0_ADDR + i*4, m.addr);
247 mce_get_rip(&m, regs);
248 if (error_code >= 0)
249 rdtscll(m.tsc);
250 if (error_code != -2)
251 mce_log(&m);
253 /* Did this bank cause the exception? */
254 /* Assume that the bank with uncorrectable errors did it,
255 and that there is only a single one. */
256 if ((m.status & MCI_STATUS_UC) && (m.status & MCI_STATUS_EN)) {
257 panicm = m;
258 panicm_found = 1;
261 add_taint(TAINT_MACHINE_CHECK);
264 /* Never do anything final in the polling timer */
265 if (!regs)
266 goto out;
268 /* If we didn't find an uncorrectable error, pick
269 the last one (shouldn't happen, just being safe). */
270 if (!panicm_found)
271 panicm = m;
274 * If we have decided that we just CAN'T continue, and the user
275 * has not set tolerant to an insane level, give up and die.
277 if (no_way_out && tolerant < 3)
278 mce_panic("Machine check", &panicm, mcestart);
281 * If the error seems to be unrecoverable, something should be
282 * done. Try to kill as little as possible. If we can kill just
283 * one task, do that. If the user has set the tolerance very
284 * high, don't try to do anything at all.
286 if (kill_it && tolerant < 3) {
287 int user_space = 0;
290 * If the EIPV bit is set, it means the saved IP is the
291 * instruction which caused the MCE.
293 if (m.mcgstatus & MCG_STATUS_EIPV)
294 user_space = panicm.rip && (panicm.cs & 3);
297 * If we know that the error was in user space, send a
298 * SIGBUS. Otherwise, panic if tolerance is low.
300 * do_exit() takes an awful lot of locks and has a slight
301 * risk of deadlocking.
303 if (user_space) {
304 do_exit(SIGBUS);
305 } else if (panic_on_oops || tolerant < 2) {
306 mce_panic("Uncorrected machine check",
307 &panicm, mcestart);
311 /* notify userspace ASAP */
312 set_thread_flag(TIF_MCE_NOTIFY);
314 out:
315 /* the last thing we do is clear state */
316 for (i = 0; i < banks; i++)
317 wrmsrl(MSR_IA32_MC0_STATUS+4*i, 0);
318 wrmsrl(MSR_IA32_MCG_STATUS, 0);
319 out2:
320 atomic_dec(&mce_entry);
323 #ifdef CONFIG_X86_MCE_INTEL
324 /***
325 * mce_log_therm_throt_event - Logs the thermal throttling event to mcelog
326 * @cpu: The CPU on which the event occurred.
327 * @status: Event status information
329 * This function should be called by the thermal interrupt after the
330 * event has been processed and the decision was made to log the event
331 * further.
333 * The status parameter will be saved to the 'status' field of 'struct mce'
334 * and historically has been the register value of the
335 * MSR_IA32_THERMAL_STATUS (Intel) msr.
337 void mce_log_therm_throt_event(unsigned int cpu, __u64 status)
339 struct mce m;
341 memset(&m, 0, sizeof(m));
342 m.cpu = cpu;
343 m.bank = MCE_THERMAL_BANK;
344 m.status = status;
345 rdtscll(m.tsc);
346 mce_log(&m);
348 #endif /* CONFIG_X86_MCE_INTEL */
351 * Periodic polling timer for "silent" machine check errors. If the
352 * poller finds an MCE, poll 2x faster. When the poller finds no more
353 * errors, poll 2x slower (up to check_interval seconds).
356 static int check_interval = 5 * 60; /* 5 minutes */
357 static int next_interval; /* in jiffies */
358 static void mcheck_timer(struct work_struct *work);
359 static DECLARE_DELAYED_WORK(mcheck_work, mcheck_timer);
361 static void mcheck_check_cpu(void *info)
363 if (mce_available(&current_cpu_data))
364 do_machine_check(NULL, 0);
367 static void mcheck_timer(struct work_struct *work)
369 on_each_cpu(mcheck_check_cpu, NULL, 1, 1);
372 * Alert userspace if needed. If we logged an MCE, reduce the
373 * polling interval, otherwise increase the polling interval.
375 if (mce_notify_user()) {
376 next_interval = max(next_interval/2, HZ/100);
377 } else {
378 next_interval = min(next_interval*2,
379 (int)round_jiffies_relative(check_interval*HZ));
382 schedule_delayed_work(&mcheck_work, next_interval);
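/*
 * Simplified host-side model of the adaptive polling interval above (not part
 * of mce.c).  adjust_interval() mirrors the halving/doubling in mcheck_timer();
 * round_to_second() is only a rough stand-in for what round_jiffies_relative()
 * provides in the kernel - rounding a relative timeout onto a whole-second
 * boundary so unrelated timers can expire together.  All names and SIM_HZ are
 * invented for this sketch.
 */
#include <stdio.h>

#define SIM_HZ 250				/* pretend jiffies tick at 250 Hz */
#define SIM_CHECK_INTERVAL (5 * 60)		/* 5 minutes, as in check_interval */

static long round_to_second(long j)
{
	long rem = j % SIM_HZ;

	return rem ? j + (SIM_HZ - rem) : j;	/* round up to the next full second */
}

static long adjust_interval(long interval, int logged_mce)
{
	long floor = SIM_HZ / 100;
	long ceiling = round_to_second(SIM_CHECK_INTERVAL * SIM_HZ);

	if (logged_mce)
		interval /= 2;			/* errors seen: poll twice as often */
	else
		interval *= 2;			/* quiet again: back off */
	if (interval < floor)
		interval = floor;
	if (interval > ceiling)
		interval = ceiling;
	return interval;
}

int main(void)
{
	long next = SIM_CHECK_INTERVAL * SIM_HZ;
	int i;

	for (i = 0; i < 5; i++) {		/* a short burst of errors, then silence */
		next = adjust_interval(next, i < 2);
		printf("poll #%d: next interval %ld jiffies (%.2f s)\n",
		       i, next, (double)next / SIM_HZ);
	}
	return 0;
}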
386 * This is only called from process context. This is where we do
387 * anything we need to alert userspace about new MCEs. This is called
388 * directly from the poller and also from entry.S and idle, thanks to
389 * TIF_MCE_NOTIFY.
391 int mce_notify_user(void)
393 clear_thread_flag(TIF_MCE_NOTIFY);
394 if (test_and_clear_bit(0, &notify_user)) {
395 static unsigned long last_print;
396 unsigned long now = jiffies;
398 wake_up_interruptible(&mce_wait);
399 if (trigger[0])
400 call_usermodehelper(trigger, trigger_argv, NULL,
401 UMH_NO_WAIT);
403 if (time_after_eq(now, last_print + (check_interval*HZ))) {
404 last_print = now;
405 printk(KERN_INFO "Machine check events logged\n");
408 return 1;
410 return 0;
413 /* see if the idle task needs to notify userspace */
414 static int
415 mce_idle_callback(struct notifier_block *nfb, unsigned long action, void *junk)
417 /* IDLE_END should be safe - interrupts are back on */
418 if (action == IDLE_END && test_thread_flag(TIF_MCE_NOTIFY))
419 mce_notify_user();
421 return NOTIFY_OK;
424 static struct notifier_block mce_idle_notifier = {
425 .notifier_call = mce_idle_callback,
428 static __init int periodic_mcheck_init(void)
430 next_interval = check_interval * HZ;
431 if (next_interval)
432 schedule_delayed_work(&mcheck_work,
433 round_jiffies_relative(next_interval));
434 idle_notifier_register(&mce_idle_notifier);
435 return 0;
437 __initcall(periodic_mcheck_init);
441 * Initialize Machine Checks for a CPU.
443 static void mce_init(void *dummy)
445 u64 cap;
446 int i;
448 rdmsrl(MSR_IA32_MCG_CAP, cap);
449 banks = cap & 0xff;
450 if (banks > NR_BANKS) {
451 printk(KERN_INFO "MCE: warning: using only %d banks\n", banks);
452 banks = NR_BANKS;
454 /* Use accurate RIP reporting if available. */
455 if ((cap & (1<<9)) && ((cap >> 16) & 0xff) >= 9)
456 rip_msr = MSR_IA32_MCG_EIP;
458 /* Log the machine checks left over from the previous reset.
459 This also clears all registers */
460 do_machine_check(NULL, mce_bootlog ? -1 : -2);
462 set_in_cr4(X86_CR4_MCE);
464 if (cap & MCG_CTL_P)
465 wrmsr(MSR_IA32_MCG_CTL, 0xffffffff, 0xffffffff);
467 for (i = 0; i < banks; i++) {
468 wrmsrl(MSR_IA32_MC0_CTL+4*i, bank[i]);
469 wrmsrl(MSR_IA32_MC0_STATUS+4*i, 0);
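/*
 * Illustrative decode of an IA32_MCG_CAP value (not part of mce.c), matching
 * the fields mce_init() reads above.  The sample value is hypothetical.
 */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint64_t cap = 0x0000000000090306ULL;	/* hypothetical sample */
	unsigned banks   = cap & 0xff;		/* bits 7:0   - number of reporting banks */
	unsigned ctl_p   = (cap >> 8) & 1;	/* bit 8      - MCG_CTL register present */
	unsigned ext_p   = (cap >> 9) & 1;	/* bit 9      - extended (EIP) registers present */
	unsigned ext_cnt = (cap >> 16) & 0xff;	/* bits 23:16 - number of extended registers */

	printf("banks=%u MCG_CTL_P=%u EXT_P=%u ext_cnt=%u\n",
	       banks, ctl_p, ext_p, ext_cnt);
	/* mce_init() enables rip_msr only when EXT_P is set and ext_cnt >= 9 */
	printf("accurate RIP reporting: %s\n",
	       (ext_p && ext_cnt >= 9) ? "yes" : "no");
	return 0;
}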
473 /* Add per CPU specific workarounds here */
474 static void __cpuinit mce_cpu_quirks(struct cpuinfo_x86 *c)
476 /* This should be disabled by the BIOS, but isn't always */
477 if (c->x86_vendor == X86_VENDOR_AMD && c->x86 == 15) {
478 /* disable GART TBL walk error reporting, which trips off
479 incorrectly with the IOMMU & 3ware & Cerberus. */
480 clear_bit(10, &bank[4]);
481 /* Lots of broken BIOS around that don't clear them
482 by default and leave crap in there. Don't log. */
483 mce_bootlog = 0;
488 static void __cpuinit mce_cpu_features(struct cpuinfo_x86 *c)
490 switch (c->x86_vendor) {
491 case X86_VENDOR_INTEL:
492 mce_intel_feature_init(c);
493 break;
494 case X86_VENDOR_AMD:
495 mce_amd_feature_init(c);
496 break;
497 default:
498 break;
503 * Called for each booted CPU to set up machine checks.
504 * Must be called with preempt off.
506 void __cpuinit mcheck_init(struct cpuinfo_x86 *c)
508 static cpumask_t mce_cpus = CPU_MASK_NONE;
510 mce_cpu_quirks(c);
512 if (mce_dont_init ||
513 cpu_test_and_set(smp_processor_id(), mce_cpus) ||
514 !mce_available(c))
515 return;
517 mce_init(NULL);
518 mce_cpu_features(c);
522 * Character device to read and clear the MCE log.
525 static DEFINE_SPINLOCK(mce_state_lock);
526 static int open_count; /* #times opened */
527 static int open_exclu; /* already open exclusive? */
529 static int mce_open(struct inode *inode, struct file *file)
531 spin_lock(&mce_state_lock);
533 if (open_exclu || (open_count && (file->f_flags & O_EXCL))) {
534 spin_unlock(&mce_state_lock);
535 return -EBUSY;
538 if (file->f_flags & O_EXCL)
539 open_exclu = 1;
540 open_count++;
542 spin_unlock(&mce_state_lock);
544 return nonseekable_open(inode, file);
547 static int mce_release(struct inode *inode, struct file *file)
549 spin_lock(&mce_state_lock);
551 open_count--;
552 open_exclu = 0;
554 spin_unlock(&mce_state_lock);
556 return 0;
559 static void collect_tscs(void *data)
561 unsigned long *cpu_tsc = (unsigned long *)data;
562 rdtscll(cpu_tsc[smp_processor_id()]);
565 static ssize_t mce_read(struct file *filp, char __user *ubuf, size_t usize, loff_t *off)
567 unsigned long *cpu_tsc;
568 static DECLARE_MUTEX(mce_read_sem);
569 unsigned next;
570 char __user *buf = ubuf;
571 int i, err;
573 cpu_tsc = kmalloc(NR_CPUS * sizeof(long), GFP_KERNEL);
574 if (!cpu_tsc)
575 return -ENOMEM;
577 down(&mce_read_sem);
578 next = rcu_dereference(mcelog.next);
580 /* Only supports full reads right now */
581 if (*off != 0 || usize < MCE_LOG_LEN*sizeof(struct mce)) {
582 up(&mce_read_sem);
583 kfree(cpu_tsc);
584 return -EINVAL;
587 err = 0;
588 for (i = 0; i < next; i++) {
589 unsigned long start = jiffies;
590 while (!mcelog.entry[i].finished) {
591 if (time_after_eq(jiffies, start + 2)) {
592 memset(mcelog.entry + i,0, sizeof(struct mce));
593 goto timeout;
595 cpu_relax();
597 smp_rmb();
598 err |= copy_to_user(buf, mcelog.entry + i, sizeof(struct mce));
599 buf += sizeof(struct mce);
600 timeout:
604 memset(mcelog.entry, 0, next * sizeof(struct mce));
605 mcelog.next = 0;
607 synchronize_sched();
609 /* Collect entries that were still getting written before the synchronize. */
611 on_each_cpu(collect_tscs, cpu_tsc, 1, 1);
612 for (i = next; i < MCE_LOG_LEN; i++) {
613 if (mcelog.entry[i].finished &&
614 mcelog.entry[i].tsc < cpu_tsc[mcelog.entry[i].cpu]) {
615 err |= copy_to_user(buf, mcelog.entry+i, sizeof(struct mce));
616 smp_rmb();
617 buf += sizeof(struct mce);
618 memset(&mcelog.entry[i], 0, sizeof(struct mce));
621 up(&mce_read_sem);
622 kfree(cpu_tsc);
623 return err ? -EFAULT : buf - ubuf;
626 static unsigned int mce_poll(struct file *file, poll_table *wait)
628 poll_wait(file, &mce_wait, wait);
629 if (rcu_dereference(mcelog.next))
630 return POLLIN | POLLRDNORM;
631 return 0;
634 static int mce_ioctl(struct inode *i, struct file *f,unsigned int cmd, unsigned long arg)
636 int __user *p = (int __user *)arg;
637 if (!capable(CAP_SYS_ADMIN))
638 return -EPERM;
639 switch (cmd) {
640 case MCE_GET_RECORD_LEN:
641 return put_user(sizeof(struct mce), p);
642 case MCE_GET_LOG_LEN:
643 return put_user(MCE_LOG_LEN, p);
644 case MCE_GETCLEAR_FLAGS: {
645 unsigned flags;
646 do {
647 flags = mcelog.flags;
648 } while (cmpxchg(&mcelog.flags, flags, 0) != flags);
649 return put_user(flags, p);
651 default:
652 return -ENOTTY;
656 static const struct file_operations mce_chrdev_ops = {
657 .open = mce_open,
658 .release = mce_release,
659 .read = mce_read,
660 .poll = mce_poll,
661 .ioctl = mce_ioctl,
664 static struct miscdevice mce_log_device = {
665 MISC_MCELOG_MINOR,
666 "mcelog",
667 &mce_chrdev_ops,
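/*
 * Illustrative userspace reader for the character device above (not part of
 * mce.c).  It assumes the struct mce layout and the MCE_GET_RECORD_LEN /
 * MCE_GET_LOG_LEN ioctls exported by the kernel's <asm/mce.h>; mcelog(8)
 * normally ships its own copy of these definitions.
 */
#include <stdio.h>
#include <stdlib.h>
#include <fcntl.h>
#include <unistd.h>
#include <poll.h>
#include <sys/ioctl.h>
#include <asm/mce.h>

int main(void)
{
	int fd = open("/dev/mcelog", O_RDONLY);
	int recsize, loglen;
	struct pollfd pfd;
	char *buf;
	ssize_t n;

	if (fd < 0) {
		perror("/dev/mcelog");
		return 1;
	}
	if (ioctl(fd, MCE_GET_RECORD_LEN, &recsize) < 0 ||
	    ioctl(fd, MCE_GET_LOG_LEN, &loglen) < 0) {
		perror("ioctl");
		return 1;
	}
	pfd.fd = fd;
	pfd.events = POLLIN;
	poll(&pfd, 1, -1);			/* mce_poll() wakes us once something was logged */

	/* mce_read() only supports reading the whole log in one call */
	buf = malloc((size_t)recsize * loglen);
	if (!buf)
		return 1;
	n = read(fd, buf, (size_t)recsize * loglen);
	if (n >= 0)
		printf("read %zd bytes (%zd records of %d bytes)\n",
		       n, n / recsize, recsize);
	free(buf);
	close(fd);
	return 0;
}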
671 * Old style boot options parsing. Only for compatibility.
674 static int __init mcheck_disable(char *str)
676 mce_dont_init = 1;
677 return 1;
680 /* mce=off disables machine check. Note you can reenable it later
681 using sysfs.
682 mce=TOLERANCELEVEL (number, see above)
683 mce=bootlog Log MCEs from before booting. Disabled by default on AMD.
684 mce=nobootlog Don't log MCEs from before booting. */
685 static int __init mcheck_enable(char *str)
687 if (*str == '=')
688 str++;
689 if (!strcmp(str, "off"))
690 mce_dont_init = 1;
691 else if (!strcmp(str, "bootlog") || !strcmp(str,"nobootlog"))
692 mce_bootlog = str[0] == 'b';
693 else if (isdigit(str[0]))
694 get_option(&str, &tolerant);
695 else
696 		printk("mce= argument %s ignored. Please use /sys\n", str);
697 return 1;
700 __setup("nomce", mcheck_disable);
701 __setup("mce", mcheck_enable);
704 * Sysfs support
707 /* On resume clear all MCE state. Don't want to see leftovers from the BIOS.
708 Only one CPU is active at this time, the others get readded later using
709 CPU hotplug. */
710 static int mce_resume(struct sys_device *dev)
712 mce_init(NULL);
713 return 0;
716 /* Reinit MCEs after user configuration changes */
717 static void mce_restart(void)
719 if (next_interval)
720 cancel_delayed_work(&mcheck_work);
721 /* Timer race is harmless here */
722 on_each_cpu(mce_init, NULL, 1, 1);
723 next_interval = check_interval * HZ;
724 if (next_interval)
725 schedule_delayed_work(&mcheck_work,
726 round_jiffies_relative(next_interval));
729 static struct sysdev_class mce_sysclass = {
730 .resume = mce_resume,
731 set_kset_name("machinecheck"),
734 DEFINE_PER_CPU(struct sys_device, device_mce);
736 /* Why are there no generic functions for this? */
737 #define ACCESSOR(name, var, start) \
738 static ssize_t show_ ## name(struct sys_device *s, char *buf) { \
739 return sprintf(buf, "%lx\n", (unsigned long)var); \
741 static ssize_t set_ ## name(struct sys_device *s,const char *buf,size_t siz) { \
742 char *end; \
743 unsigned long new = simple_strtoul(buf, &end, 0); \
744 if (end == buf) return -EINVAL; \
745 var = new; \
746 start; \
747 return end-buf; \
749 static SYSDEV_ATTR(name, 0644, show_ ## name, set_ ## name);
751 /* TBD should generate these dynamically based on number of available banks */
752 ACCESSOR(bank0ctl,bank[0],mce_restart())
753 ACCESSOR(bank1ctl,bank[1],mce_restart())
754 ACCESSOR(bank2ctl,bank[2],mce_restart())
755 ACCESSOR(bank3ctl,bank[3],mce_restart())
756 ACCESSOR(bank4ctl,bank[4],mce_restart())
757 ACCESSOR(bank5ctl,bank[5],mce_restart())
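/*
 * For illustration, the ACCESSOR() macro above expands to roughly the
 * following for bank0ctl (expansion reconstructed by hand, not part of the
 * file):
 */
static ssize_t show_bank0ctl(struct sys_device *s, char *buf)
{
	return sprintf(buf, "%lx\n", (unsigned long)bank[0]);
}
static ssize_t set_bank0ctl(struct sys_device *s, const char *buf, size_t siz)
{
	char *end;
	unsigned long new = simple_strtoul(buf, &end, 0);
	if (end == buf)
		return -EINVAL;
	bank[0] = new;
	mce_restart();			/* reprogram the MCi_CTL MSRs on all CPUs */
	return end - buf;
}
static SYSDEV_ATTR(bank0ctl, 0644, show_bank0ctl, set_bank0ctl);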
759 static ssize_t show_trigger(struct sys_device *s, char *buf)
761 strcpy(buf, trigger);
762 strcat(buf, "\n");
763 return strlen(trigger) + 1;
766 static ssize_t set_trigger(struct sys_device *s,const char *buf,size_t siz)
768 char *p;
769 int len;
770 strncpy(trigger, buf, sizeof(trigger));
771 trigger[sizeof(trigger)-1] = 0;
772 len = strlen(trigger);
773 p = strchr(trigger, '\n');
774 	if (p) *p = 0;
775 return len;
778 static SYSDEV_ATTR(trigger, 0644, show_trigger, set_trigger);
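/*
 * Illustrative sketch of a trigger program (not part of mce.c).  The kernel
 * runs the configured path via call_usermodehelper() with no extra arguments
 * (see trigger_argv above); a real handler would typically drain /dev/mcelog
 * or page an operator.  The program name and sysfs path here are examples
 * only:
 *
 *   echo /usr/local/sbin/mce-trigger > \
 *       /sys/devices/system/machinecheck/machinecheck0/trigger
 */
#include <syslog.h>

int main(void)
{
	openlog("mce-trigger", LOG_PID, LOG_DAEMON);
	syslog(LOG_CRIT, "machine check event logged - run mcelog to decode");
	closelog();
	return 0;
}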
779 ACCESSOR(tolerant,tolerant,)
780 ACCESSOR(check_interval,check_interval,mce_restart())
781 static struct sysdev_attribute *mce_attributes[] = {
782 &attr_bank0ctl, &attr_bank1ctl, &attr_bank2ctl,
783 &attr_bank3ctl, &attr_bank4ctl, &attr_bank5ctl,
784 &attr_tolerant, &attr_check_interval, &attr_trigger,
785 NULL
788 /* Per cpu sysdev init. All of the cpus still share the same ctl bank */
789 static __cpuinit int mce_create_device(unsigned int cpu)
791 int err;
792 int i;
793 if (!mce_available(&cpu_data[cpu]))
794 return -EIO;
796 per_cpu(device_mce,cpu).id = cpu;
797 per_cpu(device_mce,cpu).cls = &mce_sysclass;
799 err = sysdev_register(&per_cpu(device_mce,cpu));
801 if (!err) {
802 for (i = 0; mce_attributes[i]; i++)
803 sysdev_create_file(&per_cpu(device_mce,cpu),
804 mce_attributes[i]);
806 return err;
809 static void mce_remove_device(unsigned int cpu)
811 int i;
813 for (i = 0; mce_attributes[i]; i++)
814 sysdev_remove_file(&per_cpu(device_mce,cpu),
815 mce_attributes[i]);
816 sysdev_unregister(&per_cpu(device_mce,cpu));
817 memset(&per_cpu(device_mce, cpu).kobj, 0, sizeof(struct kobject));
820 /* Get notified when a cpu comes on/off. Be hotplug friendly. */
821 static int
822 mce_cpu_callback(struct notifier_block *nfb, unsigned long action, void *hcpu)
824 unsigned int cpu = (unsigned long)hcpu;
826 switch (action) {
827 case CPU_ONLINE:
828 case CPU_ONLINE_FROZEN:
829 mce_create_device(cpu);
830 break;
831 case CPU_DEAD:
832 case CPU_DEAD_FROZEN:
833 mce_remove_device(cpu);
834 break;
836 return NOTIFY_OK;
839 static struct notifier_block mce_cpu_notifier = {
840 .notifier_call = mce_cpu_callback,
843 static __init int mce_init_device(void)
845 int err;
846 int i = 0;
848 if (!mce_available(&boot_cpu_data))
849 return -EIO;
850 err = sysdev_class_register(&mce_sysclass);
852 for_each_online_cpu(i) {
853 mce_create_device(i);
856 register_hotcpu_notifier(&mce_cpu_notifier);
857 misc_register(&mce_log_device);
858 return err;
861 device_initcall(mce_init_device);