EDAC, MCE: Add F15h FP MCE decoder
[linux-2.6/kvm.git] / drivers/edac/mce_amd.c

#include <linux/module.h>
#include <linux/slab.h>

#include "mce_amd.h"

static struct amd_decoder_ops *fam_ops;

static u8 xec_mask	 = 0xf;
static u8 nb_err_cpumask = 0xf;

static bool report_gart_errors;
static void (*nb_bus_decoder)(int node_id, struct mce *m, u32 nbcfg);

void amd_report_gart_errors(bool v)
{
	report_gart_errors = v;
}
EXPORT_SYMBOL_GPL(amd_report_gart_errors);

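/*
 * An external DRAM ECC decoder (the amd64_edac driver, for example) can hook
 * itself in here. A minimal usage sketch, with a hypothetical callback name:
 *
 *	static void my_ecc_decoder(int node_id, struct mce *m, u32 nbcfg)
 *	{ ... }
 *
 *	amd_register_ecc_decoder(my_ecc_decoder);
 *	...
 *	amd_unregister_ecc_decoder(my_ecc_decoder);
 *
 * The callback is invoked from amd_decode_nb_mce() below for DRAM ECC errors
 * (extended error code 0x0 or 0x8) on K8 and F10h.
 */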
void amd_register_ecc_decoder(void (*f)(int, struct mce *, u32))
{
	nb_bus_decoder = f;
}
EXPORT_SYMBOL_GPL(amd_register_ecc_decoder);

void amd_unregister_ecc_decoder(void (*f)(int, struct mce *, u32))
{
	if (nb_bus_decoder) {
		WARN_ON(nb_bus_decoder != f);

		nb_bus_decoder = NULL;
	}
}
EXPORT_SYMBOL_GPL(amd_unregister_ecc_decoder);

/*
 * string representation for the different MCA reported error types, see F3x48
 * or MSR0000_0411.
 */

/* transaction type */
const char *tt_msgs[] = { "INSN", "DATA", "GEN", "RESV" };
EXPORT_SYMBOL_GPL(tt_msgs);

/* cache level */
const char *ll_msgs[] = { "RESV", "L1", "L2", "L3/GEN" };
EXPORT_SYMBOL_GPL(ll_msgs);

/* memory transaction type */
const char *rrrr_msgs[] = {
	"GEN", "RD", "WR", "DRD", "DWR", "IRD", "PRF", "EV", "SNP"
};
EXPORT_SYMBOL_GPL(rrrr_msgs);

/* participating processor */
const char *pp_msgs[] = { "SRC", "RES", "OBS", "GEN" };
EXPORT_SYMBOL_GPL(pp_msgs);

/* request timeout */
const char *to_msgs[] = { "no timeout", "timed out" };
EXPORT_SYMBOL_GPL(to_msgs);

/* memory or i/o */
const char *ii_msgs[] = { "MEM", "RESV", "IO", "GEN" };
EXPORT_SYMBOL_GPL(ii_msgs);

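/*
 * Extended error code descriptions for F10h northbridge MCEs. f10h_nb_mce()
 * below indexes this table with (xec - offset), so the entries must stay in
 * the order of the extended error code ranges handled there.
 */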
static const char *f10h_nb_mce_desc[] = {
	"HT link data error",
	"Protocol error (link, L3, probe filter, etc.)",
	"Parity error in NB-internal arrays",
	"Link Retry due to IO link transmission error",
	"L3 ECC data cache error",
	"ECC error in L3 cache tag",
	"L3 LRU parity bits error",
	"ECC Error in the Probe Filter directory"
};

static const char * const f15h_ic_mce_desc[] = {
	"UC during a demand linefill from L2",
	"Parity error during data load from IC",
	"Parity error for IC valid bit",
	"Main tag parity error",
	"Parity error in prediction queue",
	"PFB data/address parity error",
	"Parity error in the branch status reg",
	"PFB promotion address error",
	"Tag error during probe/victimization",
	"Parity error for IC probe tag valid bit",
	"PFB non-cacheable bit parity error",
	"PFB valid bit parity error",		/* xec = 0xd */
	"patch RAM",				/* xec = 0x10 */
	"uop queue",
	"insn buffer",
	"predecode buffer",
	"fetch address FIFO"
};

static const char * const f15h_cu_mce_desc[] = {
	"Fill ECC error on data fills",		/* xec = 0x4 */
	"Fill parity error on insn fills",
	"Prefetcher request FIFO parity error",
	"PRQ address parity error",
	"PRQ data parity error",
	"WCC Tag ECC error",
	"WCC Data ECC error",
	"WCB Data parity error",
	"VB Data/ECC error",
	"L2 Tag ECC error",			/* xec = 0x10 */
	"Hard L2 Tag ECC error",
	"Multiple hits on L2 tag",
	"XAB parity error",
	"PRB address parity error"
};

static const char * const fr_ex_mce_desc[] = {
	"CPU Watchdog timer expire",
	"Wakeup array dest tag",
	"AG payload array",
	"EX payload array",
	"IDRF array",
	"Retire dispatch queue",
	"Mapper checkpoint array",
	"Physical register file EX0 port",
	"Physical register file EX1 port",
	"Physical register file AG0 port",
	"Physical register file AG1 port",
	"Flag register file",
	"DE correctable error could not be corrected"
};

static bool f12h_dc_mce(u16 ec, u8 xec)
{
	bool ret = false;

	if (MEM_ERROR(ec)) {
		u8 ll = ec & 0x3;
		ret = true;

		if (ll == LL_L2)
			pr_cont("during L1 linefill from L2.\n");
		else if (ll == LL_L1)
			pr_cont("Data/Tag %s error.\n", RRRR_MSG(ec));
		else
			ret = false;
	}
	return ret;
}

static bool f10h_dc_mce(u16 ec, u8 xec)
{
	u8 r4 = (ec >> 4) & 0xf;
	u8 ll = ec & 0x3;

	if (r4 == R4_GEN && ll == LL_L1) {
		pr_cont("during data scrub.\n");
		return true;
	}
	return f12h_dc_mce(ec, xec);
}

static bool k8_dc_mce(u16 ec, u8 xec)
{
	if (BUS_ERROR(ec)) {
		pr_cont("during system linefill.\n");
		return true;
	}

	return f10h_dc_mce(ec, xec);
}

static bool f14h_dc_mce(u16 ec, u8 xec)
{
	u8 r4	 = (ec >> 4) & 0xf;
	u8 ll	 = ec & 0x3;
	u8 tt	 = (ec >> 2) & 0x3;
	u8 ii	 = tt;
	bool ret = true;

	if (MEM_ERROR(ec)) {

		if (tt != TT_DATA || ll != LL_L1)
			return false;

		switch (r4) {
		case R4_DRD:
		case R4_DWR:
			pr_cont("Data/Tag parity error due to %s.\n",
				(r4 == R4_DRD ? "load/hw prf" : "store"));
			break;
		case R4_EVICT:
			pr_cont("Copyback parity error on a tag miss.\n");
			break;
		case R4_SNOOP:
			pr_cont("Tag parity error during snoop.\n");
			break;
		default:
			ret = false;
		}
	} else if (BUS_ERROR(ec)) {

		if ((ii != II_MEM && ii != II_IO) || ll != LL_LG)
			return false;

		pr_cont("System read data error on a ");

		switch (r4) {
		case R4_RD:
			pr_cont("TLB reload.\n");
			break;
		case R4_DWR:
			pr_cont("store.\n");
			break;
		case R4_DRD:
			pr_cont("load.\n");
			break;
		default:
			ret = false;
		}
	} else {
		ret = false;
	}

	return ret;
}

static bool f15h_dc_mce(u16 ec, u8 xec)
{
	bool ret = true;

	if (MEM_ERROR(ec)) {

		switch (xec) {
		case 0x0:
			pr_cont("Data Array access error.\n");
			break;

		case 0x1:
			pr_cont("UC error during a linefill from L2/NB.\n");
			break;

		case 0x2:
		case 0x11:
			pr_cont("STQ access error.\n");
			break;

		case 0x3:
			pr_cont("SCB access error.\n");
			break;

		case 0x10:
			pr_cont("Tag error.\n");
			break;

		case 0x12:
			pr_cont("LDQ access error.\n");
			break;

		default:
			ret = false;
		}
	} else if (BUS_ERROR(ec)) {

		if (!xec)
			pr_cont("during system linefill.\n");
		else
			pr_cont(" Internal %s condition.\n",
				((xec == 1) ? "livelock" : "deadlock"));
	} else
		ret = false;

	return ret;
}

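/*
 * Bank 0 (Data Cache) decoder: the MCA error code sits in MCi_STATUS[15:0]
 * and the extended error code in MCi_STATUS[20:16] (masked with xec_mask,
 * which is widened to 5 bits on F15h). TLB errors are decoded here directly
 * since their signature is the same across families; everything else goes
 * through the family-specific fam_ops->dc_mce() handler.
 */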
static void amd_decode_dc_mce(struct mce *m)
{
	u16 ec = m->status & 0xffff;
	u8 xec = (m->status >> 16) & xec_mask;

	pr_emerg(HW_ERR "Data Cache Error: ");

	/* TLB error signatures are the same across families */
	if (TLB_ERROR(ec)) {
		u8 tt = (ec >> 2) & 0x3;

		if (tt == TT_DATA) {
			pr_cont("%s TLB %s.\n", LL_MSG(ec),
				((xec == 2) ? "locked miss"
					    : (xec ? "multimatch" : "parity")));
			return;
		}
	} else if (fam_ops->dc_mce(ec, xec))
		;
	else
		pr_emerg(HW_ERR "Corrupted DC MCE info?\n");
}

static bool k8_ic_mce(u16 ec, u8 xec)
{
	u8 ll	 = ec & 0x3;
	u8 r4	 = (ec >> 4) & 0xf;
	bool ret = true;

	if (!MEM_ERROR(ec))
		return false;

	if (ll == 0x2)
		pr_cont("during a linefill from L2.\n");
	else if (ll == 0x1) {
		switch (r4) {
		case R4_IRD:
			pr_cont("Parity error during data load.\n");
			break;

		case R4_EVICT:
			pr_cont("Copyback Parity/Victim error.\n");
			break;

		case R4_SNOOP:
			pr_cont("Tag Snoop error.\n");
			break;

		default:
			ret = false;
			break;
		}
	} else
		ret = false;

	return ret;
}

static bool f14h_ic_mce(u16 ec, u8 xec)
{
	u8 ll	 = ec & 0x3;
	u8 tt	 = (ec >> 2) & 0x3;
	u8 r4	 = (ec >> 4) & 0xf;
	bool ret = true;

	if (MEM_ERROR(ec)) {
		if (tt != 0 || ll != 1)
			ret = false;

		if (r4 == R4_IRD)
			pr_cont("Data/tag array parity error for a tag hit.\n");
		else if (r4 == R4_SNOOP)
			pr_cont("Tag error during snoop/victimization.\n");
		else
			ret = false;
	}
	return ret;
}

static bool f15h_ic_mce(u16 ec, u8 xec)
{
	bool ret = true;

	if (!MEM_ERROR(ec))
		return false;

	switch (xec) {
	case 0x0 ... 0xa:
		pr_cont("%s.\n", f15h_ic_mce_desc[xec]);
		break;

	case 0xd:
		pr_cont("%s.\n", f15h_ic_mce_desc[xec-2]);
		break;

	case 0x10 ... 0x14:
		pr_cont("Decoder %s parity error.\n", f15h_ic_mce_desc[xec-4]);
		break;

	default:
		ret = false;
	}
	return ret;
}

static void amd_decode_ic_mce(struct mce *m)
{
	u16 ec = m->status & 0xffff;
	u8 xec = (m->status >> 16) & xec_mask;

	pr_emerg(HW_ERR "Instruction Cache Error: ");

	if (TLB_ERROR(ec))
		pr_cont("%s TLB %s.\n", LL_MSG(ec),
			(xec ? "multimatch" : "parity error"));
	else if (BUS_ERROR(ec)) {
		bool k8 = (boot_cpu_data.x86 == 0xf && (m->status & BIT_64(58)));

		pr_cont("during %s.\n", (k8 ? "system linefill" : "NB data read"));
	} else if (fam_ops->ic_mce(ec, xec))
		;
	else
		pr_emerg(HW_ERR "Corrupted IC MCE info?\n");
}

static void amd_decode_bu_mce(struct mce *m)
{
	u32 ec = m->status & 0xffff;
	u32 xec = (m->status >> 16) & xec_mask;

	pr_emerg(HW_ERR "Bus Unit Error");

	if (xec == 0x1)
		pr_cont(" in the write data buffers.\n");
	else if (xec == 0x3)
		pr_cont(" in the victim data buffers.\n");
	else if (xec == 0x2 && MEM_ERROR(ec))
		pr_cont(": %s error in the L2 cache tags.\n", RRRR_MSG(ec));
	else if (xec == 0x0) {
		if (TLB_ERROR(ec))
			pr_cont(": %s error in a Page Descriptor Cache or "
				"Guest TLB.\n", TT_MSG(ec));
		else if (BUS_ERROR(ec))
			pr_cont(": %s/ECC error in data read from NB: %s.\n",
				RRRR_MSG(ec), PP_MSG(ec));
		else if (MEM_ERROR(ec)) {
			u8 rrrr = (ec >> 4) & 0xf;

			if (rrrr >= 0x7)
				pr_cont(": %s error during data copyback.\n",
					RRRR_MSG(ec));
			else if (rrrr <= 0x1)
				pr_cont(": %s parity/ECC error during data "
					"access from L2.\n", RRRR_MSG(ec));
			else
				goto wrong_bu_mce;
		} else
			goto wrong_bu_mce;
	} else
		goto wrong_bu_mce;

	return;

wrong_bu_mce:
	pr_emerg(HW_ERR "Corrupted BU MCE info?\n");
}

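/*
 * F15h replaces the Bus Unit bank with a Combined Unit bank (bank 2),
 * covering the L2 tags/data, write coalescing and prefetch buffers listed
 * in f15h_cu_mce_desc[] above; amd_decode_mce() routes bank 2 here on that
 * family and to amd_decode_bu_mce() otherwise.
 */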
static void amd_decode_cu_mce(struct mce *m)
{
	u16 ec = m->status & 0xffff;
	u8 xec = (m->status >> 16) & xec_mask;

	pr_emerg(HW_ERR "Combined Unit Error: ");

	if (TLB_ERROR(ec)) {
		if (xec == 0x0)
			pr_cont("Data parity TLB read error.\n");
		else if (xec == 0x1)
			pr_cont("Poison data provided for TLB fill.\n");
		else
			goto wrong_cu_mce;
	} else if (BUS_ERROR(ec)) {
		if (xec > 2)
			goto wrong_cu_mce;

		pr_cont("Error during attempted NB data read.\n");
	} else if (MEM_ERROR(ec)) {
		switch (xec) {
		case 0x4 ... 0xc:
			pr_cont("%s.\n", f15h_cu_mce_desc[xec - 0x4]);
			break;

		case 0x10 ... 0x14:
			pr_cont("%s.\n", f15h_cu_mce_desc[xec - 0x7]);
			break;

		default:
			goto wrong_cu_mce;
		}
	}

	return;

wrong_cu_mce:
	pr_emerg(HW_ERR "Corrupted CU MCE info?\n");
}

static void amd_decode_ls_mce(struct mce *m)
{
	u16 ec = m->status & 0xffff;
	u8 xec = (m->status >> 16) & xec_mask;

	if (boot_cpu_data.x86 >= 0x14) {
		pr_emerg("You shouldn't be seeing an LS MCE on this cpu family,"
			 " please report on LKML.\n");
		return;
	}

	pr_emerg(HW_ERR "Load Store Error");

	if (xec == 0x0) {
		u8 r4 = (ec >> 4) & 0xf;

		if (!BUS_ERROR(ec) || (r4 != R4_DRD && r4 != R4_DWR))
			goto wrong_ls_mce;

		pr_cont(" during %s.\n", RRRR_MSG(ec));
	} else
		goto wrong_ls_mce;

	return;

wrong_ls_mce:
	pr_emerg(HW_ERR "Corrupted LS MCE info?\n");
}

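/*
 * Northbridge (bank 4) decoders. Note that GART table walk errors
 * (extended error code 0x5) are normally dropped by amd_filter_mce()
 * unless reporting has been turned on via amd_report_gart_errors().
 */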
static bool k8_nb_mce(u16 ec, u8 xec)
{
	bool ret = true;

	switch (xec) {
	case 0x1:
		pr_cont("CRC error detected on HT link.\n");
		break;

	case 0x5:
		pr_cont("Invalid GART PTE entry during GART table walk.\n");
		break;

	case 0x6:
		pr_cont("Unsupported atomic RMW received from an IO link.\n");
		break;

	case 0x0:
	case 0x8:
		if (boot_cpu_data.x86 == 0x11)
			return false;

		pr_cont("DRAM ECC error detected on the NB.\n");
		break;

	case 0xd:
		pr_cont("Parity error on the DRAM addr/ctl signals.\n");
		break;

	default:
		ret = false;
		break;
	}

	return ret;
}

static bool f10h_nb_mce(u16 ec, u8 xec)
{
	bool ret = true;
	u8 offset = 0;

	if (k8_nb_mce(ec, xec))
		return true;

	switch (xec) {
	case 0xa ... 0xc:
		offset = 10;
		break;

	case 0xe:
		offset = 11;
		break;

	case 0xf:
		if (TLB_ERROR(ec))
			pr_cont("GART Table Walk data error.\n");
		else if (BUS_ERROR(ec))
			pr_cont("DMA Exclusion Vector Table Walk error.\n");
		else
			ret = false;

		goto out;
		break;

	case 0x19:
		if (boot_cpu_data.x86 == 0x15)
			pr_cont("Compute Unit Data Error.\n");
		else
			ret = false;

		goto out;
		break;

	case 0x1c ... 0x1f:
		offset = 24;
		break;

	default:
		ret = false;

		goto out;
		break;
	}

	pr_cont("%s.\n", f10h_nb_mce_desc[xec - offset]);

out:
	return ret;
}

static bool nb_noop_mce(u16 ec, u8 xec)
{
	return false;
}

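/*
 * NB MCEs always carry a 5-bit extended error code, hence the fixed 0x1f
 * mask below instead of xec_mask. The high half of MCi_STATUS (NBSH) also
 * encodes which core(s) observed the error, which is decoded first.
 */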
void amd_decode_nb_mce(int node_id, struct mce *m, u32 nbcfg)
{
	u8 xec   = (m->status >> 16) & 0x1f;
	u16 ec   = m->status & 0xffff;
	u32 nbsh = (u32)(m->status >> 32);

	pr_emerg(HW_ERR "Northbridge Error, node %d: ", node_id);

	/*
	 * F10h revD and later can disable ErrCpu[3:0], so check that first;
	 * the value encoding has also changed, so interpret those bits
	 * differently.
	 */
	if ((boot_cpu_data.x86 == 0x10) &&
	    (boot_cpu_data.x86_model > 7)) {
		if (nbsh & K8_NBSH_ERR_CPU_VAL)
			pr_cont(", core: %u", (u8)(nbsh & nb_err_cpumask));
	} else {
		u8 assoc_cpus = nbsh & nb_err_cpumask;

		if (assoc_cpus > 0)
			pr_cont(", core: %d", fls(assoc_cpus) - 1);
	}

	switch (xec) {
	case 0x2:
		pr_cont("Sync error (sync packets on HT link detected).\n");
		return;

	case 0x3:
		pr_cont("HT Master abort.\n");
		return;

	case 0x4:
		pr_cont("HT Target abort.\n");
		return;

	case 0x7:
		pr_cont("NB Watchdog timeout.\n");
		return;

	case 0x9:
		pr_cont("SVM DMA Exclusion Vector error.\n");
		return;

	default:
		break;
	}

	if (!fam_ops->nb_mce(ec, xec))
		goto wrong_nb_mce;

	if (boot_cpu_data.x86 == 0xf || boot_cpu_data.x86 == 0x10)
		if ((xec == 0x8 || xec == 0x0) && nb_bus_decoder)
			nb_bus_decoder(node_id, m, nbcfg);

	return;

wrong_nb_mce:
	pr_emerg(HW_ERR "Corrupted NB MCE info?\n");
}
EXPORT_SYMBOL_GPL(amd_decode_nb_mce);

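/*
 * Bank 5: FIROB errors on older families, reported as Execution Unit
 * errors on F15h; the extended error code indexes fr_ex_mce_desc[] above.
 */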
static void amd_decode_fr_mce(struct mce *m)
{
	struct cpuinfo_x86 *c = &boot_cpu_data;
	u8 xec = (m->status >> 16) & xec_mask;

	if (c->x86 == 0xf || c->x86 == 0x11)
		goto wrong_fr_mce;

	if (c->x86 != 0x15 && xec != 0x0)
		goto wrong_fr_mce;

	pr_emerg(HW_ERR "%s Error: ",
		 (c->x86 == 0x15 ? "Execution Unit" : "FIROB"));

	if (xec == 0x0 || xec == 0xc)
		pr_cont("%s.\n", fr_ex_mce_desc[xec]);
	else if (xec < 0xd)
		pr_cont("%s parity error.\n", fr_ex_mce_desc[xec]);
	else
		goto wrong_fr_mce;

	return;

wrong_fr_mce:
	pr_emerg(HW_ERR "Corrupted FR MCE info?\n");
}

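/*
 * Bank 6, new on F15h: Floating Point Unit errors. All currently decoded
 * extended error codes (0x1 - 0x5) are parity errors in the FP unit's
 * internal structures.
 */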
static void amd_decode_fp_mce(struct mce *m)
{
	u8 xec = (m->status >> 16) & xec_mask;

	pr_emerg(HW_ERR "Floating Point Unit Error: ");

	switch (xec) {
	case 0x1:
		pr_cont("Free List");
		break;

	case 0x2:
		pr_cont("Physical Register File");
		break;

	case 0x3:
		pr_cont("Retire Queue");
		break;

	case 0x4:
		pr_cont("Scheduler table");
		break;

	case 0x5:
		pr_cont("Status Register File");
		break;

	default:
		goto wrong_fp_mce;
		break;
	}

	pr_cont(" parity error.\n");

	return;

wrong_fp_mce:
	pr_emerg(HW_ERR "Corrupted FP MCE info?\n");
}

static inline void amd_decode_err_code(u16 ec)
{
	if (TLB_ERROR(ec)) {
		pr_emerg(HW_ERR "Transaction: %s, Cache Level: %s\n",
			 TT_MSG(ec), LL_MSG(ec));
	} else if (MEM_ERROR(ec)) {
		pr_emerg(HW_ERR "Transaction: %s, Type: %s, Cache Level: %s\n",
			 RRRR_MSG(ec), TT_MSG(ec), LL_MSG(ec));
	} else if (BUS_ERROR(ec)) {
		pr_emerg(HW_ERR "Transaction: %s (%s), %s, Cache Level: %s, "
			 "Participating Processor: %s\n",
			 RRRR_MSG(ec), II_MSG(ec), TO_MSG(ec), LL_MSG(ec),
			 PP_MSG(ec));
	} else
		pr_emerg(HW_ERR "Huh? Unknown MCE error 0x%x\n", ec);
}

/*
 * Filter out unwanted MCE signatures here.
 */
static bool amd_filter_mce(struct mce *m)
{
	u8 xec = (m->status >> 16) & 0x1f;

	/*
	 * NB GART TLB error reporting is disabled by default.
	 */
	if (m->bank == 4 && xec == 0x5 && !report_gart_errors)
		return true;

	return false;
}

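/*
 * Main decode entry point, called through the x86_mce_decoder_chain
 * notifier. Banks map to decoders as follows: 0 - DC, 1 - IC, 2 - BU
 * (CU on F15h), 3 - LS, 4 - NB, 5 - FR/EX, 6 - FP.
 */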
int amd_decode_mce(struct notifier_block *nb, unsigned long val, void *data)
{
	struct mce *m = (struct mce *)data;
	int node, ecc;

	if (amd_filter_mce(m))
		return NOTIFY_STOP;

	pr_emerg(HW_ERR "MC%d_STATUS: ", m->bank);

	pr_cont("%sorrected error, other errors lost: %s, "
		"CPU context corrupt: %s",
		((m->status & MCI_STATUS_UC) ? "Unc" : "C"),
		((m->status & MCI_STATUS_OVER) ? "yes" : "no"),
		((m->status & MCI_STATUS_PCC) ? "yes" : "no"));

	/* do the two bits[14:13] together */
	ecc = (m->status >> 45) & 0x3;
	if (ecc)
		pr_cont(", %sECC Error", ((ecc == 2) ? "C" : "U"));

	pr_cont("\n");

	switch (m->bank) {
	case 0:
		amd_decode_dc_mce(m);
		break;

	case 1:
		amd_decode_ic_mce(m);
		break;

	case 2:
		if (boot_cpu_data.x86 == 0x15)
			amd_decode_cu_mce(m);
		else
			amd_decode_bu_mce(m);
		break;

	case 3:
		amd_decode_ls_mce(m);
		break;

	case 4:
		node = amd_get_nb_id(m->extcpu);
		amd_decode_nb_mce(node, m, 0);
		break;

	case 5:
		amd_decode_fr_mce(m);
		break;

	case 6:
		amd_decode_fp_mce(m);
		break;

	default:
		break;
	}

	amd_decode_err_code(m->status & 0xffff);

	return NOTIFY_STOP;
}
EXPORT_SYMBOL_GPL(amd_decode_mce);

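/* Registered on the x86 MCE decoder chain in mce_amd_init() below. */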
static struct notifier_block amd_mce_dec_nb = {
	.notifier_call	= amd_decode_mce,
};

static int __init mce_amd_init(void)
{
	if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD)
		return 0;

	if ((boot_cpu_data.x86 < 0xf || boot_cpu_data.x86 > 0x12) &&
	    (boot_cpu_data.x86 != 0x14 || boot_cpu_data.x86_model > 0xf))
		return 0;

	fam_ops = kzalloc(sizeof(struct amd_decoder_ops), GFP_KERNEL);
	if (!fam_ops)
		return -ENOMEM;

	switch (boot_cpu_data.x86) {
	case 0xf:
		fam_ops->dc_mce = k8_dc_mce;
		fam_ops->ic_mce = k8_ic_mce;
		fam_ops->nb_mce = k8_nb_mce;
		break;

	case 0x10:
		fam_ops->dc_mce = f10h_dc_mce;
		fam_ops->ic_mce = k8_ic_mce;
		fam_ops->nb_mce = f10h_nb_mce;
		break;

	case 0x11:
		fam_ops->dc_mce = k8_dc_mce;
		fam_ops->ic_mce = k8_ic_mce;
		fam_ops->nb_mce = f10h_nb_mce;
		break;

	case 0x12:
		fam_ops->dc_mce = f12h_dc_mce;
		fam_ops->ic_mce = k8_ic_mce;
		fam_ops->nb_mce = nb_noop_mce;
		break;

	case 0x14:
		nb_err_cpumask  = 0x3;
		fam_ops->dc_mce = f14h_dc_mce;
		fam_ops->ic_mce = f14h_ic_mce;
		fam_ops->nb_mce = nb_noop_mce;
		break;

	case 0x15:
		xec_mask = 0x1f;
		fam_ops->dc_mce = f15h_dc_mce;
		fam_ops->ic_mce = f15h_ic_mce;
		fam_ops->nb_mce = f10h_nb_mce;
		break;

	default:
		printk(KERN_WARNING "Huh? What family is that: %d?!\n",
		       boot_cpu_data.x86);
		kfree(fam_ops);
		return -EINVAL;
	}

	pr_info("MCE: In-kernel MCE decoding enabled.\n");

	atomic_notifier_chain_register(&x86_mce_decoder_chain, &amd_mce_dec_nb);

	return 0;
}
early_initcall(mce_amd_init);

#ifdef MODULE
static void __exit mce_amd_exit(void)
{
	atomic_notifier_chain_unregister(&x86_mce_decoder_chain, &amd_mce_dec_nb);
	kfree(fam_ops);
}

MODULE_DESCRIPTION("AMD MCE decoder");
MODULE_ALIAS("edac-mce-amd");
MODULE_LICENSE("GPL");
module_exit(mce_amd_exit);
#endif