/*
 * Intel e752x Memory Controller kernel module
 * (C) 2004 Linux Networx (http://lnxi.com)
 * This file may be distributed under the terms of the
 * GNU General Public License.
 *
 * See "enum e752x_chips" below for supported chipsets
 *
 * Written by Tom Zimmerman
 *
 * Contributors:
 *	Thayne Harbaugh at realmsys.com (?)
 *	Wang Zhenyu at intel.com
 *	Dave Jiang at mvista.com
 *
 * $Id: edac_e752x.c,v 1.5.2.11 2005/10/05 00:43:44 dsp_llnl Exp $
 *
 */
#include <linux/module.h>
#include <linux/init.h>
#include <linux/pci.h>
#include <linux/pci_ids.h>
#include <linux/slab.h>
#include <linux/edac.h>
#include "edac_core.h"

#define E752X_REVISION	" Ver: 2.0.2 " __DATE__
#define EDAC_MOD_STR	"e752x_edac"

static int force_function_unhide;
static int sysbus_parity = -1;

static struct edac_pci_ctl_info *e752x_pci;

#define e752x_printk(level, fmt, arg...) \
	edac_printk(level, "e752x", fmt, ##arg)

#define e752x_mc_printk(mci, level, fmt, arg...) \
	edac_mc_chipset_printk(mci, level, "e752x", fmt, ##arg)
#ifndef PCI_DEVICE_ID_INTEL_7520_0
#define PCI_DEVICE_ID_INTEL_7520_0	0x3590
#endif				/* PCI_DEVICE_ID_INTEL_7520_0 */

#ifndef PCI_DEVICE_ID_INTEL_7520_1_ERR
#define PCI_DEVICE_ID_INTEL_7520_1_ERR	0x3591
#endif				/* PCI_DEVICE_ID_INTEL_7520_1_ERR */

#ifndef PCI_DEVICE_ID_INTEL_7525_0
#define PCI_DEVICE_ID_INTEL_7525_0	0x359E
#endif				/* PCI_DEVICE_ID_INTEL_7525_0 */

#ifndef PCI_DEVICE_ID_INTEL_7525_1_ERR
#define PCI_DEVICE_ID_INTEL_7525_1_ERR	0x3593
#endif				/* PCI_DEVICE_ID_INTEL_7525_1_ERR */

#ifndef PCI_DEVICE_ID_INTEL_7320_0
#define PCI_DEVICE_ID_INTEL_7320_0	0x3592
#endif				/* PCI_DEVICE_ID_INTEL_7320_0 */

#ifndef PCI_DEVICE_ID_INTEL_7320_1_ERR
#define PCI_DEVICE_ID_INTEL_7320_1_ERR	0x3593
#endif				/* PCI_DEVICE_ID_INTEL_7320_1_ERR */

#ifndef PCI_DEVICE_ID_INTEL_3100_0
#define PCI_DEVICE_ID_INTEL_3100_0	0x35B0
#endif				/* PCI_DEVICE_ID_INTEL_3100_0 */

#ifndef PCI_DEVICE_ID_INTEL_3100_1_ERR
#define PCI_DEVICE_ID_INTEL_3100_1_ERR	0x35B1
#endif				/* PCI_DEVICE_ID_INTEL_3100_1_ERR */
#define E752X_NR_CSROWS		8	/* number of csrows */

/* E752X register addresses - device 0 function 0 */
#define E752X_DRB		0x60	/* DRAM row boundary register (8b) */
#define E752X_DRA		0x70	/* DRAM row attribute register (8b) */
					/*
					 * 31:30   Device width row 7
					 *      01=x8 10=x4 11=x8 DDR2
					 * 27:26   Device width row 6
					 * 23:22   Device width row 5
					 * 19:18   Device width row 4
					 * 15:14   Device width row 3
					 * 11:10   Device width row 2
					 *  7:6    Device width row 1
					 *  3:2    Device width row 0
					 */
#define E752X_DRC		0x7C	/* DRAM controller mode reg (32b) */
					/* FIXME:IS THIS RIGHT? */
					/*
					 * 22    Number channels 0=1,1=2
					 * 19:18 DRB Granularity 32/64MB
					 */
#define E752X_DRM		0x80	/* Dimm mapping register */
#define E752X_DDRCSR		0x9A	/* DDR control and status reg (16b) */
					/*
					 * 14:12 1 single A, 2 single B, 3 dual
					 */
#define E752X_TOLM		0xC4	/* DRAM top of low memory reg (16b) */
#define E752X_REMAPBASE		0xC6	/* DRAM remap base address reg (16b) */
#define E752X_REMAPLIMIT	0xC8	/* DRAM remap limit address reg (16b) */
#define E752X_REMAPOFFSET	0xCA	/* DRAM remap limit offset reg (16b) */
/* E752X register addresses - device 0 function 1 */
#define E752X_FERR_GLOBAL	0x40	/* Global first error register (32b) */
#define E752X_NERR_GLOBAL	0x44	/* Global next error register (32b) */
#define E752X_HI_FERR		0x50	/* Hub interface first error reg (8b) */
#define E752X_HI_NERR		0x52	/* Hub interface next error reg (8b) */
#define E752X_HI_ERRMASK	0x54	/* Hub interface error mask reg (8b) */
#define E752X_HI_SMICMD		0x5A	/* Hub interface SMI command reg (8b) */
#define E752X_SYSBUS_FERR	0x60	/* System bus first error reg (16b) */
#define E752X_SYSBUS_NERR	0x62	/* System bus next error reg (16b) */
#define E752X_SYSBUS_ERRMASK	0x64	/* System bus error mask reg (16b) */
#define E752X_SYSBUS_SMICMD	0x6A	/* System bus SMI command reg (16b) */
#define E752X_BUF_FERR		0x70	/* Memory buffer first error reg (8b) */
#define E752X_BUF_NERR		0x72	/* Memory buffer next error reg (8b) */
#define E752X_BUF_ERRMASK	0x74	/* Memory buffer error mask reg (8b) */
#define E752X_BUF_SMICMD	0x7A	/* Memory buffer SMI command reg (8b) */
#define E752X_DRAM_FERR		0x80	/* DRAM first error register (16b) */
#define E752X_DRAM_NERR		0x82	/* DRAM next error register (16b) */
#define E752X_DRAM_ERRMASK	0x84	/* DRAM error mask register (8b) */
#define E752X_DRAM_SMICMD	0x8A	/* DRAM SMI command register (8b) */
#define E752X_DRAM_RETR_ADD	0xAC	/* DRAM Retry address register (32b) */
#define E752X_DRAM_SEC1_ADD	0xA0	/* DRAM first correctable memory */
					/*     error address register (32b) */
					/*
					 * 31    Reserved
					 * 30:2  CE address (64 byte block 34:6)
					 * 1     Reserved
					 * 0     HiLoCS
					 */
#define E752X_DRAM_SEC2_ADD	0xC8	/* DRAM second correctable memory */
					/*     error address register (32b) */
					/*
					 * 31    Reserved
					 * 30:2  CE address (64 byte block 34:6)
					 * 1     Reserved
					 * 0     HiLoCS
					 */
#define E752X_DRAM_DED_ADD	0xA4	/* DRAM first uncorrectable memory */
					/*     error address register (32b) */
					/*
					 * 31    Reserved
					 * 30:2  UE address (64 byte block 34:6)
					 * 1     Reserved
					 * 0     HiLoCS
					 */
#define E752X_DRAM_SCRB_ADD	0xA8	/* DRAM first uncorrectable scrub memory */
					/*     error address register (32b) */
					/*
					 * 31    Reserved
					 * 30:2  UE address (64 byte block 34:6)
					 * 1     Reserved
					 * 0     HiLoCS
					 */
#define E752X_DRAM_SEC1_SYNDROME 0xC4	/* DRAM first correctable memory */
					/*     error syndrome register (16b) */
#define E752X_DRAM_SEC2_SYNDROME 0xC6	/* DRAM second correctable memory */
					/*     error syndrome register (16b) */
#define E752X_DEVPRES1		0xF4	/* Device Present 1 register (8b) */
/* 3100 IMCH specific register addresses - device 0 function 1 */
#define I3100_NSI_FERR		0x48	/* NSI first error reg (32b) */
#define I3100_NSI_NERR		0x4C	/* NSI next error reg (32b) */
#define I3100_NSI_SMICMD	0x54	/* NSI SMI command register (32b) */
#define I3100_NSI_EMASK		0x90	/* NSI error mask register (32b) */

/* ICH5R register addresses - device 30 function 0 */
#define ICH5R_PCI_STAT		0x06	/* PCI status register (16b) */
#define ICH5R_PCI_2ND_STAT	0x1E	/* PCI status secondary reg (16b) */
#define ICH5R_PCI_BRIDGE_CTL	0x3E	/* PCI bridge control register (16b) */
enum e752x_chips {
	E7520 = 0,
	E7525 = 1,
	E7320 = 2,
	I3100 = 3
};
struct e752x_pvt {
	struct pci_dev *bridge_ck;
	struct pci_dev *dev_d0f0;
	struct pci_dev *dev_d0f1;
	u32 tolm;
	u32 remapbase;
	u32 remaplimit;
	int mc_symmetric;
	u8 map[8];
	int map_type;
	const struct e752x_dev_info *dev_info;
};

struct e752x_dev_info {
	u16 err_dev;
	u16 ctl_dev;
	const char *ctl_name;
};

struct e752x_error_info {
	u32 ferr_global;
	u32 nerr_global;
	u32 nsi_ferr;		/* 3100 only */
	u32 nsi_nerr;		/* 3100 only */
	u8 hi_ferr;		/* all but 3100 */
	u8 hi_nerr;		/* all but 3100 */
	u16 sysbus_ferr;
	u16 sysbus_nerr;
	u8 buf_ferr;
	u8 buf_nerr;
	u16 dram_ferr;
	u16 dram_nerr;
	u32 dram_sec1_add;
	u32 dram_sec2_add;
	u16 dram_sec1_syndrome;
	u16 dram_sec2_syndrome;
	u32 dram_ded_add;
	u32 dram_scrb_add;
	u32 dram_retr_add;
};
static const struct e752x_dev_info e752x_devs[] = {
	[E7520] = {
		.err_dev = PCI_DEVICE_ID_INTEL_7520_1_ERR,
		.ctl_dev = PCI_DEVICE_ID_INTEL_7520_0,
		.ctl_name = "E7520"},
	[E7525] = {
		.err_dev = PCI_DEVICE_ID_INTEL_7525_1_ERR,
		.ctl_dev = PCI_DEVICE_ID_INTEL_7525_0,
		.ctl_name = "E7525"},
	[E7320] = {
		.err_dev = PCI_DEVICE_ID_INTEL_7320_1_ERR,
		.ctl_dev = PCI_DEVICE_ID_INTEL_7320_0,
		.ctl_name = "E7320"},
	[I3100] = {
		.err_dev = PCI_DEVICE_ID_INTEL_3100_1_ERR,
		.ctl_dev = PCI_DEVICE_ID_INTEL_3100_0,
		.ctl_name = "3100"},
};
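
/*
 * Translate a csrow page back to a physical page.  Pages below TOLM (top of
 * low memory) and pages from 4GiB (page 0x100000) up to the remap base map
 * 1:1; anything between TOLM and 4GiB is presumed to have been reclaimed by
 * the chipset and is shifted up into the remap window at remapbase.  For
 * example, with tolm = 0xc0000 (3GiB) and remapbase = 0x100000 (4GiB),
 * page 0xc0000 translates to page 0x100000.
 */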
static unsigned long ctl_page_to_phys(struct mem_ctl_info *mci,
				unsigned long page)
{
	u32 remap;
	struct e752x_pvt *pvt = (struct e752x_pvt *)mci->pvt_info;

	debugf3("%s()\n", __func__);

	if (page < pvt->tolm)
		return page;

	if ((page >= 0x100000) && (page < pvt->remapbase))
		return page;

	remap = (page - pvt->tolm) + pvt->remapbase;

	if (remap < pvt->remaplimit)
		return remap;

	e752x_printk(KERN_ERR, "Invalid page %lx - out of range\n", page);
	return pvt->tolm - 1;
}
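
/*
 * The SEC/DED/SCRB address registers hold physical address bits 34:6 in
 * register bits 30:2 (see the register comments above), so the raw register
 * value is roughly the byte address shifted right by 4.  Shifting it right
 * by a further (PAGE_SHIFT - 4) therefore yields the 4KiB page number, and
 * shifting it left by 4 recovers a byte address for offset_in_page().
 */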
static void do_process_ce(struct mem_ctl_info *mci, u16 error_one,
			u32 sec1_add, u16 sec1_syndrome)
{
	u32 page;
	int row;
	int channel;
	int i;
	struct e752x_pvt *pvt = (struct e752x_pvt *)mci->pvt_info;

	debugf3("%s()\n", __func__);

	/* convert the addr to a 4k page */
	page = sec1_add >> (PAGE_SHIFT - 4);

	/* FIXME - check for -1 */
	if (pvt->mc_symmetric) {
		/* chip selects are bits 14 & 13 */
		row = ((page >> 1) & 3);
		e752x_printk(KERN_WARNING,
			"Test row %d Table %d %d %d %d %d %d %d %d\n", row,
			pvt->map[0], pvt->map[1], pvt->map[2], pvt->map[3],
			pvt->map[4], pvt->map[5], pvt->map[6],
			pvt->map[7]);

		/* test for channel remapping */
		for (i = 0; i < 8; i++) {
			if (pvt->map[i] == row)
				break;
		}

		e752x_printk(KERN_WARNING, "Test computed row %d\n", i);

		if (i < 8)
			row = i;
		else
			e752x_mc_printk(mci, KERN_WARNING,
					"row %d not found in remap table\n",
					row);
	} else
		row = edac_mc_find_csrow_by_page(mci, page);

	/* 0 = channel A, 1 = channel B */
	channel = !(error_one & 1);

	/* e752x mc reads 34:6 of the DRAM linear address */
	edac_mc_handle_ce(mci, page, offset_in_page(sec1_add << 4),
			sec1_syndrome, row, channel, "e752x CE");
}
static inline void process_ce(struct mem_ctl_info *mci, u16 error_one,
			u32 sec1_add, u16 sec1_syndrome, int *error_found,
			int handle_error)
{
	*error_found = 1;

	if (handle_error)
		do_process_ce(mci, error_one, sec1_add, sec1_syndrome);
}
static void do_process_ue(struct mem_ctl_info *mci, u16 error_one,
			u32 ded_add, u32 scrb_add)
{
	u32 error_2b, block_page;
	int row;
	struct e752x_pvt *pvt = (struct e752x_pvt *)mci->pvt_info;

	debugf3("%s()\n", __func__);

	if (error_one & 0x0202) {
		error_2b = ded_add;

		/* convert to 4k address */
		block_page = error_2b >> (PAGE_SHIFT - 4);

		row = pvt->mc_symmetric ?
			/* chip selects are bits 14 & 13 */
			((block_page >> 1) & 3) :
			edac_mc_find_csrow_by_page(mci, block_page);

		/* e752x mc reads 34:6 of the DRAM linear address */
		edac_mc_handle_ue(mci, block_page,
				offset_in_page(error_2b << 4),
				row, "e752x UE from Read");
	}

	if (error_one & 0x0404) {
		error_2b = scrb_add;

		/* convert to 4k address */
		block_page = error_2b >> (PAGE_SHIFT - 4);

		row = pvt->mc_symmetric ?
			/* chip selects are bits 14 & 13 */
			((block_page >> 1) & 3) :
			edac_mc_find_csrow_by_page(mci, block_page);

		/* e752x mc reads 34:6 of the DRAM linear address */
		edac_mc_handle_ue(mci, block_page,
				offset_in_page(error_2b << 4),
				row, "e752x UE from Scrubber");
	}
}
static inline void process_ue(struct mem_ctl_info *mci, u16 error_one,
			u32 ded_add, u32 scrb_add, int *error_found,
			int handle_error)
{
	*error_found = 1;

	if (handle_error)
		do_process_ue(mci, error_one, ded_add, scrb_add);
}

static inline void process_ue_no_info_wr(struct mem_ctl_info *mci,
					int *error_found, int handle_error)
{
	*error_found = 1;

	if (!handle_error)
		return;

	debugf3("%s()\n", __func__);
	edac_mc_handle_ue_no_info(mci, "e752x UE log memory write");
}
static void do_process_ded_retry(struct mem_ctl_info *mci, u16 error,
				u32 retry_add)
{
	u32 error_1b, page;
	int row;
	struct e752x_pvt *pvt = (struct e752x_pvt *)mci->pvt_info;

	error_1b = retry_add;

	/* convert the addr to a 4k page */
	page = error_1b >> (PAGE_SHIFT - 4);

	/* chip selects are bits 14 & 13 */
	row = pvt->mc_symmetric ? ((page >> 1) & 3) :
		edac_mc_find_csrow_by_page(mci, page);

	e752x_mc_printk(mci, KERN_WARNING,
			"CE page 0x%lx, row %d : Memory read retry\n",
			(long unsigned int)page, row);
}

static inline void process_ded_retry(struct mem_ctl_info *mci, u16 error,
				u32 retry_add, int *error_found,
				int handle_error)
{
	*error_found = 1;

	if (handle_error)
		do_process_ded_retry(mci, error, retry_add);
}

static inline void process_threshold_ce(struct mem_ctl_info *mci, u16 error,
					int *error_found, int handle_error)
{
	*error_found = 1;

	if (handle_error)
		e752x_mc_printk(mci, KERN_WARNING, "Memory threshold CE\n");
}
static char *global_message[11] = {
	"PCI Express C1", "PCI Express C", "PCI Express B1",
	"PCI Express B", "PCI Express A1", "PCI Express A",
	"DMA Controller", "HUB or NS Interface", "System Bus",
	"DRAM Controller", "Internal Buffer"
};

static char *fatal_message[2] = { "Non-Fatal ", "Fatal " };

static void do_global_error(int fatal, u32 errors)
{
	int i;

	for (i = 0; i < 11; i++) {
		if (errors & (1 << i))
			e752x_printk(KERN_WARNING, "%sError %s\n",
				fatal_message[fatal], global_message[i]);
	}
}

static inline void global_error(int fatal, u32 errors, int *error_found,
				int handle_error)
{
	*error_found = 1;

	if (handle_error)
		do_global_error(fatal, errors);
}
static char *hub_message[7] = {
	"HI Address or Command Parity", "HI Illegal Access",
	"HI Internal Parity", "Out of Range Access",
	"HI Data Parity", "Enhanced Config Access",
	"Hub Interface Target Abort"
};

static void do_hub_error(int fatal, u8 errors)
{
	int i;

	for (i = 0; i < 7; i++) {
		if (errors & (1 << i))
			e752x_printk(KERN_WARNING, "%sError %s\n",
				fatal_message[fatal], hub_message[i]);
	}
}

static inline void hub_error(int fatal, u8 errors, int *error_found,
			int handle_error)
{
	*error_found = 1;

	if (handle_error)
		do_hub_error(fatal, errors);
}
#define NSI_FATAL_MASK		0x0c080081
#define NSI_NON_FATAL_MASK	0x23a0ba64
#define NSI_ERR_MASK		(NSI_FATAL_MASK | NSI_NON_FATAL_MASK)

static char *nsi_message[30] = {
	"NSI Link Down",	/* NSI_FERR/NSI_NERR bit 0, fatal error */
	"",			/* reserved */
	"NSI Parity Error",	/* bit 2, non-fatal */
	"",			/* reserved */
	"",			/* reserved */
	"Correctable Error Message",	/* bit 5, non-fatal */
	"Non-Fatal Error Message",	/* bit 6, non-fatal */
	"Fatal Error Message",	/* bit 7, fatal */
	"",			/* reserved */
	"Receiver Error",	/* bit 9, non-fatal */
	"",			/* reserved */
	"Bad TLP",		/* bit 11, non-fatal */
	"Bad DLLP",		/* bit 12, non-fatal */
	"REPLAY_NUM Rollover",	/* bit 13, non-fatal */
	"",			/* reserved */
	"Replay Timer Timeout",	/* bit 15, non-fatal */
	"",			/* reserved */
	"",			/* reserved */
	"",			/* reserved */
	"Data Link Protocol Error",	/* bit 19, fatal */
	"",			/* reserved */
	"Poisoned TLP",		/* bit 21, non-fatal */
	"",			/* reserved */
	"Completion Timeout",	/* bit 23, non-fatal */
	"Completer Abort",	/* bit 24, non-fatal */
	"Unexpected Completion",	/* bit 25, non-fatal */
	"Receiver Overflow",	/* bit 26, fatal */
	"Malformed TLP",	/* bit 27, fatal */
	"",			/* reserved */
	"Unsupported Request"	/* bit 29, non-fatal */
};

static void do_nsi_error(int fatal, u32 errors)
{
	int i;

	for (i = 0; i < 30; i++) {
		if (errors & (1 << i))
			printk(KERN_WARNING "%sError %s\n",
			       fatal_message[fatal], nsi_message[i]);
	}
}

static inline void nsi_error(int fatal, u32 errors, int *error_found,
			int handle_error)
{
	*error_found = 1;

	if (handle_error)
		do_nsi_error(fatal, errors);
}
static char *membuf_message[4] = {
	"Internal PMWB to DRAM parity",
	"Internal PMWB to System Bus Parity",
	"Internal System Bus or IO to PMWB Parity",
	"Internal DRAM to PMWB Parity"
};

static void do_membuf_error(u8 errors)
{
	int i;

	for (i = 0; i < 4; i++) {
		if (errors & (1 << i))
			e752x_printk(KERN_WARNING, "Non-Fatal Error %s\n",
				membuf_message[i]);
	}
}

static inline void membuf_error(u8 errors, int *error_found, int handle_error)
{
	*error_found = 1;

	if (handle_error)
		do_membuf_error(errors);
}
static char *sysbus_message[10] = {
	"Addr or Request Parity",
	"Data Strobe Glitch",
	"Addr Strobe Glitch",
	"Data Parity",
	"Addr Above TOM",
	"Non DRAM Lock Error",
	"MCERR", "BINIT",
	"Memory Parity",
	"IO Subsystem Parity"
};

static void do_sysbus_error(int fatal, u32 errors)
{
	int i;

	for (i = 0; i < 10; i++) {
		if (errors & (1 << i))
			e752x_printk(KERN_WARNING, "%sError System Bus %s\n",
				fatal_message[fatal], sysbus_message[i]);
	}
}

static inline void sysbus_error(int fatal, u32 errors, int *error_found,
			int handle_error)
{
	*error_found = 1;

	if (handle_error)
		do_sysbus_error(fatal, errors);
}
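
/*
 * Each e752x_check_*() helper below inspects the "first error" (FERR) and
 * "next error" (NERR) snapshots captured in struct e752x_error_info.  The
 * process_*() and *_error() wrappers always set *error_found so the caller
 * knows the hardware logged something, but they only decode and report it
 * when handle_error is non-zero.
 */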
static void e752x_check_hub_interface(struct e752x_error_info *info,
				int *error_found, int handle_error)
{
	u8 stat8;

	//pci_read_config_byte(dev,E752X_HI_FERR,&stat8);

	stat8 = info->hi_ferr;

	if (stat8 & 0x7f) {	/* Error, so process */
		stat8 &= 0x7f;

		if (stat8 & 0x2b)
			hub_error(1, stat8 & 0x2b, error_found, handle_error);

		if (stat8 & 0x54)
			hub_error(0, stat8 & 0x54, error_found, handle_error);
	}
	//pci_read_config_byte(dev,E752X_HI_NERR,&stat8);

	stat8 = info->hi_nerr;

	if (stat8 & 0x7f) {	/* Error, so process */
		stat8 &= 0x7f;

		if (stat8 & 0x2b)
			hub_error(1, stat8 & 0x2b, error_found, handle_error);

		if (stat8 & 0x54)
			hub_error(0, stat8 & 0x54, error_found, handle_error);
	}
}
static void e752x_check_ns_interface(struct e752x_error_info *info,
				int *error_found, int handle_error)
{
	u32 stat32;

	stat32 = info->nsi_ferr;
	if (stat32 & NSI_ERR_MASK) {	/* Error, so process */
		if (stat32 & NSI_FATAL_MASK)	/* check for fatal errors */
			nsi_error(1, stat32 & NSI_FATAL_MASK, error_found,
				  handle_error);
		if (stat32 & NSI_NON_FATAL_MASK) /* check for non-fatal ones */
			nsi_error(0, stat32 & NSI_NON_FATAL_MASK, error_found,
				  handle_error);
	}
	stat32 = info->nsi_nerr;
	if (stat32 & NSI_ERR_MASK) {
		if (stat32 & NSI_FATAL_MASK)
			nsi_error(1, stat32 & NSI_FATAL_MASK, error_found,
				  handle_error);
		if (stat32 & NSI_NON_FATAL_MASK)
			nsi_error(0, stat32 & NSI_NON_FATAL_MASK, error_found,
				  handle_error);
	}
}
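
/*
 * The system bus FERR and NERR registers are both 16 bits wide; they are
 * packed into one 32-bit word here (FERR in the low half, NERR in the high
 * half) and then split back out so that each half can be checked against
 * the same masks: the code treats bits 0x087 as fatal and bits 0x378 as
 * non-fatal system bus errors.
 */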
static void e752x_check_sysbus(struct e752x_error_info *info,
			int *error_found, int handle_error)
{
	u32 stat32, error32;

	//pci_read_config_dword(dev,E752X_SYSBUS_FERR,&stat32);
	stat32 = info->sysbus_ferr + (info->sysbus_nerr << 16);

	if (stat32 == 0)
		return;		/* no errors */

	error32 = (stat32 >> 16) & 0x3ff;
	stat32 = stat32 & 0x3ff;

	if (stat32 & 0x087)
		sysbus_error(1, stat32 & 0x087, error_found, handle_error);

	if (stat32 & 0x378)
		sysbus_error(0, stat32 & 0x378, error_found, handle_error);

	if (error32 & 0x087)
		sysbus_error(1, error32 & 0x087, error_found, handle_error);

	if (error32 & 0x378)
		sysbus_error(0, error32 & 0x378, error_found, handle_error);
}
static void e752x_check_membuf(struct e752x_error_info *info,
			int *error_found, int handle_error)
{
	u8 stat8;

	stat8 = info->buf_ferr;

	if (stat8 & 0x0f) {	/* Error, so process */
		stat8 &= 0x0f;
		membuf_error(stat8, error_found, handle_error);
	}

	stat8 = info->buf_nerr;

	if (stat8 & 0x0f) {	/* Error, so process */
		stat8 &= 0x0f;
		membuf_error(stat8, error_found, handle_error);
	}
}
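
/*
 * The 16-bit DRAM FERR/NERR registers appear to report each error class
 * once per channel (one bit in the low byte, the matching bit in the high
 * byte), which is why every mask below repeats the same pattern in both
 * bytes: 0x0101 = correctable, 0x0606 = uncorrectable read/scrub, 0x2020 =
 * read retry, 0x0808 = CE threshold, 0x4040 = UE during a log memory write.
 * do_process_ce() later uses bit 0 of the value to pick channel A or B.
 */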
static void e752x_check_dram(struct mem_ctl_info *mci,
			struct e752x_error_info *info, int *error_found,
			int handle_error)
{
	u16 error_one, error_next;

	error_one = info->dram_ferr;
	error_next = info->dram_nerr;

	/* decode and report errors */
	if (error_one & 0x0101)	/* check first error correctable */
		process_ce(mci, error_one, info->dram_sec1_add,
			info->dram_sec1_syndrome, error_found, handle_error);

	if (error_next & 0x0101)	/* check next error correctable */
		process_ce(mci, error_next, info->dram_sec2_add,
			info->dram_sec2_syndrome, error_found, handle_error);

	if (error_one & 0x4040)
		process_ue_no_info_wr(mci, error_found, handle_error);

	if (error_next & 0x4040)
		process_ue_no_info_wr(mci, error_found, handle_error);

	if (error_one & 0x2020)
		process_ded_retry(mci, error_one, info->dram_retr_add,
				error_found, handle_error);

	if (error_next & 0x2020)
		process_ded_retry(mci, error_next, info->dram_retr_add,
				error_found, handle_error);

	if (error_one & 0x0808)
		process_threshold_ce(mci, error_one, error_found, handle_error);

	if (error_next & 0x0808)
		process_threshold_ce(mci, error_next, error_found,
				handle_error);

	if (error_one & 0x0606)
		process_ue(mci, error_one, info->dram_ded_add,
			info->dram_scrb_add, error_found, handle_error);

	if (error_next & 0x0606)
		process_ue(mci, error_next, info->dram_ded_add,
			info->dram_scrb_add, error_found, handle_error);
}
static void e752x_get_error_info(struct mem_ctl_info *mci,
				struct e752x_error_info *info)
{
	struct pci_dev *dev;
	struct e752x_pvt *pvt;

	memset(info, 0, sizeof(*info));
	pvt = (struct e752x_pvt *)mci->pvt_info;
	dev = pvt->dev_d0f1;
	pci_read_config_dword(dev, E752X_FERR_GLOBAL, &info->ferr_global);

	if (info->ferr_global) {
		if (pvt->dev_info->err_dev == PCI_DEVICE_ID_INTEL_3100_1_ERR) {
			pci_read_config_dword(dev, I3100_NSI_FERR,
					&info->nsi_ferr);
			info->hi_ferr = 0;
		} else {
			pci_read_config_byte(dev, E752X_HI_FERR,
					&info->hi_ferr);
			info->nsi_ferr = 0;
		}
		pci_read_config_word(dev, E752X_SYSBUS_FERR,
				&info->sysbus_ferr);
		pci_read_config_byte(dev, E752X_BUF_FERR, &info->buf_ferr);
		pci_read_config_word(dev, E752X_DRAM_FERR, &info->dram_ferr);
		pci_read_config_dword(dev, E752X_DRAM_SEC1_ADD,
				&info->dram_sec1_add);
		pci_read_config_word(dev, E752X_DRAM_SEC1_SYNDROME,
				&info->dram_sec1_syndrome);
		pci_read_config_dword(dev, E752X_DRAM_DED_ADD,
				&info->dram_ded_add);
		pci_read_config_dword(dev, E752X_DRAM_SCRB_ADD,
				&info->dram_scrb_add);
		pci_read_config_dword(dev, E752X_DRAM_RETR_ADD,
				&info->dram_retr_add);

		/* ignore the reserved bits just in case */
		if (info->hi_ferr & 0x7f)
			pci_write_config_byte(dev, E752X_HI_FERR,
					info->hi_ferr);

		if (info->nsi_ferr & NSI_ERR_MASK)
			pci_write_config_dword(dev, I3100_NSI_FERR,
					info->nsi_ferr);

		if (info->sysbus_ferr)
			pci_write_config_word(dev, E752X_SYSBUS_FERR,
					info->sysbus_ferr);

		if (info->buf_ferr & 0x0f)
			pci_write_config_byte(dev, E752X_BUF_FERR,
					info->buf_ferr);

		if (info->dram_ferr)
			pci_write_bits16(pvt->bridge_ck, E752X_DRAM_FERR,
					info->dram_ferr, info->dram_ferr);

		pci_write_config_dword(dev, E752X_FERR_GLOBAL,
				info->ferr_global);
	}

	pci_read_config_dword(dev, E752X_NERR_GLOBAL, &info->nerr_global);

	if (info->nerr_global) {
		if (pvt->dev_info->err_dev == PCI_DEVICE_ID_INTEL_3100_1_ERR) {
			pci_read_config_dword(dev, I3100_NSI_NERR,
					&info->nsi_nerr);
			info->hi_nerr = 0;
		} else {
			pci_read_config_byte(dev, E752X_HI_NERR,
					&info->hi_nerr);
			info->nsi_nerr = 0;
		}
		pci_read_config_word(dev, E752X_SYSBUS_NERR,
				&info->sysbus_nerr);
		pci_read_config_byte(dev, E752X_BUF_NERR, &info->buf_nerr);
		pci_read_config_word(dev, E752X_DRAM_NERR, &info->dram_nerr);
		pci_read_config_dword(dev, E752X_DRAM_SEC2_ADD,
				&info->dram_sec2_add);
		pci_read_config_word(dev, E752X_DRAM_SEC2_SYNDROME,
				&info->dram_sec2_syndrome);

		if (info->hi_nerr & 0x7f)
			pci_write_config_byte(dev, E752X_HI_NERR,
					info->hi_nerr);

		if (info->nsi_nerr & NSI_ERR_MASK)
			pci_write_config_dword(dev, I3100_NSI_NERR,
					info->nsi_nerr);

		if (info->sysbus_nerr)
			pci_write_config_word(dev, E752X_SYSBUS_NERR,
					info->sysbus_nerr);

		if (info->buf_nerr & 0x0f)
			pci_write_config_byte(dev, E752X_BUF_NERR,
					info->buf_nerr);

		if (info->dram_nerr)
			pci_write_bits16(pvt->bridge_ck, E752X_DRAM_NERR,
					info->dram_nerr, info->dram_nerr);

		pci_write_config_dword(dev, E752X_NERR_GLOBAL,
				info->nerr_global);
	}
}
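
/*
 * FERR_GLOBAL/NERR_GLOBAL collect one status bit per error source.  As the
 * shifts below show, the code takes bits 18-27 as the fatal sources and
 * bits 4-14 as the non-fatal ones; either field indexes global_message[]
 * (PCI Express ports, DMA, hub/NS interface, system bus, DRAM controller,
 * internal buffer).
 */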
static int e752x_process_error_info(struct mem_ctl_info *mci,
				struct e752x_error_info *info,
				int handle_errors)
{
	u32 error32, stat32;
	int error_found;

	error_found = 0;
	error32 = (info->ferr_global >> 18) & 0x3ff;
	stat32 = (info->ferr_global >> 4) & 0x7ff;

	if (error32)
		global_error(1, error32, &error_found, handle_errors);

	if (stat32)
		global_error(0, stat32, &error_found, handle_errors);

	error32 = (info->nerr_global >> 18) & 0x3ff;
	stat32 = (info->nerr_global >> 4) & 0x7ff;

	if (error32)
		global_error(1, error32, &error_found, handle_errors);

	if (stat32)
		global_error(0, stat32, &error_found, handle_errors);

	e752x_check_hub_interface(info, &error_found, handle_errors);
	e752x_check_ns_interface(info, &error_found, handle_errors);
	e752x_check_sysbus(info, &error_found, handle_errors);
	e752x_check_membuf(info, &error_found, handle_errors);
	e752x_check_dram(mci, info, &error_found, handle_errors);

	return error_found;
}
static void e752x_check(struct mem_ctl_info *mci)
{
	struct e752x_error_info info;

	debugf3("%s()\n", __func__);
	e752x_get_error_info(mci, &info);
	e752x_process_error_info(mci, &info, 1);
}
/* Return 1 if dual channel mode is active.  Else return 0. */
static inline int dual_channel_active(u16 ddrcsr)
{
	return (((ddrcsr >> 12) & 3) == 3);
}

/* Remap csrow index numbers if map_type is "reverse" */
static inline int remap_csrow_index(struct mem_ctl_info *mci, int index)
{
	struct e752x_pvt *pvt = mci->pvt_info;

	if (!pvt->map_type)
		return (7 - index);

	return (index);
}
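
/*
 * Worked example for the DRB-to-pages conversion below (the numbers are
 * illustrative, not taken from any particular board): in dual channel mode
 * drc_drbg is 2, so each DRB unit is 128MiB and a DRB value of 0x04 means a
 * cumulative boundary of 512MiB, i.e. 0x04 << (25 + 2 - PAGE_SHIFT) =
 * 0x20000 4KiB pages.  A csrow whose DRB equals the previous row's DRB adds
 * no memory and is treated as unpopulated.
 */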
static void e752x_init_csrows(struct mem_ctl_info *mci, struct pci_dev *pdev,
			u16 ddrcsr)
{
	struct csrow_info *csrow;
	unsigned long last_cumul_size;
	int index, mem_dev, drc_chan;
	int drc_drbg;		/* DRB granularity 0=64mb, 1=128mb */
	int drc_ddim;		/* DRAM Data Integrity Mode 0=none, 2=edac */
	u8 value;
	u32 dra, drc, cumul_size;

	dra = 0;
	for (index = 0; index < 4; index++) {
		u8 dra_reg;
		pci_read_config_byte(pdev, E752X_DRA + index, &dra_reg);
		dra |= dra_reg << (index * 8);
	}
	pci_read_config_dword(pdev, E752X_DRC, &drc);
	drc_chan = dual_channel_active(ddrcsr);
	drc_drbg = drc_chan + 1;	/* 128 in dual mode, 64 in single */
	drc_ddim = (drc >> 20) & 0x3;

	/* The dram row boundary (DRB) reg values are boundary address for
	 * each DRAM row with a granularity of 64 or 128MB (single/dual
	 * channel operation).  DRB regs are cumulative; therefore DRB7 will
	 * contain the total memory contained in all eight rows.
	 */
	for (last_cumul_size = index = 0; index < mci->nr_csrows; index++) {
		/* mem_dev 0=x8, 1=x4 */
		mem_dev = (dra >> (index * 4 + 2)) & 0x3;
		csrow = &mci->csrows[remap_csrow_index(mci, index)];

		mem_dev = (mem_dev == 2);
		pci_read_config_byte(pdev, E752X_DRB + index, &value);
		/* convert a 128 or 64 MiB DRB to a page size. */
		cumul_size = value << (25 + drc_drbg - PAGE_SHIFT);
		debugf3("%s(): (%d) cumul_size 0x%x\n", __func__, index,
			cumul_size);
		if (cumul_size == last_cumul_size)
			continue;	/* not populated */

		csrow->first_page = last_cumul_size;
		csrow->last_page = cumul_size - 1;
		csrow->nr_pages = cumul_size - last_cumul_size;
		last_cumul_size = cumul_size;
		csrow->grain = 1 << 12;	/* 4KiB - resolution of CELOG */
		csrow->mtype = MEM_RDDR;	/* only one type supported */
		csrow->dtype = mem_dev ? DEV_X4 : DEV_X8;

		/*
		 * if single channel or x8 devices then SECDED
		 * if dual channel and x4 then S4ECD4ED
		 */
		if (drc_ddim) {
			if (drc_chan && mem_dev) {
				csrow->edac_mode = EDAC_S4ECD4ED;
				mci->edac_cap |= EDAC_FLAG_S4ECD4ED;
			} else {
				csrow->edac_mode = EDAC_SECDED;
				mci->edac_cap |= EDAC_FLAG_SECDED;
			}
		} else
			csrow->edac_mode = EDAC_NONE;
	}
}
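
/*
 * Build the chip-select remap table consulted by do_process_ce() when
 * pvt->mc_symmetric is set.  map[] holds one entry per chip-select row,
 * with 0xff marking an empty slot.  Since DRB values are cumulative, two
 * consecutive DRB registers with the same value mean the second side of
 * that DIMM slot contributes no memory (single-sided or empty).
 */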
static void e752x_init_mem_map_table(struct pci_dev *pdev,
				struct e752x_pvt *pvt)
{
	int index;
	u8 value, last, row;

	last = 0;
	row = 0;

	for (index = 0; index < 8; index += 2) {
		pci_read_config_byte(pdev, E752X_DRB + index, &value);
		/* test if there is a dimm in this slot */
		if (value == last) {
			/* no dimm in the slot, so flag it as empty */
			pvt->map[index] = 0xff;
			pvt->map[index + 1] = 0xff;
		} else {	/* there is a dimm in the slot */
			pvt->map[index] = row;
			row++;
			last = value;
			/* test the next value to see if the dimm is double
			 * sided
			 */
			pci_read_config_byte(pdev, E752X_DRB + index + 1,
					&value);

			/* if the next DRB is unchanged the dimm is single
			 * sided, so flag the second side as empty; otherwise
			 * it is double sided, so save the next row number
			 */
			pvt->map[index + 1] = (value == last) ? 0xff : row;
			row++;
			last = value;
		}
	}
}
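
/*
 * Locate the two PCI functions the driver needs: device 0 function 0 (the
 * DRAM controller) and device 0 function 1 (the error reporting registers).
 * If the BIOS has hidden function 1, pci_get_device() will not find it and
 * pci_scan_single_device() is used to instantiate it directly, relying on
 * the DEVPRES1 unhide done earlier in e752x_probe1().
 */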
/* Return 0 on success or 1 on failure. */
static int e752x_get_devs(struct pci_dev *pdev, int dev_idx,
			struct e752x_pvt *pvt)
{
	struct pci_dev *dev;

	pvt->bridge_ck = pci_get_device(PCI_VENDOR_ID_INTEL,
					pvt->dev_info->err_dev, pvt->bridge_ck);

	if (pvt->bridge_ck == NULL)
		pvt->bridge_ck = pci_scan_single_device(pdev->bus,
							PCI_DEVFN(0, 1));

	if (pvt->bridge_ck == NULL) {
		e752x_printk(KERN_ERR, "error reporting device not found:"
			"vendor %x device 0x%x (broken BIOS?)\n",
			PCI_VENDOR_ID_INTEL, e752x_devs[dev_idx].err_dev);
		return 1;
	}

	dev = pci_get_device(PCI_VENDOR_ID_INTEL, e752x_devs[dev_idx].ctl_dev,
			NULL);

	if (dev == NULL)
		goto fail;

	pvt->dev_d0f0 = dev;
	pvt->dev_d0f1 = pci_dev_get(pvt->bridge_ck);

	return 0;

fail:
	pci_dev_put(pvt->bridge_ck);
	return 1;
}
/* Setup system bus parity mask register.
 * Sysbus parity supported on:
 *   e7320/e7520/e7525 + Xeon
 *   i3100 + Xeon/Celeron
 * Sysbus parity not supported on:
 *   i3100 + Pentium M/Celeron M/Core Duo/Core2 Duo
 */
static void e752x_init_sysbus_parity_mask(struct e752x_pvt *pvt)
{
	char *cpu_id = cpu_data(0).x86_model_id;
	struct pci_dev *dev = pvt->dev_d0f1;
	int enable = 1;

	/* Allow module parameter override, else see if CPU supports parity */
	if (sysbus_parity != -1) {
		enable = sysbus_parity;
	} else if (cpu_id[0] &&
		   ((strstr(cpu_id, "Pentium") && strstr(cpu_id, " M ")) ||
		    (strstr(cpu_id, "Celeron") && strstr(cpu_id, " M ")) ||
		    (strstr(cpu_id, "Core") && strstr(cpu_id, "Duo")))) {
		e752x_printk(KERN_INFO, "System Bus Parity not "
			     "supported by CPU, disabling\n");
		enable = 0;
	}

	if (enable)
		pci_write_config_word(dev, E752X_SYSBUS_ERRMASK, 0x0000);
	else
		pci_write_config_word(dev, E752X_SYSBUS_ERRMASK, 0x0309);
}
static void e752x_init_error_reporting_regs(struct e752x_pvt *pvt)
{
	struct pci_dev *dev;

	dev = pvt->dev_d0f1;
	/* Turn off error disable & SMI in case the BIOS turned it on */
	if (pvt->dev_info->err_dev == PCI_DEVICE_ID_INTEL_3100_1_ERR) {
		pci_write_config_dword(dev, I3100_NSI_EMASK, 0);
		pci_write_config_dword(dev, I3100_NSI_SMICMD, 0);
	} else {
		pci_write_config_byte(dev, E752X_HI_ERRMASK, 0x00);
		pci_write_config_byte(dev, E752X_HI_SMICMD, 0x00);
	}

	e752x_init_sysbus_parity_mask(pvt);

	pci_write_config_word(dev, E752X_SYSBUS_SMICMD, 0x00);
	pci_write_config_byte(dev, E752X_BUF_ERRMASK, 0x00);
	pci_write_config_byte(dev, E752X_BUF_SMICMD, 0x00);
	pci_write_config_byte(dev, E752X_DRAM_ERRMASK, 0x00);
	pci_write_config_byte(dev, E752X_DRAM_SMICMD, 0x00);
}
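
/*
 * Probe outline: read DEVPRES1 and bail out if device 0 function 1 is
 * hidden (unless force_function_unhide is set, in which case bit 5 is set
 * to expose it), work out single vs dual channel from DDRCSR, allocate the
 * mem_ctl_info, look up the companion PCI devices, fill in the csrows and
 * remap table, register with the EDAC core, and finally unmask error
 * reporting.
 */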
static int e752x_probe1(struct pci_dev *pdev, int dev_idx)
{
	u16 pci_data;
	u8 stat8;
	struct mem_ctl_info *mci;
	struct e752x_pvt *pvt;
	u16 ddrcsr;
	int drc_chan;		/* Number of channels 0=1chan,1=2chan */
	struct e752x_error_info discard;

	debugf0("%s(): mci\n", __func__);
	debugf0("Starting Probe1\n");

	/* check to see if device 0 function 1 is enabled; if it isn't, we
	 * assume the BIOS has reserved it for a reason and is expecting
	 * exclusive access, we take care not to violate that assumption and
	 * fail the probe. */
	pci_read_config_byte(pdev, E752X_DEVPRES1, &stat8);
	if (!force_function_unhide && !(stat8 & (1 << 5))) {
		printk(KERN_INFO "Contact your BIOS vendor to see if the "
			"E752x error registers can be safely un-hidden\n");
		return -ENODEV;
	}
	stat8 |= (1 << 5);
	pci_write_config_byte(pdev, E752X_DEVPRES1, stat8);

	pci_read_config_word(pdev, E752X_DDRCSR, &ddrcsr);
	/* FIXME: should check >>12 or 0xf, true for all? */
	/* Dual channel = 1, Single channel = 0 */
	drc_chan = dual_channel_active(ddrcsr);

	mci = edac_mc_alloc(sizeof(*pvt), E752X_NR_CSROWS, drc_chan + 1, 0);

	if (mci == NULL)
		return -ENOMEM;

	debugf3("%s(): init mci\n", __func__);
	mci->mtype_cap = MEM_FLAG_RDDR;
	/* 3100 IMCH supports SECDED only */
	mci->edac_ctl_cap = (dev_idx == I3100) ? EDAC_FLAG_SECDED :
		(EDAC_FLAG_NONE | EDAC_FLAG_SECDED | EDAC_FLAG_S4ECD4ED);
	/* FIXME - what if different memory types are in different csrows? */
	mci->mod_name = EDAC_MOD_STR;
	mci->mod_ver = E752X_REVISION;
	mci->dev = &pdev->dev;

	debugf3("%s(): init pvt\n", __func__);
	pvt = (struct e752x_pvt *)mci->pvt_info;
	pvt->dev_info = &e752x_devs[dev_idx];
	pvt->mc_symmetric = ((ddrcsr & 0x10) != 0);

	if (e752x_get_devs(pdev, dev_idx, pvt)) {
		edac_mc_free(mci);
		return -ENODEV;
	}

	debugf3("%s(): more mci init\n", __func__);
	mci->ctl_name = pvt->dev_info->ctl_name;
	mci->dev_name = pci_name(pdev);
	mci->edac_check = e752x_check;
	mci->ctl_page_to_phys = ctl_page_to_phys;

	/* set the map type.  1 = normal, 0 = reversed
	 * Must be set before e752x_init_csrows in case csrow mapping
	 * is reversed.
	 */
	pci_read_config_byte(pdev, E752X_DRM, &stat8);
	pvt->map_type = ((stat8 & 0x0f) > ((stat8 >> 4) & 0x0f));

	e752x_init_csrows(mci, pdev, ddrcsr);
	e752x_init_mem_map_table(pdev, pvt);

	if (dev_idx == I3100)
		mci->edac_cap = EDAC_FLAG_SECDED; /* the only mode supported */
	else
		mci->edac_cap |= EDAC_FLAG_NONE;

	debugf3("%s(): tolm, remapbase, remaplimit\n", __func__);

	/* load the top of low memory, remap base, and remap limit vars */
	pci_read_config_word(pdev, E752X_TOLM, &pci_data);
	pvt->tolm = ((u32) pci_data) << 4;
	pci_read_config_word(pdev, E752X_REMAPBASE, &pci_data);
	pvt->remapbase = ((u32) pci_data) << 14;
	pci_read_config_word(pdev, E752X_REMAPLIMIT, &pci_data);
	pvt->remaplimit = ((u32) pci_data) << 14;
	e752x_printk(KERN_INFO,
		"tolm = %x, remapbase = %x, remaplimit = %x\n",
		pvt->tolm, pvt->remapbase, pvt->remaplimit);

	/* Here we assume that we will never see multiple instances of this
	 * type of memory controller.  The ID is therefore hardcoded to 0.
	 */
	if (edac_mc_add_mc(mci)) {
		debugf3("%s(): failed edac_mc_add_mc()\n", __func__);
		goto fail;
	}

	e752x_init_error_reporting_regs(pvt);
	e752x_get_error_info(mci, &discard);	/* clear other MCH errors */

	/* allocating generic PCI control info */
	e752x_pci = edac_pci_create_generic_ctl(&pdev->dev, EDAC_MOD_STR);
	if (!e752x_pci) {
		printk(KERN_WARNING
			"%s(): Unable to create PCI control\n", __func__);
		printk(KERN_WARNING
			"%s(): PCI error report via EDAC not setup\n",
			__func__);
	}

	/* get this far and it's successful */
	debugf3("%s(): success\n", __func__);
	return 0;

fail:
	pci_dev_put(pvt->dev_d0f0);
	pci_dev_put(pvt->dev_d0f1);
	pci_dev_put(pvt->bridge_ck);
	edac_mc_free(mci);

	return -ENODEV;
}
/* returns count (>= 0), or negative on error */
static int __devinit e752x_init_one(struct pci_dev *pdev,
				const struct pci_device_id *ent)
{
	debugf0("%s()\n", __func__);

	/* wake up and enable device */
	if (pci_enable_device(pdev) < 0)
		return -EIO;

	return e752x_probe1(pdev, ent->driver_data);
}
static void __devexit e752x_remove_one(struct pci_dev *pdev)
{
	struct mem_ctl_info *mci;
	struct e752x_pvt *pvt;

	debugf0("%s()\n", __func__);

	if (e752x_pci)
		edac_pci_release_generic_ctl(e752x_pci);

	if ((mci = edac_mc_del_mc(&pdev->dev)) == NULL)
		return;

	pvt = (struct e752x_pvt *)mci->pvt_info;
	pci_dev_put(pvt->dev_d0f0);
	pci_dev_put(pvt->dev_d0f1);
	pci_dev_put(pvt->bridge_ck);
	edac_mc_free(mci);
}
static const struct pci_device_id e752x_pci_tbl[] __devinitdata = {
	{
	 PCI_VEND_DEV(INTEL, 7520_0), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
	 E7520},
	{
	 PCI_VEND_DEV(INTEL, 7525_0), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
	 E7525},
	{
	 PCI_VEND_DEV(INTEL, 7320_0), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
	 E7320},
	{
	 PCI_VEND_DEV(INTEL, 3100_0), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
	 I3100},
	{
	 0,
	 }			/* 0 terminated list. */
};

MODULE_DEVICE_TABLE(pci, e752x_pci_tbl);
static struct pci_driver e752x_driver = {
	.name = EDAC_MOD_STR,
	.probe = e752x_init_one,
	.remove = __devexit_p(e752x_remove_one),
	.id_table = e752x_pci_tbl,
};
static int __init e752x_init(void)
{
	int pci_rc;

	debugf3("%s()\n", __func__);

	/* Ensure that the OPSTATE is set correctly for POLL or NMI */
	opstate_init();

	pci_rc = pci_register_driver(&e752x_driver);
	return (pci_rc < 0) ? pci_rc : 0;
}

static void __exit e752x_exit(void)
{
	debugf3("%s()\n", __func__);
	pci_unregister_driver(&e752x_driver);
}

module_init(e752x_init);
module_exit(e752x_exit);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Linux Networx (http://lnxi.com) Tom Zimmerman\n");
MODULE_DESCRIPTION("MC support for Intel e752x/3100 memory controllers");

module_param(force_function_unhide, int, 0444);
MODULE_PARM_DESC(force_function_unhide, "if BIOS sets Dev0:Fun1 up as hidden:"
		 " 1=force unhide and hope BIOS doesn't fight driver for Dev0:Fun1 access");

module_param(edac_op_state, int, 0444);
MODULE_PARM_DESC(edac_op_state, "EDAC Error Reporting state: 0=Poll,1=NMI");

module_param(sysbus_parity, int, 0444);
MODULE_PARM_DESC(sysbus_parity, "0=disable system bus parity checking,"
		" 1=enable system bus parity checking, default=auto-detect");