/*
 * Intel e752x Memory Controller kernel module
 * (C) 2004 Linux Networx (http://lnxi.com)
 * This file may be distributed under the terms of the
 * GNU General Public License.
 *
 * See "enum e752x_chips" below for supported chipsets
 *
 * Written by Tom Zimmerman
 *
 * Contributors:
 *	Thayne Harbaugh at realmsys.com (?)
 *	Wang Zhenyu at intel.com
 *	Dave Jiang at mvista.com
 *
 * $Id: edac_e752x.c,v 1.5.2.11 2005/10/05 00:43:44 dsp_llnl Exp $
 */
20 #include <linux/module.h>
21 #include <linux/init.h>
22 #include <linux/pci.h>
23 #include <linux/pci_ids.h>
24 #include <linux/edac.h>
25 #include "edac_core.h"
27 #define E752X_REVISION " Ver: 2.0.2 " __DATE__
28 #define EDAC_MOD_STR "e752x_edac"
30 static int report_non_memory_errors
;
31 static int force_function_unhide
;
32 static int sysbus_parity
= -1;
34 static struct edac_pci_ctl_info
*e752x_pci
;
36 #define e752x_printk(level, fmt, arg...) \
37 edac_printk(level, "e752x", fmt, ##arg)
39 #define e752x_mc_printk(mci, level, fmt, arg...) \
40 edac_mc_chipset_printk(mci, level, "e752x", fmt, ##arg)
42 #ifndef PCI_DEVICE_ID_INTEL_7520_0
43 #define PCI_DEVICE_ID_INTEL_7520_0 0x3590
44 #endif /* PCI_DEVICE_ID_INTEL_7520_0 */
46 #ifndef PCI_DEVICE_ID_INTEL_7520_1_ERR
47 #define PCI_DEVICE_ID_INTEL_7520_1_ERR 0x3591
48 #endif /* PCI_DEVICE_ID_INTEL_7520_1_ERR */
50 #ifndef PCI_DEVICE_ID_INTEL_7525_0
51 #define PCI_DEVICE_ID_INTEL_7525_0 0x359E
52 #endif /* PCI_DEVICE_ID_INTEL_7525_0 */
54 #ifndef PCI_DEVICE_ID_INTEL_7525_1_ERR
55 #define PCI_DEVICE_ID_INTEL_7525_1_ERR 0x3593
56 #endif /* PCI_DEVICE_ID_INTEL_7525_1_ERR */
58 #ifndef PCI_DEVICE_ID_INTEL_7320_0
59 #define PCI_DEVICE_ID_INTEL_7320_0 0x3592
60 #endif /* PCI_DEVICE_ID_INTEL_7320_0 */
62 #ifndef PCI_DEVICE_ID_INTEL_7320_1_ERR
63 #define PCI_DEVICE_ID_INTEL_7320_1_ERR 0x3593
64 #endif /* PCI_DEVICE_ID_INTEL_7320_1_ERR */
66 #ifndef PCI_DEVICE_ID_INTEL_3100_0
67 #define PCI_DEVICE_ID_INTEL_3100_0 0x35B0
68 #endif /* PCI_DEVICE_ID_INTEL_3100_0 */
70 #ifndef PCI_DEVICE_ID_INTEL_3100_1_ERR
71 #define PCI_DEVICE_ID_INTEL_3100_1_ERR 0x35B1
72 #endif /* PCI_DEVICE_ID_INTEL_3100_1_ERR */
74 #define E752X_NR_CSROWS 8 /* number of csrows */
76 /* E752X register addresses - device 0 function 0 */
77 #define E752X_MCHSCRB 0x52 /* Memory Scrub register (16b) */
79 * 6:5 Scrub Completion Count
80 * 3:2 Scrub Rate (i3100 only)
82 * 1:0 Scrub Mode enable
85 #define E752X_DRB 0x60 /* DRAM row boundary register (8b) */
86 #define E752X_DRA 0x70 /* DRAM row attribute register (8b) */
88 * 31:30 Device width row 7
89 * 01=x8 10=x4 11=x8 DDR2
90 * 27:26 Device width row 6
91 * 23:22 Device width row 5
92 * 19:20 Device width row 4
93 * 15:14 Device width row 3
94 * 11:10 Device width row 2
95 * 7:6 Device width row 1
96 * 3:2 Device width row 0
98 #define E752X_DRC 0x7C /* DRAM controller mode reg (32b) */
99 /* FIXME:IS THIS RIGHT? */
101 * 22 Number channels 0=1,1=2
102 * 19:18 DRB Granularity 32/64MB
104 #define E752X_DRM 0x80 /* Dimm mapping register */
105 #define E752X_DDRCSR 0x9A /* DDR control and status reg (16b) */
107 * 14:12 1 single A, 2 single B, 3 dual
109 #define E752X_TOLM 0xC4 /* DRAM top of low memory reg (16b) */
110 #define E752X_REMAPBASE 0xC6 /* DRAM remap base address reg (16b) */
111 #define E752X_REMAPLIMIT 0xC8 /* DRAM remap limit address reg (16b) */
112 #define E752X_REMAPOFFSET 0xCA /* DRAM remap limit offset reg (16b) */
114 /* E752X register addresses - device 0 function 1 */
115 #define E752X_FERR_GLOBAL 0x40 /* Global first error register (32b) */
116 #define E752X_NERR_GLOBAL 0x44 /* Global next error register (32b) */
117 #define E752X_HI_FERR 0x50 /* Hub interface first error reg (8b) */
118 #define E752X_HI_NERR 0x52 /* Hub interface next error reg (8b) */
119 #define E752X_HI_ERRMASK 0x54 /* Hub interface error mask reg (8b) */
120 #define E752X_HI_SMICMD 0x5A /* Hub interface SMI command reg (8b) */
121 #define E752X_SYSBUS_FERR 0x60 /* System buss first error reg (16b) */
122 #define E752X_SYSBUS_NERR 0x62 /* System buss next error reg (16b) */
123 #define E752X_SYSBUS_ERRMASK 0x64 /* System buss error mask reg (16b) */
124 #define E752X_SYSBUS_SMICMD 0x6A /* System buss SMI command reg (16b) */
125 #define E752X_BUF_FERR 0x70 /* Memory buffer first error reg (8b) */
126 #define E752X_BUF_NERR 0x72 /* Memory buffer next error reg (8b) */
127 #define E752X_BUF_ERRMASK 0x74 /* Memory buffer error mask reg (8b) */
128 #define E752X_BUF_SMICMD 0x7A /* Memory buffer SMI cmd reg (8b) */
129 #define E752X_DRAM_FERR 0x80 /* DRAM first error register (16b) */
130 #define E752X_DRAM_NERR 0x82 /* DRAM next error register (16b) */
131 #define E752X_DRAM_ERRMASK 0x84 /* DRAM error mask register (8b) */
132 #define E752X_DRAM_SMICMD 0x8A /* DRAM SMI command register (8b) */
133 #define E752X_DRAM_RETR_ADD 0xAC /* DRAM Retry address register (32b) */
134 #define E752X_DRAM_SEC1_ADD 0xA0 /* DRAM first correctable memory */
135 /* error address register (32b) */
138 * 30:2 CE address (64 byte block 34:6
142 #define E752X_DRAM_SEC2_ADD 0xC8 /* DRAM first correctable memory */
143 /* error address register (32b) */
146 * 30:2 CE address (64 byte block 34:6)
150 #define E752X_DRAM_DED_ADD 0xA4 /* DRAM first uncorrectable memory */
151 /* error address register (32b) */
154 * 30:2 CE address (64 byte block 34:6)
158 #define E752X_DRAM_SCRB_ADD 0xA8 /* DRAM 1st uncorrectable scrub mem */
159 /* error address register (32b) */
162 * 30:2 CE address (64 byte block 34:6
166 #define E752X_DRAM_SEC1_SYNDROME 0xC4 /* DRAM first correctable memory */
167 /* error syndrome register (16b) */
168 #define E752X_DRAM_SEC2_SYNDROME 0xC6 /* DRAM second correctable memory */
169 /* error syndrome register (16b) */
170 #define E752X_DEVPRES1 0xF4 /* Device Present 1 register (8b) */
172 /* 3100 IMCH specific register addresses - device 0 function 1 */
173 #define I3100_NSI_FERR 0x48 /* NSI first error reg (32b) */
174 #define I3100_NSI_NERR 0x4C /* NSI next error reg (32b) */
175 #define I3100_NSI_SMICMD 0x54 /* NSI SMI command register (32b) */
176 #define I3100_NSI_EMASK 0x90 /* NSI error mask register (32b) */
178 /* ICH5R register addresses - device 30 function 0 */
179 #define ICH5R_PCI_STAT 0x06 /* PCI status register (16b) */
180 #define ICH5R_PCI_2ND_STAT 0x1E /* PCI status secondary reg (16b) */
181 #define ICH5R_PCI_BRIDGE_CTL 0x3E /* PCI bridge control register (16b) */
191 struct pci_dev
*bridge_ck
;
192 struct pci_dev
*dev_d0f0
;
193 struct pci_dev
*dev_d0f1
;
200 const struct e752x_dev_info
*dev_info
;
203 struct e752x_dev_info
{
206 const char *ctl_name
;
209 struct e752x_error_info
{
212 u32 nsi_ferr
; /* 3100 only */
213 u32 nsi_nerr
; /* 3100 only */
214 u8 hi_ferr
; /* all but 3100 */
215 u8 hi_nerr
; /* all but 3100 */
224 u16 dram_sec1_syndrome
;
225 u16 dram_sec2_syndrome
;
231 static const struct e752x_dev_info e752x_devs
[] = {
233 .err_dev
= PCI_DEVICE_ID_INTEL_7520_1_ERR
,
234 .ctl_dev
= PCI_DEVICE_ID_INTEL_7520_0
,
235 .ctl_name
= "E7520"},
237 .err_dev
= PCI_DEVICE_ID_INTEL_7525_1_ERR
,
238 .ctl_dev
= PCI_DEVICE_ID_INTEL_7525_0
,
239 .ctl_name
= "E7525"},
241 .err_dev
= PCI_DEVICE_ID_INTEL_7320_1_ERR
,
242 .ctl_dev
= PCI_DEVICE_ID_INTEL_7320_0
,
243 .ctl_name
= "E7320"},
245 .err_dev
= PCI_DEVICE_ID_INTEL_3100_1_ERR
,
246 .ctl_dev
= PCI_DEVICE_ID_INTEL_3100_0
,
250 /* Valid scrub rates for the e752x/3100 hardware memory scrubber. We
251 * map the scrubbing bandwidth to a hardware register value. The 'set'
252 * operation finds the 'matching or higher value'. Note that scrubbing
253 * on the e752x can only be enabled/disabled. The 3100 supports
254 * a normal and fast mode.
257 #define SDRATE_EOT 0xFFFFFFFF
260 u32 bandwidth
; /* bandwidth consumed by scrubbing in bytes/sec */
261 u16 scrubval
; /* register value for scrub rate */
264 /* Rate below assumes same performance as i3100 using PC3200 DDR2 in
265 * normal mode. e752x bridges don't support choosing normal or fast mode,
266 * so the scrubbing bandwidth value isn't all that important - scrubbing is
269 static const struct scrubrate scrubrates_e752x
[] = {
270 {0, 0x00}, /* Scrubbing Off */
271 {500000, 0x02}, /* Scrubbing On */
272 {SDRATE_EOT
, 0x00} /* End of Table */
275 /* Fast mode: 2 GByte PC3200 DDR2 scrubbed in 33s = 63161283 bytes/s
276 * Normal mode: 125 (32000 / 256) times slower than fast mode.
278 static const struct scrubrate scrubrates_i3100
[] = {
279 {0, 0x00}, /* Scrubbing Off */
280 {500000, 0x0a}, /* Normal mode - 32k clocks */
281 {62500000, 0x06}, /* Fast mode - 256 clocks */
282 {SDRATE_EOT
, 0x00} /* End of Table */
285 static unsigned long ctl_page_to_phys(struct mem_ctl_info
*mci
,
289 struct e752x_pvt
*pvt
= (struct e752x_pvt
*)mci
->pvt_info
;
291 debugf3("%s()\n", __func__
);
293 if (page
< pvt
->tolm
)
296 if ((page
>= 0x100000) && (page
< pvt
->remapbase
))
299 remap
= (page
- pvt
->tolm
) + pvt
->remapbase
;
301 if (remap
< pvt
->remaplimit
)
304 e752x_printk(KERN_ERR
, "Invalid page %lx - out of range\n", page
);
305 return pvt
->tolm
- 1;
308 static void do_process_ce(struct mem_ctl_info
*mci
, u16 error_one
,
309 u32 sec1_add
, u16 sec1_syndrome
)
315 struct e752x_pvt
*pvt
= (struct e752x_pvt
*)mci
->pvt_info
;
317 debugf3("%s()\n", __func__
);
319 /* convert the addr to 4k page */
320 page
= sec1_add
>> (PAGE_SHIFT
- 4);
322 /* FIXME - check for -1 */
323 if (pvt
->mc_symmetric
) {
324 /* chip select are bits 14 & 13 */
325 row
= ((page
>> 1) & 3);
326 e752x_printk(KERN_WARNING
,
327 "Test row %d Table %d %d %d %d %d %d %d %d\n", row
,
328 pvt
->map
[0], pvt
->map
[1], pvt
->map
[2], pvt
->map
[3],
329 pvt
->map
[4], pvt
->map
[5], pvt
->map
[6],
332 /* test for channel remapping */
333 for (i
= 0; i
< 8; i
++) {
334 if (pvt
->map
[i
] == row
)
338 e752x_printk(KERN_WARNING
, "Test computed row %d\n", i
);
343 e752x_mc_printk(mci
, KERN_WARNING
,
344 "row %d not found in remap table\n",
347 row
= edac_mc_find_csrow_by_page(mci
, page
);
349 /* 0 = channel A, 1 = channel B */
350 channel
= !(error_one
& 1);
352 /* e752x mc reads 34:6 of the DRAM linear address */
353 edac_mc_handle_ce(mci
, page
, offset_in_page(sec1_add
<< 4),
354 sec1_syndrome
, row
, channel
, "e752x CE");
357 static inline void process_ce(struct mem_ctl_info
*mci
, u16 error_one
,
358 u32 sec1_add
, u16 sec1_syndrome
, int *error_found
,
364 do_process_ce(mci
, error_one
, sec1_add
, sec1_syndrome
);
367 static void do_process_ue(struct mem_ctl_info
*mci
, u16 error_one
,
368 u32 ded_add
, u32 scrb_add
)
370 u32 error_2b
, block_page
;
372 struct e752x_pvt
*pvt
= (struct e752x_pvt
*)mci
->pvt_info
;
374 debugf3("%s()\n", __func__
);
376 if (error_one
& 0x0202) {
379 /* convert to 4k address */
380 block_page
= error_2b
>> (PAGE_SHIFT
- 4);
382 row
= pvt
->mc_symmetric
?
383 /* chip select are bits 14 & 13 */
384 ((block_page
>> 1) & 3) :
385 edac_mc_find_csrow_by_page(mci
, block_page
);
387 /* e752x mc reads 34:6 of the DRAM linear address */
388 edac_mc_handle_ue(mci
, block_page
,
389 offset_in_page(error_2b
<< 4),
390 row
, "e752x UE from Read");
392 if (error_one
& 0x0404) {
395 /* convert to 4k address */
396 block_page
= error_2b
>> (PAGE_SHIFT
- 4);
398 row
= pvt
->mc_symmetric
?
399 /* chip select are bits 14 & 13 */
400 ((block_page
>> 1) & 3) :
401 edac_mc_find_csrow_by_page(mci
, block_page
);
403 /* e752x mc reads 34:6 of the DRAM linear address */
404 edac_mc_handle_ue(mci
, block_page
,
405 offset_in_page(error_2b
<< 4),
406 row
, "e752x UE from Scruber");
410 static inline void process_ue(struct mem_ctl_info
*mci
, u16 error_one
,
411 u32 ded_add
, u32 scrb_add
, int *error_found
,
417 do_process_ue(mci
, error_one
, ded_add
, scrb_add
);
/* Report an uncorrectable error seen on a memory-write log for which
 * the hardware records no address information.
 */
static inline void process_ue_no_info_wr(struct mem_ctl_info *mci,
					int *error_found, int handle_error)
{
	*error_found = 1;

	if (!handle_error)
		return;

	debugf3("%s()\n", __func__);
	edac_mc_handle_ue_no_info(mci, "e752x UE log memory write");
}
432 static void do_process_ded_retry(struct mem_ctl_info
*mci
, u16 error
,
437 struct e752x_pvt
*pvt
= (struct e752x_pvt
*)mci
->pvt_info
;
439 error_1b
= retry_add
;
440 page
= error_1b
>> (PAGE_SHIFT
- 4); /* convert the addr to 4k page */
442 /* chip select are bits 14 & 13 */
443 row
= pvt
->mc_symmetric
? ((page
>> 1) & 3) :
444 edac_mc_find_csrow_by_page(mci
, page
);
446 e752x_mc_printk(mci
, KERN_WARNING
,
447 "CE page 0x%lx, row %d : Memory read retry\n",
448 (long unsigned int)page
, row
);
451 static inline void process_ded_retry(struct mem_ctl_info
*mci
, u16 error
,
452 u32 retry_add
, int *error_found
,
458 do_process_ded_retry(mci
, error
, retry_add
);
461 static inline void process_threshold_ce(struct mem_ctl_info
*mci
, u16 error
,
462 int *error_found
, int handle_error
)
467 e752x_mc_printk(mci
, KERN_WARNING
, "Memory threshold CE\n");
470 static char *global_message
[11] = {
478 "HUB or NS Interface",
480 "DRAM Controller", /* 9th entry */
486 static char *fatal_message
[2] = { "Non-Fatal ", "Fatal " };
488 static void do_global_error(int fatal
, u32 errors
)
492 for (i
= 0; i
< 11; i
++) {
493 if (errors
& (1 << i
)) {
494 /* If the error is from DRAM Controller OR
495 * we are to report ALL errors, then
498 if ((i
== DRAM_ENTRY
) || report_non_memory_errors
)
499 e752x_printk(KERN_WARNING
, "%sError %s\n",
500 fatal_message
[fatal
],
506 static inline void global_error(int fatal
, u32 errors
, int *error_found
,
512 do_global_error(fatal
, errors
);
515 static char *hub_message
[7] = {
516 "HI Address or Command Parity", "HI Illegal Access",
517 "HI Internal Parity", "Out of Range Access",
518 "HI Data Parity", "Enhanced Config Access",
519 "Hub Interface Target Abort"
522 static void do_hub_error(int fatal
, u8 errors
)
526 for (i
= 0; i
< 7; i
++) {
527 if (errors
& (1 << i
))
528 e752x_printk(KERN_WARNING
, "%sError %s\n",
529 fatal_message
[fatal
], hub_message
[i
]);
533 static inline void hub_error(int fatal
, u8 errors
, int *error_found
,
539 do_hub_error(fatal
, errors
);
542 #define NSI_FATAL_MASK 0x0c080081
543 #define NSI_NON_FATAL_MASK 0x23a0ba64
544 #define NSI_ERR_MASK (NSI_FATAL_MASK | NSI_NON_FATAL_MASK)
546 static char *nsi_message
[30] = {
547 "NSI Link Down", /* NSI_FERR/NSI_NERR bit 0, fatal error */
549 "NSI Parity Error", /* bit 2, non-fatal */
552 "Correctable Error Message", /* bit 5, non-fatal */
553 "Non-Fatal Error Message", /* bit 6, non-fatal */
554 "Fatal Error Message", /* bit 7, fatal */
556 "Receiver Error", /* bit 9, non-fatal */
558 "Bad TLP", /* bit 11, non-fatal */
559 "Bad DLLP", /* bit 12, non-fatal */
560 "REPLAY_NUM Rollover", /* bit 13, non-fatal */
562 "Replay Timer Timeout", /* bit 15, non-fatal */
566 "Data Link Protocol Error", /* bit 19, fatal */
568 "Poisoned TLP", /* bit 21, non-fatal */
570 "Completion Timeout", /* bit 23, non-fatal */
571 "Completer Abort", /* bit 24, non-fatal */
572 "Unexpected Completion", /* bit 25, non-fatal */
573 "Receiver Overflow", /* bit 26, fatal */
574 "Malformed TLP", /* bit 27, fatal */
576 "Unsupported Request" /* bit 29, non-fatal */
579 static void do_nsi_error(int fatal
, u32 errors
)
583 for (i
= 0; i
< 30; i
++) {
584 if (errors
& (1 << i
))
585 printk(KERN_WARNING
"%sError %s\n",
586 fatal_message
[fatal
], nsi_message
[i
]);
590 static inline void nsi_error(int fatal
, u32 errors
, int *error_found
,
596 do_nsi_error(fatal
, errors
);
599 static char *membuf_message
[4] = {
600 "Internal PMWB to DRAM parity",
601 "Internal PMWB to System Bus Parity",
602 "Internal System Bus or IO to PMWB Parity",
603 "Internal DRAM to PMWB Parity"
606 static void do_membuf_error(u8 errors
)
610 for (i
= 0; i
< 4; i
++) {
611 if (errors
& (1 << i
))
612 e752x_printk(KERN_WARNING
, "Non-Fatal Error %s\n",
617 static inline void membuf_error(u8 errors
, int *error_found
, int handle_error
)
622 do_membuf_error(errors
);
625 static char *sysbus_message
[10] = {
626 "Addr or Request Parity",
627 "Data Strobe Glitch",
628 "Addr Strobe Glitch",
631 "Non DRAM Lock Error",
634 "IO Subsystem Parity"
637 static void do_sysbus_error(int fatal
, u32 errors
)
641 for (i
= 0; i
< 10; i
++) {
642 if (errors
& (1 << i
))
643 e752x_printk(KERN_WARNING
, "%sError System Bus %s\n",
644 fatal_message
[fatal
], sysbus_message
[i
]);
648 static inline void sysbus_error(int fatal
, u32 errors
, int *error_found
,
654 do_sysbus_error(fatal
, errors
);
657 static void e752x_check_hub_interface(struct e752x_error_info
*info
,
658 int *error_found
, int handle_error
)
662 //pci_read_config_byte(dev,E752X_HI_FERR,&stat8);
664 stat8
= info
->hi_ferr
;
666 if (stat8
& 0x7f) { /* Error, so process */
670 hub_error(1, stat8
& 0x2b, error_found
, handle_error
);
673 hub_error(0, stat8
& 0x54, error_found
, handle_error
);
675 //pci_read_config_byte(dev,E752X_HI_NERR,&stat8);
677 stat8
= info
->hi_nerr
;
679 if (stat8
& 0x7f) { /* Error, so process */
683 hub_error(1, stat8
& 0x2b, error_found
, handle_error
);
686 hub_error(0, stat8
& 0x54, error_found
, handle_error
);
690 static void e752x_check_ns_interface(struct e752x_error_info
*info
,
691 int *error_found
, int handle_error
)
695 stat32
= info
->nsi_ferr
;
696 if (stat32
& NSI_ERR_MASK
) { /* Error, so process */
697 if (stat32
& NSI_FATAL_MASK
) /* check for fatal errors */
698 nsi_error(1, stat32
& NSI_FATAL_MASK
, error_found
,
700 if (stat32
& NSI_NON_FATAL_MASK
) /* check for non-fatal ones */
701 nsi_error(0, stat32
& NSI_NON_FATAL_MASK
, error_found
,
704 stat32
= info
->nsi_nerr
;
705 if (stat32
& NSI_ERR_MASK
) {
706 if (stat32
& NSI_FATAL_MASK
)
707 nsi_error(1, stat32
& NSI_FATAL_MASK
, error_found
,
709 if (stat32
& NSI_NON_FATAL_MASK
)
710 nsi_error(0, stat32
& NSI_NON_FATAL_MASK
, error_found
,
715 static void e752x_check_sysbus(struct e752x_error_info
*info
,
716 int *error_found
, int handle_error
)
720 //pci_read_config_dword(dev,E752X_SYSBUS_FERR,&stat32);
721 stat32
= info
->sysbus_ferr
+ (info
->sysbus_nerr
<< 16);
724 return; /* no errors */
726 error32
= (stat32
>> 16) & 0x3ff;
727 stat32
= stat32
& 0x3ff;
730 sysbus_error(1, stat32
& 0x087, error_found
, handle_error
);
733 sysbus_error(0, stat32
& 0x378, error_found
, handle_error
);
736 sysbus_error(1, error32
& 0x087, error_found
, handle_error
);
739 sysbus_error(0, error32
& 0x378, error_found
, handle_error
);
742 static void e752x_check_membuf(struct e752x_error_info
*info
,
743 int *error_found
, int handle_error
)
747 stat8
= info
->buf_ferr
;
749 if (stat8
& 0x0f) { /* Error, so process */
751 membuf_error(stat8
, error_found
, handle_error
);
754 stat8
= info
->buf_nerr
;
756 if (stat8
& 0x0f) { /* Error, so process */
758 membuf_error(stat8
, error_found
, handle_error
);
762 static void e752x_check_dram(struct mem_ctl_info
*mci
,
763 struct e752x_error_info
*info
, int *error_found
,
766 u16 error_one
, error_next
;
768 error_one
= info
->dram_ferr
;
769 error_next
= info
->dram_nerr
;
771 /* decode and report errors */
772 if (error_one
& 0x0101) /* check first error correctable */
773 process_ce(mci
, error_one
, info
->dram_sec1_add
,
774 info
->dram_sec1_syndrome
, error_found
, handle_error
);
776 if (error_next
& 0x0101) /* check next error correctable */
777 process_ce(mci
, error_next
, info
->dram_sec2_add
,
778 info
->dram_sec2_syndrome
, error_found
, handle_error
);
780 if (error_one
& 0x4040)
781 process_ue_no_info_wr(mci
, error_found
, handle_error
);
783 if (error_next
& 0x4040)
784 process_ue_no_info_wr(mci
, error_found
, handle_error
);
786 if (error_one
& 0x2020)
787 process_ded_retry(mci
, error_one
, info
->dram_retr_add
,
788 error_found
, handle_error
);
790 if (error_next
& 0x2020)
791 process_ded_retry(mci
, error_next
, info
->dram_retr_add
,
792 error_found
, handle_error
);
794 if (error_one
& 0x0808)
795 process_threshold_ce(mci
, error_one
, error_found
, handle_error
);
797 if (error_next
& 0x0808)
798 process_threshold_ce(mci
, error_next
, error_found
,
801 if (error_one
& 0x0606)
802 process_ue(mci
, error_one
, info
->dram_ded_add
,
803 info
->dram_scrb_add
, error_found
, handle_error
);
805 if (error_next
& 0x0606)
806 process_ue(mci
, error_next
, info
->dram_ded_add
,
807 info
->dram_scrb_add
, error_found
, handle_error
);
810 static void e752x_get_error_info(struct mem_ctl_info
*mci
,
811 struct e752x_error_info
*info
)
814 struct e752x_pvt
*pvt
;
816 memset(info
, 0, sizeof(*info
));
817 pvt
= (struct e752x_pvt
*)mci
->pvt_info
;
819 pci_read_config_dword(dev
, E752X_FERR_GLOBAL
, &info
->ferr_global
);
821 if (info
->ferr_global
) {
822 if (pvt
->dev_info
->err_dev
== PCI_DEVICE_ID_INTEL_3100_1_ERR
) {
823 pci_read_config_dword(dev
, I3100_NSI_FERR
,
827 pci_read_config_byte(dev
, E752X_HI_FERR
,
831 pci_read_config_word(dev
, E752X_SYSBUS_FERR
,
833 pci_read_config_byte(dev
, E752X_BUF_FERR
, &info
->buf_ferr
);
834 pci_read_config_word(dev
, E752X_DRAM_FERR
, &info
->dram_ferr
);
835 pci_read_config_dword(dev
, E752X_DRAM_SEC1_ADD
,
836 &info
->dram_sec1_add
);
837 pci_read_config_word(dev
, E752X_DRAM_SEC1_SYNDROME
,
838 &info
->dram_sec1_syndrome
);
839 pci_read_config_dword(dev
, E752X_DRAM_DED_ADD
,
840 &info
->dram_ded_add
);
841 pci_read_config_dword(dev
, E752X_DRAM_SCRB_ADD
,
842 &info
->dram_scrb_add
);
843 pci_read_config_dword(dev
, E752X_DRAM_RETR_ADD
,
844 &info
->dram_retr_add
);
846 /* ignore the reserved bits just in case */
847 if (info
->hi_ferr
& 0x7f)
848 pci_write_config_byte(dev
, E752X_HI_FERR
,
851 if (info
->nsi_ferr
& NSI_ERR_MASK
)
852 pci_write_config_dword(dev
, I3100_NSI_FERR
,
855 if (info
->sysbus_ferr
)
856 pci_write_config_word(dev
, E752X_SYSBUS_FERR
,
859 if (info
->buf_ferr
& 0x0f)
860 pci_write_config_byte(dev
, E752X_BUF_FERR
,
864 pci_write_bits16(pvt
->bridge_ck
, E752X_DRAM_FERR
,
865 info
->dram_ferr
, info
->dram_ferr
);
867 pci_write_config_dword(dev
, E752X_FERR_GLOBAL
,
871 pci_read_config_dword(dev
, E752X_NERR_GLOBAL
, &info
->nerr_global
);
873 if (info
->nerr_global
) {
874 if (pvt
->dev_info
->err_dev
== PCI_DEVICE_ID_INTEL_3100_1_ERR
) {
875 pci_read_config_dword(dev
, I3100_NSI_NERR
,
879 pci_read_config_byte(dev
, E752X_HI_NERR
,
883 pci_read_config_word(dev
, E752X_SYSBUS_NERR
,
885 pci_read_config_byte(dev
, E752X_BUF_NERR
, &info
->buf_nerr
);
886 pci_read_config_word(dev
, E752X_DRAM_NERR
, &info
->dram_nerr
);
887 pci_read_config_dword(dev
, E752X_DRAM_SEC2_ADD
,
888 &info
->dram_sec2_add
);
889 pci_read_config_word(dev
, E752X_DRAM_SEC2_SYNDROME
,
890 &info
->dram_sec2_syndrome
);
892 if (info
->hi_nerr
& 0x7f)
893 pci_write_config_byte(dev
, E752X_HI_NERR
,
896 if (info
->nsi_nerr
& NSI_ERR_MASK
)
897 pci_write_config_dword(dev
, I3100_NSI_NERR
,
900 if (info
->sysbus_nerr
)
901 pci_write_config_word(dev
, E752X_SYSBUS_NERR
,
904 if (info
->buf_nerr
& 0x0f)
905 pci_write_config_byte(dev
, E752X_BUF_NERR
,
909 pci_write_bits16(pvt
->bridge_ck
, E752X_DRAM_NERR
,
910 info
->dram_nerr
, info
->dram_nerr
);
912 pci_write_config_dword(dev
, E752X_NERR_GLOBAL
,
917 static int e752x_process_error_info(struct mem_ctl_info
*mci
,
918 struct e752x_error_info
*info
,
925 error32
= (info
->ferr_global
>> 18) & 0x3ff;
926 stat32
= (info
->ferr_global
>> 4) & 0x7ff;
929 global_error(1, error32
, &error_found
, handle_errors
);
932 global_error(0, stat32
, &error_found
, handle_errors
);
934 error32
= (info
->nerr_global
>> 18) & 0x3ff;
935 stat32
= (info
->nerr_global
>> 4) & 0x7ff;
938 global_error(1, error32
, &error_found
, handle_errors
);
941 global_error(0, stat32
, &error_found
, handle_errors
);
943 e752x_check_hub_interface(info
, &error_found
, handle_errors
);
944 e752x_check_ns_interface(info
, &error_found
, handle_errors
);
945 e752x_check_sysbus(info
, &error_found
, handle_errors
);
946 e752x_check_membuf(info
, &error_found
, handle_errors
);
947 e752x_check_dram(mci
, info
, &error_found
, handle_errors
);
951 static void e752x_check(struct mem_ctl_info
*mci
)
953 struct e752x_error_info info
;
955 debugf3("%s()\n", __func__
);
956 e752x_get_error_info(mci
, &info
);
957 e752x_process_error_info(mci
, &info
, 1);
960 /* Program byte/sec bandwidth scrub rate to hardware */
961 static int set_sdram_scrub_rate(struct mem_ctl_info
*mci
, u32
*new_bw
)
963 const struct scrubrate
*scrubrates
;
964 struct e752x_pvt
*pvt
= (struct e752x_pvt
*) mci
->pvt_info
;
965 struct pci_dev
*pdev
= pvt
->dev_d0f0
;
968 if (pvt
->dev_info
->ctl_dev
== PCI_DEVICE_ID_INTEL_3100_0
)
969 scrubrates
= scrubrates_i3100
;
971 scrubrates
= scrubrates_e752x
;
973 /* Translate the desired scrub rate to a e752x/3100 register value.
974 * Search for the bandwidth that is equal or greater than the
975 * desired rate and program the cooresponding register value.
977 for (i
= 0; scrubrates
[i
].bandwidth
!= SDRATE_EOT
; i
++)
978 if (scrubrates
[i
].bandwidth
>= *new_bw
)
981 if (scrubrates
[i
].bandwidth
== SDRATE_EOT
)
984 pci_write_config_word(pdev
, E752X_MCHSCRB
, scrubrates
[i
].scrubval
);
989 /* Convert current scrub rate value into byte/sec bandwidth */
990 static int get_sdram_scrub_rate(struct mem_ctl_info
*mci
, u32
*bw
)
992 const struct scrubrate
*scrubrates
;
993 struct e752x_pvt
*pvt
= (struct e752x_pvt
*) mci
->pvt_info
;
994 struct pci_dev
*pdev
= pvt
->dev_d0f0
;
998 if (pvt
->dev_info
->ctl_dev
== PCI_DEVICE_ID_INTEL_3100_0
)
999 scrubrates
= scrubrates_i3100
;
1001 scrubrates
= scrubrates_e752x
;
1003 /* Find the bandwidth matching the memory scrubber configuration */
1004 pci_read_config_word(pdev
, E752X_MCHSCRB
, &scrubval
);
1005 scrubval
= scrubval
& 0x0f;
1007 for (i
= 0; scrubrates
[i
].bandwidth
!= SDRATE_EOT
; i
++)
1008 if (scrubrates
[i
].scrubval
== scrubval
)
1011 if (scrubrates
[i
].bandwidth
== SDRATE_EOT
) {
1012 e752x_printk(KERN_WARNING
,
1013 "Invalid sdram scrub control value: 0x%x\n", scrubval
);
1017 *bw
= scrubrates
[i
].bandwidth
;
1022 /* Return 1 if dual channel mode is active. Else return 0. */
1023 static inline int dual_channel_active(u16 ddrcsr
)
1025 return (((ddrcsr
>> 12) & 3) == 3);
1028 /* Remap csrow index numbers if map_type is "reverse"
1030 static inline int remap_csrow_index(struct mem_ctl_info
*mci
, int index
)
1032 struct e752x_pvt
*pvt
= mci
->pvt_info
;
1040 static void e752x_init_csrows(struct mem_ctl_info
*mci
, struct pci_dev
*pdev
,
1043 struct csrow_info
*csrow
;
1044 unsigned long last_cumul_size
;
1045 int index
, mem_dev
, drc_chan
;
1046 int drc_drbg
; /* DRB granularity 0=64mb, 1=128mb */
1047 int drc_ddim
; /* DRAM Data Integrity Mode 0=none, 2=edac */
1049 u32 dra
, drc
, cumul_size
;
1052 for (index
= 0; index
< 4; index
++) {
1054 pci_read_config_byte(pdev
, E752X_DRA
+ index
, &dra_reg
);
1055 dra
|= dra_reg
<< (index
* 8);
1057 pci_read_config_dword(pdev
, E752X_DRC
, &drc
);
1058 drc_chan
= dual_channel_active(ddrcsr
);
1059 drc_drbg
= drc_chan
+ 1; /* 128 in dual mode, 64 in single */
1060 drc_ddim
= (drc
>> 20) & 0x3;
1062 /* The dram row boundary (DRB) reg values are boundary address for
1063 * each DRAM row with a granularity of 64 or 128MB (single/dual
1064 * channel operation). DRB regs are cumulative; therefore DRB7 will
1065 * contain the total memory contained in all eight rows.
1067 for (last_cumul_size
= index
= 0; index
< mci
->nr_csrows
; index
++) {
1068 /* mem_dev 0=x8, 1=x4 */
1069 mem_dev
= (dra
>> (index
* 4 + 2)) & 0x3;
1070 csrow
= &mci
->csrows
[remap_csrow_index(mci
, index
)];
1072 mem_dev
= (mem_dev
== 2);
1073 pci_read_config_byte(pdev
, E752X_DRB
+ index
, &value
);
1074 /* convert a 128 or 64 MiB DRB to a page size. */
1075 cumul_size
= value
<< (25 + drc_drbg
- PAGE_SHIFT
);
1076 debugf3("%s(): (%d) cumul_size 0x%x\n", __func__
, index
,
1078 if (cumul_size
== last_cumul_size
)
1079 continue; /* not populated */
1081 csrow
->first_page
= last_cumul_size
;
1082 csrow
->last_page
= cumul_size
- 1;
1083 csrow
->nr_pages
= cumul_size
- last_cumul_size
;
1084 last_cumul_size
= cumul_size
;
1085 csrow
->grain
= 1 << 12; /* 4KiB - resolution of CELOG */
1086 csrow
->mtype
= MEM_RDDR
; /* only one type supported */
1087 csrow
->dtype
= mem_dev
? DEV_X4
: DEV_X8
;
1090 * if single channel or x8 devices then SECDED
1091 * if dual channel and x4 then S4ECD4ED
1094 if (drc_chan
&& mem_dev
) {
1095 csrow
->edac_mode
= EDAC_S4ECD4ED
;
1096 mci
->edac_cap
|= EDAC_FLAG_S4ECD4ED
;
1098 csrow
->edac_mode
= EDAC_SECDED
;
1099 mci
->edac_cap
|= EDAC_FLAG_SECDED
;
1102 csrow
->edac_mode
= EDAC_NONE
;
1106 static void e752x_init_mem_map_table(struct pci_dev
*pdev
,
1107 struct e752x_pvt
*pvt
)
1110 u8 value
, last
, row
;
1115 for (index
= 0; index
< 8; index
+= 2) {
1116 pci_read_config_byte(pdev
, E752X_DRB
+ index
, &value
);
1117 /* test if there is a dimm in this slot */
1118 if (value
== last
) {
1119 /* no dimm in the slot, so flag it as empty */
1120 pvt
->map
[index
] = 0xff;
1121 pvt
->map
[index
+ 1] = 0xff;
1122 } else { /* there is a dimm in the slot */
1123 pvt
->map
[index
] = row
;
1126 /* test the next value to see if the dimm is double
1129 pci_read_config_byte(pdev
, E752X_DRB
+ index
+ 1,
1132 /* the dimm is single sided, so flag as empty */
1133 /* this is a double sided dimm to save the next row #*/
1134 pvt
->map
[index
+ 1] = (value
== last
) ? 0xff : row
;
1141 /* Return 0 on success or 1 on failure. */
1142 static int e752x_get_devs(struct pci_dev
*pdev
, int dev_idx
,
1143 struct e752x_pvt
*pvt
)
1145 struct pci_dev
*dev
;
1147 pvt
->bridge_ck
= pci_get_device(PCI_VENDOR_ID_INTEL
,
1148 pvt
->dev_info
->err_dev
, pvt
->bridge_ck
);
1150 if (pvt
->bridge_ck
== NULL
)
1151 pvt
->bridge_ck
= pci_scan_single_device(pdev
->bus
,
1154 if (pvt
->bridge_ck
== NULL
) {
1155 e752x_printk(KERN_ERR
, "error reporting device not found:"
1156 "vendor %x device 0x%x (broken BIOS?)\n",
1157 PCI_VENDOR_ID_INTEL
, e752x_devs
[dev_idx
].err_dev
);
1161 dev
= pci_get_device(PCI_VENDOR_ID_INTEL
,
1162 e752x_devs
[dev_idx
].ctl_dev
,
1168 pvt
->dev_d0f0
= dev
;
1169 pvt
->dev_d0f1
= pci_dev_get(pvt
->bridge_ck
);
1174 pci_dev_put(pvt
->bridge_ck
);
1178 /* Setup system bus parity mask register.
1179 * Sysbus parity supported on:
1180 * e7320/e7520/e7525 + Xeon
1182 static void e752x_init_sysbus_parity_mask(struct e752x_pvt
*pvt
)
1184 char *cpu_id
= cpu_data(0).x86_model_id
;
1185 struct pci_dev
*dev
= pvt
->dev_d0f1
;
1188 /* Allow module parameter override, else see if CPU supports parity */
1189 if (sysbus_parity
!= -1) {
1190 enable
= sysbus_parity
;
1191 } else if (cpu_id
[0] && !strstr(cpu_id
, "Xeon")) {
1192 e752x_printk(KERN_INFO
, "System Bus Parity not "
1193 "supported by CPU, disabling\n");
1198 pci_write_config_word(dev
, E752X_SYSBUS_ERRMASK
, 0x0000);
1200 pci_write_config_word(dev
, E752X_SYSBUS_ERRMASK
, 0x0309);
1203 static void e752x_init_error_reporting_regs(struct e752x_pvt
*pvt
)
1205 struct pci_dev
*dev
;
1207 dev
= pvt
->dev_d0f1
;
1208 /* Turn off error disable & SMI in case the BIOS turned it on */
1209 if (pvt
->dev_info
->err_dev
== PCI_DEVICE_ID_INTEL_3100_1_ERR
) {
1210 pci_write_config_dword(dev
, I3100_NSI_EMASK
, 0);
1211 pci_write_config_dword(dev
, I3100_NSI_SMICMD
, 0);
1213 pci_write_config_byte(dev
, E752X_HI_ERRMASK
, 0x00);
1214 pci_write_config_byte(dev
, E752X_HI_SMICMD
, 0x00);
1217 e752x_init_sysbus_parity_mask(pvt
);
1219 pci_write_config_word(dev
, E752X_SYSBUS_SMICMD
, 0x00);
1220 pci_write_config_byte(dev
, E752X_BUF_ERRMASK
, 0x00);
1221 pci_write_config_byte(dev
, E752X_BUF_SMICMD
, 0x00);
1222 pci_write_config_byte(dev
, E752X_DRAM_ERRMASK
, 0x00);
1223 pci_write_config_byte(dev
, E752X_DRAM_SMICMD
, 0x00);
/*
 * Probe and register one e752x/3100 memory controller hub.
 * @pdev:    device 0 function 0 of the MCH
 * @dev_idx: index into e752x_devs[] identifying the chipset variant
 *
 * Returns 0 on success, -ENODEV or -ENOMEM on failure.
 * NOTE(review): several error-path lines were lost in extraction and are
 * reconstructed here from the statements that reference them — verify
 * against the upstream file.
 */
static int e752x_probe1(struct pci_dev *pdev, int dev_idx)
{
	u16 pci_data;
	u8 stat8;
	struct mem_ctl_info *mci;
	struct e752x_pvt *pvt;
	u16 ddrcsr;
	int drc_chan;	/* Number of channels 0=1chan,1=2chan */
	struct e752x_error_info discard;

	debugf0("%s(): mci\n", __func__);
	debugf0("Starting Probe1\n");

	/* check to see if device 0 function 1 is enabled; if it isn't, we
	 * assume the BIOS has reserved it for a reason and is expecting
	 * exclusive access, we take care not to violate that assumption and
	 * fail the probe. */
	pci_read_config_byte(pdev, E752X_DEVPRES1, &stat8);
	if (!force_function_unhide && !(stat8 & (1 << 5))) {
		printk(KERN_INFO "Contact your BIOS vendor to see if the "
			"E752x error registers can be safely un-hidden\n");
		return -ENODEV;
	}
	/* module parameter was set: force-unhide D0:F1 */
	stat8 |= (1 << 5);
	pci_write_config_byte(pdev, E752X_DEVPRES1, stat8);

	pci_read_config_word(pdev, E752X_DDRCSR, &ddrcsr);
	/* FIXME: should check >>12 or 0xf, true for all? */
	/* Dual channel = 1, Single channel = 0 */
	drc_chan = dual_channel_active(ddrcsr);

	mci = edac_mc_alloc(sizeof(*pvt), E752X_NR_CSROWS, drc_chan + 1, 0);
	if (mci == NULL)
		return -ENOMEM;

	debugf3("%s(): init mci\n", __func__);
	mci->mtype_cap = MEM_FLAG_RDDR;
	/* 3100 IMCH supports SECDEC only */
	mci->edac_ctl_cap = (dev_idx == I3100) ? EDAC_FLAG_SECDED :
		(EDAC_FLAG_NONE | EDAC_FLAG_SECDED | EDAC_FLAG_S4ECD4ED);
	/* FIXME - what if different memory types are in different csrows? */
	mci->mod_name = EDAC_MOD_STR;
	mci->mod_ver = E752X_REVISION;
	mci->dev = &pdev->dev;

	debugf3("%s(): init pvt\n", __func__);
	pvt = (struct e752x_pvt *)mci->pvt_info;
	pvt->dev_info = &e752x_devs[dev_idx];
	/* bit 4 of DDRCSR indicates symmetric (interleaved) channel mode */
	pvt->mc_symmetric = ((ddrcsr & 0x10) != 0);

	/* take references on D0:F0, D0:F1 and the bridge device */
	if (e752x_get_devs(pdev, dev_idx, pvt)) {
		edac_mc_free(mci);
		return -ENODEV;
	}

	debugf3("%s(): more mci init\n", __func__);
	mci->ctl_name = pvt->dev_info->ctl_name;
	mci->dev_name = pci_name(pdev);
	mci->edac_check = e752x_check;
	mci->ctl_page_to_phys = ctl_page_to_phys;
	mci->set_sdram_scrub_rate = set_sdram_scrub_rate;
	mci->get_sdram_scrub_rate = get_sdram_scrub_rate;

	/* set the map type.  1 = normal, 0 = reversed
	 * Must be set before e752x_init_csrows in case csrow mapping
	 * is reversed.
	 */
	pci_read_config_byte(pdev, E752X_DRM, &stat8);
	pvt->map_type = ((stat8 & 0x0f) > ((stat8 >> 4) & 0x0f));

	e752x_init_csrows(mci, pdev, ddrcsr);
	e752x_init_mem_map_table(pdev, pvt);

	if (dev_idx == I3100)
		mci->edac_cap = EDAC_FLAG_SECDED; /* the only mode supported */
	else
		mci->edac_cap |= EDAC_FLAG_NONE;
	debugf3("%s(): tolm, remapbase, remaplimit\n", __func__);

	/* load the top of low memory, remap base, and remap limit vars */
	pci_read_config_word(pdev, E752X_TOLM, &pci_data);
	pvt->tolm = ((u32) pci_data) << 4;
	pci_read_config_word(pdev, E752X_REMAPBASE, &pci_data);
	pvt->remapbase = ((u32) pci_data) << 14;
	pci_read_config_word(pdev, E752X_REMAPLIMIT, &pci_data);
	pvt->remaplimit = ((u32) pci_data) << 14;
	e752x_printk(KERN_INFO,
			"tolm = %x, remapbase = %x, remaplimit = %x\n",
			pvt->tolm, pvt->remapbase, pvt->remaplimit);

	/* Here we assume that we will never see multiple instances of this
	 * type of memory controller.  The ID is therefore hardcoded to 0.
	 */
	if (edac_mc_add_mc(mci)) {
		debugf3("%s(): failed edac_mc_add_mc()\n", __func__);
		goto fail;
	}

	e752x_init_error_reporting_regs(pvt);
	e752x_get_error_info(mci, &discard);	/* clear other MCH errors */

	/* allocating generic PCI control info */
	e752x_pci = edac_pci_create_generic_ctl(&pdev->dev, EDAC_MOD_STR);
	if (!e752x_pci) {
		/* non-fatal: MC reporting still works without the PCI ctl */
		printk(KERN_WARNING
			"%s(): Unable to create PCI control\n", __func__);
		printk(KERN_WARNING
			"%s(): PCI error report via EDAC not setup\n",
			__func__);
	}

	/* get this far and it's successful */
	debugf3("%s(): success\n", __func__);
	return 0;

fail:
	/* drop the references taken by e752x_get_devs() and free the mci */
	pci_dev_put(pvt->dev_d0f0);
	pci_dev_put(pvt->dev_d0f1);
	pci_dev_put(pvt->bridge_ck);
	edac_mc_free(mci);

	return -ENODEV;
}
1352 /* returns count (>= 0), or negative on error */
1353 static int __devinit
e752x_init_one(struct pci_dev
*pdev
,
1354 const struct pci_device_id
*ent
)
1356 debugf0("%s()\n", __func__
);
1358 /* wake up and enable device */
1359 if (pci_enable_device(pdev
) < 0)
1362 return e752x_probe1(pdev
, ent
->driver_data
);
/*
 * PCI remove hook: undo everything e752x_probe1() set up, in reverse
 * order — release the generic PCI error control, deregister the memory
 * controller, drop the extra device references, and free the mci.
 */
static void __devexit e752x_remove_one(struct pci_dev *pdev)
{
	struct mem_ctl_info *mci;
	struct e752x_pvt *pvt;

	debugf0("%s()\n", __func__);

	/* release the generic PCI control, if probe managed to create it */
	if (e752x_pci)
		edac_pci_release_generic_ctl(e752x_pci);

	/* NULL means we were never registered with the EDAC core */
	if ((mci = edac_mc_del_mc(&pdev->dev)) == NULL)
		return;

	pvt = (struct e752x_pvt *)mci->pvt_info;
	/* drop the references taken by e752x_get_devs() during probe */
	pci_dev_put(pvt->dev_d0f0);
	pci_dev_put(pvt->dev_d0f1);
	pci_dev_put(pvt->bridge_ck);
	edac_mc_free(mci);
}
/*
 * PCI IDs this driver binds to (device 0 function 0 of each MCH).
 * driver_data carries the e752x_chips index consumed by e752x_probe1().
 * NOTE(review): the driver_data enum values were partially lost in
 * extraction and are reconstructed — verify against enum e752x_chips.
 */
static const struct pci_device_id e752x_pci_tbl[] __devinitdata = {
	{
	 PCI_VEND_DEV(INTEL, 7520_0), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
	 E7520},
	{
	 PCI_VEND_DEV(INTEL, 7525_0), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
	 E7525},
	{
	 PCI_VEND_DEV(INTEL, 7320_0), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
	 E7320},
	{
	 PCI_VEND_DEV(INTEL, 3100_0), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
	 I3100},
	{
	 0,
	 }			/* 0 terminated list. */
};

MODULE_DEVICE_TABLE(pci, e752x_pci_tbl);
/* PCI driver glue: probe/remove hooks plus the ID table above */
static struct pci_driver e752x_driver = {
	.name = EDAC_MOD_STR,
	.probe = e752x_init_one,
	.remove = __devexit_p(e752x_remove_one),
	.id_table = e752x_pci_tbl,
};
/*
 * Module init: set the EDAC operating state, then register the PCI
 * driver.  Returns 0 or the negative errno from registration.
 */
static int __init e752x_init(void)
{
	int pci_rc;

	debugf3("%s()\n", __func__);

	/* Ensure that the OPSTATE is set correctly for POLL or NMI */
	opstate_init();

	pci_rc = pci_register_driver(&e752x_driver);
	return (pci_rc < 0) ? pci_rc : 0;
}
/* Module exit: unregister the driver; per-device teardown happens in
 * e752x_remove_one() as each bound device is released. */
static void __exit e752x_exit(void)
{
	debugf3("%s()\n", __func__);
	pci_unregister_driver(&e752x_driver);
}
module_init(e752x_init);
module_exit(e752x_exit);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Linux Networx (http://lnxi.com) Tom Zimmerman\n");
MODULE_DESCRIPTION("MC support for Intel e752x/3100 memory controllers");

/* consumed by e752x_probe1(): bind even when BIOS hides Dev0:Fun1 */
module_param(force_function_unhide, int, 0444);
MODULE_PARM_DESC(force_function_unhide, "if BIOS sets Dev0:Fun1 up as hidden:"
		 " 1=force unhide and hope BIOS doesn't fight driver for "
		 "Dev0:Fun1 access");

module_param(edac_op_state, int, 0444);
MODULE_PARM_DESC(edac_op_state, "EDAC Error Reporting state: 0=Poll,1=NMI");

/* default -1 = auto-detect from the CPU model string (non-Xeon disables) */
module_param(sysbus_parity, int, 0444);
MODULE_PARM_DESC(sysbus_parity, "0=disable system bus parity checking,"
		 " 1=enable system bus parity checking, default=auto-detect");
module_param(report_non_memory_errors, int, 0644);
MODULE_PARM_DESC(report_non_memory_errors, "0=disable non-memory error "
		 "reporting, 1=enable non-memory error reporting");