/*
 * Intel 5100 Memory Controllers kernel module
 *
 * This file may be distributed under the terms of the
 * GNU General Public License.
 *
 * This module is based on the following document:
 *
 * Intel 5100X Chipset Memory Controller Hub (MCH) - Datasheet
 *      http://download.intel.com/design/chipsets/datashts/318378.pdf
 */
#include <linux/module.h>
#include <linux/init.h>
#include <linux/pci.h>
#include <linux/pci_ids.h>
#include <linux/slab.h>
#include <linux/edac.h>
#include <linux/delay.h>
#include <linux/mmzone.h>

#include "edac_core.h"
/* register addresses */

/* device 16, func 1 */
#define I5100_MC		0x40	/* Memory Control Register */
#define I5100_MS		0x44	/* Memory Status Register */
#define I5100_SPDDATA		0x48	/* Serial Presence Detect Status Reg */
#define I5100_SPDCMD		0x4c	/* Serial Presence Detect Command Reg */
#define I5100_TOLM		0x6c	/* Top of Low Memory */
#define I5100_MIR0		0x80	/* Memory Interleave Range 0 */
#define I5100_MIR1		0x84	/* Memory Interleave Range 1 */
#define I5100_AMIR_0		0x8c	/* Adjusted Memory Interleave Range 0 */
#define I5100_AMIR_1		0x90	/* Adjusted Memory Interleave Range 1 */
#define I5100_FERR_NF_MEM	0xa0	/* MC First Non Fatal Errors */
#define		I5100_FERR_NF_MEM_M16ERR_MASK	(1 << 16)
#define		I5100_FERR_NF_MEM_M15ERR_MASK	(1 << 15)
#define		I5100_FERR_NF_MEM_M14ERR_MASK	(1 << 14)
#define		I5100_FERR_NF_MEM_M12ERR_MASK	(1 << 12)
#define		I5100_FERR_NF_MEM_M11ERR_MASK	(1 << 11)
#define		I5100_FERR_NF_MEM_M10ERR_MASK	(1 << 10)
#define		I5100_FERR_NF_MEM_M6ERR_MASK	(1 << 6)
#define		I5100_FERR_NF_MEM_M5ERR_MASK	(1 << 5)
#define		I5100_FERR_NF_MEM_M4ERR_MASK	(1 << 4)
#define		I5100_FERR_NF_MEM_M1ERR_MASK	1
#define		I5100_FERR_NF_MEM_ANY_MASK	\
			(I5100_FERR_NF_MEM_M16ERR_MASK | \
			I5100_FERR_NF_MEM_M15ERR_MASK | \
			I5100_FERR_NF_MEM_M14ERR_MASK | \
			I5100_FERR_NF_MEM_M12ERR_MASK | \
			I5100_FERR_NF_MEM_M11ERR_MASK | \
			I5100_FERR_NF_MEM_M10ERR_MASK | \
			I5100_FERR_NF_MEM_M6ERR_MASK | \
			I5100_FERR_NF_MEM_M5ERR_MASK | \
			I5100_FERR_NF_MEM_M4ERR_MASK | \
			I5100_FERR_NF_MEM_M1ERR_MASK)
#define I5100_NERR_NF_MEM	0xa4	/* MC Next Non-Fatal Errors */
#define I5100_EMASK_MEM		0xa8	/* MC Error Mask Register */
/* device 21 and 22, func 0 */
#define I5100_MTR_0	0x154	/* Memory Technology Registers 0-3 */
#define I5100_DMIR	0x15c	/* DIMM Interleave Range */
#define I5100_VALIDLOG	0x18c	/* Valid Log Markers */
#define I5100_NRECMEMA	0x190	/* Non-Recoverable Memory Error Log Reg A */
#define I5100_NRECMEMB	0x194	/* Non-Recoverable Memory Error Log Reg B */
#define I5100_REDMEMA	0x198	/* Recoverable Memory Data Error Log Reg A */
#define I5100_REDMEMB	0x19c	/* Recoverable Memory Data Error Log Reg B */
#define I5100_RECMEMA	0x1a0	/* Recoverable Memory Error Log Reg A */
#define I5100_RECMEMB	0x1a4	/* Recoverable Memory Error Log Reg B */
#define I5100_MTR_4	0x1b0	/* Memory Technology Registers 4,5 */
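
/*
 * Note on the non-fatal error registers above: FERR_NF_MEM latches the
 * first non-fatal memory error observed and NERR_NF_MEM records any that
 * follow while the first is still latched; i5100_check_error() below reads
 * both and writes the set bits back to clear them.  See the 5100 MCH
 * datasheet for the authoritative register behavior.
 */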
/* bit field accessors */

static inline u32 i5100_mc_errdeten(u32 mc)
{
	return mc >> 5 & 1;
}

static inline u16 i5100_spddata_rdo(u16 a)
{
	return a >> 15 & 1;
}

static inline u16 i5100_spddata_sbe(u16 a)
{
	return a >> 13 & 1;
}

static inline u16 i5100_spddata_busy(u16 a)
{
	return a >> 12 & 1;
}

static inline u16 i5100_spddata_data(u16 a)
{
	return a & ((1 << 8) - 1);
}
static inline u32 i5100_spdcmd_create(u32 dti, u32 ckovrd, u32 sa, u32 ba,
				      u32 data, u32 cmd)
{
	return	((dti & ((1 << 4) - 1)) << 28) |
		((ckovrd & 1) << 27) |
		((sa & ((1 << 3) - 1)) << 24) |
		((ba & ((1 << 8) - 1)) << 16) |
		((data & ((1 << 8) - 1)) << 8) |
		(cmd & ((1 << 8) - 1));
}
static inline u16 i5100_tolm_tolm(u16 a)
{
	return a >> 12 & ((1 << 4) - 1);
}

static inline u16 i5100_mir_limit(u16 a)
{
	return a >> 4 & ((1 << 12) - 1);
}

static inline u16 i5100_mir_way1(u16 a)
{
	return a >> 1 & 1;
}

static inline u16 i5100_mir_way0(u16 a)
{
	return a & 1;
}

static inline u32 i5100_ferr_nf_mem_chan_indx(u32 a)
{
	return a >> 28 & 1;
}

static inline u32 i5100_ferr_nf_mem_any(u32 a)
{
	return a & I5100_FERR_NF_MEM_ANY_MASK;
}

static inline u32 i5100_nerr_nf_mem_any(u32 a)
{
	return i5100_ferr_nf_mem_any(a);
}

static inline u32 i5100_dmir_limit(u32 a)
{
	return a >> 16 & ((1 << 11) - 1);
}

static inline u32 i5100_dmir_rank(u32 a, u32 i)
{
	return a >> (4 * i) & ((1 << 2) - 1);
}
static inline u16 i5100_mtr_present(u16 a)
{
	return a >> 10 & 1;
}

static inline u16 i5100_mtr_ethrottle(u16 a)
{
	return a >> 9 & 1;
}

static inline u16 i5100_mtr_width(u16 a)
{
	return a >> 8 & 1;
}

static inline u16 i5100_mtr_numbank(u16 a)
{
	return a >> 6 & 1;
}

static inline u16 i5100_mtr_numrow(u16 a)
{
	return a >> 2 & ((1 << 2) - 1);
}

static inline u16 i5100_mtr_numcol(u16 a)
{
	return a & ((1 << 2) - 1);
}
static inline u32 i5100_validlog_redmemvalid(u32 a)
{
	return a >> 2 & 1;
}

static inline u32 i5100_validlog_recmemvalid(u32 a)
{
	return a >> 1 & 1;
}

static inline u32 i5100_validlog_nrecmemvalid(u32 a)
{
	return a & 1;
}

static inline u32 i5100_nrecmema_merr(u32 a)
{
	return a >> 15 & ((1 << 5) - 1);
}

static inline u32 i5100_nrecmema_bank(u32 a)
{
	return a >> 12 & ((1 << 3) - 1);
}

static inline u32 i5100_nrecmema_rank(u32 a)
{
	return a >> 8 & ((1 << 3) - 1);
}

static inline u32 i5100_nrecmema_dm_buf_id(u32 a)
{
	return a & ((1 << 8) - 1);
}

static inline u32 i5100_nrecmemb_cas(u32 a)
{
	return a >> 16 & ((1 << 13) - 1);
}

static inline u32 i5100_nrecmemb_ras(u32 a)
{
	return a & ((1 << 16) - 1);
}

static inline u32 i5100_redmemb_ecc_locator(u32 a)
{
	return a & ((1 << 18) - 1);
}

static inline u32 i5100_recmema_merr(u32 a)
{
	return i5100_nrecmema_merr(a);
}

static inline u32 i5100_recmema_bank(u32 a)
{
	return i5100_nrecmema_bank(a);
}

static inline u32 i5100_recmema_rank(u32 a)
{
	return i5100_nrecmema_rank(a);
}

static inline u32 i5100_recmema_dm_buf_id(u32 a)
{
	return i5100_nrecmema_dm_buf_id(a);
}

static inline u32 i5100_recmemb_cas(u32 a)
{
	return i5100_nrecmemb_cas(a);
}

static inline u32 i5100_recmemb_ras(u32 a)
{
	return i5100_nrecmemb_ras(a);
}
/* some generic limits */
#define I5100_MAX_RANKS_PER_CTLR	6
#define I5100_MAX_CTLRS			2
#define I5100_MAX_RANKS_PER_DIMM	4
#define I5100_DIMM_ADDR_LINES		(6 - 3)	/* 64 bits / 8 bits per byte */
#define I5100_MAX_DIMM_SLOTS_PER_CTLR	4
#define I5100_MAX_RANK_INTERLEAVE	4
#define I5100_MAX_DMIRS			5
struct i5100_priv {
	/* ranks on each dimm -- 0 maps to not present -- obtained via SPD */
	int dimm_numrank[I5100_MAX_CTLRS][I5100_MAX_DIMM_SLOTS_PER_CTLR];

	/*
	 * mainboard chip select map -- maps i5100 chip selects to
	 * DIMM slot chip selects.  In the case of only 4 ranks per
	 * controller, the mapping is fairly obvious but not unique.
	 * we map -1 -> NC and assume both controllers use the same
	 * map...
	 */
	int dimm_csmap[I5100_MAX_DIMM_SLOTS_PER_CTLR][I5100_MAX_RANKS_PER_DIMM];

	/* memory interleave range */
	struct {
		u64	 limit;
		unsigned way[2];
	} mir[I5100_MAX_CTLRS];

	/* adjusted memory interleave range register */
	unsigned amir[I5100_MAX_CTLRS];

	/* dimm interleave range */
	struct {
		unsigned rank[I5100_MAX_RANK_INTERLEAVE];
		u64	 limit;
	} dmir[I5100_MAX_CTLRS][I5100_MAX_DMIRS];

	/* memory technology registers... */
	struct {
		unsigned present;	/* 0 or 1 */
		unsigned ethrottle;	/* 0 or 1 */
		unsigned width;		/* 4 or 8 bits */
		unsigned numbank;	/* 2 or 3 lines */
		unsigned numrow;	/* 13 .. 16 lines */
		unsigned numcol;	/* 11 .. 12 lines */
	} mtr[I5100_MAX_CTLRS][I5100_MAX_RANKS_PER_CTLR];

	u64 tolm;		/* top of low memory in bytes */
	unsigned ranksperctlr;	/* number of ranks per controller */

	struct pci_dev *mc;	/* device 16 func 1 */
	struct pci_dev *ch0mm;	/* device 21 func 0 */
	struct pci_dev *ch1mm;	/* device 22 func 0 */
};
/* map a rank/ctlr to a slot number on the mainboard */
static int i5100_rank_to_slot(const struct mem_ctl_info *mci,
			      int ctlr, int rank)
{
	const struct i5100_priv *priv = mci->pvt_info;
	int i;

	for (i = 0; i < I5100_MAX_DIMM_SLOTS_PER_CTLR; i++) {
		int j;
		const int numrank = priv->dimm_numrank[ctlr][i];

		for (j = 0; j < numrank; j++)
			if (priv->dimm_csmap[i][j] == rank)
				return i * 2 + ctlr;
	}

	/* this should never happen */
	WARN_ON(1);

	return -1;
}
static const char *i5100_err_msg(unsigned err)
{
	static const char *merrs[] = {
		"unknown", /* 0 */
		"uncorrectable data ECC on replay", /* 1 */
		"unknown", /* 2 */
		"unknown", /* 3 */
		"aliased uncorrectable demand data ECC", /* 4 */
		"aliased uncorrectable spare-copy data ECC", /* 5 */
		"aliased uncorrectable patrol data ECC", /* 6 */
		"unknown", /* 7 */
		"unknown", /* 8 */
		"unknown", /* 9 */
		"non-aliased uncorrectable demand data ECC", /* 10 */
		"non-aliased uncorrectable spare-copy data ECC", /* 11 */
		"non-aliased uncorrectable patrol data ECC", /* 12 */
		"unknown", /* 13 */
		"correctable demand data ECC", /* 14 */
		"correctable spare-copy data ECC", /* 15 */
		"correctable patrol data ECC", /* 16 */
		"unknown", /* 17 */
		"SPD protocol error", /* 18 */
		"unknown", /* 19 */
		"spare copy initiated", /* 20 */
		"spare copy completed", /* 21 */
		"unknown", /* 22 */
	};
	unsigned i;

	for (i = 0; i < ARRAY_SIZE(merrs); i++)
		if (1 << i & err)
			return merrs[i];

	return "none";
}
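
/*
 * csrow numbering used below: csrows are allocated as
 * ctlr * ranksperctlr + rank, so the two helpers that follow simply
 * invert that mapping with a modulo and a divide.
 */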
/* convert csrow index into a rank (per controller -- 0..5) */
static int i5100_csrow_to_rank(const struct mem_ctl_info *mci, int csrow)
{
	const struct i5100_priv *priv = mci->pvt_info;

	return csrow % priv->ranksperctlr;
}

/* convert csrow index into a controller (0..1) */
static int i5100_csrow_to_cntlr(const struct mem_ctl_info *mci, int csrow)
{
	const struct i5100_priv *priv = mci->pvt_info;

	return csrow / priv->ranksperctlr;
}
static unsigned i5100_rank_to_csrow(const struct mem_ctl_info *mci,
				    int ctlr, int rank)
{
	const struct i5100_priv *priv = mci->pvt_info;

	return ctlr * priv->ranksperctlr + rank;
}
static void i5100_handle_ce(struct mem_ctl_info *mci,
			    int ctlr,
			    unsigned bank,
			    unsigned rank,
			    unsigned long syndrome,
			    unsigned cas,
			    unsigned ras,
			    const char *msg)
{
	const int csrow = i5100_rank_to_csrow(mci, ctlr, rank);

	printk(KERN_ERR
		"CE ctlr %d, bank %u, rank %u, syndrome 0x%lx, "
		"cas %u, ras %u, csrow %u, label \"%s\": %s\n",
		ctlr, bank, rank, syndrome, cas, ras,
		csrow, mci->csrows[csrow].channels[0].label, msg);

	mci->ce_count++;
	mci->csrows[csrow].ce_count++;
	mci->csrows[csrow].channels[0].ce_count++;
}
static void i5100_handle_ue(struct mem_ctl_info *mci,
			    int ctlr,
			    unsigned bank,
			    unsigned rank,
			    unsigned long syndrome,
			    unsigned cas,
			    unsigned ras,
			    const char *msg)
{
	const int csrow = i5100_rank_to_csrow(mci, ctlr, rank);

	printk(KERN_ERR
		"UE ctlr %d, bank %u, rank %u, syndrome 0x%lx, "
		"cas %u, ras %u, csrow %u, label \"%s\": %s\n",
		ctlr, bank, rank, syndrome, cas, ras,
		csrow, mci->csrows[csrow].channels[0].label, msg);

	mci->ue_count++;
	mci->csrows[csrow].ue_count++;
}
static void i5100_read_log(struct mem_ctl_info *mci, int ctlr,
			   u32 ferr, u32 nerr)
{
	struct i5100_priv *priv = mci->pvt_info;
	struct pci_dev *pdev = (ctlr) ? priv->ch1mm : priv->ch0mm;
	u32 dw;
	u32 dw2;
	unsigned syndrome = 0;
	unsigned ecc_loc = 0;
	unsigned merr;
	unsigned bank;
	unsigned rank;
	unsigned cas;
	unsigned ras;

	pci_read_config_dword(pdev, I5100_VALIDLOG, &dw);

	if (i5100_validlog_redmemvalid(dw)) {
		pci_read_config_dword(pdev, I5100_REDMEMA, &dw2);
		syndrome = dw2;
		pci_read_config_dword(pdev, I5100_REDMEMB, &dw2);
		ecc_loc = i5100_redmemb_ecc_locator(dw2);
	}

	if (i5100_validlog_recmemvalid(dw)) {
		const char *msg;

		pci_read_config_dword(pdev, I5100_RECMEMA, &dw2);
		merr = i5100_recmema_merr(dw2);
		bank = i5100_recmema_bank(dw2);
		rank = i5100_recmema_rank(dw2);

		pci_read_config_dword(pdev, I5100_RECMEMB, &dw2);
		cas = i5100_recmemb_cas(dw2);
		ras = i5100_recmemb_ras(dw2);

		/* FIXME: not really sure if this is what merr is...
		 */
		if (!merr)
			msg = i5100_err_msg(ferr);
		else
			msg = i5100_err_msg(nerr);

		i5100_handle_ce(mci, ctlr, bank, rank, syndrome, cas, ras, msg);
	}

	if (i5100_validlog_nrecmemvalid(dw)) {
		const char *msg;

		pci_read_config_dword(pdev, I5100_NRECMEMA, &dw2);
		merr = i5100_nrecmema_merr(dw2);
		bank = i5100_nrecmema_bank(dw2);
		rank = i5100_nrecmema_rank(dw2);

		pci_read_config_dword(pdev, I5100_NRECMEMB, &dw2);
		cas = i5100_nrecmemb_cas(dw2);
		ras = i5100_nrecmemb_ras(dw2);

		/* FIXME: not really sure if this is what merr is...
		 */
		if (!merr)
			msg = i5100_err_msg(ferr);
		else
			msg = i5100_err_msg(nerr);

		i5100_handle_ue(mci, ctlr, bank, rank, syndrome, cas, ras, msg);
	}

	pci_write_config_dword(pdev, I5100_VALIDLOG, dw);
}
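
/*
 * Poll-mode error check: read the first non-fatal memory error register
 * and, when it reports anything, grab the "next" error register as well,
 * write both back to clear them, then decode the per-channel error logs.
 */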
static void i5100_check_error(struct mem_ctl_info *mci)
{
	struct i5100_priv *priv = mci->pvt_info;
	u32 dw;

	pci_read_config_dword(priv->mc, I5100_FERR_NF_MEM, &dw);
	if (i5100_ferr_nf_mem_any(dw)) {
		u32 dw2;

		pci_read_config_dword(priv->mc, I5100_NERR_NF_MEM, &dw2);
		if (dw2)
			pci_write_config_dword(priv->mc, I5100_NERR_NF_MEM,
					       dw2);
		pci_write_config_dword(priv->mc, I5100_FERR_NF_MEM, dw);

		i5100_read_log(mci, i5100_ferr_nf_mem_chan_indx(dw),
			       i5100_ferr_nf_mem_any(dw),
			       i5100_nerr_nf_mem_any(dw2));
	}
}
static struct pci_dev *pci_get_device_func(unsigned vendor,
					   unsigned device,
					   unsigned func)
{
	struct pci_dev *ret = NULL;

	while (1) {
		ret = pci_get_device(vendor, device, ret);

		if (!ret)
			break;

		if (PCI_FUNC(ret->devfn) == func)
			break;
	}

	return ret;
}
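
/*
 * Pages contributed by one rank: each address line doubles the size, so a
 * rank spans 2^(data + column + row + bank address lines) bytes, which is
 * then divided by PAGE_SIZE.
 */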
static unsigned long __devinit i5100_npages(struct mem_ctl_info *mci,
					    int csrow)
{
	struct i5100_priv *priv = mci->pvt_info;
	const unsigned ctlr_rank = i5100_csrow_to_rank(mci, csrow);
	const unsigned ctlr = i5100_csrow_to_cntlr(mci, csrow);
	unsigned addr_lines;

	/* dimm present? */
	if (!priv->mtr[ctlr][ctlr_rank].present)
		return 0ULL;

	addr_lines =
		I5100_DIMM_ADDR_LINES +
		priv->mtr[ctlr][ctlr_rank].numcol +
		priv->mtr[ctlr][ctlr_rank].numrow +
		priv->mtr[ctlr][ctlr_rank].numbank;

	return (unsigned long)
		((unsigned long long) (1ULL << addr_lines) / PAGE_SIZE);
}
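
/*
 * Decode the raw MTR fields: width becomes 4 or 8 bits, and the bank, row
 * and column counts are stored as biased values (2 + numbank, 13 + numrow
 * and 10 + numcol address lines, per the decode below).
 */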
static void __devinit i5100_init_mtr(struct mem_ctl_info *mci)
{
	struct i5100_priv *priv = mci->pvt_info;
	struct pci_dev *mms[2] = { priv->ch0mm, priv->ch1mm };
	int i;

	for (i = 0; i < I5100_MAX_CTLRS; i++) {
		int j;
		struct pci_dev *pdev = mms[i];

		for (j = 0; j < I5100_MAX_RANKS_PER_CTLR; j++) {
			const unsigned addr =
				(j < 4) ? I5100_MTR_0 + j * 2 :
					  I5100_MTR_4 + (j - 4) * 2;
			u16 w;

			pci_read_config_word(pdev, addr, &w);

			priv->mtr[i][j].present = i5100_mtr_present(w);
			priv->mtr[i][j].ethrottle = i5100_mtr_ethrottle(w);
			priv->mtr[i][j].width = 4 + 4 * i5100_mtr_width(w);
			priv->mtr[i][j].numbank = 2 + i5100_mtr_numbank(w);
			priv->mtr[i][j].numrow = 13 + i5100_mtr_numrow(w);
			priv->mtr[i][j].numcol = 10 + i5100_mtr_numcol(w);
		}
	}
}
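
/*
 * SPD bytes are fetched through the memory controller's SPD command/status
 * registers rather than a normal i2c bus: write a command to I5100_SPDCMD,
 * poll the busy bit in I5100_SPDDATA, then check the read-data-ok and
 * single-bit-error flags before using the returned byte.
 */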
/*
 * FIXME: make this into a real i2c adapter (so that dimm-decode
 * will work)...
 */
static int i5100_read_spd_byte(const struct mem_ctl_info *mci,
			       u8 ch, u8 slot, u8 addr, u8 *byte)
{
	struct i5100_priv *priv = mci->pvt_info;
	u16 w;
	unsigned long et;

	pci_read_config_word(priv->mc, I5100_SPDDATA, &w);
	if (i5100_spddata_busy(w))
		return -1;

	pci_write_config_dword(priv->mc, I5100_SPDCMD,
			       i5100_spdcmd_create(0xa, 1, ch * 4 + slot, addr,
						   0, 0));

	/* wait up to 100ms */
	et = jiffies + HZ / 10;
	do {
		pci_read_config_word(priv->mc, I5100_SPDDATA, &w);
		if (!i5100_spddata_busy(w))
			break;
		udelay(100);
	} while (time_before(jiffies, et));

	if (!i5100_spddata_rdo(w) || i5100_spddata_sbe(w))
		return -1;

	*byte = i5100_spddata_data(w);

	return 0;
}
/*
 * fill dimm chip select map
 *
 * FIXME:
 *   o only valid for 4 ranks per controller
 *   o not the only way to map chip selects to dimm slots
 *   o investigate if there is some way to obtain this map from the bios
 */
static void __devinit i5100_init_dimm_csmap(struct mem_ctl_info *mci)
{
	struct i5100_priv *priv = mci->pvt_info;
	int i;

	WARN_ON(priv->ranksperctlr != 4);

	for (i = 0; i < I5100_MAX_DIMM_SLOTS_PER_CTLR; i++) {
		int j;

		for (j = 0; j < I5100_MAX_RANKS_PER_DIMM; j++)
			priv->dimm_csmap[i][j] = -1; /* default NC */
	}

	/* only 2 chip selects per slot... */
	priv->dimm_csmap[0][0] = 0;
	priv->dimm_csmap[0][1] = 3;
	priv->dimm_csmap[1][0] = 1;
	priv->dimm_csmap[1][1] = 2;
	priv->dimm_csmap[2][0] = 2;
	priv->dimm_csmap[3][0] = 3;
}
static void __devinit i5100_init_dimm_layout(struct pci_dev *pdev,
					     struct mem_ctl_info *mci)
{
	struct i5100_priv *priv = mci->pvt_info;
	int i;

	for (i = 0; i < I5100_MAX_CTLRS; i++) {
		int j;

		for (j = 0; j < I5100_MAX_DIMM_SLOTS_PER_CTLR; j++) {
			u8 rank;

			/* SPD byte 5 encodes the number of ranks on the DIMM */
			if (i5100_read_spd_byte(mci, i, j, 5, &rank) < 0)
				priv->dimm_numrank[i][j] = 0;
			else
				priv->dimm_numrank[i][j] = (rank & 3) + 1;
		}
	}

	i5100_init_dimm_csmap(mci);
}
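
/*
 * TOLM is kept in 256MB units and the MIR/DMIR limit fields are shifted
 * left by 28 bits, so every limit stored in priv ends up as a byte address
 * with 256MB granularity.
 */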
static void __devinit i5100_init_interleaving(struct pci_dev *pdev,
					      struct mem_ctl_info *mci)
{
	u16 w;
	u32 dw;
	struct i5100_priv *priv = mci->pvt_info;
	struct pci_dev *mms[2] = { priv->ch0mm, priv->ch1mm };
	int i;

	pci_read_config_word(pdev, I5100_TOLM, &w);
	priv->tolm = (u64) i5100_tolm_tolm(w) * 256 * 1024 * 1024;

	pci_read_config_word(pdev, I5100_MIR0, &w);
	priv->mir[0].limit = (u64) i5100_mir_limit(w) << 28;
	priv->mir[0].way[1] = i5100_mir_way1(w);
	priv->mir[0].way[0] = i5100_mir_way0(w);

	pci_read_config_word(pdev, I5100_MIR1, &w);
	priv->mir[1].limit = (u64) i5100_mir_limit(w) << 28;
	priv->mir[1].way[1] = i5100_mir_way1(w);
	priv->mir[1].way[0] = i5100_mir_way0(w);

	pci_read_config_word(pdev, I5100_AMIR_0, &w);
	priv->amir[0] = w;
	pci_read_config_word(pdev, I5100_AMIR_1, &w);
	priv->amir[1] = w;

	for (i = 0; i < I5100_MAX_CTLRS; i++) {
		int j;

		for (j = 0; j < 5; j++) {
			int k;

			pci_read_config_dword(mms[i], I5100_DMIR + j * 4, &dw);

			priv->dmir[i][j].limit =
				(u64) i5100_dmir_limit(dw) << 28;
			for (k = 0; k < I5100_MAX_RANKS_PER_DIMM; k++)
				priv->dmir[i][j].rank[k] =
					i5100_dmir_rank(dw, k);
		}
	}
}
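
/*
 * Each chip-select row corresponds to one rank and is exposed as a single
 * EDAC channel; page ranges are handed out contiguously in csrow order.
 */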
static void __devinit i5100_init_csrows(struct mem_ctl_info *mci)
{
	int i;
	unsigned long total_pages = 0UL;
	struct i5100_priv *priv = mci->pvt_info;

	for (i = 0; i < mci->nr_csrows; i++) {
		const unsigned long npages = i5100_npages(mci, i);
		const unsigned cntlr = i5100_csrow_to_cntlr(mci, i);
		const unsigned rank = i5100_csrow_to_rank(mci, i);

		if (!npages)
			continue;

		/*
		 * FIXME: these two are totally bogus -- I don't see how to
		 * map them correctly to this structure...
		 */
		mci->csrows[i].first_page = total_pages;
		mci->csrows[i].last_page = total_pages + npages - 1;
		mci->csrows[i].page_mask = 0UL;

		mci->csrows[i].nr_pages = npages;
		mci->csrows[i].grain = 32;
		mci->csrows[i].csrow_idx = i;
		mci->csrows[i].dtype =
			(priv->mtr[cntlr][rank].width == 4) ? DEV_X4 : DEV_X8;
		mci->csrows[i].ue_count = 0;
		mci->csrows[i].ce_count = 0;
		mci->csrows[i].mtype = MEM_RDDR2;
		mci->csrows[i].edac_mode = EDAC_SECDED;
		mci->csrows[i].mci = mci;
		mci->csrows[i].nr_channels = 1;
		mci->csrows[i].channels[0].chan_idx = 0;
		mci->csrows[i].channels[0].ce_count = 0;
		mci->csrows[i].channels[0].csrow = mci->csrows + i;
		snprintf(mci->csrows[i].channels[0].label,
			 sizeof(mci->csrows[i].channels[0].label),
			 "DIMM%u", i5100_rank_to_slot(mci, cntlr, rank));

		total_pages += npages;
	}
}
static int __devinit i5100_init_one(struct pci_dev *pdev,
				    const struct pci_device_id *id)
{
	int rc;
	struct mem_ctl_info *mci;
	struct i5100_priv *priv;
	struct pci_dev *ch0mm, *ch1mm;
	int ret = 0;
	u32 dw;
	int ranksperch;

	if (PCI_FUNC(pdev->devfn) != 1)
		return -ENODEV;

	rc = pci_enable_device(pdev);
	if (rc < 0) {
		ret = rc;
		goto bail;
	}

	/* ECC enabled? */
	pci_read_config_dword(pdev, I5100_MC, &dw);
	if (!i5100_mc_errdeten(dw)) {
		printk(KERN_INFO "i5100_edac: ECC not enabled.\n");
		ret = -ENODEV;
		goto bail_pdev;
	}

	/* figure out how many ranks, from strapped state of 48GB_Mode input */
	pci_read_config_dword(pdev, I5100_MS, &dw);
	ranksperch = !!(dw & (1 << 8)) * 2 + 4;

	if (ranksperch != 4) {
		/* FIXME: get 6 ranks / controller to work - need hw... */
		printk(KERN_INFO "i5100_edac: unsupported configuration.\n");
		ret = -ENODEV;
		goto bail_pdev;
	}

	/* enable error reporting... */
	pci_read_config_dword(pdev, I5100_EMASK_MEM, &dw);
	dw &= ~I5100_FERR_NF_MEM_ANY_MASK;
	pci_write_config_dword(pdev, I5100_EMASK_MEM, dw);

	/* device 21, func 0, Channel 0 Memory Map, Error Flag/Mask, etc... */
	ch0mm = pci_get_device_func(PCI_VENDOR_ID_INTEL,
				    PCI_DEVICE_ID_INTEL_5100_21, 0);
	if (!ch0mm) {
		ret = -ENODEV;
		goto bail_pdev;
	}

	rc = pci_enable_device(ch0mm);
	if (rc < 0) {
		ret = rc;
		goto bail_ch0;
	}

	/* device 22, func 0, Channel 1 Memory Map, Error Flag/Mask, etc... */
	ch1mm = pci_get_device_func(PCI_VENDOR_ID_INTEL,
				    PCI_DEVICE_ID_INTEL_5100_22, 0);
	if (!ch1mm) {
		ret = -ENODEV;
		goto bail_disable_ch0;
	}

	rc = pci_enable_device(ch1mm);
	if (rc < 0) {
		ret = rc;
		goto bail_ch1;
	}

	mci = edac_mc_alloc(sizeof(*priv), ranksperch * 2, 1, 0);
	if (!mci) {
		ret = -ENOMEM;
		goto bail_disable_ch1;
	}

	mci->dev = &pdev->dev;

	priv = mci->pvt_info;
	priv->ranksperctlr = ranksperch;
	priv->mc = pdev;
	priv->ch0mm = ch0mm;
	priv->ch1mm = ch1mm;

	i5100_init_dimm_layout(pdev, mci);
	i5100_init_interleaving(pdev, mci);

	mci->mtype_cap = MEM_FLAG_FB_DDR2;
	mci->edac_ctl_cap = EDAC_FLAG_SECDED;
	mci->edac_cap = EDAC_FLAG_SECDED;
	mci->mod_name = "i5100_edac.c";
	mci->mod_ver = "not versioned";
	mci->ctl_name = "i5100";
	mci->dev_name = pci_name(pdev);
	mci->ctl_page_to_phys = NULL;

	mci->edac_check = i5100_check_error;

	i5100_init_csrows(mci);

	/* this strange construction seems to be in every driver, dunno why */
	switch (edac_op_state) {
	case EDAC_OPSTATE_POLL:
	case EDAC_OPSTATE_NMI:
		break;
	default:
		edac_op_state = EDAC_OPSTATE_POLL;
		break;
	}

	if (edac_mc_add_mc(mci)) {
		ret = -ENODEV;
		goto bail_mc;
	}

	return ret;

bail_mc:
	edac_mc_free(mci);

bail_disable_ch1:
	pci_disable_device(ch1mm);

bail_ch1:
	pci_dev_put(ch1mm);

bail_disable_ch0:
	pci_disable_device(ch0mm);

bail_ch0:
	pci_dev_put(ch0mm);

bail_pdev:
	pci_disable_device(pdev);

bail:
	return ret;
}
static void __devexit i5100_remove_one(struct pci_dev *pdev)
{
	struct mem_ctl_info *mci;
	struct i5100_priv *priv;

	mci = edac_mc_del_mc(&pdev->dev);

	if (!mci)
		return;

	priv = mci->pvt_info;
	pci_disable_device(pdev);
	pci_disable_device(priv->ch0mm);
	pci_disable_device(priv->ch1mm);
	pci_dev_put(priv->ch0mm);
	pci_dev_put(priv->ch1mm);

	edac_mc_free(mci);
}
static const struct pci_device_id i5100_pci_tbl[] __devinitdata = {
	/* Device 16, Function 0, Channel 0 Memory Map, Error Flag/Mask, ... */
	{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_5100_16) },
	{ 0, }
};
MODULE_DEVICE_TABLE(pci, i5100_pci_tbl);
static struct pci_driver i5100_driver = {
	.name = KBUILD_BASENAME,
	.probe = i5100_init_one,
	.remove = __devexit_p(i5100_remove_one),
	.id_table = i5100_pci_tbl,
};
static int __init i5100_init(void)
{
	int pci_rc;

	pci_rc = pci_register_driver(&i5100_driver);

	return (pci_rc < 0) ? pci_rc : 0;
}

static void __exit i5100_exit(void)
{
	pci_unregister_driver(&i5100_driver);
}

module_init(i5100_init);
module_exit(i5100_exit);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Arthur Jones <ajones@riverbed.com>");
MODULE_DESCRIPTION("MC Driver for Intel I5100 memory controllers");