#include "amd64_edac.h"
#include <asm/amd_nb.h>

static struct edac_pci_ctl_info *amd64_ctl_pci;

static int report_gart_errors;
module_param(report_gart_errors, int, 0644);

/*
 * Set by command line parameter. If BIOS has enabled the ECC, this override is
 * cleared to prevent re-enabling the hardware by this driver.
 */
static int ecc_enable_override;
module_param(ecc_enable_override, int, 0644);

static struct msr __percpu *msrs;

/* count successfully initialized driver instances for setup_pci_device() */
static atomic_t drv_instances = ATOMIC_INIT(0);

/* Per-node driver instances */
static struct mem_ctl_info **mcis;
static struct ecc_settings **ecc_stngs;

/*
 * Valid scrub rates for the K8 hardware memory scrubber. We map the scrubbing
 * bandwidth to a valid bit pattern. The 'set' operation finds the 'matching-
 * or higher value'.
 *
 * FIXME: Produce a better mapping/linearisation.
 */
static const struct scrubrate {
	u32 scrubval;		/* bit pattern for scrub rate */
	u32 bandwidth;		/* bandwidth consumed (bytes/sec) */
} scrubrates[] = {
	{ 0x01, 1600000000UL},
	{ 0x00, 0UL},		/* scrubbing off */
};

int __amd64_read_pci_cfg_dword(struct pci_dev *pdev, int offset,
			       u32 *val, const char *func)
{
	int err = 0;

	err = pci_read_config_dword(pdev, offset, val);
	if (err)
		amd64_warn("%s: error reading F%dx%03x.\n",
			   func, PCI_FUNC(pdev->devfn), offset);

	return err;
}

int __amd64_write_pci_cfg_dword(struct pci_dev *pdev, int offset,
				u32 val, const char *func)
{
	int err = 0;

	err = pci_write_config_dword(pdev, offset, val);
	if (err)
		amd64_warn("%s: error writing to F%dx%03x.\n",
			   func, PCI_FUNC(pdev->devfn), offset);

	return err;
}

/*
 * Depending on the family, F2 DCT reads need special handling:
 *
 * K8: has a single DCT only
 *
 * F10h: each DCT has its own set of regs
 *
 * F15h: we select which DCT we access using F1x10C[DctCfgSel]
 */
static int k8_read_dct_pci_cfg(struct amd64_pvt *pvt, int addr, u32 *val,
			       const char *func)
{
	if (addr >= 0x100)
		return -EINVAL;

	return __amd64_read_pci_cfg_dword(pvt->F2, addr, val, func);
}

static int f10_read_dct_pci_cfg(struct amd64_pvt *pvt, int addr, u32 *val,
				const char *func)
{
	return __amd64_read_pci_cfg_dword(pvt->F2, addr, val, func);
}

/*
 * Select DCT to which PCI cfg accesses are routed
 */
static void f15h_select_dct(struct amd64_pvt *pvt, u8 dct)
{
	u32 reg = 0;

	amd64_read_pci_cfg(pvt->F1, DCT_CFG_SEL, &reg);
	reg &= 0xfffffffe;
	reg |= dct;
	amd64_write_pci_cfg(pvt->F1, DCT_CFG_SEL, reg);
}

static int f15_read_dct_pci_cfg(struct amd64_pvt *pvt, int addr, u32 *val,
				const char *func)
{
	u8 dct = 0;

	if (addr >= 0x140 && addr <= 0x1a0) {
		dct   = 1;
		addr -= 0x100;
	}

	f15h_select_dct(pvt, dct);

	return __amd64_read_pci_cfg_dword(pvt->F2, addr, val, func);
}

/*
 * Memory scrubber control interface. For K8, memory scrubbing is handled by
 * hardware and can involve L2 cache, dcache as well as the main memory. With
 * F10, this is extended to L3 cache scrubbing on CPU models sporting that
 * functionality.
 *
 * This causes the "units" for the scrubbing speed to vary from 64 byte blocks
 * (dram) over to cache lines. This is nasty, so we will use bandwidth in
 * bytes/sec for the setting.
 *
 * Currently, we only do dram scrubbing. If the scrubbing is done in software on
 * other archs, we might not have access to the caches directly.
 */

/*
 * scan the scrub rate mapping table for a close or matching bandwidth value to
 * issue. If requested is too big, then use last maximum value found.
 */
static int __amd64_set_scrub_rate(struct pci_dev *ctl, u32 new_bw, u32 min_rate)
{
	u32 scrubval;
	int i;

	/*
	 * map the configured rate (new_bw) to a value specific to the AMD64
	 * memory controller and apply to register. Search for the first
	 * bandwidth entry that is greater than or equal to the setting
	 * requested and program that. If at last entry, turn off DRAM
	 * scrubbing.
	 *
	 * If no suitable bandwidth is found, turn off DRAM scrubbing entirely
	 * by falling back to the last element in scrubrates[].
	 */
	for (i = 0; i < ARRAY_SIZE(scrubrates) - 1; i++) {
		/*
		 * skip scrub rates which aren't recommended
		 * (see F10 BKDG, F3x58)
		 */
		if (scrubrates[i].scrubval < min_rate)
			continue;

		if (scrubrates[i].bandwidth <= new_bw)
			break;
	}

	scrubval = scrubrates[i].scrubval;

	pci_write_bits32(ctl, SCRCTRL, scrubval, 0x001F);

	if (scrubval)
		return scrubrates[i].bandwidth;

	return 0;
}

static int amd64_set_scrub_rate(struct mem_ctl_info *mci, u32 bw)
{
	struct amd64_pvt *pvt = mci->pvt_info;
	u32 min_scrubrate = 0x5;

	if (boot_cpu_data.x86 == 0xf)
		min_scrubrate = 0x0;

	/* F15h Erratum #505 */
	if (boot_cpu_data.x86 == 0x15)
		f15h_select_dct(pvt, 0);

	return __amd64_set_scrub_rate(pvt->F3, bw, min_scrubrate);
}

static int amd64_get_scrub_rate(struct mem_ctl_info *mci)
{
	struct amd64_pvt *pvt = mci->pvt_info;
	u32 scrubval = 0;
	int i, retval = -EINVAL;

	/* F15h Erratum #505 */
	if (boot_cpu_data.x86 == 0x15)
		f15h_select_dct(pvt, 0);

	amd64_read_pci_cfg(pvt->F3, SCRCTRL, &scrubval);

	scrubval = scrubval & 0x001F;

	for (i = 0; i < ARRAY_SIZE(scrubrates); i++) {
		if (scrubrates[i].scrubval == scrubval) {
			retval = scrubrates[i].bandwidth;
			break;
		}
	}
	return retval;
}

/*
 * returns true if the SysAddr given by sys_addr matches the
 * DRAM base/limit associated with node_id
 */
static bool amd64_base_limit_match(struct amd64_pvt *pvt, u64 sys_addr,
				   u8 nid)
{
	u64 addr;

	/* The K8 treats this as a 40-bit value. However, bits 63-40 will be
	 * all ones if the most significant implemented address bit is 1.
	 * Here we discard bits 63-40. See section 3.4.2 of AMD publication
	 * 24592: AMD x86-64 Architecture Programmer's Manual Volume 1
	 * Application Programming.
	 */
	addr = sys_addr & 0x000000ffffffffffull;

	return ((addr >= get_dram_base(pvt, nid)) &&
		(addr <= get_dram_limit(pvt, nid)));
}

/*
 * Attempt to map a SysAddr to a node. On success, return a pointer to the
 * mem_ctl_info structure for the node that the SysAddr maps to.
 *
 * On failure, return NULL.
 */
static struct mem_ctl_info *find_mc_by_sys_addr(struct mem_ctl_info *mci,
						u64 sys_addr)
{
	struct amd64_pvt *pvt;
	u8 node_id;
	u32 intlv_en, bits;

	/*
	 * Here we use the DRAM Base (section 3.4.4.1) and DRAM Limit (section
	 * 3.4.4.2) registers to map the SysAddr to a node ID.
	 */
	pvt = mci->pvt_info;

	/*
	 * The value of this field should be the same for all DRAM Base
	 * registers. Therefore we arbitrarily choose to read it from the
	 * register for node 0.
	 */
	intlv_en = dram_intlv_en(pvt, 0);

	if (intlv_en == 0) {
		for (node_id = 0; node_id < DRAM_RANGES; node_id++) {
			if (amd64_base_limit_match(pvt, sys_addr, node_id))
				goto found;
		}
		goto err_no_match;
	}

	if (unlikely((intlv_en != 0x01) &&
		     (intlv_en != 0x03) &&
		     (intlv_en != 0x07))) {
		amd64_warn("DRAM Base[IntlvEn] junk value: 0x%x, BIOS bug?\n", intlv_en);
		return NULL;
	}

	bits = (((u32) sys_addr) >> 12) & intlv_en;

	for (node_id = 0; ; ) {
		if ((dram_intlv_sel(pvt, node_id) & intlv_en) == bits)
			break;	/* intlv_sel field matches */

		if (++node_id >= DRAM_RANGES)
			goto err_no_match;
	}

	/* sanity test for sys_addr */
	if (unlikely(!amd64_base_limit_match(pvt, sys_addr, node_id))) {
		amd64_warn("%s: sys_addr 0x%llx falls outside base/limit address "
			   "range for node %d with node interleaving enabled.\n",
			   __func__, sys_addr, node_id);
		return NULL;
	}

found:
	return edac_mc_find((int)node_id);

err_no_match:
	edac_dbg(2, "sys_addr 0x%lx doesn't match any node\n",
		 (unsigned long)sys_addr);

	return NULL;
}

/*
 * compute the CS base address of the @csrow on the DRAM controller @dct.
 * For details see F2x[5C:40] in the processor's BKDG
 */
static void get_cs_base_and_mask(struct amd64_pvt *pvt, int csrow, u8 dct,
				 u64 *base, u64 *mask)
{
	u64 csbase, csmask, base_bits, mask_bits;
	u8 addr_shift;

	if (boot_cpu_data.x86 == 0xf && pvt->ext_model < K8_REV_F) {
		csbase		= pvt->csels[dct].csbases[csrow];
		csmask		= pvt->csels[dct].csmasks[csrow];
		base_bits	= GENMASK(21, 31) | GENMASK(9, 15);
		mask_bits	= GENMASK(21, 29) | GENMASK(9, 15);
		addr_shift	= 4;
	} else {
		csbase		= pvt->csels[dct].csbases[csrow];
		csmask		= pvt->csels[dct].csmasks[csrow >> 1];

		if (boot_cpu_data.x86 == 0x15)
			base_bits = mask_bits = GENMASK(19, 30) | GENMASK(5, 13);
		else
			base_bits = mask_bits = GENMASK(19, 28) | GENMASK(5, 13);

		addr_shift	= 8;
	}

	*base = (csbase & base_bits) << addr_shift;

	*mask = ~0ULL;
	/* poke holes for the csmask */
	*mask &= ~(mask_bits << addr_shift);
	/* OR them in */
	*mask |= (csmask & mask_bits) << addr_shift;
}

#define for_each_chip_select(i, dct, pvt) \
	for (i = 0; i < pvt->csels[dct].b_cnt; i++)

#define chip_select_base(i, dct, pvt) \
	pvt->csels[dct].csbases[i]

#define for_each_chip_select_mask(i, dct, pvt) \
	for (i = 0; i < pvt->csels[dct].m_cnt; i++)

/*
 * @input_addr is an InputAddr associated with the node given by mci. Return the
 * csrow that input_addr maps to, or -1 on failure (no csrow claims input_addr).
 */
static int input_addr_to_csrow(struct mem_ctl_info *mci, u64 input_addr)
{
	struct amd64_pvt *pvt;
	int csrow;
	u64 base, mask;

	pvt = mci->pvt_info;

	for_each_chip_select(csrow, 0, pvt) {
		if (!csrow_enabled(csrow, 0, pvt))
			continue;

		get_cs_base_and_mask(pvt, csrow, 0, &base, &mask);

		mask = ~mask;

		if ((input_addr & mask) == (base & mask)) {
			edac_dbg(2, "InputAddr 0x%lx matches csrow %d (node %d)\n",
				 (unsigned long)input_addr, csrow,
				 pvt->mc_node_id);

			return csrow;
		}
	}
	edac_dbg(2, "no matching csrow for InputAddr 0x%lx (MC node %d)\n",
		 (unsigned long)input_addr, pvt->mc_node_id);

	return -1;
}

/*
 * Obtain info from the DRAM Hole Address Register (section 3.4.8, pub #26094)
 * for the node represented by mci. Info is passed back in *hole_base,
 * *hole_offset, and *hole_size. Function returns 0 if info is valid or 1 if
 * info is invalid. Info may be invalid for either of the following reasons:
 *
 * - The revision of the node is not E or greater. In this case, the DRAM Hole
 *   Address Register does not exist.
 *
 * - The DramHoleValid bit is cleared in the DRAM Hole Address Register,
 *   indicating that its contents are not valid.
 *
 * The values passed back in *hole_base, *hole_offset, and *hole_size are
 * complete 32-bit values despite the fact that the bitfields in the DHAR
 * only represent bits 31-24 of the base and offset values.
 */
int amd64_get_dram_hole_info(struct mem_ctl_info *mci, u64 *hole_base,
			     u64 *hole_offset, u64 *hole_size)
{
	struct amd64_pvt *pvt = mci->pvt_info;

	/* only revE and later have the DRAM Hole Address Register */
	if (boot_cpu_data.x86 == 0xf && pvt->ext_model < K8_REV_E) {
		edac_dbg(1, "  revision %d for node %d does not support DHAR\n",
			 pvt->ext_model, pvt->mc_node_id);
		return 1;
	}

	/* valid for Fam10h and above */
	if (boot_cpu_data.x86 >= 0x10 && !dhar_mem_hoist_valid(pvt)) {
		edac_dbg(1, "  Dram Memory Hoisting is DISABLED on this system\n");
		return 1;
	}

	if (!dhar_valid(pvt)) {
		edac_dbg(1, "  Dram Memory Hoisting is DISABLED on this node %d\n",
			 pvt->mc_node_id);
		return 1;
	}

	/* This node has Memory Hoisting */

	/* +------------------+--------------------+--------------------+-----
	 * | memory           | DRAM hole          | relocated          |
	 * | [0, (x - 1)]     | [x, 0xffffffff]    | addresses from     |
	 * |                  |                    | DRAM hole          |
	 * |                  |                    | [0x100000000,      |
	 * |                  |                    |  (0x100000000+     |
	 * |                  |                    |   (0xffffffff-x))] |
	 * +------------------+--------------------+--------------------+-----
	 *
	 * Above is a diagram of physical memory showing the DRAM hole and the
	 * relocated addresses from the DRAM hole. As shown, the DRAM hole
	 * starts at address x (the base address) and extends through address
	 * 0xffffffff. The DRAM Hole Address Register (DHAR) relocates the
	 * addresses in the hole so that they start at 0x100000000.
	 */

	*hole_base = dhar_base(pvt);
	*hole_size = (1ULL << 32) - *hole_base;

	if (boot_cpu_data.x86 > 0xf)
		*hole_offset = f10_dhar_offset(pvt);
	else
		*hole_offset = k8_dhar_offset(pvt);

	edac_dbg(1, "  DHAR info for node %d base 0x%lx offset 0x%lx size 0x%lx\n",
		 pvt->mc_node_id, (unsigned long)*hole_base,
		 (unsigned long)*hole_offset, (unsigned long)*hole_size);

	return 0;
}
EXPORT_SYMBOL_GPL(amd64_get_dram_hole_info);

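/*
 * Worked example (illustrative values): if dhar_base() returns 0xc0000000,
 * then *hole_base = 0xc0000000 and *hole_size = 4 GB - 3 GB = 1 GB
 * (0x40000000), i.e. the top gigabyte below 4 GB is the MMIO hole and the
 * DRAM behind it is hoisted to [0x100000000, 0x140000000).
 */
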
/*
 * Return the DramAddr that the SysAddr given by @sys_addr maps to. It is
 * assumed that sys_addr maps to the node given by mci.
 *
 * The first part of section 3.4.4 (p. 70) shows how the DRAM Base (section
 * 3.4.4.1) and DRAM Limit (section 3.4.4.2) registers are used to translate a
 * SysAddr to a DramAddr. If the DRAM Hole Address Register (DHAR) is enabled,
 * then it is also involved in translating a SysAddr to a DramAddr. Sections
 * 3.4.8 and 3.5.8.2 describe the DHAR and how it is used for memory hoisting.
 * These parts of the documentation are unclear. I interpret them as follows:
 *
 * When node n receives a SysAddr, it processes the SysAddr as follows:
 *
 * 1. It extracts the DRAMBase and DRAMLimit values from the DRAM Base and DRAM
 *    Limit registers for node n. If the SysAddr is not within the range
 *    specified by the base and limit values, then node n ignores the Sysaddr
 *    (since it does not map to node n). Otherwise continue to step 2 below.
 *
 * 2. If the DramHoleValid bit of the DHAR for node n is clear, the DHAR is
 *    disabled so skip to step 3 below. Otherwise see if the SysAddr is within
 *    the range of relocated addresses (starting at 0x100000000) from the DRAM
 *    hole. If not, skip to step 3 below. Else get the value of the
 *    DramHoleOffset field from the DHAR. To obtain the DramAddr, subtract the
 *    offset defined by this value from the SysAddr.
 *
 * 3. Obtain the base address for node n from the DRAMBase field of the DRAM
 *    Base register for node n. To obtain the DramAddr, subtract the base
 *    address from the SysAddr, as shown near the start of section 3.4.4 (p.70).
 */
static u64 sys_addr_to_dram_addr(struct mem_ctl_info *mci, u64 sys_addr)
{
	struct amd64_pvt *pvt = mci->pvt_info;
	u64 dram_base, hole_base, hole_offset, hole_size, dram_addr;
	int ret;

	dram_base = get_dram_base(pvt, pvt->mc_node_id);

	ret = amd64_get_dram_hole_info(mci, &hole_base, &hole_offset,
				       &hole_size);
	if (!ret) {
		if ((sys_addr >= (1ULL << 32)) &&
		    (sys_addr < ((1ULL << 32) + hole_size))) {
			/* use DHAR to translate SysAddr to DramAddr */
			dram_addr = sys_addr - hole_offset;

			edac_dbg(2, "using DHAR to translate SysAddr 0x%lx to DramAddr 0x%lx\n",
				 (unsigned long)sys_addr,
				 (unsigned long)dram_addr);

			return dram_addr;
		}
	}

	/*
	 * Translate the SysAddr to a DramAddr as shown near the start of
	 * section 3.4.4 (p. 70). Although sys_addr is a 64-bit value, the k8
	 * only deals with 40-bit values. Therefore we discard bits 63-40 of
	 * sys_addr below. If bit 39 of sys_addr is 1 then the bits we
	 * discard are all 1s. Otherwise the bits we discard are all 0s. See
	 * section 3.4.2 of AMD publication 24592: AMD x86-64 Architecture
	 * Programmer's Manual Volume 1 Application Programming.
	 */
	dram_addr = (sys_addr & GENMASK(0, 39)) - dram_base;

	edac_dbg(2, "using DRAM Base register to translate SysAddr 0x%lx to DramAddr 0x%lx\n",
		 (unsigned long)sys_addr, (unsigned long)dram_addr);
	return dram_addr;
}

/*
 * @intlv_en is the value of the IntlvEn field from a DRAM Base register
 * (section 3.4.4.1). Return the number of bits from a SysAddr that are used
 * for node interleaving.
 */
static int num_node_interleave_bits(unsigned intlv_en)
{
	static const int intlv_shift_table[] = { 0, 1, 0, 2, 0, 0, 0, 3 };
	int n;

	BUG_ON(intlv_en > 7);
	n = intlv_shift_table[intlv_en];
	return n;
}

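/*
 * The table above covers the only architecturally valid IntlvEn encodings:
 * 0 -> no interleaving, 0x01 -> 1 bit (2 nodes), 0x03 -> 2 bits (4 nodes),
 * 0x07 -> 3 bits (8 nodes). The remaining indices are junk values that
 * find_mc_by_sys_addr() already rejects and simply map to 0 here.
 */
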
/* Translate the DramAddr given by @dram_addr to an InputAddr. */
static u64 dram_addr_to_input_addr(struct mem_ctl_info *mci, u64 dram_addr)
{
	struct amd64_pvt *pvt;
	int intlv_shift;
	u64 input_addr;

	pvt = mci->pvt_info;

	/*
	 * See the start of section 3.4.4 (p. 70, BKDG #26094, K8, revA-E)
	 * concerning translating a DramAddr to an InputAddr.
	 */
	intlv_shift = num_node_interleave_bits(dram_intlv_en(pvt, 0));
	input_addr = ((dram_addr >> intlv_shift) & GENMASK(12, 35)) +
		      (dram_addr & 0xfff);

	edac_dbg(2, "  Intlv Shift=%d DramAddr=0x%lx maps to InputAddr=0x%lx\n",
		 intlv_shift, (unsigned long)dram_addr,
		 (unsigned long)input_addr);

	return input_addr;
}

/*
 * Translate the SysAddr represented by @sys_addr to an InputAddr. It is
 * assumed that @sys_addr maps to the node given by mci.
 */
static u64 sys_addr_to_input_addr(struct mem_ctl_info *mci, u64 sys_addr)
{
	u64 input_addr;

	input_addr =
	    dram_addr_to_input_addr(mci, sys_addr_to_dram_addr(mci, sys_addr));

	edac_dbg(2, "SysAddr 0x%lx translates to InputAddr 0x%lx\n",
		 (unsigned long)sys_addr, (unsigned long)input_addr);

	return input_addr;
}

/* Map the Error address to a PAGE and PAGE OFFSET. */
static inline void error_address_to_page_and_offset(u64 error_address,
						    struct err_info *err)
{
	err->page = (u32) (error_address >> PAGE_SHIFT);
	err->offset = ((u32) error_address) & ~PAGE_MASK;
}

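/*
 * Example with 4K pages (PAGE_SHIFT == 12): an error_address of 0x12345678
 * yields err->page = 0x12345 and err->offset = 0x678.
 */
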
/*
 * @sys_addr is an error address (a SysAddr) extracted from the MCA NB Address
 * Low (section 3.6.4.5) and MCA NB Address High (section 3.6.4.6) registers
 * of a node that detected an ECC memory error. mci represents the node that
 * the error address maps to (possibly different from the node that detected
 * the error). Return the number of the csrow that sys_addr maps to, or -1 on
 * error.
 */
static int sys_addr_to_csrow(struct mem_ctl_info *mci, u64 sys_addr)
{
	int csrow;

	csrow = input_addr_to_csrow(mci, sys_addr_to_input_addr(mci, sys_addr));

	if (csrow == -1)
		amd64_mc_err(mci, "Failed to translate InputAddr to csrow for "
				  "address 0x%lx\n", (unsigned long)sys_addr);
	return csrow;
}

static int get_channel_from_ecc_syndrome(struct mem_ctl_info *, u16);

/*
 * Determine if the DIMMs have ECC enabled. ECC is enabled ONLY if all the DIMMs
 * are ECC capable.
 */
static unsigned long amd64_determine_edac_cap(struct amd64_pvt *pvt)
{
	u8 bit;
	unsigned long edac_cap = EDAC_FLAG_NONE;

	bit = (boot_cpu_data.x86 > 0xf || pvt->ext_model >= K8_REV_F)
		? 19
		: 17;

	if (pvt->dclr0 & BIT(bit))
		edac_cap = EDAC_FLAG_SECDED;

	return edac_cap;
}

static void amd64_debug_display_dimm_sizes(struct amd64_pvt *, u8);

static void amd64_dump_dramcfg_low(u32 dclr, int chan)
{
	edac_dbg(1, "F2x%d90 (DRAM Cfg Low): 0x%08x\n", chan, dclr);

	edac_dbg(1, "  DIMM type: %sbuffered; all DIMMs support ECC: %s\n",
		 (dclr & BIT(16)) ? "un" : "",
		 (dclr & BIT(19)) ? "yes" : "no");

	edac_dbg(1, "  PAR/ERR parity: %s\n",
		 (dclr & BIT(8)) ? "enabled" : "disabled");

	if (boot_cpu_data.x86 == 0x10)
		edac_dbg(1, "  DCT 128bit mode width: %s\n",
			 (dclr & BIT(11)) ? "128b" : "64b");

	edac_dbg(1, "  x4 logical DIMMs present: L0: %s L1: %s L2: %s L3: %s\n",
		 (dclr & BIT(12)) ? "yes" : "no",
		 (dclr & BIT(13)) ? "yes" : "no",
		 (dclr & BIT(14)) ? "yes" : "no",
		 (dclr & BIT(15)) ? "yes" : "no");
}

/* Display and decode various NB registers for debug purposes. */
static void dump_misc_regs(struct amd64_pvt *pvt)
{
	edac_dbg(1, "F3xE8 (NB Cap): 0x%08x\n", pvt->nbcap);

	edac_dbg(1, "  NB two channel DRAM capable: %s\n",
		 (pvt->nbcap & NBCAP_DCT_DUAL) ? "yes" : "no");

	edac_dbg(1, "  ECC capable: %s, ChipKill ECC capable: %s\n",
		 (pvt->nbcap & NBCAP_SECDED) ? "yes" : "no",
		 (pvt->nbcap & NBCAP_CHIPKILL) ? "yes" : "no");

	amd64_dump_dramcfg_low(pvt->dclr0, 0);

	edac_dbg(1, "F3xB0 (Online Spare): 0x%08x\n", pvt->online_spare);

	edac_dbg(1, "F1xF0 (DRAM Hole Address): 0x%08x, base: 0x%08x, offset: 0x%08x\n",
		 pvt->dhar, dhar_base(pvt),
		 (boot_cpu_data.x86 == 0xf) ? k8_dhar_offset(pvt)
					    : f10_dhar_offset(pvt));

	edac_dbg(1, "  DramHoleValid: %s\n", dhar_valid(pvt) ? "yes" : "no");

	amd64_debug_display_dimm_sizes(pvt, 0);

	/* everything below this point is Fam10h and above */
	if (boot_cpu_data.x86 == 0xf)
		return;

	amd64_debug_display_dimm_sizes(pvt, 1);

	amd64_info("using %s syndromes.\n", ((pvt->ecc_sym_sz == 8) ? "x8" : "x4"));

	/* Only if NOT ganged does dclr1 have valid info */
	if (!dct_ganging_enabled(pvt))
		amd64_dump_dramcfg_low(pvt->dclr1, 1);
}

/*
 * see BKDG, F2x[1,0][5C:40], F2[1,0][6C:60]
 */
static void prep_chip_selects(struct amd64_pvt *pvt)
{
	if (boot_cpu_data.x86 == 0xf && pvt->ext_model < K8_REV_F) {
		pvt->csels[0].b_cnt = pvt->csels[1].b_cnt = 8;
		pvt->csels[0].m_cnt = pvt->csels[1].m_cnt = 8;
	} else {
		pvt->csels[0].b_cnt = pvt->csels[1].b_cnt = 8;
		pvt->csels[0].m_cnt = pvt->csels[1].m_cnt = 4;
	}
}

/*
 * Function 2 Offset F10_DCSB0; read in the DCS Base and DCS Mask registers
 */
static void read_dct_base_mask(struct amd64_pvt *pvt)
{
	int cs;

	prep_chip_selects(pvt);

	for_each_chip_select(cs, 0, pvt) {
		int reg0   = DCSB0 + (cs * 4);
		int reg1   = DCSB1 + (cs * 4);
		u32 *base0 = &pvt->csels[0].csbases[cs];
		u32 *base1 = &pvt->csels[1].csbases[cs];

		if (!amd64_read_dct_pci_cfg(pvt, reg0, base0))
			edac_dbg(0, "  DCSB0[%d]=0x%08x reg: F2x%x\n",
				 cs, *base0, reg0);

		if (boot_cpu_data.x86 == 0xf || dct_ganging_enabled(pvt))
			continue;

		if (!amd64_read_dct_pci_cfg(pvt, reg1, base1))
			edac_dbg(0, "  DCSB1[%d]=0x%08x reg: F2x%x\n",
				 cs, *base1, reg1);
	}

	for_each_chip_select_mask(cs, 0, pvt) {
		int reg0   = DCSM0 + (cs * 4);
		int reg1   = DCSM1 + (cs * 4);
		u32 *mask0 = &pvt->csels[0].csmasks[cs];
		u32 *mask1 = &pvt->csels[1].csmasks[cs];

		if (!amd64_read_dct_pci_cfg(pvt, reg0, mask0))
			edac_dbg(0, "  DCSM0[%d]=0x%08x reg: F2x%x\n",
				 cs, *mask0, reg0);

		if (boot_cpu_data.x86 == 0xf || dct_ganging_enabled(pvt))
			continue;

		if (!amd64_read_dct_pci_cfg(pvt, reg1, mask1))
			edac_dbg(0, "  DCSM1[%d]=0x%08x reg: F2x%x\n",
				 cs, *mask1, reg1);
	}
}

static enum mem_type amd64_determine_memory_type(struct amd64_pvt *pvt, int cs)
{
	enum mem_type type;

	/* F15h supports only DDR3 */
	if (boot_cpu_data.x86 >= 0x15)
		type = (pvt->dclr0 & BIT(16)) ? MEM_DDR3 : MEM_RDDR3;
	else if (boot_cpu_data.x86 == 0x10 || pvt->ext_model >= K8_REV_F) {
		if (pvt->dchr0 & DDR3_MODE)
			type = (pvt->dclr0 & BIT(16)) ? MEM_DDR3 : MEM_RDDR3;
		else
			type = (pvt->dclr0 & BIT(16)) ? MEM_DDR2 : MEM_RDDR2;
	} else {
		type = (pvt->dclr0 & BIT(18)) ? MEM_DDR : MEM_RDDR;
	}

	amd64_info("CS%d: %s\n", cs, edac_mem_types[type]);

	return type;
}

/* Get the number of DCT channels the memory controller is using. */
static int k8_early_channel_count(struct amd64_pvt *pvt)
{
	int flag;

	if (pvt->ext_model >= K8_REV_F)
		/* RevF (NPT) and later */
		flag = pvt->dclr0 & WIDTH_128;
	else
		/* RevE and earlier */
		flag = pvt->dclr0 & REVE_WIDTH_128;

	/* not used */
	pvt->dclr1 = 0;

	return (flag) ? 2 : 1;
}

/* On F10h and later ErrAddr is MC4_ADDR[47:1] */
static u64 get_error_address(struct mce *m)
{
	struct cpuinfo_x86 *c = &boot_cpu_data;
	u64 addr;
	u8 start_bit = 1;
	u8 end_bit   = 47;

	if (c->x86 == 0xf) {
		start_bit = 3;
		end_bit   = 39;
	}

	addr = m->addr & GENMASK(start_bit, end_bit);

	/*
	 * Erratum 637 workaround
	 */
	if (c->x86 == 0x15) {
		struct amd64_pvt *pvt;
		u64 cc6_base, tmp_addr;
		u32 tmp;
		u16 mce_nid;
		u8 intlv_en;

		if ((addr & GENMASK(24, 47)) >> 24 != 0x00fdf7)
			return addr;

		mce_nid	= amd_get_nb_id(m->extcpu);
		pvt	= mcis[mce_nid]->pvt_info;

		amd64_read_pci_cfg(pvt->F1, DRAM_LOCAL_NODE_LIM, &tmp);
		intlv_en = tmp >> 21 & 0x7;

		/* add [47:27] + 3 trailing bits */
		cc6_base = (tmp & GENMASK(0, 20)) << 3;

		/* reverse and add DramIntlvEn */
		cc6_base |= intlv_en ^ 0x7;

		/* pin 0 */
		cc6_base <<= 24;

		if (!intlv_en)
			return cc6_base | (addr & GENMASK(0, 23));

		amd64_read_pci_cfg(pvt->F1, DRAM_LOCAL_NODE_BASE, &tmp);

		/* faster log2 */
		tmp_addr = (addr & GENMASK(12, 23)) << __fls(intlv_en + 1);

		/* OR DramIntlvSel into bits [14:12] */
		tmp_addr |= (tmp & GENMASK(21, 23)) >> 9;

		/* add remaining [11:0] bits from original MC4_ADDR */
		tmp_addr |= addr & GENMASK(0, 11);

		return cc6_base | tmp_addr;
	}

	return addr;
}

static struct pci_dev *pci_get_related_function(unsigned int vendor,
						unsigned int device,
						struct pci_dev *related)
{
	struct pci_dev *dev = NULL;

	while ((dev = pci_get_device(vendor, device, dev))) {
		if (pci_domain_nr(dev->bus) == pci_domain_nr(related->bus) &&
		    (dev->bus->number == related->bus->number) &&
		    (PCI_SLOT(dev->devfn) == PCI_SLOT(related->devfn)))
			break;
	}

	return dev;
}

static void read_dram_base_limit_regs(struct amd64_pvt *pvt, unsigned range)
{
	struct amd_northbridge *nb;
	struct pci_dev *misc, *f1 = NULL;
	struct cpuinfo_x86 *c = &boot_cpu_data;
	int off = range << 3;
	u32 llim;

	amd64_read_pci_cfg(pvt->F1, DRAM_BASE_LO + off,  &pvt->ranges[range].base.lo);
	amd64_read_pci_cfg(pvt->F1, DRAM_LIMIT_LO + off, &pvt->ranges[range].lim.lo);

	if (c->x86 == 0xf)
		return;

	if (!dram_rw(pvt, range))
		return;

	amd64_read_pci_cfg(pvt->F1, DRAM_BASE_HI + off,  &pvt->ranges[range].base.hi);
	amd64_read_pci_cfg(pvt->F1, DRAM_LIMIT_HI + off, &pvt->ranges[range].lim.hi);

	/* F15h: factor in CC6 save area by reading dst node's limit reg */
	if (c->x86 != 0x15)
		return;

	nb = node_to_amd_nb(dram_dst_node(pvt, range));
	if (WARN_ON(!nb))
		return;

	misc = nb->misc;
	f1 = pci_get_related_function(misc->vendor, PCI_DEVICE_ID_AMD_15H_NB_F1, misc);
	if (WARN_ON(!f1))
		return;

	amd64_read_pci_cfg(f1, DRAM_LOCAL_NODE_LIM, &llim);

	pvt->ranges[range].lim.lo &= GENMASK(0, 15);

				    /* {[39:27],111b} */
	pvt->ranges[range].lim.lo |= ((llim & 0x1fff) << 3 | 0x7) << 16;

	pvt->ranges[range].lim.hi &= GENMASK(0, 7);

				    /* [47:40] */
	pvt->ranges[range].lim.hi |= llim >> 13;

	pci_dev_put(f1);
}

static void k8_map_sysaddr_to_csrow(struct mem_ctl_info *mci, u64 sys_addr,
				    struct err_info *err)
{
	struct amd64_pvt *pvt = mci->pvt_info;

	error_address_to_page_and_offset(sys_addr, err);

	/*
	 * Find out which node the error address belongs to. This may be
	 * different from the node that detected the error.
	 */
	err->src_mci = find_mc_by_sys_addr(mci, sys_addr);
	if (!err->src_mci) {
		amd64_mc_err(mci, "failed to map error addr 0x%lx to a node\n",
			     (unsigned long)sys_addr);
		err->err_code = ERR_NODE;
		return;
	}

	/* Now map the sys_addr to a CSROW */
	err->csrow = sys_addr_to_csrow(err->src_mci, sys_addr);
	if (err->csrow < 0) {
		err->err_code = ERR_CSROW;
		return;
	}

	/* CHIPKILL enabled */
	if (pvt->nbcfg & NBCFG_CHIPKILL) {
		err->channel = get_channel_from_ecc_syndrome(mci, err->syndrome);
		if (err->channel < 0) {
			/*
			 * Syndrome didn't map, so we don't know which of the
			 * 2 DIMMs is in error. So we need to ID 'both' of them
			 * as suspect.
			 */
			amd64_mc_warn(err->src_mci, "unknown syndrome 0x%04x - "
				      "possible error reporting race\n",
				      err->syndrome);
			err->err_code = ERR_CHANNEL;
			return;
		}
	} else {
		/*
		 * non-chipkill ecc mode
		 *
		 * The k8 documentation is unclear about how to determine the
		 * channel number when using non-chipkill memory. This method
		 * was obtained from email communication with someone at AMD.
		 * (Wish the email was placed in this comment - norsk)
		 */
		err->channel = ((sys_addr & BIT(3)) != 0);
	}
}

static int ddr2_cs_size(unsigned i, bool dct_width)
{
	unsigned shift = 0;

	if (i <= 2)
		shift = i;
	else if (!(i & 0x1))
		shift = i >> 1;
	else
		shift = (i + 1) >> 1;

	return 128 << (shift + !!dct_width);
}

static int k8_dbam_to_chip_select(struct amd64_pvt *pvt, u8 dct,
				  unsigned cs_mode)
{
	u32 dclr = dct ? pvt->dclr1 : pvt->dclr0;

	if (pvt->ext_model >= K8_REV_F) {
		WARN_ON(cs_mode > 11);
		return ddr2_cs_size(cs_mode, dclr & WIDTH_128);
	} else if (pvt->ext_model >= K8_REV_D) {
		unsigned diff;
		WARN_ON(cs_mode > 10);

		/*
		 * the below calculation, besides trying to win an obfuscated C
		 * contest, maps cs_mode values to DIMM chip select sizes. The
		 * mappings are:
		 *
		 * cs_mode	CS size (mb)
		 * =======	============
		 * 0		32
		 * 1		64
		 * 2		128
		 * 3		128
		 * 4		256
		 * 5		512
		 * 6		256
		 * 7		512
		 * 8		1024
		 * 9		1024
		 * 10		2048
		 *
		 * Basically, it calculates a value with which to shift the
		 * smallest CS size of 32MB.
		 *
		 * ddr[23]_cs_size have a similar purpose.
		 */
		diff = cs_mode/3 + (unsigned)(cs_mode > 5);

		return 32 << (cs_mode - diff);
	} else {
		WARN_ON(cs_mode > 6);
		return 32 << cs_mode;
	}
}

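/*
 * Worked example of the revD/E formula above: cs_mode = 6 gives
 * diff = 6/3 + (6 > 5) = 3, hence 32 << (6 - 3) = 256 MB, matching the
 * table entry for cs_mode 6.
 */
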
/*
 * Get the number of DCT channels in use.
 *
 * Return:
 *	number of Memory Channels in operation
 * Pass back:
 *	contents of the DCL0_LOW register
 */
static int f1x_early_channel_count(struct amd64_pvt *pvt)
{
	int i, j, channels = 0;

	/* On F10h, if we are in 128 bit mode, then we are using 2 channels */
	if (boot_cpu_data.x86 == 0x10 && (pvt->dclr0 & WIDTH_128))
		return 2;

	/*
	 * Need to check if in unganged mode: In such, there are 2 channels,
	 * but they are not in 128 bit mode and thus the above 'dclr0' status
	 * bit will be OFF.
	 *
	 * Need to check DCT0[0] and DCT1[0] to see if only one of them has
	 * their CSEnable bit on. If so, then SINGLE DIMM case.
	 */
	edac_dbg(0, "Data width is not 128 bits - need more decoding\n");

	/*
	 * Check DRAM Bank Address Mapping values for each DIMM to see if there
	 * is more than just one DIMM present in unganged mode. Need to check
	 * both controllers since DIMMs can be placed in either one.
	 */
	for (i = 0; i < 2; i++) {
		u32 dbam = (i ? pvt->dbam1 : pvt->dbam0);

		for (j = 0; j < 4; j++) {
			if (DBAM_DIMM(j, dbam) > 0) {
				channels++;
				break;
			}
		}
	}

	if (channels > 2)
		channels = 2;

	amd64_info("MCT channel count: %d\n", channels);

	return channels;
}

static int ddr3_cs_size(unsigned i, bool dct_width)
{
	unsigned shift = 0;
	int cs_size = 0;

	if (i == 0 || i == 3 || i == 4)
		cs_size = -1;
	else if (i <= 2)
		shift = i;
	else if (i == 12)
		shift = 7;
	else if (!(i & 0x1))
		shift = i >> 1;
	else
		shift = (i + 1) >> 1;

	if (cs_size != -1)
		cs_size = (128 * (1 << !!dct_width)) << shift;

	return cs_size;
}

static int f10_dbam_to_chip_select(struct amd64_pvt *pvt, u8 dct,
				   unsigned cs_mode)
{
	u32 dclr = dct ? pvt->dclr1 : pvt->dclr0;

	WARN_ON(cs_mode > 11);

	if (pvt->dchr0 & DDR3_MODE || pvt->dchr1 & DDR3_MODE)
		return ddr3_cs_size(cs_mode, dclr & WIDTH_128);
	else
		return ddr2_cs_size(cs_mode, dclr & WIDTH_128);
}

/*
 * F15h supports only 64bit DCT interfaces
 */
static int f15_dbam_to_chip_select(struct amd64_pvt *pvt, u8 dct,
				   unsigned cs_mode)
{
	WARN_ON(cs_mode > 12);

	return ddr3_cs_size(cs_mode, false);
}

static void read_dram_ctl_register(struct amd64_pvt *pvt)
{
	if (boot_cpu_data.x86 == 0xf)
		return;

	if (!amd64_read_dct_pci_cfg(pvt, DCT_SEL_LO, &pvt->dct_sel_lo)) {
		edac_dbg(0, "F2x110 (DCTSelLow): 0x%08x, High range addrs at: 0x%x\n",
			 pvt->dct_sel_lo, dct_sel_baseaddr(pvt));

		edac_dbg(0, "  DCTs operate in %s mode\n",
			 (dct_ganging_enabled(pvt) ? "ganged" : "unganged"));

		if (!dct_ganging_enabled(pvt))
			edac_dbg(0, "  Address range split per DCT: %s\n",
				 (dct_high_range_enabled(pvt) ? "yes" : "no"));

		edac_dbg(0, "  data interleave for ECC: %s, DRAM cleared since last warm reset: %s\n",
			 (dct_data_intlv_enabled(pvt) ? "enabled" : "disabled"),
			 (dct_memory_cleared(pvt) ? "yes" : "no"));

		edac_dbg(0, "  channel interleave: %s, "
			 "interleave bits selector: 0x%x\n",
			 (dct_interleave_enabled(pvt) ? "enabled" : "disabled"),
			 dct_sel_interleave_addr(pvt));
	}

	amd64_read_dct_pci_cfg(pvt, DCT_SEL_HI, &pvt->dct_sel_hi);
}

/*
 * Determine channel (DCT) based on the interleaving mode: F10h BKDG, 2.8.9
 * Memory Interleaving Modes.
 */
static u8 f1x_determine_channel(struct amd64_pvt *pvt, u64 sys_addr,
				bool hi_range_sel, u8 intlv_en)
{
	u8 dct_sel_high = (pvt->dct_sel_lo >> 1) & 1;

	if (dct_ganging_enabled(pvt))
		return 0;

	if (hi_range_sel)
		return dct_sel_high;

	/*
	 * see F2x110[DctSelIntLvAddr] - channel interleave mode
	 */
	if (dct_interleave_enabled(pvt)) {
		u8 intlv_addr = dct_sel_interleave_addr(pvt);

		/* return DCT select function: 0=DCT0, 1=DCT1 */
		if (!intlv_addr)
			return sys_addr >> 6 & 1;

		if (intlv_addr & 0x2) {
			u8 shift = intlv_addr & 0x1 ? 9 : 6;
			u32 temp = hweight_long((u32) ((sys_addr >> 16) & 0x1F)) % 2;

			return ((sys_addr >> shift) & 1) ^ temp;
		}

		return (sys_addr >> (12 + hweight8(intlv_en))) & 1;
	}

	if (dct_high_range_enabled(pvt))
		return ~dct_sel_high & 1;

	return 0;
}

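/*
 * Example (illustrative): with DctSelIntLvAddr == 0, bit 6 of the SysAddr
 * selects the DCT, i.e. the channels alternate every 64 bytes. The
 * 'intlv_addr & 0x2' encodings mix in the parity of sys_addr[20:16] so
 * that power-of-two strides still spread across both channels.
 */
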
/* Convert the sys_addr to the normalized DCT address */
static u64 f1x_get_norm_dct_addr(struct amd64_pvt *pvt, u8 range,
				 u64 sys_addr, bool hi_rng,
				 u32 dct_sel_base_addr)
{
	u64 chan_off;
	u64 dram_base		= get_dram_base(pvt, range);
	u64 hole_off		= f10_dhar_offset(pvt);
	u64 dct_sel_base_off	= (pvt->dct_sel_hi & 0xFFFFFC00) << 16;

	if (hi_rng) {
		/*
		 * if
		 * base address of high range is below 4Gb
		 * (bits [47:27] at [31:11])
		 * DRAM address space on this DCT is hoisted above 4Gb	&&
		 * sys_addr > 4Gb
		 *
		 *	remove hole offset from sys_addr
		 * else
		 *	remove high range offset from sys_addr
		 */
		if ((!(dct_sel_base_addr >> 16) ||
		     dct_sel_base_addr < dhar_base(pvt)) &&
		    dhar_valid(pvt) &&
		    (sys_addr >= BIT_64(32)))
			chan_off = hole_off;
		else
			chan_off = dct_sel_base_off;
	} else {
		/*
		 * if
		 * we have a valid hole		&&
		 * sys_addr > 4Gb
		 *
		 *	remove hole
		 * else
		 *	remove dram base to normalize to DCT address
		 */
		if (dhar_valid(pvt) && (sys_addr >= BIT_64(32)))
			chan_off = hole_off;
		else
			chan_off = dram_base;
	}

	return (sys_addr & GENMASK(6, 47)) - (chan_off & GENMASK(23, 47));
}

/*
 * checks if the csrow passed in is marked as SPARED, if so returns the new
 * spare row
 */
static int f10_process_possible_spare(struct amd64_pvt *pvt, u8 dct, int csrow)
{
	int tmp_cs;

	if (online_spare_swap_done(pvt, dct) &&
	    csrow == online_spare_bad_dramcs(pvt, dct)) {

		for_each_chip_select(tmp_cs, dct, pvt) {
			if (chip_select_base(tmp_cs, dct, pvt) & 0x2) {
				csrow = tmp_cs;
				break;
			}
		}
	}
	return csrow;
}

/*
 * Iterate over the DRAM DCT "base" and "mask" registers looking for a
 * SystemAddr match on the specified 'ChannelSelect' and 'NodeID'
 *
 * Return:
 *	-EINVAL:  NOT FOUND
 *	0..csrow = Chip-Select Row
 */
static int f1x_lookup_addr_in_dct(u64 in_addr, u8 nid, u8 dct)
{
	struct mem_ctl_info *mci;
	struct amd64_pvt *pvt;
	u64 cs_base, cs_mask;
	int cs_found = -EINVAL;
	int csrow;

	mci = mcis[nid];
	if (!mci)
		return cs_found;

	pvt = mci->pvt_info;

	edac_dbg(1, "input addr: 0x%llx, DCT: %d\n", in_addr, dct);

	for_each_chip_select(csrow, dct, pvt) {
		if (!csrow_enabled(csrow, dct, pvt))
			continue;

		get_cs_base_and_mask(pvt, csrow, dct, &cs_base, &cs_mask);

		edac_dbg(1, "    CSROW=%d CSBase=0x%llx CSMask=0x%llx\n",
			 csrow, cs_base, cs_mask);

		cs_mask = ~cs_mask;

		edac_dbg(1, "    (InputAddr & ~CSMask)=0x%llx (CSBase & ~CSMask)=0x%llx\n",
			 (in_addr & cs_mask), (cs_base & cs_mask));

		if ((in_addr & cs_mask) == (cs_base & cs_mask)) {
			cs_found = f10_process_possible_spare(pvt, dct, csrow);

			edac_dbg(1, " MATCH csrow=%d\n", cs_found);
			break;
		}
	}
	return cs_found;
}

/*
 * See F2x10C. Non-interleaved graphics framebuffer memory under the 16G is
 * swapped with a region located at the bottom of memory so that the GPU can use
 * the interleaved region and thus two channels.
 */
static u64 f1x_swap_interleaved_region(struct amd64_pvt *pvt, u64 sys_addr)
{
	u32 swap_reg, swap_base, swap_limit, rgn_size, tmp_addr;

	if (boot_cpu_data.x86 == 0x10) {
		/* only revC3 and revE have that feature */
		if (boot_cpu_data.x86_model < 4 ||
		    (boot_cpu_data.x86_model < 0xa &&
		     boot_cpu_data.x86_mask < 3))
			return sys_addr;
	}

	amd64_read_dct_pci_cfg(pvt, SWAP_INTLV_REG, &swap_reg);

	if (!(swap_reg & 0x1))
		return sys_addr;

	swap_base	= (swap_reg >> 3) & 0x7f;
	swap_limit	= (swap_reg >> 11) & 0x7f;
	rgn_size	= (swap_reg >> 20) & 0x7f;
	tmp_addr	= sys_addr >> 27;

	if (!(sys_addr >> 34) &&
	    (((tmp_addr >= swap_base) &&
	     (tmp_addr <= swap_limit)) ||
	     (tmp_addr < rgn_size)))
		return sys_addr ^ (u64)swap_base << 27;

	return sys_addr;
}

/* For a given @dram_range, check if @sys_addr falls within it. */
static int f1x_match_to_this_node(struct amd64_pvt *pvt, unsigned range,
				  u64 sys_addr, int *chan_sel)
{
	int cs_found = -EINVAL;
	u64 chan_addr;
	u32 dct_sel_base;
	u8 channel;
	bool high_range = false;

	u8 node_id    = dram_dst_node(pvt, range);
	u8 intlv_en   = dram_intlv_en(pvt, range);
	u32 intlv_sel = dram_intlv_sel(pvt, range);

	edac_dbg(1, "(range %d) SystemAddr= 0x%llx Limit=0x%llx\n",
		 range, sys_addr, get_dram_limit(pvt, range));

	if (dhar_valid(pvt) &&
	    dhar_base(pvt) <= sys_addr &&
	    sys_addr < BIT_64(32)) {
		amd64_warn("Huh? Address is in the MMIO hole: 0x%016llx\n",
			   sys_addr);
		return -EINVAL;
	}

	if (intlv_en && (intlv_sel != ((sys_addr >> 12) & intlv_en)))
		return -EINVAL;

	sys_addr = f1x_swap_interleaved_region(pvt, sys_addr);

	dct_sel_base = dct_sel_baseaddr(pvt);

	/*
	 * check whether addresses >= DctSelBaseAddr[47:27] are to be used to
	 * select between DCT0 and DCT1.
	 */
	if (dct_high_range_enabled(pvt) &&
	    !dct_ganging_enabled(pvt) &&
	    ((sys_addr >> 27) >= (dct_sel_base >> 11)))
		high_range = true;

	channel = f1x_determine_channel(pvt, sys_addr, high_range, intlv_en);

	chan_addr = f1x_get_norm_dct_addr(pvt, range, sys_addr,
					  high_range, dct_sel_base);

	/* Remove node interleaving, see F1x120 */
	if (intlv_en)
		chan_addr = ((chan_addr >> (12 + hweight8(intlv_en))) << 12) |
			    (chan_addr & 0xfff);

	/* remove channel interleave */
	if (dct_interleave_enabled(pvt) &&
	    !dct_high_range_enabled(pvt) &&
	    !dct_ganging_enabled(pvt)) {

		if (dct_sel_interleave_addr(pvt) != 1) {
			if (dct_sel_interleave_addr(pvt) == 0x3)
				/* hash 9 */
				chan_addr = ((chan_addr >> 10) << 9) |
					    (chan_addr & 0x1ff);
			else
				/* A[6] or hash 6 */
				chan_addr = ((chan_addr >> 7) << 6) |
					    (chan_addr & 0x3f);
		} else
			/* A[12] */
			chan_addr = ((chan_addr >> 13) << 12) |
				    (chan_addr & 0xfff);
	}

	edac_dbg(1, "   Normalized DCT addr: 0x%llx\n", chan_addr);

	cs_found = f1x_lookup_addr_in_dct(chan_addr, node_id, channel);

	if (cs_found >= 0)
		*chan_sel = channel;

	return cs_found;
}

static int f1x_translate_sysaddr_to_cs(struct amd64_pvt *pvt, u64 sys_addr,
				       int *chan_sel)
{
	int cs_found = -EINVAL;
	unsigned range;

	for (range = 0; range < DRAM_RANGES; range++) {

		if (!dram_rw(pvt, range))
			continue;

		if ((get_dram_base(pvt, range)  <= sys_addr) &&
		    (get_dram_limit(pvt, range) >= sys_addr)) {

			cs_found = f1x_match_to_this_node(pvt, range,
							  sys_addr, chan_sel);
			if (cs_found >= 0)
				break;
		}
	}
	return cs_found;
}

/*
 * For reference see "2.8.5 Routing DRAM Requests" in F10 BKDG. This code maps
 * a @sys_addr to NodeID, DCT (channel) and chip select (CSROW).
 *
 * The @sys_addr is usually an error address received from the hardware
 * (MCX_ADDR).
 */
static void f1x_map_sysaddr_to_csrow(struct mem_ctl_info *mci, u64 sys_addr,
				     struct err_info *err)
{
	struct amd64_pvt *pvt = mci->pvt_info;

	error_address_to_page_and_offset(sys_addr, err);

	err->csrow = f1x_translate_sysaddr_to_cs(pvt, sys_addr, &err->channel);
	if (err->csrow < 0) {
		err->err_code = ERR_CSROW;
		return;
	}

	/*
	 * We need the syndromes for channel detection only when we're
	 * ganged. Otherwise @chan should already contain the channel at
	 * this point.
	 */
	if (dct_ganging_enabled(pvt))
		err->channel = get_channel_from_ecc_syndrome(mci, err->syndrome);
}

/*
 * debug routine to display the memory sizes of all logical DIMMs and its
 * CSROWs
 */
static void amd64_debug_display_dimm_sizes(struct amd64_pvt *pvt, u8 ctrl)
{
	int dimm, size0, size1;
	u32 *dcsb = ctrl ? pvt->csels[1].csbases : pvt->csels[0].csbases;
	u32 dbam  = ctrl ? pvt->dbam1 : pvt->dbam0;

	if (boot_cpu_data.x86 == 0xf) {
		/* K8 families < revF not supported yet */
		if (pvt->ext_model < K8_REV_F)
			return;
		else
			WARN_ON(ctrl != 0);
	}

	dbam = (ctrl && !dct_ganging_enabled(pvt)) ? pvt->dbam1 : pvt->dbam0;
	dcsb = (ctrl && !dct_ganging_enabled(pvt)) ? pvt->csels[1].csbases
						   : pvt->csels[0].csbases;

	edac_dbg(1, "F2x%d80 (DRAM Bank Address Mapping): 0x%08x\n",
		 ctrl, dbam);

	edac_printk(KERN_DEBUG, EDAC_MC, "DCT%d chip selects:\n", ctrl);

	/* Dump memory sizes for DIMM and its CSROWs */
	for (dimm = 0; dimm < 4; dimm++) {

		size0 = 0;
		if (dcsb[dimm*2] & DCSB_CS_ENABLE)
			size0 = pvt->ops->dbam_to_cs(pvt, ctrl,
						     DBAM_DIMM(dimm, dbam));

		size1 = 0;
		if (dcsb[dimm*2 + 1] & DCSB_CS_ENABLE)
			size1 = pvt->ops->dbam_to_cs(pvt, ctrl,
						     DBAM_DIMM(dimm, dbam));

		amd64_info(EDAC_MC ": %d: %5dMB %d: %5dMB\n",
			   dimm * 2,     size0,
			   dimm * 2 + 1, size1);
	}
}

static struct amd64_family_type amd64_family_types[] = {
	[K8_CPUS] = {
		.ctl_name = "K8",
		.f1_id = PCI_DEVICE_ID_AMD_K8_NB_ADDRMAP,
		.f3_id = PCI_DEVICE_ID_AMD_K8_NB_MISC,
		.ops = {
			.early_channel_count	= k8_early_channel_count,
			.map_sysaddr_to_csrow	= k8_map_sysaddr_to_csrow,
			.dbam_to_cs		= k8_dbam_to_chip_select,
			.read_dct_pci_cfg	= k8_read_dct_pci_cfg,
		}
	},
	[F10_CPUS] = {
		.ctl_name = "F10h",
		.f1_id = PCI_DEVICE_ID_AMD_10H_NB_MAP,
		.f3_id = PCI_DEVICE_ID_AMD_10H_NB_MISC,
		.ops = {
			.early_channel_count	= f1x_early_channel_count,
			.map_sysaddr_to_csrow	= f1x_map_sysaddr_to_csrow,
			.dbam_to_cs		= f10_dbam_to_chip_select,
			.read_dct_pci_cfg	= f10_read_dct_pci_cfg,
		}
	},
	[F15_CPUS] = {
		.ctl_name = "F15h",
		.f1_id = PCI_DEVICE_ID_AMD_15H_NB_F1,
		.f3_id = PCI_DEVICE_ID_AMD_15H_NB_F3,
		.ops = {
			.early_channel_count	= f1x_early_channel_count,
			.map_sysaddr_to_csrow	= f1x_map_sysaddr_to_csrow,
			.dbam_to_cs		= f15_dbam_to_chip_select,
			.read_dct_pci_cfg	= f15_read_dct_pci_cfg,
		}
	},
};

/*
 * These are tables of eigenvectors (one per line) which can be used for the
 * construction of the syndrome tables. The modified syndrome search algorithm
 * uses those to find the symbol in error and thus the DIMM.
 *
 * Algorithm courtesy of Ross LaFetra from AMD.
 */
static const u16 x4_vectors[] = {
	0x2f57, 0x1afe, 0x66cc, 0xdd88,
	0x11eb, 0x3396, 0x7f4c, 0xeac8,
	0x0001, 0x0002, 0x0004, 0x0008,
	0x1013, 0x3032, 0x4044, 0x8088,
	0x106b, 0x30d6, 0x70fc, 0xe0a8,
	0x4857, 0xc4fe, 0x13cc, 0x3288,
	0x1ac5, 0x2f4a, 0x5394, 0xa1e8,
	0x1f39, 0x251e, 0xbd6c, 0x6bd8,
	0x15c1, 0x2a42, 0x89ac, 0x4758,
	0x2b03, 0x1602, 0x4f0c, 0xca08,
	0x1f07, 0x3a0e, 0x6b04, 0xbd08,
	0x8ba7, 0x465e, 0x244c, 0x1cc8,
	0x2b87, 0x164e, 0x642c, 0xdc18,
	0x40b9, 0x80de, 0x1094, 0x20e8,
	0x27db, 0x1eb6, 0x9dac, 0x7b58,
	0x11c1, 0x2242, 0x84ac, 0x4c58,
	0x1be5, 0x2d7a, 0x5e34, 0xa718,
	0x4b39, 0x8d1e, 0x14b4, 0x28d8,
	0x4c97, 0xc87e, 0x11fc, 0x33a8,
	0x8e97, 0x497e, 0x2ffc, 0x1aa8,
	0x16b3, 0x3d62, 0x4f34, 0x8518,
	0x1e2f, 0x391a, 0x5cac, 0xf858,
	0x1d9f, 0x3b7a, 0x572c, 0xfe18,
	0x15f5, 0x2a5a, 0x5264, 0xa3b8,
	0x1dbb, 0x3b66, 0x715c, 0xe3f8,
	0x4397, 0xc27e, 0x17fc, 0x3ea8,
	0x1617, 0x3d3e, 0x6464, 0xb8b8,
	0x23ff, 0x12aa, 0xab6c, 0x56d8,
	0x2dfb, 0x1ba6, 0x913c, 0x7328,
	0x185d, 0x2ca6, 0x7914, 0x9e28,
	0x171b, 0x3e36, 0x7d7c, 0xebe8,
	0x4199, 0x82ee, 0x19f4, 0x2e58,
	0x4807, 0xc40e, 0x130c, 0x3208,
	0x1905, 0x2e0a, 0x5804, 0xac08,
	0x213f, 0x132a, 0xadfc, 0x5ba8,
	0x19a9, 0x2efe, 0xb5cc, 0x6f88,
};

static const u16 x8_vectors[] = {
	0x0145, 0x028a, 0x2374, 0x43c8, 0xa1f0, 0x0520, 0x0a40, 0x1480,
	0x0211, 0x0422, 0x0844, 0x1088, 0x01b0, 0x44e0, 0x23c0, 0xed80,
	0x1011, 0x0116, 0x022c, 0x0458, 0x08b0, 0x8c60, 0x2740, 0x4e80,
	0x0411, 0x0822, 0x1044, 0x0158, 0x02b0, 0x2360, 0x46c0, 0xab80,
	0x0811, 0x1022, 0x012c, 0x0258, 0x04b0, 0x4660, 0x8cc0, 0x2780,
	0x2071, 0x40e2, 0xa0c4, 0x0108, 0x0210, 0x0420, 0x0840, 0x1080,
	0x4071, 0x80e2, 0x0104, 0x0208, 0x0410, 0x0820, 0x1040, 0x2080,
	0x8071, 0x0102, 0x0204, 0x0408, 0x0810, 0x1020, 0x2040, 0x4080,
	0x019d, 0x03d6, 0x136c, 0x2198, 0x50b0, 0xb2e0, 0x0740, 0x0e80,
	0x0189, 0x03ea, 0x072c, 0x0e58, 0x1cb0, 0x56e0, 0x37c0, 0xf580,
	0x01fd, 0x0376, 0x06ec, 0x0bb8, 0x1110, 0x2220, 0x4440, 0x8880,
	0x0163, 0x02c6, 0x1104, 0x0758, 0x0eb0, 0x2be0, 0x6140, 0xc280,
	0x02fd, 0x01c6, 0x0b5c, 0x1108, 0x07b0, 0x25a0, 0x8840, 0x6180,
	0x0801, 0x012e, 0x025c, 0x04b8, 0x1370, 0x26e0, 0x57c0, 0xb580,
	0x0401, 0x0802, 0x015c, 0x02b8, 0x22b0, 0x13e0, 0x7140, 0xe280,
	0x0201, 0x0402, 0x0804, 0x01b8, 0x11b0, 0x31a0, 0x8040, 0x7180,
	0x0101, 0x0202, 0x0404, 0x0808, 0x1010, 0x2020, 0x4040, 0x8080,
	0x0001, 0x0002, 0x0004, 0x0008, 0x0010, 0x0020, 0x0040, 0x0080,
	0x0100, 0x0200, 0x0400, 0x0800, 0x1000, 0x2000, 0x4000, 0x8000,
};

static int decode_syndrome(u16 syndrome, const u16 *vectors, unsigned num_vecs,
			   unsigned v_dim)
{
	unsigned int i, err_sym;

	for (err_sym = 0; err_sym < num_vecs / v_dim; err_sym++) {
		u16 s = syndrome;
		unsigned v_idx =  err_sym * v_dim;
		unsigned v_end = (err_sym + 1) * v_dim;

		/* walk over all 16 bits of the syndrome */
		for (i = 1; i < (1U << 16); i <<= 1) {

			/* if bit is set in that eigenvector... */
			if (v_idx < v_end && vectors[v_idx] & i) {
				u16 ev_comp = vectors[v_idx++];

				/* ... and bit set in the modified syndrome, */
				if (s & i) {
					/* remove it. */
					s ^= ev_comp;

					if (!s)
						return err_sym;
				}

			} else if (s & i)
				/* can't get to zero, move to next symbol */
				break;
		}
	}

	edac_dbg(0, "syndrome(%x) not found\n", syndrome);
	return -1;
}

static int map_err_sym_to_channel(int err_sym, int sym_size)
{
	if (sym_size == 4)
		switch (err_sym) {
		case 0x20:
		case 0x21:
			return 0;
		case 0x22:
		case 0x23:
			return 1;
		default:
			return err_sym >> 4;
		}
	/* x8 symbols */
	else
		switch (err_sym) {
		/* imaginary bits not in a DIMM */
		case 0x10:
			WARN(1, KERN_ERR "Invalid error symbol: 0x%x\n",
			     err_sym);
			return -1;
		case 0x11:
			return 0;
		case 0x12:
			return 1;
		default:
			return err_sym >> 3;
		}
	return -1;
}

*mci
, u16 syndrome
)
1737 struct amd64_pvt
*pvt
= mci
->pvt_info
;
1740 if (pvt
->ecc_sym_sz
== 8)
1741 err_sym
= decode_syndrome(syndrome
, x8_vectors
,
1742 ARRAY_SIZE(x8_vectors
),
1744 else if (pvt
->ecc_sym_sz
== 4)
1745 err_sym
= decode_syndrome(syndrome
, x4_vectors
,
1746 ARRAY_SIZE(x4_vectors
),
1749 amd64_warn("Illegal syndrome type: %u\n", pvt
->ecc_sym_sz
);
1753 return map_err_sym_to_channel(err_sym
, pvt
->ecc_sym_sz
);
static void __log_bus_error(struct mem_ctl_info *mci, struct err_info *err,
			    u8 ecc_type)
{
	enum hw_event_mc_err_type err_type;
	const char *string;

	if (ecc_type == 2)
		err_type = HW_EVENT_ERR_CORRECTED;
	else if (ecc_type == 1)
		err_type = HW_EVENT_ERR_UNCORRECTED;
	else {
		WARN(1, "Something is rotten in the state of Denmark.\n");
		return;
	}

	switch (err->err_code) {
	case DECODE_OK:
		string = "";
		break;
	case ERR_NODE:
		string = "Failed to map error addr to a node";
		break;
	case ERR_CSROW:
		string = "Failed to map error addr to a csrow";
		break;
	case ERR_CHANNEL:
		string = "unknown syndrome - possible error reporting race";
		break;
	default:
		string = "WTF error";
		break;
	}

	edac_mc_handle_error(err_type, mci, 1,
			     err->page, err->offset, err->syndrome,
			     err->csrow, err->channel, -1,
			     string, "");
}

static inline void __amd64_decode_bus_error(struct mem_ctl_info *mci,
					    struct mce *m)
{
	struct amd64_pvt *pvt = mci->pvt_info;
	u8 ecc_type = (m->status >> 45) & 0x3;
	u8 xec = XEC(m->status, 0x1f);
	u16 ec = EC(m->status);
	u64 sys_addr;
	struct err_info err;

	/* Bail out early if this was an 'observed' error */
	if (PP(ec) == NBSL_PP_OBS)
		return;

	/* Do only ECC errors */
	if (xec && xec != F10_NBSL_EXT_ERR_ECC)
		return;

	memset(&err, 0, sizeof(err));

	sys_addr = get_error_address(m);

	if (ecc_type == 2)
		err.syndrome = extract_syndrome(m->status);

	pvt->ops->map_sysaddr_to_csrow(mci, sys_addr, &err);

	__log_bus_error(mci, &err, ecc_type);
}

void amd64_decode_bus_error(int node_id, struct mce *m)
{
	__amd64_decode_bus_error(mcis[node_id], m);
}

/*
 * Use pvt->F2 which contains the F2 CPU PCI device to get the related
 * F1 (AddrMap) and F3 (Misc) devices. Return negative value on error.
 */
static int reserve_mc_sibling_devs(struct amd64_pvt *pvt, u16 f1_id, u16 f3_id)
{
	/* Reserve the ADDRESS MAP Device */
	pvt->F1 = pci_get_related_function(pvt->F2->vendor, f1_id, pvt->F2);
	if (!pvt->F1) {
		amd64_err("error address map device not found: "
			  "vendor %x device 0x%x (broken BIOS?)\n",
			  PCI_VENDOR_ID_AMD, f1_id);
		return -ENODEV;
	}

	/* Reserve the MISC Device */
	pvt->F3 = pci_get_related_function(pvt->F2->vendor, f3_id, pvt->F2);
	if (!pvt->F3) {
		pci_dev_put(pvt->F1);
		pvt->F1 = NULL;

		amd64_err("error F3 device not found: "
			  "vendor %x device 0x%x (broken BIOS?)\n",
			  PCI_VENDOR_ID_AMD, f3_id);

		return -ENODEV;
	}
	edac_dbg(1, "F1: %s\n", pci_name(pvt->F1));
	edac_dbg(1, "F2: %s\n", pci_name(pvt->F2));
	edac_dbg(1, "F3: %s\n", pci_name(pvt->F3));

	return 0;
}

static void free_mc_sibling_devs(struct amd64_pvt *pvt)
{
	pci_dev_put(pvt->F1);
	pci_dev_put(pvt->F3);
}

/*
 * Retrieve the hardware registers of the memory controller (this includes the
 * 'Address Map' and 'Misc' device regs)
 */
static void read_mc_regs(struct amd64_pvt *pvt)
{
	struct cpuinfo_x86 *c = &boot_cpu_data;
	unsigned range;
	u64 msr_val;
	u32 tmp;

	/*
	 * Retrieve TOP_MEM and TOP_MEM2; no masking off of reserved bits since
	 * those are Read-As-Zero
	 */
	rdmsrl(MSR_K8_TOP_MEM1, pvt->top_mem);
	edac_dbg(0, "  TOP_MEM:  0x%016llx\n", pvt->top_mem);

	/* check first whether TOP_MEM2 is enabled */
	rdmsrl(MSR_K8_SYSCFG, msr_val);
	if (msr_val & (1U << 21)) {
		rdmsrl(MSR_K8_TOP_MEM2, pvt->top_mem2);
		edac_dbg(0, "  TOP_MEM2: 0x%016llx\n", pvt->top_mem2);
	} else
		edac_dbg(0, "  TOP_MEM2 disabled\n");

	amd64_read_pci_cfg(pvt->F3, NBCAP, &pvt->nbcap);

	read_dram_ctl_register(pvt);

	for (range = 0; range < DRAM_RANGES; range++) {
		u8 rw;

		/* read settings for this DRAM range */
		read_dram_base_limit_regs(pvt, range);

		rw = dram_rw(pvt, range);
		if (!rw)
			continue;

		edac_dbg(1, "  DRAM range[%d], base: 0x%016llx; limit: 0x%016llx\n",
			 range,
			 get_dram_base(pvt, range),
			 get_dram_limit(pvt, range));

		edac_dbg(1, "   IntlvEn=%s; Range access: %s%s IntlvSel=%d DstNode=%d\n",
			 dram_intlv_en(pvt, range) ? "Enabled" : "Disabled",
			 (rw & 0x1) ? "R" : "-",
			 (rw & 0x2) ? "W" : "-",
			 dram_intlv_sel(pvt, range),
			 dram_dst_node(pvt, range));
	}

	read_dct_base_mask(pvt);

	amd64_read_pci_cfg(pvt->F1, DHAR, &pvt->dhar);
	amd64_read_dct_pci_cfg(pvt, DBAM0, &pvt->dbam0);

	amd64_read_pci_cfg(pvt->F3, F10_ONLINE_SPARE, &pvt->online_spare);

	amd64_read_dct_pci_cfg(pvt, DCLR0, &pvt->dclr0);
	amd64_read_dct_pci_cfg(pvt, DCHR0, &pvt->dchr0);

	if (!dct_ganging_enabled(pvt)) {
		amd64_read_dct_pci_cfg(pvt, DCLR1, &pvt->dclr1);
		amd64_read_dct_pci_cfg(pvt, DCHR1, &pvt->dchr1);
	}

	pvt->ecc_sym_sz = 4;

	if (c->x86 >= 0x10) {
		amd64_read_pci_cfg(pvt->F3, EXT_NB_MCA_CFG, &tmp);
		amd64_read_dct_pci_cfg(pvt, DBAM1, &pvt->dbam1);

		/* F10h, revD and later can do x8 ECC too */
		if ((c->x86 > 0x10 || c->x86_model > 7) && tmp & BIT(25))
			pvt->ecc_sym_sz = 8;
	}
	dump_misc_regs(pvt);
}

/*
 * NOTE: CPU Revision Dependent code
 *
 * Input:
 *	@csrow_nr	ChipSelect Row Number (0..NUM_CHIPSELECTS-1)
 *	k8 private pointer to -->
 *			DRAM Bank Address mapping register
 *			node_id
 *			DCL register where dual_channel_active is
 *
 * The DBAM register consists of 4 sets of 4 bits each definitions:
 *
 * Bits:	CSROWs
 * 0-3		CSROWs 0 and 1
 * 4-7		CSROWs 2 and 3
 * 8-11		CSROWs 4 and 5
 * 12-15	CSROWs 6 and 7
 *
 * Values range from: 0 to 15
 * The meaning of the values depends on CPU revision and dual-channel state;
 * see the relevant BKDG for more info.
 *
 * The memory controller provides for total of only 8 CSROWs in its current
 * architecture. Each "pair" of CSROWs normally represents just one DIMM in
 * single channel or two (2) DIMMs in dual channel mode.
 *
 * The following code logic collapses the various tables for CSROW based on CPU
 * revision.
 *
 * Returns:
 *	The number of PAGE_SIZE pages on the specified CSROW number it
 *	encompasses
 *
 */
static u32 amd64_csrow_nr_pages(struct amd64_pvt *pvt, u8 dct, int csrow_nr)
{
	u32 cs_mode, nr_pages;
	u32 dbam = dct ? pvt->dbam1 : pvt->dbam0;

	/*
	 * The math on this doesn't look right on the surface because x/2*4 can
	 * be simplified to x*2 but this expression makes use of the fact that
	 * it is integral math where 1/2=0. This intermediate value becomes the
	 * number of bits to shift the DBAM register to extract the proper CSROW
	 * field.
	 */
	cs_mode = DBAM_DIMM(csrow_nr / 2, dbam);

	nr_pages = pvt->ops->dbam_to_cs(pvt, dct, cs_mode) << (20 - PAGE_SHIFT);

	edac_dbg(0, "csrow: %d, channel: %d, DBAM idx: %d\n",
		 csrow_nr, dct, cs_mode);
	edac_dbg(0, "nr_pages/channel: %u\n", nr_pages);

	return nr_pages;
}

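/*
 * Worked example (illustrative): csrow_nr = 5 gives csrow_nr / 2 = 2, so
 * cs_mode comes from DBAM bits [11:8]. If dbam_to_cs() resolves that to
 * 512 MB then, with 4K pages, nr_pages = 512 << (20 - 12) = 131072 pages
 * per channel.
 */
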
/*
 * Initialize the array of csrow attribute instances, based on the values
 * from pci config hardware registers.
 */
static int init_csrows(struct mem_ctl_info *mci)
{
	struct amd64_pvt *pvt = mci->pvt_info;
	struct csrow_info *csrow;
	struct dimm_info *dimm;
	enum edac_type edac_mode;
	enum mem_type mtype;
	int i, j, empty = 1;
	int nr_pages = 0;
	u32 val;

	amd64_read_pci_cfg(pvt->F3, NBCFG, &val);

	pvt->nbcfg = val;

	edac_dbg(0, "node %d, NBCFG=0x%08x[ChipKillEccCap: %d|DramEccEn: %d]\n",
		 pvt->mc_node_id, val,
		 !!(val & NBCFG_CHIPKILL), !!(val & NBCFG_ECC_ENABLE));

	/*
	 * We iterate over DCT0 here but we look at DCT1 in parallel, if needed.
	 */
	for_each_chip_select(i, 0, pvt) {
		bool row_dct0 = !!csrow_enabled(i, 0, pvt);
		bool row_dct1 = false;

		if (boot_cpu_data.x86 != 0xf)
			row_dct1 = !!csrow_enabled(i, 1, pvt);

		if (!row_dct0 && !row_dct1)
			continue;

		csrow = mci->csrows[i];
		empty = 0;

		edac_dbg(1, "MC node: %d, csrow: %d\n",
			 pvt->mc_node_id, i);

		if (row_dct0)
			nr_pages = amd64_csrow_nr_pages(pvt, 0, i);

		/* K8 has only one DCT */
		if (boot_cpu_data.x86 != 0xf && row_dct1)
			nr_pages += amd64_csrow_nr_pages(pvt, 1, i);

		mtype = amd64_determine_memory_type(pvt, i);

		edac_dbg(1, "Total csrow%d pages: %u\n", i, nr_pages);

		/*
		 * determine whether CHIPKILL or JUST ECC or NO ECC is operating
		 */
		if (pvt->nbcfg & NBCFG_ECC_ENABLE)
			edac_mode = (pvt->nbcfg & NBCFG_CHIPKILL) ?
				    EDAC_S4ECD4ED : EDAC_SECDED;
		else
			edac_mode = EDAC_NONE;

		for (j = 0; j < pvt->channel_count; j++) {
			dimm = csrow->channels[j]->dimm;
			dimm->mtype = mtype;
			dimm->edac_mode = edac_mode;
			dimm->nr_pages = nr_pages;
		}
		csrow->nr_pages = nr_pages;
	}

	return empty;
}

/* get all cores on this DCT */
static void get_cpus_on_this_dct_cpumask(struct cpumask *mask, u16 nid)
{
	int cpu;

	for_each_online_cpu(cpu)
		if (amd_get_nb_id(cpu) == nid)
			cpumask_set_cpu(cpu, mask);
}

/* check MCG_CTL on all the cpus on this node */
static bool amd64_nb_mce_bank_enabled_on_node(u16 nid)
{
	cpumask_var_t mask;
	int cpu, nbe;
	bool ret = false;

	if (!zalloc_cpumask_var(&mask, GFP_KERNEL)) {
		amd64_warn("%s: Error allocating mask\n", __func__);
		return false;
	}

	get_cpus_on_this_dct_cpumask(mask, nid);

	rdmsr_on_cpus(mask, MSR_IA32_MCG_CTL, msrs);

	for_each_cpu(cpu, mask) {
		struct msr *reg = per_cpu_ptr(msrs, cpu);
		nbe = reg->l & MSR_MCGCTL_NBE;

		edac_dbg(0, "core: %u, MCG_CTL: 0x%llx, NB MSR is %s\n",
			 cpu, reg->q,
			 (nbe ? "enabled" : "disabled"));

		if (!nbe)
			goto out;
	}
	ret = true;

out:
	free_cpumask_var(mask);
	return ret;
}

static int toggle_ecc_err_reporting(struct ecc_settings *s, u16 nid, bool on)
{
	cpumask_var_t cmask;
	int cpu;

	if (!zalloc_cpumask_var(&cmask, GFP_KERNEL)) {
		amd64_warn("%s: error allocating mask\n", __func__);
		return false;
	}

	get_cpus_on_this_dct_cpumask(cmask, nid);

	rdmsr_on_cpus(cmask, MSR_IA32_MCG_CTL, msrs);

	for_each_cpu(cpu, cmask) {

		struct msr *reg = per_cpu_ptr(msrs, cpu);

		if (on) {
			if (reg->l & MSR_MCGCTL_NBE)
				s->flags.nb_mce_enable = 1;

			reg->l |= MSR_MCGCTL_NBE;
		} else {
			/*
			 * Turn off NB MCE reporting only when it was off before
			 */
			if (!s->flags.nb_mce_enable)
				reg->l &= ~MSR_MCGCTL_NBE;
		}
	}
	wrmsr_on_cpus(cmask, MSR_IA32_MCG_CTL, msrs);

	free_cpumask_var(cmask);

	return 0;
}

static bool enable_ecc_error_reporting(struct ecc_settings *s, u16 nid,
				       struct pci_dev *F3)
{
	bool ret = true;
	u32 value, mask = 0x3;		/* UECC/CECC enable */

	if (toggle_ecc_err_reporting(s, nid, ON)) {
		amd64_warn("Error enabling ECC reporting over MCGCTL!\n");
		return false;
	}

	amd64_read_pci_cfg(F3, NBCTL, &value);

	s->old_nbctl   = value & mask;
	s->nbctl_valid = true;

	value |= mask;
	amd64_write_pci_cfg(F3, NBCTL, value);

	amd64_read_pci_cfg(F3, NBCFG, &value);

	edac_dbg(0, "1: node %d, NBCFG=0x%08x[DramEccEn: %d]\n",
		 nid, value, !!(value & NBCFG_ECC_ENABLE));

	if (!(value & NBCFG_ECC_ENABLE)) {
		amd64_warn("DRAM ECC disabled on this node, enabling...\n");

		s->flags.nb_ecc_prev = 0;

		/* Attempt to turn on DRAM ECC Enable */
		value |= NBCFG_ECC_ENABLE;
		amd64_write_pci_cfg(F3, NBCFG, value);

		amd64_read_pci_cfg(F3, NBCFG, &value);

		if (!(value & NBCFG_ECC_ENABLE)) {
			amd64_warn("Hardware rejected DRAM ECC enable, "
				   "check memory DIMM configuration.\n");
			ret = false;
		} else {
			amd64_info("Hardware accepted DRAM ECC Enable\n");
		}
	} else {
		s->flags.nb_ecc_prev = 1;
	}

	edac_dbg(0, "2: node %d, NBCFG=0x%08x[DramEccEn: %d]\n",
		 nid, value, !!(value & NBCFG_ECC_ENABLE));

	return ret;
}

static void restore_ecc_error_reporting(struct ecc_settings *s, u16 nid,
					struct pci_dev *F3)
{
	u32 value, mask = 0x3;		/* UECC/CECC enable */

	if (!s->nbctl_valid)
		return;

	amd64_read_pci_cfg(F3, NBCTL, &value);
	value &= ~mask;
	value |= s->old_nbctl;

	amd64_write_pci_cfg(F3, NBCTL, value);

	/* restore previous BIOS DRAM ECC "off" setting we force-enabled */
	if (!s->flags.nb_ecc_prev) {
		amd64_read_pci_cfg(F3, NBCFG, &value);
		value &= ~NBCFG_ECC_ENABLE;
		amd64_write_pci_cfg(F3, NBCFG, value);
	}

	/* restore the NB Enable MCGCTL bit */
	if (toggle_ecc_err_reporting(s, nid, OFF))
		amd64_warn("Error restoring NB MCGCTL settings!\n");
}

/*
 * EDAC requires that the BIOS have ECC enabled before
 * taking over the processing of ECC errors. A command line
 * option allows force-enabling hardware ECC later in
 * enable_ecc_error_reporting().
 */
static const char *ecc_msg =
	"ECC disabled in the BIOS or no ECC capability, module will not load.\n"
	" Either enable ECC checking or force module loading by setting "
	"'ecc_enable_override'.\n"
	" (Note that use of the override may cause unknown side effects.)\n";

static bool ecc_enabled(struct pci_dev *F3, u16 nid)
{
	u32 value;
	u8 ecc_en = 0;
	bool nb_mce_en = false;

	amd64_read_pci_cfg(F3, NBCFG, &value);

	ecc_en = !!(value & NBCFG_ECC_ENABLE);
	amd64_info("DRAM ECC %s.\n", (ecc_en ? "enabled" : "disabled"));

	nb_mce_en = amd64_nb_mce_bank_enabled_on_node(nid);
	if (!nb_mce_en)
		amd64_notice("NB MCE bank disabled, set MSR "
			     "0x%08x[4] on node %d to enable.\n",
			     MSR_IA32_MCG_CTL, nid);

	if (!ecc_en || !nb_mce_en) {
		amd64_notice("%s", ecc_msg);
		return false;
	}
	return true;
}

static int set_mc_sysfs_attrs(struct mem_ctl_info *mci)
{
	int rc;

	rc = amd64_create_sysfs_dbg_files(mci);
	if (rc < 0)
		return rc;

	if (boot_cpu_data.x86 >= 0x10) {
		rc = amd64_create_sysfs_inject_files(mci);
		if (rc < 0)
			return rc;
	}

	return 0;
}

static void del_mc_sysfs_attrs(struct mem_ctl_info *mci)
{
	amd64_remove_sysfs_dbg_files(mci);

	if (boot_cpu_data.x86 >= 0x10)
		amd64_remove_sysfs_inject_files(mci);
}

static void setup_mci_misc_attrs(struct mem_ctl_info *mci,
				 struct amd64_family_type *fam)
{
	struct amd64_pvt *pvt = mci->pvt_info;

	mci->mtype_cap		= MEM_FLAG_DDR2 | MEM_FLAG_RDDR2;
	mci->edac_ctl_cap	= EDAC_FLAG_NONE;

	if (pvt->nbcap & NBCAP_SECDED)
		mci->edac_ctl_cap |= EDAC_FLAG_SECDED;

	if (pvt->nbcap & NBCAP_CHIPKILL)
		mci->edac_ctl_cap |= EDAC_FLAG_S4ECD4ED;

	mci->edac_cap		= amd64_determine_edac_cap(pvt);
	mci->mod_name		= EDAC_MOD_STR;
	mci->mod_ver		= EDAC_AMD64_VERSION;
	mci->ctl_name		= fam->ctl_name;
	mci->dev_name		= pci_name(pvt->F2);
	mci->ctl_page_to_phys	= NULL;

	/* memory scrubber interface */
	mci->set_sdram_scrub_rate = amd64_set_scrub_rate;
	mci->get_sdram_scrub_rate = amd64_get_scrub_rate;
}
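/*
 * Illustration: the two callbacks wired up above back the EDAC core's sysfs
 * scrub-rate knob, so once the mci is registered the rate can be read and
 * requested in bytes/sec from userspace (mc0 assumed to be this controller):
 *
 *	cat /sys/devices/system/edac/mc/mc0/sdram_scrub_rate
 *	echo 1600000 > /sys/devices/system/edac/mc/mc0/sdram_scrub_rate
 */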
/*
 * returns a pointer to the family descriptor on success, NULL otherwise.
 */
static struct amd64_family_type *amd64_per_family_init(struct amd64_pvt *pvt)
{
	u8 fam = boot_cpu_data.x86;
	struct amd64_family_type *fam_type = NULL;

	switch (fam) {
	case 0xf:
		fam_type	= &amd64_family_types[K8_CPUS];
		pvt->ops	= &amd64_family_types[K8_CPUS].ops;
		break;

	case 0x10:
		fam_type	= &amd64_family_types[F10_CPUS];
		pvt->ops	= &amd64_family_types[F10_CPUS].ops;
		break;

	case 0x15:
		fam_type	= &amd64_family_types[F15_CPUS];
		pvt->ops	= &amd64_family_types[F15_CPUS].ops;
		break;

	default:
		amd64_err("Unsupported family!\n");
		return NULL;
	}

	pvt->ext_model = boot_cpu_data.x86_model >> 4;

	amd64_info("%s %sdetected (node %d).\n", fam_type->ctl_name,
		   (fam == 0xf ?
			(pvt->ext_model >= K8_REV_F ? "revF or later "
						    : "revE or earlier ")
			: ""), pvt->mc_node_id);
	return fam_type;
}
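/*
 * Aside: boot_cpu_data.x86 already folds AMD's extended family into the base
 * family, and pvt->ext_model above recovers CPUID's extended-model bits. A
 * sketch of the same decode from a raw CPUID fn 1 EAX value (illustrative
 * only, not used by the driver):
 */
#if 0
static unsigned int decoded_family(unsigned int eax)
{
	unsigned int fam = (eax >> 8) & 0xf;

	if (fam == 0xf)
		fam += (eax >> 20) & 0xff;	/* add extended family */
	return fam;				/* 0xf = K8, 0x10 = F10h, 0x15 = F15h */
}

static unsigned int extended_model(unsigned int eax)
{
	return (eax >> 16) & 0xf;		/* bits [19:16], cf. ext_model above */
}
#endif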
static int amd64_init_one_instance(struct pci_dev *F2)
{
	struct amd64_pvt *pvt = NULL;
	struct amd64_family_type *fam_type = NULL;
	struct mem_ctl_info *mci = NULL;
	struct edac_mc_layer layers[2];
	int err = 0, ret;
	u16 nid = amd_get_node_id(F2);

	ret = -ENOMEM;
	pvt = kzalloc(sizeof(struct amd64_pvt), GFP_KERNEL);
	if (!pvt)
		goto err_ret;

	pvt->mc_node_id	= nid;
	pvt->F2 = F2;

	ret = -EINVAL;
	fam_type = amd64_per_family_init(pvt);
	if (!fam_type)
		goto err_free;

	ret = -ENODEV;
	err = reserve_mc_sibling_devs(pvt, fam_type->f1_id, fam_type->f3_id);
	if (err)
		goto err_free;

	read_mc_regs(pvt);

	/*
	 * We need to determine how many memory channels there are. Then use
	 * that information for calculating the size of the dynamic instance
	 * tables in the 'mci' structure.
	 */
	ret = -EINVAL;
	pvt->channel_count = pvt->ops->early_channel_count(pvt);
	if (pvt->channel_count < 0)
		goto err_siblings;

	ret = -ENOMEM;
	layers[0].type = EDAC_MC_LAYER_CHIP_SELECT;
	layers[0].size = pvt->csels[0].b_cnt;
	layers[0].is_virt_csrow = true;
	layers[1].type = EDAC_MC_LAYER_CHANNEL;
	layers[1].size = pvt->channel_count;
	layers[1].is_virt_csrow = false;
	mci = edac_mc_alloc(nid, ARRAY_SIZE(layers), layers, 0);
	if (!mci)
		goto err_siblings;

	mci->pvt_info = pvt;
	mci->pdev = &pvt->F2->dev;

	setup_mci_misc_attrs(mci, fam_type);

	if (init_csrows(mci))
		mci->edac_cap = EDAC_FLAG_NONE;

	ret = -ENODEV;
	if (edac_mc_add_mc(mci)) {
		edac_dbg(1, "failed edac_mc_add_mc()\n");
		goto err_add_mc;
	}
	if (set_mc_sysfs_attrs(mci)) {
		edac_dbg(1, "failed set_mc_sysfs_attrs()\n");
		goto err_add_sysfs;
	}

	/* register stuff with EDAC MCE */
	if (report_gart_errors)
		amd_report_gart_errors(true);

	amd_register_ecc_decoder(amd64_decode_bus_error);

	mcis[nid] = mci;

	atomic_inc(&drv_instances);

	return 0;

err_add_sysfs:
	edac_mc_del_mc(mci->pdev);
err_add_mc:
	edac_mc_free(mci);

err_siblings:
	free_mc_sibling_devs(pvt);

err_free:
	kfree(pvt);

err_ret:
	return ret;
}
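/*
 * For orientation: the two-layer description handed to edac_mc_alloc() above
 * makes the EDAC core size its location tables as the product of the layer
 * sizes, one slot per (chip-select row, channel) pair. With illustrative
 * numbers, 8 chip selects and 2 channels:
 *
 *	nr_locations = layers[0].size * layers[1].size;	8 * 2 = 16
 */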
static int amd64_probe_one_instance(struct pci_dev *pdev,
				    const struct pci_device_id *mc_type)
{
	u16 nid = amd_get_node_id(pdev);
	struct pci_dev *F3 = node_to_amd_nb(nid)->misc;
	struct ecc_settings *s;
	int ret = 0;

	ret = pci_enable_device(pdev);
	if (ret < 0) {
		edac_dbg(0, "ret=%d\n", ret);
		return -EIO;
	}

	ret = -ENOMEM;
	s = kzalloc(sizeof(struct ecc_settings), GFP_KERNEL);
	if (!s)
		goto err_out;

	ecc_stngs[nid] = s;

	if (!ecc_enabled(F3, nid)) {
		ret = -ENODEV;

		if (!ecc_enable_override)
			goto err_enable;

		amd64_warn("Forcing ECC on!\n");

		if (!enable_ecc_error_reporting(s, nid, F3))
			goto err_enable;
	}

	ret = amd64_init_one_instance(pdev);
	if (ret < 0) {
		amd64_err("Error probing instance: %d\n", nid);
		restore_ecc_error_reporting(s, nid, F3);
	}

	return ret;

err_enable:
	kfree(s);
	ecc_stngs[nid] = NULL;

err_out:
	return ret;
}
static void amd64_remove_one_instance(struct pci_dev *pdev)
{
	struct mem_ctl_info *mci;
	struct amd64_pvt *pvt;
	u16 nid = amd_get_node_id(pdev);
	struct pci_dev *F3 = node_to_amd_nb(nid)->misc;
	struct ecc_settings *s = ecc_stngs[nid];

	mci = find_mci_by_dev(&pdev->dev);
	del_mc_sysfs_attrs(mci);
	/* Remove from EDAC CORE tracking list */
	mci = edac_mc_del_mc(&pdev->dev);
	if (!mci)
		return;

	pvt = mci->pvt_info;

	restore_ecc_error_reporting(s, nid, F3);

	free_mc_sibling_devs(pvt);

	/* unregister from EDAC MCE */
	amd_report_gart_errors(false);
	amd_unregister_ecc_decoder(amd64_decode_bus_error);

	kfree(ecc_stngs[nid]);
	ecc_stngs[nid] = NULL;

	/* Free the EDAC CORE resources */
	mci->pvt_info = NULL;
	mcis[nid] = NULL;

	kfree(pvt);
	edac_mc_free(mci);
}
/*
 * This table is part of the interface for loading drivers for PCI devices.
 * The PCI core identifies what devices are on a system during boot and then
 * queries this table to see whether this driver handles a given device.
 */
static DEFINE_PCI_DEVICE_TABLE(amd64_pci_table) = {
	{
		.vendor		= PCI_VENDOR_ID_AMD,
		.device		= PCI_DEVICE_ID_AMD_K8_NB_MEMCTL,
		.subvendor	= PCI_ANY_ID,
		.subdevice	= PCI_ANY_ID,
		.class		= 0,
		.class_mask	= 0,
	},
	{
		.vendor		= PCI_VENDOR_ID_AMD,
		.device		= PCI_DEVICE_ID_AMD_10H_NB_DRAM,
		.subvendor	= PCI_ANY_ID,
		.subdevice	= PCI_ANY_ID,
		.class		= 0,
		.class_mask	= 0,
	},
	{
		.vendor		= PCI_VENDOR_ID_AMD,
		.device		= PCI_DEVICE_ID_AMD_15H_NB_F2,
		.subvendor	= PCI_ANY_ID,
		.subdevice	= PCI_ANY_ID,
		.class		= 0,
		.class_mask	= 0,
	},
	{0, }
};
MODULE_DEVICE_TABLE(pci, amd64_pci_table);
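/*
 * Sketch of how the PCI core consumes such a table (simplified; the real
 * matching in the PCI core also honors class/class_mask): an id matches a
 * device when every field is either equal or the PCI_ANY_ID wildcard.
 */
#if 0
static bool id_matches(const struct pci_device_id *id, const struct pci_dev *dev)
{
	return (id->vendor    == PCI_ANY_ID || id->vendor    == dev->vendor) &&
	       (id->device    == PCI_ANY_ID || id->device    == dev->device) &&
	       (id->subvendor == PCI_ANY_ID || id->subvendor == dev->subsystem_vendor) &&
	       (id->subdevice == PCI_ANY_ID || id->subdevice == dev->subsystem_device);
}
#endif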
static struct pci_driver amd64_pci_driver = {
	.name		= EDAC_MOD_STR,
	.probe		= amd64_probe_one_instance,
	.remove		= amd64_remove_one_instance,
	.id_table	= amd64_pci_table,
};
static void setup_pci_device(void)
{
	struct mem_ctl_info *mci;
	struct amd64_pvt *pvt;

	if (amd64_ctl_pci)
		return;

	mci = mcis[0];
	if (mci) {

		pvt = mci->pvt_info;
		amd64_ctl_pci =
			edac_pci_create_generic_ctl(&pvt->F2->dev, EDAC_MOD_STR);

		if (!amd64_ctl_pci) {
			pr_warning("%s(): Unable to create PCI control\n",
				   __func__);

			pr_warning("%s(): PCI error report via EDAC not set\n",
				   __func__);
		}
	}
}
static int __init amd64_edac_init(void)
{
	int err = -ENODEV;

	printk(KERN_INFO "AMD64 EDAC driver v%s\n", EDAC_AMD64_VERSION);

	opstate_init();

	if (amd_cache_northbridges() < 0)
		goto err_ret;

	err = -ENOMEM;
	mcis	  = kzalloc(amd_nb_num() * sizeof(mcis[0]), GFP_KERNEL);
	ecc_stngs = kzalloc(amd_nb_num() * sizeof(ecc_stngs[0]), GFP_KERNEL);
	if (!(mcis && ecc_stngs))
		goto err_free;

	msrs = msrs_alloc();
	if (!msrs)
		goto err_free;

	err = pci_register_driver(&amd64_pci_driver);
	if (err)
		goto err_pci;

	err = -ENODEV;
	if (!atomic_read(&drv_instances))
		goto err_no_instances;

	setup_pci_device();
	return 0;

err_no_instances:
	pci_unregister_driver(&amd64_pci_driver);

err_pci:
	msrs_free(msrs);
	msrs = NULL;

err_free:
	kfree(mcis);
	mcis = NULL;

	kfree(ecc_stngs);
	ecc_stngs = NULL;

err_ret:
	return err;
}
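/*
 * After a successful init, one mcX directory per initialized instance shows
 * up under the EDAC core's sysfs tree, which makes for a quick load check:
 *
 *	ls /sys/devices/system/edac/mc/
 *	mc0  mc1 ...
 */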
static void __exit amd64_edac_exit(void)
{
	if (amd64_ctl_pci)
		edac_pci_release_generic_ctl(amd64_ctl_pci);

	pci_unregister_driver(&amd64_pci_driver);

	kfree(ecc_stngs);
	ecc_stngs = NULL;

	kfree(mcis);
	mcis = NULL;

	msrs_free(msrs);
	msrs = NULL;
}
module_init(amd64_edac_init);
module_exit(amd64_edac_exit);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("SoftwareBitMaker: Doug Thompson, "
	      "Dave Peterson, Thayne Harbaugh");
MODULE_DESCRIPTION("MC support for AMD64 memory controllers - "
		   EDAC_AMD64_VERSION);

module_param(edac_op_state, int, 0444);
MODULE_PARM_DESC(edac_op_state, "EDAC Error Reporting state: 0=Poll,1=NMI");