/* drivers/edac/amd64_edac.c */
#include "amd64_edac.h"
#include <asm/amd_nb.h>

static struct edac_pci_ctl_info *amd64_ctl_pci;

static int report_gart_errors;
module_param(report_gart_errors, int, 0644);

/*
 * Set by command line parameter. If BIOS has enabled the ECC, this override is
 * cleared to prevent re-enabling the hardware by this driver.
 */
static int ecc_enable_override;
module_param(ecc_enable_override, int, 0644);

static struct msr __percpu *msrs;

/*
 * count successfully initialized driver instances for setup_pci_device()
 */
static atomic_t drv_instances = ATOMIC_INIT(0);

/* Per-node driver instances */
static struct mem_ctl_info **mcis;
static struct ecc_settings **ecc_stngs;

/*
 * Valid scrub rates for the K8 hardware memory scrubber. We map the scrubbing
 * bandwidth to a valid bit pattern. The 'set' operation finds the 'matching-
 * or higher value'.
 *
 * FIXME: Produce a better mapping/linearisation.
 */
struct scrubrate {
	u32 scrubval;	/* bit pattern for scrub rate */
	u32 bandwidth;	/* bandwidth consumed (bytes/sec) */
} scrubrates[] = {
	{ 0x01, 1600000000UL},
	{ 0x02, 800000000UL},
	{ 0x03, 400000000UL},
	{ 0x04, 200000000UL},
	{ 0x05, 100000000UL},
	{ 0x06, 50000000UL},
	{ 0x07, 25000000UL},
	{ 0x08, 12284069UL},
	{ 0x09, 6274509UL},
	{ 0x0A, 3121951UL},
	{ 0x0B, 1560975UL},
	{ 0x0C, 781440UL},
	{ 0x0D, 390720UL},
	{ 0x0E, 195300UL},
	{ 0x0F, 97650UL},
	{ 0x10, 48854UL},
	{ 0x11, 24427UL},
	{ 0x12, 12213UL},
	{ 0x13, 6101UL},
	{ 0x14, 3051UL},
	{ 0x15, 1523UL},
	{ 0x16, 761UL},
	{ 0x00, 0UL},	/* scrubbing off */
};

static int __amd64_read_pci_cfg_dword(struct pci_dev *pdev, int offset,
				      u32 *val, const char *func)
{
	int err = 0;

	err = pci_read_config_dword(pdev, offset, val);
	if (err)
		amd64_warn("%s: error reading F%dx%03x.\n",
			   func, PCI_FUNC(pdev->devfn), offset);

	return err;
}

int __amd64_write_pci_cfg_dword(struct pci_dev *pdev, int offset,
				u32 val, const char *func)
{
	int err = 0;

	err = pci_write_config_dword(pdev, offset, val);
	if (err)
		amd64_warn("%s: error writing to F%dx%03x.\n",
			   func, PCI_FUNC(pdev->devfn), offset);

	return err;
}

/*
 * Depending on the family, F2 DCT reads need special handling:
 *
 * K8: has a single DCT only
 *
 * F10h: each DCT has its own set of regs
 *	DCT0 -> F2x040..
 *	DCT1 -> F2x140..
 *
 * F15h: we select which DCT we access using F1x10C[DctCfgSel]
 */
static int k8_read_dct_pci_cfg(struct amd64_pvt *pvt, int addr, u32 *val,
			       const char *func)
{
	if (addr >= 0x100)
		return -EINVAL;

	return __amd64_read_pci_cfg_dword(pvt->F2, addr, val, func);
}

static int f10_read_dct_pci_cfg(struct amd64_pvt *pvt, int addr, u32 *val,
				const char *func)
{
	return __amd64_read_pci_cfg_dword(pvt->F2, addr, val, func);
}

/*
 * Select DCT to which PCI cfg accesses are routed
 */
static void f15h_select_dct(struct amd64_pvt *pvt, u8 dct)
{
	u32 reg = 0;

	amd64_read_pci_cfg(pvt->F1, DCT_CFG_SEL, &reg);
	reg &= 0xfffffffe;
	reg |= dct;
	amd64_write_pci_cfg(pvt->F1, DCT_CFG_SEL, reg);
}

static int f15_read_dct_pci_cfg(struct amd64_pvt *pvt, int addr, u32 *val,
				const char *func)
{
	u8 dct = 0;

	if (addr >= 0x140 && addr <= 0x1a0) {
		dct = 1;
		addr -= 0x100;
	}

	f15h_select_dct(pvt, dct);

	return __amd64_read_pci_cfg_dword(pvt->F2, addr, val, func);
}

/*
 * Memory scrubber control interface. For K8, memory scrubbing is handled by
 * hardware and can involve L2 cache, dcache as well as the main memory. With
 * F10, this is extended to L3 cache scrubbing on CPU models sporting that
 * functionality.
 *
 * This causes the "units" for the scrubbing speed to vary from 64 byte blocks
 * (dram) over to cache lines. This is nasty, so we will use bandwidth in
 * bytes/sec for the setting.
 *
 * Currently, we only do dram scrubbing. If the scrubbing is done in software on
 * other archs, we might not have access to the caches directly.
 */

/*
 * Scan the scrub rate mapping table for a close or matching bandwidth value to
 * issue. If the requested rate is too big, then use the last maximum value found.
 */
static int __amd64_set_scrub_rate(struct pci_dev *ctl, u32 new_bw, u32 min_rate)
{
	u32 scrubval;
	int i;

	/*
	 * map the configured rate (new_bw) to a value specific to the AMD64
	 * memory controller and apply to register. Search for the first
	 * bandwidth entry that is greater or equal than the setting requested
	 * and program that. If at last entry, turn off DRAM scrubbing.
	 */
	for (i = 0; i < ARRAY_SIZE(scrubrates); i++) {
		/*
		 * skip scrub rates which aren't recommended
		 * (see F10 BKDG, F3x58)
		 */
		if (scrubrates[i].scrubval < min_rate)
			continue;

		if (scrubrates[i].bandwidth <= new_bw)
			break;

		/*
		 * if no suitable bandwidth found, turn off DRAM scrubbing
		 * entirely by falling back to the last element in the
		 * scrubrates array.
		 */
	}

	scrubval = scrubrates[i].scrubval;

	pci_write_bits32(ctl, SCRCTRL, scrubval, 0x001F);

	if (scrubval)
		return scrubrates[i].bandwidth;

	return 0;
}
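
/*
 * Worked example (illustrative values, not from the BKDG): a request of
 * new_bw = 500000 bytes/sec with min_rate = 0x5 skips the disallowed
 * scrubvals 0x01-0x04, then stops at the first entry whose bandwidth is
 * <= 500000, i.e. { 0x0D, 390720UL }. 0x0D is programmed into the scrub
 * control field and 390720 is returned as the effective rate.
 */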

static int amd64_set_scrub_rate(struct mem_ctl_info *mci, u32 bw)
{
	struct amd64_pvt *pvt = mci->pvt_info;
	u32 min_scrubrate = 0x5;

	if (boot_cpu_data.x86 == 0xf)
		min_scrubrate = 0x0;

	/* F15h Erratum #505 */
	if (boot_cpu_data.x86 == 0x15)
		f15h_select_dct(pvt, 0);

	return __amd64_set_scrub_rate(pvt->F3, bw, min_scrubrate);
}

static int amd64_get_scrub_rate(struct mem_ctl_info *mci)
{
	struct amd64_pvt *pvt = mci->pvt_info;
	u32 scrubval = 0;
	int i, retval = -EINVAL;

	/* F15h Erratum #505 */
	if (boot_cpu_data.x86 == 0x15)
		f15h_select_dct(pvt, 0);

	amd64_read_pci_cfg(pvt->F3, SCRCTRL, &scrubval);

	scrubval = scrubval & 0x001F;

	for (i = 0; i < ARRAY_SIZE(scrubrates); i++) {
		if (scrubrates[i].scrubval == scrubval) {
			retval = scrubrates[i].bandwidth;
			break;
		}
	}
	return retval;
}

/*
 * returns true if the SysAddr given by sys_addr matches the
 * DRAM base/limit associated with node_id
 */
static bool amd64_base_limit_match(struct amd64_pvt *pvt, u64 sys_addr,
				   unsigned nid)
{
	u64 addr;

	/* The K8 treats this as a 40-bit value. However, bits 63-40 will be
	 * all ones if the most significant implemented address bit is 1.
	 * Here we discard bits 63-40. See section 3.4.2 of AMD publication
	 * 24592: AMD x86-64 Architecture Programmer's Manual Volume 1
	 * Application Programming.
	 */
	addr = sys_addr & 0x000000ffffffffffull;

	return ((addr >= get_dram_base(pvt, nid)) &&
		(addr <= get_dram_limit(pvt, nid)));
}
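
/*
 * For example (hypothetical value): sys_addr = 0xffffff8012345678 is the
 * sign-extended form of the 40-bit address 0x8012345678; masking with
 * 0x000000ffffffffffull recovers exactly that 40-bit value before the
 * base/limit comparison.
 */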

/*
 * Attempt to map a SysAddr to a node. On success, return a pointer to the
 * mem_ctl_info structure for the node that the SysAddr maps to.
 *
 * On failure, return NULL.
 */
static struct mem_ctl_info *find_mc_by_sys_addr(struct mem_ctl_info *mci,
						u64 sys_addr)
{
	struct amd64_pvt *pvt;
	unsigned node_id;
	u32 intlv_en, bits;

	/*
	 * Here we use the DRAM Base (section 3.4.4.1) and DRAM Limit (section
	 * 3.4.4.2) registers to map the SysAddr to a node ID.
	 */
	pvt = mci->pvt_info;

	/*
	 * The value of this field should be the same for all DRAM Base
	 * registers. Therefore we arbitrarily choose to read it from the
	 * register for node 0.
	 */
	intlv_en = dram_intlv_en(pvt, 0);

	if (intlv_en == 0) {
		for (node_id = 0; node_id < DRAM_RANGES; node_id++) {
			if (amd64_base_limit_match(pvt, sys_addr, node_id))
				goto found;
		}
		goto err_no_match;
	}

	if (unlikely((intlv_en != 0x01) &&
		     (intlv_en != 0x03) &&
		     (intlv_en != 0x07))) {
		amd64_warn("DRAM Base[IntlvEn] junk value: 0x%x, BIOS bug?\n", intlv_en);
		return NULL;
	}

	bits = (((u32) sys_addr) >> 12) & intlv_en;

	for (node_id = 0; ; ) {
		if ((dram_intlv_sel(pvt, node_id) & intlv_en) == bits)
			break;	/* intlv_sel field matches */

		if (++node_id >= DRAM_RANGES)
			goto err_no_match;
	}

	/* sanity test for sys_addr */
	if (unlikely(!amd64_base_limit_match(pvt, sys_addr, node_id))) {
		amd64_warn("%s: sys_addr 0x%llx falls outside base/limit address "
			   "range for node %d with node interleaving enabled.\n",
			   __func__, sys_addr, node_id);
		return NULL;
	}

found:
	return edac_mc_find((int)node_id);

err_no_match:
	debugf2("sys_addr 0x%lx doesn't match any node\n",
		(unsigned long)sys_addr);

	return NULL;
}

/*
 * compute the CS base address of the @csrow on the DRAM controller @dct.
 * For details see F2x[5C:40] in the processor's BKDG
 */
static void get_cs_base_and_mask(struct amd64_pvt *pvt, int csrow, u8 dct,
				 u64 *base, u64 *mask)
{
	u64 csbase, csmask, base_bits, mask_bits;
	u8 addr_shift;

	if (boot_cpu_data.x86 == 0xf && pvt->ext_model < K8_REV_F) {
		csbase		= pvt->csels[dct].csbases[csrow];
		csmask		= pvt->csels[dct].csmasks[csrow];
		base_bits	= GENMASK(21, 31) | GENMASK(9, 15);
		mask_bits	= GENMASK(21, 29) | GENMASK(9, 15);
		addr_shift	= 4;
	} else {
		csbase		= pvt->csels[dct].csbases[csrow];
		csmask		= pvt->csels[dct].csmasks[csrow >> 1];
		addr_shift	= 8;

		if (boot_cpu_data.x86 == 0x15)
			base_bits = mask_bits = GENMASK(19, 30) | GENMASK(5, 13);
		else
			base_bits = mask_bits = GENMASK(19, 28) | GENMASK(5, 13);
	}

	*base = (csbase & base_bits) << addr_shift;

	*mask = ~0ULL;
	/* poke holes for the csmask */
	*mask &= ~(mask_bits << addr_shift);
	/* OR them in */
	*mask |= (csmask & mask_bits) << addr_shift;
}

#define for_each_chip_select(i, dct, pvt) \
	for (i = 0; i < pvt->csels[dct].b_cnt; i++)

#define chip_select_base(i, dct, pvt) \
	pvt->csels[dct].csbases[i]

#define for_each_chip_select_mask(i, dct, pvt) \
	for (i = 0; i < pvt->csels[dct].m_cnt; i++)

/*
 * @input_addr is an InputAddr associated with the node given by mci. Return the
 * csrow that input_addr maps to, or -1 on failure (no csrow claims input_addr).
 */
static int input_addr_to_csrow(struct mem_ctl_info *mci, u64 input_addr)
{
	struct amd64_pvt *pvt;
	int csrow;
	u64 base, mask;

	pvt = mci->pvt_info;

	for_each_chip_select(csrow, 0, pvt) {
		if (!csrow_enabled(csrow, 0, pvt))
			continue;

		get_cs_base_and_mask(pvt, csrow, 0, &base, &mask);

		mask = ~mask;

		if ((input_addr & mask) == (base & mask)) {
			debugf2("InputAddr 0x%lx matches csrow %d (node %d)\n",
				(unsigned long)input_addr, csrow,
				pvt->mc_node_id);

			return csrow;
		}
	}
	debugf2("no matching csrow for InputAddr 0x%lx (MC node %d)\n",
		(unsigned long)input_addr, pvt->mc_node_id);

	return -1;
}

/*
 * Obtain info from the DRAM Hole Address Register (section 3.4.8, pub #26094)
 * for the node represented by mci. Info is passed back in *hole_base,
 * *hole_offset, and *hole_size. Function returns 0 if info is valid or 1 if
 * info is invalid. Info may be invalid for either of the following reasons:
 *
 * - The revision of the node is not E or greater. In this case, the DRAM Hole
 *   Address Register does not exist.
 *
 * - The DramHoleValid bit is cleared in the DRAM Hole Address Register,
 *   indicating that its contents are not valid.
 *
 * The values passed back in *hole_base, *hole_offset, and *hole_size are
 * complete 32-bit values despite the fact that the bitfields in the DHAR
 * only represent bits 31-24 of the base and offset values.
 */
int amd64_get_dram_hole_info(struct mem_ctl_info *mci, u64 *hole_base,
			     u64 *hole_offset, u64 *hole_size)
{
	struct amd64_pvt *pvt = mci->pvt_info;
	u64 base;

	/* only revE and later have the DRAM Hole Address Register */
	if (boot_cpu_data.x86 == 0xf && pvt->ext_model < K8_REV_E) {
		debugf1(" revision %d for node %d does not support DHAR\n",
			pvt->ext_model, pvt->mc_node_id);
		return 1;
	}

	/* valid for Fam10h and above */
	if (boot_cpu_data.x86 >= 0x10 && !dhar_mem_hoist_valid(pvt)) {
		debugf1(" Dram Memory Hoisting is DISABLED on this system\n");
		return 1;
	}

	if (!dhar_valid(pvt)) {
		debugf1(" Dram Memory Hoisting is DISABLED on this node %d\n",
			pvt->mc_node_id);
		return 1;
	}

	/* This node has Memory Hoisting */

	/* +------------------+--------------------+--------------------+-----
	 * | memory           | DRAM hole          | relocated          |
	 * | [0, (x - 1)]     | [x, 0xffffffff]    | addresses from     |
	 * |                  |                    | DRAM hole          |
	 * |                  |                    | [0x100000000,      |
	 * |                  |                    |  (0x100000000+     |
	 * |                  |                    |   (0xffffffff-x))] |
	 * +------------------+--------------------+--------------------+-----
	 *
	 * Above is a diagram of physical memory showing the DRAM hole and the
	 * relocated addresses from the DRAM hole. As shown, the DRAM hole
	 * starts at address x (the base address) and extends through address
	 * 0xffffffff. The DRAM Hole Address Register (DHAR) relocates the
	 * addresses in the hole so that they start at 0x100000000.
	 */

	base = dhar_base(pvt);

	*hole_base = base;
	*hole_size = (0x1ull << 32) - base;

	if (boot_cpu_data.x86 > 0xf)
		*hole_offset = f10_dhar_offset(pvt);
	else
		*hole_offset = k8_dhar_offset(pvt);

	debugf1(" DHAR info for node %d base 0x%lx offset 0x%lx size 0x%lx\n",
		pvt->mc_node_id, (unsigned long)*hole_base,
		(unsigned long)*hole_offset, (unsigned long)*hole_size);

	return 0;
}
EXPORT_SYMBOL_GPL(amd64_get_dram_hole_info);
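
/*
 * Worked example with assumed values: if dhar_base() returns 0xc0000000
 * (a 3GB hole base), then *hole_base = 0xc0000000 and *hole_size =
 * 0x100000000 - 0xc0000000 = 0x40000000 (1GB). The 1GB that would have
 * mapped to [0xc0000000, 0xffffffff] is instead accessed at
 * [0x100000000, 0x13fffffff], and *hole_offset is what a caller subtracts
 * from such a SysAddr to get the DramAddr back.
 */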

/*
 * Return the DramAddr that the SysAddr given by @sys_addr maps to. It is
 * assumed that sys_addr maps to the node given by mci.
 *
 * The first part of section 3.4.4 (p. 70) shows how the DRAM Base (section
 * 3.4.4.1) and DRAM Limit (section 3.4.4.2) registers are used to translate a
 * SysAddr to a DramAddr. If the DRAM Hole Address Register (DHAR) is enabled,
 * then it is also involved in translating a SysAddr to a DramAddr. Sections
 * 3.4.8 and 3.5.8.2 describe the DHAR and how it is used for memory hoisting.
 * These parts of the documentation are unclear. I interpret them as follows:
 *
 * When node n receives a SysAddr, it processes the SysAddr as follows:
 *
 * 1. It extracts the DRAMBase and DRAMLimit values from the DRAM Base and DRAM
 *    Limit registers for node n. If the SysAddr is not within the range
 *    specified by the base and limit values, then node n ignores the Sysaddr
 *    (since it does not map to node n). Otherwise continue to step 2 below.
 *
 * 2. If the DramHoleValid bit of the DHAR for node n is clear, the DHAR is
 *    disabled so skip to step 3 below. Otherwise see if the SysAddr is within
 *    the range of relocated addresses (starting at 0x100000000) from the DRAM
 *    hole. If not, skip to step 3 below. Else get the value of the
 *    DramHoleOffset field from the DHAR. To obtain the DramAddr, subtract the
 *    offset defined by this value from the SysAddr.
 *
 * 3. Obtain the base address for node n from the DRAMBase field of the DRAM
 *    Base register for node n. To obtain the DramAddr, subtract the base
 *    address from the SysAddr, as shown near the start of section 3.4.4 (p.70).
 */
static u64 sys_addr_to_dram_addr(struct mem_ctl_info *mci, u64 sys_addr)
{
	struct amd64_pvt *pvt = mci->pvt_info;
	u64 dram_base, hole_base, hole_offset, hole_size, dram_addr;
	int ret = 0;

	dram_base = get_dram_base(pvt, pvt->mc_node_id);

	ret = amd64_get_dram_hole_info(mci, &hole_base, &hole_offset,
				       &hole_size);
	if (!ret) {
		if ((sys_addr >= (1ull << 32)) &&
		    (sys_addr < ((1ull << 32) + hole_size))) {
			/* use DHAR to translate SysAddr to DramAddr */
			dram_addr = sys_addr - hole_offset;

			debugf2("using DHAR to translate SysAddr 0x%lx to "
				"DramAddr 0x%lx\n",
				(unsigned long)sys_addr,
				(unsigned long)dram_addr);

			return dram_addr;
		}
	}

	/*
	 * Translate the SysAddr to a DramAddr as shown near the start of
	 * section 3.4.4 (p. 70). Although sys_addr is a 64-bit value, the k8
	 * only deals with 40-bit values. Therefore we discard bits 63-40 of
	 * sys_addr below. If bit 39 of sys_addr is 1 then the bits we
	 * discard are all 1s. Otherwise the bits we discard are all 0s. See
	 * section 3.4.2 of AMD publication 24592: AMD x86-64 Architecture
	 * Programmer's Manual Volume 1 Application Programming.
	 */
	dram_addr = (sys_addr & GENMASK(0, 39)) - dram_base;

	debugf2("using DRAM Base register to translate SysAddr 0x%lx to "
		"DramAddr 0x%lx\n", (unsigned long)sys_addr,
		(unsigned long)dram_addr);
	return dram_addr;
}

/*
 * @intlv_en is the value of the IntlvEn field from a DRAM Base register
 * (section 3.4.4.1). Return the number of bits from a SysAddr that are used
 * for node interleaving.
 */
static int num_node_interleave_bits(unsigned intlv_en)
{
	static const int intlv_shift_table[] = { 0, 1, 0, 2, 0, 0, 0, 3 };
	int n;

	BUG_ON(intlv_en > 7);
	n = intlv_shift_table[intlv_en];
	return n;
}
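
/*
 * E.g. intlv_en == 0 means no interleaving (0 bits), 0x01 -> 1 bit
 * (2 nodes), 0x03 -> 2 bits (4 nodes), 0x07 -> 3 bits (8 nodes); the
 * remaining encodings are not used and map to 0 in the table above.
 */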

/* Translate the DramAddr given by @dram_addr to an InputAddr. */
static u64 dram_addr_to_input_addr(struct mem_ctl_info *mci, u64 dram_addr)
{
	struct amd64_pvt *pvt;
	int intlv_shift;
	u64 input_addr;

	pvt = mci->pvt_info;

	/*
	 * See the start of section 3.4.4 (p. 70, BKDG #26094, K8, revA-E)
	 * concerning translating a DramAddr to an InputAddr.
	 */
	intlv_shift = num_node_interleave_bits(dram_intlv_en(pvt, 0));
	input_addr = ((dram_addr >> intlv_shift) & GENMASK(12, 35)) +
		      (dram_addr & 0xfff);

	debugf2(" Intlv Shift=%d DramAddr=0x%lx maps to InputAddr=0x%lx\n",
		intlv_shift, (unsigned long)dram_addr,
		(unsigned long)input_addr);

	return input_addr;
}

/*
 * Translate the SysAddr represented by @sys_addr to an InputAddr. It is
 * assumed that @sys_addr maps to the node given by mci.
 */
static u64 sys_addr_to_input_addr(struct mem_ctl_info *mci, u64 sys_addr)
{
	u64 input_addr;

	input_addr =
	    dram_addr_to_input_addr(mci, sys_addr_to_dram_addr(mci, sys_addr));

	debugf2("SysAddr 0x%lx translates to InputAddr 0x%lx\n",
		(unsigned long)sys_addr, (unsigned long)input_addr);

	return input_addr;
}

/*
 * @input_addr is an InputAddr associated with the node represented by mci.
 * Translate @input_addr to a DramAddr and return the result.
 */
static u64 input_addr_to_dram_addr(struct mem_ctl_info *mci, u64 input_addr)
{
	struct amd64_pvt *pvt;
	unsigned node_id, intlv_shift;
	u64 bits, dram_addr;
	u32 intlv_sel;

	/*
	 * Near the start of section 3.4.4 (p. 70, BKDG #26094, K8, revA-E)
	 * shows how to translate a DramAddr to an InputAddr. Here we reverse
	 * this procedure. When translating from a DramAddr to an InputAddr, the
	 * bits used for node interleaving are discarded. Here we recover these
	 * bits from the IntlvSel field of the DRAM Limit register (section
	 * 3.4.4.2) for the node that input_addr is associated with.
	 */
	pvt = mci->pvt_info;
	node_id = pvt->mc_node_id;

	BUG_ON(node_id > 7);

	intlv_shift = num_node_interleave_bits(dram_intlv_en(pvt, 0));
	if (intlv_shift == 0) {
		debugf1(" InputAddr 0x%lx translates to DramAddr of "
			"same value\n", (unsigned long)input_addr);

		return input_addr;
	}

	bits = ((input_addr & GENMASK(12, 35)) << intlv_shift) +
		(input_addr & 0xfff);

	intlv_sel = dram_intlv_sel(pvt, node_id) & ((1 << intlv_shift) - 1);
	dram_addr = bits + (intlv_sel << 12);

	debugf1("InputAddr 0x%lx translates to DramAddr 0x%lx "
		"(%d node interleave bits)\n", (unsigned long)input_addr,
		(unsigned long)dram_addr, intlv_shift);

	return dram_addr;
}

/*
 * @dram_addr is a DramAddr that maps to the node represented by mci. Convert
 * @dram_addr to a SysAddr.
 */
static u64 dram_addr_to_sys_addr(struct mem_ctl_info *mci, u64 dram_addr)
{
	struct amd64_pvt *pvt = mci->pvt_info;
	u64 hole_base, hole_offset, hole_size, base, sys_addr;
	int ret = 0;

	ret = amd64_get_dram_hole_info(mci, &hole_base, &hole_offset,
				       &hole_size);
	if (!ret) {
		if ((dram_addr >= hole_base) &&
		    (dram_addr < (hole_base + hole_size))) {
			sys_addr = dram_addr + hole_offset;

			debugf1("using DHAR to translate DramAddr 0x%lx to "
				"SysAddr 0x%lx\n", (unsigned long)dram_addr,
				(unsigned long)sys_addr);

			return sys_addr;
		}
	}

	base     = get_dram_base(pvt, pvt->mc_node_id);
	sys_addr = dram_addr + base;

	/*
	 * The sys_addr we have computed up to this point is a 40-bit value
	 * because the k8 deals with 40-bit values. However, the value we are
	 * supposed to return is a full 64-bit physical address. The AMD
	 * x86-64 architecture specifies that the most significant implemented
	 * address bit through bit 63 of a physical address must be either all
	 * 0s or all 1s. Therefore we sign-extend the 40-bit sys_addr to a
	 * 64-bit value below. See section 3.4.2 of AMD publication 24592:
	 * AMD x86-64 Architecture Programmer's Manual Volume 1 Application
	 * Programming.
	 */
	sys_addr |= ~((sys_addr & (1ull << 39)) - 1);

	debugf1(" Node %d, DramAddr 0x%lx to SysAddr 0x%lx\n",
		pvt->mc_node_id, (unsigned long)dram_addr,
		(unsigned long)sys_addr);

	return sys_addr;
}
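
/*
 * Example of the sign extension above (assumed values): for a 40-bit
 * sys_addr of 0x8012345678, bit 39 is set, so the OR yields
 * 0xffffff8012345678; with bit 39 clear the value is left unchanged.
 */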

/*
 * @input_addr is an InputAddr associated with the node given by mci. Translate
 * @input_addr to a SysAddr.
 */
static inline u64 input_addr_to_sys_addr(struct mem_ctl_info *mci,
					 u64 input_addr)
{
	return dram_addr_to_sys_addr(mci,
				     input_addr_to_dram_addr(mci, input_addr));
}

/*
 * Find the minimum and maximum InputAddr values that map to the given @csrow.
 * Pass back these values in *input_addr_min and *input_addr_max.
 */
static void find_csrow_limits(struct mem_ctl_info *mci, int csrow,
			      u64 *input_addr_min, u64 *input_addr_max)
{
	struct amd64_pvt *pvt;
	u64 base, mask;

	pvt = mci->pvt_info;
	BUG_ON((csrow < 0) || (csrow >= pvt->csels[0].b_cnt));

	get_cs_base_and_mask(pvt, csrow, 0, &base, &mask);

	*input_addr_min = base & ~mask;
	*input_addr_max = base | mask;
}

/* Map the Error address to a PAGE and PAGE OFFSET. */
static inline void error_address_to_page_and_offset(u64 error_address,
						    u32 *page, u32 *offset)
{
	*page = (u32) (error_address >> PAGE_SHIFT);
	*offset = ((u32) error_address) & ~PAGE_MASK;
}
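
/*
 * E.g. with the usual PAGE_SHIFT of 12, error_address 0x12345678 yields
 * page 0x12345 and offset 0x678.
 */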

/*
 * @sys_addr is an error address (a SysAddr) extracted from the MCA NB Address
 * Low (section 3.6.4.5) and MCA NB Address High (section 3.6.4.6) registers
 * of a node that detected an ECC memory error. mci represents the node that
 * the error address maps to (possibly different from the node that detected
 * the error). Return the number of the csrow that sys_addr maps to, or -1 on
 * error.
 */
static int sys_addr_to_csrow(struct mem_ctl_info *mci, u64 sys_addr)
{
	int csrow;

	csrow = input_addr_to_csrow(mci, sys_addr_to_input_addr(mci, sys_addr));

	if (csrow == -1)
		amd64_mc_err(mci, "Failed to translate InputAddr to csrow for "
				  "address 0x%lx\n", (unsigned long)sys_addr);
	return csrow;
}

static int get_channel_from_ecc_syndrome(struct mem_ctl_info *, u16);

/*
 * Determine if the DIMMs have ECC enabled. ECC is enabled ONLY if all the DIMMs
 * are ECC capable.
 */
static unsigned long amd64_determine_edac_cap(struct amd64_pvt *pvt)
{
	u8 bit;
	unsigned long edac_cap = EDAC_FLAG_NONE;

	bit = (boot_cpu_data.x86 > 0xf || pvt->ext_model >= K8_REV_F)
		? 19
		: 17;

	if (pvt->dclr0 & BIT(bit))
		edac_cap = EDAC_FLAG_SECDED;

	return edac_cap;
}

static void amd64_debug_display_dimm_sizes(struct amd64_pvt *, u8);

static void amd64_dump_dramcfg_low(u32 dclr, int chan)
{
	debugf1("F2x%d90 (DRAM Cfg Low): 0x%08x\n", chan, dclr);

	debugf1(" DIMM type: %sbuffered; all DIMMs support ECC: %s\n",
		(dclr & BIT(16)) ? "un" : "",
		(dclr & BIT(19)) ? "yes" : "no");

	debugf1(" PAR/ERR parity: %s\n",
		(dclr & BIT(8)) ? "enabled" : "disabled");

	if (boot_cpu_data.x86 == 0x10)
		debugf1(" DCT 128bit mode width: %s\n",
			(dclr & BIT(11)) ? "128b" : "64b");

	debugf1(" x4 logical DIMMs present: L0: %s L1: %s L2: %s L3: %s\n",
		(dclr & BIT(12)) ? "yes" : "no",
		(dclr & BIT(13)) ? "yes" : "no",
		(dclr & BIT(14)) ? "yes" : "no",
		(dclr & BIT(15)) ? "yes" : "no");
}

/* Display and decode various NB registers for debug purposes. */
static void dump_misc_regs(struct amd64_pvt *pvt)
{
	debugf1("F3xE8 (NB Cap): 0x%08x\n", pvt->nbcap);

	debugf1(" NB two channel DRAM capable: %s\n",
		(pvt->nbcap & NBCAP_DCT_DUAL) ? "yes" : "no");

	debugf1(" ECC capable: %s, ChipKill ECC capable: %s\n",
		(pvt->nbcap & NBCAP_SECDED) ? "yes" : "no",
		(pvt->nbcap & NBCAP_CHIPKILL) ? "yes" : "no");

	amd64_dump_dramcfg_low(pvt->dclr0, 0);

	debugf1("F3xB0 (Online Spare): 0x%08x\n", pvt->online_spare);

	debugf1("F1xF0 (DRAM Hole Address): 0x%08x, base: 0x%08x, "
		"offset: 0x%08x\n",
		pvt->dhar, dhar_base(pvt),
		(boot_cpu_data.x86 == 0xf) ? k8_dhar_offset(pvt)
					   : f10_dhar_offset(pvt));

	debugf1(" DramHoleValid: %s\n", dhar_valid(pvt) ? "yes" : "no");

	amd64_debug_display_dimm_sizes(pvt, 0);

	/* everything below this point is Fam10h and above */
	if (boot_cpu_data.x86 == 0xf)
		return;

	amd64_debug_display_dimm_sizes(pvt, 1);

	amd64_info("using %s syndromes.\n", ((pvt->ecc_sym_sz == 8) ? "x8" : "x4"));

	/* Only if NOT ganged does dclr1 have valid info */
	if (!dct_ganging_enabled(pvt))
		amd64_dump_dramcfg_low(pvt->dclr1, 1);
}

/*
 * see BKDG, F2x[1,0][5C:40], F2[1,0][6C:60]
 */
static void prep_chip_selects(struct amd64_pvt *pvt)
{
	if (boot_cpu_data.x86 == 0xf && pvt->ext_model < K8_REV_F) {
		pvt->csels[0].b_cnt = pvt->csels[1].b_cnt = 8;
		pvt->csels[0].m_cnt = pvt->csels[1].m_cnt = 8;
	} else {
		pvt->csels[0].b_cnt = pvt->csels[1].b_cnt = 8;
		pvt->csels[0].m_cnt = pvt->csels[1].m_cnt = 4;
	}
}

/*
 * Function 2 Offset F10_DCSB0; read in the DCS Base and DCS Mask registers
 */
static void read_dct_base_mask(struct amd64_pvt *pvt)
{
	int cs;

	prep_chip_selects(pvt);

	for_each_chip_select(cs, 0, pvt) {
		int reg0   = DCSB0 + (cs * 4);
		int reg1   = DCSB1 + (cs * 4);
		u32 *base0 = &pvt->csels[0].csbases[cs];
		u32 *base1 = &pvt->csels[1].csbases[cs];

		if (!amd64_read_dct_pci_cfg(pvt, reg0, base0))
			debugf0(" DCSB0[%d]=0x%08x reg: F2x%x\n",
				cs, *base0, reg0);

		if (boot_cpu_data.x86 == 0xf || dct_ganging_enabled(pvt))
			continue;

		if (!amd64_read_dct_pci_cfg(pvt, reg1, base1))
			debugf0(" DCSB1[%d]=0x%08x reg: F2x%x\n",
				cs, *base1, reg1);
	}

	for_each_chip_select_mask(cs, 0, pvt) {
		int reg0   = DCSM0 + (cs * 4);
		int reg1   = DCSM1 + (cs * 4);
		u32 *mask0 = &pvt->csels[0].csmasks[cs];
		u32 *mask1 = &pvt->csels[1].csmasks[cs];

		if (!amd64_read_dct_pci_cfg(pvt, reg0, mask0))
			debugf0(" DCSM0[%d]=0x%08x reg: F2x%x\n",
				cs, *mask0, reg0);

		if (boot_cpu_data.x86 == 0xf || dct_ganging_enabled(pvt))
			continue;

		if (!amd64_read_dct_pci_cfg(pvt, reg1, mask1))
			debugf0(" DCSM1[%d]=0x%08x reg: F2x%x\n",
				cs, *mask1, reg1);
	}
}

static enum mem_type amd64_determine_memory_type(struct amd64_pvt *pvt, int cs)
{
	enum mem_type type;

	/* F15h supports only DDR3 */
	if (boot_cpu_data.x86 >= 0x15)
		type = (pvt->dclr0 & BIT(16)) ? MEM_DDR3 : MEM_RDDR3;
	else if (boot_cpu_data.x86 == 0x10 || pvt->ext_model >= K8_REV_F) {
		if (pvt->dchr0 & DDR3_MODE)
			type = (pvt->dclr0 & BIT(16)) ? MEM_DDR3 : MEM_RDDR3;
		else
			type = (pvt->dclr0 & BIT(16)) ? MEM_DDR2 : MEM_RDDR2;
	} else {
		type = (pvt->dclr0 & BIT(18)) ? MEM_DDR : MEM_RDDR;
	}

	amd64_info("CS%d: %s\n", cs, edac_mem_types[type]);

	return type;
}

/* Get the number of DCT channels the memory controller is using. */
static int k8_early_channel_count(struct amd64_pvt *pvt)
{
	int flag;

	if (pvt->ext_model >= K8_REV_F)
		/* RevF (NPT) and later */
		flag = pvt->dclr0 & WIDTH_128;
	else
		/* RevE and earlier */
		flag = pvt->dclr0 & REVE_WIDTH_128;

	/* not used */
	pvt->dclr1 = 0;

	return (flag) ? 2 : 1;
}

/* On F10h and later ErrAddr is MC4_ADDR[47:1] */
static u64 get_error_address(struct mce *m)
{
	struct cpuinfo_x86 *c = &boot_cpu_data;
	u64 addr;
	u8 start_bit = 1;
	u8 end_bit   = 47;

	if (c->x86 == 0xf) {
		start_bit = 3;
		end_bit   = 39;
	}

	addr = m->addr & GENMASK(start_bit, end_bit);

	/*
	 * Erratum 637 workaround
	 */
	if (c->x86 == 0x15) {
		struct amd64_pvt *pvt;
		u64 cc6_base, tmp_addr;
		u32 tmp;
		u8 mce_nid, intlv_en;

		if ((addr & GENMASK(24, 47)) >> 24 != 0x00fdf7)
			return addr;

		mce_nid	= amd_get_nb_id(m->extcpu);
		pvt	= mcis[mce_nid]->pvt_info;

		amd64_read_pci_cfg(pvt->F1, DRAM_LOCAL_NODE_LIM, &tmp);
		intlv_en = tmp >> 21 & 0x7;

		/* add [47:27] + 3 trailing bits */
		cc6_base  = (tmp & GENMASK(0, 20)) << 3;

		/* reverse and add DramIntlvEn */
		cc6_base |= intlv_en ^ 0x7;

		/* pin at [47:24] */
		cc6_base <<= 24;

		if (!intlv_en)
			return cc6_base | (addr & GENMASK(0, 23));

		amd64_read_pci_cfg(pvt->F1, DRAM_LOCAL_NODE_BASE, &tmp);

		/* faster log2 */
		tmp_addr  = (addr & GENMASK(12, 23)) << __fls(intlv_en + 1);

		/* OR DramIntlvSel into bits [14:12] */
		tmp_addr |= (tmp & GENMASK(21, 23)) >> 9;

		/* add remaining [11:0] bits from original MC4_ADDR */
		tmp_addr |= addr & GENMASK(0, 11);

		return cc6_base | tmp_addr;
	}

	return addr;
}

static void read_dram_base_limit_regs(struct amd64_pvt *pvt, unsigned range)
{
	struct cpuinfo_x86 *c = &boot_cpu_data;
	int off = range << 3;

	amd64_read_pci_cfg(pvt->F1, DRAM_BASE_LO + off,  &pvt->ranges[range].base.lo);
	amd64_read_pci_cfg(pvt->F1, DRAM_LIMIT_LO + off, &pvt->ranges[range].lim.lo);

	if (c->x86 == 0xf)
		return;

	if (!dram_rw(pvt, range))
		return;

	amd64_read_pci_cfg(pvt->F1, DRAM_BASE_HI + off,  &pvt->ranges[range].base.hi);
	amd64_read_pci_cfg(pvt->F1, DRAM_LIMIT_HI + off, &pvt->ranges[range].lim.hi);

	/* Factor in CC6 save area by reading dst node's limit reg */
	if (c->x86 == 0x15) {
		struct pci_dev *f1 = NULL;
		u8 nid = dram_dst_node(pvt, range);
		u32 llim;

		f1 = pci_get_domain_bus_and_slot(0, 0, PCI_DEVFN(0x18 + nid, 1));
		if (WARN_ON(!f1))
			return;

		amd64_read_pci_cfg(f1, DRAM_LOCAL_NODE_LIM, &llim);

		pvt->ranges[range].lim.lo &= GENMASK(0, 15);

		/* {[39:27],111b} */
		pvt->ranges[range].lim.lo |= ((llim & 0x1fff) << 3 | 0x7) << 16;

		pvt->ranges[range].lim.hi &= GENMASK(0, 7);

		/* [47:40] */
		pvt->ranges[range].lim.hi |= llim >> 13;

		pci_dev_put(f1);
	}
}

static void k8_map_sysaddr_to_csrow(struct mem_ctl_info *mci, u64 sys_addr,
				    u16 syndrome)
{
	struct mem_ctl_info *src_mci;
	struct amd64_pvt *pvt = mci->pvt_info;
	int channel, csrow;
	u32 page, offset;

	/* CHIPKILL enabled */
	if (pvt->nbcfg & NBCFG_CHIPKILL) {
		channel = get_channel_from_ecc_syndrome(mci, syndrome);
		if (channel < 0) {
			/*
			 * Syndrome didn't map, so we don't know which of the
			 * 2 DIMMs is in error. So we need to ID 'both' of them
			 * as suspect.
			 */
			amd64_mc_warn(mci, "unknown syndrome 0x%04x - possible "
					   "error reporting race\n", syndrome);
			edac_mc_handle_ce_no_info(mci, EDAC_MOD_STR);
			return;
		}
	} else {
		/*
		 * non-chipkill ecc mode
		 *
		 * The k8 documentation is unclear about how to determine the
		 * channel number when using non-chipkill memory. This method
		 * was obtained from email communication with someone at AMD.
		 * (Wish the email was placed in this comment - norsk)
		 */
		channel = ((sys_addr & BIT(3)) != 0);
	}

	/*
	 * Find out which node the error address belongs to. This may be
	 * different from the node that detected the error.
	 */
	src_mci = find_mc_by_sys_addr(mci, sys_addr);
	if (!src_mci) {
		amd64_mc_err(mci, "failed to map error addr 0x%lx to a node\n",
			     (unsigned long)sys_addr);
		edac_mc_handle_ce_no_info(mci, EDAC_MOD_STR);
		return;
	}

	/* Now map the sys_addr to a CSROW */
	csrow = sys_addr_to_csrow(src_mci, sys_addr);
	if (csrow < 0) {
		edac_mc_handle_ce_no_info(src_mci, EDAC_MOD_STR);
	} else {
		error_address_to_page_and_offset(sys_addr, &page, &offset);

		edac_mc_handle_ce(src_mci, page, offset, syndrome, csrow,
				  channel, EDAC_MOD_STR);
	}
}

static int ddr2_cs_size(unsigned i, bool dct_width)
{
	unsigned shift = 0;

	if (i <= 2)
		shift = i;
	else if (!(i & 0x1))
		shift = i >> 1;
	else
		shift = (i + 1) >> 1;

	return 128 << (shift + !!dct_width);
}
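
/*
 * Illustrative values: i = 5 gives shift = (5 + 1) >> 1 = 3, i.e.
 * 128 << 3 = 1024MB on a 64-bit DCT, doubled to 2048MB when dct_width
 * (128-bit mode) is set.
 */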

static int k8_dbam_to_chip_select(struct amd64_pvt *pvt, u8 dct,
				  unsigned cs_mode)
{
	u32 dclr = dct ? pvt->dclr1 : pvt->dclr0;

	if (pvt->ext_model >= K8_REV_F) {
		WARN_ON(cs_mode > 11);
		return ddr2_cs_size(cs_mode, dclr & WIDTH_128);
	}
	else if (pvt->ext_model >= K8_REV_D) {
		unsigned diff;
		WARN_ON(cs_mode > 10);

		/*
		 * the below calculation, besides trying to win an obfuscated C
		 * contest, maps cs_mode values to DIMM chip select sizes. The
		 * mappings are:
		 *
		 * cs_mode	CS size (mb)
		 * =======	============
		 * 0		32
		 * 1		64
		 * 2		128
		 * 3		128
		 * 4		256
		 * 5		512
		 * 6		256
		 * 7		512
		 * 8		1024
		 * 9		1024
		 * 10		2048
		 *
		 * Basically, it calculates a value with which to shift the
		 * smallest CS size of 32MB.
		 *
		 * ddr[23]_cs_size have a similar purpose.
		 */
		diff = cs_mode/3 + (unsigned)(cs_mode > 5);

		return 32 << (cs_mode - diff);
	}
	else {
		WARN_ON(cs_mode > 6);
		return 32 << cs_mode;
	}
}
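
/*
 * Sanity check of the revD/revE formula against the mapping table in the
 * comment above: cs_mode = 10 gives diff = 10/3 + 1 = 4, so
 * 32 << (10 - 4) = 2048MB, matching the last table row.
 */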

/*
 * Get the number of DCT channels in use.
 *
 * Return:
 *	number of Memory Channels in operation
 * Pass back:
 *	contents of the DCL0_LOW register
 */
static int f1x_early_channel_count(struct amd64_pvt *pvt)
{
	int i, j, channels = 0;

	/* On F10h, if we are in 128 bit mode, then we are using 2 channels */
	if (boot_cpu_data.x86 == 0x10 && (pvt->dclr0 & WIDTH_128))
		return 2;

	/*
	 * Need to check if in unganged mode: In such, there are 2 channels,
	 * but they are not in 128 bit mode and thus the above 'dclr0' status
	 * bit will be OFF.
	 *
	 * Need to check DCT0[0] and DCT1[0] to see if only one of them has
	 * their CSEnable bit on. If so, then SINGLE DIMM case.
	 */
	debugf0("Data width is not 128 bits - need more decoding\n");

	/*
	 * Check DRAM Bank Address Mapping values for each DIMM to see if there
	 * is more than just one DIMM present in unganged mode. Need to check
	 * both controllers since DIMMs can be placed in either one.
	 */
	for (i = 0; i < 2; i++) {
		u32 dbam = (i ? pvt->dbam1 : pvt->dbam0);

		for (j = 0; j < 4; j++) {
			if (DBAM_DIMM(j, dbam) > 0) {
				channels++;
				break;
			}
		}
	}

	if (channels > 2)
		channels = 2;

	amd64_info("MCT channel count: %d\n", channels);

	return channels;
}

static int ddr3_cs_size(unsigned i, bool dct_width)
{
	unsigned shift = 0;
	int cs_size = 0;

	if (i == 0 || i == 3 || i == 4)
		cs_size = -1;
	else if (i <= 2)
		shift = i;
	else if (i == 12)
		shift = 7;
	else if (!(i & 0x1))
		shift = i >> 1;
	else
		shift = (i + 1) >> 1;

	if (cs_size != -1)
		cs_size = (128 * (1 << !!dct_width)) << shift;

	return cs_size;
}

static int f10_dbam_to_chip_select(struct amd64_pvt *pvt, u8 dct,
				   unsigned cs_mode)
{
	u32 dclr = dct ? pvt->dclr1 : pvt->dclr0;

	WARN_ON(cs_mode > 11);

	if (pvt->dchr0 & DDR3_MODE || pvt->dchr1 & DDR3_MODE)
		return ddr3_cs_size(cs_mode, dclr & WIDTH_128);
	else
		return ddr2_cs_size(cs_mode, dclr & WIDTH_128);
}

/*
 * F15h supports only 64bit DCT interfaces
 */
static int f15_dbam_to_chip_select(struct amd64_pvt *pvt, u8 dct,
				   unsigned cs_mode)
{
	WARN_ON(cs_mode > 12);

	return ddr3_cs_size(cs_mode, false);
}

static void read_dram_ctl_register(struct amd64_pvt *pvt)
{

	if (boot_cpu_data.x86 == 0xf)
		return;

	if (!amd64_read_dct_pci_cfg(pvt, DCT_SEL_LO, &pvt->dct_sel_lo)) {
		debugf0("F2x110 (DCTSelLow): 0x%08x, High range addrs at: 0x%x\n",
			pvt->dct_sel_lo, dct_sel_baseaddr(pvt));

		debugf0(" DCTs operate in %s mode.\n",
			(dct_ganging_enabled(pvt) ? "ganged" : "unganged"));

		if (!dct_ganging_enabled(pvt))
			debugf0(" Address range split per DCT: %s\n",
				(dct_high_range_enabled(pvt) ? "yes" : "no"));

		debugf0(" data interleave for ECC: %s, "
			"DRAM cleared since last warm reset: %s\n",
			(dct_data_intlv_enabled(pvt) ? "enabled" : "disabled"),
			(dct_memory_cleared(pvt) ? "yes" : "no"));

		debugf0(" channel interleave: %s, "
			"interleave bits selector: 0x%x\n",
			(dct_interleave_enabled(pvt) ? "enabled" : "disabled"),
			dct_sel_interleave_addr(pvt));
	}

	amd64_read_dct_pci_cfg(pvt, DCT_SEL_HI, &pvt->dct_sel_hi);
}

/*
 * Determine channel (DCT) based on the interleaving mode: F10h BKDG, 2.8.9
 * Memory Interleaving Modes.
 */
static u8 f1x_determine_channel(struct amd64_pvt *pvt, u64 sys_addr,
				bool hi_range_sel, u8 intlv_en)
{
	u8 dct_sel_high = (pvt->dct_sel_lo >> 1) & 1;

	if (dct_ganging_enabled(pvt))
		return 0;

	if (hi_range_sel)
		return dct_sel_high;

	/*
	 * see F2x110[DctSelIntLvAddr] - channel interleave mode
	 */
	if (dct_interleave_enabled(pvt)) {
		u8 intlv_addr = dct_sel_interleave_addr(pvt);

		/* return DCT select function: 0=DCT0, 1=DCT1 */
		if (!intlv_addr)
			return sys_addr >> 6 & 1;

		if (intlv_addr & 0x2) {
			u8 shift = intlv_addr & 0x1 ? 9 : 6;
			u32 temp = hweight_long((u32) ((sys_addr >> 16) & 0x1F)) % 2;

			return ((sys_addr >> shift) & 1) ^ temp;
		}

		return (sys_addr >> (12 + hweight8(intlv_en))) & 1;
	}

	if (dct_high_range_enabled(pvt))
		return ~dct_sel_high & 1;

	return 0;
}
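
/*
 * Example (assumed register state): with channel interleaving enabled and
 * intlv_addr == 0, the DCT is selected by address bit 6 alone, i.e.
 * consecutive 64-byte lines alternate between DCT0 and DCT1.
 */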

/* Convert the sys_addr to the normalized DCT address */
static u64 f1x_get_norm_dct_addr(struct amd64_pvt *pvt, unsigned range,
				 u64 sys_addr, bool hi_rng,
				 u32 dct_sel_base_addr)
{
	u64 chan_off;
	u64 dram_base		= get_dram_base(pvt, range);
	u64 hole_off		= f10_dhar_offset(pvt);
	u64 dct_sel_base_off	= (pvt->dct_sel_hi & 0xFFFFFC00) << 16;

	if (hi_rng) {
		/*
		 * if
		 * base address of high range is below 4Gb
		 * (bits [47:27] at [31:11])
		 * DRAM address space on this DCT is hoisted above 4Gb	&&
		 * sys_addr > 4Gb
		 *
		 *	remove hole offset from sys_addr
		 * else
		 *	remove high range offset from sys_addr
		 */
		if ((!(dct_sel_base_addr >> 16) ||
		     dct_sel_base_addr < dhar_base(pvt)) &&
		    dhar_valid(pvt) &&
		    (sys_addr >= BIT_64(32)))
			chan_off = hole_off;
		else
			chan_off = dct_sel_base_off;
	} else {
		/*
		 * if
		 * we have a valid hole		&&
		 * sys_addr > 4Gb
		 *
		 *	remove hole
		 * else
		 *	remove dram base to normalize to DCT address
		 */
		if (dhar_valid(pvt) && (sys_addr >= BIT_64(32)))
			chan_off = hole_off;
		else
			chan_off = dram_base;
	}

	return (sys_addr & GENMASK(6, 47)) - (chan_off & GENMASK(23, 47));
}

/*
 * Check if the csrow passed in is marked as SPARED; if so, return the new
 * spare row.
 */
static int f10_process_possible_spare(struct amd64_pvt *pvt, u8 dct, int csrow)
{
	int tmp_cs;

	if (online_spare_swap_done(pvt, dct) &&
	    csrow == online_spare_bad_dramcs(pvt, dct)) {

		for_each_chip_select(tmp_cs, dct, pvt) {
			if (chip_select_base(tmp_cs, dct, pvt) & 0x2) {
				csrow = tmp_cs;
				break;
			}
		}
	}
	return csrow;
}

/*
 * Iterate over the DRAM DCT "base" and "mask" registers looking for a
 * SystemAddr match on the specified 'ChannelSelect' and 'NodeID'
 *
 * Return:
 *	-EINVAL:  NOT FOUND
 *	0..csrow: Chip-Select Row
 */
static int f1x_lookup_addr_in_dct(u64 in_addr, u32 nid, u8 dct)
{
	struct mem_ctl_info *mci;
	struct amd64_pvt *pvt;
	u64 cs_base, cs_mask;
	int cs_found = -EINVAL;
	int csrow;

	mci = mcis[nid];
	if (!mci)
		return cs_found;

	pvt = mci->pvt_info;

	debugf1("input addr: 0x%llx, DCT: %d\n", in_addr, dct);

	for_each_chip_select(csrow, dct, pvt) {
		if (!csrow_enabled(csrow, dct, pvt))
			continue;

		get_cs_base_and_mask(pvt, csrow, dct, &cs_base, &cs_mask);

		debugf1(" CSROW=%d CSBase=0x%llx CSMask=0x%llx\n",
			csrow, cs_base, cs_mask);

		cs_mask = ~cs_mask;

		debugf1(" (InputAddr & ~CSMask)=0x%llx "
			"(CSBase & ~CSMask)=0x%llx\n",
			(in_addr & cs_mask), (cs_base & cs_mask));

		if ((in_addr & cs_mask) == (cs_base & cs_mask)) {
			cs_found = f10_process_possible_spare(pvt, dct, csrow);

			debugf1(" MATCH csrow=%d\n", cs_found);
			break;
		}
	}
	return cs_found;
}

/*
 * See F2x10C. Non-interleaved graphics framebuffer memory under the 16G is
 * swapped with a region located at the bottom of memory so that the GPU can use
 * the interleaved region and thus two channels.
 */
static u64 f1x_swap_interleaved_region(struct amd64_pvt *pvt, u64 sys_addr)
{
	u32 swap_reg, swap_base, swap_limit, rgn_size, tmp_addr;

	if (boot_cpu_data.x86 == 0x10) {
		/* only revC3 and revE have that feature */
		if (boot_cpu_data.x86_model < 4 ||
		    (boot_cpu_data.x86_model < 0xa &&
		     boot_cpu_data.x86_mask < 3))
			return sys_addr;
	}

	amd64_read_dct_pci_cfg(pvt, SWAP_INTLV_REG, &swap_reg);

	if (!(swap_reg & 0x1))
		return sys_addr;

	swap_base	= (swap_reg >> 3) & 0x7f;
	swap_limit	= (swap_reg >> 11) & 0x7f;
	rgn_size	= (swap_reg >> 20) & 0x7f;
	tmp_addr	= sys_addr >> 27;

	if (!(sys_addr >> 34) &&
	    (((tmp_addr >= swap_base) &&
	     (tmp_addr <= swap_limit)) ||
	     (tmp_addr < rgn_size)))
		return sys_addr ^ (u64)swap_base << 27;

	return sys_addr;
}

/* For a given @dram_range, check if @sys_addr falls within it. */
static int f1x_match_to_this_node(struct amd64_pvt *pvt, unsigned range,
				  u64 sys_addr, int *nid, int *chan_sel)
{
	int cs_found = -EINVAL;
	u64 chan_addr;
	u32 dct_sel_base;
	u8 channel;
	bool high_range = false;

	u8 node_id    = dram_dst_node(pvt, range);
	u8 intlv_en   = dram_intlv_en(pvt, range);
	u32 intlv_sel = dram_intlv_sel(pvt, range);

	debugf1("(range %d) SystemAddr= 0x%llx Limit=0x%llx\n",
		range, sys_addr, get_dram_limit(pvt, range));

	if (dhar_valid(pvt) &&
	    dhar_base(pvt) <= sys_addr &&
	    sys_addr < BIT_64(32)) {
		amd64_warn("Huh? Address is in the MMIO hole: 0x%016llx\n",
			   sys_addr);
		return -EINVAL;
	}

	if (intlv_en && (intlv_sel != ((sys_addr >> 12) & intlv_en)))
		return -EINVAL;

	sys_addr = f1x_swap_interleaved_region(pvt, sys_addr);

	dct_sel_base = dct_sel_baseaddr(pvt);

	/*
	 * check whether addresses >= DctSelBaseAddr[47:27] are to be used to
	 * select between DCT0 and DCT1.
	 */
	if (dct_high_range_enabled(pvt) &&
	   !dct_ganging_enabled(pvt) &&
	   ((sys_addr >> 27) >= (dct_sel_base >> 11)))
		high_range = true;

	channel = f1x_determine_channel(pvt, sys_addr, high_range, intlv_en);

	chan_addr = f1x_get_norm_dct_addr(pvt, range, sys_addr,
					  high_range, dct_sel_base);

	/* Remove node interleaving, see F1x120 */
	if (intlv_en)
		chan_addr = ((chan_addr >> (12 + hweight8(intlv_en))) << 12) |
			    (chan_addr & 0xfff);

	/* remove channel interleave */
	if (dct_interleave_enabled(pvt) &&
	   !dct_high_range_enabled(pvt) &&
	   !dct_ganging_enabled(pvt)) {

		if (dct_sel_interleave_addr(pvt) != 1) {
			if (dct_sel_interleave_addr(pvt) == 0x3)
				/* hash 9 */
				chan_addr = ((chan_addr >> 10) << 9) |
					     (chan_addr & 0x1ff);
			else
				/* A[6] or hash 6 */
				chan_addr = ((chan_addr >> 7) << 6) |
					     (chan_addr & 0x3f);
		} else
			/* A[12] */
			chan_addr = ((chan_addr >> 13) << 12) |
				     (chan_addr & 0xfff);
	}

	debugf1(" Normalized DCT addr: 0x%llx\n", chan_addr);

	cs_found = f1x_lookup_addr_in_dct(chan_addr, node_id, channel);

	if (cs_found >= 0) {
		*nid = node_id;
		*chan_sel = channel;
	}
	return cs_found;
}

static int f1x_translate_sysaddr_to_cs(struct amd64_pvt *pvt, u64 sys_addr,
				       int *node, int *chan_sel)
{
	int cs_found = -EINVAL;
	unsigned range;

	for (range = 0; range < DRAM_RANGES; range++) {

		if (!dram_rw(pvt, range))
			continue;

		if ((get_dram_base(pvt, range)  <= sys_addr) &&
		    (get_dram_limit(pvt, range) >= sys_addr)) {

			cs_found = f1x_match_to_this_node(pvt, range,
							  sys_addr, node,
							  chan_sel);
			if (cs_found >= 0)
				break;
		}
	}
	return cs_found;
}

/*
 * For reference see "2.8.5 Routing DRAM Requests" in F10 BKDG. This code maps
 * a @sys_addr to NodeID, DCT (channel) and chip select (CSROW).
 *
 * The @sys_addr is usually an error address received from the hardware
 * (MCX_ADDR).
 */
static void f1x_map_sysaddr_to_csrow(struct mem_ctl_info *mci, u64 sys_addr,
				     u16 syndrome)
{
	struct amd64_pvt *pvt = mci->pvt_info;
	u32 page, offset;
	int nid, csrow, chan = 0;

	csrow = f1x_translate_sysaddr_to_cs(pvt, sys_addr, &nid, &chan);

	if (csrow < 0) {
		edac_mc_handle_ce_no_info(mci, EDAC_MOD_STR);
		return;
	}

	error_address_to_page_and_offset(sys_addr, &page, &offset);

	/*
	 * We need the syndromes for channel detection only when we're
	 * ganged. Otherwise @chan should already contain the channel at
	 * this point.
	 */
	if (dct_ganging_enabled(pvt))
		chan = get_channel_from_ecc_syndrome(mci, syndrome);

	if (chan >= 0)
		edac_mc_handle_ce(mci, page, offset, syndrome, csrow, chan,
				  EDAC_MOD_STR);
	else
		/*
		 * Channel unknown, report all channels on this CSROW as failed.
		 */
		for (chan = 0; chan < mci->csrows[csrow].nr_channels; chan++)
			edac_mc_handle_ce(mci, page, offset, syndrome,
					  csrow, chan, EDAC_MOD_STR);
}

/*
 * debug routine to display the memory sizes of all logical DIMMs and their
 * CSROWs
 */
static void amd64_debug_display_dimm_sizes(struct amd64_pvt *pvt, u8 ctrl)
{
	int dimm, size0, size1, factor = 0;
	u32 *dcsb = ctrl ? pvt->csels[1].csbases : pvt->csels[0].csbases;
	u32 dbam  = ctrl ? pvt->dbam1 : pvt->dbam0;

	if (boot_cpu_data.x86 == 0xf) {
		if (pvt->dclr0 & WIDTH_128)
			factor = 1;

		/* K8 families < revF not supported yet */
		if (pvt->ext_model < K8_REV_F)
			return;
		else
			WARN_ON(ctrl != 0);
	}

	dbam = (ctrl && !dct_ganging_enabled(pvt)) ? pvt->dbam1 : pvt->dbam0;
	dcsb = (ctrl && !dct_ganging_enabled(pvt)) ? pvt->csels[1].csbases
						   : pvt->csels[0].csbases;

	debugf1("F2x%d80 (DRAM Bank Address Mapping): 0x%08x\n", ctrl, dbam);

	edac_printk(KERN_DEBUG, EDAC_MC, "DCT%d chip selects:\n", ctrl);

	/* Dump memory sizes for DIMM and its CSROWs */
	for (dimm = 0; dimm < 4; dimm++) {

		size0 = 0;
		if (dcsb[dimm*2] & DCSB_CS_ENABLE)
			size0 = pvt->ops->dbam_to_cs(pvt, ctrl,
						     DBAM_DIMM(dimm, dbam));

		size1 = 0;
		if (dcsb[dimm*2 + 1] & DCSB_CS_ENABLE)
			size1 = pvt->ops->dbam_to_cs(pvt, ctrl,
						     DBAM_DIMM(dimm, dbam));

		amd64_info(EDAC_MC ": %d: %5dMB %d: %5dMB\n",
			   dimm * 2,     size0 << factor,
			   dimm * 2 + 1, size1 << factor);
	}
}

static struct amd64_family_type amd64_family_types[] = {
	[K8_CPUS] = {
		.ctl_name = "K8",
		.f1_id = PCI_DEVICE_ID_AMD_K8_NB_ADDRMAP,
		.f3_id = PCI_DEVICE_ID_AMD_K8_NB_MISC,
		.ops = {
			.early_channel_count	= k8_early_channel_count,
			.map_sysaddr_to_csrow	= k8_map_sysaddr_to_csrow,
			.dbam_to_cs		= k8_dbam_to_chip_select,
			.read_dct_pci_cfg	= k8_read_dct_pci_cfg,
		}
	},
	[F10_CPUS] = {
		.ctl_name = "F10h",
		.f1_id = PCI_DEVICE_ID_AMD_10H_NB_MAP,
		.f3_id = PCI_DEVICE_ID_AMD_10H_NB_MISC,
		.ops = {
			.early_channel_count	= f1x_early_channel_count,
			.map_sysaddr_to_csrow	= f1x_map_sysaddr_to_csrow,
			.dbam_to_cs		= f10_dbam_to_chip_select,
			.read_dct_pci_cfg	= f10_read_dct_pci_cfg,
		}
	},
	[F15_CPUS] = {
		.ctl_name = "F15h",
		.f1_id = PCI_DEVICE_ID_AMD_15H_NB_F1,
		.f3_id = PCI_DEVICE_ID_AMD_15H_NB_F3,
		.ops = {
			.early_channel_count	= f1x_early_channel_count,
			.map_sysaddr_to_csrow	= f1x_map_sysaddr_to_csrow,
			.dbam_to_cs		= f15_dbam_to_chip_select,
			.read_dct_pci_cfg	= f15_read_dct_pci_cfg,
		}
	},
};

static struct pci_dev *pci_get_related_function(unsigned int vendor,
						unsigned int device,
						struct pci_dev *related)
{
	struct pci_dev *dev = NULL;

	dev = pci_get_device(vendor, device, dev);
	while (dev) {
		if ((dev->bus->number == related->bus->number) &&
		    (PCI_SLOT(dev->devfn) == PCI_SLOT(related->devfn)))
			break;
		dev = pci_get_device(vendor, device, dev);
	}

	return dev;
}

/*
 * These are tables of eigenvectors (one per line) which can be used for the
 * construction of the syndrome tables. The modified syndrome search algorithm
 * uses those to find the symbol in error and thus the DIMM.
 *
 * Algorithm courtesy of Ross LaFetra from AMD.
 */
static u16 x4_vectors[] = {
	0x2f57, 0x1afe, 0x66cc, 0xdd88,
	0x11eb, 0x3396, 0x7f4c, 0xeac8,
	0x0001, 0x0002, 0x0004, 0x0008,
	0x1013, 0x3032, 0x4044, 0x8088,
	0x106b, 0x30d6, 0x70fc, 0xe0a8,
	0x4857, 0xc4fe, 0x13cc, 0x3288,
	0x1ac5, 0x2f4a, 0x5394, 0xa1e8,
	0x1f39, 0x251e, 0xbd6c, 0x6bd8,
	0x15c1, 0x2a42, 0x89ac, 0x4758,
	0x2b03, 0x1602, 0x4f0c, 0xca08,
	0x1f07, 0x3a0e, 0x6b04, 0xbd08,
	0x8ba7, 0x465e, 0x244c, 0x1cc8,
	0x2b87, 0x164e, 0x642c, 0xdc18,
	0x40b9, 0x80de, 0x1094, 0x20e8,
	0x27db, 0x1eb6, 0x9dac, 0x7b58,
	0x11c1, 0x2242, 0x84ac, 0x4c58,
	0x1be5, 0x2d7a, 0x5e34, 0xa718,
	0x4b39, 0x8d1e, 0x14b4, 0x28d8,
	0x4c97, 0xc87e, 0x11fc, 0x33a8,
	0x8e97, 0x497e, 0x2ffc, 0x1aa8,
	0x16b3, 0x3d62, 0x4f34, 0x8518,
	0x1e2f, 0x391a, 0x5cac, 0xf858,
	0x1d9f, 0x3b7a, 0x572c, 0xfe18,
	0x15f5, 0x2a5a, 0x5264, 0xa3b8,
	0x1dbb, 0x3b66, 0x715c, 0xe3f8,
	0x4397, 0xc27e, 0x17fc, 0x3ea8,
	0x1617, 0x3d3e, 0x6464, 0xb8b8,
	0x23ff, 0x12aa, 0xab6c, 0x56d8,
	0x2dfb, 0x1ba6, 0x913c, 0x7328,
	0x185d, 0x2ca6, 0x7914, 0x9e28,
	0x171b, 0x3e36, 0x7d7c, 0xebe8,
	0x4199, 0x82ee, 0x19f4, 0x2e58,
	0x4807, 0xc40e, 0x130c, 0x3208,
	0x1905, 0x2e0a, 0x5804, 0xac08,
	0x213f, 0x132a, 0xadfc, 0x5ba8,
	0x19a9, 0x2efe, 0xb5cc, 0x6f88,
};

static u16 x8_vectors[] = {
	0x0145, 0x028a, 0x2374, 0x43c8, 0xa1f0, 0x0520, 0x0a40, 0x1480,
	0x0211, 0x0422, 0x0844, 0x1088, 0x01b0, 0x44e0, 0x23c0, 0xed80,
	0x1011, 0x0116, 0x022c, 0x0458, 0x08b0, 0x8c60, 0x2740, 0x4e80,
	0x0411, 0x0822, 0x1044, 0x0158, 0x02b0, 0x2360, 0x46c0, 0xab80,
	0x0811, 0x1022, 0x012c, 0x0258, 0x04b0, 0x4660, 0x8cc0, 0x2780,
	0x2071, 0x40e2, 0xa0c4, 0x0108, 0x0210, 0x0420, 0x0840, 0x1080,
	0x4071, 0x80e2, 0x0104, 0x0208, 0x0410, 0x0820, 0x1040, 0x2080,
	0x8071, 0x0102, 0x0204, 0x0408, 0x0810, 0x1020, 0x2040, 0x4080,
	0x019d, 0x03d6, 0x136c, 0x2198, 0x50b0, 0xb2e0, 0x0740, 0x0e80,
	0x0189, 0x03ea, 0x072c, 0x0e58, 0x1cb0, 0x56e0, 0x37c0, 0xf580,
	0x01fd, 0x0376, 0x06ec, 0x0bb8, 0x1110, 0x2220, 0x4440, 0x8880,
	0x0163, 0x02c6, 0x1104, 0x0758, 0x0eb0, 0x2be0, 0x6140, 0xc280,
	0x02fd, 0x01c6, 0x0b5c, 0x1108, 0x07b0, 0x25a0, 0x8840, 0x6180,
	0x0801, 0x012e, 0x025c, 0x04b8, 0x1370, 0x26e0, 0x57c0, 0xb580,
	0x0401, 0x0802, 0x015c, 0x02b8, 0x22b0, 0x13e0, 0x7140, 0xe280,
	0x0201, 0x0402, 0x0804, 0x01b8, 0x11b0, 0x31a0, 0x8040, 0x7180,
	0x0101, 0x0202, 0x0404, 0x0808, 0x1010, 0x2020, 0x4040, 0x8080,
	0x0001, 0x0002, 0x0004, 0x0008, 0x0010, 0x0020, 0x0040, 0x0080,
	0x0100, 0x0200, 0x0400, 0x0800, 0x1000, 0x2000, 0x4000, 0x8000,
};

static int decode_syndrome(u16 syndrome, u16 *vectors, unsigned num_vecs,
			   unsigned v_dim)
{
	unsigned int i, err_sym;

	for (err_sym = 0; err_sym < num_vecs / v_dim; err_sym++) {
		u16 s = syndrome;
		unsigned v_idx =  err_sym * v_dim;
		unsigned v_end = (err_sym + 1) * v_dim;

		/* walk over all 16 bits of the syndrome */
		for (i = 1; i < (1U << 16); i <<= 1) {

			/* if bit is set in that eigenvector... */
			if (v_idx < v_end && vectors[v_idx] & i) {
				u16 ev_comp = vectors[v_idx++];

				/* ... and bit set in the modified syndrome, */
				if (s & i) {
					/* remove it. */
					s ^= ev_comp;

					if (!s)
						return err_sym;
				}

			} else if (s & i)
				/* can't get to zero, move to next symbol */
				break;
		}
	}

	debugf0("syndrome(%x) not found\n", syndrome);
	return -1;
}
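
/*
 * Small worked example against x4_vectors above: its third row is the
 * identity-like set { 0x0001, 0x0002, 0x0004, 0x0008 } (err_sym 2), so a
 * syndrome of 0x0003 is eliminated by XORing 0x0001 and then 0x0002, and
 * decode_syndrome() returns 2.
 */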

static int map_err_sym_to_channel(int err_sym, int sym_size)
{
	if (sym_size == 4)
		switch (err_sym) {
		case 0x20:
		case 0x21:
			return 0;
			break;
		case 0x22:
		case 0x23:
			return 1;
			break;
		default:
			return err_sym >> 4;
			break;
		}
	/* x8 symbols */
	else
		switch (err_sym) {
		/* imaginary bits not in a DIMM */
		case 0x10:
			WARN(1, KERN_ERR "Invalid error symbol: 0x%x\n",
			     err_sym);
			return -1;
			break;

		case 0x11:
			return 0;
			break;
		case 0x12:
			return 1;
			break;
		default:
			return err_sym >> 3;
			break;
		}
	return -1;
}
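
/*
 * E.g. an x4 err_sym of 0x23 reports channel 1 via the explicit cases
 * above, while 0x25 falls through to the default and reports
 * 0x25 >> 4 = channel 2.
 */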

static int get_channel_from_ecc_syndrome(struct mem_ctl_info *mci, u16 syndrome)
{
	struct amd64_pvt *pvt = mci->pvt_info;
	int err_sym = -1;

	if (pvt->ecc_sym_sz == 8)
		err_sym = decode_syndrome(syndrome, x8_vectors,
					  ARRAY_SIZE(x8_vectors),
					  pvt->ecc_sym_sz);
	else if (pvt->ecc_sym_sz == 4)
		err_sym = decode_syndrome(syndrome, x4_vectors,
					  ARRAY_SIZE(x4_vectors),
					  pvt->ecc_sym_sz);
	else {
		amd64_warn("Illegal syndrome type: %u\n", pvt->ecc_sym_sz);
		return err_sym;
	}

	return map_err_sym_to_channel(err_sym, pvt->ecc_sym_sz);
}

/*
 * Handle any Correctable Errors (CEs) that have occurred. Check for valid ERROR
 * ADDRESS and process.
 */
static void amd64_handle_ce(struct mem_ctl_info *mci, struct mce *m)
{
	struct amd64_pvt *pvt = mci->pvt_info;
	u64 sys_addr;
	u16 syndrome;

	/* Ensure that the Error Address is VALID */
	if (!(m->status & MCI_STATUS_ADDRV)) {
		amd64_mc_err(mci, "HW has no ERROR_ADDRESS available\n");
		edac_mc_handle_ce_no_info(mci, EDAC_MOD_STR);
		return;
	}

	sys_addr = get_error_address(m);
	syndrome = extract_syndrome(m->status);

	amd64_mc_err(mci, "CE ERROR_ADDRESS= 0x%llx\n", sys_addr);

	pvt->ops->map_sysaddr_to_csrow(mci, sys_addr, syndrome);
}

/* Handle any Un-correctable Errors (UEs) */
static void amd64_handle_ue(struct mem_ctl_info *mci, struct mce *m)
{
	struct mem_ctl_info *log_mci, *src_mci = NULL;
	int csrow;
	u64 sys_addr;
	u32 page, offset;

	log_mci = mci;

	if (!(m->status & MCI_STATUS_ADDRV)) {
		amd64_mc_err(mci, "HW has no ERROR_ADDRESS available\n");
		edac_mc_handle_ue_no_info(log_mci, EDAC_MOD_STR);
		return;
	}

	sys_addr = get_error_address(m);

	/*
	 * Find out which node the error address belongs to. This may be
	 * different from the node that detected the error.
	 */
	src_mci = find_mc_by_sys_addr(mci, sys_addr);
	if (!src_mci) {
		amd64_mc_err(mci, "ERROR ADDRESS (0x%lx) NOT mapped to a MC\n",
			     (unsigned long)sys_addr);
		edac_mc_handle_ue_no_info(log_mci, EDAC_MOD_STR);
		return;
	}

	log_mci = src_mci;

	csrow = sys_addr_to_csrow(log_mci, sys_addr);
	if (csrow < 0) {
		amd64_mc_err(mci, "ERROR_ADDRESS (0x%lx) NOT mapped to CS\n",
			     (unsigned long)sys_addr);
		edac_mc_handle_ue_no_info(log_mci, EDAC_MOD_STR);
	} else {
		error_address_to_page_and_offset(sys_addr, &page, &offset);
		edac_mc_handle_ue(log_mci, page, offset, csrow, EDAC_MOD_STR);
	}
}

static inline void __amd64_decode_bus_error(struct mem_ctl_info *mci,
					    struct mce *m)
{
	u16 ec = EC(m->status);
	u8 xec = XEC(m->status, 0x1f);
	u8 ecc_type = (m->status >> 45) & 0x3;

	/* Bail early out if this was an 'observed' error */
	if (PP(ec) == NBSL_PP_OBS)
		return;

	/* Do only ECC errors */
	if (xec && xec != F10_NBSL_EXT_ERR_ECC)
		return;

	if (ecc_type == 2)
		amd64_handle_ce(mci, m);
	else if (ecc_type == 1)
		amd64_handle_ue(mci, m);
}

void amd64_decode_bus_error(int node_id, struct mce *m)
{
	__amd64_decode_bus_error(mcis[node_id], m);
}

/*
 * Use pvt->F2 which contains the F2 CPU PCI device to get the related
 * F1 (AddrMap) and F3 (Misc) devices. Return negative value on error.
 */
static int reserve_mc_sibling_devs(struct amd64_pvt *pvt, u16 f1_id, u16 f3_id)
{
	/* Reserve the ADDRESS MAP Device */
	pvt->F1 = pci_get_related_function(pvt->F2->vendor, f1_id, pvt->F2);
	if (!pvt->F1) {
		amd64_err("error address map device not found: "
			  "vendor %x device 0x%x (broken BIOS?)\n",
			  PCI_VENDOR_ID_AMD, f1_id);
		return -ENODEV;
	}

	/* Reserve the MISC Device */
	pvt->F3 = pci_get_related_function(pvt->F2->vendor, f3_id, pvt->F2);
	if (!pvt->F3) {
		pci_dev_put(pvt->F1);
		pvt->F1 = NULL;

		amd64_err("error F3 device not found: "
			  "vendor %x device 0x%x (broken BIOS?)\n",
			  PCI_VENDOR_ID_AMD, f3_id);

		return -ENODEV;
	}
	debugf1("F1: %s\n", pci_name(pvt->F1));
	debugf1("F2: %s\n", pci_name(pvt->F2));
	debugf1("F3: %s\n", pci_name(pvt->F3));

	return 0;
}

static void free_mc_sibling_devs(struct amd64_pvt *pvt)
{
	pci_dev_put(pvt->F1);
	pci_dev_put(pvt->F3);
}
2043 * Retrieve the hardware registers of the memory controller (this includes the
2044 * 'Address Map' and 'Misc' device regs)
2046 static void read_mc_regs(struct amd64_pvt *pvt)
2048 struct cpuinfo_x86 *c = &boot_cpu_data;
2049 u64 msr_val;
2050 u32 tmp;
2051 unsigned range;
2054 * Retrieve TOP_MEM and TOP_MEM2; no masking off of reserved bits since
2055 * those are Read-As-Zero
2057 rdmsrl(MSR_K8_TOP_MEM1, pvt->top_mem);
2058 debugf0(" TOP_MEM: 0x%016llx\n", pvt->top_mem);
2060 /* check first whether TOP_MEM2 is enabled */
2061 rdmsrl(MSR_K8_SYSCFG, msr_val);
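/* SYSCFG[21] is MtrrTom2En; TOP_MEM2 is valid only when it is set */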
2062 if (msr_val & (1U << 21)) {
2063 rdmsrl(MSR_K8_TOP_MEM2, pvt->top_mem2);
2064 debugf0(" TOP_MEM2: 0x%016llx\n", pvt->top_mem2);
2065 } else
2066 debugf0(" TOP_MEM2 disabled.\n");
2068 amd64_read_pci_cfg(pvt->F3, NBCAP, &pvt->nbcap);
2070 read_dram_ctl_register(pvt);
2072 for (range = 0; range < DRAM_RANGES; range++) {
2073 u8 rw;
2075 /* read settings for this DRAM range */
2076 read_dram_base_limit_regs(pvt, range);
2078 rw = dram_rw(pvt, range);
2079 if (!rw)
2080 continue;
2082 debugf1(" DRAM range[%d], base: 0x%016llx; limit: 0x%016llx\n",
2083 range,
2084 get_dram_base(pvt, range),
2085 get_dram_limit(pvt, range));
2087 debugf1(" IntlvEn=%s; Range access: %s%s IntlvSel=%d DstNode=%d\n",
2088 dram_intlv_en(pvt, range) ? "Enabled" : "Disabled",
2089 (rw & 0x1) ? "R" : "-",
2090 (rw & 0x2) ? "W" : "-",
2091 dram_intlv_sel(pvt, range),
2092 dram_dst_node(pvt, range));
2095 read_dct_base_mask(pvt);
2097 amd64_read_pci_cfg(pvt->F1, DHAR, &pvt->dhar);
2098 amd64_read_dct_pci_cfg(pvt, DBAM0, &pvt->dbam0);
2100 amd64_read_pci_cfg(pvt->F3, F10_ONLINE_SPARE, &pvt->online_spare);
2102 amd64_read_dct_pci_cfg(pvt, DCLR0, &pvt->dclr0);
2103 amd64_read_dct_pci_cfg(pvt, DCHR0, &pvt->dchr0);
2105 if (!dct_ganging_enabled(pvt)) {
2106 amd64_read_dct_pci_cfg(pvt, DCLR1, &pvt->dclr1);
2107 amd64_read_dct_pci_cfg(pvt, DCHR1, &pvt->dchr1);
2110 pvt->ecc_sym_sz = 4;
2112 if (c->x86 >= 0x10) {
2113 amd64_read_pci_cfg(pvt->F3, EXT_NB_MCA_CFG, &tmp);
2114 amd64_read_dct_pci_cfg(pvt, DBAM1, &pvt->dbam1);
2116 /* F10h, revD and later can do x8 ECC too */
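/* (the x8 capability is advertised in bit 25 of EXT_NB_MCA_CFG, F3x180) */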
2117 if ((c->x86 > 0x10 || c->x86_model > 7) && tmp & BIT(25))
2118 pvt->ecc_sym_sz = 8;
2120 dump_misc_regs(pvt);
2124 * NOTE: CPU Revision Dependent code
2126 * Input:
2127 * @csrow_nr ChipSelect Row Number (0..NUM_CHIPSELECTS-1)
2128 * @pvt private data, which provides -->
2129 * the DRAM Bank Address mapping (DBAM) register
2130 * the node_id
2131 * the DCL register, where dual_channel_active is indicated
2133 * The DBAM register consists of four 4-bit fields, one per CSROW pair:
2135 * Bits: CSROWs
2136 * 0-3 CSROWs 0 and 1
2137 * 4-7 CSROWs 2 and 3
2138 * 8-11 CSROWs 4 and 5
2139 * 12-15 CSROWs 6 and 7
2141 * Values range from: 0 to 15
2142 * The meaning of the values depends on CPU revision and dual-channel state;
2143 * see the relevant BKDG for more info.
2145 * The memory controller provides for a total of only 8 CSROWs in its current
2146 * architecture. Each "pair" of CSROWs normally represents one DIMM in
2147 * single-channel mode or two DIMMs in dual-channel mode.
2149 * The following code logic collapses the various tables for CSROW based on CPU
2150 * revision.
2152 * Returns:
2153 * The number of PAGE_SIZE pages that the specified CSROW
2154 * encompasses
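 *
 * Worked example (illustrative values): with DBAM = 0x00002211, the field
 * for CSROWs 2 and 3 occupies bits 4-7, so cs_mode = (0x2211 >> 4) & 0xF = 1;
 * ops->dbam_to_cs() then translates that cs_mode to a chip select size.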
2157 static u32 amd64_csrow_nr_pages(struct amd64_pvt *pvt, u8 dct, int csrow_nr)
2159 u32 cs_mode, nr_pages;
2160 u32 dbam = dct ? pvt->dbam1 : pvt->dbam0;
2163 * The math on this doesn't look right on the surface because x/2*4 can
2164 * be simplified to x*2, but this expression makes use of the fact that
2165 * it is integer math, where 1/2 = 0: each pair of CSROWs shares one 4-bit
2166 * DBAM field, so (csrow_nr / 2) * 4 is the number of bits to shift the
2167 * DBAM register right to extract the proper CSROW field.
2169 cs_mode = (dbam >> ((csrow_nr / 2) * 4)) & 0xF;
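/* dbam_to_cs() returns the chip select size in MB; shifting left by
 * (20 - PAGE_SHIFT) converts MB to PAGE_SIZE pages (<< 8 with 4K pages) */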
2171 nr_pages = pvt->ops->dbam_to_cs(pvt, dct, cs_mode) << (20 - PAGE_SHIFT);
2173 debugf0(" (csrow=%d) DBAM map index= %d\n", csrow_nr, cs_mode);
2174 debugf0(" nr_pages= %u channel-count = %d\n",
2175 nr_pages, pvt->channel_count);
2177 return nr_pages;
2181 * Initialize the array of csrow attribute instances, based on the values
2182 * read from the hardware's PCI config registers.
2184 static int init_csrows(struct mem_ctl_info *mci)
2186 struct csrow_info *csrow;
2187 struct amd64_pvt *pvt = mci->pvt_info;
2188 u64 input_addr_min, input_addr_max, sys_addr, base, mask;
2189 u32 val;
2190 int i, empty = 1;
2192 amd64_read_pci_cfg(pvt->F3, NBCFG, &val);
2194 pvt->nbcfg = val;
2196 debugf0("node %d, NBCFG=0x%08x[ChipKillEccCap: %d|DramEccEn: %d]\n",
2197 pvt->mc_node_id, val,
2198 !!(val & NBCFG_CHIPKILL), !!(val & NBCFG_ECC_ENABLE));
2200 for_each_chip_select(i, 0, pvt) {
2201 csrow = &mci->csrows[i];
2203 if (!csrow_enabled(i, 0, pvt) && !csrow_enabled(i, 1, pvt)) {
2204 debugf1("----CSROW %d EMPTY for node %d\n", i,
2205 pvt->mc_node_id);
2206 continue;
2209 debugf1("----CSROW %d VALID for MC node %d\n",
2210 i, pvt->mc_node_id);
2212 empty = 0;
2213 if (csrow_enabled(i, 0, pvt))
2214 csrow->nr_pages = amd64_csrow_nr_pages(pvt, 0, i);
2215 if (csrow_enabled(i, 1, pvt))
2216 csrow->nr_pages += amd64_csrow_nr_pages(pvt, 1, i);
2217 find_csrow_limits(mci, i, &input_addr_min, &input_addr_max);
2218 sys_addr = input_addr_to_sys_addr(mci, input_addr_min);
2219 csrow->first_page = (u32) (sys_addr >> PAGE_SHIFT);
2220 sys_addr = input_addr_to_sys_addr(mci, input_addr_max);
2221 csrow->last_page = (u32) (sys_addr >> PAGE_SHIFT);
2223 get_cs_base_and_mask(pvt, i, 0, &base, &mask);
2224 csrow->page_mask = ~mask;
2225 /* 8 bytes of resolution */
2227 csrow->mtype = amd64_determine_memory_type(pvt, i);
2229 debugf1(" for MC node %d csrow %d:\n", pvt->mc_node_id, i);
2230 debugf1(" input_addr_min: 0x%lx input_addr_max: 0x%lx\n",
2231 (unsigned long)input_addr_min,
2232 (unsigned long)input_addr_max);
2233 debugf1(" sys_addr: 0x%lx page_mask: 0x%lx\n",
2234 (unsigned long)sys_addr, csrow->page_mask);
2235 debugf1(" nr_pages: %u first_page: 0x%lx "
2236 "last_page: 0x%lx\n",
2237 (unsigned)csrow->nr_pages,
2238 csrow->first_page, csrow->last_page);
2241 * determine whether CHIPKILL or JUST ECC or NO ECC is operating
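 * (EDAC_S4ECD4ED == chipkill-style x4 symbol correction,
 * EDAC_SECDED == single-error-correct/double-error-detect)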
2243 if (pvt->nbcfg & NBCFG_ECC_ENABLE)
2244 csrow->edac_mode =
2245 (pvt->nbcfg & NBCFG_CHIPKILL) ?
2246 EDAC_S4ECD4ED : EDAC_SECDED;
2247 else
2248 csrow->edac_mode = EDAC_NONE;
2251 return empty;
2254 /* get all cores on this DCT */
2255 static void get_cpus_on_this_dct_cpumask(struct cpumask *mask, unsigned nid)
2257 int cpu;
2259 for_each_online_cpu(cpu)
2260 if (amd_get_nb_id(cpu) == nid)
2261 cpumask_set_cpu(cpu, mask);
2264 /* check MCG_CTL on all the cpus on this node */
2265 static bool amd64_nb_mce_bank_enabled_on_node(unsigned nid)
2267 cpumask_var_t mask;
2268 int cpu, nbe;
2269 bool ret = false;
2271 if (!zalloc_cpumask_var(&mask, GFP_KERNEL)) {
2272 amd64_warn("%s: Error allocating mask\n", __func__);
2273 return false;
2276 get_cpus_on_this_dct_cpumask(mask, nid);
2278 rdmsr_on_cpus(mask, MSR_IA32_MCG_CTL, msrs);
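/* MSR_MCGCTL_NBE is MCG_CTL[4]; when set, this core reports errors
 * from the northbridge MCE bank */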
2280 for_each_cpu(cpu, mask) {
2281 struct msr *reg = per_cpu_ptr(msrs, cpu);
2282 nbe = reg->l & MSR_MCGCTL_NBE;
2284 debugf0("core: %u, MCG_CTL: 0x%llx, NB MSR is %s\n",
2285 cpu, reg->q,
2286 (nbe ? "enabled" : "disabled"));
2288 if (!nbe)
2289 goto out;
2291 ret = true;
2293 out:
2294 free_cpumask_var(mask);
2295 return ret;
2298 static int toggle_ecc_err_reporting(struct ecc_settings *s, u8 nid, bool on)
2300 cpumask_var_t cmask;
2301 int cpu;
2303 if (!zalloc_cpumask_var(&cmask, GFP_KERNEL)) {
2304 amd64_warn("%s: error allocating mask\n", __func__);
2305 return -ENOMEM;
2308 get_cpus_on_this_dct_cpumask(cmask, nid);
2310 rdmsr_on_cpus(cmask, MSR_IA32_MCG_CTL, msrs);
2312 for_each_cpu(cpu, cmask) {
2314 struct msr *reg = per_cpu_ptr(msrs, cpu);
2316 if (on) {
2317 if (reg->l & MSR_MCGCTL_NBE)
2318 s->flags.nb_mce_enable = 1;
2320 reg->l |= MSR_MCGCTL_NBE;
2321 } else {
2323 * Turn off NB MCE reporting only when it was off before
2325 if (!s->flags.nb_mce_enable)
2326 reg->l &= ~MSR_MCGCTL_NBE;
2329 wrmsr_on_cpus(cmask, MSR_IA32_MCG_CTL, msrs);
2331 free_cpumask_var(cmask);
2333 return 0;
2336 static bool enable_ecc_error_reporting(struct ecc_settings *s, u8 nid,
2337 struct pci_dev *F3)
2339 bool ret = true;
2340 u32 value, mask = 0x3; /* UECC/CECC enable */
2342 if (toggle_ecc_err_reporting(s, nid, ON)) {
2343 amd64_warn("Error enabling ECC reporting over MCGCTL!\n");
2344 return false;
2347 amd64_read_pci_cfg(F3, NBCTL, &value);
2349 s->old_nbctl = value & mask;
2350 s->nbctl_valid = true;
2352 value |= mask;
2353 amd64_write_pci_cfg(F3, NBCTL, value);
2355 amd64_read_pci_cfg(F3, NBCFG, &value);
2357 debugf0("1: node %d, NBCFG=0x%08x[DramEccEn: %d]\n",
2358 nid, value, !!(value & NBCFG_ECC_ENABLE));
2360 if (!(value & NBCFG_ECC_ENABLE)) {
2361 amd64_warn("DRAM ECC disabled on this node, enabling...\n");
2363 s->flags.nb_ecc_prev = 0;
2365 /* Attempt to turn on DRAM ECC Enable */
2366 value |= NBCFG_ECC_ENABLE;
2367 amd64_write_pci_cfg(F3, NBCFG, value);
2369 amd64_read_pci_cfg(F3, NBCFG, &value);
2371 if (!(value & NBCFG_ECC_ENABLE)) {
2372 amd64_warn("Hardware rejected DRAM ECC enable,"
2373 "check memory DIMM configuration.\n");
2374 ret = false;
2375 } else {
2376 amd64_info("Hardware accepted DRAM ECC Enable\n");
2378 } else {
2379 s->flags.nb_ecc_prev = 1;
2382 debugf0("2: node %d, NBCFG=0x%08x[DramEccEn: %d]\n",
2383 nid, value, !!(value & NBCFG_ECC_ENABLE));
2385 return ret;
2388 static void restore_ecc_error_reporting(struct ecc_settings *s, u8 nid,
2389 struct pci_dev *F3)
2391 u32 value, mask = 0x3; /* UECC/CECC enable */
2394 if (!s->nbctl_valid)
2395 return;
2397 amd64_read_pci_cfg(F3, NBCTL, &value);
2398 value &= ~mask;
2399 value |= s->old_nbctl;
2401 amd64_write_pci_cfg(F3, NBCTL, value);
2403 /* restore the previous BIOS DRAM ECC "off" setting which we force-enabled */
2404 if (!s->flags.nb_ecc_prev) {
2405 amd64_read_pci_cfg(F3, NBCFG, &value);
2406 value &= ~NBCFG_ECC_ENABLE;
2407 amd64_write_pci_cfg(F3, NBCFG, value);
2410 /* restore the NB Enable MCGCTL bit */
2411 if (toggle_ecc_err_reporting(s, nid, OFF))
2412 amd64_warn("Error restoring NB MCGCTL settings!\n");
2416 * EDAC requires that the BIOS have ECC enabled before it
2417 * takes over the processing of ECC errors. A command line
2418 * option allows forcing hardware ECC on later, in
2419 * enable_ecc_error_reporting().
2421 static const char *ecc_msg =
2422 "ECC disabled in the BIOS or no ECC capability, module will not load.\n"
2423 " Either enable ECC checking or force module loading by setting "
2424 "'ecc_enable_override'.\n"
2425 " (Note that use of the override may cause unknown side effects.)\n";
2427 static bool ecc_enabled(struct pci_dev *F3, u8 nid)
2429 u32 value;
2430 u8 ecc_en = 0;
2431 bool nb_mce_en = false;
2433 amd64_read_pci_cfg(F3, NBCFG, &value);
2435 ecc_en = !!(value & NBCFG_ECC_ENABLE);
2436 amd64_info("DRAM ECC %s.\n", (ecc_en ? "enabled" : "disabled"));
2438 nb_mce_en = amd64_nb_mce_bank_enabled_on_node(nid);
2439 if (!nb_mce_en)
2440 amd64_notice("NB MCE bank disabled, set MSR "
2441 "0x%08x[4] on node %d to enable.\n",
2442 MSR_IA32_MCG_CTL, nid);
2444 if (!ecc_en || !nb_mce_en) {
2445 amd64_notice("%s", ecc_msg);
2446 return false;
2448 return true;
2451 struct mcidev_sysfs_attribute sysfs_attrs[ARRAY_SIZE(amd64_dbg_attrs) +
2452 ARRAY_SIZE(amd64_inj_attrs) +
2453 1]; /* + 1 for the terminating NULL entry */
2455 struct mcidev_sysfs_attribute terminator = { .attr = { .name = NULL } };
2457 static void set_mc_sysfs_attrs(struct mem_ctl_info *mci)
2459 unsigned int i = 0, j = 0;
2461 for (; i < ARRAY_SIZE(amd64_dbg_attrs); i++)
2462 sysfs_attrs[i] = amd64_dbg_attrs[i];
2464 if (boot_cpu_data.x86 >= 0x10)
2465 for (j = 0; j < ARRAY_SIZE(amd64_inj_attrs); j++, i++)
2466 sysfs_attrs[i] = amd64_inj_attrs[j];
2468 sysfs_attrs[i] = terminator;
2470 mci->mc_driver_sysfs_attributes = sysfs_attrs;
2473 static void setup_mci_misc_attrs(struct mem_ctl_info *mci,
2474 struct amd64_family_type *fam)
2476 struct amd64_pvt *pvt = mci->pvt_info;
2478 mci->mtype_cap = MEM_FLAG_DDR2 | MEM_FLAG_RDDR2;
2479 mci->edac_ctl_cap = EDAC_FLAG_NONE;
2481 if (pvt->nbcap & NBCAP_SECDED)
2482 mci->edac_ctl_cap |= EDAC_FLAG_SECDED;
2484 if (pvt->nbcap & NBCAP_CHIPKILL)
2485 mci->edac_ctl_cap |= EDAC_FLAG_S4ECD4ED;
2487 mci->edac_cap = amd64_determine_edac_cap(pvt);
2488 mci->mod_name = EDAC_MOD_STR;
2489 mci->mod_ver = EDAC_AMD64_VERSION;
2490 mci->ctl_name = fam->ctl_name;
2491 mci->dev_name = pci_name(pvt->F2);
2492 mci->ctl_page_to_phys = NULL;
2494 /* memory scrubber interface */
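/* (the EDAC core invokes these callbacks when the per-MC
 * sdram_scrub_rate sysfs attribute is read or written) */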
2495 mci->set_sdram_scrub_rate = amd64_set_scrub_rate;
2496 mci->get_sdram_scrub_rate = amd64_get_scrub_rate;
2500 * returns a pointer to the family descriptor on success, NULL otherwise.
2502 static struct amd64_family_type *amd64_per_family_init(struct amd64_pvt *pvt)
2504 u8 fam = boot_cpu_data.x86;
2505 struct amd64_family_type *fam_type = NULL;
2507 switch (fam) {
2508 case 0xf:
2509 fam_type = &amd64_family_types[K8_CPUS];
2510 pvt->ops = &amd64_family_types[K8_CPUS].ops;
2511 break;
2513 case 0x10:
2514 fam_type = &amd64_family_types[F10_CPUS];
2515 pvt->ops = &amd64_family_types[F10_CPUS].ops;
2516 break;
2518 case 0x15:
2519 fam_type = &amd64_family_types[F15_CPUS];
2520 pvt->ops = &amd64_family_types[F15_CPUS].ops;
2521 break;
2523 default:
2524 amd64_err("Unsupported family!\n");
2525 return NULL;
2528 pvt->ext_model = boot_cpu_data.x86_model >> 4;
2530 amd64_info("%s %sdetected (node %d).\n", fam_type->ctl_name,
2531 (fam == 0xf ?
2532 (pvt->ext_model >= K8_REV_F ? "revF or later "
2533 : "revE or earlier ")
2534 : ""), pvt->mc_node_id);
2535 return fam_type;
2538 static int amd64_init_one_instance(struct pci_dev *F2)
2540 struct amd64_pvt *pvt = NULL;
2541 struct amd64_family_type *fam_type = NULL;
2542 struct mem_ctl_info *mci = NULL;
2543 int err = 0, ret;
2544 u8 nid = get_node_id(F2);
2546 ret = -ENOMEM;
2547 pvt = kzalloc(sizeof(struct amd64_pvt), GFP_KERNEL);
2548 if (!pvt)
2549 goto err_ret;
2551 pvt->mc_node_id = nid;
2552 pvt->F2 = F2;
2554 ret = -EINVAL;
2555 fam_type = amd64_per_family_init(pvt);
2556 if (!fam_type)
2557 goto err_free;
2559 ret = -ENODEV;
2560 err = reserve_mc_sibling_devs(pvt, fam_type->f1_id, fam_type->f3_id);
2561 if (err)
2562 goto err_free;
2564 read_mc_regs(pvt);
2567 * We need to determine how many memory channels there are. Then use
2568 * that information for calculating the size of the dynamic instance
2569 * tables in the 'mci' structure.
2571 ret = -EINVAL;
2572 pvt->channel_count = pvt->ops->early_channel_count(pvt);
2573 if (pvt->channel_count < 0)
2574 goto err_siblings;
2576 ret = -ENOMEM;
2577 mci = edac_mc_alloc(0, pvt->csels[0].b_cnt, pvt->channel_count, nid);
2578 if (!mci)
2579 goto err_siblings;
2581 mci->pvt_info = pvt;
2582 mci->dev = &pvt->F2->dev;
2584 setup_mci_misc_attrs(mci, fam_type);
2586 if (init_csrows(mci))
2587 mci->edac_cap = EDAC_FLAG_NONE;
2589 set_mc_sysfs_attrs(mci);
2591 ret = -ENODEV;
2592 if (edac_mc_add_mc(mci)) {
2593 debugf1("failed edac_mc_add_mc()\n");
2594 goto err_add_mc;
2597 /* register our error-decoding hooks with EDAC MCE */
2598 if (report_gart_errors)
2599 amd_report_gart_errors(true);
2601 amd_register_ecc_decoder(amd64_decode_bus_error);
2603 mcis[nid] = mci;
2605 atomic_inc(&drv_instances);
2607 return 0;
2609 err_add_mc:
2610 edac_mc_free(mci);
2612 err_siblings:
2613 free_mc_sibling_devs(pvt);
2615 err_free:
2616 kfree(pvt);
2618 err_ret:
2619 return ret;
2622 static int __devinit amd64_probe_one_instance(struct pci_dev *pdev,
2623 const struct pci_device_id *mc_type)
2625 u8 nid = get_node_id(pdev);
2626 struct pci_dev *F3 = node_to_amd_nb(nid)->misc;
2627 struct ecc_settings *s;
2628 int ret = 0;
2630 ret = pci_enable_device(pdev);
2631 if (ret < 0) {
2632 debugf0("ret=%d\n", ret);
2633 return -EIO;
2636 ret = -ENOMEM;
2637 s = kzalloc(sizeof(struct ecc_settings), GFP_KERNEL);
2638 if (!s)
2639 goto err_out;
2641 ecc_stngs[nid] = s;
2643 if (!ecc_enabled(F3, nid)) {
2644 ret = -ENODEV;
2646 if (!ecc_enable_override)
2647 goto err_enable;
2649 amd64_warn("Forcing ECC on!\n");
2651 if (!enable_ecc_error_reporting(s, nid, F3))
2652 goto err_enable;
2655 ret = amd64_init_one_instance(pdev);
2656 if (ret < 0) {
2657 amd64_err("Error probing instance: %d\n", nid);
2658 restore_ecc_error_reporting(s, nid, F3);
2661 return ret;
2663 err_enable:
2664 kfree(s);
2665 ecc_stngs[nid] = NULL;
2667 err_out:
2668 return ret;
2671 static void __devexit amd64_remove_one_instance(struct pci_dev *pdev)
2673 struct mem_ctl_info *mci;
2674 struct amd64_pvt *pvt;
2675 u8 nid = get_node_id(pdev);
2676 struct pci_dev *F3 = node_to_amd_nb(nid)->misc;
2677 struct ecc_settings *s = ecc_stngs[nid];
2679 /* Remove from EDAC CORE tracking list */
2680 mci = edac_mc_del_mc(&pdev->dev);
2681 if (!mci)
2682 return;
2684 pvt = mci->pvt_info;
2686 restore_ecc_error_reporting(s, nid, F3);
2688 free_mc_sibling_devs(pvt);
2690 /* unregister from EDAC MCE */
2691 amd_report_gart_errors(false);
2692 amd_unregister_ecc_decoder(amd64_decode_bus_error);
2694 kfree(ecc_stngs[nid]);
2695 ecc_stngs[nid] = NULL;
2697 /* Free the EDAC CORE resources */
2698 mci->pvt_info = NULL;
2699 mcis[nid] = NULL;
2701 kfree(pvt);
2702 edac_mc_free(mci);
2706 * This table is part of the interface for loading drivers for PCI devices. The
2707 * PCI core identifies what devices are on the system during boot, and then
2708 * consults this table to see whether this driver claims a given device.
2710 static DEFINE_PCI_DEVICE_TABLE(amd64_pci_table) = {
2712 .vendor = PCI_VENDOR_ID_AMD,
2713 .device = PCI_DEVICE_ID_AMD_K8_NB_MEMCTL,
2714 .subvendor = PCI_ANY_ID,
2715 .subdevice = PCI_ANY_ID,
2716 .class = 0,
2717 .class_mask = 0,
2720 .vendor = PCI_VENDOR_ID_AMD,
2721 .device = PCI_DEVICE_ID_AMD_10H_NB_DRAM,
2722 .subvendor = PCI_ANY_ID,
2723 .subdevice = PCI_ANY_ID,
2724 .class = 0,
2725 .class_mask = 0,
2728 .vendor = PCI_VENDOR_ID_AMD,
2729 .device = PCI_DEVICE_ID_AMD_15H_NB_F2,
2730 .subvendor = PCI_ANY_ID,
2731 .subdevice = PCI_ANY_ID,
2732 .class = 0,
2733 .class_mask = 0,
2736 {0, }
2738 MODULE_DEVICE_TABLE(pci, amd64_pci_table);
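/* exporting the ID table lets udev/modprobe autoload this driver when a
 * matching PCI device (i.e. a supported memory controller) is present */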
2740 static struct pci_driver amd64_pci_driver = {
2741 .name = EDAC_MOD_STR,
2742 .probe = amd64_probe_one_instance,
2743 .remove = __devexit_p(amd64_remove_one_instance),
2744 .id_table = amd64_pci_table,
2747 static void setup_pci_device(void)
2749 struct mem_ctl_info *mci;
2750 struct amd64_pvt *pvt;
2752 if (amd64_ctl_pci)
2753 return;
2755 mci = mcis[0];
2756 if (mci) {
2758 pvt = mci->pvt_info;
2759 amd64_ctl_pci =
2760 edac_pci_create_generic_ctl(&pvt->F2->dev, EDAC_MOD_STR);
2762 if (!amd64_ctl_pci) {
2763 pr_warning("%s(): Unable to create PCI control\n",
2764 __func__);
2766 pr_warning("%s(): PCI error report via EDAC not set\n",
2767 __func__);
2772 static int __init amd64_edac_init(void)
2774 int err = -ENODEV;
2776 printk(KERN_INFO "AMD64 EDAC driver v%s\n", EDAC_AMD64_VERSION);
2778 opstate_init();
2780 if (amd_cache_northbridges() < 0)
2781 goto err_ret;
2783 err = -ENOMEM;
2784 mcis = kzalloc(amd_nb_num() * sizeof(mcis[0]), GFP_KERNEL);
2785 ecc_stngs = kzalloc(amd_nb_num() * sizeof(ecc_stngs[0]), GFP_KERNEL);
2786 if (!(mcis && ecc_stngs))
2787 goto err_free;
2789 msrs = msrs_alloc();
2790 if (!msrs)
2791 goto err_free;
2793 err = pci_register_driver(&amd64_pci_driver);
2794 if (err)
2795 goto err_pci;
2797 err = -ENODEV;
2798 if (!atomic_read(&drv_instances))
2799 goto err_no_instances;
2801 setup_pci_device();
2802 return 0;
2804 err_no_instances:
2805 pci_unregister_driver(&amd64_pci_driver);
2807 err_pci:
2808 msrs_free(msrs);
2809 msrs = NULL;
2811 err_free:
2812 kfree(mcis);
2813 mcis = NULL;
2815 kfree(ecc_stngs);
2816 ecc_stngs = NULL;
2818 err_ret:
2819 return err;
2822 static void __exit amd64_edac_exit(void)
2824 if (amd64_ctl_pci)
2825 edac_pci_release_generic_ctl(amd64_ctl_pci);
2827 pci_unregister_driver(&amd64_pci_driver);
2829 kfree(ecc_stngs);
2830 ecc_stngs = NULL;
2832 kfree(mcis);
2833 mcis = NULL;
2835 msrs_free(msrs);
2836 msrs = NULL;
2839 module_init(amd64_edac_init);
2840 module_exit(amd64_edac_exit);
2842 MODULE_LICENSE("GPL");
2843 MODULE_AUTHOR("SoftwareBitMaker: Doug Thompson, "
2844 "Dave Peterson, Thayne Harbaugh");
2845 MODULE_DESCRIPTION("MC support for AMD64 memory controllers - "
2846 EDAC_AMD64_VERSION);
2848 module_param(edac_op_state, int, 0444);
2849 MODULE_PARM_DESC(edac_op_state, "EDAC Error Reporting state: 0=Poll,1=NMI");
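/*
 * Example (illustrative): force-load on a machine where the BIOS left DRAM
 * ECC disabled, and also report GART TLB errors:
 *
 *   modprobe amd64_edac ecc_enable_override=1 report_gart_errors=1
 */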