2 * This file is part of the coreboot project.
4 * Copyright (C) 2002 Linux Networx
5 * (Written by Eric Biederman <ebiederman@lnxi.com> for Linux Networx)
6 * Copyright (C) 2004 YingHai Lu
7 * Copyright (C) 2008 Advanced Micro Devices, Inc.
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License as published by
11 * the Free Software Foundation; version 2 of the License.
13 * This program is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details.
19 #include <cpu/x86/cache.h>
20 #include <cpu/x86/mtrr.h>
21 #include <cpu/x86/tsc.h>
22 #include <cpu/amd/mtrr.h>
26 #include <arch/acpi.h>
30 #if CONFIG_HAVE_OPTION_TABLE
31 #include "option_table.h"
34 #if CONFIG_DEBUG_RAM_SETUP
35 #define printk_raminit(args...) printk(BIOS_DEBUG, args)
37 #define printk_raminit(args...)
44 /* for PCI_ADDR(0, 0x18, 2, 0x98) index,
45 and PCI_ADDR(0x, 0x18, 2, 0x9c) data */
48 [29: 0] DctOffset (Dram Controller Offset)
49 [30:30] DctAccessWrite (Dram Controller Read/Write Select)
52 [31:31] DctAccessDone (Dram Controller Access Done)
53 0 = Access in progress
54 1 = No access in progress
57 [31: 0] DctOffsetData (Dram Controller Offset Data)
60 - Write the register num to DctOffset with
62 - poll the DctAccessDone until it = 1
63 - Read the data from DctOffsetData
65 - Write the data to DctOffsetData
66 - Write register num to DctOffset with DctAccessWrite = 1
67 - poll the DctAccessDone until it = 1
71 void setup_resource_map(const unsigned int *register_values
, int max
)
74 for (i
= 0; i
< max
; i
+= 3) {
78 dev
= register_values
[i
] & ~0xff;
79 where
= register_values
[i
] & 0xff;
80 reg
= pci_read_config32(dev
, where
);
81 reg
&= register_values
[i
+1];
82 reg
|= register_values
[i
+2];
83 pci_write_config32(dev
, where
, reg
);
87 static int controller_present(const struct mem_controller
*ctrl
)
89 return pci_read_config32(ctrl
->f0
, 0) == 0x11001022;
92 static void sdram_set_registers(const struct mem_controller
*ctrl
, struct sys_info
*sysinfo
)
94 static const unsigned int register_values
[] = {
96 /* Careful set limit registers before base registers which
97 contain the enables */
98 /* DRAM Limit i Registers
107 * [ 2: 0] Destination Node ID
117 * [10: 8] Interleave select
118 * specifies the values of A[14:12] to use with interleave enable.
120 * [31:16] DRAM Limit Address i Bits 39-24
121 * This field defines the upper address bits of a 40 bit address
122 * that define the end of the DRAM region.
124 PCI_ADDR(0, 0x18, 1, 0x44), 0x0000f8f8, 0x00000000,
125 PCI_ADDR(0, 0x18, 1, 0x4C), 0x0000f8f8, 0x00000001,
126 PCI_ADDR(0, 0x18, 1, 0x54), 0x0000f8f8, 0x00000002,
127 PCI_ADDR(0, 0x18, 1, 0x5C), 0x0000f8f8, 0x00000003,
128 PCI_ADDR(0, 0x18, 1, 0x64), 0x0000f8f8, 0x00000004,
129 PCI_ADDR(0, 0x18, 1, 0x6C), 0x0000f8f8, 0x00000005,
130 PCI_ADDR(0, 0x18, 1, 0x74), 0x0000f8f8, 0x00000006,
131 PCI_ADDR(0, 0x18, 1, 0x7C), 0x0000f8f8, 0x00000007,
132 /* DRAM Base i Registers
141 * [ 0: 0] Read Enable
144 * [ 1: 1] Write Enable
145 * 0 = Writes Disabled
148 * [10: 8] Interleave Enable
149 * 000 = No interleave
150 * 001 = Interleave on A[12] (2 nodes)
152 * 011 = Interleave on A[12] and A[14] (4 nodes)
156 * 111 = Interleve on A[12] and A[13] and A[14] (8 nodes)
158 * [13:16] DRAM Base Address i Bits 39-24
159 * This field defines the upper address bits of a 40-bit address
160 * that define the start of the DRAM region.
162 PCI_ADDR(0, 0x18, 1, 0x40), 0x0000f8fc, 0x00000000,
163 PCI_ADDR(0, 0x18, 1, 0x48), 0x0000f8fc, 0x00000000,
164 PCI_ADDR(0, 0x18, 1, 0x50), 0x0000f8fc, 0x00000000,
165 PCI_ADDR(0, 0x18, 1, 0x58), 0x0000f8fc, 0x00000000,
166 PCI_ADDR(0, 0x18, 1, 0x60), 0x0000f8fc, 0x00000000,
167 PCI_ADDR(0, 0x18, 1, 0x68), 0x0000f8fc, 0x00000000,
168 PCI_ADDR(0, 0x18, 1, 0x70), 0x0000f8fc, 0x00000000,
169 PCI_ADDR(0, 0x18, 1, 0x78), 0x0000f8fc, 0x00000000,
171 /* DRAM CS Base Address i Registers
180 * [ 0: 0] Chip-Select Bank Enable
184 * [ 2: 2] Memory Test Failed
186 * [13: 5] Base Address (21-13)
187 * An optimization used when all DIMM are the same size...
189 * [28:19] Base Address (36-27)
190 * This field defines the top 11 addresses bit of a 40-bit
191 * address that define the memory address space. These
192 * bits decode 32-MByte blocks of memory.
195 PCI_ADDR(0, 0x18, 2, 0x40), 0xe007c018, 0x00000000,
196 PCI_ADDR(0, 0x18, 2, 0x44), 0xe007c018, 0x00000000,
197 PCI_ADDR(0, 0x18, 2, 0x48), 0xe007c018, 0x00000000,
198 PCI_ADDR(0, 0x18, 2, 0x4C), 0xe007c018, 0x00000000,
199 PCI_ADDR(0, 0x18, 2, 0x50), 0xe007c018, 0x00000000,
200 PCI_ADDR(0, 0x18, 2, 0x54), 0xe007c018, 0x00000000,
201 PCI_ADDR(0, 0x18, 2, 0x58), 0xe007c018, 0x00000000,
202 PCI_ADDR(0, 0x18, 2, 0x5C), 0xe007c018, 0x00000000,
203 /* DRAM CS Mask Address i Registers
208 * Select bits to exclude from comparison with the DRAM Base address register.
210 * [13: 5] Address Mask (21-13)
211 * Address to be excluded from the optimized case
213 * [28:19] Address Mask (36-27)
214 * The bits with an address mask of 1 are excluded from address comparison
218 PCI_ADDR(0, 0x18, 2, 0x60), 0xe007c01f, 0x00000000,
219 PCI_ADDR(0, 0x18, 2, 0x64), 0xe007c01f, 0x00000000,
220 PCI_ADDR(0, 0x18, 2, 0x68), 0xe007c01f, 0x00000000,
221 PCI_ADDR(0, 0x18, 2, 0x6C), 0xe007c01f, 0x00000000,
223 /* DRAM Control Register
225 * [ 3: 0] RdPtrInit ( Read Pointer Initial Value)
226 * 0x03-0x00: reserved
227 * [ 6: 4] RdPadRcvFifoDly (Read Delay from Pad Receive FIFO)
230 * 010 = 1.5 Memory Clocks
231 * 011 = 2 Memory Clocks
232 * 100 = 2.5 Memory Clocks
233 * 101 = 3 Memory Clocks
234 * 110 = 3.5 Memory Clocks
237 * [16:16] AltVidC3MemClkTriEn (AltVID Memory Clock Tristate Enable)
238 * Enables the DDR memory clocks to be tristated when alternate VID
239 * mode is enabled. This bit has no effect if the DisNbClkRamp bit
241 * [17:17] DllTempAdjTime (DLL Temperature Adjust Cycle Time)
244 * [18:18] DqsRcvEnTrain (DQS Receiver Enable Training Mode)
245 * 0 = Normal DQS Receiver enable operation
246 * 1 = DQS receiver enable training mode
249 PCI_ADDR(0, 0x18, 2, 0x78), 0xfff80000, (6<<4)|(6<<0),
251 /* DRAM Initialization Register
253 * [15: 0] MrsAddress (Address for MRS/EMRS Commands)
254 * this field specifies the dsata driven on the DRAM address pins
255 * 15-0 for MRS and EMRS commands
256 * [18:16] MrsBank (Bank Address for MRS/EMRS Commands)
257 * this files specifies the data driven on the DRAM bank pins for
258 * the MRS and EMRS commands
260 * [24:24] SendPchgAll (Send Precharge All Command)
261 * Setting this bit causes the DRAM controller to send a precharge
262 * all command. This bit is cleared by the hardware after the
264 * [25:25] SendAutoRefresh (Send Auto Refresh Command)
265 * Setting this bit causes the DRAM controller to send an auto
266 * refresh command. This bit is cleared by the hardware after the
268 * [26:26] SendMrsCmd (Send MRS/EMRS Command)
269 * Setting this bit causes the DRAM controller to send the MRS or
270 * EMRS command defined by the MrsAddress and MrsBank fields. This
271 * bit is cleared by the hardware adter the commmand completes
272 * [27:27] DeassertMemRstX (De-assert Memory Reset)
273 * Setting this bit causes the DRAM controller to de-assert the
274 * memory reset pin. This bit cannot be used to assert the memory
276 * [28:28] AssertCke (Assert CKE)
277 * setting this bit causes the DRAM controller to assert the CKE
278 * pins. This bit cannot be used to de-assert the CKE pins
280 * [31:31] EnDramInit (Enable DRAM Initialization)
281 * Setting this bit puts the DRAM controller in a BIOS controlled
282 * DRAM initialization mode. BIOS must clear this bit aster DRAM
283 * initialization is complete.
285 // PCI_ADDR(0, 0x18, 2, 0x7C), 0x60f80000, 0,
288 /* DRAM Bank Address Mapping Register
290 * Specify the memory module size
310 PCI_ADDR(0, 0x18, 2, 0x80), 0xffff0000, 0x00000000,
311 /* DRAM Timing Low Register
313 * [ 2: 0] Tcl (Cas# Latency, Cas# to read-data-valid)
323 * [ 5: 4] Trcd (Ras#-active to Cas# read/write delay)
329 * [ 9: 8] Trp (Row Precharge Time, Precharge-to-Active or Auto-Refresh)
335 * [11:11] Trtp (Read to Precharge Time, read Cas# to precharge time)
336 * 0 = 2 clocks for Burst Length of 32 Bytes
337 * 4 clocks for Burst Length of 64 Bytes
338 * 1 = 3 clocks for Burst Length of 32 Bytes
339 * 5 clocks for Burst Length of 64 Bytes
340 * [15:12] Tras (Minimum Ras# Active Time)
343 * 0010 = 5 bus clocks
345 * 1111 = 18 bus clocks
346 * [19:16] Trc (Row Cycle Time, Ras#-active to Ras#-active or auto
347 * refresh of the same bank)
348 * 0000 = 11 bus clocks
349 * 0010 = 12 bus clocks
351 * 1110 = 25 bus clocks
352 * 1111 = 26 bus clocks
353 * [21:20] Twr (Write Recovery Time, From the last data to precharge,
354 * writes can go back-to-back)
359 * [23:22] Trrd (Active-to-active(Ras#-to-Ras#) Delay of different banks)
364 * [31:24] MemClkDis ( Disable the MEMCLK outputs for DRAM channel A,
365 * BIOS should set it to reduce the power consumption)
366 * Bit F(1207) M2 Package S1g1 Package
368 * 1 N/A MA0_CLK1 MA0_CLK1
371 * 4 MA1_CLK MA1_CLK0 N/A
372 * 5 MA0_CLK MA0_CLK0 MA0_CLK0
374 * 7 N/A MA0_CLK2 MA0_CLK2
376 PCI_ADDR(0, 0x18, 2, 0x88), 0x000004c8, 0xff000002 /* 0x03623125 */ ,
377 /* DRAM Timing High Register
380 * [ 6: 4] TrwtTO (Read-to-Write Turnaround for Data, DQS Contention)
390 * [ 9: 8] Twtr (Internal DRAM Write-to-Read Command Delay,
391 * minium write-to-read delay when both access the same chip select)
396 * [11:10] Twrrd (Write to Read DIMM Termination Turnaround, minimum
397 * write-to-read delay when accessing two different DIMMs)
402 * [13:12] Twrwr (Write to Write Timing)
403 * 00 = 1 bus clocks ( 0 idle cycle on the bus)
404 * 01 = 2 bus clocks ( 1 idle cycle on the bus)
405 * 10 = 3 bus clocks ( 2 idle cycles on the bus)
407 * [15:14] Trdrd ( Read to Read Timing)
408 * 00 = 2 bus clocks ( 1 idle cycle on the bus)
409 * 01 = 3 bus clocks ( 2 idle cycles on the bus)
410 * 10 = 4 bus clocks ( 3 idle cycles on the bus)
411 * 11 = 5 bus clocks ( 4 idel cycles on the bus)
412 * [17:16] Tref (Refresh Rate)
413 * 00 = Undefined behavior
415 * 10 = Refresh interval of 7.8 microseconds
416 * 11 = Refresh interval of 3.9 microseconds
418 * [22:20] Trfc0 ( Auto-Refresh Row Cycle Time for the Logical DIMM0,
419 * based on DRAM density and speed)
420 * 000 = 75 ns (all speeds, 256Mbit)
421 * 001 = 105 ns (all speeds, 512Mbit)
422 * 010 = 127.5 ns (all speeds, 1Gbit)
423 * 011 = 195 ns (all speeds, 2Gbit)
424 * 100 = 327.5 ns (all speeds, 4Gbit)
428 * [25:23] Trfc1 ( Auto-Refresh Row Cycle Time for the Logical DIMM1,
429 * based on DRAM density and speed)
430 * [28:26] Trfc2 ( Auto-Refresh Row Cycle Time for the Logical DIMM2,
431 * based on DRAM density and speed)
432 * [31:29] Trfc3 ( Auto-Refresh Row Cycle Time for the Logical DIMM3,
433 * based on DRAM density and speed)
435 PCI_ADDR(0, 0x18, 2, 0x8c), 0x000c008f, (2 << 16)|(1 << 8),
436 /* DRAM Config Low Register
438 * [ 0: 0] InitDram (Initialize DRAM)
439 * 1 = write 1 cause DRAM controller to execute the DRAM
440 * initialization, when done it read to 0
441 * [ 1: 1] ExitSelfRef ( Exit Self Refresh Command )
442 * 1 = write 1 causes the DRAM controller to bring the DRAMs out
443 * for self refresh mode
445 * [ 5: 4] DramTerm (DRAM Termination)
446 * 00 = On die termination disabled
451 * [ 7: 7] DramDrvWeak ( DRAM Drivers Weak Mode)
452 * 0 = Normal drive strength mode.
453 * 1 = Weak drive strength mode
454 * [ 8: 8] ParEn (Parity Enable)
455 * 1 = Enable address parity computation output, PAR,
456 * and enables the parity error input, ERR
457 * [ 9: 9] SelfRefRateEn (Faster Self Refresh Rate Enable)
458 * 1 = Enable high temperature ( two times normal )
460 * [10:10] BurstLength32 ( DRAM Burst Length Set for 32 Bytes)
463 * [11:11] Width128 ( Width of DRAM interface)
464 * 0 = the controller DRAM interface is 64-bits wide
465 * 1 = the controller DRAM interface is 128-bits wide
466 * [12:12] X4Dimm (DIMM 0 is x4)
467 * [13:13] X4Dimm (DIMM 1 is x4)
468 * [14:14] X4Dimm (DIMM 2 is x4)
469 * [15:15] X4Dimm (DIMM 3 is x4)
471 * 1 = x4 DIMM present
472 * [16:16] UnBuffDimm ( Unbuffered DIMMs)
474 * 1 = Unbuffered DIMMs
476 * [19:19] DimmEccEn ( DIMM ECC Enable )
477 * 1 = ECC checking is being enabled for all DIMMs on the DRAM
478 * controller ( Through F3 0x44[EccEn])
481 PCI_ADDR(0, 0x18, 2, 0x90), 0xfff6004c, 0x00000010,
482 /* DRAM Config High Register
484 * [ 0: 2] MemClkFreq ( Memory Clock Frequency)
490 * [ 3: 3] MemClkFreqVal (Memory Clock Freqency Valid)
491 * 1 = BIOS need to set the bit when setting up MemClkFreq to
493 * [ 7: 4] MaxAsyncLat ( Maximum Asynchronous Latency)
498 * [12:12] RDqsEn ( Read DQS Enable) This bit is only be set if x8
499 * registered DIMMs are present in the system
500 * 0 = DM pins function as data mask pins
501 * 1 = DM pins function as read DQS pins
503 * [14:14] DisDramInterface ( Disable the DRAM interface ) When this bit
504 * is set, the DRAM controller is disabled, and interface in low power
506 * 0 = Enabled (default)
508 * [15:15] PowerDownEn ( Power Down Mode Enable )
509 * 0 = Disabled (default)
511 * [16:16] PowerDown ( Power Down Mode )
512 * 0 = Channel CKE Control
513 * 1 = Chip Select CKE Control
514 * [17:17] FourRankSODimm (Four Rank SO-DIMM)
515 * 1 = this bit is set by BIOS to indicate that a four rank
517 * [18:18] FourRankRDimm (Four Rank Registered DIMM)
518 * 1 = this bit is set by BIOS to indicate that a four rank
519 * registered DIMM is present
521 * [20:20] SlowAccessMode (Slow Access Mode (2T Mode))
522 * 0 = DRAM address and control signals are driven for one
524 * 1 = One additional MEMCLK of setup time is provided on all
525 * DRAM address and control signals except CS, CKE, and ODT;
526 * i.e., these signals are drivern for two MEMCLK cycles
529 * [22:22] BankSwizzleMode ( Bank Swizzle Mode),
530 * 0 = Disabled (default)
533 * [27:24] DcqBypassMax ( DRAM Controller Queue Bypass Maximum)
534 * 0000 = No bypass; the oldest request is never bypassed
535 * 0001 = The oldest request may be bypassed no more than 1 time
537 * 1111 = The oldest request may be bypassed no more than 15\
539 * [31:28] FourActWindow ( Four Bank Activate Window) , not more than
540 * 4 banks in a 8 bank device are activated
541 * 0000 = No tFAW window restriction
542 * 0001 = 8 MEMCLK cycles
543 * 0010 = 9 MEMCLK cycles
545 * 1101 = 20 MEMCLK cycles
548 PCI_ADDR(0, 0x18, 2, 0x94), 0x00a82f00,0x00008000,
549 /* DRAM Delay Line Register
551 * [ 0: 0] MemClrStatus (Memory Clear Status) : Readonly
552 * when set, this bit indicates that the memory clear function
553 * is complete. Only clear by reset. BIOS should not write or
554 * read the DRAM until this bit is set by hardware
555 * [ 1: 1] DisableJitter ( Disable Jitter)
556 * When set the DDR compensation circuit will not change the
557 * values unless the change is more than one step from the
559 * [ 3: 2] RdWrQByp ( Read/Write Queue Bypass Count)
564 * [ 4: 4] Mode64BitMux (Mismatched DIMM Support Enable)
565 * 1 When bit enables support for mismatched DIMMs when using
566 * 128-bit DRAM interface, the Width128 no effect, only for
568 * [ 5: 5] DCC_EN ( Dynamica Idle Cycle Counter Enable)
569 * When set to 1, indicates that each entry in the page tables
570 * dynamically adjusts the idle cycle limit based on page
571 * Conflict/Page Miss (PC/PM) traffic
572 * [ 8: 6] ILD_lmt ( Idle Cycle Limit)
581 * [ 9: 9] DramEnabled ( DRAM Enabled)
582 * When Set, this bit indicates that the DRAM is enabled, this
583 * bit is set by hardware after DRAM initialization or on an exit
584 * from self refresh. The DRAM controller is intialized after the
585 * hardware-controlled initialization process ( initiated by the
586 * F2 0x90[DramInit]) completes or when the BIOS-controlled
587 * initialization process completes (F2 0x7c(EnDramInit] is
588 * written from 1 to 0)
590 * [31:24] MemClkDis ( Disable the MEMCLK outputs for DRAM channel B,
591 * BIOS should set it to reduce the power consumption)
592 * Bit F(1207) M2 Package S1g1 Package
594 * 1 N/A MA0_CLK1 MA0_CLK1
597 * 4 MA1_CLK MA1_CLK0 N/A
598 * 5 MA0_CLK MA0_CLK0 MA0_CLK0
600 * 7 N/A MA0_CLK2 MA0_CLK2
602 PCI_ADDR(0, 0x18, 2, 0xa0), 0x00fffc00, 0xff000000,
604 /* DRAM Scrub Control Register
606 * [ 4: 0] DRAM Scrube Rate
608 * [12: 8] L2 Scrub Rate
610 * [20:16] Dcache Scrub
613 * 00000 = Do not scrub
635 * All Others = Reserved
637 PCI_ADDR(0, 0x18, 3, 0x58), 0xffe0e0e0, 0x00000000,
638 /* DRAM Scrub Address Low Register
640 * [ 0: 0] DRAM Scrubber Redirect Enable
642 * 1 = Scrubber Corrects errors found in normal operation
644 * [31: 6] DRAM Scrub Address 31-6
646 PCI_ADDR(0, 0x18, 3, 0x5C), 0x0000003e, 0x00000000,
647 /* DRAM Scrub Address High Register
649 * [ 7: 0] DRAM Scrubb Address 39-32
652 PCI_ADDR(0, 0x18, 3, 0x60), 0xffffff00, 0x00000000,
654 /* for PCI_ADDR(0, 0x18, 2, 0x98) index,
655 and PCI_ADDR(0x, 0x18, 2, 0x9c) data */
658 [29: 0] DctOffset (Dram Controller Offset)
659 [30:30] DctAccessWrite (Dram Controller Read/Write Select)
662 [31:31] DctAccessDone (Dram Controller Access Done)
663 0 = Access in progress
664 1 = No access is progress
667 [31: 0] DctOffsetData (Dram Controller Offset Data)
670 - Write the register num to DctOffset with DctAccessWrite = 0
671 - poll the DctAccessDone until it = 1
672 - Read the data from DctOffsetData
674 - Write the data to DctOffsetData
675 - Write register num to DctOffset with DctAccessWrite = 1
676 - poll the DctAccessDone untio it = 1
682 if (!controller_present(ctrl
)) {
683 sysinfo
->ctrl_present
[ctrl
->node_id
] = 0;
686 sysinfo
->ctrl_present
[ctrl
->node_id
] = 1;
688 printk(BIOS_SPEW
, "setting up CPU %02x northbridge registers\n", ctrl
->node_id
);
689 max
= ARRAY_SIZE(register_values
);
690 for (i
= 0; i
< max
; i
+= 3) {
694 dev
= (register_values
[i
] & ~0xff) - PCI_DEV(0, 0x18, 0) + ctrl
->f0
;
695 where
= register_values
[i
] & 0xff;
696 reg
= pci_read_config32(dev
, where
);
697 reg
&= register_values
[i
+1];
698 reg
|= register_values
[i
+2];
699 pci_write_config32(dev
, where
, reg
);
701 printk(BIOS_SPEW
, "done.\n");
705 static int is_dual_channel(const struct mem_controller
*ctrl
)
708 dcl
= pci_read_config32(ctrl
->f2
, DRAM_CONFIG_LOW
);
709 return dcl
& DCL_Width128
;
713 static int is_opteron(const struct mem_controller
*ctrl
)
715 /* Test to see if I am an Opteron. M2 and S1G1 support dual
716 * channel, too, but only support unbuffered DIMMs so we need a
717 * better test for Opterons.
718 * However, all code uses is_opteron() to find out whether to
719 * use dual channel, so if we really check for opteron here, we
720 * need to fix up all code using this function, too.
724 nbcap
= pci_read_config32(ctrl
->f3
, NORTHBRIDGE_CAP
);
725 return !!(nbcap
& NBCAP_128Bit
);
729 static int is_registered(const struct mem_controller
*ctrl
)
731 /* Test to see if we are dealing with registered SDRAM.
732 * If we are not registered we are unbuffered.
733 * This function must be called after spd_handle_unbuffered_dimms.
736 dcl
= pci_read_config32(ctrl
->f2
, DRAM_CONFIG_LOW
);
737 return !(dcl
& DCL_UnBuffDimm
);
741 static void spd_get_dimm_size(unsigned device
, struct dimm_size
*sz
)
743 /* Calculate the log base 2 size of a DIMM in bits */
750 value
= spd_read_byte(device
, SPD_ROW_NUM
); /* rows */
751 if (value
< 0) goto hw_err
;
752 if ((value
& 0xff) == 0) goto val_err
; /* max is 16 ? */
753 sz
->per_rank
+= value
& 0xff;
754 sz
->rows
= value
& 0xff;
756 value
= spd_read_byte(device
, SPD_COL_NUM
); /* columns */
757 if (value
< 0) goto hw_err
;
758 if ((value
& 0xff) == 0) goto val_err
; /* max is 11 */
759 sz
->per_rank
+= value
& 0xff;
760 sz
->col
= value
& 0xff;
762 value
= spd_read_byte(device
, SPD_BANK_NUM
); /* banks */
763 if (value
< 0) goto hw_err
;
764 if ((value
& 0xff) == 0) goto val_err
;
765 sz
->bank
= log2(value
& 0xff); // convert 4 to 2, and 8 to 3
766 sz
->per_rank
+= sz
->bank
;
768 /* Get the module data width and convert it to a power of two */
769 value
= spd_read_byte(device
, SPD_DATA_WIDTH
);
770 if (value
< 0) goto hw_err
;
772 if ((value
!= 72) && (value
!= 64)) goto val_err
;
773 sz
->per_rank
+= log2(value
) - 3; //64 bit So another 3 lines
775 /* How many ranks? */
776 /* number of physical banks */
777 value
= spd_read_byte(device
, SPD_MOD_ATTRIB_RANK
);
778 if (value
< 0) goto hw_err
;
779 /* value >>= SPD_MOD_ATTRIB_RANK_NUM_SHIFT; */
780 value
&= SPD_MOD_ATTRIB_RANK_NUM_MASK
;
781 value
+= SPD_MOD_ATTRIB_RANK_NUM_BASE
; // 0-->1, 1-->2, 3-->4
783 rank == 1 only one rank or say one side
784 rank == 2 two side , and two ranks
785 rank == 4 two side , and four ranks total
786 Some one side two ranks, because of stacked
788 if ((value
!= 1) && (value
!= 2) && (value
!= 4 )) {
793 /* verify if per_rank is equal byte 31
794 it has the DIMM size as a multiple of 128MB.
796 value
= spd_read_byte(device
, SPD_RANK_SIZE
);
797 if (value
< 0) goto hw_err
;
800 if (value
<=4 ) value
+= 8; // add back to 1G to high
801 value
+= (27-5); // make 128MB to the real lines
802 if ( value
!= (sz
->per_rank
)) {
803 printk(BIOS_ERR
, "Bad RANK Size --\n");
810 die("Bad SPD value\n");
811 /* If an hw_error occurs report that I have no memory */
823 static void set_dimm_size(const struct mem_controller
*ctrl
,
824 struct dimm_size
*sz
, unsigned index
,
825 struct mem_info
*meminfo
)
827 uint32_t base0
, base1
;
829 /* For each base register.
830 * Place the dimm size in 32 MB quantities in the bits 31 - 21.
831 * The initialize dimm size is in bits.
832 * Set the base enable bit0.
837 /* Make certain side1 of the dimm is at least 128MB */
838 if (sz
->per_rank
>= 27) {
839 base0
= (1 << ((sz
->per_rank
- 27 ) + 19)) | 1;
842 /* Make certain side2 of the dimm is at least 128MB */
843 if (sz
->rank
> 1) { // 2 ranks or 4 ranks
844 base1
= (1 << ((sz
->per_rank
- 27 ) + 19)) | 1;
847 /* Double the size if we are using dual channel memory */
848 if (meminfo
->is_Width128
) {
849 base0
= (base0
<< 1) | (base0
& 1);
850 base1
= (base1
<< 1) | (base1
& 1);
853 /* Clear the reserved bits */
854 base0
&= ~0xe007fffe;
855 base1
&= ~0xe007fffe;
857 if (!(meminfo
->dimm_mask
& 0x0F) && (meminfo
->dimm_mask
& 0xF0)) { /* channelB only? */
858 pci_write_config32(ctrl
->f2
, DRAM_CSBASE
+ (((index
<< 1) + 4) << 2), base0
);
859 pci_write_config32(ctrl
->f2
, DRAM_CSBASE
+ (((index
<< 1) + 5) << 2), base1
);
861 /* Set the appropriate DIMM base address register */
862 pci_write_config32(ctrl
->f2
, DRAM_CSBASE
+ (((index
<< 1) + 0) << 2), base0
);
863 pci_write_config32(ctrl
->f2
, DRAM_CSBASE
+ (((index
<< 1) + 1) << 2), base1
);
864 #if CONFIG_QRANK_DIMM_SUPPORT
866 pci_write_config32(ctrl
->f2
, DRAM_CSBASE
+ (((index
<< 1) + 4) << 2), base0
);
867 pci_write_config32(ctrl
->f2
, DRAM_CSBASE
+ (((index
<< 1) + 5) << 2), base1
);
872 /* Enable the memory clocks for this DIMM by Clear the MemClkDis bit*/
876 #if CONFIG_CPU_SOCKET_TYPE == 0x10 /* L1 */
877 ClkDis0
= DTL_MemClkDis0
;
878 #elif CONFIG_CPU_SOCKET_TYPE == 0x11 /* AM2 */
879 ClkDis0
= DTL_MemClkDis0_AM2
;
880 #elif CONFIG_CPU_SOCKET_TYPE == 0x12 /* S1G1 */
881 ClkDis0
= DTL_MemClkDis0_S1g1
;
884 if (!(meminfo
->dimm_mask
& 0x0F) && (meminfo
->dimm_mask
& 0xF0)) { /* channelB only? */
885 dword
= pci_read_config32(ctrl
->f2
, DRAM_CTRL_MISC
);
886 dword
&= ~(ClkDis0
>> index
);
887 pci_write_config32(ctrl
->f2
, DRAM_CTRL_MISC
, dword
);
890 dword
= pci_read_config32(ctrl
->f2
, DRAM_TIMING_LOW
); //Channel A
891 dword
&= ~(ClkDis0
>> index
);
892 #if CONFIG_QRANK_DIMM_SUPPORT
894 dword
&= ~(ClkDis0
>> (index
+2));
897 pci_write_config32(ctrl
->f2
, DRAM_TIMING_LOW
, dword
);
899 if (meminfo
->is_Width128
) { // ChannelA+B
900 dword
= pci_read_config32(ctrl
->f2
, DRAM_CTRL_MISC
);
901 dword
&= ~(ClkDis0
>> index
);
902 #if CONFIG_QRANK_DIMM_SUPPORT
904 dword
&= ~(ClkDis0
>> (index
+2));
907 pci_write_config32(ctrl
->f2
, DRAM_CTRL_MISC
, dword
);
914 /* row col bank for 64 bit
930 static void set_dimm_cs_map(const struct mem_controller
*ctrl
,
931 struct dimm_size
*sz
, unsigned index
,
932 struct mem_info
*meminfo
)
934 static const uint8_t cs_map_aaa
[24] = {
935 /* (bank=2, row=13, col=9)(3, 16, 11) ---> (0, 0, 0) (1, 3, 2) */
950 if (!(meminfo
->dimm_mask
& 0x0F) && (meminfo
->dimm_mask
& 0xF0)) { /* channelB only? */
953 map
= pci_read_config32(ctrl
->f2
, DRAM_BANK_ADDR_MAP
);
954 map
&= ~(0xf << (index
* 4));
955 #if CONFIG_QRANK_DIMM_SUPPORT
957 map
&= ~(0xf << ( (index
+ 2) * 4));
961 /* Make certain side1 of the dimm is at least 128MB */
962 if (sz
->per_rank
>= 27) {
964 temp_map
= cs_map_aaa
[(sz
->bank
-2)*3*4 + (sz
->rows
- 13)*3 + (sz
->col
- 9) ];
965 map
|= temp_map
<< (index
*4);
966 #if CONFIG_QRANK_DIMM_SUPPORT
968 map
|= temp_map
<< ( (index
+ 2) * 4);
973 pci_write_config32(ctrl
->f2
, DRAM_BANK_ADDR_MAP
, map
);
978 static long spd_set_ram_size(const struct mem_controller
*ctrl
,
979 struct mem_info
*meminfo
)
983 for (i
= 0; i
< DIMM_SOCKETS
; i
++) {
984 struct dimm_size
*sz
= &(meminfo
->sz
[i
]);
985 u32 spd_device
= ctrl
->channel0
[i
];
987 if (!(meminfo
->dimm_mask
& (1 << i
))) {
988 if (meminfo
->dimm_mask
& (1 << (DIMM_SOCKETS
+ i
))) { /* channelB only? */
989 spd_device
= ctrl
->channel1
[i
];
995 spd_get_dimm_size(spd_device
, sz
);
996 if (sz
->per_rank
== 0) {
997 return -1; /* Report SPD error */
999 set_dimm_size(ctrl
, sz
, i
, meminfo
);
1000 set_dimm_cs_map(ctrl
, sz
, i
, meminfo
);
1002 return meminfo
->dimm_mask
;
1005 static void route_dram_accesses(const struct mem_controller
*ctrl
,
1006 unsigned long base_k
, unsigned long limit_k
)
1008 /* Route the addresses to the controller node */
1013 unsigned limit_reg
, base_reg
;
1016 node_id
= ctrl
->node_id
;
1017 index
= (node_id
<< 3);
1018 limit
= (limit_k
<< 2);
1019 limit
&= 0xffff0000;
1020 limit
-= 0x00010000;
1021 limit
|= ( 0 << 8) | (node_id
<< 0);
1022 base
= (base_k
<< 2);
1024 base
|= (0 << 8) | (1<<1) | (1<<0);
1026 limit_reg
= 0x44 + index
;
1027 base_reg
= 0x40 + index
;
1028 for (device
= PCI_DEV(0, 0x18, 1); device
<= PCI_DEV(0, 0x1f, 1);
1029 device
+= PCI_DEV(0, 1, 0)) {
1030 pci_write_config32(device
, limit_reg
, limit
);
1031 pci_write_config32(device
, base_reg
, base
);
1035 static void set_top_mem(unsigned tom_k
, unsigned hole_startk
)
1037 /* Error if I don't have memory */
1042 /* Report the amount of memory. */
1043 printk(BIOS_DEBUG
, "RAM end at 0x%08x kB\n", tom_k
);
1045 /* Now set top of memory */
1047 if (tom_k
> (4*1024*1024)) {
1048 printk_raminit("Handling memory mapped above 4 GB\n");
1049 printk_raminit("Upper RAM end at 0x%08x kB\n", tom_k
);
1050 msr
.lo
= (tom_k
& 0x003fffff) << 10;
1051 msr
.hi
= (tom_k
& 0xffc00000) >> 22;
1052 wrmsr(TOP_MEM2
, msr
);
1053 printk_raminit("Correcting memory amount mapped below 4 GB\n");
1056 /* Leave a 64M hole between TOP_MEM and TOP_MEM2
1057 * so I can see my rom chip and other I/O devices.
1059 if (tom_k
>= 0x003f0000) {
1060 #if CONFIG_HW_MEM_HOLE_SIZEK != 0
1061 if (hole_startk
!= 0) {
1062 tom_k
= hole_startk
;
1066 printk_raminit("Adjusting lower RAM end\n");
1068 printk_raminit("Lower RAM end at 0x%08x kB\n", tom_k
);
1069 msr
.lo
= (tom_k
& 0x003fffff) << 10;
1070 msr
.hi
= (tom_k
& 0xffc00000) >> 22;
1071 wrmsr(TOP_MEM
, msr
);
1074 static unsigned long interleave_chip_selects(const struct mem_controller
*ctrl
, int is_Width128
)
1078 static const uint8_t csbase_low_f0_shift
[] = {
1079 /* 128MB */ (14 - (13-5)),
1080 /* 256MB */ (15 - (13-5)),
1081 /* 512MB */ (15 - (13-5)),
1082 /* 512MB */ (16 - (13-5)),
1083 /* 512MB */ (16 - (13-5)),
1084 /* 1GB */ (16 - (13-5)),
1085 /* 1GB */ (16 - (13-5)),
1086 /* 2GB */ (16 - (13-5)),
1087 /* 2GB */ (17 - (13-5)),
1088 /* 4GB */ (17 - (13-5)),
1089 /* 4GB */ (16 - (13-5)),
1090 /* 8GB */ (17 - (13-5)),
1093 /* cs_base_high is not changed */
1095 uint32_t csbase_inc
;
1096 int chip_selects
, index
;
1098 unsigned common_size
;
1099 unsigned common_cs_mode
;
1100 uint32_t csbase
, csmask
;
1102 /* See if all of the memory chip selects are the same size
1103 * and if so count them.
1105 #if defined(CMOS_VSTART_interleave_chip_selects)
1106 if (read_option(interleave_chip_selects
, 1) == 0)
1109 #if !defined(CONFIG_INTERLEAVE_CHIP_SELECTS) || !CONFIG_INTERLEAVE_CHIP_SELECTS
1116 common_cs_mode
= 0xff;
1117 for (index
= 0; index
< 8; index
++) {
1122 value
= pci_read_config32(ctrl
->f2
, DRAM_CSBASE
+ (index
<< 2));
1124 /* Is it enabled? */
1129 size
= (value
>> 19) & 0x3ff;
1130 if (common_size
== 0) {
1133 /* The size differed fail */
1134 if (common_size
!= size
) {
1138 value
= pci_read_config32(ctrl
->f2
, DRAM_BANK_ADDR_MAP
);
1139 cs_mode
=( value
>> ((index
>>1)*4)) & 0xf;
1140 if (common_cs_mode
== 0xff) {
1141 common_cs_mode
= cs_mode
;
1143 /* The cs_mode differed fail */
1144 if (common_cs_mode
!= cs_mode
) {
1149 /* Chip selects can only be interleaved when there is
1150 * more than one and their is a power of two of them.
1152 bits
= log2(chip_selects
);
1153 if (((1 << bits
) != chip_selects
) || (bits
< 1) || (bits
> 3)) {
1154 //chip_selects max = 8
1158 /* Find the bits of csbase that we need to interleave on */
1159 csbase_inc
= 1 << (csbase_low_f0_shift
[common_cs_mode
]);
1164 /* Compute the initial values for csbase and csbask.
1165 * In csbase just set the enable bit and the base to zero.
1166 * In csmask set the mask bits for the size and page level interleave.
1169 csmask
= (((common_size
<< bits
) - 1) << 19);
1170 csmask
|= 0x3fe0 & ~((csbase_inc
<< bits
) - csbase_inc
);
1171 for (index
= 0; index
< 8; index
++) {
1174 value
= pci_read_config32(ctrl
->f2
, DRAM_CSBASE
+ (index
<< 2));
1175 /* Is it enabled? */
1179 pci_write_config32(ctrl
->f2
, DRAM_CSBASE
+ (index
<< 2), csbase
);
1180 if ((index
& 1) == 0) { //only have 4 CSMASK
1181 pci_write_config32(ctrl
->f2
, DRAM_CSMASK
+ ((index
>>1) << 2), csmask
);
1183 csbase
+= csbase_inc
;
1186 printk(BIOS_DEBUG
, "Interleaved\n");
1188 /* Return the memory size in K */
1189 return common_size
<< ((27-10) + bits
);
1192 static unsigned long order_chip_selects(const struct mem_controller
*ctrl
)
1196 /* Remember which registers we have used in the high 8 bits of tom */
1199 /* Find the largest remaining canidate */
1200 unsigned index
, canidate
;
1201 uint32_t csbase
, csmask
;
1205 for (index
= 0; index
< 8; index
++) {
1207 value
= pci_read_config32(ctrl
->f2
, DRAM_CSBASE
+ (index
<< 2));
1209 /* Is it enabled? */
1214 /* Is it greater? */
1215 if (value
<= csbase
) {
1219 /* Has it already been selected */
1220 if (tom
& (1 << (index
+ 24))) {
1223 /* I have a new canidate */
1228 /* See if I have found a new canidate */
1233 /* Remember the dimm size */
1234 size
= csbase
>> 19;
1236 /* Remember I have used this register */
1237 tom
|= (1 << (canidate
+ 24));
1239 /* Recompute the cs base register value */
1240 csbase
= (tom
<< 19) | 1;
1242 /* Increment the top of memory */
1245 /* Compute the memory mask */
1246 csmask
= ((size
-1) << 19);
1247 csmask
|= 0x3fe0; /* For now don't optimize */
1249 /* Write the new base register */
1250 pci_write_config32(ctrl
->f2
, DRAM_CSBASE
+ (canidate
<< 2), csbase
);
1251 /* Write the new mask register */
1252 if ((canidate
& 1) == 0) { //only have 4 CSMASK
1253 pci_write_config32(ctrl
->f2
, DRAM_CSMASK
+ ((canidate
>> 1) << 2), csmask
);
1257 /* Return the memory size in K */
1258 return (tom
& ~0xff000000) << (27-10);
1261 static unsigned long memory_end_k(const struct mem_controller
*ctrl
, int max_node_id
)
1265 /* Find the last memory address used */
1267 for (node_id
= 0; node_id
< max_node_id
; node_id
++) {
1268 uint32_t limit
, base
;
1270 index
= node_id
<< 3;
1271 base
= pci_read_config32(ctrl
->f1
, 0x40 + index
);
1272 /* Only look at the limit if the base is enabled */
1273 if ((base
& 3) == 3) {
1274 limit
= pci_read_config32(ctrl
->f1
, 0x44 + index
);
1275 end_k
= ((limit
+ 0x00010000) & 0xffff0000) >> 2;
1281 static void order_dimms(const struct mem_controller
*ctrl
,
1282 struct mem_info
*meminfo
)
1284 unsigned long tom_k
, base_k
;
1286 tom_k
= interleave_chip_selects(ctrl
, meminfo
->is_Width128
);
1289 printk(BIOS_DEBUG
, "Interleaving disabled\n");
1290 tom_k
= order_chip_selects(ctrl
);
1293 /* Compute the memory base address */
1294 base_k
= memory_end_k(ctrl
, ctrl
->node_id
);
1296 route_dram_accesses(ctrl
, base_k
, tom_k
);
1297 set_top_mem(tom_k
, 0);
1300 static long disable_dimm(const struct mem_controller
*ctrl
, unsigned index
,
1301 struct mem_info
*meminfo
)
1303 printk(BIOS_DEBUG
, "disabling dimm %02x\n", index
);
1304 if (!(meminfo
->dimm_mask
& 0x0F) && (meminfo
->dimm_mask
& 0xF0)) { /* channelB only? */
1305 pci_write_config32(ctrl
->f2
, DRAM_CSBASE
+ (((index
<< 1) + 4) << 2), 0);
1306 pci_write_config32(ctrl
->f2
, DRAM_CSBASE
+ (((index
<< 1) + 5) << 2), 0);
1308 pci_write_config32(ctrl
->f2
, DRAM_CSBASE
+ (((index
<< 1) + 0) << 2), 0);
1309 pci_write_config32(ctrl
->f2
, DRAM_CSBASE
+ (((index
<< 1) + 1) << 2), 0);
1310 #if CONFIG_QRANK_DIMM_SUPPORT
1311 if (meminfo
->sz
[index
].rank
== 4) {
1312 pci_write_config32(ctrl
->f2
, DRAM_CSBASE
+ (((index
<< 1) + 4) << 2), 0);
1313 pci_write_config32(ctrl
->f2
, DRAM_CSBASE
+ (((index
<< 1) + 5) << 2), 0);
1318 meminfo
->dimm_mask
&= ~(1 << index
);
1319 return meminfo
->dimm_mask
;
1322 static long spd_handle_unbuffered_dimms(const struct mem_controller
*ctrl
,
1323 struct mem_info
*meminfo
)
1326 uint32_t registered
;
1329 for (i
= 0; (i
< DIMM_SOCKETS
); i
++) {
1331 u32 spd_device
= ctrl
->channel0
[i
];
1332 if (!(meminfo
->dimm_mask
& (1 << i
))) {
1333 if (meminfo
->dimm_mask
& (1 << (DIMM_SOCKETS
+ i
))) { /* channelB only? */
1334 spd_device
= ctrl
->channel1
[i
];
1339 value
= spd_read_byte(spd_device
, SPD_DIMM_TYPE
);
1344 /* Registered dimm ? */
1346 if ((value
== SPD_DIMM_TYPE_RDIMM
) || (value
== SPD_DIMM_TYPE_mRDIMM
)) {
1347 //check SPD_MOD_ATTRIB to verify it is SPD_MOD_ATTRIB_REGADC (0x11)?
1348 registered
|= (1<<i
);
1352 if (is_opteron(ctrl
)) {
1354 if ( registered
!= (meminfo
->dimm_mask
& ((1<<DIMM_SOCKETS
)-1)) ) {
1355 meminfo
->dimm_mask
&= (registered
| (registered
<< DIMM_SOCKETS
) ); //disable unbuffed dimm
1356 // die("Mixed buffered and registered dimms not supported");
1358 //By yhlu for debug M2, s1g1 can do dual channel, but it use unbuffer DIMM
1360 die("Unbuffered Dimms not supported on Opteron");
1366 dcl
= pci_read_config32(ctrl
->f2
, DRAM_CONFIG_LOW
);
1367 dcl
&= ~DCL_UnBuffDimm
;
1368 meminfo
->is_registered
= 1;
1370 dcl
|= DCL_UnBuffDimm
;
1371 meminfo
->is_registered
= 0;
1373 pci_write_config32(ctrl
->f2
, DRAM_CONFIG_LOW
, dcl
);
1375 if (meminfo
->is_registered
) {
1376 printk(BIOS_SPEW
, "Registered\n");
1378 printk(BIOS_SPEW
, "Unbuffered\n");
1380 return meminfo
->dimm_mask
;
1383 static unsigned int spd_detect_dimms(const struct mem_controller
*ctrl
)
1388 for (i
= 0; i
< DIMM_SOCKETS
; i
++) {
1391 device
= ctrl
->channel0
[i
];
1392 printk_raminit("DIMM socket %i, channel 0 SPD device is 0x%02x\n", i
, device
);
1394 byte
= spd_read_byte(ctrl
->channel0
[i
], SPD_MEM_TYPE
); /* Type */
1395 if (byte
== SPD_MEM_TYPE_SDRAM_DDR2
) {
1396 printk_raminit("\tDIMM detected\n");
1397 dimm_mask
|= (1 << i
);
1400 device
= ctrl
->channel1
[i
];
1401 printk_raminit("DIMM socket %i, channel 1 SPD device is 0x%02x\n", i
, device
);
1403 byte
= spd_read_byte(ctrl
->channel1
[i
], SPD_MEM_TYPE
);
1404 if (byte
== SPD_MEM_TYPE_SDRAM_DDR2
) {
1405 printk_raminit("\tDIMM detected\n");
1406 dimm_mask
|= (1 << (i
+ DIMM_SOCKETS
));
1413 static long spd_enable_2channels(const struct mem_controller
*ctrl
, struct mem_info
*meminfo
)
1417 /* SPD addresses to verify are identical */
1418 static const uint8_t addresses
[] = {
1419 2, /* Type should be DDR2 SDRAM */
1420 3, /* *Row addresses */
1421 4, /* *Column addresses */
1422 5, /* *Number of DIMM Ranks */
1423 6, /* *Module Data Width*/
1424 11, /* *DIMM Conf Type */
1425 13, /* *Pri SDRAM Width */
1426 17, /* *Logical Banks */
1427 20, /* *DIMM Type Info */
1428 21, /* *SDRAM Module Attributes */
1429 27, /* *tRP Row precharge time */
1430 28, /* *Minimum Row Active to Row Active Delay (tRRD) */
1431 29, /* *tRCD RAS to CAS */
1432 30, /* *tRAS Activate to Precharge */
1433 36, /* *Write recovery time (tWR) */
1434 37, /* *Internal write to read command delay (tRDP) */
1435 38, /* *Internal read to precharge command delay (tRTP) */
1436 40, /* *Extension of Byte 41 tRC and Byte 42 tRFC */
1437 41, /* *Minimum Active to Active/Auto Refresh Time(Trc) */
1438 42, /* *Minimum Auto Refresh Command Time(Trfc) */
1439 /* The SPD addresses 18, 9, 23, 26 need special treatment like
1440 * in spd_set_memclk. Right now they cause many false negatives.
1441 * Keep them at the end to see other mismatches (if any).
1443 18, /* *Supported CAS Latencies */
1444 9, /* *Cycle time at highest CAS Latency CL=X */
1445 23, /* *Cycle time at CAS Latency (CLX - 1) */
1446 25, /* *Cycle time at CAS Latency (CLX - 2) */
1451 /* S1G1 and AM2 sockets are Mod64BitMux capable. */
1452 #if CONFIG_CPU_SOCKET_TYPE == 0x11 || CONFIG_CPU_SOCKET_TYPE == 0x12
1458 /* If the dimms are not in pairs do not do dual channels */
1459 if ((meminfo
->dimm_mask
& ((1 << DIMM_SOCKETS
) - 1)) !=
1460 ((meminfo
->dimm_mask
>> DIMM_SOCKETS
) & ((1 << DIMM_SOCKETS
) - 1))) {
1461 goto single_channel
;
1463 /* If the cpu is not capable of doing dual channels don't do dual channels */
1464 nbcap
= pci_read_config32(ctrl
->f3
, NORTHBRIDGE_CAP
);
1465 if (!(nbcap
& NBCAP_128Bit
)) {
1466 goto single_channel
;
1468 for (i
= 0; (i
< 4) && (ctrl
->channel0
[i
]); i
++) {
1469 unsigned device0
, device1
;
1472 /* If I don't have a dimm skip this one */
1473 if (!(meminfo
->dimm_mask
& (1 << i
))) {
1476 device0
= ctrl
->channel0
[i
];
1477 device1
= ctrl
->channel1
[i
];
1478 /* Abort if the chips don't support a common CAS latency. */
1479 common_cl
= spd_read_byte(device0
, 18) & spd_read_byte(device1
, 18);
1481 printk(BIOS_DEBUG
, "No common CAS latency supported\n");
1482 goto single_channel
;
1484 printk_raminit("Common CAS latency bitfield: 0x%02x\n", common_cl
);
1486 for (j
= 0; j
< ARRAY_SIZE(addresses
); j
++) {
1488 addr
= addresses
[j
];
1489 value0
= spd_read_byte(device0
, addr
);
1493 value1
= spd_read_byte(device1
, addr
);
1497 if (value0
!= value1
) {
1498 printk_raminit("SPD values differ between channel 0/1 for byte %i\n", addr
);
1499 goto single_channel
;
1503 printk(BIOS_SPEW
, "Enabling dual channel memory\n");
1504 dcl
= pci_read_config32(ctrl
->f2
, DRAM_CONFIG_LOW
);
1505 dcl
&= ~DCL_BurstLength32
; /* 32byte mode may be preferred in platforms that include graphics controllers that generate a lot of 32-bytes system memory accesses
1506 32byte mode is not supported when the DRAM interface is 128 bits wides, even 32byte mode is set, system still use 64 byte mode */
1507 dcl
|= DCL_Width128
;
1508 pci_write_config32(ctrl
->f2
, DRAM_CONFIG_LOW
, dcl
);
1509 meminfo
->is_Width128
= 1;
1510 return meminfo
->dimm_mask
;
1513 meminfo
->is_Width128
= 0;
1514 meminfo
->is_64MuxMode
= 0;
1517 if ((meminfo
->dimm_mask
& ((1 << DIMM_SOCKETS
) - 1)) !=
1518 ((meminfo
->dimm_mask
>> DIMM_SOCKETS
) & ((1 << DIMM_SOCKETS
) - 1))) {
1519 if (((meminfo
->dimm_mask
>> DIMM_SOCKETS
) & ((1 << DIMM_SOCKETS
) - 1))) {
1520 /* mux capable and single dimm in channelB */
1522 printk(BIOS_SPEW
, "Enable 64MuxMode & BurstLength32\n");
1523 dcm
= pci_read_config32(ctrl
->f2
, DRAM_CTRL_MISC
);
1524 dcm
|= DCM_Mode64BitMux
;
1525 pci_write_config32(ctrl
->f2
, DRAM_CTRL_MISC
, dcm
);
1526 dcl
= pci_read_config32(ctrl
->f2
, DRAM_CONFIG_LOW
);
1527 //dcl |= DCL_BurstLength32; /* 32byte mode for channelB only */
1528 pci_write_config32(ctrl
->f2
, DRAM_CONFIG_LOW
, dcl
);
1529 meminfo
->is_64MuxMode
= 1;
1531 meminfo
->dimm_mask
&= ~((1 << (DIMM_SOCKETS
* 2)) - (1 << DIMM_SOCKETS
));
1534 } else { /* unmatched dual dimms ? */
1535 /* unmatched dual dimms not supported by meminit code. Use single channelA dimm. */
1536 meminfo
->dimm_mask
&= ~((1 << (DIMM_SOCKETS
* 2)) - (1 << DIMM_SOCKETS
));
1537 printk(BIOS_SPEW
, "Unmatched dual dimms. Use single channelA dimm.\n");
1539 return meminfo
->dimm_mask
;
1543 uint16_t cycle_time
;
1544 uint8_t divisor
; /* In 1/40 ns increments */
1549 uint8_t DcqByPassMax
;
1550 uint32_t dch_memclk
;
1554 static const struct mem_param speed
[] = {
1557 .cycle_time
= 0x500,
1558 .divisor
= 200, // how many 1/40ns per clock
1559 .dch_memclk
= DCH_MemClkFreq_200MHz
, //0
1569 .cycle_time
= 0x375,
1570 .divisor
= 150, //????
1571 .dch_memclk
= DCH_MemClkFreq_266MHz
, //1
1580 .cycle_time
= 0x300,
1582 .dch_memclk
= DCH_MemClkFreq_333MHz
, //2
1592 .cycle_time
= 0x250,
1594 .dch_memclk
= DCH_MemClkFreq_400MHz
,//3
1602 .cycle_time
= 0x000,
1606 static const struct mem_param
*get_mem_param(unsigned min_cycle_time
)
1609 const struct mem_param
*param
;
1610 for (param
= &speed
[0]; param
->cycle_time
; param
++) {
1611 if (min_cycle_time
> (param
+1)->cycle_time
) {
1615 if (!param
->cycle_time
) {
1616 die("min_cycle_time to low");
1618 printk(BIOS_SPEW
, "%s\n", param
->name
);
1622 static uint8_t get_exact_divisor(int i
, uint8_t divisor
)
1624 //input divisor could be 200(200), 150(266), 120(333), 100 (400)
1625 static const uint8_t dv_a
[] = {
1626 /* 200 266 333 400 */
1627 /*4 */ 250, 250, 250, 250,
1628 /*5 */ 200, 200, 200, 100,
1629 /*6 */ 200, 166, 166, 100,
1630 /*7 */ 200, 171, 142, 100,
1632 /*8 */ 200, 150, 125, 100,
1633 /*9 */ 200, 156, 133, 100,
1634 /*10*/ 200, 160, 120, 100,
1635 /*11*/ 200, 163, 127, 100,
1637 /*12*/ 200, 150, 133, 100,
1638 /*13*/ 200, 153, 123, 100,
1639 /*14*/ 200, 157, 128, 100,
1640 /*15*/ 200, 160, 120, 100,
1647 /* Check for FID control support */
1648 struct cpuid_result cpuid1
;
1649 cpuid1
= cpuid(0x80000007);
1650 if( cpuid1
.edx
& 0x02 ) {
1651 /* Use current FID */
1653 msr
= rdmsr(0xc0010042);
1654 fid_cur
= msr
.lo
& 0x3f;
1658 /* Use startup FID */
1660 msr
= rdmsr(0xc0010015);
1661 fid_start
= (msr
.lo
& (0x3f << 24));
1663 index
= fid_start
>>25;
1666 if (index
>12) return divisor
;
1668 if (i
>3) return divisor
;
1670 return dv_a
[index
* 4+i
];
1675 struct spd_set_memclk_result
{
1676 const struct mem_param
*param
;
/*
 * Expand an SPD cycle-time byte (high nibble = whole ns, low nibble =
 * tenths, or codes 10..13 for the fractions .25/.33/.66/.75) into a
 * linear value with two hex "fraction digits", e.g. 0x52 -> 0x520,
 * 0x3a -> 0x325.
 */
static unsigned convert_to_linear(unsigned value)
{
	static const unsigned fraction[] = { 0x25, 0x33, 0x66, 0x75 };
	unsigned low = value & 0xf;

	/* We need to convert value to more readable */
	if (low < 10) { //no .25, .33, .66, .75
		/* plain tenths: just append a zero digit */
		return value << 4;
	}
	return ((value & 0xf0) << 4) | fraction[low - 10];
}
1696 static const uint8_t latency_indicies
[] = { 25, 23, 9 };
1698 static int find_optimum_spd_latency(u32 spd_device
, unsigned *min_latency
, unsigned *min_cycle_time
)
1700 int new_cycle_time
, new_latency
;
1705 /* First find the supported CAS latencies
1706 * Byte 18 for DDR SDRAM is interpreted:
1707 * bit 3 == CAS Latency = 3
1708 * bit 4 == CAS Latency = 4
1709 * bit 5 == CAS Latency = 5
1710 * bit 6 == CAS Latency = 6
1712 new_cycle_time
= 0x500;
1715 latencies
= spd_read_byte(spd_device
, SPD_CAS_LAT
);
1719 printk_raminit("\tlatencies: %08x\n", latencies
);
1720 /* Compute the lowest cas latency which can be expressed in this
1721 * particular SPD EEPROM. You can store at most settings for 3
1722 * contiguous CAS latencies, so by taking the highest CAS
1723 * latency maked as supported in the SPD and subtracting 2 you
1724 * get the lowest expressable CAS latency. That latency is not
1725 * necessarily supported, but a (maybe invalid) entry exists
1728 latency
= log2(latencies
) - 2;
1730 /* Loop through and find a fast clock with a low latency */
1731 for (index
= 0; index
< 3; index
++, latency
++) {
1733 if ((latency
< 3) || (latency
> 6) ||
1734 (!(latencies
& (1 << latency
)))) {
1737 value
= spd_read_byte(spd_device
, latency_indicies
[index
]);
1742 printk_raminit("\tindex: %08x\n", index
);
1743 printk_raminit("\t\tlatency: %08x\n", latency
);
1744 printk_raminit("\t\tvalue1: %08x\n", value
);
1746 value
= convert_to_linear(value
);
1748 printk_raminit("\t\tvalue2: %08x\n", value
);
1750 /* Only increase the latency if we decrease the clock */
1751 if (value
>= *min_cycle_time
) {
1752 if (value
< new_cycle_time
) {
1753 new_cycle_time
= value
;
1754 new_latency
= latency
;
1755 } else if (value
== new_cycle_time
) {
1756 if (new_latency
> latency
) {
1757 new_latency
= latency
;
1761 printk_raminit("\t\tnew_cycle_time: %08x\n", new_cycle_time
);
1762 printk_raminit("\t\tnew_latency: %08x\n", new_latency
);
1766 if (new_latency
> 6){
1770 /* Does min_latency need to be increased? */
1771 if (new_cycle_time
> *min_cycle_time
) {
1772 *min_cycle_time
= new_cycle_time
;
1775 /* Does min_cycle_time need to be increased? */
1776 if (new_latency
> *min_latency
) {
1777 *min_latency
= new_latency
;
1780 printk_raminit("2 min_cycle_time: %08x\n", *min_cycle_time
);
1781 printk_raminit("2 min_latency: %08x\n", *min_latency
);
1786 static struct spd_set_memclk_result
spd_set_memclk(const struct mem_controller
*ctrl
, struct mem_info
*meminfo
)
1788 /* Compute the minimum cycle time for these dimms */
1789 struct spd_set_memclk_result result
;
1790 unsigned min_cycle_time
, min_latency
, bios_cycle_time
;
1794 static const uint16_t min_cycle_times
[] = { // use full speed to compare
1795 [NBCAP_MEMCLK_NOLIMIT
] = 0x250, /*2.5ns */
1796 [NBCAP_MEMCLK_333MHZ
] = 0x300, /* 3.0ns */
1797 [NBCAP_MEMCLK_266MHZ
] = 0x375, /* 3.75ns */
1798 [NBCAP_MEMCLK_200MHZ
] = 0x500, /* 5.0s */
1802 value
= pci_read_config32(ctrl
->f3
, NORTHBRIDGE_CAP
);
1803 min_cycle_time
= min_cycle_times
[(value
>> NBCAP_MEMCLK_SHIFT
) & NBCAP_MEMCLK_MASK
];
1804 bios_cycle_time
= min_cycle_times
[
1805 #ifdef CMOS_VSTART_max_mem_clock
1806 read_option(max_mem_clock
, 0)
1808 #if defined(CONFIG_MAX_MEM_CLOCK)
1809 CONFIG_MAX_MEM_CLOCK
1811 0 // use DDR400 as default
1816 if (bios_cycle_time
> min_cycle_time
) {
1817 min_cycle_time
= bios_cycle_time
;
1821 printk_raminit("1 min_cycle_time: %08x\n", min_cycle_time
);
1823 /* Compute the least latency with the fastest clock supported
1824 * by both the memory controller and the dimms.
1826 for (i
= 0; i
< DIMM_SOCKETS
; i
++) {
1829 printk_raminit("1.1 dimm_mask: %08x\n", meminfo
->dimm_mask
);
1830 printk_raminit("i: %08x\n",i
);
1832 if (meminfo
->dimm_mask
& (1 << i
)) {
1833 spd_device
= ctrl
->channel0
[i
];
1834 printk_raminit("Channel 0 settings:\n");
1836 switch (find_optimum_spd_latency(spd_device
, &min_latency
, &min_cycle_time
)) {
1844 if (meminfo
->dimm_mask
& (1 << (DIMM_SOCKETS
+ i
))) {
1845 spd_device
= ctrl
->channel1
[i
];
1846 printk_raminit("Channel 1 settings:\n");
1848 switch (find_optimum_spd_latency(spd_device
, &min_latency
, &min_cycle_time
)) {
1858 /* Make a second pass through the dimms and disable
1859 * any that cannot support the selected memclk and cas latency.
1862 printk_raminit("3 min_cycle_time: %08x\n", min_cycle_time
);
1863 printk_raminit("3 min_latency: %08x\n", min_latency
);
1865 for (i
= 0; (i
< DIMM_SOCKETS
); i
++) {
1870 u32 spd_device
= ctrl
->channel0
[i
];
1872 if (!(meminfo
->dimm_mask
& (1 << i
))) {
1873 if (meminfo
->dimm_mask
& (1 << (DIMM_SOCKETS
+ i
))) { /* channelB only? */
1874 spd_device
= ctrl
->channel1
[i
];
1880 latencies
= spd_read_byte(spd_device
, SPD_CAS_LAT
);
1881 if (latencies
< 0) goto hw_error
;
1882 if (latencies
== 0) {
1886 /* Compute the lowest cas latency supported */
1887 latency
= log2(latencies
) -2;
1889 /* Walk through searching for the selected latency */
1890 for (index
= 0; index
< 3; index
++, latency
++) {
1891 if (!(latencies
& (1 << latency
))) {
1894 if (latency
== min_latency
)
1897 /* If I can't find the latency or my index is bad error */
1898 if ((latency
!= min_latency
) || (index
>= 3)) {
1902 /* Read the min_cycle_time for this latency */
1903 val
= spd_read_byte(spd_device
, latency_indicies
[index
]);
1904 if (val
< 0) goto hw_error
;
1906 val
= convert_to_linear(val
);
1907 /* All is good if the selected clock speed
1908 * is what I need or slower.
1910 if (val
<= min_cycle_time
) {
1913 /* Otherwise I have an error, disable the dimm */
1915 meminfo
->dimm_mask
= disable_dimm(ctrl
, i
, meminfo
);
1918 printk_raminit("4 min_cycle_time: %08x\n", min_cycle_time
);
1920 /* Now that I know the minimum cycle time lookup the memory parameters */
1921 result
.param
= get_mem_param(min_cycle_time
);
1923 /* Update DRAM Config High with our selected memory speed */
1924 value
= pci_read_config32(ctrl
->f2
, DRAM_CONFIG_HIGH
);
1925 value
&= ~(DCH_MemClkFreq_MASK
<< DCH_MemClkFreq_SHIFT
);
1927 value
|= result
.param
->dch_memclk
<< DCH_MemClkFreq_SHIFT
;
1928 pci_write_config32(ctrl
->f2
, DRAM_CONFIG_HIGH
, value
);
1930 printk(BIOS_DEBUG
, "%s\n", result
.param
->name
);
1932 /* Update DRAM Timing Low with our selected cas latency */
1933 value
= pci_read_config32(ctrl
->f2
, DRAM_TIMING_LOW
);
1934 value
&= ~(DTL_TCL_MASK
<< DTL_TCL_SHIFT
);
1935 value
|= (min_latency
- DTL_TCL_BASE
) << DTL_TCL_SHIFT
;
1936 pci_write_config32(ctrl
->f2
, DRAM_TIMING_LOW
, value
);
1938 result
.dimm_mask
= meminfo
->dimm_mask
;
1941 result
.param
= (const struct mem_param
*)0;
1942 result
.dimm_mask
= -1;
/*
 * Convert the 3-bit SPD fractional code to quarter-ns units via a
 * lookup table (unused/reserved codes map to 0).
 *
 * Bug fix: the table had only 7 entries while the index (value & 0x7)
 * can reach 7, so code 7 read one byte past the end of the array
 * (undefined behavior).  Added an explicit 8th entry mapping the
 * reserved code to 0.
 */
static unsigned convert_to_1_4(unsigned value)
{
	static const uint8_t fraction[] = { 0, 1, 2, 2, 3, 3, 0, 0 };
	unsigned valuex;

	/* We need to convert value to more readable */
	valuex = fraction[value & 0x7];
	return valuex;
}
1956 static int get_dimm_Trc_clocks(u32 spd_device
, const struct mem_param
*param
)
1961 value
= spd_read_byte(spd_device
, SPD_TRC
);
1964 printk_raminit("update_dimm_Trc: tRC (41) = %08x\n", value
);
1966 value2
= spd_read_byte(spd_device
, SPD_TRC
-1);
1968 value
+= convert_to_1_4(value2
>>4);
1971 printk_raminit("update_dimm_Trc: tRC final value = %i\n", value
);
1973 clocks
= CEIL_DIV(value
, param
->divisor
);
1974 printk_raminit("update_dimm_Trc: clocks = %i\n", clocks
);
1976 if (clocks
< DTL_TRC_MIN
) {
1977 // We might want to die here instead or (at least|better) disable this bank.
1978 printk(BIOS_NOTICE
, "update_dimm_Trc: Can't refresh fast enough, "
1979 "want %i clocks, minimum is %i clocks.\n", clocks
, DTL_TRC_MIN
);
1980 clocks
= DTL_TRC_MIN
;
1985 static int update_dimm_Trc(const struct mem_controller
*ctrl
,
1986 const struct mem_param
*param
,
1987 int i
, long dimm_mask
)
1989 int clocks
, old_clocks
;
1991 u32 spd_device
= ctrl
->channel0
[i
];
1993 if (!(dimm_mask
& (1 << i
)) && (dimm_mask
& (1 << (DIMM_SOCKETS
+ i
)))) { /* channelB only? */
1994 spd_device
= ctrl
->channel1
[i
];
1997 clocks
= get_dimm_Trc_clocks(spd_device
, param
);
2000 if (clocks
> DTL_TRC_MAX
) {
2003 printk_raminit("update_dimm_Trc: clocks after adjustment = %i\n", clocks
);
2005 dtl
= pci_read_config32(ctrl
->f2
, DRAM_TIMING_LOW
);
2006 old_clocks
= ((dtl
>> DTL_TRC_SHIFT
) & DTL_TRC_MASK
) + DTL_TRC_BASE
;
2007 if (old_clocks
>= clocks
) { //?? someone did it
2008 // clocks = old_clocks;
2011 dtl
&= ~(DTL_TRC_MASK
<< DTL_TRC_SHIFT
);
2012 dtl
|= ((clocks
- DTL_TRC_BASE
) << DTL_TRC_SHIFT
);
2013 pci_write_config32(ctrl
->f2
, DRAM_TIMING_LOW
, dtl
);
2017 static int update_dimm_Trfc(const struct mem_controller
*ctrl
, const struct mem_param
*param
, int i
, struct mem_info
*meminfo
)
2019 unsigned clocks
, old_clocks
;
2023 u32 spd_device
= ctrl
->channel0
[i
];
2025 if (!(meminfo
->dimm_mask
& (1 << i
)) && (meminfo
->dimm_mask
& (1 << (DIMM_SOCKETS
+ i
)))) { /* channelB only? */
2026 spd_device
= ctrl
->channel1
[i
];
2027 ch_b
= 2; /* offset to channelB trfc setting */
2030 //get the cs_size --> logic dimm size
2031 value
= spd_read_byte(spd_device
, SPD_PRI_WIDTH
);
2036 value
= 6 - log2(value
); //4-->4, 8-->3, 16-->2
2038 clocks
= meminfo
->sz
[i
].per_rank
- 27 + 2 - value
;
2040 dth
= pci_read_config32(ctrl
->f2
, DRAM_TIMING_HIGH
);
2042 old_clocks
= ((dth
>> (DTH_TRFC0_SHIFT
+ ((i
+ ch_b
) * 3))) & DTH_TRFC_MASK
);
2044 if (old_clocks
>= clocks
) { // some one did it?
2047 dth
&= ~(DTH_TRFC_MASK
<< (DTH_TRFC0_SHIFT
+ ((i
+ ch_b
) * 3)));
2048 dth
|= clocks
<< (DTH_TRFC0_SHIFT
+ ((i
+ ch_b
) * 3));
2049 pci_write_config32(ctrl
->f2
, DRAM_TIMING_HIGH
, dth
);
2053 static int update_dimm_TT_1_4(const struct mem_controller
*ctrl
, const struct mem_param
*param
, int i
, long dimm_mask
,
2055 unsigned SPD_TT
, unsigned TT_SHIFT
, unsigned TT_MASK
, unsigned TT_BASE
, unsigned TT_MIN
, unsigned TT_MAX
)
2057 unsigned clocks
, old_clocks
;
2060 u32 spd_device
= ctrl
->channel0
[i
];
2062 if (!(dimm_mask
& (1 << i
)) && (dimm_mask
& (1 << (DIMM_SOCKETS
+ i
)))) { /* channelB only? */
2063 spd_device
= ctrl
->channel1
[i
];
2066 value
= spd_read_byte(spd_device
, SPD_TT
); //already in 1/4 ns
2067 if (value
< 0) return -1;
2069 clocks
= CEIL_DIV(value
, param
->divisor
);
2070 if (clocks
< TT_MIN
) {
2074 if (clocks
> TT_MAX
) {
2075 printk(BIOS_INFO
, "warning spd byte : %x = %x > TT_MAX: %x, setting TT_MAX", SPD_TT
, value
, TT_MAX
);
2079 dtl
= pci_read_config32(ctrl
->f2
, TT_REG
);
2081 old_clocks
= ((dtl
>> TT_SHIFT
) & TT_MASK
) + TT_BASE
;
2082 if (old_clocks
>= clocks
) { //some one did it?
2083 // clocks = old_clocks;
2086 dtl
&= ~(TT_MASK
<< TT_SHIFT
);
2087 dtl
|= ((clocks
- TT_BASE
) << TT_SHIFT
);
2088 pci_write_config32(ctrl
->f2
, TT_REG
, dtl
);
2092 static int update_dimm_Trcd(const struct mem_controller
*ctrl
,
2093 const struct mem_param
*param
, int i
, long dimm_mask
)
2095 return update_dimm_TT_1_4(ctrl
, param
, i
, dimm_mask
, DRAM_TIMING_LOW
, SPD_TRCD
, DTL_TRCD_SHIFT
, DTL_TRCD_MASK
, DTL_TRCD_BASE
, DTL_TRCD_MIN
, DTL_TRCD_MAX
);
2098 static int update_dimm_Trrd(const struct mem_controller
*ctrl
, const struct mem_param
*param
, int i
, long dimm_mask
)
2100 return update_dimm_TT_1_4(ctrl
, param
, i
, dimm_mask
, DRAM_TIMING_LOW
, SPD_TRRD
, DTL_TRRD_SHIFT
, DTL_TRRD_MASK
, DTL_TRRD_BASE
, DTL_TRRD_MIN
, DTL_TRRD_MAX
);
2103 static int update_dimm_Tras(const struct mem_controller
*ctrl
, const struct mem_param
*param
, int i
, long dimm_mask
)
2105 unsigned clocks
, old_clocks
;
2108 u32 spd_device
= ctrl
->channel0
[i
];
2110 if (!(dimm_mask
& (1 << i
)) && (dimm_mask
& (1 << (DIMM_SOCKETS
+ i
)))) { /* channelB only? */
2111 spd_device
= ctrl
->channel1
[i
];
2114 value
= spd_read_byte(spd_device
, SPD_TRAS
); //in 1 ns
2115 if (value
< 0) return -1;
2116 printk_raminit("update_dimm_Tras: 0 value= %08x\n", value
);
2118 value
<<= 2; //convert it to in 1/4ns
2121 printk_raminit("update_dimm_Tras: 1 value= %08x\n", value
);
2123 clocks
= CEIL_DIV(value
, param
->divisor
);
2124 printk_raminit("update_dimm_Tras: divisor= %08x\n", param
->divisor
);
2125 printk_raminit("update_dimm_Tras: clocks= %08x\n", clocks
);
2126 if (clocks
< DTL_TRAS_MIN
) {
2127 clocks
= DTL_TRAS_MIN
;
2129 if (clocks
> DTL_TRAS_MAX
) {
2132 dtl
= pci_read_config32(ctrl
->f2
, DRAM_TIMING_LOW
);
2133 old_clocks
= ((dtl
>> DTL_TRAS_SHIFT
) & DTL_TRAS_MASK
) + DTL_TRAS_BASE
;
2134 if (old_clocks
>= clocks
) { // someone did it?
2137 dtl
&= ~(DTL_TRAS_MASK
<< DTL_TRAS_SHIFT
);
2138 dtl
|= ((clocks
- DTL_TRAS_BASE
) << DTL_TRAS_SHIFT
);
2139 pci_write_config32(ctrl
->f2
, DRAM_TIMING_LOW
, dtl
);
2143 static int update_dimm_Trp(const struct mem_controller
*ctrl
,
2144 const struct mem_param
*param
, int i
, long dimm_mask
)
2146 return update_dimm_TT_1_4(ctrl
, param
, i
, dimm_mask
, DRAM_TIMING_LOW
, SPD_TRP
, DTL_TRP_SHIFT
, DTL_TRP_MASK
, DTL_TRP_BASE
, DTL_TRP_MIN
, DTL_TRP_MAX
);
2150 static int update_dimm_Trtp(const struct mem_controller
*ctrl
,
2151 const struct mem_param
*param
, int i
, struct mem_info
*meminfo
)
2153 /* need to figure if it is 32 byte burst or 64 bytes burst */
2155 if (!meminfo
->is_Width128
) {
2157 dword
= pci_read_config32(ctrl
->f2
, DRAM_CONFIG_LOW
);
2158 if ((dword
& DCL_BurstLength32
)) offset
= 0;
2160 return update_dimm_TT_1_4(ctrl
, param
, i
, meminfo
->dimm_mask
, DRAM_TIMING_LOW
, SPD_TRTP
, DTL_TRTP_SHIFT
, DTL_TRTP_MASK
, DTL_TRTP_BASE
+offset
, DTL_TRTP_MIN
+offset
, DTL_TRTP_MAX
+offset
);
2164 static int update_dimm_Twr(const struct mem_controller
*ctrl
, const struct mem_param
*param
, int i
, long dimm_mask
)
2166 return update_dimm_TT_1_4(ctrl
, param
, i
, dimm_mask
, DRAM_TIMING_LOW
, SPD_TWR
, DTL_TWR_SHIFT
, DTL_TWR_MASK
, DTL_TWR_BASE
, DTL_TWR_MIN
, DTL_TWR_MAX
);
2170 static int update_dimm_Tref(const struct mem_controller
*ctrl
,
2171 const struct mem_param
*param
, int i
, long dimm_mask
)
2173 uint32_t dth
, dth_old
;
2175 u32 spd_device
= ctrl
->channel0
[i
];
2177 if (!(dimm_mask
& (1 << i
)) && (dimm_mask
& (1 << (DIMM_SOCKETS
+ i
)))) { /* channelB only? */
2178 spd_device
= ctrl
->channel1
[i
];
2181 value
= spd_read_byte(spd_device
, SPD_TREF
); // 0: 15.625us, 1: 3.9us 2: 7.8 us....
2182 if (value
< 0) return -1;
2190 dth
= pci_read_config32(ctrl
->f2
, DRAM_TIMING_HIGH
);
2193 dth
&= ~(DTH_TREF_MASK
<< DTH_TREF_SHIFT
);
2194 dth
|= (value
<< DTH_TREF_SHIFT
);
2195 if (dth_old
!= dth
) {
2196 pci_write_config32(ctrl
->f2
, DRAM_TIMING_HIGH
, dth
);
2201 static void set_4RankRDimm(const struct mem_controller
*ctrl
,
2202 const struct mem_param
*param
, struct mem_info
*meminfo
)
2204 #if CONFIG_QRANK_DIMM_SUPPORT
2207 long dimm_mask
= meminfo
->dimm_mask
;
2210 if (!(meminfo
->is_registered
)) return;
2214 for (i
= 0; i
< DIMM_SOCKETS
; i
++) {
2215 if (!(dimm_mask
& (1 << i
))) {
2219 if (meminfo
->sz
[i
].rank
== 4) {
2227 dch
= pci_read_config32(ctrl
->f2
, DRAM_CONFIG_HIGH
);
2228 dch
|= DCH_FourRankRDimm
;
2229 pci_write_config32(ctrl
->f2
, DRAM_CONFIG_HIGH
, dch
);
2234 static uint32_t get_extra_dimm_mask(const struct mem_controller
*ctrl
,
2235 struct mem_info
*meminfo
)
2241 uint32_t mask_single_rank
;
2242 uint32_t mask_page_1k
;
2244 #if CONFIG_QRANK_DIMM_SUPPORT
2248 long dimm_mask
= meminfo
->dimm_mask
;
2253 mask_single_rank
= 0;
2256 for (i
= 0; i
< DIMM_SOCKETS
; i
++) {
2257 u32 spd_device
= ctrl
->channel0
[i
];
2258 if (!(dimm_mask
& (1 << i
))) {
2259 if (dimm_mask
& (1 << (DIMM_SOCKETS
+ i
))) { /* channelB only? */
2260 spd_device
= ctrl
->channel1
[i
];
2266 if (meminfo
->sz
[i
].rank
== 1) {
2267 mask_single_rank
|= 1<<i
;
2270 if (meminfo
->sz
[i
].col
==10) {
2271 mask_page_1k
|= 1<<i
;
2275 value
= spd_read_byte(spd_device
, SPD_PRI_WIDTH
);
2277 #if CONFIG_QRANK_DIMM_SUPPORT
2278 rank
= meminfo
->sz
[i
].rank
;
2283 #if CONFIG_QRANK_DIMM_SUPPORT
2285 mask_x4
|= 1<<(i
+2);
2288 } else if (value
==16) {
2290 #if CONFIG_QRANK_DIMM_SUPPORT
2292 mask_x16
|= 1<<(i
+2);
2299 meminfo
->x4_mask
= mask_x4
;
2300 meminfo
->x16_mask
= mask_x16
;
2302 meminfo
->single_rank_mask
= mask_single_rank
;
2303 meminfo
->page_1k_mask
= mask_page_1k
;
2310 static void set_dimm_x4(const struct mem_controller
*ctrl
, const struct mem_param
*param
, struct mem_info
*meminfo
)
2313 dcl
= pci_read_config32(ctrl
->f2
, DRAM_CONFIG_LOW
);
2314 dcl
&= ~(DCL_X4Dimm_MASK
<<DCL_X4Dimm_SHIFT
);
2315 dcl
|= ((meminfo
->x4_mask
) & 0xf) << (DCL_X4Dimm_SHIFT
);
2316 pci_write_config32(ctrl
->f2
, DRAM_CONFIG_LOW
, dcl
);
2320 static int count_ones(uint32_t dimm_mask
)
2325 for (index
= 0; index
< (2 * DIMM_SOCKETS
); index
++, dimm_mask
>>= 1) {
2326 if (dimm_mask
& 1) {
2334 static void set_DramTerm(const struct mem_controller
*ctrl
,
2335 const struct mem_param
*param
, struct mem_info
*meminfo
)
2341 if (param
->divisor
== 100) { //DDR2 800
2342 if (meminfo
->is_Width128
) {
2343 if (count_ones(meminfo
->dimm_mask
& 0x0f)==2) {
2351 #if CONFIG_DIMM_SUPPORT == 0x0204
2352 odt
= 0x2; /* 150 ohms */
2355 dcl
= pci_read_config32(ctrl
->f2
, DRAM_CONFIG_LOW
);
2356 dcl
&= ~(DCL_DramTerm_MASK
<<DCL_DramTerm_SHIFT
);
2357 dcl
|= (odt
& DCL_DramTerm_MASK
) << (DCL_DramTerm_SHIFT
);
2358 pci_write_config32(ctrl
->f2
, DRAM_CONFIG_LOW
, dcl
);
2361 static void set_ecc(const struct mem_controller
*ctrl
,
2362 const struct mem_param
*param
, struct mem_info
*meminfo
)
2367 uint32_t dcl
, nbcap
;
2368 nbcap
= pci_read_config32(ctrl
->f3
, NORTHBRIDGE_CAP
);
2369 dcl
= pci_read_config32(ctrl
->f2
, DRAM_CONFIG_LOW
);
2370 dcl
&= ~DCL_DimmEccEn
;
2371 if (nbcap
& NBCAP_ECC
) {
2372 dcl
|= DCL_DimmEccEn
;
2374 #ifdef CMOS_VSTART_ECC_memory
2375 if (read_option(ECC_memory
, 1) == 0) {
2376 dcl
&= ~DCL_DimmEccEn
;
2378 #else // CMOS_VSTART_ECC_memory not defined
2379 #if !CONFIG_ECC_MEMORY
2380 dcl
&= ~DCL_DimmEccEn
;
2383 pci_write_config32(ctrl
->f2
, DRAM_CONFIG_LOW
, dcl
);
2385 meminfo
->is_ecc
= 1;
2386 if (!(dcl
& DCL_DimmEccEn
)) {
2387 meminfo
->is_ecc
= 0;
2388 printk(BIOS_DEBUG
, "set_ecc: ECC disabled\n");
2389 return; // already disabled the ECC, so don't need to read SPD any more
2392 for (i
= 0; i
< DIMM_SOCKETS
; i
++) {
2393 u32 spd_device
= ctrl
->channel0
[i
];
2394 if (!(meminfo
->dimm_mask
& (1 << i
))) {
2395 if (meminfo
->dimm_mask
& (1 << (DIMM_SOCKETS
+ i
))) { /* channelB only? */
2396 spd_device
= ctrl
->channel1
[i
];
2397 printk(BIOS_DEBUG
, "set_ecc spd_device: 0x%x\n", spd_device
);
2403 value
= spd_read_byte(ctrl
->channel0
[i
], SPD_DIMM_CONF_TYPE
);
2405 if (!(value
& SPD_DIMM_CONF_TYPE_ECC
)) {
2406 dcl
&= ~DCL_DimmEccEn
;
2407 pci_write_config32(ctrl
->f2
, DRAM_CONFIG_LOW
, dcl
);
2408 meminfo
->is_ecc
= 0;
2416 static int update_dimm_Twtr(const struct mem_controller
*ctrl
,
2417 const struct mem_param
*param
, int i
, long dimm_mask
)
2419 return update_dimm_TT_1_4(ctrl
, param
, i
, dimm_mask
, DRAM_TIMING_HIGH
, SPD_TWTR
, DTH_TWTR_SHIFT
, DTH_TWTR_MASK
, DTH_TWTR_BASE
, DTH_TWTR_MIN
, DTH_TWTR_MAX
);
2422 static void set_TT(const struct mem_controller
*ctrl
,
2423 const struct mem_param
*param
, unsigned TT_REG
, unsigned TT_SHIFT
,
2424 unsigned TT_MASK
, unsigned TT_BASE
, unsigned TT_MIN
, unsigned TT_MAX
,
2425 unsigned val
, const char *str
)
2429 if ((val
< TT_MIN
) || (val
> TT_MAX
)) {
2430 printk(BIOS_ERR
, "%s", str
);
2434 reg
= pci_read_config32(ctrl
->f2
, TT_REG
);
2435 reg
&= ~(TT_MASK
<< TT_SHIFT
);
2436 reg
|= ((val
- TT_BASE
) << TT_SHIFT
);
2437 pci_write_config32(ctrl
->f2
, TT_REG
, reg
);
2442 static void set_TrwtTO(const struct mem_controller
*ctrl
,
2443 const struct mem_param
*param
)
2445 set_TT(ctrl
, param
, DRAM_TIMING_HIGH
, DTH_TRWTTO_SHIFT
, DTH_TRWTTO_MASK
,DTH_TRWTTO_BASE
, DTH_TRWTTO_MIN
, DTH_TRWTTO_MAX
, param
->TrwtTO
, "TrwtTO");
2449 static void set_Twrrd(const struct mem_controller
*ctrl
, const struct mem_param
*param
)
2451 set_TT(ctrl
, param
, DRAM_TIMING_HIGH
, DTH_TWRRD_SHIFT
, DTH_TWRRD_MASK
,DTH_TWRRD_BASE
, DTH_TWRRD_MIN
, DTH_TWRRD_MAX
, param
->Twrrd
, "Twrrd");
2455 static void set_Twrwr(const struct mem_controller
*ctrl
, const struct mem_param
*param
)
2457 set_TT(ctrl
, param
, DRAM_TIMING_HIGH
, DTH_TWRWR_SHIFT
, DTH_TWRWR_MASK
,DTH_TWRWR_BASE
, DTH_TWRWR_MIN
, DTH_TWRWR_MAX
, param
->Twrwr
, "Twrwr");
2460 static void set_Trdrd(const struct mem_controller
*ctrl
, const struct mem_param
*param
)
2462 set_TT(ctrl
, param
, DRAM_TIMING_HIGH
, DTH_TRDRD_SHIFT
, DTH_TRDRD_MASK
,DTH_TRDRD_BASE
, DTH_TRDRD_MIN
, DTH_TRDRD_MAX
, param
->Trdrd
, "Trdrd");
2465 static void set_DcqBypassMax(const struct mem_controller
*ctrl
, const struct mem_param
*param
)
2467 set_TT(ctrl
, param
, DRAM_CONFIG_HIGH
, DCH_DcqBypassMax_SHIFT
, DCH_DcqBypassMax_MASK
,DCH_DcqBypassMax_BASE
, DCH_DcqBypassMax_MIN
, DCH_DcqBypassMax_MAX
, param
->DcqByPassMax
, "DcqBypassMax"); // value need to be in CMOS
2470 static void set_Tfaw(const struct mem_controller
*ctrl
, const struct mem_param
*param
, struct mem_info
*meminfo
)
2472 static const uint8_t faw_1k
[] = {8, 10, 13, 14};
2473 static const uint8_t faw_2k
[] = {10, 14, 17, 18};
2474 unsigned memclkfreq_index
;
2478 memclkfreq_index
= param
->dch_memclk
;
2480 if (meminfo
->page_1k_mask
!= 0) { //1k page
2481 faw
= faw_1k
[memclkfreq_index
];
2483 faw
= faw_2k
[memclkfreq_index
];
2486 set_TT(ctrl
, param
, DRAM_CONFIG_HIGH
, DCH_FourActWindow_SHIFT
, DCH_FourActWindow_MASK
, DCH_FourActWindow_BASE
, DCH_FourActWindow_MIN
, DCH_FourActWindow_MAX
, faw
, "FourActWindow");
/* Program the MaxAsyncLat field of the DRAM Config High register:
 * clear the field, then write (async_lat - BASE) into it.
 * NOTE(review): this extract has lines dropped by the conversion — the
 * declaration and assignment of async_lat are not visible here.  Compare
 * against the upstream file before editing. */
2489 static void set_max_async_latency(const struct mem_controller
*ctrl
, const struct mem_param
*param
)
2495 dch
= pci_read_config32(ctrl
->f2
, DRAM_CONFIG_HIGH
);
2496 dch
&= ~(DCH_MaxAsyncLat_MASK
<< DCH_MaxAsyncLat_SHIFT
);
2498 //FIXME: We need to use Max of DqsRcvEnDelay + 6ns here: After trainning and get that from index reg 0x10, 0x13, 0x16, 0x19, 0x30, 0x33, 0x36, 0x39
2502 dch
|= ((async_lat
- DCH_MaxAsyncLat_BASE
) << DCH_MaxAsyncLat_SHIFT
);
/* Write the updated value back to DRAM Config High. */
2503 pci_write_config32(ctrl
->f2
, DRAM_CONFIG_HIGH
, dch
);
/* Enable 2T (slow access) mode in DRAM Config High.  Only compiled in
 * for unbuffered-DIMM configurations (see the #if guard below).
 * NOTE(review): the statement that actually sets the slow-access bit
 * between the read and the write-back was dropped by the extraction —
 * confirm against the upstream file. */
2506 #if (CONFIG_DIMM_SUPPORT & 0x0100)==0x0000 /* 2T mode only used for unbuffered DIMM */
2507 static void set_SlowAccessMode(const struct mem_controller
*ctrl
)
2511 dch
= pci_read_config32(ctrl
->f2
, DRAM_CONFIG_HIGH
);
2515 pci_write_config32(ctrl
->f2
, DRAM_CONFIG_HIGH
, dch
);
2520 DRAM_OUTPUT_DRV_COMP_CTRL 0, 0x20
2521 DRAM_ADDR_TIMING_CTRL 04, 0x24
/* Program the per-channel Output Driver Compensation Control (index 0x00
 * / 0x20) and Address Timing Control (index 0x04 / 0x24) registers,
 * reached through the F2 index/data pair at offsets 0x98/0x9c.
 * The values (dword = ODC, dwordx = address timing) are selected from
 * the memory clock, the DIMM population mask and the rank/width masks;
 * the selection tables differ per CONFIG_DIMM_SUPPORT build variant.
 * NOTE(review): many lines (declarations of dword/dwordx, braces, breaks
 * and several assignments) were dropped by the extraction — compare with
 * upstream before editing. */
2523 static void set_misc_timing(const struct mem_controller
*ctrl
, struct mem_info
*meminfo
)
2527 #if (CONFIG_DIMM_SUPPORT & 0x0100)==0x0000 /* 2T mode only used for unbuffered DIMM */
2528 unsigned SlowAccessMode
= 0;
/* --- Registered DDR2 variant: pick address-timing value by clock and
 *     population. --- */
2531 #if CONFIG_DIMM_SUPPORT==0x0104 /* DDR2 and REG */
2532 long dimm_mask
= meminfo
->dimm_mask
& 0x0f;
2535 dwordx
= 0x002f0000;
2536 switch (meminfo
->memclk_set
) {
2537 case DCH_MemClkFreq_266MHz
:
2538 if ( (dimm_mask
== 0x03) || (dimm_mask
== 0x02) || (dimm_mask
== 0x01)) {
2539 dwordx
= 0x002f2700;
2542 case DCH_MemClkFreq_333MHz
:
2543 if ( (dimm_mask
== 0x03) || (dimm_mask
== 0x02) || (dimm_mask
== 0x01)) {
2544 if ((meminfo
->single_rank_mask
& 0x03)!=0x03) { //any double rank there?
2545 dwordx
= 0x002f2f00;
2549 case DCH_MemClkFreq_400MHz
:
2550 dwordx
= 0x002f3300;
/* --- SO-DIMM (S1G1) variant: selection additionally depends on the
 *     x4/x16 width masks and the single-rank mask. --- */
2556 #if CONFIG_DIMM_SUPPORT==0x0204 /* DDR2 and SO-DIMM, S1G1 */
2558 dwordx
= 0x002F2F00;
2560 switch (meminfo
->memclk_set
) {
2561 case DCH_MemClkFreq_200MHz
: /* nothing to be set here */
2563 case DCH_MemClkFreq_266MHz
:
2564 if ((meminfo
->single_rank_mask
== 0)
2565 && (meminfo
->x4_mask
== 0) && (meminfo
->x16_mask
))
2566 dwordx
= 0x002C2C00; /* Double rank x8 */
2567 /* else SRx16, SRx8, DRx16 == 0x002F2F00 */
2569 case DCH_MemClkFreq_333MHz
:
2570 if ((meminfo
->single_rank_mask
== 1)
2571 && (meminfo
->x16_mask
== 1)) /* SR x16 */
2572 dwordx
= 0x00272700;
2573 else if ((meminfo
->x4_mask
== 0) && (meminfo
->x16_mask
== 0)
2574 && (meminfo
->single_rank_mask
== 0)) { /* DR x8 */
2576 dwordx
= 0x00002800;
2577 } else { /* SR x8, DR x16 */
2578 dwordx
= 0x002A2A00;
2581 case DCH_MemClkFreq_400MHz
:
2582 if ((meminfo
->single_rank_mask
== 1)
2583 && (meminfo
->x16_mask
== 1)) /* SR x16 */
2584 dwordx
= 0x00292900;
2585 else if ((meminfo
->x4_mask
== 0) && (meminfo
->x16_mask
== 0)
2586 && (meminfo
->single_rank_mask
== 0)) { /* DR x8 */
2588 dwordx
= 0x00002A00;
2589 } else { /* SR x8, DR x16 */
2590 dwordx
= 0x002A2A00;
/* --- Unbuffered DDR2 variant: the most detailed table, keyed by rank
 *     and width mixes per clock speed. --- */
2596 #if CONFIG_DIMM_SUPPORT==0x0004 /* DDR2 and unbuffered */
2597 long dimm_mask
= meminfo
->dimm_mask
& 0x0f;
2598 /* for UNBUF DIMM */
2600 dwordx
= 0x002f2f00;
2601 switch (meminfo
->memclk_set
) {
2602 case DCH_MemClkFreq_200MHz
:
2603 if (dimm_mask
== 0x03) {
2608 case DCH_MemClkFreq_266MHz
:
2609 if (dimm_mask
== 0x03) {
2612 if ((meminfo
->x4_mask
== 0 ) && (meminfo
->x16_mask
== 0)) {
2613 switch (meminfo
->single_rank_mask
) {
2615 dwordx
= 0x00002f00; //x8 single Rank
2618 dwordx
= 0x00342f00; //x8 double Rank
2621 dwordx
= 0x00372f00; //x8 single Rank and double Rank mixed
2623 } else if ((meminfo
->x4_mask
== 0 ) && (meminfo
->x16_mask
== 0x01) && (meminfo
->single_rank_mask
== 0x01)) {
2624 dwordx
= 0x00382f00; //x8 Double Rank and x16 single Rank mixed
2625 } else if ((meminfo
->x4_mask
== 0 ) && (meminfo
->x16_mask
== 0x02) && (meminfo
->single_rank_mask
== 0x02)) {
2626 dwordx
= 0x00382f00; //x16 single Rank and x8 double Rank mixed
2630 if ((meminfo
->x4_mask
== 0 ) && (meminfo
->x16_mask
== 0x00) && ((meminfo
->single_rank_mask
== 0x01)||(meminfo
->single_rank_mask
== 0x02))) { //x8 single rank
2631 dwordx
= 0x002f2f00;
2633 dwordx
= 0x002b2f00;
2637 case DCH_MemClkFreq_333MHz
:
2638 dwordx
= 0x00202220;
2639 if (dimm_mask
== 0x03) {
2642 if ((meminfo
->x4_mask
== 0 ) && (meminfo
->x16_mask
== 0)) {
2643 switch (meminfo
->single_rank_mask
) {
2645 dwordx
= 0x00302220; //x8 single Rank
2648 dwordx
= 0x002b2220; //x8 double Rank
2651 dwordx
= 0x002a2220; //x8 single Rank and double Rank mixed
2653 } else if ((meminfo
->x4_mask
== 0) && (meminfo
->x16_mask
== 0x01) && (meminfo
->single_rank_mask
== 0x01)) {
2654 dwordx
= 0x002c2220; //x8 Double Rank and x16 single Rank mixed
2655 } else if ((meminfo
->x4_mask
== 0) && (meminfo
->x16_mask
== 0x02) && (meminfo
->single_rank_mask
== 0x02)) {
2656 dwordx
= 0x002c2220; //x16 single Rank and x8 double Rank mixed
2660 case DCH_MemClkFreq_400MHz
:
2661 dwordx
= 0x00202520;
2663 if (dimm_mask
== 0x03) {
/* Debug dump of the masks and the two chosen register values. */
2671 printk_raminit("\tdimm_mask = %08x\n", meminfo
->dimm_mask
);
2672 printk_raminit("\tx4_mask = %08x\n", meminfo
->x4_mask
);
2673 printk_raminit("\tx16_mask = %08x\n", meminfo
->x16_mask
);
2674 printk_raminit("\tsingle_rank_mask = %08x\n", meminfo
->single_rank_mask
);
2675 printk_raminit("\tODC = %08x\n", dword
);
2676 printk_raminit("\tAddr Timing= %08x\n", dwordx
);
/* Apply 2T mode if the table selection above asked for it. */
2679 #if (CONFIG_DIMM_SUPPORT & 0x0100)==0x0000 /* 2T mode only used for unbuffered DIMM */
2680 if (SlowAccessMode
) {
2681 set_SlowAccessMode(ctrl
);
/* Finally, write the chosen values through the F2 0x98/0x9c index
 * mechanism — only channel B indices when only channel B is populated,
 * otherwise channel A and (for 128-bit width) channel B as well. */
2685 if (!(meminfo
->dimm_mask
& 0x0F) && (meminfo
->dimm_mask
& 0xF0)) { /* channelB only? */
2686 /* Program the Output Driver Compensation Control Registers (Function 2:Offset 0x9c, index 0, 0x20) */
2687 pci_write_config32_index_wait(ctrl
->f2
, 0x98, 0x20, dword
);
2689 /* Program the Address Timing Control Registers (Function 2:Offset 0x9c, index 4, 0x24) */
2690 pci_write_config32_index_wait(ctrl
->f2
, 0x98, 0x24, dwordx
);
2692 /* Program the Output Driver Compensation Control Registers (Function 2:Offset 0x9c, index 0, 0x20) */
2693 pci_write_config32_index_wait(ctrl
->f2
, 0x98, 0, dword
);
2694 if (meminfo
->is_Width128
) {
2695 pci_write_config32_index_wait(ctrl
->f2
, 0x98, 0x20, dword
);
2698 /* Program the Address Timing Control Registers (Function 2:Offset 0x9c, index 4, 0x24) */
2699 pci_write_config32_index_wait(ctrl
->f2
, 0x98, 4, dwordx
);
2700 if (meminfo
->is_Width128
) {
2701 pci_write_config32_index_wait(ctrl
->f2
, 0x98, 0x24, dwordx
);
/* Conditionally set the RDqsEn bit in DRAM Config High — only built for
 * socket type 0x10 (per the #if), and only applied when neither x4 nor
 * x16 devices are present (i.e. all-x8 population, per the condition).
 * NOTE(review): the declaration of dch and the statement(s) inside the
 * if-body were dropped by the extraction — confirm against upstream. */
2707 static void set_RDqsEn(const struct mem_controller
*ctrl
,
2708 const struct mem_param
*param
, struct mem_info
*meminfo
)
2710 #if CONFIG_CPU_SOCKET_TYPE==0x10
2711 //only need to set for reg and x8
2714 dch
= pci_read_config32(ctrl
->f2
, DRAM_CONFIG_HIGH
);
2717 if ((!meminfo
->x4_mask
) && (!meminfo
->x16_mask
)) {
2721 pci_write_config32(ctrl
->f2
, DRAM_CONFIG_HIGH
, dch
);
/* Hard-code the idle cycle limit (ILD_lmt field) in the DRAM Control
 * Misc register to the 16-clock value, as recommended by AMD.
 * NOTE(review): the gap between fused source lines 2732 and 2734
 * indicates a dropped statement here — confirm against upstream. */
2725 static void set_idle_cycle_limit(const struct mem_controller
*ctrl
,
2726 const struct mem_param
*param
)
2729 /* AMD says to Hardcode this */
2730 dcm
= pci_read_config32(ctrl
->f2
, DRAM_CTRL_MISC
);
2731 dcm
&= ~(DCM_ILD_lmt_MASK
<< DCM_ILD_lmt_SHIFT
);
2732 dcm
|= DCM_ILD_lmt_16
<< DCM_ILD_lmt_SHIFT
;
2734 pci_write_config32(ctrl
->f2
, DRAM_CTRL_MISC
, dcm
);
2737 static void set_RdWrQByp(const struct mem_controller
*ctrl
,
2738 const struct mem_param
*param
)
2740 set_TT(ctrl
, param
, DRAM_CTRL_MISC
, DCM_RdWrQByp_SHIFT
, DCM_RdWrQByp_MASK
,0, 0, 3, 2, "RdWrQByp");
/* Walk every DIMM socket, refining the DRAM timing registers from each
 * present DIMM's SPD data (update_dimm_* helpers).  A helper returning
 * <= 0 jumps to dimm_err, which disables that DIMM and clears it from
 * the mask.  Afterwards the node-wide timing/config registers are
 * programmed, and the (possibly reduced) dimm_mask is returned.
 * NOTE(review): loop-variable declarations, the empty-socket `continue`,
 * the dimm_err label and some set_* calls between the fused source line
 * numbers were dropped by the extraction — confirm against upstream. */
2743 static long spd_set_dram_timing(const struct mem_controller
*ctrl
,
2744 const struct mem_param
*param
,
2745 struct mem_info
*meminfo
)
2749 for (i
= 0; i
< DIMM_SOCKETS
; i
++) {
/* Skip sockets with no DIMM on either channel. */
2751 if (!(meminfo
->dimm_mask
& (1 << i
)) &&
2752 !(meminfo
->dimm_mask
& (1 << (DIMM_SOCKETS
+ i
))) ) {
2755 printk_raminit("spd_set_dram_timing dimm socket: %08x\n", i
);
2756 /* DRAM Timing Low Register */
2757 printk_raminit("\ttrc\n");
2758 if ((rc
= update_dimm_Trc (ctrl
, param
, i
, meminfo
->dimm_mask
)) <= 0) goto dimm_err
;
2760 printk_raminit("\ttrcd\n");
2761 if ((rc
= update_dimm_Trcd(ctrl
, param
, i
, meminfo
->dimm_mask
)) <= 0) goto dimm_err
;
2763 printk_raminit("\ttrrd\n");
2764 if ((rc
= update_dimm_Trrd(ctrl
, param
, i
, meminfo
->dimm_mask
)) <= 0) goto dimm_err
;
2766 printk_raminit("\ttras\n");
2767 if ((rc
= update_dimm_Tras(ctrl
, param
, i
, meminfo
->dimm_mask
)) <= 0) goto dimm_err
;
2769 printk_raminit("\ttrp\n");
2770 if ((rc
= update_dimm_Trp (ctrl
, param
, i
, meminfo
->dimm_mask
)) <= 0) goto dimm_err
;
2772 printk_raminit("\ttrtp\n");
2773 if ((rc
= update_dimm_Trtp(ctrl
, param
, i
, meminfo
)) <= 0) goto dimm_err
;
2775 printk_raminit("\ttwr\n");
2776 if ((rc
= update_dimm_Twr (ctrl
, param
, i
, meminfo
->dimm_mask
)) <= 0) goto dimm_err
;
2778 /* DRAM Timing High Register */
2779 printk_raminit("\ttref\n");
2780 if ((rc
= update_dimm_Tref(ctrl
, param
, i
, meminfo
->dimm_mask
)) <= 0) goto dimm_err
;
2782 printk_raminit("\ttwtr\n");
2783 if ((rc
= update_dimm_Twtr(ctrl
, param
, i
, meminfo
->dimm_mask
)) <= 0) goto dimm_err
;
2785 printk_raminit("\ttrfc\n");
2786 if ((rc
= update_dimm_Trfc(ctrl
, param
, i
, meminfo
)) <= 0) goto dimm_err
;
2788 /* DRAM Config Low */
/* dimm_err path: report, then disable the offending DIMM and drop it
 * from the mask before continuing with the next socket. */
2792 printk(BIOS_DEBUG
, "spd_set_dram_timing dimm_err!\n");
2796 meminfo
->dimm_mask
= disable_dimm(ctrl
, i
, meminfo
);
2799 get_extra_dimm_mask(ctrl
, meminfo
); // will be used by RDqsEn and dimm_x4
2800 /* DRAM Timing Low Register */
2802 /* DRAM Timing High Register */
2803 set_TrwtTO(ctrl
, param
);
2804 set_Twrrd (ctrl
, param
);
2805 set_Twrwr (ctrl
, param
);
2806 set_Trdrd (ctrl
, param
);
2808 set_4RankRDimm(ctrl
, param
, meminfo
);
2810 /* DRAM Config High */
2811 set_Tfaw(ctrl
, param
, meminfo
);
2812 set_DcqBypassMax(ctrl
, param
);
2813 set_max_async_latency(ctrl
, param
);
2814 set_RDqsEn(ctrl
, param
, meminfo
);
2816 /* DRAM Config Low */
2817 set_ecc(ctrl
, param
, meminfo
);
2818 set_dimm_x4(ctrl
, param
, meminfo
);
2819 set_DramTerm(ctrl
, param
, meminfo
);
2821 /* DRAM Control Misc */
2822 set_idle_cycle_limit(ctrl
, param
);
2823 set_RdWrQByp(ctrl
, param
);
2825 return meminfo
->dimm_mask
;
/* Top-level per-node SPD programming: detect the DIMMs, then run the
 * SPD pipeline (2-channel enable, RAM size, unbuffered handling, memory
 * clock selection, DRAM timing), tracking the surviving dimm_mask at
 * each step.  A step returning -1 falls through to the die() at the end
 * (unrecoverable SPD error).
 * NOTE(review): the early `return`s, error label and some braces were
 * dropped by the extraction — confirm control flow against upstream. */
2828 static void sdram_set_spd_registers(const struct mem_controller
*ctrl
,
2829 struct sys_info
*sysinfo
)
2831 struct spd_set_memclk_result result
;
2832 const struct mem_param
*param
;
2833 struct mem_param paramx
;
2834 struct mem_info
*meminfo
;
/* Nothing to do if this node's controller is absent. */
2836 if (!sysinfo
->ctrl_present
[ctrl
->node_id
]) {
2840 meminfo
= &sysinfo
->meminfo
[ctrl
->node_id
];
2842 printk(BIOS_DEBUG
, "sdram_set_spd_registers: paramx :%p\n", ¶mx
);
2844 activate_spd_rom(ctrl
);
2845 meminfo
->dimm_mask
= spd_detect_dimms(ctrl
);
2847 printk_raminit("sdram_set_spd_registers: dimm_mask=0x%x\n", meminfo
->dimm_mask
);
/* No DIMM bit set on either channel: no memory on this cpu. */
2849 if (!(meminfo
->dimm_mask
& ((1 << 2*DIMM_SOCKETS
) - 1)))
2851 printk(BIOS_DEBUG
, "No memory for this cpu\n");
2854 meminfo
->dimm_mask
= spd_enable_2channels(ctrl
, meminfo
);
2855 printk_raminit("spd_enable_2channels: dimm_mask=0x%x\n", meminfo
->dimm_mask
);
2856 if (meminfo
->dimm_mask
== -1)
2859 meminfo
->dimm_mask
= spd_set_ram_size(ctrl
, meminfo
);
2860 printk_raminit("spd_set_ram_size: dimm_mask=0x%x\n", meminfo
->dimm_mask
);
2861 if (meminfo
->dimm_mask
== -1)
2864 meminfo
->dimm_mask
= spd_handle_unbuffered_dimms(ctrl
, meminfo
);
2865 printk_raminit("spd_handle_unbuffered_dimms: dimm_mask=0x%x\n", meminfo
->dimm_mask
);
2866 if (meminfo
->dimm_mask
== -1)
2869 result
= spd_set_memclk(ctrl
, meminfo
);
2870 param
= result
.param
;
2871 meminfo
->dimm_mask
= result
.dimm_mask
;
2872 printk_raminit("spd_set_memclk: dimm_mask=0x%x\n", meminfo
->dimm_mask
);
2873 if (meminfo
->dimm_mask
== -1)
2876 //store memclk set to sysinfo, incase we need rebuilt param again
2877 meminfo
->memclk_set
= param
->dch_memclk
;
/* Work on a mutable copy of the selected parameter set. */
2879 memcpy(¶mx
, param
, sizeof(paramx
));
2881 paramx
.divisor
= get_exact_divisor(param
->dch_memclk
, paramx
.divisor
);
2883 meminfo
->dimm_mask
= spd_set_dram_timing(ctrl
, ¶mx
, meminfo
);
2884 printk_raminit("spd_set_dram_timing: dimm_mask=0x%x\n", meminfo
->dimm_mask
);
2885 if (meminfo
->dimm_mask
== -1)
2888 order_dimms(ctrl
, meminfo
);
2892 /* Unrecoverable error reading SPD data */
2893 die("Unrecoverable error reading SPD data. No qualified DIMMs?");
2897 #define TIMEOUT_LOOPS 300000
2899 #include "raminit_f_dqs.c"
2901 #if CONFIG_HW_MEM_HOLE_SIZEK != 0
/* Hoist the memory above the hardware memory hole: shift the DRAM
 * base/limit ranges of every node above node i up by carry_over
 * (the amount of memory displaced below 4G, in KiB; registers store
 * address[39:24] so KiB values are shifted left by 2), then grow node
 * i's limit and program its DRAM Hole Address Register (F1 0xf0).
 * NOTE(review): declarations of ii/j/hoist/basek, the assignment of
 * `dev`, the `continue`s and the return statement were dropped by the
 * extraction — confirm against upstream. */
2902 static uint32_t hoist_memory(int controllers
, const struct mem_controller
*ctrl
,unsigned hole_startk
, int i
)
2905 uint32_t carry_over
;
2907 uint32_t base
, limit
;
2912 carry_over
= (4*1024*1024) - hole_startk
;
/* Shift every enabled node range above node i upward by carry_over. */
2914 for (ii
=controllers
- 1;ii
>i
;ii
--) {
2915 base
= pci_read_config32(ctrl
[0].f1
, 0x40 + (ii
<< 3));
2916 if ((base
& ((1<<1)|(1<<0))) != ((1<<1)|(1<<0))) {
2919 limit
= pci_read_config32(ctrl
[0].f1
, 0x44 + (ii
<< 3));
2920 limit
+= (carry_over
<< 2 );
2921 base
+= (carry_over
<< 2 );
/* Mirror the updated mapping into every node's F1 registers. */
2922 for (j
= 0; j
< controllers
; j
++) {
2923 pci_write_config32(ctrl
[j
].f1
, 0x44 + (ii
<< 3), limit
);
2924 pci_write_config32(ctrl
[j
].f1
, 0x40 + (ii
<< 3), base
);
/* Grow node i's own limit by the displaced amount. */
2927 limit
= pci_read_config32(ctrl
[0].f1
, 0x44 + (i
<< 3));
2928 limit
+= (carry_over
<< 2);
2929 for (j
= 0; j
< controllers
; j
++) {
2930 pci_write_config32(ctrl
[j
].f1
, 0x44 + (i
<< 3), limit
);
2933 base
= pci_read_config32(dev
, 0x40 + (i
<< 3));
2934 basek
= (base
& 0xffff0000) >> 2;
2935 if (basek
== hole_startk
) {
2936 //don't need set memhole here, because hole off set will be 0, overflow
2937 //so need to change base reg instead, new basek will be 4*1024*1024
2939 base
|= (4*1024*1024)<<2;
2940 for (j
= 0; j
< controllers
; j
++) {
2941 pci_write_config32(ctrl
[j
].f1
, 0x40 + (i
<<3), base
);
/* Compose the DRAM Hole Address Register value: hole start in
 * [31:24], hole offset in [15:8] (plus enable bits not visible in
 * this extract). */
2944 hoist
= /* hole start address */
2945 ((hole_startk
<< 10) & 0xff000000) +
2946 /* hole address to memory controller address */
2947 (((basek
+ carry_over
) >> 6) & 0x0000ff00) +
2950 pci_write_config32(dev
, 0xf0, hoist
);
/* Configure the hardware memory hole below 4G.  Computes the default
 * hole start (4G minus CONFIG_HW_MEM_HOLE_SIZEK), optionally nudges it
 * down so it does not coincide with a node's base address, then finds
 * the node whose range contains the hole start, hoists its memory and
 * reprograms TOP_MEM.
 * NOTE(review): declarations (i, base, basek_pri, end_k), `continue`s
 * and the basek_pri bookkeeping were dropped by the extraction —
 * confirm against upstream. */
2956 static void set_hw_mem_hole(int controllers
, const struct mem_controller
*ctrl
)
2959 uint32_t hole_startk
;
2962 hole_startk
= 4*1024*1024 - CONFIG_HW_MEM_HOLE_SIZEK
;
2964 printk_raminit("Handling memory hole at 0x%08x (default)\n", hole_startk
);
2965 #if CONFIG_HW_MEM_HOLE_SIZE_AUTO_INC
2966 /* We need to double check if the hole_startk is valid, if it is equal
2967 to basek, we need to decrease it some */
2969 for (i
=0; i
<controllers
; i
++) {
2972 base
= pci_read_config32(ctrl
[0].f1
, 0x40 + (i
<< 3));
/* Skip nodes whose DRAM range is not both read- and write-enabled. */
2973 if ((base
& ((1<<1)|(1<<0))) != ((1<<1)|(1<<0))) {
2976 base_k
= (base
& 0xffff0000) >> 2;
2977 if (base_k
== hole_startk
) {
2978 /* decrease mem hole startk to make sure it is
2979 on middle of previous node */
2980 hole_startk
-= (base_k
- basek_pri
) >> 1;
2981 break; //only one hole
2985 printk_raminit("Handling memory hole at 0x%08x (adjusted)\n", hole_startk
);
2987 /* find node index that need do set hole */
2988 for (i
=0; i
< controllers
; i
++) {
2989 uint32_t base
, limit
;
2990 unsigned base_k
, limit_k
;
2991 base
= pci_read_config32(ctrl
[0].f1
, 0x40 + (i
<< 3));
2992 if ((base
& ((1 << 1) | (1 << 0))) != ((1 << 1) | (1 << 0))) {
2995 limit
= pci_read_config32(ctrl
[0].f1
, 0x44 + (i
<< 3));
2996 base_k
= (base
& 0xffff0000) >> 2;
2997 limit_k
= ((limit
+ 0x00010000) & 0xffff0000) >> 2;
/* This node's [base_k, limit_k) straddles the hole start: hoist
 * its memory and lower TOP_MEM to the hole start. */
2998 if ((base_k
<= hole_startk
) && (limit_k
> hole_startk
)) {
3000 hoist_memory(controllers
, ctrl
, hole_startk
, i
);
3001 end_k
= memory_end_k(ctrl
, controllers
);
3002 set_top_mem(end_k
, hole_startk
);
3003 break; //only one hole
3009 #if CONFIG_HAVE_ACPI_RESUME
3010 #include "exit_from_self.c"
/* Bring up DRAM on all controllers: enable memory clocks and program
 * misc timing per node, start DRAM init (DCL_InitDram) with optional
 * ECC/chipkill and the rev-F0/F1 DqsRcvEnTrain workaround, poll for
 * init and memory-clear completion, set up the hardware memory hole,
 * record TOP_MEM/TOP_MEM2 in sysinfo, and finally run (or schedule)
 * DQS training according to CONFIG_MEM_TRAIN_SEQ.  On S3 resume the
 * normal path is bypassed via exit_from_self().
 * NOTE(review): many lines (declarations of i/dch/dcl/dcm/mnc/dword/
 * loops/msr/tsc variables, `continue`s, `return`s, braces and some
 * assignments) were dropped by the extraction — confirm control flow
 * against upstream before editing. */
3013 static void sdram_enable(int controllers
, const struct mem_controller
*ctrl
,
3014 struct sys_info
*sysinfo
)
3017 int suspend
= acpi_is_wakeup_s3();
3019 #if K8_REV_F_SUPPORT_F0_F1_WORKAROUND == 1
3020 unsigned cpu_f0_f1
[8];
3021 /* FIXME: How about 32 node machine later? */
3024 printk(BIOS_DEBUG
, "sdram_enable: tsc0[8]: %p", &tsc0
[0]);
3028 /* Error if I don't have memory */
3029 if (memory_end_k(ctrl
, controllers
) == 0) {
3033 /* Before enabling memory start the memory clocks */
3034 for (i
= 0; i
< controllers
; i
++) {
3036 if (!sysinfo
->ctrl_present
[ i
])
3038 dch
= pci_read_config32(ctrl
[i
].f2
, DRAM_CONFIG_HIGH
);
3040 /* if no memory installed, disabled the interface */
3041 if (sysinfo
->meminfo
[i
].dimm_mask
==0x00){
3042 dch
|= DCH_DisDramInterface
;
3043 pci_write_config32(ctrl
[i
].f2
, DRAM_CONFIG_HIGH
, dch
);
/* Memory present: validate the memory clock and program the
 * address-timing / output-driver compensation values. */
3046 dch
|= DCH_MemClkFreqVal
;
3047 pci_write_config32(ctrl
[i
].f2
, DRAM_CONFIG_HIGH
, dch
);
3048 /* address timing and Output driver comp Control */
3049 set_misc_timing(ctrl
+i
, sysinfo
->meminfo
+i
);
3053 /* We need to wait a minimum of 20 MEMCLKS to enable the InitDram */
3054 memreset(controllers
, ctrl
);
3056 /* lets override the rest of the routine */
3058 printk(BIOS_DEBUG
, "Wakeup!\n");
3059 exit_from_self(controllers
, ctrl
, sysinfo
);
3060 printk(BIOS_DEBUG
, "Mem running !\n");
/* Kick off DRAM initialization on every populated controller. */
3064 for (i
= 0; i
< controllers
; i
++) {
3066 if (!sysinfo
->ctrl_present
[ i
])
3068 /* Skip everything if I don't have any memory on this controller */
3069 dch
= pci_read_config32(ctrl
[i
].f2
, DRAM_CONFIG_HIGH
);
3070 if (!(dch
& DCH_MemClkFreqVal
)) {
3075 dcl
= pci_read_config32(ctrl
[i
].f2
, DRAM_CONFIG_LOW
);
3076 if (dcl
& DCL_DimmEccEn
) {
3078 printk(BIOS_SPEW
, "ECC enabled\n");
3079 mnc
= pci_read_config32(ctrl
[i
].f3
, MCA_NB_CONFIG
);
3081 if (dcl
& DCL_Width128
) {
3082 mnc
|= MNC_CHIPKILL_EN
;
3084 pci_write_config32(ctrl
[i
].f3
, MCA_NB_CONFIG
, mnc
);
3087 #if K8_REV_F_SUPPORT_F0_F1_WORKAROUND == 1
3088 cpu_f0_f1
[i
] = is_cpu_pre_f2_in_bsp(i
);
3090 //Rev F0/F1 workaround
3092 /* Set the DqsRcvEnTrain bit */
3093 dword
= pci_read_config32(ctrl
[i
].f2
, DRAM_CTRL
);
3094 dword
|= DC_DqsRcvEnTrain
;
3095 pci_write_config32(ctrl
[i
].f2
, DRAM_CTRL
, dword
);
/* Write DCL, then set InitDram to start initialization. */
3101 pci_write_config32(ctrl
[i
].f2
, DRAM_CONFIG_LOW
, dcl
);
3102 dcl
|= DCL_InitDram
;
3103 pci_write_config32(ctrl
[i
].f2
, DRAM_CONFIG_LOW
, dcl
);
/* Poll each controller until DRAM init completes (or times out),
 * then wait for the hardware memory-clear to finish. */
3106 for (i
= 0; i
< controllers
; i
++) {
3108 if (!sysinfo
->ctrl_present
[ i
])
3110 /* Skip everything if I don't have any memory on this controller */
3111 if (sysinfo
->meminfo
[i
].dimm_mask
==0x00) continue;
3113 printk(BIOS_DEBUG
, "Initializing memory: ");
3116 dcl
= pci_read_config32(ctrl
[i
].f2
, DRAM_CONFIG_LOW
);
3118 if ((loops
& 1023) == 0) {
3119 printk(BIOS_DEBUG
, ".");
3121 } while(((dcl
& DCL_InitDram
) != 0) && (loops
< TIMEOUT_LOOPS
));
3122 if (loops
>= TIMEOUT_LOOPS
) {
3123 printk(BIOS_DEBUG
, " failed\n");
3127 /* Wait until it is safe to touch memory */
3129 dcm
= pci_read_config32(ctrl
[i
].f2
, DRAM_CTRL_MISC
);
3130 } while(((dcm
& DCM_MemClrStatus
) == 0) /* || ((dcm & DCM_DramEnabled) == 0)*/ );
3132 #if K8_REV_F_SUPPORT_F0_F1_WORKAROUND == 1
/* TSC bookkeeping for the rev-F0/F1 workaround: record the elapsed
 * time (tsc - tsc0[i]) back into tsc0[i]. */
3136 print_debug_dqs_tsc("\nbegin tsc0", i
, tsc0
[i
].hi
, tsc0
[i
].lo
, 2);
3137 print_debug_dqs_tsc("end tsc ", i
, tsc
.hi
, tsc
.lo
, 2);
3139 if (tsc
.lo
<tsc0
[i
].lo
) {
3142 tsc
.lo
-= tsc0
[i
].lo
;
3143 tsc
.hi
-= tsc0
[i
].hi
;
3145 tsc0
[i
].lo
= tsc
.lo
;
3146 tsc0
[i
].hi
= tsc
.hi
;
3148 print_debug_dqs_tsc(" dtsc0", i
, tsc0
[i
].hi
, tsc0
[i
].lo
, 2);
3151 printk(BIOS_DEBUG
, " done\n");
3154 #if CONFIG_HW_MEM_HOLE_SIZEK != 0
3155 /* init hw mem hole here */
3156 /* DramHoleValid bit only can be set after MemClrStatus is set by Hardware */
3157 set_hw_mem_hole(controllers
, ctrl
);
3160 /* store tom to sysinfo, and it will be used by dqs_timing */
3164 msr
= rdmsr(TOP_MEM
);
3165 sysinfo
->tom_k
= ((msr
.hi
<<24) | (msr
.lo
>>8))>>2;
3168 msr
= rdmsr(TOP_MEM2
);
3169 sysinfo
->tom2_k
= ((msr
.hi
<<24)| (msr
.lo
>>8))>>2;
/* Mark populated controllers as needing DQS training (0x80). */
3172 for (i
= 0; i
< controllers
; i
++) {
3173 sysinfo
->mem_trained
[i
] = 0;
3175 if (!sysinfo
->ctrl_present
[ i
])
3178 /* Skip everything if I don't have any memory on this controller */
3179 if (sysinfo
->meminfo
[i
].dimm_mask
==0x00)
3182 sysinfo
->mem_trained
[i
] = 0x80; // mem need to be trained
3186 #if CONFIG_MEM_TRAIN_SEQ == 0
3187 #if K8_REV_F_SUPPORT_F0_F1_WORKAROUND == 1
3188 dqs_timing(controllers
, ctrl
, tsc0
, sysinfo
);
3190 dqs_timing(controllers
, ctrl
, sysinfo
);
3194 #if CONFIG_MEM_TRAIN_SEQ == 2
3195 /* need to enable mtrr, so dqs training could access the test address */
3196 setup_mtrr_dqs(sysinfo
->tom_k
, sysinfo
->tom2_k
);
3199 for (i
= 0; i
< controllers
; i
++) {
3200 /* Skip everything if I don't have any memory on this controller */
3201 if (sysinfo
->mem_trained
[i
]!=0x80)
3204 dqs_timing(i
, &ctrl
[i
], sysinfo
, 1);
3206 #if CONFIG_MEM_TRAIN_SEQ == 1
3207 break; // only train the first node with ram
3211 #if CONFIG_MEM_TRAIN_SEQ == 2
3212 clear_mtrr_dqs(sysinfo
->tom2_k
);
3217 #if CONFIG_MEM_TRAIN_SEQ != 1
3218 wait_all_core0_mem_trained(sysinfo
);
3223 void fill_mem_ctrl(int controllers
, struct mem_controller
*ctrl_a
,
3224 const uint16_t *spd_addr
)
3228 struct mem_controller
*ctrl
;
3229 for (i
=0;i
<controllers
; i
++) {
3232 ctrl
->f0
= PCI_DEV(0, 0x18+i
, 0);
3233 ctrl
->f1
= PCI_DEV(0, 0x18+i
, 1);
3234 ctrl
->f2
= PCI_DEV(0, 0x18+i
, 2);
3235 ctrl
->f3
= PCI_DEV(0, 0x18+i
, 3);
3237 if (spd_addr
== (void *)0) continue;
3239 for (j
=0;j
<DIMM_SOCKETS
;j
++) {
3240 ctrl
->channel0
[j
] = spd_addr
[(i
*2+0)*DIMM_SOCKETS
+ j
];
3241 ctrl
->channel1
[j
] = spd_addr
[(i
*2+1)*DIMM_SOCKETS
+ j
];