/*
 * This file is part of the coreboot project.
 *
 * Copyright (C) 2002 Linux Networx
 * (Written by Eric Biederman <ebiederman@lnxi.com> for Linux Networx)
 * Copyright (C) 2004 YingHai Lu
 * Copyright (C) 2008 Advanced Micro Devices, Inc.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; version 2 of the License.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */

#include <cpu/x86/cache.h>
#include <cpu/x86/mtrr.h>
#include <cpu/x86/tsc.h>
#include <cpu/amd/mtrr.h>

#include <lib.h>
#include <stdlib.h>
#include <arch/acpi.h>
#include "raminit.h"
#include "f.h"
#include <spd_ddr2.h>
#if CONFIG_HAVE_OPTION_TABLE
#include "option_table.h"
#endif

#if CONFIG_DEBUG_RAM_SETUP
#define printk_raminit(args...) printk(BIOS_DEBUG, args)
#else
#define printk_raminit(args...)
#endif

#include "f_pci.c"

/* For PCI_ADDR(0, 0x18, 2, 0x98) index
 * and PCI_ADDR(0, 0x18, 2, 0x9c) data:
 *
 * index:
 *	[29: 0] DctOffset (Dram Controller Offset)
 *	[30:30] DctAccessWrite (Dram Controller Read/Write Select)
 *		0 = read access
 *		1 = write access
 *	[31:31] DctAccessDone (Dram Controller Access Done)
 *		0 = access in progress
 *		1 = no access in progress
 * data:
 *	[31: 0] DctOffsetData (Dram Controller Offset Data)
 *
 * Read:
 *	- Write the register number to DctOffset with DctAccessWrite = 0
 *	- Poll DctAccessDone until it = 1
 *	- Read the data from DctOffsetData
 * Write:
 *	- Write the data to DctOffsetData
 *	- Write the register number to DctOffset with DctAccessWrite = 1
 *	- Poll DctAccessDone until it = 1
 */
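
/* A minimal sketch (not part of the original code) of how the indirect
 * access protocol above could be wrapped. It assumes the index/data pair
 * at F2:0x98/0x9c and the pci_read_config32()/pci_write_config32() helpers
 * used throughout this file; the function names are hypothetical and the
 * block is kept in #if 0 as illustration only.
 */
#if 0
static uint32_t dct_extra_read(const struct mem_controller *ctrl, uint32_t offset)
{
	/* Select the register; DctAccessWrite (bit 30) stays clear for a read */
	pci_write_config32(ctrl->f2, 0x98, offset & 0x3fffffff);
	/* Poll DctAccessDone (bit 31) until the access completes */
	while (!(pci_read_config32(ctrl->f2, 0x98) & 0x80000000))
		;
	return pci_read_config32(ctrl->f2, 0x9c);
}

static void dct_extra_write(const struct mem_controller *ctrl, uint32_t offset, uint32_t data)
{
	/* Stage the data, then start the write with DctAccessWrite (bit 30) set */
	pci_write_config32(ctrl->f2, 0x9c, data);
	pci_write_config32(ctrl->f2, 0x98, (offset & 0x3fffffff) | (1 << 30));
	while (!(pci_read_config32(ctrl->f2, 0x98) & 0x80000000))
		;
}
#endif
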
void setup_resource_map(const unsigned int *register_values, int max)
{
	int i;
	for (i = 0; i < max; i += 3) {
		device_t dev;
		unsigned where;
		unsigned long reg;
		dev = register_values[i] & ~0xff;
		where = register_values[i] & 0xff;
		reg = pci_read_config32(dev, where);
		reg &= register_values[i+1];
		reg |= register_values[i+2];
		pci_write_config32(dev, where, reg);
	}
}
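
/* Illustrative note (not from the original source): each triplet in
 * register_values is { PCI address, AND mask of bits to preserve, OR value
 * of bits to set }. For example, { PCI_ADDR(0, 0x18, 1, 0x44), 0x0000f8f8,
 * 0x00000000 } keeps only the reserved fields [7:3] and [15:11] of the
 * first DRAM Limit register and sets the destination node ID to 0.
 */
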
static int controller_present(const struct mem_controller *ctrl)
{
	/* Function 0 of the node reports AMD's vendor ID 0x1022 and the
	 * K8 HyperTransport configuration device ID 0x1100. */
	return pci_read_config32(ctrl->f0, 0) == 0x11001022;
}

static void sdram_set_registers(const struct mem_controller *ctrl, struct sys_info *sysinfo)
{
	static const unsigned int register_values[] = {

	/* Careful: set the limit registers before the base registers,
	 * which contain the enables. */
	/* DRAM Limit i Registers
	 * F1:0x44 i = 0
	 * F1:0x4C i = 1
	 * F1:0x54 i = 2
	 * F1:0x5C i = 3
	 * F1:0x64 i = 4
	 * F1:0x6C i = 5
	 * F1:0x74 i = 6
	 * F1:0x7C i = 7
	 * [ 2: 0] Destination Node ID
	 *	   000 = Node 0
	 *	   001 = Node 1
	 *	   010 = Node 2
	 *	   011 = Node 3
	 *	   100 = Node 4
	 *	   101 = Node 5
	 *	   110 = Node 6
	 *	   111 = Node 7
	 * [ 7: 3] Reserved
	 * [10: 8] Interleave select
	 *	   specifies the values of A[14:12] to use with interleave enable.
	 * [15:11] Reserved
	 * [31:16] DRAM Limit Address i Bits 39-24
	 *	   This field defines the upper address bits of a 40-bit address
	 *	   that define the end of the DRAM region.
	 */
	PCI_ADDR(0, 0x18, 1, 0x44), 0x0000f8f8, 0x00000000,
	PCI_ADDR(0, 0x18, 1, 0x4C), 0x0000f8f8, 0x00000001,
	PCI_ADDR(0, 0x18, 1, 0x54), 0x0000f8f8, 0x00000002,
	PCI_ADDR(0, 0x18, 1, 0x5C), 0x0000f8f8, 0x00000003,
	PCI_ADDR(0, 0x18, 1, 0x64), 0x0000f8f8, 0x00000004,
	PCI_ADDR(0, 0x18, 1, 0x6C), 0x0000f8f8, 0x00000005,
	PCI_ADDR(0, 0x18, 1, 0x74), 0x0000f8f8, 0x00000006,
	PCI_ADDR(0, 0x18, 1, 0x7C), 0x0000f8f8, 0x00000007,
	/* DRAM Base i Registers
	 * F1:0x40 i = 0
	 * F1:0x48 i = 1
	 * F1:0x50 i = 2
	 * F1:0x58 i = 3
	 * F1:0x60 i = 4
	 * F1:0x68 i = 5
	 * F1:0x70 i = 6
	 * F1:0x78 i = 7
	 * [ 0: 0] Read Enable
	 *	   0 = Reads Disabled
	 *	   1 = Reads Enabled
	 * [ 1: 1] Write Enable
	 *	   0 = Writes Disabled
	 *	   1 = Writes Enabled
	 * [ 7: 2] Reserved
	 * [10: 8] Interleave Enable
	 *	   000 = No interleave
	 *	   001 = Interleave on A[12] (2 nodes)
	 *	   010 = reserved
	 *	   011 = Interleave on A[12] and A[14] (4 nodes)
	 *	   100 = reserved
	 *	   101 = reserved
	 *	   110 = reserved
	 *	   111 = Interleave on A[12] and A[13] and A[14] (8 nodes)
	 * [15:11] Reserved
	 * [31:16] DRAM Base Address i Bits 39-24
	 *	   This field defines the upper address bits of a 40-bit address
	 *	   that define the start of the DRAM region.
	 */
	PCI_ADDR(0, 0x18, 1, 0x40), 0x0000f8fc, 0x00000000,
	PCI_ADDR(0, 0x18, 1, 0x48), 0x0000f8fc, 0x00000000,
	PCI_ADDR(0, 0x18, 1, 0x50), 0x0000f8fc, 0x00000000,
	PCI_ADDR(0, 0x18, 1, 0x58), 0x0000f8fc, 0x00000000,
	PCI_ADDR(0, 0x18, 1, 0x60), 0x0000f8fc, 0x00000000,
	PCI_ADDR(0, 0x18, 1, 0x68), 0x0000f8fc, 0x00000000,
	PCI_ADDR(0, 0x18, 1, 0x70), 0x0000f8fc, 0x00000000,
	PCI_ADDR(0, 0x18, 1, 0x78), 0x0000f8fc, 0x00000000,
	/* DRAM CS Base Address i Registers
	 * F2:0x40 i = 0
	 * F2:0x44 i = 1
	 * F2:0x48 i = 2
	 * F2:0x4C i = 3
	 * F2:0x50 i = 4
	 * F2:0x54 i = 5
	 * F2:0x58 i = 6
	 * F2:0x5C i = 7
	 * [ 0: 0] Chip-Select Bank Enable
	 *	   0 = Bank Disabled
	 *	   1 = Bank Enabled
	 * [ 1: 1] Spare Rank
	 * [ 2: 2] Memory Test Failed
	 * [ 4: 3] Reserved
	 * [13: 5] Base Address (21-13)
	 *	   An optimization used when all DIMMs are the same size...
	 * [18:14] Reserved
	 * [28:19] Base Address (36-27)
	 *	   This field defines the upper address bits of a 40-bit
	 *	   address that define the memory address space. These
	 *	   bits decode 128-MByte blocks of memory.
	 * [31:29] Reserved
	 */
	PCI_ADDR(0, 0x18, 2, 0x40), 0xe007c018, 0x00000000,
	PCI_ADDR(0, 0x18, 2, 0x44), 0xe007c018, 0x00000000,
	PCI_ADDR(0, 0x18, 2, 0x48), 0xe007c018, 0x00000000,
	PCI_ADDR(0, 0x18, 2, 0x4C), 0xe007c018, 0x00000000,
	PCI_ADDR(0, 0x18, 2, 0x50), 0xe007c018, 0x00000000,
	PCI_ADDR(0, 0x18, 2, 0x54), 0xe007c018, 0x00000000,
	PCI_ADDR(0, 0x18, 2, 0x58), 0xe007c018, 0x00000000,
	PCI_ADDR(0, 0x18, 2, 0x5C), 0xe007c018, 0x00000000,
	/* DRAM CS Mask Address i Registers
	 * F2:0x60 i = 0,1
	 * F2:0x64 i = 2,3
	 * F2:0x68 i = 4,5
	 * F2:0x6C i = 6,7
	 * Select bits to exclude from comparison with the DRAM Base address register.
	 * [ 4: 0] Reserved
	 * [13: 5] Address Mask (21-13)
	 *	   Address to be excluded from the optimized case
	 * [18:14] Reserved
	 * [28:19] Address Mask (36-27)
	 *	   The bits with an address mask of 1 are excluded from address comparison
	 * [31:29] Reserved
	 */
	PCI_ADDR(0, 0x18, 2, 0x60), 0xe007c01f, 0x00000000,
	PCI_ADDR(0, 0x18, 2, 0x64), 0xe007c01f, 0x00000000,
	PCI_ADDR(0, 0x18, 2, 0x68), 0xe007c01f, 0x00000000,
	PCI_ADDR(0, 0x18, 2, 0x6C), 0xe007c01f, 0x00000000,
	/* DRAM Control Register
	 * F2:0x78
	 * [ 3: 0] RdPtrInit (Read Pointer Initial Value)
	 *	   0x03-0x00: reserved
	 * [ 6: 4] RdPadRcvFifoDly (Read Delay from Pad Receive FIFO)
	 *	   000 = reserved
	 *	   001 = reserved
	 *	   010 = 1.5 Memory Clocks
	 *	   011 = 2 Memory Clocks
	 *	   100 = 2.5 Memory Clocks
	 *	   101 = 3 Memory Clocks
	 *	   110 = 3.5 Memory Clocks
	 *	   111 = reserved
	 * [15: 7] Reserved
	 * [16:16] AltVidC3MemClkTriEn (AltVID Memory Clock Tristate Enable)
	 *	   Enables the DDR memory clocks to be tristated when alternate VID
	 *	   mode is enabled. This bit has no effect if the DisNbClkRamp bit
	 *	   (F3, 0x88) is set.
	 * [17:17] DllTempAdjTime (DLL Temperature Adjust Cycle Time)
	 *	   0 = 5 ms
	 *	   1 = 1 ms
	 * [18:18] DqsRcvEnTrain (DQS Receiver Enable Training Mode)
	 *	   0 = Normal DQS Receiver enable operation
	 *	   1 = DQS receiver enable training mode
	 * [31:19] Reserved
	 */
	PCI_ADDR(0, 0x18, 2, 0x78), 0xfff80000, (6<<4)|(6<<0),
	/* DRAM Initialization Register
	 * F2:0x7C
	 * [15: 0] MrsAddress (Address for MRS/EMRS Commands)
	 *	   This field specifies the data driven on the DRAM address pins
	 *	   15-0 for MRS and EMRS commands.
	 * [18:16] MrsBank (Bank Address for MRS/EMRS Commands)
	 *	   This field specifies the data driven on the DRAM bank pins for
	 *	   the MRS and EMRS commands.
	 * [23:19] Reserved
	 * [24:24] SendPchgAll (Send Precharge All Command)
	 *	   Setting this bit causes the DRAM controller to send a precharge
	 *	   all command. This bit is cleared by the hardware after the
	 *	   command completes.
	 * [25:25] SendAutoRefresh (Send Auto Refresh Command)
	 *	   Setting this bit causes the DRAM controller to send an auto
	 *	   refresh command. This bit is cleared by the hardware after the
	 *	   command completes.
	 * [26:26] SendMrsCmd (Send MRS/EMRS Command)
	 *	   Setting this bit causes the DRAM controller to send the MRS or
	 *	   EMRS command defined by the MrsAddress and MrsBank fields. This
	 *	   bit is cleared by the hardware after the command completes.
	 * [27:27] DeassertMemRstX (De-assert Memory Reset)
	 *	   Setting this bit causes the DRAM controller to de-assert the
	 *	   memory reset pin. This bit cannot be used to assert the memory
	 *	   reset pin.
	 * [28:28] AssertCke (Assert CKE)
	 *	   Setting this bit causes the DRAM controller to assert the CKE
	 *	   pins. This bit cannot be used to de-assert the CKE pins.
	 * [30:29] Reserved
	 * [31:31] EnDramInit (Enable DRAM Initialization)
	 *	   Setting this bit puts the DRAM controller in a BIOS controlled
	 *	   DRAM initialization mode. BIOS must clear this bit after DRAM
	 *	   initialization is complete.
	 */
//	PCI_ADDR(0, 0x18, 2, 0x7C), 0x60f80000, 0,
	/* DRAM Bank Address Mapping Register
	 * F2:0x80
	 * Specify the memory module size
	 * [ 3: 0] CS1/0
	 * [ 7: 4] CS3/2
	 * [11: 8] CS5/4
	 * [15:12] CS7/6
	 * [31:16] Reserved
	 *
	 *	row	col	bank
	 *  0:	13	9	2	:128M
	 *  1:	13	10	2	:256M
	 *  2:	14	10	2	:512M
	 *  3:	13	11	2	:512M
	 *  4:	13	10	3	:512M
	 *  5:	14	10	3	:1G
	 *  6:	14	11	2	:1G
	 *  7:	15	10	3	:2G
	 *  8:	14	11	3	:2G
	 *  9:	15	11	3	:4G
	 * 10:	16	10	3	:4G
	 * 11:	16	11	3	:8G
	 */
	PCI_ADDR(0, 0x18, 2, 0x80), 0xffff0000, 0x00000000,
	/* DRAM Timing Low Register
	 * F2:0x88
	 * [ 2: 0] Tcl (Cas# Latency, Cas# to read-data-valid)
	 *	   000 = reserved
	 *	   001 = reserved
	 *	   010 = CL 3
	 *	   011 = CL 4
	 *	   100 = CL 5
	 *	   101 = CL 6
	 *	   110 = reserved
	 *	   111 = reserved
	 * [ 3: 3] Reserved
	 * [ 5: 4] Trcd (Ras#-active to Cas# read/write delay)
	 *	   00 = 3 clocks
	 *	   01 = 4 clocks
	 *	   10 = 5 clocks
	 *	   11 = 6 clocks
	 * [ 7: 6] Reserved
	 * [ 9: 8] Trp (Row Precharge Time, Precharge-to-Active or Auto-Refresh)
	 *	   00 = 3 clocks
	 *	   01 = 4 clocks
	 *	   10 = 5 clocks
	 *	   11 = 6 clocks
	 * [10:10] Reserved
	 * [11:11] Trtp (Read to Precharge Time, read Cas# to precharge time)
	 *	   0 = 2 clocks for Burst Length of 32 Bytes
	 *	       4 clocks for Burst Length of 64 Bytes
	 *	   1 = 3 clocks for Burst Length of 32 Bytes
	 *	       5 clocks for Burst Length of 64 Bytes
	 * [15:12] Tras (Minimum Ras# Active Time)
	 *	   0000 = reserved
	 *	   0001 = reserved
	 *	   0010 = 5 bus clocks
	 *	   ...
	 *	   1111 = 18 bus clocks
	 * [19:16] Trc (Row Cycle Time, Ras#-active to Ras#-active or auto
	 *	   refresh of the same bank)
	 *	   0000 = 11 bus clocks
	 *	   0001 = 12 bus clocks
	 *	   ...
	 *	   1110 = 25 bus clocks
	 *	   1111 = 26 bus clocks
	 * [21:20] Twr (Write Recovery Time, from the last data to precharge;
	 *	   writes can go back-to-back)
	 *	   00 = 3 bus clocks
	 *	   01 = 4 bus clocks
	 *	   10 = 5 bus clocks
	 *	   11 = 6 bus clocks
	 * [23:22] Trrd (Active-to-active (Ras#-to-Ras#) Delay of different banks)
	 *	   00 = 2 bus clocks
	 *	   01 = 3 bus clocks
	 *	   10 = 4 bus clocks
	 *	   11 = 5 bus clocks
	 * [31:24] MemClkDis (Disable the MEMCLK outputs for DRAM channel A;
	 *	   BIOS should set unused bits to reduce power consumption)
	 *	   Bit	F(1207)		M2 Package	S1g1 Package
	 *	   0	N/A		MA1_CLK1	N/A
	 *	   1	N/A		MA0_CLK1	MA0_CLK1
	 *	   2	MA3_CLK		N/A		N/A
	 *	   3	MA2_CLK		N/A		N/A
	 *	   4	MA1_CLK		MA1_CLK0	N/A
	 *	   5	MA0_CLK		MA0_CLK0	MA0_CLK0
	 *	   6	N/A		MA1_CLK2	N/A
	 *	   7	N/A		MA0_CLK2	MA0_CLK2
	 */
	PCI_ADDR(0, 0x18, 2, 0x88), 0x000004c8, 0xff000002 /* 0x03623125 */,
	/* DRAM Timing High Register
	 * F2:0x8C
	 * [ 3: 0] Reserved
	 * [ 6: 4] TrwtTO (Read-to-Write Turnaround for Data, DQS Contention)
	 *	   000 = 2 bus clocks
	 *	   001 = 3 bus clocks
	 *	   010 = 4 bus clocks
	 *	   011 = 5 bus clocks
	 *	   100 = 6 bus clocks
	 *	   101 = 7 bus clocks
	 *	   110 = 8 bus clocks
	 *	   111 = 9 bus clocks
	 * [ 7: 7] Reserved
	 * [ 9: 8] Twtr (Internal DRAM Write-to-Read Command Delay,
	 *	   minimum write-to-read delay when both access the same chip select)
	 *	   00 = Reserved
	 *	   01 = 1 bus clock
	 *	   10 = 2 bus clocks
	 *	   11 = 3 bus clocks
	 * [11:10] Twrrd (Write to Read DIMM Termination Turnaround, minimum
	 *	   write-to-read delay when accessing two different DIMMs)
	 *	   00 = 0 bus clocks
	 *	   01 = 1 bus clock
	 *	   10 = 2 bus clocks
	 *	   11 = 3 bus clocks
	 * [13:12] Twrwr (Write to Write Timing)
	 *	   00 = 1 bus clock (0 idle cycles on the bus)
	 *	   01 = 2 bus clocks (1 idle cycle on the bus)
	 *	   10 = 3 bus clocks (2 idle cycles on the bus)
	 *	   11 = Reserved
	 * [15:14] Trdrd (Read to Read Timing)
	 *	   00 = 2 bus clocks (1 idle cycle on the bus)
	 *	   01 = 3 bus clocks (2 idle cycles on the bus)
	 *	   10 = 4 bus clocks (3 idle cycles on the bus)
	 *	   11 = 5 bus clocks (4 idle cycles on the bus)
	 * [17:16] Tref (Refresh Rate)
	 *	   00 = Undefined behavior
	 *	   01 = Reserved
	 *	   10 = Refresh interval of 7.8 microseconds
	 *	   11 = Refresh interval of 3.9 microseconds
	 * [19:18] Reserved
	 * [22:20] Trfc0 (Auto-Refresh Row Cycle Time for the Logical DIMM0,
	 *	   based on DRAM density and speed)
	 *	   000 = 75 ns (all speeds, 256Mbit)
	 *	   001 = 105 ns (all speeds, 512Mbit)
	 *	   010 = 127.5 ns (all speeds, 1Gbit)
	 *	   011 = 195 ns (all speeds, 2Gbit)
	 *	   100 = 327.5 ns (all speeds, 4Gbit)
	 *	   101 = reserved
	 *	   110 = reserved
	 *	   111 = reserved
	 * [25:23] Trfc1 (Auto-Refresh Row Cycle Time for the Logical DIMM1,
	 *	   based on DRAM density and speed)
	 * [28:26] Trfc2 (Auto-Refresh Row Cycle Time for the Logical DIMM2,
	 *	   based on DRAM density and speed)
	 * [31:29] Trfc3 (Auto-Refresh Row Cycle Time for the Logical DIMM3,
	 *	   based on DRAM density and speed)
	 */
	PCI_ADDR(0, 0x18, 2, 0x8c), 0x000c008f, (2 << 16)|(1 << 8),
	/* DRAM Config Low Register
	 * F2:0x90
	 * [ 0: 0] InitDram (Initialize DRAM)
	 *	   1 = Writing 1 causes the DRAM controller to execute the DRAM
	 *	       initialization; when complete, it reads back as 0.
	 * [ 1: 1] ExitSelfRef (Exit Self Refresh Command)
	 *	   1 = Writing 1 causes the DRAM controller to bring the DRAMs
	 *	       out of self refresh mode.
	 * [ 3: 2] Reserved
	 * [ 5: 4] DramTerm (DRAM Termination)
	 *	   00 = On die termination disabled
	 *	   01 = 75 ohms
	 *	   10 = 150 ohms
	 *	   11 = 50 ohms
	 * [ 6: 6] Reserved
	 * [ 7: 7] DramDrvWeak (DRAM Drivers Weak Mode)
	 *	   0 = Normal drive strength mode
	 *	   1 = Weak drive strength mode
	 * [ 8: 8] ParEn (Parity Enable)
	 *	   1 = Enable address parity computation output, PAR,
	 *	       and enable the parity error input, ERR
	 * [ 9: 9] SelfRefRateEn (Faster Self Refresh Rate Enable)
	 *	   1 = Enable high temperature (two times normal)
	 *	       self refresh rate
	 * [10:10] BurstLength32 (DRAM Burst Length Set for 32 Bytes)
	 *	   0 = 64-byte mode
	 *	   1 = 32-byte mode
	 * [11:11] Width128 (Width of DRAM interface)
	 *	   0 = the controller DRAM interface is 64 bits wide
	 *	   1 = the controller DRAM interface is 128 bits wide
	 * [12:12] X4Dimm (DIMM 0 is x4)
	 * [13:13] X4Dimm (DIMM 1 is x4)
	 * [14:14] X4Dimm (DIMM 2 is x4)
	 * [15:15] X4Dimm (DIMM 3 is x4)
	 *	   0 = DIMM is not x4
	 *	   1 = x4 DIMM present
	 * [16:16] UnBuffDimm (Unbuffered DIMMs)
	 *	   0 = Buffered DIMMs
	 *	   1 = Unbuffered DIMMs
	 * [18:17] Reserved
	 * [19:19] DimmEccEn (DIMM ECC Enable)
	 *	   1 = ECC checking is being enabled for all DIMMs on the DRAM
	 *	       controller (through F3 0x44[EccEn])
	 * [31:20] Reserved
	 */
	PCI_ADDR(0, 0x18, 2, 0x90), 0xfff6004c, 0x00000010,
	/* DRAM Config High Register
	 * F2:0x94
	 * [ 2: 0] MemClkFreq (Memory Clock Frequency)
	 *	   000 = 200MHz
	 *	   001 = 266MHz
	 *	   010 = 333MHz
	 *	   011 = reserved
	 *	   1xx = reserved
	 * [ 3: 3] MemClkFreqVal (Memory Clock Frequency Valid)
	 *	   1 = BIOS needs to set this bit when setting up MemClkFreq to
	 *	       the proper value
	 * [ 7: 4] MaxAsyncLat (Maximum Asynchronous Latency)
	 *	   0000 = 0 ns
	 *	   ...
	 *	   1111 = 15 ns
	 * [11: 8] Reserved
	 * [12:12] RDqsEn (Read DQS Enable) This bit should only be set if x8
	 *	   registered DIMMs are present in the system.
	 *	   0 = DM pins function as data mask pins
	 *	   1 = DM pins function as read DQS pins
	 * [13:13] Reserved
	 * [14:14] DisDramInterface (Disable the DRAM interface) When this bit
	 *	   is set, the DRAM controller is disabled and the interface is
	 *	   in a low power state.
	 *	   0 = Enabled (default)
	 *	   1 = Disabled
	 * [15:15] PowerDownEn (Power Down Mode Enable)
	 *	   0 = Disabled (default)
	 *	   1 = Enabled
	 * [16:16] PowerDown (Power Down Mode)
	 *	   0 = Channel CKE Control
	 *	   1 = Chip Select CKE Control
	 * [17:17] FourRankSODimm (Four Rank SO-DIMM)
	 *	   1 = this bit is set by BIOS to indicate that a four rank
	 *	       SO-DIMM is present
	 * [18:18] FourRankRDimm (Four Rank Registered DIMM)
	 *	   1 = this bit is set by BIOS to indicate that a four rank
	 *	       registered DIMM is present
	 * [19:19] Reserved
	 * [20:20] SlowAccessMode (Slow Access Mode (2T Mode))
	 *	   0 = DRAM address and control signals are driven for one
	 *	       MEMCLK cycle
	 *	   1 = One additional MEMCLK of setup time is provided on all
	 *	       DRAM address and control signals except CS, CKE, and ODT;
	 *	       i.e., these signals are driven for two MEMCLK cycles
	 *	       rather than one
	 * [21:21] Reserved
	 * [22:22] BankSwizzleMode (Bank Swizzle Mode)
	 *	   0 = Disabled (default)
	 *	   1 = Enabled
	 * [23:23] Reserved
	 * [27:24] DcqBypassMax (DRAM Controller Queue Bypass Maximum)
	 *	   0000 = No bypass; the oldest request is never bypassed
	 *	   0001 = The oldest request may be bypassed no more than 1 time
	 *	   ...
	 *	   1111 = The oldest request may be bypassed no more than 15 times
	 * [31:28] FourActWindow (Four Bank Activate Window); no more than
	 *	   4 banks in an 8-bank device may be activated within this window
	 *	   0000 = No tFAW window restriction
	 *	   0001 = 8 MEMCLK cycles
	 *	   0010 = 9 MEMCLK cycles
	 *	   ...
	 *	   1101 = 20 MEMCLK cycles
	 *	   111x = reserved
	 */
	PCI_ADDR(0, 0x18, 2, 0x94), 0x00a82f00, 0x00008000,
	/* DRAM Delay Line Register
	 * F2:0xa0
	 * [ 0: 0] MemClrStatus (Memory Clear Status): read-only
	 *	   When set, this bit indicates that the memory clear function
	 *	   is complete. Only cleared by reset. BIOS should not write or
	 *	   read the DRAM until this bit is set by hardware.
	 * [ 1: 1] DisableJitter (Disable Jitter)
	 *	   When set, the DDR compensation circuit will not change the
	 *	   values unless the change is more than one step from the
	 *	   current value.
	 * [ 3: 2] RdWrQByp (Read/Write Queue Bypass Count)
	 *	   00 = 2
	 *	   01 = 4
	 *	   10 = 8
	 *	   11 = 16
	 * [ 4: 4] Mode64BitMux (Mismatched DIMM Support Enable)
	 *	   1 = Enables support for mismatched DIMMs when using the
	 *	       128-bit DRAM interface; Width128 then has no effect.
	 *	       AM2 and S1g1 packages only.
	 * [ 5: 5] DCC_EN (Dynamic Idle Cycle Counter Enable)
	 *	   When set to 1, indicates that each entry in the page tables
	 *	   dynamically adjusts the idle cycle limit based on page
	 *	   Conflict/Page Miss (PC/PM) traffic.
	 * [ 8: 6] ILD_lmt (Idle Cycle Limit)
	 *	   000 = 0 cycles
	 *	   001 = 4 cycles
	 *	   010 = 8 cycles
	 *	   011 = 16 cycles
	 *	   100 = 32 cycles
	 *	   101 = 64 cycles
	 *	   110 = 128 cycles
	 *	   111 = 256 cycles
	 * [ 9: 9] DramEnabled (DRAM Enabled)
	 *	   When set, this bit indicates that the DRAM is enabled. This
	 *	   bit is set by hardware after DRAM initialization or on an exit
	 *	   from self refresh. The DRAM controller is initialized after the
	 *	   hardware-controlled initialization process (initiated by
	 *	   F2 0x90[DramInit]) completes or when the BIOS-controlled
	 *	   initialization process completes (F2 0x7C[EnDramInit] is
	 *	   written from 1 to 0).
	 * [23:10] Reserved
	 * [31:24] MemClkDis (Disable the MEMCLK outputs for DRAM channel B;
	 *	   BIOS should set unused bits to reduce power consumption)
	 *	   Bit	F(1207)		M2 Package	S1g1 Package
	 *	   0	N/A		MA1_CLK1	N/A
	 *	   1	N/A		MA0_CLK1	MA0_CLK1
	 *	   2	MA3_CLK		N/A		N/A
	 *	   3	MA2_CLK		N/A		N/A
	 *	   4	MA1_CLK		MA1_CLK0	N/A
	 *	   5	MA0_CLK		MA0_CLK0	MA0_CLK0
	 *	   6	N/A		MA1_CLK2	N/A
	 *	   7	N/A		MA0_CLK2	MA0_CLK2
	 */
	PCI_ADDR(0, 0x18, 2, 0xa0), 0x00fffc00, 0xff000000,
	/* DRAM Scrub Control Register
	 * F3:0x58
	 * [ 4: 0] DRAM Scrub Rate
	 * [ 7: 5] reserved
	 * [12: 8] L2 Scrub Rate
	 * [15:13] reserved
	 * [20:16] Dcache Scrub Rate
	 * [31:21] reserved
	 * Scrub Rates
	 * 00000 = Do not scrub
	 * 00001 = 40.00 ns
	 * 00010 = 80.00 ns
	 * 00011 = 160.00 ns
	 * 00100 = 320.00 ns
	 * 00101 = 640.00 ns
	 * 00110 = 1.28 us
	 * 00111 = 2.56 us
	 * 01000 = 5.12 us
	 * 01001 = 10.20 us
	 * 01010 = 20.50 us
	 * 01011 = 41.00 us
	 * 01100 = 81.90 us
	 * 01101 = 163.80 us
	 * 01110 = 327.70 us
	 * 01111 = 655.40 us
	 * 10000 = 1.31 ms
	 * 10001 = 2.62 ms
	 * 10010 = 5.24 ms
	 * 10011 = 10.49 ms
	 * 10100 = 20.97 ms
	 * 10101 = 42.00 ms
	 * 10110 = 84.00 ms
	 * All others = reserved
	 */
	PCI_ADDR(0, 0x18, 3, 0x58), 0xffe0e0e0, 0x00000000,
	/* DRAM Scrub Address Low Register
	 * F3:0x5C
	 * [ 0: 0] DRAM Scrubber Redirect Enable
	 *	   0 = Do nothing
	 *	   1 = Scrubber corrects errors found in normal operation
	 * [ 5: 1] Reserved
	 * [31: 6] DRAM Scrub Address 31-6
	 */
	PCI_ADDR(0, 0x18, 3, 0x5C), 0x0000003e, 0x00000000,
	/* DRAM Scrub Address High Register
	 * F3:0x60
	 * [ 7: 0] DRAM Scrub Address 39-32
	 * [31: 8] Reserved
	 */
	PCI_ADDR(0, 0x18, 3, 0x60), 0xffffff00, 0x00000000,
	};
	/* For PCI_ADDR(0, 0x18, 2, 0x98) index
	 * and PCI_ADDR(0, 0x18, 2, 0x9c) data:
	 *
	 * index:
	 *	[29: 0] DctOffset (Dram Controller Offset)
	 *	[30:30] DctAccessWrite (Dram Controller Read/Write Select)
	 *		0 = read access
	 *		1 = write access
	 *	[31:31] DctAccessDone (Dram Controller Access Done)
	 *		0 = access in progress
	 *		1 = no access in progress
	 * data:
	 *	[31: 0] DctOffsetData (Dram Controller Offset Data)
	 *
	 * Read:
	 *	- Write the register number to DctOffset with DctAccessWrite = 0
	 *	- Poll DctAccessDone until it = 1
	 *	- Read the data from DctOffsetData
	 * Write:
	 *	- Write the data to DctOffsetData
	 *	- Write the register number to DctOffset with DctAccessWrite = 1
	 *	- Poll DctAccessDone until it = 1
	 */
	int i;
	int max;

	if (!controller_present(ctrl)) {
		sysinfo->ctrl_present[ctrl->node_id] = 0;
		return;
	}
	sysinfo->ctrl_present[ctrl->node_id] = 1;

	printk(BIOS_SPEW, "setting up CPU %02x northbridge registers\n", ctrl->node_id);
	max = ARRAY_SIZE(register_values);
	for (i = 0; i < max; i += 3) {
		device_t dev;
		unsigned where;
		unsigned long reg;
		/* Relocate the node 0 address in the table to this
		 * controller's node. */
		dev = (register_values[i] & ~0xff) - PCI_DEV(0, 0x18, 0) + ctrl->f0;
		where = register_values[i] & 0xff;
		reg = pci_read_config32(dev, where);
		reg &= register_values[i+1];
		reg |= register_values[i+2];
		pci_write_config32(dev, where, reg);
	}
	printk(BIOS_SPEW, "done.\n");
}

#if 0
static int is_dual_channel(const struct mem_controller *ctrl)
{
	uint32_t dcl;
	dcl = pci_read_config32(ctrl->f2, DRAM_CONFIG_LOW);
	return dcl & DCL_Width128;
}
#endif

static int is_opteron(const struct mem_controller *ctrl)
{
	/* Test to see if I am an Opteron. M2 and S1G1 support dual
	 * channel, too, but only support unbuffered DIMMs so we need a
	 * better test for Opterons.
	 * However, all code uses is_opteron() to find out whether to
	 * use dual channel, so if we really check for opteron here, we
	 * need to fix up all code using this function, too.
	 */

	uint32_t nbcap;
	nbcap = pci_read_config32(ctrl->f3, NORTHBRIDGE_CAP);
	return !!(nbcap & NBCAP_128Bit);
}

#if 0
static int is_registered(const struct mem_controller *ctrl)
{
	/* Test to see if we are dealing with registered SDRAM.
	 * If we are not registered we are unbuffered.
	 * This function must be called after spd_handle_unbuffered_dimms.
	 */
	uint32_t dcl;
	dcl = pci_read_config32(ctrl->f2, DRAM_CONFIG_LOW);
	return !(dcl & DCL_UnBuffDimm);
}
#endif

static void spd_get_dimm_size(unsigned device, struct dimm_size *sz)
{
	/* Calculate the log base 2 size of a DIMM rank in bytes */
	int value;
	sz->per_rank = 0;
	sz->rows = 0;
	sz->col = 0;
	sz->rank = 0;

	value = spd_read_byte(device, SPD_ROW_NUM);	/* rows */
	if (value < 0) goto hw_err;
	if ((value & 0xff) == 0) goto val_err;	/* max is 16 ? */
	sz->per_rank += value & 0xff;
	sz->rows = value & 0xff;

	value = spd_read_byte(device, SPD_COL_NUM);	/* columns */
	if (value < 0) goto hw_err;
	if ((value & 0xff) == 0) goto val_err;	/* max is 11 */
	sz->per_rank += value & 0xff;
	sz->col = value & 0xff;

	value = spd_read_byte(device, SPD_BANK_NUM);	/* banks */
	if (value < 0) goto hw_err;
	if ((value & 0xff) == 0) goto val_err;
	sz->bank = log2(value & 0xff);	/* convert 4 to 2, and 8 to 3 */
	sz->per_rank += sz->bank;

	/* Get the module data width and convert it to a power of two */
	value = spd_read_byte(device, SPD_DATA_WIDTH);
	if (value < 0) goto hw_err;
	value &= 0xff;
	if ((value != 72) && (value != 64)) goto val_err;
	sz->per_rank += log2(value) - 3;	/* 64 bits wide, so another 3 address lines */

	/* How many ranks (physical banks)? */
	value = spd_read_byte(device, SPD_MOD_ATTRIB_RANK);
	if (value < 0) goto hw_err;
	/* value >>= SPD_MOD_ATTRIB_RANK_NUM_SHIFT; */
	value &= SPD_MOD_ATTRIB_RANK_NUM_MASK;
	value += SPD_MOD_ATTRIB_RANK_NUM_BASE;	/* 0 -> 1, 1 -> 2, 3 -> 4 */
	/* rank == 1: single-sided, one rank
	 * rank == 2: double-sided, two ranks
	 * rank == 4: double-sided, four ranks total
	 * Some single-sided modules have two ranks because of stacked chips.
	 */
	if ((value != 1) && (value != 2) && (value != 4)) {
		goto val_err;
	}
	sz->rank = value;

	/* Verify that per_rank matches byte 31, which holds
	 * the rank size as a multiple of 128MB.
	 */
	value = spd_read_byte(device, SPD_RANK_SIZE);
	if (value < 0) goto hw_err;
	value &= 0xff;
	value = log2(value);
	if (value <= 4) value += 8;	/* bits 0-4 stand for 1GB and above; add back */
	value += (27 - 5);	/* scale the 128MB multiple (bit 5) to address lines (2^27) */
	if (value != sz->per_rank) {
		printk(BIOS_ERR, "Bad RANK Size --\n");
		goto val_err;
	}

	goto out;

 val_err:
	die("Bad SPD value\n");
	/* If a hardware error occurs, report that I have no memory. */
 hw_err:
	sz->per_rank = 0;
	sz->rows = 0;
	sz->col = 0;
	sz->bank = 0;
	sz->rank = 0;
 out:
	return;
}
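
/* Illustrative example (not in the original source): a module with
 * rows = 14, col = 10, bank = 3 (8 banks) and a 64-bit data width gives
 * per_rank = 14 + 10 + 3 + (log2(64) - 3) = 30, i.e. 2^30 = 1GB per rank,
 * matching cs_mode 5 in the bank address mapping table below.
 */
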
static void set_dimm_size(const struct mem_controller *ctrl,
			  struct dimm_size *sz, unsigned index,
			  struct mem_info *meminfo)
{
	uint32_t base0, base1;

	/* For each base register:
	 * place the DIMM size in 128 MB quantities in bits 28 - 19
	 * (sz->per_rank is the log2 size in bytes), and set the
	 * base enable, bit 0.
	 */

	base0 = base1 = 0;

	/* Make certain side1 of the dimm is at least 128MB */
	if (sz->per_rank >= 27) {
		base0 = (1 << ((sz->per_rank - 27) + 19)) | 1;
	}

	/* Make certain side2 of the dimm is at least 128MB */
	if (sz->rank > 1) {	/* 2 ranks or 4 ranks */
		base1 = (1 << ((sz->per_rank - 27) + 19)) | 1;
	}

	/* Double the size if we are using dual channel memory */
	if (meminfo->is_Width128) {
		base0 = (base0 << 1) | (base0 & 1);
		base1 = (base1 << 1) | (base1 & 1);
	}

	/* Clear the reserved bits */
	base0 &= ~0xe007fffe;
	base1 &= ~0xe007fffe;

	if (!(meminfo->dimm_mask & 0x0F) && (meminfo->dimm_mask & 0xF0)) { /* channelB only? */
		pci_write_config32(ctrl->f2, DRAM_CSBASE + (((index << 1) + 4) << 2), base0);
		pci_write_config32(ctrl->f2, DRAM_CSBASE + (((index << 1) + 5) << 2), base1);
	} else {
		/* Set the appropriate DIMM base address register */
		pci_write_config32(ctrl->f2, DRAM_CSBASE + (((index << 1) + 0) << 2), base0);
		pci_write_config32(ctrl->f2, DRAM_CSBASE + (((index << 1) + 1) << 2), base1);
#if CONFIG_QRANK_DIMM_SUPPORT
		if (sz->rank == 4) {
			pci_write_config32(ctrl->f2, DRAM_CSBASE + (((index << 1) + 4) << 2), base0);
			pci_write_config32(ctrl->f2, DRAM_CSBASE + (((index << 1) + 5) << 2), base1);
		}
#endif
	}

	/* Enable the memory clocks for this DIMM by clearing the MemClkDis bit */
	if (base0) {
		uint32_t dword;
		uint32_t ClkDis0;
#if CONFIG_CPU_SOCKET_TYPE == 0x10	/* L1 */
		ClkDis0 = DTL_MemClkDis0;
#elif CONFIG_CPU_SOCKET_TYPE == 0x11	/* AM2 */
		ClkDis0 = DTL_MemClkDis0_AM2;
#elif CONFIG_CPU_SOCKET_TYPE == 0x12	/* S1G1 */
		ClkDis0 = DTL_MemClkDis0_S1g1;
#endif

		if (!(meminfo->dimm_mask & 0x0F) && (meminfo->dimm_mask & 0xF0)) { /* channelB only? */
			dword = pci_read_config32(ctrl->f2, DRAM_CTRL_MISC);
			dword &= ~(ClkDis0 >> index);
			pci_write_config32(ctrl->f2, DRAM_CTRL_MISC, dword);

		} else {
			dword = pci_read_config32(ctrl->f2, DRAM_TIMING_LOW); /* channel A */
			dword &= ~(ClkDis0 >> index);
#if CONFIG_QRANK_DIMM_SUPPORT
			if (sz->rank == 4) {
				dword &= ~(ClkDis0 >> (index + 2));
			}
#endif
			pci_write_config32(ctrl->f2, DRAM_TIMING_LOW, dword);

			if (meminfo->is_Width128) { /* channel A+B */
				dword = pci_read_config32(ctrl->f2, DRAM_CTRL_MISC);
				dword &= ~(ClkDis0 >> index);
#if CONFIG_QRANK_DIMM_SUPPORT
				if (sz->rank == 4) {
					dword &= ~(ClkDis0 >> (index + 2));
				}
#endif
				pci_write_config32(ctrl->f2, DRAM_CTRL_MISC, dword);
			}
		}
	}
}

/*	row	col	bank	(for 64-bit width)
  0:	13	9	2	:128M
  1:	13	10	2	:256M
  2:	14	10	2	:512M
  3:	13	11	2	:512M
  4:	13	10	3	:512M
  5:	14	10	3	:1G
  6:	14	11	2	:1G
  7:	15	10	3	:2G
  8:	14	11	3	:2G
  9:	15	11	3	:4G
 10:	16	10	3	:4G
 11:	16	11	3	:8G
*/

static void set_dimm_cs_map(const struct mem_controller *ctrl,
			    struct dimm_size *sz, unsigned index,
			    struct mem_info *meminfo)
{
	static const uint8_t cs_map_aaa[24] = {
		/* Indexed by (bank - 2, rows - 13, col - 9); (bank, rows, col)
		 * runs from (2, 13, 9) up to (3, 16, 11). */
		//Bank2
		0, 1, 3,
		0, 2, 6,
		0, 0, 0,
		0, 0, 0,
		//Bank3
		0, 4, 0,
		0, 5, 8,
		0, 7, 9,
		0, 10, 11,
	};

	uint32_t map;

	if (!(meminfo->dimm_mask & 0x0F) && (meminfo->dimm_mask & 0xF0)) { /* channelB only? */
		index += 2;
	}
	map = pci_read_config32(ctrl->f2, DRAM_BANK_ADDR_MAP);
	map &= ~(0xf << (index * 4));
#if CONFIG_QRANK_DIMM_SUPPORT
	if (sz->rank == 4) {
		map &= ~(0xf << ((index + 2) * 4));
	}
#endif

	/* Make certain side1 of the dimm is at least 128MB */
	if (sz->per_rank >= 27) {
		unsigned temp_map;
		temp_map = cs_map_aaa[(sz->bank - 2) * 3 * 4 + (sz->rows - 13) * 3 + (sz->col - 9)];
		map |= temp_map << (index * 4);
#if CONFIG_QRANK_DIMM_SUPPORT
		if (sz->rank == 4) {
			map |= temp_map << ((index + 2) * 4);
		}
#endif
	}

	pci_write_config32(ctrl->f2, DRAM_BANK_ADDR_MAP, map);
}
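
/* Illustrative example (not in the original source): bank = 3, rows = 14,
 * col = 10 gives index (3-2)*12 + (14-13)*3 + (10-9) = 16, and
 * cs_map_aaa[16] = 5, the cs_mode of a 1G module in the table above.
 */
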
static long spd_set_ram_size(const struct mem_controller *ctrl,
			     struct mem_info *meminfo)
{
	int i;

	for (i = 0; i < DIMM_SOCKETS; i++) {
		struct dimm_size *sz = &(meminfo->sz[i]);
		u32 spd_device = ctrl->channel0[i];

		if (!(meminfo->dimm_mask & (1 << i))) {
			if (meminfo->dimm_mask & (1 << (DIMM_SOCKETS + i))) { /* channelB only? */
				spd_device = ctrl->channel1[i];
			} else {
				continue;
			}
		}

		spd_get_dimm_size(spd_device, sz);
		if (sz->per_rank == 0) {
			return -1; /* Report SPD error */
		}
		set_dimm_size(ctrl, sz, i, meminfo);
		set_dimm_cs_map(ctrl, sz, i, meminfo);
	}
	return meminfo->dimm_mask;
}

static void route_dram_accesses(const struct mem_controller *ctrl,
				unsigned long base_k, unsigned long limit_k)
{
	/* Route the addresses to the controller node */
	unsigned node_id;
	unsigned limit;
	unsigned base;
	unsigned index;
	unsigned limit_reg, base_reg;
	device_t device;

	node_id = ctrl->node_id;
	index = (node_id << 3);
	limit = (limit_k << 2);
	limit &= 0xffff0000;
	limit -= 0x00010000;
	limit |= (0 << 8) | (node_id << 0);
	base = (base_k << 2);
	base &= 0xffff0000;
	base |= (0 << 8) | (1 << 1) | (1 << 0);

	limit_reg = 0x44 + index;
	base_reg = 0x40 + index;
	for (device = PCI_DEV(0, 0x18, 1); device <= PCI_DEV(0, 0x1f, 1);
	     device += PCI_DEV(0, 1, 0)) {
		pci_write_config32(device, limit_reg, limit);
		pci_write_config32(device, base_reg, base);
	}
}

static void set_top_mem(unsigned tom_k, unsigned hole_startk)
{
	/* Error if I don't have memory */
	if (!tom_k) {
		die("No memory?");
	}

	/* Report the amount of memory. */
	printk(BIOS_DEBUG, "RAM end at 0x%08x kB\n", tom_k);

	/* Now set top of memory */
	msr_t msr;
	if (tom_k > (4*1024*1024)) {
		printk_raminit("Handling memory mapped above 4 GB\n");
		printk_raminit("Upper RAM end at 0x%08x kB\n", tom_k);
		msr.lo = (tom_k & 0x003fffff) << 10;
		msr.hi = (tom_k & 0xffc00000) >> 22;
		wrmsr(TOP_MEM2, msr);
		printk_raminit("Correcting memory amount mapped below 4 GB\n");
	}

	/* Leave a 64M hole between TOP_MEM and TOP_MEM2
	 * so I can see my rom chip and other I/O devices.
	 */
	if (tom_k >= 0x003f0000) {
#if CONFIG_HW_MEM_HOLE_SIZEK != 0
		if (hole_startk != 0) {
			tom_k = hole_startk;
		} else
#endif
		tom_k = 0x3f0000;
		printk_raminit("Adjusting lower RAM end\n");
	}
	printk_raminit("Lower RAM end at 0x%08x kB\n", tom_k);
	msr.lo = (tom_k & 0x003fffff) << 10;
	msr.hi = (tom_k & 0xffc00000) >> 22;
	wrmsr(TOP_MEM, msr);
}
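
/* Illustrative example (not in the original source): the TOP_MEM MSRs take
 * a byte address, so tom_k is shifted left by 10. For tom_k = 0x500000
 * (5 GB), msr.lo = (0x500000 & 0x003fffff) << 10 = 0x40000000 and
 * msr.hi = (0x500000 & 0xffc00000) >> 22 = 0x1, encoding 0x140000000.
 */
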
static unsigned long interleave_chip_selects(const struct mem_controller *ctrl, int is_Width128)
{
	/* 35 - 27 */

	static const uint8_t csbase_low_f0_shift[] = {
	/* 128MB */	(14 - (13 - 5)),
	/* 256MB */	(15 - (13 - 5)),
	/* 512MB */	(15 - (13 - 5)),
	/* 512MB */	(16 - (13 - 5)),
	/* 512MB */	(16 - (13 - 5)),
	/* 1GB	 */	(16 - (13 - 5)),
	/* 1GB	 */	(16 - (13 - 5)),
	/* 2GB	 */	(16 - (13 - 5)),
	/* 2GB	 */	(17 - (13 - 5)),
	/* 4GB	 */	(17 - (13 - 5)),
	/* 4GB	 */	(16 - (13 - 5)),
	/* 8GB	 */	(17 - (13 - 5)),
	};

	/* cs_base_high is not changed */

	uint32_t csbase_inc;
	int chip_selects, index;
	int bits;
	unsigned common_size;
	unsigned common_cs_mode;
	uint32_t csbase, csmask;

	/* See if all of the memory chip selects are the same size
	 * and if so count them.
	 */
#if defined(CMOS_VSTART_interleave_chip_selects)
	if (read_option(interleave_chip_selects, 1) == 0)
		return 0;
#else
#if !defined(CONFIG_INTERLEAVE_CHIP_SELECTS) || !CONFIG_INTERLEAVE_CHIP_SELECTS
	return 0;
#endif
#endif

	chip_selects = 0;
	common_size = 0;
	common_cs_mode = 0xff;
	for (index = 0; index < 8; index++) {
		unsigned size;
		unsigned cs_mode;
		uint32_t value;

		value = pci_read_config32(ctrl->f2, DRAM_CSBASE + (index << 2));

		/* Is it enabled? */
		if (!(value & 1)) {
			continue;
		}
		chip_selects++;
		size = (value >> 19) & 0x3ff;
		if (common_size == 0) {
			common_size = size;
		}
		/* The sizes differ; fail. */
		if (common_size != size) {
			return 0;
		}

		value = pci_read_config32(ctrl->f2, DRAM_BANK_ADDR_MAP);
		cs_mode = (value >> ((index >> 1) * 4)) & 0xf;
		if (common_cs_mode == 0xff) {
			common_cs_mode = cs_mode;
		}
		/* The cs_modes differ; fail. */
		if (common_cs_mode != cs_mode) {
			return 0;
		}
	}

	/* Chip selects can only be interleaved when there is
	 * more than one and there is a power of two of them.
	 */
	bits = log2(chip_selects);
	if (((1 << bits) != chip_selects) || (bits < 1) || (bits > 3)) {
		/* chip_selects max = 8 */
		return 0;
	}

	/* Find the bits of csbase that we need to interleave on */
	csbase_inc = 1 << (csbase_low_f0_shift[common_cs_mode]);
	if (is_Width128) {
		csbase_inc <<= 1;
	}

	/* Compute the initial values for csbase and csmask.
	 * In csbase just set the enable bit and the base to zero.
	 * In csmask set the mask bits for the size and page level interleave.
	 */
	csbase = 0 | 1;
	csmask = (((common_size << bits) - 1) << 19);
	csmask |= 0x3fe0 & ~((csbase_inc << bits) - csbase_inc);
	for (index = 0; index < 8; index++) {
		uint32_t value;

		value = pci_read_config32(ctrl->f2, DRAM_CSBASE + (index << 2));
		/* Is it enabled? */
		if (!(value & 1)) {
			continue;
		}
		pci_write_config32(ctrl->f2, DRAM_CSBASE + (index << 2), csbase);
		if ((index & 1) == 0) {	/* only have 4 CSMASK */
			pci_write_config32(ctrl->f2, DRAM_CSMASK + ((index >> 1) << 2), csmask);
		}
		csbase += csbase_inc;
	}

	printk(BIOS_DEBUG, "Interleaved\n");

	/* Return the memory size in K */
	return common_size << ((27 - 10) + bits);
}
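
/* Illustrative example (not in the original source): common_size counts
 * 128MB (2^27 byte) units, so shifting by 27 - 10 converts to kB. Four
 * interleaved 512MB chip selects give common_size = 4 and bits = 2, so
 * the return value is 4 << (17 + 2) = 0x200000 kB = 2GB.
 */
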
static unsigned long order_chip_selects(const struct mem_controller *ctrl)
{
	unsigned long tom;

	/* Remember which registers we have used in the high 8 bits of tom */
	tom = 0;
	for (;;) {
		/* Find the largest remaining candidate */
		unsigned index, candidate;
		uint32_t csbase, csmask;
		unsigned size;
		csbase = 0;
		candidate = 0;
		for (index = 0; index < 8; index++) {
			uint32_t value;
			value = pci_read_config32(ctrl->f2, DRAM_CSBASE + (index << 2));

			/* Is it enabled? */
			if (!(value & 1)) {
				continue;
			}

			/* Is it greater? */
			if (value <= csbase) {
				continue;
			}

			/* Has it already been selected? */
			if (tom & (1 << (index + 24))) {
				continue;
			}
			/* I have a new candidate */
			csbase = value;
			candidate = index;
		}

		/* See if I have found a new candidate */
		if (csbase == 0) {
			break;
		}

		/* Remember the dimm size */
		size = csbase >> 19;

		/* Remember I have used this register */
		tom |= (1 << (candidate + 24));

		/* Recompute the cs base register value */
		csbase = (tom << 19) | 1;

		/* Increment the top of memory */
		tom += size;

		/* Compute the memory mask */
		csmask = ((size - 1) << 19);
		csmask |= 0x3fe0;	/* For now don't optimize */

		/* Write the new base register */
		pci_write_config32(ctrl->f2, DRAM_CSBASE + (candidate << 2), csbase);
		/* Write the new mask register */
		if ((candidate & 1) == 0) {	/* only have 4 CSMASK */
			pci_write_config32(ctrl->f2, DRAM_CSMASK + ((candidate >> 1) << 2), csmask);
		}
	}

	/* Return the memory size in K */
	return (tom & ~0xff000000) << (27 - 10);
}

static unsigned long memory_end_k(const struct mem_controller *ctrl, int max_node_id)
{
	unsigned node_id;
	unsigned end_k;
	/* Find the last memory address used */
	end_k = 0;
	for (node_id = 0; node_id < max_node_id; node_id++) {
		uint32_t limit, base;
		unsigned index;
		index = node_id << 3;
		base = pci_read_config32(ctrl->f1, 0x40 + index);
		/* Only look at the limit if the base is enabled */
		if ((base & 3) == 3) {
			limit = pci_read_config32(ctrl->f1, 0x44 + index);
			end_k = ((limit + 0x00010000) & 0xffff0000) >> 2;
		}
	}
	return end_k;
}

static void order_dimms(const struct mem_controller *ctrl,
			struct mem_info *meminfo)
{
	unsigned long tom_k, base_k;

	tom_k = interleave_chip_selects(ctrl, meminfo->is_Width128);

	if (!tom_k) {
		printk(BIOS_DEBUG, "Interleaving disabled\n");
		tom_k = order_chip_selects(ctrl);
	}

	/* Compute the memory base address */
	base_k = memory_end_k(ctrl, ctrl->node_id);
	tom_k += base_k;
	route_dram_accesses(ctrl, base_k, tom_k);
	set_top_mem(tom_k, 0);
}

static long disable_dimm(const struct mem_controller *ctrl, unsigned index,
			 struct mem_info *meminfo)
{
	printk(BIOS_DEBUG, "disabling dimm %02x\n", index);
	if (!(meminfo->dimm_mask & 0x0F) && (meminfo->dimm_mask & 0xF0)) { /* channelB only? */
		pci_write_config32(ctrl->f2, DRAM_CSBASE + (((index << 1) + 4) << 2), 0);
		pci_write_config32(ctrl->f2, DRAM_CSBASE + (((index << 1) + 5) << 2), 0);
	} else {
		pci_write_config32(ctrl->f2, DRAM_CSBASE + (((index << 1) + 0) << 2), 0);
		pci_write_config32(ctrl->f2, DRAM_CSBASE + (((index << 1) + 1) << 2), 0);
#if CONFIG_QRANK_DIMM_SUPPORT
		if (meminfo->sz[index].rank == 4) {
			pci_write_config32(ctrl->f2, DRAM_CSBASE + (((index << 1) + 4) << 2), 0);
			pci_write_config32(ctrl->f2, DRAM_CSBASE + (((index << 1) + 5) << 2), 0);
		}
#endif
	}

	meminfo->dimm_mask &= ~(1 << index);
	return meminfo->dimm_mask;
}

static long spd_handle_unbuffered_dimms(const struct mem_controller *ctrl,
					struct mem_info *meminfo)
{
	int i;
	uint32_t registered;
	uint32_t dcl;
	registered = 0;
	for (i = 0; (i < DIMM_SOCKETS); i++) {
		int value;
		u32 spd_device = ctrl->channel0[i];
		if (!(meminfo->dimm_mask & (1 << i))) {
			if (meminfo->dimm_mask & (1 << (DIMM_SOCKETS + i))) { /* channelB only? */
				spd_device = ctrl->channel1[i];
			} else {
				continue;
			}
		}
		value = spd_read_byte(spd_device, SPD_DIMM_TYPE);
		if (value < 0) {
			return -1;
		}

		/* Registered dimm? */
		value &= 0x3f;
		if ((value == SPD_DIMM_TYPE_RDIMM) || (value == SPD_DIMM_TYPE_mRDIMM)) {
			/* check SPD_MOD_ATTRIB to verify it is SPD_MOD_ATTRIB_REGADC (0x11)? */
			registered |= (1 << i);
		}
	}

	if (is_opteron(ctrl)) {
#if 0
		if (registered != (meminfo->dimm_mask & ((1 << DIMM_SOCKETS) - 1))) {
			meminfo->dimm_mask &= (registered | (registered << DIMM_SOCKETS)); /* disable unbuffered dimms */
			// die("Mixed buffered and registered dimms not supported");
		}
		/* yhlu, for debug: M2 and S1g1 can do dual channel, but they use unbuffered DIMMs */
		if (!registered) {
			die("Unbuffered Dimms not supported on Opteron");
		}
#endif
	}

	dcl = pci_read_config32(ctrl->f2, DRAM_CONFIG_LOW);
	dcl &= ~DCL_UnBuffDimm;
	meminfo->is_registered = 1;
	if (!registered) {
		dcl |= DCL_UnBuffDimm;
		meminfo->is_registered = 0;
	}
	pci_write_config32(ctrl->f2, DRAM_CONFIG_LOW, dcl);

	if (meminfo->is_registered) {
		printk(BIOS_SPEW, "Registered\n");
	} else {
		printk(BIOS_SPEW, "Unbuffered\n");
	}
	return meminfo->dimm_mask;
}

static unsigned int spd_detect_dimms(const struct mem_controller *ctrl)
{
	unsigned dimm_mask;
	int i;
	dimm_mask = 0;
	for (i = 0; i < DIMM_SOCKETS; i++) {
		int byte;
		unsigned device;
		device = ctrl->channel0[i];
		printk_raminit("DIMM socket %i, channel 0 SPD device is 0x%02x\n", i, device);
		if (device) {
			byte = spd_read_byte(ctrl->channel0[i], SPD_MEM_TYPE);	/* Type */
			if (byte == SPD_MEM_TYPE_SDRAM_DDR2) {
				printk_raminit("\tDIMM detected\n");
				dimm_mask |= (1 << i);
			}
		}
		device = ctrl->channel1[i];
		printk_raminit("DIMM socket %i, channel 1 SPD device is 0x%02x\n", i, device);
		if (device) {
			byte = spd_read_byte(ctrl->channel1[i], SPD_MEM_TYPE);
			if (byte == SPD_MEM_TYPE_SDRAM_DDR2) {
				printk_raminit("\tDIMM detected\n");
				dimm_mask |= (1 << (i + DIMM_SOCKETS));
			}
		}
	}
	return dimm_mask;
}

static long spd_enable_2channels(const struct mem_controller *ctrl, struct mem_info *meminfo)
{
	int i;
	uint32_t nbcap;
	/* SPD addresses to verify are identical */
	static const uint8_t addresses[] = {
		2,	/* Type should be DDR2 SDRAM */
		3,	/* *Row addresses */
		4,	/* *Column addresses */
		5,	/* *Number of DIMM Ranks */
		6,	/* *Module Data Width */
		11,	/* *DIMM Conf Type */
		13,	/* *Pri SDRAM Width */
		17,	/* *Logical Banks */
		20,	/* *DIMM Type Info */
		21,	/* *SDRAM Module Attributes */
		27,	/* *tRP Row precharge time */
		28,	/* *Minimum Row Active to Row Active Delay (tRRD) */
		29,	/* *tRCD RAS to CAS */
		30,	/* *tRAS Activate to Precharge */
		36,	/* *Write recovery time (tWR) */
		37,	/* *Internal write to read command delay (tWTR) */
		38,	/* *Internal read to precharge command delay (tRTP) */
		40,	/* *Extension of Byte 41 tRC and Byte 42 tRFC */
		41,	/* *Minimum Active to Active/Auto Refresh Time (tRC) */
		42,	/* *Minimum Auto Refresh Command Time (tRFC) */
		/* The SPD addresses 18, 9, 23, 26 need special treatment like
		 * in spd_set_memclk. Right now they cause many false negatives.
		 * Keep them at the end to see other mismatches (if any).
		 */
		18,	/* *Supported CAS Latencies */
		9,	/* *Cycle time at highest CAS Latency CL=X */
		23,	/* *Cycle time at CAS Latency (CLX - 1) */
		25,	/* *Cycle time at CAS Latency (CLX - 2) */
	};
	u32 dcl, dcm;
	u8 common_cl;

/* S1G1 and AM2 sockets are Mod64BitMux capable. */
#if CONFIG_CPU_SOCKET_TYPE == 0x11 || CONFIG_CPU_SOCKET_TYPE == 0x12
	u8 mux_cap = 1;
#else
	u8 mux_cap = 0;
#endif

	/* If the dimms are not in pairs do not do dual channels */
	if ((meminfo->dimm_mask & ((1 << DIMM_SOCKETS) - 1)) !=
	    ((meminfo->dimm_mask >> DIMM_SOCKETS) & ((1 << DIMM_SOCKETS) - 1))) {
		goto single_channel;
	}
	/* If the cpu is not capable of doing dual channels don't do dual channels */
	nbcap = pci_read_config32(ctrl->f3, NORTHBRIDGE_CAP);
	if (!(nbcap & NBCAP_128Bit)) {
		goto single_channel;
	}
	for (i = 0; (i < 4) && (ctrl->channel0[i]); i++) {
		unsigned device0, device1;
		int value0, value1;
		int j;
		/* If I don't have a dimm skip this one */
		if (!(meminfo->dimm_mask & (1 << i))) {
			continue;
		}
		device0 = ctrl->channel0[i];
		device1 = ctrl->channel1[i];
		/* Abort if the chips don't support a common CAS latency. */
		common_cl = spd_read_byte(device0, 18) & spd_read_byte(device1, 18);
		if (!common_cl) {
			printk(BIOS_DEBUG, "No common CAS latency supported\n");
			goto single_channel;
		} else {
			printk_raminit("Common CAS latency bitfield: 0x%02x\n", common_cl);
		}
		for (j = 0; j < ARRAY_SIZE(addresses); j++) {
			unsigned addr;
			addr = addresses[j];
			value0 = spd_read_byte(device0, addr);
			if (value0 < 0) {
				return -1;
			}
			value1 = spd_read_byte(device1, addr);
			if (value1 < 0) {
				return -1;
			}
			if (value0 != value1) {
				printk_raminit("SPD values differ between channel 0/1 for byte %i\n", addr);
				goto single_channel;
			}
		}
	}
	printk(BIOS_SPEW, "Enabling dual channel memory\n");
	dcl = pci_read_config32(ctrl->f2, DRAM_CONFIG_LOW);
	/* 32-byte mode may be preferred on platforms with graphics
	 * controllers that generate many 32-byte system memory accesses.
	 * 32-byte mode is not supported when the DRAM interface is 128 bits
	 * wide; even if 32-byte mode is set, the system still uses 64-byte
	 * mode. */
	dcl &= ~DCL_BurstLength32;
	dcl |= DCL_Width128;
	pci_write_config32(ctrl->f2, DRAM_CONFIG_LOW, dcl);
	meminfo->is_Width128 = 1;
	return meminfo->dimm_mask;

 single_channel:
	meminfo->is_Width128 = 0;
	meminfo->is_64MuxMode = 0;

	/* single dimm */
	if ((meminfo->dimm_mask & ((1 << DIMM_SOCKETS) - 1)) !=
	    ((meminfo->dimm_mask >> DIMM_SOCKETS) & ((1 << DIMM_SOCKETS) - 1))) {
		if (((meminfo->dimm_mask >> DIMM_SOCKETS) & ((1 << DIMM_SOCKETS) - 1))) {
			/* mux capable and single dimm in channelB */
			if (mux_cap) {
				printk(BIOS_SPEW, "Enable 64MuxMode & BurstLength32\n");
				dcm = pci_read_config32(ctrl->f2, DRAM_CTRL_MISC);
				dcm |= DCM_Mode64BitMux;
				pci_write_config32(ctrl->f2, DRAM_CTRL_MISC, dcm);
				dcl = pci_read_config32(ctrl->f2, DRAM_CONFIG_LOW);
				//dcl |= DCL_BurstLength32; /* 32byte mode for channelB only */
				pci_write_config32(ctrl->f2, DRAM_CONFIG_LOW, dcl);
				meminfo->is_64MuxMode = 1;
			} else {
				meminfo->dimm_mask &= ~((1 << (DIMM_SOCKETS * 2)) - (1 << DIMM_SOCKETS));
			}
		}
	} else { /* unmatched dual dimms? */
		/* Unmatched dual dimms are not supported by the meminit code.
		 * Use the single channelA dimm. */
		meminfo->dimm_mask &= ~((1 << (DIMM_SOCKETS * 2)) - (1 << DIMM_SOCKETS));
		printk(BIOS_SPEW, "Unmatched dual dimms. Use single channelA dimm.\n");
	}
	return meminfo->dimm_mask;
}

struct mem_param {
	uint16_t cycle_time;
	uint8_t divisor;	/* In 1/40 ns increments */
	uint8_t TrwtTO;
	uint8_t Twrrd;
	uint8_t Twrwr;
	uint8_t Trdrd;
	uint8_t DcqByPassMax;
	uint32_t dch_memclk;
	char name[9];
};

static const struct mem_param speed[] = {
	{
		.name	      = "200MHz",
		.cycle_time   = 0x500,
		.divisor      = 200,	/* how many 1/40ns per clock */
		.dch_memclk   = DCH_MemClkFreq_200MHz,	/* 0 */
		.TrwtTO	      = 7,
		.Twrrd	      = 2,
		.Twrwr	      = 2,
		.Trdrd	      = 3,
		.DcqByPassMax = 4,
	},
	{
		.name	      = "266MHz",
		.cycle_time   = 0x375,
		.divisor      = 150,	/* 3.75 ns = 150 x 1/40 ns */
		.dch_memclk   = DCH_MemClkFreq_266MHz,	/* 1 */
		.TrwtTO	      = 7,
		.Twrrd	      = 2,
		.Twrwr	      = 2,
		.Trdrd	      = 3,
		.DcqByPassMax = 4,
	},
	{
		.name	      = "333MHz",
		.cycle_time   = 0x300,
		.divisor      = 120,
		.dch_memclk   = DCH_MemClkFreq_333MHz,	/* 2 */
		.TrwtTO	      = 7,
		.Twrrd	      = 2,
		.Twrwr	      = 2,
		.Trdrd	      = 3,
		.DcqByPassMax = 4,
	},
	{
		.name	      = "400MHz",
		.cycle_time   = 0x250,
		.divisor      = 100,
		.dch_memclk   = DCH_MemClkFreq_400MHz,	/* 3 */
		.TrwtTO	      = 7,
		.Twrrd	      = 2,
		.Twrwr	      = 2,
		.Trdrd	      = 3,
		.DcqByPassMax = 4,
	},
	{
		.cycle_time = 0x000,
	},
};

static const struct mem_param *get_mem_param(unsigned min_cycle_time)
{
	const struct mem_param *param;
	for (param = &speed[0]; param->cycle_time; param++) {
		if (min_cycle_time > (param + 1)->cycle_time) {
			break;
		}
	}
	if (!param->cycle_time) {
		die("min_cycle_time too low");
	}
	printk(BIOS_SPEW, "%s\n", param->name);
	return param;
}

static uint8_t get_exact_divisor(int i, uint8_t divisor)
{
	/* The input divisor is 200 (200MHz), 150 (266MHz), 120 (333MHz) or 100 (400MHz). */
	static const uint8_t dv_a[] = {
	       /* 200  266  333  400 */
	 /*4 */	  250, 250, 250, 250,
	 /*5 */	  200, 200, 200, 100,
	 /*6 */	  200, 166, 166, 100,
	 /*7 */	  200, 171, 142, 100,

	 /*8 */	  200, 150, 125, 100,
	 /*9 */	  200, 156, 133, 100,
	 /*10*/	  200, 160, 120, 100,
	 /*11*/	  200, 163, 127, 100,

	 /*12*/	  200, 150, 133, 100,
	 /*13*/	  200, 153, 123, 100,
	 /*14*/	  200, 157, 128, 100,
	 /*15*/	  200, 160, 120, 100,
	};

	int index;
	msr_t msr;

	/* Check for FID control support */
	struct cpuid_result cpuid1;
	cpuid1 = cpuid(0x80000007);
	if (cpuid1.edx & 0x02) {
		/* Use current FID */
		unsigned fid_cur;
		msr = rdmsr(0xc0010042);
		fid_cur = msr.lo & 0x3f;

		index = fid_cur >> 1;
	} else {
		/* Use startup FID */
		unsigned fid_start;
		msr = rdmsr(0xc0010015);
		fid_start = (msr.lo & (0x3f << 24));

		index = fid_start >> 25;
	}

	/* The table covers multipliers 4 through 15 only (12 rows) */
	if (index > 11) return divisor;

	if (i > 3) return divisor;

	return dv_a[index * 4 + i];
}

struct spd_set_memclk_result {
	const struct mem_param *param;
	long dimm_mask;
};

static unsigned convert_to_linear(unsigned value)
{
	static const unsigned fraction[] = { 0x25, 0x33, 0x66, 0x75 };
	unsigned valuex;

	/* Convert the SPD cycle-time encoding to a more readable fixed-point form */
	if ((value & 0xf) < 10) {	/* no .25, .33, .66, .75 */
		value <<= 4;
	} else {
		valuex = ((value & 0xf0) << 4) | fraction[(value & 0xf) - 10];
		value = valuex;
	}
	return value;
}
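
/* Illustrative example (not in the original source): SPD cycle-time bytes
 * encode whole ns in the high nibble and the fraction in the low nibble,
 * where 10-13 stand for .25/.33/.66/.75. So 0x50 (5.0 ns) becomes 0x500,
 * and 0x3D (3.75 ns) becomes ((0x30) << 4) | 0x75 = 0x375.
 */
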
static const uint8_t latency_indicies[] = { 25, 23, 9 };

static int find_optimum_spd_latency(u32 spd_device, unsigned *min_latency, unsigned *min_cycle_time)
{
	int new_cycle_time, new_latency;
	int index;
	int latencies;
	int latency;

	/* First find the supported CAS latencies.
	 * Byte 18 for DDR SDRAM is interpreted:
	 * bit 3 == CAS Latency = 3
	 * bit 4 == CAS Latency = 4
	 * bit 5 == CAS Latency = 5
	 * bit 6 == CAS Latency = 6
	 */
	new_cycle_time = 0x500;
	new_latency = 6;

	latencies = spd_read_byte(spd_device, SPD_CAS_LAT);
	if (latencies <= 0)
		return 1;

	printk_raminit("\tlatencies: %08x\n", latencies);
	/* Compute the lowest cas latency which can be expressed in this
	 * particular SPD EEPROM. You can store at most settings for 3
	 * contiguous CAS latencies, so by taking the highest CAS
	 * latency marked as supported in the SPD and subtracting 2 you
	 * get the lowest expressible CAS latency. That latency is not
	 * necessarily supported, but a (maybe invalid) entry exists
	 * for it.
	 */
	latency = log2(latencies) - 2;

	/* Loop through and find a fast clock with a low latency */
	for (index = 0; index < 3; index++, latency++) {
		int value;
		if ((latency < 3) || (latency > 6) ||
		    (!(latencies & (1 << latency)))) {
			continue;
		}
		value = spd_read_byte(spd_device, latency_indicies[index]);
		if (value < 0) {
			return -1;
		}

		printk_raminit("\tindex: %08x\n", index);
		printk_raminit("\t\tlatency: %08x\n", latency);
		printk_raminit("\t\tvalue1: %08x\n", value);

		value = convert_to_linear(value);

		printk_raminit("\t\tvalue2: %08x\n", value);

		/* Only increase the latency if we decrease the clock */
		if (value >= *min_cycle_time) {
			if (value < new_cycle_time) {
				new_cycle_time = value;
				new_latency = latency;
			} else if (value == new_cycle_time) {
				if (new_latency > latency) {
					new_latency = latency;
				}
			}
			printk_raminit("\t\tnew_cycle_time: %08x\n", new_cycle_time);
			printk_raminit("\t\tnew_latency: %08x\n", new_latency);
		}
	}

	if (new_latency > 6) {
		return 1;
	}

	/* Does min_cycle_time need to be increased? */
	if (new_cycle_time > *min_cycle_time) {
		*min_cycle_time = new_cycle_time;
	}

	/* Does min_latency need to be increased? */
	if (new_latency > *min_latency) {
		*min_latency = new_latency;
	}

	printk_raminit("2 min_cycle_time: %08x\n", *min_cycle_time);
	printk_raminit("2 min_latency: %08x\n", *min_latency);

	return 0;
}
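
/* Illustrative example (not in the original source): if SPD byte 18 reads
 * 0x38 (CL 3, 4 and 5 supported), log2(0x38) = 5 and the scan starts at
 * CL 5 - 2 = 3, checking bytes 25/23/9 for the cycle times at CL 3/4/5.
 */
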
1786 static struct spd_set_memclk_result spd_set_memclk(const struct mem_controller *ctrl, struct mem_info *meminfo)
1788 /* Compute the minimum cycle time for these dimms */
1789 struct spd_set_memclk_result result;
1790 unsigned min_cycle_time, min_latency, bios_cycle_time;
1791 int i;
1792 uint32_t value;
1794 static const uint16_t min_cycle_times[] = { // use full speed to compare
1795 [NBCAP_MEMCLK_NOLIMIT] = 0x250, /*2.5ns */
1796 [NBCAP_MEMCLK_333MHZ] = 0x300, /* 3.0ns */
1797 [NBCAP_MEMCLK_266MHZ] = 0x375, /* 3.75ns */
1798 [NBCAP_MEMCLK_200MHZ] = 0x500, /* 5.0s */
1802 value = pci_read_config32(ctrl->f3, NORTHBRIDGE_CAP);
1803 min_cycle_time = min_cycle_times[(value >> NBCAP_MEMCLK_SHIFT) & NBCAP_MEMCLK_MASK];
1804 bios_cycle_time = min_cycle_times[
1805 #ifdef CMOS_VSTART_max_mem_clock
1806 read_option(max_mem_clock, 0)
1807 #else
1808 #if defined(CONFIG_MAX_MEM_CLOCK)
1809 CONFIG_MAX_MEM_CLOCK
1810 #else
1811 0 // use DDR400 as default
1812 #endif
1813 #endif
1816 if (bios_cycle_time > min_cycle_time) {
1817 min_cycle_time = bios_cycle_time;
1819 min_latency = 3;
1821 printk_raminit("1 min_cycle_time: %08x\n", min_cycle_time);
1823 /* Compute the least latency with the fastest clock supported
1824 * by both the memory controller and the dimms.
1826 for (i = 0; i < DIMM_SOCKETS; i++) {
1827 u32 spd_device;
1829 printk_raminit("1.1 dimm_mask: %08x\n", meminfo->dimm_mask);
1830 printk_raminit("i: %08x\n",i);
1832 if (meminfo->dimm_mask & (1 << i)) {
1833 spd_device = ctrl->channel0[i];
1834 printk_raminit("Channel 0 settings:\n");
1836 switch (find_optimum_spd_latency(spd_device, &min_latency, &min_cycle_time)) {
1837 case -1:
1838 goto hw_error;
1839 break;
1840 case 1:
1841 continue;
1844 if (meminfo->dimm_mask & (1 << (DIMM_SOCKETS + i))) {
1845 spd_device = ctrl->channel1[i];
1846 printk_raminit("Channel 1 settings:\n");
1848 switch (find_optimum_spd_latency(spd_device, &min_latency, &min_cycle_time)) {
1849 case -1:
1850 goto hw_error;
1851 break;
1852 case 1:
1853 continue;
1858 /* Make a second pass through the dimms and disable
1859 * any that cannot support the selected memclk and cas latency.
1862 printk_raminit("3 min_cycle_time: %08x\n", min_cycle_time);
1863 printk_raminit("3 min_latency: %08x\n", min_latency);
1865 for (i = 0; (i < DIMM_SOCKETS); i++) {
1866 int latencies;
1867 int latency;
1868 int index;
1869 int val;
1870 u32 spd_device = ctrl->channel0[i];
1872 if (!(meminfo->dimm_mask & (1 << i))) {
1873 if (meminfo->dimm_mask & (1 << (DIMM_SOCKETS + i))) { /* channelB only? */
1874 spd_device = ctrl->channel1[i];
1875 } else {
1876 continue;
1880 latencies = spd_read_byte(spd_device, SPD_CAS_LAT);
1881 if (latencies < 0) goto hw_error;
1882 if (latencies == 0) {
1883 continue;
1886 /* Compute the lowest CAS latency supported */
1887 latency = log2(latencies) - 2;
1889 /* Walk through searching for the selected latency */
1890 for (index = 0; index < 3; index++, latency++) {
1891 if (!(latencies & (1 << latency))) {
1892 continue;
1894 if (latency == min_latency)
1895 break;
1897 /* If I can't find the latency or my index is bad, error out */
1898 if ((latency != min_latency) || (index >= 3)) {
1899 goto dimm_err;
1902 /* Read the min_cycle_time for this latency */
1903 val = spd_read_byte(spd_device, latency_indicies[index]);
1904 if (val < 0) goto hw_error;
1906 val = convert_to_linear(val);
1907 /* All is good if the selected clock speed
1908 * is what I need or slower.
1910 if (val <= min_cycle_time) {
1911 continue;
1913 /* Otherwise I have an error, disable the dimm */
1914 dimm_err:
1915 meminfo->dimm_mask = disable_dimm(ctrl, i, meminfo);
1918 printk_raminit("4 min_cycle_time: %08x\n", min_cycle_time);
1920 /* Now that I know the minimum cycle time, look up the memory parameters */
1921 result.param = get_mem_param(min_cycle_time);
1923 /* Update DRAM Config High with our selected memory speed */
1924 value = pci_read_config32(ctrl->f2, DRAM_CONFIG_HIGH);
1925 value &= ~(DCH_MemClkFreq_MASK << DCH_MemClkFreq_SHIFT);
1927 value |= result.param->dch_memclk << DCH_MemClkFreq_SHIFT;
1928 pci_write_config32(ctrl->f2, DRAM_CONFIG_HIGH, value);
1930 printk(BIOS_DEBUG, "%s\n", result.param->name);
1932 /* Update DRAM Timing Low with our selected cas latency */
1933 value = pci_read_config32(ctrl->f2, DRAM_TIMING_LOW);
1934 value &= ~(DTL_TCL_MASK << DTL_TCL_SHIFT);
1935 value |= (min_latency - DTL_TCL_BASE) << DTL_TCL_SHIFT;
1936 pci_write_config32(ctrl->f2, DRAM_TIMING_LOW, value);
1938 result.dimm_mask = meminfo->dimm_mask;
1939 return result;
1940 hw_error:
1941 result.param = (const struct mem_param *)0;
1942 result.dimm_mask = -1;
1943 return result;
1946 static unsigned convert_to_1_4(unsigned value)
1948 static const uint8_t fraction[] = { 0, 1, 2, 2, 3, 3, 0 };
1949 unsigned valuex;
1951 /* Convert the SPD fractional-ns encoding to quarter-ns units */
1952 valuex = fraction[value & 0x7];
1953 return valuex;
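/*
 * Worked example of the mapping above, assuming the usual SPD
 * fractional-extension encoding 0=0.0, 1=0.25, 2=0.33, 3=0.5, 4=0.66,
 * 5=0.75 ns:
 *   convert_to_1_4(1) == 1 -> 0.25 ns
 *   convert_to_1_4(2) == 2 -> 0.33 ns, rounded up to 0.50 ns
 *   convert_to_1_4(4) == 3 -> 0.66 ns, rounded up to 0.75 ns
 * i.e. the result is in quarter-nanosecond units, rounded up.
 */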
1956 static int get_dimm_Trc_clocks(u32 spd_device, const struct mem_param *param)
1958 int value;
1959 int value2;
1960 int clocks;
1961 value = spd_read_byte(spd_device, SPD_TRC);
1962 if (value < 0)
1963 return -1;
1964 printk_raminit("update_dimm_Trc: tRC (41) = %08x\n", value);
1966 value2 = spd_read_byte(spd_device, SPD_TRC - 1);
1967 value <<= 2;
1968 value += convert_to_1_4(value2 >> 4);
1970 value *= 10;
1971 printk_raminit("update_dimm_Trc: tRC final value = %i\n", value);
1973 clocks = CEIL_DIV(value, param->divisor);
1974 printk_raminit("update_dimm_Trc: clocks = %i\n", clocks);
1976 if (clocks < DTL_TRC_MIN) {
1977 // We might want to die here instead, or at least disable this bank.
1978 printk(BIOS_NOTICE, "update_dimm_Trc: Can't refresh fast enough, "
1979 "want %i clocks, minimum is %i clocks.\n", clocks, DTL_TRC_MIN);
1980 clocks = DTL_TRC_MIN;
1982 return clocks;
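/*
 * Worked example, assuming param->divisor encodes the MEMCLK period in
 * 1/40 ns units (100 for the 2.5 ns DDR2-800 clock, as set_DramTerm
 * below suggests): a DIMM reporting tRC = 55 ns with no fractional
 * extension gives value = 55 * 4 * 10 = 2200, so
 * CEIL_DIV(2200, 100) = 22 MEMCLK cycles.
 */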
1985 static int update_dimm_Trc(const struct mem_controller *ctrl,
1986 const struct mem_param *param,
1987 int i, long dimm_mask)
1989 int clocks, old_clocks;
1990 uint32_t dtl;
1991 u32 spd_device = ctrl->channel0[i];
1993 if (!(dimm_mask & (1 << i)) && (dimm_mask & (1 << (DIMM_SOCKETS + i)))) { /* channelB only? */
1994 spd_device = ctrl->channel1[i];
1997 clocks = get_dimm_Trc_clocks(spd_device, param);
1998 if (clocks == -1)
1999 return clocks;
2000 if (clocks > DTL_TRC_MAX) {
2001 return 0;
2003 printk_raminit("update_dimm_Trc: clocks after adjustment = %i\n", clocks);
2005 dtl = pci_read_config32(ctrl->f2, DRAM_TIMING_LOW);
2006 old_clocks = ((dtl >> DTL_TRC_SHIFT) & DTL_TRC_MASK) + DTL_TRC_BASE;
2007 if (old_clocks >= clocks) { /* already set to an equal or larger value */
2008 // clocks = old_clocks;
2009 return 1;
2011 dtl &= ~(DTL_TRC_MASK << DTL_TRC_SHIFT);
2012 dtl |= ((clocks - DTL_TRC_BASE) << DTL_TRC_SHIFT);
2013 pci_write_config32(ctrl->f2, DRAM_TIMING_LOW, dtl);
2014 return 1;
2017 static int update_dimm_Trfc(const struct mem_controller *ctrl, const struct mem_param *param, int i, struct mem_info *meminfo)
2019 unsigned clocks, old_clocks;
2020 uint32_t dth;
2021 int value;
2022 u8 ch_b = 0;
2023 u32 spd_device = ctrl->channel0[i];
2025 if (!(meminfo->dimm_mask & (1 << i)) && (meminfo->dimm_mask & (1 << (DIMM_SOCKETS + i)))) { /* channelB only? */
2026 spd_device = ctrl->channel1[i];
2027 ch_b = 2; /* offset to channelB trfc setting */
2030 /* get the cs_size --> logical dimm size */
2031 value = spd_read_byte(spd_device, SPD_PRI_WIDTH);
2032 if (value < 0) {
2033 return -1;
2036 value = 6 - log2(value); //4-->4, 8-->3, 16-->2
2038 clocks = meminfo->sz[i].per_rank - 27 + 2 - value;
2040 dth = pci_read_config32(ctrl->f2, DRAM_TIMING_HIGH);
2042 old_clocks = ((dth >> (DTH_TRFC0_SHIFT + ((i + ch_b) * 3))) & DTH_TRFC_MASK);
2044 if (old_clocks >= clocks) { /* already set to an equal or larger value */
2045 return 1;
2047 dth &= ~(DTH_TRFC_MASK << (DTH_TRFC0_SHIFT + ((i + ch_b) * 3)));
2048 dth |= clocks << (DTH_TRFC0_SHIFT + ((i + ch_b) * 3));
2049 pci_write_config32(ctrl->f2, DRAM_TIMING_HIGH, dth);
2050 return 1;
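/*
 * The tRFC field above is effectively indexed by device density:
 * assuming sz[i].per_rank is log2 of the rank size in bytes, a 256 MiB
 * rank (per_rank = 28) of x8 devices (value = 3) gives
 * 28 - 27 + 2 - 3 = 0, the encoding for 256 Mbit devices, and each
 * doubling of device density raises the field by one.
 */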
2053 static int update_dimm_TT_1_4(const struct mem_controller *ctrl, const struct mem_param *param, int i, long dimm_mask,
2054 unsigned TT_REG,
2055 unsigned SPD_TT, unsigned TT_SHIFT, unsigned TT_MASK, unsigned TT_BASE, unsigned TT_MIN, unsigned TT_MAX )
2057 unsigned clocks, old_clocks;
2058 uint32_t dtl;
2059 int value;
2060 u32 spd_device = ctrl->channel0[i];
2062 if (!(dimm_mask & (1 << i)) && (dimm_mask & (1 << (DIMM_SOCKETS + i)))) { /* channelB only? */
2063 spd_device = ctrl->channel1[i];
2066 value = spd_read_byte(spd_device, SPD_TT); //already in 1/4 ns
2067 if (value < 0) return -1;
2068 value *= 10;
2069 clocks = CEIL_DIV(value, param->divisor);
2070 if (clocks < TT_MIN) {
2071 clocks = TT_MIN;
2074 if (clocks > TT_MAX) {
2075 printk(BIOS_INFO, "warning: SPD byte 0x%x = %x exceeds TT_MAX %x, clamping to TT_MAX\n", SPD_TT, value, TT_MAX);
2076 clocks = TT_MAX;
2079 dtl = pci_read_config32(ctrl->f2, TT_REG);
2081 old_clocks = ((dtl >> TT_SHIFT) & TT_MASK) + TT_BASE;
2082 if (old_clocks >= clocks) { /* already set to an equal or larger value */
2083 // clocks = old_clocks;
2084 return 1;
2086 dtl &= ~(TT_MASK << TT_SHIFT);
2087 dtl |= ((clocks - TT_BASE) << TT_SHIFT);
2088 pci_write_config32(ctrl->f2, TT_REG, dtl);
2089 return 1;
2092 static int update_dimm_Trcd(const struct mem_controller *ctrl,
2093 const struct mem_param *param, int i, long dimm_mask)
2095 return update_dimm_TT_1_4(ctrl, param, i, dimm_mask, DRAM_TIMING_LOW, SPD_TRCD, DTL_TRCD_SHIFT, DTL_TRCD_MASK, DTL_TRCD_BASE, DTL_TRCD_MIN, DTL_TRCD_MAX);
2098 static int update_dimm_Trrd(const struct mem_controller *ctrl, const struct mem_param *param, int i, long dimm_mask)
2100 return update_dimm_TT_1_4(ctrl, param, i, dimm_mask, DRAM_TIMING_LOW, SPD_TRRD, DTL_TRRD_SHIFT, DTL_TRRD_MASK, DTL_TRRD_BASE, DTL_TRRD_MIN, DTL_TRRD_MAX);
2103 static int update_dimm_Tras(const struct mem_controller *ctrl, const struct mem_param *param, int i, long dimm_mask)
2105 unsigned clocks, old_clocks;
2106 uint32_t dtl;
2107 int value;
2108 u32 spd_device = ctrl->channel0[i];
2110 if (!(dimm_mask & (1 << i)) && (dimm_mask & (1 << (DIMM_SOCKETS + i)))) { /* channelB only? */
2111 spd_device = ctrl->channel1[i];
2114 value = spd_read_byte(spd_device, SPD_TRAS); //in 1 ns
2115 if (value < 0) return -1;
2116 printk_raminit("update_dimm_Tras: 0 value= %08x\n", value);
2118 value <<= 2; /* convert to 1/4 ns units */
2120 value *= 10;
2121 printk_raminit("update_dimm_Tras: 1 value= %08x\n", value);
2123 clocks = CEIL_DIV(value, param->divisor);
2124 printk_raminit("update_dimm_Tras: divisor= %08x\n", param->divisor);
2125 printk_raminit("update_dimm_Tras: clocks= %08x\n", clocks);
2126 if (clocks < DTL_TRAS_MIN) {
2127 clocks = DTL_TRAS_MIN;
2129 if (clocks > DTL_TRAS_MAX) {
2130 return 0;
2132 dtl = pci_read_config32(ctrl->f2, DRAM_TIMING_LOW);
2133 old_clocks = ((dtl >> DTL_TRAS_SHIFT) & DTL_TRAS_MASK) + DTL_TRAS_BASE;
2134 if (old_clocks >= clocks) { /* already set to an equal or larger value */
2135 return 1;
2137 dtl &= ~(DTL_TRAS_MASK << DTL_TRAS_SHIFT);
2138 dtl |= ((clocks - DTL_TRAS_BASE) << DTL_TRAS_SHIFT);
2139 pci_write_config32(ctrl->f2, DRAM_TIMING_LOW, dtl);
2140 return 1;
2143 static int update_dimm_Trp(const struct mem_controller *ctrl,
2144 const struct mem_param *param, int i, long dimm_mask)
2146 return update_dimm_TT_1_4(ctrl, param, i, dimm_mask, DRAM_TIMING_LOW, SPD_TRP, DTL_TRP_SHIFT, DTL_TRP_MASK, DTL_TRP_BASE, DTL_TRP_MIN, DTL_TRP_MAX);
2150 static int update_dimm_Trtp(const struct mem_controller *ctrl,
2151 const struct mem_param *param, int i, struct mem_info *meminfo)
2153 /* need to figure if it is 32 byte burst or 64 bytes burst */
2154 int offset = 2;
2155 if (!meminfo->is_Width128) {
2156 uint32_t dword;
2157 dword = pci_read_config32(ctrl->f2, DRAM_CONFIG_LOW);
2158 if ((dword & DCL_BurstLength32)) offset = 0;
2160 return update_dimm_TT_1_4(ctrl, param, i, meminfo->dimm_mask, DRAM_TIMING_LOW, SPD_TRTP, DTL_TRTP_SHIFT, DTL_TRTP_MASK, DTL_TRTP_BASE+offset, DTL_TRTP_MIN+offset, DTL_TRTP_MAX+offset);
2164 static int update_dimm_Twr(const struct mem_controller *ctrl, const struct mem_param *param, int i, long dimm_mask)
2166 return update_dimm_TT_1_4(ctrl, param, i, dimm_mask, DRAM_TIMING_LOW, SPD_TWR, DTL_TWR_SHIFT, DTL_TWR_MASK, DTL_TWR_BASE, DTL_TWR_MIN, DTL_TWR_MAX);
2170 static int update_dimm_Tref(const struct mem_controller *ctrl,
2171 const struct mem_param *param, int i, long dimm_mask)
2173 uint32_t dth, dth_old;
2174 int value;
2175 u32 spd_device = ctrl->channel0[i];
2177 if (!(dimm_mask & (1 << i)) && (dimm_mask & (1 << (DIMM_SOCKETS + i)))) { /* channelB only? */
2178 spd_device = ctrl->channel1[i];
2181 value = spd_read_byte(spd_device, SPD_TREF); /* 0: 15.625 us, 1: 3.9 us, 2: 7.8 us, ... */
2182 if (value < 0) return -1;
2184 if (value == 1) {
2185 value = 3;
2186 } else {
2187 value = 2;
2190 dth = pci_read_config32(ctrl->f2, DRAM_TIMING_HIGH);
2192 dth_old = dth;
2193 dth &= ~(DTH_TREF_MASK << DTH_TREF_SHIFT);
2194 dth |= (value << DTH_TREF_SHIFT);
2195 if (dth_old != dth) {
2196 pci_write_config32(ctrl->f2, DRAM_TIMING_HIGH, dth);
2198 return 1;
2201 static void set_4RankRDimm(const struct mem_controller *ctrl,
2202 const struct mem_param *param, struct mem_info *meminfo)
2204 #if CONFIG_QRANK_DIMM_SUPPORT
2205 int value;
2206 int i;
2207 long dimm_mask = meminfo->dimm_mask;
2210 if (!(meminfo->is_registered)) return;
2212 value = 0;
2214 for (i = 0; i < DIMM_SOCKETS; i++) {
2215 if (!(dimm_mask & (1 << i))) {
2216 continue;
2219 if (meminfo->sz[i].rank == 4) {
2220 value = 1;
2221 break;
2225 if (value == 1) {
2226 uint32_t dch;
2227 dch = pci_read_config32(ctrl->f2, DRAM_CONFIG_HIGH);
2228 dch |= DCH_FourRankRDimm;
2229 pci_write_config32(ctrl->f2, DRAM_CONFIG_HIGH, dch);
2231 #endif
2234 static uint32_t get_extra_dimm_mask(const struct mem_controller *ctrl,
2235 struct mem_info *meminfo)
2237 int i;
2239 uint32_t mask_x4;
2240 uint32_t mask_x16;
2241 uint32_t mask_single_rank;
2242 uint32_t mask_page_1k;
2243 int value;
2244 #if CONFIG_QRANK_DIMM_SUPPORT
2245 int rank;
2246 #endif
2248 long dimm_mask = meminfo->dimm_mask;
2251 mask_x4 = 0;
2252 mask_x16 = 0;
2253 mask_single_rank = 0;
2254 mask_page_1k = 0;
2256 for (i = 0; i < DIMM_SOCKETS; i++) {
2257 u32 spd_device = ctrl->channel0[i];
2258 if (!(dimm_mask & (1 << i))) {
2259 if (dimm_mask & (1 << (DIMM_SOCKETS + i))) { /* channelB only? */
2260 spd_device = ctrl->channel1[i];
2261 } else {
2262 continue;
2266 if (meminfo->sz[i].rank == 1) {
2267 mask_single_rank |= 1<<i;
2270 if (meminfo->sz[i].col==10) {
2271 mask_page_1k |= 1<<i;
2275 value = spd_read_byte(spd_device, SPD_PRI_WIDTH);
2277 #if CONFIG_QRANK_DIMM_SUPPORT
2278 rank = meminfo->sz[i].rank;
2279 #endif
2281 if (value==4) {
2282 mask_x4 |= (1<<i);
2283 #if CONFIG_QRANK_DIMM_SUPPORT
2284 if (rank==4) {
2285 mask_x4 |= 1<<(i+2);
2287 #endif
2288 } else if (value==16) {
2289 mask_x16 |= (1<<i);
2290 #if CONFIG_QRANK_DIMM_SUPPORT
2291 if (rank==4) {
2292 mask_x16 |= 1<<(i+2);
2294 #endif
2299 meminfo->x4_mask = mask_x4;
2300 meminfo->x16_mask = mask_x16;
2302 meminfo->single_rank_mask = mask_single_rank;
2303 meminfo->page_1k_mask = mask_page_1k;
2305 return mask_x4;
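/*
 * Mask layout produced above: bit i corresponds to DIMM socket i. With
 * CONFIG_QRANK_DIMM_SUPPORT, a quad-rank x4/x16 DIMM also sets bit i+2,
 * presumably so the chip selects of its upper rank pair are covered too.
 */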
2310 static void set_dimm_x4(const struct mem_controller *ctrl, const struct mem_param *param, struct mem_info *meminfo)
2312 uint32_t dcl;
2313 dcl = pci_read_config32(ctrl->f2, DRAM_CONFIG_LOW);
2314 dcl &= ~(DCL_X4Dimm_MASK<<DCL_X4Dimm_SHIFT);
2315 dcl |= ((meminfo->x4_mask) & 0xf) << (DCL_X4Dimm_SHIFT);
2316 pci_write_config32(ctrl->f2, DRAM_CONFIG_LOW, dcl);
2320 static int count_ones(uint32_t dimm_mask)
2322 int dimms;
2323 unsigned index;
2324 dimms = 0;
2325 for (index = 0; index < (2 * DIMM_SOCKETS); index++, dimm_mask >>= 1) {
2326 if (dimm_mask & 1) {
2327 dimms++;
2330 return dimms;
2334 static void set_DramTerm(const struct mem_controller *ctrl,
2335 const struct mem_param *param, struct mem_info *meminfo)
2337 uint32_t dcl;
2338 unsigned odt;
2339 odt = 1; // 75 ohms
2341 if (param->divisor == 100) { //DDR2 800
2342 if (meminfo->is_Width128) {
2343 if (count_ones(meminfo->dimm_mask & 0x0f)==2) {
2344 odt = 3; //50 ohms
2351 #if CONFIG_DIMM_SUPPORT == 0x0204
2352 odt = 0x2; /* 150 ohms */
2353 #endif
2355 dcl = pci_read_config32(ctrl->f2, DRAM_CONFIG_LOW);
2356 dcl &= ~(DCL_DramTerm_MASK<<DCL_DramTerm_SHIFT);
2357 dcl |= (odt & DCL_DramTerm_MASK) << (DCL_DramTerm_SHIFT);
2358 pci_write_config32(ctrl->f2, DRAM_CONFIG_LOW, dcl);
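/*
 * DCL_DramTerm encodings used above: 1 = 75 ohm (the default), 2 =
 * 150 ohm (the SO-DIMM build), 3 = 50 ohm (DDR2-800 with two DIMMs on a
 * 128-bit channel, where the heavier bus load wants stronger on-die
 * termination).
 */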
2361 static void set_ecc(const struct mem_controller *ctrl,
2362 const struct mem_param *param, struct mem_info *meminfo)
2364 int i;
2365 int value;
2367 uint32_t dcl, nbcap;
2368 nbcap = pci_read_config32(ctrl->f3, NORTHBRIDGE_CAP);
2369 dcl = pci_read_config32(ctrl->f2, DRAM_CONFIG_LOW);
2370 dcl &= ~DCL_DimmEccEn;
2371 if (nbcap & NBCAP_ECC) {
2372 dcl |= DCL_DimmEccEn;
2374 #ifdef CMOS_VSTART_ECC_memory
2375 if (read_option(ECC_memory, 1) == 0) {
2376 dcl &= ~DCL_DimmEccEn;
2378 #else // CMOS_VSTART_ECC_memory not defined
2379 #if !CONFIG_ECC_MEMORY
2380 dcl &= ~DCL_DimmEccEn;
2381 #endif
2382 #endif
2383 pci_write_config32(ctrl->f2, DRAM_CONFIG_LOW, dcl);
2385 meminfo->is_ecc = 1;
2386 if (!(dcl & DCL_DimmEccEn)) {
2387 meminfo->is_ecc = 0;
2388 printk(BIOS_DEBUG, "set_ecc: ECC disabled\n");
2389 return; // already disabled the ECC, so don't need to read SPD any more
2392 for (i = 0; i < DIMM_SOCKETS; i++) {
2393 u32 spd_device = ctrl->channel0[i];
2394 if (!(meminfo->dimm_mask & (1 << i))) {
2395 if (meminfo->dimm_mask & (1 << (DIMM_SOCKETS + i))) { /* channelB only? */
2396 spd_device = ctrl->channel1[i];
2397 printk(BIOS_DEBUG, "set_ecc spd_device: 0x%x\n", spd_device);
2398 } else {
2399 continue;
2403 value = spd_read_byte(spd_device, SPD_DIMM_CONF_TYPE);
2405 if (!(value & SPD_DIMM_CONF_TYPE_ECC)) {
2406 dcl &= ~DCL_DimmEccEn;
2407 pci_write_config32(ctrl->f2, DRAM_CONFIG_LOW, dcl);
2408 meminfo->is_ecc = 0;
2409 return;
2416 static int update_dimm_Twtr(const struct mem_controller *ctrl,
2417 const struct mem_param *param, int i, long dimm_mask)
2419 return update_dimm_TT_1_4(ctrl, param, i, dimm_mask, DRAM_TIMING_HIGH, SPD_TWTR, DTH_TWTR_SHIFT, DTH_TWTR_MASK, DTH_TWTR_BASE, DTH_TWTR_MIN, DTH_TWTR_MAX);
2422 static void set_TT(const struct mem_controller *ctrl,
2423 const struct mem_param *param, unsigned TT_REG, unsigned TT_SHIFT,
2424 unsigned TT_MASK, unsigned TT_BASE, unsigned TT_MIN, unsigned TT_MAX,
2425 unsigned val, const char *str)
2427 uint32_t reg;
2429 if ((val < TT_MIN) || (val > TT_MAX)) {
2430 printk(BIOS_ERR, "%s", str);
2431 die(" Unknown\n");
2434 reg = pci_read_config32(ctrl->f2, TT_REG);
2435 reg &= ~(TT_MASK << TT_SHIFT);
2436 reg |= ((val - TT_BASE) << TT_SHIFT);
2437 pci_write_config32(ctrl->f2, TT_REG, reg);
2442 static void set_TrwtTO(const struct mem_controller *ctrl,
2443 const struct mem_param *param)
2445 set_TT(ctrl, param, DRAM_TIMING_HIGH, DTH_TRWTTO_SHIFT, DTH_TRWTTO_MASK, DTH_TRWTTO_BASE, DTH_TRWTTO_MIN, DTH_TRWTTO_MAX, param->TrwtTO, "TrwtTO");
2449 static void set_Twrrd(const struct mem_controller *ctrl, const struct mem_param *param)
2451 set_TT(ctrl, param, DRAM_TIMING_HIGH, DTH_TWRRD_SHIFT, DTH_TWRRD_MASK, DTH_TWRRD_BASE, DTH_TWRRD_MIN, DTH_TWRRD_MAX, param->Twrrd, "Twrrd");
2455 static void set_Twrwr(const struct mem_controller *ctrl, const struct mem_param *param)
2457 set_TT(ctrl, param, DRAM_TIMING_HIGH, DTH_TWRWR_SHIFT, DTH_TWRWR_MASK, DTH_TWRWR_BASE, DTH_TWRWR_MIN, DTH_TWRWR_MAX, param->Twrwr, "Twrwr");
2460 static void set_Trdrd(const struct mem_controller *ctrl, const struct mem_param *param)
2462 set_TT(ctrl, param, DRAM_TIMING_HIGH, DTH_TRDRD_SHIFT, DTH_TRDRD_MASK, DTH_TRDRD_BASE, DTH_TRDRD_MIN, DTH_TRDRD_MAX, param->Trdrd, "Trdrd");
2465 static void set_DcqBypassMax(const struct mem_controller *ctrl, const struct mem_param *param)
2467 set_TT(ctrl, param, DRAM_CONFIG_HIGH, DCH_DcqBypassMax_SHIFT, DCH_DcqBypassMax_MASK, DCH_DcqBypassMax_BASE, DCH_DcqBypassMax_MIN, DCH_DcqBypassMax_MAX, param->DcqByPassMax, "DcqBypassMax"); /* value should eventually be configurable via CMOS */
2470 static void set_Tfaw(const struct mem_controller *ctrl, const struct mem_param *param, struct mem_info *meminfo)
2472 static const uint8_t faw_1k[] = {8, 10, 13, 14};
2473 static const uint8_t faw_2k[] = {10, 14, 17, 18};
2474 unsigned memclkfreq_index;
2475 unsigned faw;
2478 memclkfreq_index = param->dch_memclk;
2480 if (meminfo->page_1k_mask != 0) { //1k page
2481 faw = faw_1k[memclkfreq_index];
2482 } else {
2483 faw = faw_2k[memclkfreq_index];
2486 set_TT(ctrl, param, DRAM_CONFIG_HIGH, DCH_FourActWindow_SHIFT, DCH_FourActWindow_MASK, DCH_FourActWindow_BASE, DCH_FourActWindow_MIN, DCH_FourActWindow_MAX, faw, "FourActWindow");
2489 static void set_max_async_latency(const struct mem_controller *ctrl, const struct mem_param *param)
2491 uint32_t dch;
2492 unsigned async_lat;
2495 dch = pci_read_config32(ctrl->f2, DRAM_CONFIG_HIGH);
2496 dch &= ~(DCH_MaxAsyncLat_MASK << DCH_MaxAsyncLat_SHIFT);
2498 /* FIXME: We need to use max(DqsRcvEnDelay) + 6 ns here; after training, read it from index regs 0x10, 0x13, 0x16, 0x19, 0x30, 0x33, 0x36, 0x39 */
2499 async_lat = 6 + 6;
2502 dch |= ((async_lat - DCH_MaxAsyncLat_BASE) << DCH_MaxAsyncLat_SHIFT);
2503 pci_write_config32(ctrl->f2, DRAM_CONFIG_HIGH, dch);
2506 #if (CONFIG_DIMM_SUPPORT & 0x0100)==0x0000 /* 2T mode only used for unbuffered DIMM */
2507 static void set_SlowAccessMode(const struct mem_controller *ctrl)
2509 uint32_t dch;
2511 dch = pci_read_config32(ctrl->f2, DRAM_CONFIG_HIGH);
2513 dch |= (1<<20);
2515 pci_write_config32(ctrl->f2, DRAM_CONFIG_HIGH, dch);
2517 #endif
2519 /*
2520 DRAM_OUTPUT_DRV_COMP_CTRL 0, 0x20
2521 DRAM_ADDR_TIMING_CTRL 04, 0x24
2522 */
2523 static void set_misc_timing(const struct mem_controller *ctrl, struct mem_info *meminfo)
2525 uint32_t dword;
2526 uint32_t dwordx;
2527 #if (CONFIG_DIMM_SUPPORT & 0x0100)==0x0000 /* 2T mode only used for unbuffered DIMM */
2528 unsigned SlowAccessMode = 0;
2529 #endif
2531 #if CONFIG_DIMM_SUPPORT==0x0104 /* DDR2 and REG */
2532 long dimm_mask = meminfo->dimm_mask & 0x0f;
2533 /* for REG DIMM */
2534 dword = 0x00111222;
2535 dwordx = 0x002f0000;
2536 switch (meminfo->memclk_set) {
2537 case DCH_MemClkFreq_266MHz:
2538 if ( (dimm_mask == 0x03) || (dimm_mask == 0x02) || (dimm_mask == 0x01)) {
2539 dwordx = 0x002f2700;
2541 break;
2542 case DCH_MemClkFreq_333MHz:
2543 if ( (dimm_mask == 0x03) || (dimm_mask == 0x02) || (dimm_mask == 0x01)) {
2544 if ((meminfo->single_rank_mask & 0x03)!=0x03) { //any double rank there?
2545 dwordx = 0x002f2f00;
2548 break;
2549 case DCH_MemClkFreq_400MHz:
2550 dwordx = 0x002f3300;
2551 break;
2554 #endif
2556 #if CONFIG_DIMM_SUPPORT==0x0204 /* DDR2 and SO-DIMM, S1G1 */
2557 dword = 0x00111222;
2558 dwordx = 0x002F2F00;
2560 switch (meminfo->memclk_set) {
2561 case DCH_MemClkFreq_200MHz: /* nothing to be set here */
2562 break;
2563 case DCH_MemClkFreq_266MHz:
2564 if ((meminfo->single_rank_mask == 0)
2565 && (meminfo->x4_mask == 0) && (meminfo->x16_mask))
2566 dwordx = 0x002C2C00; /* Double rank x8 */
2567 /* else SRx16, SRx8, DRx16 == 0x002F2F00 */
2568 break;
2569 case DCH_MemClkFreq_333MHz:
2570 if ((meminfo->single_rank_mask == 1)
2571 && (meminfo->x16_mask == 1)) /* SR x16 */
2572 dwordx = 0x00272700;
2573 else if ((meminfo->x4_mask == 0) && (meminfo->x16_mask == 0)
2574 && (meminfo->single_rank_mask == 0)) { /* DR x8 */
2575 SlowAccessMode = 1;
2576 dwordx = 0x00002800;
2577 } else { /* SR x8, DR x16 */
2578 dwordx = 0x002A2A00;
2580 break;
2581 case DCH_MemClkFreq_400MHz:
2582 if ((meminfo->single_rank_mask == 1)
2583 && (meminfo->x16_mask == 1)) /* SR x16 */
2584 dwordx = 0x00292900;
2585 else if ((meminfo->x4_mask == 0) && (meminfo->x16_mask == 0)
2586 && (meminfo->single_rank_mask == 0)) { /* DR x8 */
2587 SlowAccessMode = 1;
2588 dwordx = 0x00002A00;
2589 } else { /* SR x8, DR x16 */
2590 dwordx = 0x002A2A00;
2592 break;
2594 #endif
2596 #if CONFIG_DIMM_SUPPORT==0x0004 /* DDR2 and unbuffered */
2597 long dimm_mask = meminfo->dimm_mask & 0x0f;
2598 /* for UNBUF DIMM */
2599 dword = 0x00111222;
2600 dwordx = 0x002f2f00;
2601 switch (meminfo->memclk_set) {
2602 case DCH_MemClkFreq_200MHz:
2603 if (dimm_mask == 0x03) {
2604 SlowAccessMode = 1;
2605 dword = 0x00111322;
2607 break;
2608 case DCH_MemClkFreq_266MHz:
2609 if (dimm_mask == 0x03) {
2610 SlowAccessMode = 1;
2611 dword = 0x00111322;
2612 if ((meminfo->x4_mask == 0 ) && (meminfo->x16_mask == 0)) {
2613 switch (meminfo->single_rank_mask) {
2614 case 0x03:
2615 dwordx = 0x00002f00; //x8 single Rank
2616 break;
2617 case 0x00:
2618 dwordx = 0x00342f00; //x8 double Rank
2619 break;
2620 default:
2621 dwordx = 0x00372f00; //x8 single Rank and double Rank mixed
2623 } else if ((meminfo->x4_mask == 0 ) && (meminfo->x16_mask == 0x01) && (meminfo->single_rank_mask == 0x01)) {
2624 dwordx = 0x00382f00; //x8 Double Rank and x16 single Rank mixed
2625 } else if ((meminfo->x4_mask == 0 ) && (meminfo->x16_mask == 0x02) && (meminfo->single_rank_mask == 0x02)) {
2626 dwordx = 0x00382f00; //x16 single Rank and x8 double Rank mixed
2629 } else {
2630 if ((meminfo->x4_mask == 0 ) && (meminfo->x16_mask == 0x00) && ((meminfo->single_rank_mask == 0x01)||(meminfo->single_rank_mask == 0x02))) { //x8 single rank
2631 dwordx = 0x002f2f00;
2632 } else {
2633 dwordx = 0x002b2f00;
2636 break;
2637 case DCH_MemClkFreq_333MHz:
2638 dwordx = 0x00202220;
2639 if (dimm_mask == 0x03) {
2640 SlowAccessMode = 1;
2641 dword = 0x00111322;
2642 if ((meminfo->x4_mask == 0 ) && (meminfo->x16_mask == 0)) {
2643 switch (meminfo->single_rank_mask) {
2644 case 0x03:
2645 dwordx = 0x00302220; //x8 single Rank
2646 break;
2647 case 0x00:
2648 dwordx = 0x002b2220; //x8 double Rank
2649 break;
2650 default:
2651 dwordx = 0x002a2220; //x8 single Rank and double Rank mixed
2653 } else if ((meminfo->x4_mask == 0) && (meminfo->x16_mask == 0x01) && (meminfo->single_rank_mask == 0x01)) {
2654 dwordx = 0x002c2220; //x8 Double Rank and x16 single Rank mixed
2655 } else if ((meminfo->x4_mask == 0) && (meminfo->x16_mask == 0x02) && (meminfo->single_rank_mask == 0x02)) {
2656 dwordx = 0x002c2220; //x16 single Rank and x8 double Rank mixed
2659 break;
2660 case DCH_MemClkFreq_400MHz:
2661 dwordx = 0x00202520;
2662 SlowAccessMode = 1;
2663 if (dimm_mask == 0x03) {
2664 dword = 0x00113322;
2665 } else {
2666 dword = 0x00113222;
2668 break;
2671 printk_raminit("\tdimm_mask = %08x\n", meminfo->dimm_mask);
2672 printk_raminit("\tx4_mask = %08x\n", meminfo->x4_mask);
2673 printk_raminit("\tx16_mask = %08x\n", meminfo->x16_mask);
2674 printk_raminit("\tsingle_rank_mask = %08x\n", meminfo->single_rank_mask);
2675 printk_raminit("\tODC = %08x\n", dword);
2676 printk_raminit("\tAddr Timing= %08x\n", dwordx);
2677 #endif
2679 #if (CONFIG_DIMM_SUPPORT & 0x0100)==0x0000 /* 2T mode only used for unbuffered DIMM */
2680 if (SlowAccessMode) {
2681 set_SlowAccessMode(ctrl);
2683 #endif
2685 if (!(meminfo->dimm_mask & 0x0F) && (meminfo->dimm_mask & 0xF0)) { /* channelB only? */
2686 /* Program the Output Driver Compensation Control Registers (Function 2:Offset 0x9c, index 0, 0x20) */
2687 pci_write_config32_index_wait(ctrl->f2, 0x98, 0x20, dword);
2689 /* Program the Address Timing Control Registers (Function 2:Offset 0x9c, index 4, 0x24) */
2690 pci_write_config32_index_wait(ctrl->f2, 0x98, 0x24, dwordx);
2691 } else {
2692 /* Program the Output Driver Compensation Control Registers (Function 2:Offset 0x9c, index 0, 0x20) */
2693 pci_write_config32_index_wait(ctrl->f2, 0x98, 0, dword);
2694 if (meminfo->is_Width128) {
2695 pci_write_config32_index_wait(ctrl->f2, 0x98, 0x20, dword);
2698 /* Program the Address Timing Control Registers (Function 2:Offset 0x9c, index 4, 0x24) */
2699 pci_write_config32_index_wait(ctrl->f2, 0x98, 4, dwordx);
2700 if (meminfo->is_Width128) {
2701 pci_write_config32_index_wait(ctrl->f2, 0x98, 0x24, dwordx);
2707 static void set_RDqsEn(const struct mem_controller *ctrl,
2708 const struct mem_param *param, struct mem_info *meminfo)
2710 #if CONFIG_CPU_SOCKET_TYPE==0x10
2711 /* only needs to be set for registered x8 DIMMs */
2712 uint32_t dch;
2714 dch = pci_read_config32(ctrl->f2, DRAM_CONFIG_HIGH);
2716 dch &= ~DCH_RDqsEn;
2717 if ((!meminfo->x4_mask) && (!meminfo->x16_mask)) {
2718 dch |= DCH_RDqsEn;
2721 pci_write_config32(ctrl->f2, DRAM_CONFIG_HIGH, dch);
2722 #endif
2725 static void set_idle_cycle_limit(const struct mem_controller *ctrl,
2726 const struct mem_param *param)
2728 uint32_t dcm;
2729 /* AMD says to hardcode this */
2730 dcm = pci_read_config32(ctrl->f2, DRAM_CTRL_MISC);
2731 dcm &= ~(DCM_ILD_lmt_MASK << DCM_ILD_lmt_SHIFT);
2732 dcm |= DCM_ILD_lmt_16 << DCM_ILD_lmt_SHIFT;
2733 dcm |= DCM_DCC_EN;
2734 pci_write_config32(ctrl->f2, DRAM_CTRL_MISC, dcm);
2737 static void set_RdWrQByp(const struct mem_controller *ctrl,
2738 const struct mem_param *param)
2740 set_TT(ctrl, param, DRAM_CTRL_MISC, DCM_RdWrQByp_SHIFT, DCM_RdWrQByp_MASK, 0, 0, 3, 2, "RdWrQByp");
2743 static long spd_set_dram_timing(const struct mem_controller *ctrl,
2744 const struct mem_param *param,
2745 struct mem_info *meminfo)
2747 int i;
2749 for (i = 0; i < DIMM_SOCKETS; i++) {
2750 int rc;
2751 if (!(meminfo->dimm_mask & (1 << i)) &&
2752 !(meminfo->dimm_mask & (1 << (DIMM_SOCKETS + i))) ) {
2753 continue;
2755 printk_raminit("spd_set_dram_timing dimm socket: %08x\n", i);
2756 /* DRAM Timing Low Register */
2757 printk_raminit("\ttrc\n");
2758 if ((rc = update_dimm_Trc (ctrl, param, i, meminfo->dimm_mask)) <= 0) goto dimm_err;
2760 printk_raminit("\ttrcd\n");
2761 if ((rc = update_dimm_Trcd(ctrl, param, i, meminfo->dimm_mask)) <= 0) goto dimm_err;
2763 printk_raminit("\ttrrd\n");
2764 if ((rc = update_dimm_Trrd(ctrl, param, i, meminfo->dimm_mask)) <= 0) goto dimm_err;
2766 printk_raminit("\ttras\n");
2767 if ((rc = update_dimm_Tras(ctrl, param, i, meminfo->dimm_mask)) <= 0) goto dimm_err;
2769 printk_raminit("\ttrp\n");
2770 if ((rc = update_dimm_Trp (ctrl, param, i, meminfo->dimm_mask)) <= 0) goto dimm_err;
2772 printk_raminit("\ttrtp\n");
2773 if ((rc = update_dimm_Trtp(ctrl, param, i, meminfo)) <= 0) goto dimm_err;
2775 printk_raminit("\ttwr\n");
2776 if ((rc = update_dimm_Twr (ctrl, param, i, meminfo->dimm_mask)) <= 0) goto dimm_err;
2778 /* DRAM Timing High Register */
2779 printk_raminit("\ttref\n");
2780 if ((rc = update_dimm_Tref(ctrl, param, i, meminfo->dimm_mask)) <= 0) goto dimm_err;
2782 printk_raminit("\ttwtr\n");
2783 if ((rc = update_dimm_Twtr(ctrl, param, i, meminfo->dimm_mask)) <= 0) goto dimm_err;
2785 printk_raminit("\ttrfc\n");
2786 if ((rc = update_dimm_Trfc(ctrl, param, i, meminfo)) <= 0) goto dimm_err;
2788 /* DRAM Config Low */
2790 continue;
2791 dimm_err:
2792 printk(BIOS_DEBUG, "spd_set_dram_timing dimm_err!\n");
2793 if (rc < 0) {
2794 return -1;
2796 meminfo->dimm_mask = disable_dimm(ctrl, i, meminfo);
2799 get_extra_dimm_mask(ctrl, meminfo); // will be used by RDqsEn and dimm_x4
2800 /* DRAM Timing Low Register */
2802 /* DRAM Timing High Register */
2803 set_TrwtTO(ctrl, param);
2804 set_Twrrd (ctrl, param);
2805 set_Twrwr (ctrl, param);
2806 set_Trdrd (ctrl, param);
2808 set_4RankRDimm(ctrl, param, meminfo);
2810 /* DRAM Config High */
2811 set_Tfaw(ctrl, param, meminfo);
2812 set_DcqBypassMax(ctrl, param);
2813 set_max_async_latency(ctrl, param);
2814 set_RDqsEn(ctrl, param, meminfo);
2816 /* DRAM Config Low */
2817 set_ecc(ctrl, param, meminfo);
2818 set_dimm_x4(ctrl, param, meminfo);
2819 set_DramTerm(ctrl, param, meminfo);
2821 /* DRAM Control Misc */
2822 set_idle_cycle_limit(ctrl, param);
2823 set_RdWrQByp(ctrl, param);
2825 return meminfo->dimm_mask;
2828 static void sdram_set_spd_registers(const struct mem_controller *ctrl,
2829 struct sys_info *sysinfo)
2831 struct spd_set_memclk_result result;
2832 const struct mem_param *param;
2833 struct mem_param paramx;
2834 struct mem_info *meminfo;
2835 #if 1
2836 if (!sysinfo->ctrl_present[ctrl->node_id]) {
2837 return;
2839 #endif
2840 meminfo = &sysinfo->meminfo[ctrl->node_id];
2842 printk(BIOS_DEBUG, "sdram_set_spd_registers: paramx :%p\n", &paramx);
2844 activate_spd_rom(ctrl);
2845 meminfo->dimm_mask = spd_detect_dimms(ctrl);
2847 printk_raminit("sdram_set_spd_registers: dimm_mask=0x%x\n", meminfo->dimm_mask);
2849 if (!(meminfo->dimm_mask & ((1 << 2*DIMM_SOCKETS) - 1))) {
2851 printk(BIOS_DEBUG, "No memory for this cpu\n");
2852 return;
2854 meminfo->dimm_mask = spd_enable_2channels(ctrl, meminfo);
2855 printk_raminit("spd_enable_2channels: dimm_mask=0x%x\n", meminfo->dimm_mask);
2856 if (meminfo->dimm_mask == -1)
2857 goto hw_spd_err;
2859 meminfo->dimm_mask = spd_set_ram_size(ctrl, meminfo);
2860 printk_raminit("spd_set_ram_size: dimm_mask=0x%x\n", meminfo->dimm_mask);
2861 if (meminfo->dimm_mask == -1)
2862 goto hw_spd_err;
2864 meminfo->dimm_mask = spd_handle_unbuffered_dimms(ctrl, meminfo);
2865 printk_raminit("spd_handle_unbuffered_dimms: dimm_mask=0x%x\n", meminfo->dimm_mask);
2866 if (meminfo->dimm_mask == -1)
2867 goto hw_spd_err;
2869 result = spd_set_memclk(ctrl, meminfo);
2870 param = result.param;
2871 meminfo->dimm_mask = result.dimm_mask;
2872 printk_raminit("spd_set_memclk: dimm_mask=0x%x\n", meminfo->dimm_mask);
2873 if (meminfo->dimm_mask == -1)
2874 goto hw_spd_err;
2876 /* store the memclk setting in sysinfo, in case we need to rebuild param again */
2877 meminfo->memclk_set = param->dch_memclk;
2879 memcpy(&paramx, param, sizeof(paramx));
2881 paramx.divisor = get_exact_divisor(param->dch_memclk, paramx.divisor);
2883 meminfo->dimm_mask = spd_set_dram_timing(ctrl, &paramx, meminfo);
2884 printk_raminit("spd_set_dram_timing: dimm_mask=0x%x\n", meminfo->dimm_mask);
2885 if (meminfo->dimm_mask == -1)
2886 goto hw_spd_err;
2888 order_dimms(ctrl, meminfo);
2890 return;
2891 hw_spd_err:
2892 /* Unrecoverable error reading SPD data */
2893 die("Unrecoverable error reading SPD data. No qualified DIMMs?\n");
2894 return;
2897 #define TIMEOUT_LOOPS 300000
2899 #include "raminit_f_dqs.c"
2901 #if CONFIG_HW_MEM_HOLE_SIZEK != 0
2902 static uint32_t hoist_memory(int controllers, const struct mem_controller *ctrl,unsigned hole_startk, int i)
2904 int ii;
2905 uint32_t carry_over;
2906 device_t dev;
2907 uint32_t base, limit;
2908 uint32_t basek;
2909 uint32_t hoist;
2910 int j;
2912 carry_over = (4*1024*1024) - hole_startk;
2914 for (ii = controllers - 1; ii > i; ii--) {
2915 base = pci_read_config32(ctrl[0].f1, 0x40 + (ii << 3));
2916 if ((base & ((1<<1)|(1<<0))) != ((1<<1)|(1<<0))) {
2917 continue;
2919 limit = pci_read_config32(ctrl[0].f1, 0x44 + (ii << 3));
2920 limit += (carry_over << 2);
2921 base += (carry_over << 2);
2922 for (j = 0; j < controllers; j++) {
2923 pci_write_config32(ctrl[j].f1, 0x44 + (ii << 3), limit);
2924 pci_write_config32(ctrl[j].f1, 0x40 + (ii << 3), base );
2927 limit = pci_read_config32(ctrl[0].f1, 0x44 + (i << 3));
2928 limit += (carry_over << 2);
2929 for (j = 0; j < controllers; j++) {
2930 pci_write_config32(ctrl[j].f1, 0x44 + (i << 3), limit);
2932 dev = ctrl[i].f1;
2933 base = pci_read_config32(dev, 0x40 + (i << 3));
2934 basek = (base & 0xffff0000) >> 2;
2935 if (basek == hole_startk) {
2936 /* No need to set the memhole here: the hole offset would be 0 (an
2937 overflow), so change the base register instead; the new basek will be 4*1024*1024 */
2938 base &= 0x0000ffff;
2939 base |= (4*1024*1024)<<2;
2940 for (j = 0; j < controllers; j++) {
2941 pci_write_config32(ctrl[j].f1, 0x40 + (i<<3), base);
2943 } else {
2944 hoist = /* hole start address */
2945 ((hole_startk << 10) & 0xff000000) +
2946 /* hole address to memory controller address */
2947 (((basek + carry_over) >> 6) & 0x0000ff00) +
2948 /* enable */
2949 1;
2950 pci_write_config32(dev, 0xf0, hoist);
2953 return carry_over;
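/*
 * Sketch of the DRAM Hole Address Register (F1:0xf0) value built above,
 * for example with hole_startk = 0x300000 (hole starting at 3 GiB):
 *   bits [31:24] hole start address[31:24]  -> 0xc0
 *   bits [15: 8] DramHoleOffset             -> (basek + carry_over) >> 6
 *   bit  [0]     DramHoleValid              -> 1
 */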
2956 static void set_hw_mem_hole(int controllers, const struct mem_controller *ctrl)
2959 uint32_t hole_startk;
2960 int i;
2962 hole_startk = 4*1024*1024 - CONFIG_HW_MEM_HOLE_SIZEK;
2964 printk_raminit("Handling memory hole at 0x%08x (default)\n", hole_startk);
2965 #if CONFIG_HW_MEM_HOLE_SIZE_AUTO_INC
2966 /* We need to double-check that hole_startk is valid; if it is equal
2967 to a node's basek, we need to decrease it a bit */
2968 uint32_t basek_pri;
2969 for (i = 0; i < controllers; i++) {
2970 uint32_t base;
2971 unsigned base_k;
2972 base = pci_read_config32(ctrl[0].f1, 0x40 + (i << 3));
2973 if ((base & ((1<<1)|(1<<0))) != ((1<<1)|(1<<0))) {
2974 continue;
2976 base_k = (base & 0xffff0000) >> 2;
2977 if (base_k == hole_startk) {
2978 /* decrease the mem hole startk to make sure it lands
2979 in the middle of the previous node */
2980 hole_startk -= (base_k - basek_pri) >> 1;
2981 break; //only one hole
2983 basek_pri = base_k;
2985 printk_raminit("Handling memory hole at 0x%08x (adjusted)\n", hole_startk);
2986 #endif
2987 /* find the node index that needs the hole set */
2988 for (i = 0; i < controllers; i++) {
2989 uint32_t base, limit;
2990 unsigned base_k, limit_k;
2991 base = pci_read_config32(ctrl[0].f1, 0x40 + (i << 3));
2992 if ((base & ((1 << 1) | (1 << 0))) != ((1 << 1) | (1 << 0))) {
2993 continue;
2995 limit = pci_read_config32(ctrl[0].f1, 0x44 + (i << 3));
2996 base_k = (base & 0xffff0000) >> 2;
2997 limit_k = ((limit + 0x00010000) & 0xffff0000) >> 2;
2998 if ((base_k <= hole_startk) && (limit_k > hole_startk)) {
2999 unsigned end_k;
3000 hoist_memory(controllers, ctrl, hole_startk, i);
3001 end_k = memory_end_k(ctrl, controllers);
3002 set_top_mem(end_k, hole_startk);
3003 break; //only one hole
3008 #endif
3009 #if CONFIG_HAVE_ACPI_RESUME
3010 #include "exit_from_self.c"
3011 #endif
3013 static void sdram_enable(int controllers, const struct mem_controller *ctrl,
3014 struct sys_info *sysinfo)
3016 int i;
3017 int suspend = acpi_is_wakeup_s3();
3019 #if K8_REV_F_SUPPORT_F0_F1_WORKAROUND == 1
3020 unsigned cpu_f0_f1[8];
3021 /* FIXME: How about 32 node machine later? */
3022 tsc_t tsc, tsc0[8];
3024 printk(BIOS_DEBUG, "sdram_enable: tsc0[8]: %p\n", &tsc0[0]);
3025 uint32_t dword;
3026 #endif
3028 /* Error if I don't have memory */
3029 if (memory_end_k(ctrl, controllers) == 0) {
3030 die("No memory\n");
3033 /* Before enabling memory start the memory clocks */
3034 for (i = 0; i < controllers; i++) {
3035 uint32_t dch;
3036 if (!sysinfo->ctrl_present[ i ])
3037 continue;
3038 dch = pci_read_config32(ctrl[i].f2, DRAM_CONFIG_HIGH);
3040 /* if no memory is installed, disable the interface */
3041 if (sysinfo->meminfo[i].dimm_mask == 0x00) {
3042 dch |= DCH_DisDramInterface;
3043 pci_write_config32(ctrl[i].f2, DRAM_CONFIG_HIGH, dch);
3045 } else {
3046 dch |= DCH_MemClkFreqVal;
3047 pci_write_config32(ctrl[i].f2, DRAM_CONFIG_HIGH, dch);
3048 /* address timing and Output driver comp Control */
3049 set_misc_timing(ctrl+i, sysinfo->meminfo+i );
3053 /* We need to wait a minimum of 20 MEMCLKs before enabling InitDram */
3054 memreset(controllers, ctrl);
3056 /* let's override the rest of the routine (S3 resume) */
3057 if (suspend) {
3058 printk(BIOS_DEBUG, "Wakeup!\n");
3059 exit_from_self(controllers, ctrl, sysinfo);
3060 printk(BIOS_DEBUG, "Mem running !\n");
3061 return;
3064 for (i = 0; i < controllers; i++) {
3065 uint32_t dcl, dch;
3066 if (!sysinfo->ctrl_present[ i ])
3067 continue;
3068 /* Skip everything if I don't have any memory on this controller */
3069 dch = pci_read_config32(ctrl[i].f2, DRAM_CONFIG_HIGH);
3070 if (!(dch & DCH_MemClkFreqVal)) {
3071 continue;
3074 /* ChipKill */
3075 dcl = pci_read_config32(ctrl[i].f2, DRAM_CONFIG_LOW);
3076 if (dcl & DCL_DimmEccEn) {
3077 uint32_t mnc;
3078 printk(BIOS_SPEW, "ECC enabled\n");
3079 mnc = pci_read_config32(ctrl[i].f3, MCA_NB_CONFIG);
3080 mnc |= MNC_ECC_EN;
3081 if (dcl & DCL_Width128) {
3082 mnc |= MNC_CHIPKILL_EN;
3084 pci_write_config32(ctrl[i].f3, MCA_NB_CONFIG, mnc);
3087 #if K8_REV_F_SUPPORT_F0_F1_WORKAROUND == 1
3088 cpu_f0_f1[i] = is_cpu_pre_f2_in_bsp(i);
3089 if (cpu_f0_f1[i]) {
3090 //Rev F0/F1 workaround
3091 #if 1
3092 /* Set the DqsRcvEnTrain bit */
3093 dword = pci_read_config32(ctrl[i].f2, DRAM_CTRL);
3094 dword |= DC_DqsRcvEnTrain;
3095 pci_write_config32(ctrl[i].f2, DRAM_CTRL, dword);
3096 #endif
3097 tsc0[i] = rdtsc();
3099 #endif
3101 pci_write_config32(ctrl[i].f2, DRAM_CONFIG_LOW, dcl);
3102 dcl |= DCL_InitDram;
3103 pci_write_config32(ctrl[i].f2, DRAM_CONFIG_LOW, dcl);
3106 for (i = 0; i < controllers; i++) {
3107 uint32_t dcl, dcm;
3108 if (!sysinfo->ctrl_present[ i ])
3109 continue;
3110 /* Skip everything if I don't have any memory on this controller */
3111 if (sysinfo->meminfo[i].dimm_mask == 0x00) continue;
3113 printk(BIOS_DEBUG, "Initializing memory: ");
3114 int loops = 0;
3115 do {
3116 dcl = pci_read_config32(ctrl[i].f2, DRAM_CONFIG_LOW);
3117 loops++;
3118 if ((loops & 1023) == 0) {
3119 printk(BIOS_DEBUG, ".");
3121 } while(((dcl & DCL_InitDram) != 0) && (loops < TIMEOUT_LOOPS));
3122 if (loops >= TIMEOUT_LOOPS) {
3123 printk(BIOS_DEBUG, " failed\n");
3124 continue;
3127 /* Wait until it is safe to touch memory */
3128 do {
3129 dcm = pci_read_config32(ctrl[i].f2, DRAM_CTRL_MISC);
3130 } while(((dcm & DCM_MemClrStatus) == 0) /* || ((dcm & DCM_DramEnabled) == 0)*/ );
3132 #if K8_REV_F_SUPPORT_F0_F1_WORKAROUND == 1
3133 if (cpu_f0_f1[i]) {
3134 tsc = rdtsc();
3136 print_debug_dqs_tsc("\nbegin tsc0", i, tsc0[i].hi, tsc0[i].lo, 2);
3137 print_debug_dqs_tsc("end tsc ", i, tsc.hi, tsc.lo, 2);
3139 if (tsc.lo<tsc0[i].lo) {
3140 tsc.hi--;
3142 tsc.lo -= tsc0[i].lo;
3143 tsc.hi -= tsc0[i].hi;
3145 tsc0[i].lo = tsc.lo;
3146 tsc0[i].hi = tsc.hi;
3148 print_debug_dqs_tsc(" dtsc0", i, tsc0[i].hi, tsc0[i].lo, 2);
3150 #endif
3151 printk(BIOS_DEBUG, " done\n");
3154 #if CONFIG_HW_MEM_HOLE_SIZEK != 0
3155 /* init hw mem hole here */
3156 /* The DramHoleValid bit can only be set after MemClrStatus is set by hardware */
3157 set_hw_mem_hole(controllers, ctrl);
3158 #endif
3160 /* store TOM in sysinfo; it will be used by dqs_timing */
3162 msr_t msr;
3163 //[1M, TOM)
3164 msr = rdmsr(TOP_MEM);
3165 sysinfo->tom_k = ((msr.hi<<24) | (msr.lo>>8))>>2;
3167 //[4G, TOM2)
3168 msr = rdmsr(TOP_MEM2);
3169 sysinfo->tom2_k = ((msr.hi<<24)| (msr.lo>>8))>>2;
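/*
 * Worked example of the MSR-to-KiB conversion above: the MSR holds a
 * byte address split across hi/lo, so (hi << 24) | (lo >> 8) is the
 * address shifted right by 8, and the extra >> 2 yields KiB. For
 * TOP_MEM = 0x80000000 (2 GiB): lo >> 8 = 0x00800000, and
 * 0x00800000 >> 2 = 0x00200000 KiB = 2 GiB.
 */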
3172 for (i = 0; i < controllers; i++) {
3173 sysinfo->mem_trained[i] = 0;
3175 if (!sysinfo->ctrl_present[ i ])
3176 continue;
3178 /* Skip everything if I don't have any memory on this controller */
3179 if (sysinfo->meminfo[i].dimm_mask == 0x00)
3180 continue;
3182 sysinfo->mem_trained[i] = 0x80; /* memory needs to be trained */
3186 #if CONFIG_MEM_TRAIN_SEQ == 0
3187 #if K8_REV_F_SUPPORT_F0_F1_WORKAROUND == 1
3188 dqs_timing(controllers, ctrl, tsc0, sysinfo);
3189 #else
3190 dqs_timing(controllers, ctrl, sysinfo);
3191 #endif
3192 #else
3194 #if CONFIG_MEM_TRAIN_SEQ == 2
3195 /* need to enable MTRRs so DQS training can access the test address */
3196 setup_mtrr_dqs(sysinfo->tom_k, sysinfo->tom2_k);
3197 #endif
3199 for (i = 0; i < controllers; i++) {
3200 /* Skip everything if I don't have any memory on this controller */
3201 if (sysinfo->mem_trained[i]!=0x80)
3202 continue;
3204 dqs_timing(i, &ctrl[i], sysinfo, 1);
3206 #if CONFIG_MEM_TRAIN_SEQ == 1
3207 break; /* only train the first node with RAM */
3208 #endif
3211 #if CONFIG_MEM_TRAIN_SEQ == 2
3212 clear_mtrr_dqs(sysinfo->tom2_k);
3213 #endif
3215 #endif
3217 #if CONFIG_MEM_TRAIN_SEQ != 1
3218 wait_all_core0_mem_trained(sysinfo);
3219 #endif
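/*
 * Summary of how CONFIG_MEM_TRAIN_SEQ wires up DQS training above:
 * 0 trains all nodes from the BSP in a single dqs_timing() call, 1
 * trains only the first node with RAM here and leaves the others for
 * later, and 2 trains each node in turn under temporary MTRR coverage
 * of the test addresses. All modes except 1 then wait for every node's
 * core 0 to finish training.
 */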
3223 void fill_mem_ctrl(int controllers, struct mem_controller *ctrl_a,
3224 const uint16_t *spd_addr)
3226 int i;
3227 int j;
3228 struct mem_controller *ctrl;
3229 for (i = 0; i < controllers; i++) {
3230 ctrl = &ctrl_a[i];
3231 ctrl->node_id = i;
3232 ctrl->f0 = PCI_DEV(0, 0x18+i, 0);
3233 ctrl->f1 = PCI_DEV(0, 0x18+i, 1);
3234 ctrl->f2 = PCI_DEV(0, 0x18+i, 2);
3235 ctrl->f3 = PCI_DEV(0, 0x18+i, 3);
3237 if (spd_addr == (void *)0) continue;
3239 for (j = 0; j < DIMM_SOCKETS; j++) {
3240 ctrl->channel0[j] = spd_addr[(i*2+0)*DIMM_SOCKETS + j];
3241 ctrl->channel1[j] = spd_addr[(i*2+1)*DIMM_SOCKETS + j];
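/*
 * Usage sketch (hypothetical board romstage code, not part of this
 * file), assuming DIMM_SOCKETS == 2 and example SMBus addresses: the
 * spd_addr table lists, per node, channel 0's sockets then channel 1's.
 *
 *	static const uint16_t spd_addr[] = {
 *		0x50, 0x51,	// node 0, channel 0
 *		0x52, 0x53,	// node 0, channel 1
 *	};
 *	struct mem_controller ctrl[1];
 *	fill_mem_ctrl(1, ctrl, spd_addr);
 */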