/*
 * This file is part of the coreboot project.
 *
 * Copyright (C) 2013 Vladimir Serbinenko.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */
/* Please don't remove this. It is needed to do debugging and reverse
   engineering to support more Nehalem variants in the future. */
25 #include <console/console.h>
28 #include <cpu/x86/msr.h>
30 #include <arch/cbfs.h>
32 #include <ip_checksum.h>
33 #include <pc80/mc146818rtc.h>
34 #include <device/pci_def.h>
35 #include <device/device.h>
41 #include <timestamp.h>
42 #include <cpu/x86/mtrr.h>
43 #include <cpu/intel/speedstep.h>
44 #include <cpu/intel/turbo.h>
45 #include <northbridge/intel/common/mrc_cache.h>
/* Short fixed-width integer aliases used throughout this file. */
typedef unsigned char u8;
typedef unsigned short u16;
typedef unsigned int u32;
57 #include "southbridge/intel/ibexpeak/me.h"
63 #define NORTHBRIDGE PCI_DEV(0, 0, 0)
64 #define SOUTHBRIDGE PCI_DEV(0, 0x1f, 0)
65 #define GMA PCI_DEV (0, 0x2, 0x0)
66 #define HECIDEV PCI_DEV(0, 0x16, 0)
69 #define FOR_ALL_RANKS \
70 for (channel = 0; channel < NUM_CHANNELS; channel++) \
71 for (slot = 0; slot < NUM_SLOTS; slot++) \
72 for (rank = 0; rank < NUM_RANKS; rank++)
74 #define FOR_POPULATED_RANKS \
75 for (channel = 0; channel < NUM_CHANNELS; channel++) \
76 for (slot = 0; slot < NUM_SLOTS; slot++) \
77 for (rank = 0; rank < NUM_RANKS; rank++) \
78 if (info->populated_ranks[channel][slot][rank])
80 #define FOR_POPULATED_RANKS_BACKWARDS \
81 for (channel = NUM_CHANNELS - 1; channel >= 0; channel--) \
82 for (slot = 0; slot < NUM_SLOTS; slot++) \
83 for (rank = 0; rank < NUM_RANKS; rank++) \
84 if (info->populated_ranks[channel][slot][rank])
86 /* [REG_178][CHANNEL][2 * SLOT + RANK][LANE] */
90 } timing_bounds_t
[2][2][2][9];
93 /* [TM][CHANNEL][SLOT][RANK][LANE] */
94 u16 lane_timings
[4][2][2][2][9];
101 timing_bounds_t timing_bounds
[2];
102 u16 timing_offset
[2][2][2][9];
103 u16 timing2_offset
[2][2][2][9];
104 u16 timing2_bounds
[2][2][2][9][2];
105 u8 reg274265
[2][3]; /* [CHANNEL][REGISTER] */
112 #include "raminit_fake.c"
115 #include <lib.h> /* Prototypes */
117 static inline void write_mchbar32(u32 addr
, u32 val
)
119 MCHBAR32(addr
) = val
;
122 static inline void write_mchbar16(u32 addr
, u16 val
)
124 MCHBAR16(addr
) = val
;
127 static inline void write_mchbar8(u32 addr
, u8 val
)
133 static inline u32
read_mchbar32(u32 addr
)
135 return MCHBAR32(addr
);
138 static inline u16
read_mchbar16(u32 addr
)
140 return MCHBAR16(addr
);
143 static inline u8
read_mchbar8(u32 addr
)
145 return MCHBAR8(addr
);
148 static void clflush(u32 addr
)
150 asm volatile ("clflush (%0)"::"r" (addr
));
153 typedef struct _u128
{
158 static void read128(u32 addr
, u64
* out
)
162 asm volatile ("movdqu %%xmm0, %0\n"
163 "movdqa (%2), %%xmm0\n"
164 "movdqu %%xmm0, %1\n"
165 "movdqu %0, %%xmm0":"+m" (stor
), "=m"(ret
):"r"(addr
));
173 static void write_1d0(u32 val
, u16 addr
, int bits
, int flag
)
175 write_mchbar32(0x1d0, 0);
176 while (read_mchbar32(0x1d0) & 0x800000);
177 write_mchbar32(0x1d4,
178 (val
& ((1 << bits
) - 1)) | (2 << bits
) | (flag
<<
180 write_mchbar32(0x1d0, 0x40000000 | addr
);
181 while (read_mchbar32(0x1d0) & 0x800000);
185 static u16
read_1d0(u16 addr
, int split
)
188 write_mchbar32(0x1d0, 0);
189 while (read_mchbar32(0x1d0) & 0x800000);
190 write_mchbar32(0x1d0,
191 0x80000000 | (((read_mchbar8(0x246) >> 2) & 3) +
193 while (read_mchbar32(0x1d0) & 0x800000);
194 val
= read_mchbar32(0x1d8);
195 write_1d0(0, 0x33d, 0, 0);
196 write_1d0(0, 0x33d, 0, 0);
197 val
&= ((1 << split
) - 1);
198 // printk (BIOS_ERR, "R1D0C [%x] => %x\n", addr, val);
/* MMIO write helper taking a plain integer address instead of a pointer. */
static void write32p(uintptr_t addr, uint32_t val)
{
	write32((void *)addr, val);
}
/* MMIO read helper taking a plain integer address instead of a pointer. */
static uint32_t read32p(uintptr_t addr)
{
	return read32((void *)addr);
}
212 static void sfence(void)
215 asm volatile ("sfence");
219 static inline u16
get_lane_offset(int slot
, int rank
, int lane
)
221 return 0x124 * lane
+ ((lane
& 4) ? 0x23e : 0) + 11 * rank
+ 22 * slot
-
225 static inline u16
get_timing_register_addr(int lane
, int tm
, int slot
, int rank
)
227 const u16 offs
[] = { 0x1d, 0xa8, 0xe6, 0x5c };
228 return get_lane_offset(slot
, rank
, lane
) + offs
[(tm
+ 3) % 4];
232 static u32
gav_real(int line
, u32 in
)
234 // printk (BIOS_DEBUG, "%d: GAV: %x\n", line, in);
238 #define gav(x) gav_real (__LINE__, (x))
241 u16 clock_speed_index
; /* clock_speed (REAL, not DDR) / 133.(3) - 3 */
242 u16 fsb_frequency
; /* in 1.(1)/2 MHz. */
243 u8 is_x16_module
[2][2]; /* [CHANNEL][SLOT] */
244 u8 density
[2][2]; /* [CHANNEL][SLOT] */
245 u8 populated_ranks
[2][2][2]; /* [CHANNEL][SLOT][RANK] */
246 int rank_start
[2][2][2];
248 u8 board_lane_delay
[9];
251 u8 max_supported_clock_speed_index
;
253 u8 spd
[2][2][151]; /* [CHANNEL][SLOT][BYTE] */
255 u8 populated_ranks_mask
[2];
256 u8 max_slots_used_in_channel
;
260 unsigned total_memory_mb
;
261 unsigned interleaved_part_mb
;
262 unsigned non_interleaved_part_mb
;
266 unsigned memory_reserved_for_heci_mb
;
268 struct ram_training training
;
269 u32 last_500_command
[2];
274 u8 some_delay_1_cycle_floor
;
275 u8 some_delay_2_halfcycles_ceil
;
276 u8 some_delay_3_ps_rounded
;
278 const struct ram_training
*cached_training
;
282 write_500(struct raminfo
*info
, int channel
, u32 val
, u16 addr
, int bits
,
287 read_500(struct raminfo
*info
, int channel
, u16 addr
, int split
)
290 info
->last_500_command
[channel
] = 0x80000000;
291 write_mchbar32(0x500 + (channel
<< 10), 0);
292 while (read_mchbar32(0x500 + (channel
<< 10)) & 0x800000);
293 write_mchbar32(0x500 + (channel
<< 10),
295 (((read_mchbar8(0x246 + (channel
<< 10)) >> 2) &
297 while (read_mchbar32(0x500 + (channel
<< 10)) & 0x800000);
298 val
= read_mchbar32(0x508 + (channel
<< 10));
299 return val
& ((1 << split
) - 1);
304 write_500(struct raminfo
*info
, int channel
, u32 val
, u16 addr
, int bits
,
307 if (info
->last_500_command
[channel
] == 0x80000000) {
308 info
->last_500_command
[channel
] = 0x40000000;
309 write_500(info
, channel
, 0, 0xb61, 0, 0);
311 write_mchbar32(0x500 + (channel
<< 10), 0);
312 while (read_mchbar32(0x500 + (channel
<< 10)) & 0x800000);
313 write_mchbar32(0x504 + (channel
<< 10),
314 (val
& ((1 << bits
) - 1)) | (2 << bits
) | (flag
<<
316 write_mchbar32(0x500 + (channel
<< 10), 0x40000000 | addr
);
317 while (read_mchbar32(0x500 + (channel
<< 10)) & 0x800000);
320 static int rw_test(int rank
)
322 const u32 mask
= 0xf00fc33c;
325 for (i
= 0; i
< 64; i
++)
326 write32p((rank
<< 28) | (i
<< 2), 0);
328 for (i
= 0; i
< 64; i
++)
329 gav(read32p((rank
<< 28) | (i
<< 2)));
331 for (i
= 0; i
< 32; i
++) {
332 u32 pat
= (((mask
>> i
) & 1) ? 0xffffffff : 0);
333 write32p((rank
<< 28) | (i
<< 3), pat
);
334 write32p((rank
<< 28) | (i
<< 3) | 4, pat
);
337 for (i
= 0; i
< 32; i
++) {
338 u8 pat
= (((mask
>> i
) & 1) ? 0xff : 0);
341 gav(val
= read32p((rank
<< 28) | (i
<< 3)));
342 for (j
= 0; j
< 4; j
++)
343 if (((val
>> (j
* 8)) & 0xff) != pat
)
345 gav(val
= read32p((rank
<< 28) | (i
<< 3) | 4));
346 for (j
= 0; j
< 4; j
++)
347 if (((val
>> (j
* 8)) & 0xff) != pat
)
351 for (i
= 0; i
< 64; i
++)
352 write32p((rank
<< 28) | (i
<< 2), 0);
354 for (i
= 0; i
< 64; i
++)
355 gav(read32p((rank
<< 28) | (i
<< 2)));
361 program_timings(struct raminfo
*info
, u16 base
, int channel
, int slot
, int rank
)
364 for (lane
= 0; lane
< 8; lane
++) {
365 write_500(info
, channel
,
368 lane_timings
[2][channel
][slot
][rank
][lane
],
369 get_timing_register_addr(lane
, 2, slot
, rank
), 9, 0);
370 write_500(info
, channel
,
373 lane_timings
[3][channel
][slot
][rank
][lane
],
374 get_timing_register_addr(lane
, 3, slot
, rank
), 9, 0);
378 static void write_26c(int channel
, u16 si
)
380 write_mchbar32(0x26c + (channel
<< 10), 0x03243f35);
381 write_mchbar32(0x268 + (channel
<< 10), 0xcfc00000 | (si
<< 9));
382 write_mchbar16(0x2b9 + (channel
<< 10), si
);
385 static u32
get_580(int channel
, u8 addr
)
388 gav(read_1d0(0x142, 3));
389 write_mchbar8(0x5ff, 0x0); /* OK */
390 write_mchbar8(0x5ff, 0x80); /* OK */
391 write_mchbar32(0x580 + (channel
<< 10), 0x8493c012 | addr
);
392 write_mchbar8(0x580 + (channel
<< 10),
393 read_mchbar8(0x580 + (channel
<< 10)) | 1);
394 while (!((ret
= read_mchbar32(0x580 + (channel
<< 10))) & 0x10000));
395 write_mchbar8(0x580 + (channel
<< 10),
396 read_mchbar8(0x580 + (channel
<< 10)) & ~1);
/* NOTE(review): appears to select a cached/precomputed RAM configuration
 * path when nonzero; 0 disables it — confirm against the code that reads
 * this flag. */
const int cached_config = 0;
402 #define NUM_CHANNELS 2
405 #define RANK_SHIFT 28
406 #define CHANNEL_SHIFT 10
408 #include "raminit_tables.c"
410 static void seq9(struct raminfo
*info
, int channel
, int slot
, int rank
)
414 for (i
= 0; i
< 2; i
++)
415 for (lane
= 0; lane
< 8; lane
++)
416 write_500(info
, channel
,
417 info
->training
.lane_timings
[i
+
419 [rank
][lane
], get_timing_register_addr(lane
,
425 write_1d0(1, 0x103, 6, 1);
426 for (lane
= 0; lane
< 8; lane
++)
427 write_500(info
, channel
,
429 lane_timings
[0][channel
][slot
][rank
][lane
],
430 get_timing_register_addr(lane
, 0, slot
, rank
), 9, 0);
432 for (i
= 0; i
< 2; i
++) {
433 for (lane
= 0; lane
< 8; lane
++)
434 write_500(info
, channel
,
435 info
->training
.lane_timings
[i
+
437 [rank
][lane
], get_timing_register_addr(lane
,
442 gav(get_580(channel
, ((i
+ 1) << 2) | (rank
<< 5)));
445 gav(read_1d0(0x142, 3)); // = 0x10408118
446 write_mchbar8(0x5ff, 0x0); /* OK */
447 write_mchbar8(0x5ff, 0x80); /* OK */
448 write_1d0(0x2, 0x142, 3, 1);
449 for (lane
= 0; lane
< 8; lane
++) {
450 // printk (BIOS_ERR, "before: %x\n", info->training.lane_timings[2][channel][slot][rank][lane]);
451 info
->training
.lane_timings
[2][channel
][slot
][rank
][lane
] =
452 read_500(info
, channel
,
453 get_timing_register_addr(lane
, 2, slot
, rank
), 9);
454 //printk (BIOS_ERR, "after: %x\n", info->training.lane_timings[2][channel][slot][rank][lane]);
455 info
->training
.lane_timings
[3][channel
][slot
][rank
][lane
] =
456 info
->training
.lane_timings
[2][channel
][slot
][rank
][lane
] +
461 static int count_ranks_in_channel(struct raminfo
*info
, int channel
)
465 for (slot
= 0; slot
< NUM_SLOTS
; slot
++)
466 for (rank
= 0; rank
< NUM_SLOTS
; rank
++)
467 res
+= info
->populated_ranks
[channel
][slot
][rank
];
472 config_rank(struct raminfo
*info
, int s3resume
, int channel
, int slot
, int rank
)
476 write_1d0(0, 0x178, 7, 1);
477 seq9(info
, channel
, slot
, rank
);
478 program_timings(info
, 0x80, channel
, slot
, rank
);
481 add
= count_ranks_in_channel(info
, 1);
485 gav(rw_test(rank
+ add
));
486 program_timings(info
, 0x00, channel
, slot
, rank
);
488 gav(rw_test(rank
+ add
));
490 gav(rw_test(rank
+ add
));
491 write_1d0(0, 0x142, 3, 1);
492 write_1d0(0, 0x103, 6, 1);
494 gav(get_580(channel
, 0xc | (rank
<< 5)));
495 gav(read_1d0(0x142, 3));
497 write_mchbar8(0x5ff, 0x0); /* OK */
498 write_mchbar8(0x5ff, 0x80); /* OK */
501 static void set_4cf(struct raminfo
*info
, int channel
, u8 val
)
503 gav(read_500(info
, channel
, 0x4cf, 4)); // = 0xc2300cf9
504 write_500(info
, channel
, val
, 0x4cf, 4, 1);
505 gav(read_500(info
, channel
, 0x659, 4)); // = 0x80300839
506 write_500(info
, channel
, val
, 0x659, 4, 1);
507 gav(read_500(info
, channel
, 0x697, 4)); // = 0x80300839
508 write_500(info
, channel
, val
, 0x697, 4, 1);
511 static void set_334(int zero
)
514 const u32 val3
[] = { 0x2a2b2a2b, 0x26272627, 0x2e2f2e2f, 0x2a2b };
517 for (channel
= 0; channel
< NUM_CHANNELS
; channel
++) {
518 for (j
= 0; j
< 4; j
++) {
519 u32 a
= (j
== 1) ? 0x29292929 : 0x31313131;
520 u32 lmask
= (j
== 3) ? 0xffff : 0xffffffff;
522 if ((j
== 0 || j
== 3) && zero
)
529 for (k
= 0; k
< 2; k
++) {
530 write_mchbar32(0x138 + 8 * k
,
531 (channel
<< 26) | (j
<< 24));
532 gav(vd8
[1][(channel
<< 3) | (j
<< 1) | k
] =
533 read_mchbar32(0x138 + 8 * k
));
534 gav(vd8
[0][(channel
<< 3) | (j
<< 1) | k
] =
535 read_mchbar32(0x13c + 8 * k
));
538 write_mchbar32(0x334 + (channel
<< 10) + (j
* 0x44),
540 write_mchbar32(0x32c + (channel
<< 10) + (j
* 0x44),
541 zero
? 0 : (0x18191819 & lmask
));
542 write_mchbar16(0x34a + (channel
<< 10) + (j
* 0x44), c
);
543 write_mchbar32(0x33c + (channel
<< 10) + (j
* 0x44),
544 zero
? 0 : (a
& lmask
));
545 write_mchbar32(0x344 + (channel
<< 10) + (j
* 0x44),
546 zero
? 0 : (a
& lmask
));
550 write_mchbar32(0x130, read_mchbar32(0x130) | 1); /* OK */
551 while (read_mchbar8(0x130) & 1); /* OK */
554 static void rmw_1d0(u16 addr
, u32
and, u32
or, int split
, int flag
)
557 v
= read_1d0(addr
, split
);
558 write_1d0((v
& and) | or, addr
, split
, flag
);
561 static int find_highest_bit_set(u16 val
)
564 for (i
= 15; i
>= 0; i
--)
570 static int find_lowest_bit_set32(u32 val
)
573 for (i
= 0; i
< 32; i
++)
584 MEMORY_BUS_WIDTH
= 8,
585 TIMEBASE_DIVIDEND
= 10,
586 TIMEBASE_DIVISOR
= 11,
589 CAS_LATENCIES_LSB
= 14,
590 CAS_LATENCIES_MSB
= 15,
591 CAS_LATENCY_TIME
= 16,
592 THERMAL_AND_REFRESH
= 31,
593 REFERENCE_RAW_CARD_USED
= 62,
594 RANK1_ADDRESS_MAPPING
= 63
597 static void calculate_timings(struct raminfo
*info
)
600 unsigned cas_latency_time
;
601 unsigned supported_cas_latencies
;
602 unsigned channel
, slot
;
603 unsigned clock_speed_index
;
604 unsigned min_cas_latency
;
605 unsigned cas_latency
;
606 unsigned max_clock_index
;
608 /* Find common CAS latency */
609 supported_cas_latencies
= 0x3fe;
610 for (channel
= 0; channel
< NUM_CHANNELS
; channel
++)
611 for (slot
= 0; slot
< NUM_SLOTS
; slot
++)
612 if (info
->populated_ranks
[channel
][slot
][0])
613 supported_cas_latencies
&=
616 spd
[channel
][slot
][CAS_LATENCIES_LSB
] |
618 spd
[channel
][slot
][CAS_LATENCIES_MSB
] <<
621 max_clock_index
= min(3, info
->max_supported_clock_speed_index
);
623 cycletime
= min_cycletime
[max_clock_index
];
624 cas_latency_time
= min_cas_latency_time
[max_clock_index
];
626 for (channel
= 0; channel
< NUM_CHANNELS
; channel
++)
627 for (slot
= 0; slot
< NUM_SLOTS
; slot
++)
628 if (info
->populated_ranks
[channel
][slot
][0]) {
633 spd
[channel
][slot
][TIMEBASE_DIVIDEND
] /
634 info
->spd
[channel
][slot
][TIMEBASE_DIVISOR
];
638 info
->spd
[channel
][slot
][CYCLETIME
]);
640 max(cas_latency_time
,
643 spd
[channel
][slot
][CAS_LATENCY_TIME
]);
645 for (clock_speed_index
= 0; clock_speed_index
< 3; clock_speed_index
++) {
646 if (cycletime
== min_cycletime
[clock_speed_index
])
648 if (cycletime
> min_cycletime
[clock_speed_index
]) {
650 cycletime
= min_cycletime
[clock_speed_index
];
654 min_cas_latency
= CEIL_DIV(cas_latency_time
, cycletime
);
656 while (supported_cas_latencies
) {
657 cas_latency
= find_highest_bit_set(supported_cas_latencies
) + 3;
658 if (cas_latency
<= min_cas_latency
)
660 supported_cas_latencies
&=
661 ~(1 << find_highest_bit_set(supported_cas_latencies
));
664 if (cas_latency
!= min_cas_latency
&& clock_speed_index
)
667 if (cas_latency
* min_cycletime
[clock_speed_index
] > 20000)
668 die("Couldn't configure DRAM");
669 info
->clock_speed_index
= clock_speed_index
;
670 info
->cas_latency
= cas_latency
;
673 static void program_base_timings(struct raminfo
*info
)
676 unsigned slot
, rank
, lane
;
677 unsigned extended_silicon_revision
;
680 extended_silicon_revision
= info
->silicon_revision
;
681 if (info
->silicon_revision
== 0)
682 for (channel
= 0; channel
< NUM_CHANNELS
; channel
++)
683 for (slot
= 0; slot
< NUM_SLOTS
; slot
++)
685 spd
[channel
][slot
][MODULE_TYPE
] & 0xF) ==
687 extended_silicon_revision
= 4;
689 for (channel
= 0; channel
< NUM_CHANNELS
; channel
++) {
690 for (slot
= 0; slot
< NUM_SLOTS
; slot
++)
691 for (rank
= 0; rank
< NUM_SLOTS
; rank
++) {
693 if (!info
->populated_ranks
[channel
][slot
][rank
])
696 for (lane
= 0; lane
< 9; lane
++) {
702 spd
[channel
][slot
][MODULE_TYPE
] &
708 [REFERENCE_RAW_CARD_USED
] &
710 if (reference_card
== 3)
715 if (reference_card
== 5)
723 lane_timings
[0][channel
][slot
][rank
]
728 lane_timings
[1][channel
][slot
][rank
]
731 for (tm_reg
= 2; tm_reg
< 4; tm_reg
++)
734 [channel
][slot
][rank
][lane
]
737 [extended_silicon_revision
]
741 + info
->max4048
[channel
]
744 [extended_silicon_revision
]
746 mode4030
[channel
]][slot
]
750 for (tm_reg
= 0; tm_reg
< 4; tm_reg
++)
751 write_500(info
, channel
,
754 [channel
][slot
][rank
]
756 get_timing_register_addr
762 if (!(extended_silicon_revision
!= 4
764 populated_ranks_mask
[channel
] & 5) ==
768 [REFERENCE_RAW_CARD_USED
] & 0x1F)
771 u16_FFFE0EB8
[0][info
->
775 [REFERENCE_RAW_CARD_USED
] & 0x1F)
778 u16_FFFE0EB8
[1][info
->
782 for (i
= 0; i
< 3; i
++)
783 write_500(info
, channel
,
785 info
->max4048
[channel
]
788 [extended_silicon_revision
]
790 mode4030
[channel
]][info
->
792 u16_fffd0c50
[i
][slot
][rank
],
794 write_500(info
, channel
,
795 (info
->max4048
[channel
] +
797 [extended_silicon_revision
][info
->
802 u16_fffd0c70
[slot
][rank
], 7, 1);
804 if (!info
->populated_ranks_mask
[channel
])
806 for (i
= 0; i
< 3; i
++)
807 write_500(info
, channel
,
808 (info
->max4048
[channel
] +
809 info
->avg4044
[channel
]
812 [extended_silicon_revision
][info
->
816 u16_fffd0c68
[i
], 8, 1);
820 static unsigned int fsbcycle_ps(struct raminfo
*info
)
822 return 900000 / info
->fsb_frequency
;
825 /* The time of DDR transfer in ps. */
826 static unsigned int halfcycle_ps(struct raminfo
*info
)
828 return 3750 / (info
->clock_speed_index
+ 3);
/* The time of one full clock cycle in ps (two DDR transfers). */
static unsigned int cycle_ps(struct raminfo *info)
{
	return 2 * halfcycle_ps(info);
}
837 /* Frequency in 1.(1)=10/9 MHz units. */
838 static unsigned frequency_11(struct raminfo
*info
)
840 return (info
->clock_speed_index
+ 3) * 120;
/* Clock frequency in 0.1 MHz units. */
static unsigned frequency_01(struct raminfo *info)
{
	return 100 * frequency_11(info) / 9;
}
/* Convert a duration in picoseconds to a count of DDR half-cycles. */
static unsigned ps_to_halfcycles(struct raminfo *info, unsigned int ps)
{
	return frequency_11(info) * 2 * ps / 900000;
}
/* Convert a duration in nanoseconds to a count of full clock cycles. */
static unsigned ns_to_cycles(struct raminfo *info, unsigned int ns)
{
	return frequency_11(info) * ns / 900;
}
859 static void compute_derived_timings(struct raminfo
*info
)
861 unsigned channel
, slot
, rank
;
862 int extended_silicon_revision
;
865 int some_delay_2_halfcycles_ceil
;
866 int some_delay_2_halfcycles_floor
;
868 int some_delay_3_halfcycles
;
869 int some_delay_3_ps_rounded
;
870 int some_delay_1_cycle_ceil
;
871 int some_delay_1_cycle_floor
;
873 some_delay_3_halfcycles
= 0;
874 some_delay_3_ps_rounded
= 0;
875 extended_silicon_revision
= info
->silicon_revision
;
876 if (!info
->silicon_revision
)
877 for (channel
= 0; channel
< NUM_CHANNELS
; channel
++)
878 for (slot
= 0; slot
< NUM_SLOTS
; slot
++)
880 spd
[channel
][slot
][MODULE_TYPE
] & 0xF) ==
882 extended_silicon_revision
= 4;
883 if (info
->board_lane_delay
[7] < 5)
884 info
->board_lane_delay
[7] = 5;
885 info
->revision_flag_1
= 2;
886 if (info
->silicon_revision
== 2 || info
->silicon_revision
== 3)
887 info
->revision_flag_1
= 0;
888 if (info
->revision
< 16)
889 info
->revision_flag_1
= 0;
891 if (info
->revision
< 8)
892 info
->revision_flag_1
= 0;
893 if (info
->revision
>= 8 && (info
->silicon_revision
== 0
894 || info
->silicon_revision
== 1))
895 some_delay_2_ps
= 735;
897 some_delay_2_ps
= 750;
899 if (info
->revision
>= 0x10 && (info
->silicon_revision
== 0
900 || info
->silicon_revision
== 1))
901 some_delay_1_ps
= 3929;
903 some_delay_1_ps
= 3490;
905 some_delay_1_cycle_floor
= some_delay_1_ps
/ cycle_ps(info
);
906 some_delay_1_cycle_ceil
= some_delay_1_ps
/ cycle_ps(info
);
907 if (some_delay_1_ps
% cycle_ps(info
))
908 some_delay_1_cycle_ceil
++;
910 some_delay_1_cycle_floor
--;
911 info
->some_delay_1_cycle_floor
= some_delay_1_cycle_floor
;
912 if (info
->revision_flag_1
)
913 some_delay_2_ps
= halfcycle_ps(info
) >> 6;
915 max(some_delay_1_ps
- 30,
916 2 * halfcycle_ps(info
) * (some_delay_1_cycle_ceil
- 1) + 1000) +
919 halfcycle_ps(info
) - some_delay_2_ps
% halfcycle_ps(info
);
920 if (info
->revision_flag_1
) {
921 if (some_delay_3_ps
< 150)
922 some_delay_3_halfcycles
= 0;
924 some_delay_3_halfcycles
=
925 (some_delay_3_ps
<< 6) / halfcycle_ps(info
);
926 some_delay_3_ps_rounded
=
927 halfcycle_ps(info
) * some_delay_3_halfcycles
>> 6;
929 some_delay_2_halfcycles_ceil
=
930 (some_delay_2_ps
+ halfcycle_ps(info
) - 1) / halfcycle_ps(info
) -
931 2 * (some_delay_1_cycle_ceil
- 1);
932 if (info
->revision_flag_1
&& some_delay_3_ps
< 150)
933 some_delay_2_halfcycles_ceil
++;
934 some_delay_2_halfcycles_floor
= some_delay_2_halfcycles_ceil
;
935 if (info
->revision
< 0x10)
936 some_delay_2_halfcycles_floor
=
937 some_delay_2_halfcycles_ceil
- 1;
938 if (!info
->revision_flag_1
)
939 some_delay_2_halfcycles_floor
++;
940 info
->some_delay_2_halfcycles_ceil
= some_delay_2_halfcycles_ceil
;
941 info
->some_delay_3_ps_rounded
= some_delay_3_ps_rounded
;
942 if ((info
->populated_ranks
[0][0][0] && info
->populated_ranks
[0][1][0])
943 || (info
->populated_ranks
[1][0][0]
944 && info
->populated_ranks
[1][1][0]))
945 info
->max_slots_used_in_channel
= 2;
947 info
->max_slots_used_in_channel
= 1;
948 for (channel
= 0; channel
< 2; channel
++)
949 write_mchbar32(0x244 + (channel
<< 10),
950 ((info
->revision
< 8) ? 1 : 0x200)
951 | ((2 - info
->max_slots_used_in_channel
) << 17) |
952 (channel
<< 21) | (info
->
953 some_delay_1_cycle_floor
<<
955 if (info
->max_slots_used_in_channel
== 1) {
956 info
->mode4030
[0] = (count_ranks_in_channel(info
, 0) == 2);
957 info
->mode4030
[1] = (count_ranks_in_channel(info
, 1) == 2);
959 info
->mode4030
[0] = ((count_ranks_in_channel(info
, 0) == 1) || (count_ranks_in_channel(info
, 0) == 2)) ? 2 : 3; /* 2 if 1 or 2 ranks */
960 info
->mode4030
[1] = ((count_ranks_in_channel(info
, 1) == 1)
961 || (count_ranks_in_channel(info
, 1) ==
964 for (channel
= 0; channel
< NUM_CHANNELS
; channel
++) {
971 if (!info
->populated_ranks_mask
[channel
])
975 min_of_unk_2
= 32767;
979 for (i
= 0; i
< 3; i
++) {
981 if (info
->revision
< 8)
983 u8_FFFD1891
[0][channel
][info
->
987 (info
->revision
>= 0x10
988 || info
->revision_flag_1
))
990 u8_FFFD1891
[1][channel
][info
->
995 for (slot
= 0; slot
< NUM_SLOTS
; slot
++)
996 for (rank
= 0; rank
< NUM_RANKS
; rank
++) {
1001 populated_ranks
[channel
][slot
]
1004 if (extended_silicon_revision
== 4
1006 populated_ranks_mask
[channel
] &
1010 [REFERENCE_RAW_CARD_USED
] &
1015 b
= u16_fe0eb8
[0][info
->
1020 [REFERENCE_RAW_CARD_USED
]
1025 b
= u16_fe0eb8
[1][info
->
1029 min_of_unk_2
= min(min_of_unk_2
, a
);
1030 min_of_unk_2
= min(min_of_unk_2
, b
);
1038 u8_FFFD0EF8
[channel
]
1039 [extended_silicon_revision
]
1041 mode4030
[channel
]][info
->
1051 u8_FFFD17E0
[channel
]
1052 [extended_silicon_revision
][info
->
1055 [info
->clock_speed_index
] + min_of_unk_2
;
1057 max_of_unk
= max(max_of_unk
, unk1
- t
);
1061 info
->avg4044
[channel
] = sum
/ count
;
1062 info
->max4048
[channel
] = max_of_unk
;
1066 static void jedec_read(struct raminfo
*info
,
1067 int channel
, int slot
, int rank
,
1068 int total_rank
, u8 addr3
, unsigned int value
)
1070 /* Handle mirrored mapping. */
1071 if ((rank
& 1) && (info
->spd
[channel
][slot
][RANK1_ADDRESS_MAPPING
] & 1))
1073 (addr3
& 0xCF) | ((addr3
& 0x10) << 1) | ((addr3
>> 1) &
1075 write_mchbar8(0x271, addr3
| (read_mchbar8(0x271) & 0xC1));
1076 write_mchbar8(0x671, addr3
| (read_mchbar8(0x671) & 0xC1));
1078 /* Handle mirrored mapping. */
1079 if ((rank
& 1) && (info
->spd
[channel
][slot
][RANK1_ADDRESS_MAPPING
] & 1))
1081 (value
& ~0x1f8) | ((value
>> 1) & 0xa8) | ((value
& 0xa8)
1084 read32p((value
<< 3) | (total_rank
<< 28));
1086 write_mchbar8(0x271, (read_mchbar8(0x271) & 0xC3) | 2);
1087 write_mchbar8(0x671, (read_mchbar8(0x671) & 0xC3) | 2);
1089 read32p(total_rank
<< 28);
1100 MR0_BT_INTERLEAVED
= 8,
1101 MR0_DLL_RESET_ON
= 256
1105 MR2_RTT_WR_DISABLED
= 0,
1109 static void jedec_init(struct raminfo
*info
)
1112 int channel
, slot
, rank
;
1115 int self_refresh_temperature
;
1116 int auto_self_refresh
;
1118 auto_self_refresh
= 1;
1119 self_refresh_temperature
= 1;
1120 if (info
->board_lane_delay
[3] <= 10) {
1121 if (info
->board_lane_delay
[3] <= 8)
1122 write_recovery
= info
->board_lane_delay
[3] - 4;
1128 FOR_POPULATED_RANKS
{
1129 auto_self_refresh
&=
1130 (info
->spd
[channel
][slot
][THERMAL_AND_REFRESH
] >> 2) & 1;
1131 self_refresh_temperature
&=
1132 info
->spd
[channel
][slot
][THERMAL_AND_REFRESH
] & 1;
1134 if (auto_self_refresh
== 1)
1135 self_refresh_temperature
= 0;
1137 dll_on
= ((info
->silicon_revision
!= 2 && info
->silicon_revision
!= 3)
1138 || (info
->populated_ranks
[0][0][0]
1139 && info
->populated_ranks
[0][1][0])
1140 || (info
->populated_ranks
[1][0][0]
1141 && info
->populated_ranks
[1][1][0]));
1145 for (channel
= NUM_CHANNELS
- 1; channel
>= 0; channel
--) {
1146 int rtt
, rtt_wr
= MR2_RTT_WR_DISABLED
;
1149 if (info
->silicon_revision
== 2 || info
->silicon_revision
== 3) {
1152 if (info
->clock_speed_index
!= 0) {
1154 if (info
->populated_ranks_mask
[channel
] == 3)
1158 if ((info
->populated_ranks_mask
[channel
] & 5) == 5) {
1168 write_mchbar16(0x588 + (channel
<< 10), 0x0);
1169 write_mchbar16(0x58a + (channel
<< 10), 0x4);
1170 write_mchbar16(0x58c + (channel
<< 10), rtt
| MR1_ODS34OHM
);
1171 write_mchbar16(0x58e + (channel
<< 10), rzq_reg58e
| 0x82);
1172 write_mchbar16(0x590 + (channel
<< 10), 0x1282);
1174 for (slot
= 0; slot
< NUM_SLOTS
; slot
++)
1175 for (rank
= 0; rank
< NUM_RANKS
; rank
++)
1176 if (info
->populated_ranks
[channel
][slot
][rank
]) {
1177 jedec_read(info
, channel
, slot
, rank
,
1182 | (auto_self_refresh
<< 6) |
1183 (self_refresh_temperature
<<
1185 jedec_read(info
, channel
, slot
, rank
,
1186 total_rank
, 0x38, 0);
1187 jedec_read(info
, channel
, slot
, rank
,
1189 rtt
| MR1_ODS34OHM
);
1190 jedec_read(info
, channel
, slot
, rank
,
1193 (write_recovery
<< 9)
1194 | ((info
->cas_latency
- 4) <<
1195 4) | MR0_BT_INTERLEAVED
|
1202 static void program_modules_memory_map(struct raminfo
*info
, int pre_jedec
)
1204 unsigned channel
, slot
, rank
;
1205 unsigned int total_mb
[2] = { 0, 0 }; /* total memory per channel in MB */
1206 unsigned int channel_0_non_interleaved
;
1209 if (info
->populated_ranks
[channel
][slot
][rank
]) {
1210 total_mb
[channel
] +=
1211 pre_jedec
? 256 : (256 << info
->
1212 density
[channel
][slot
] >> info
->
1213 is_x16_module
[channel
][slot
]);
1214 write_mchbar8(0x208 + rank
+ 2 * slot
+ (channel
<< 10),
1215 (pre_jedec
? (1 | ((1 + 1) << 1))
1217 is_x16_module
[channel
][slot
] |
1218 ((info
->density
[channel
][slot
] +
1221 write_mchbar16(0x200 + (channel
<< 10) + 4 * slot
+ 2 * rank
,
1222 total_mb
[channel
] >> 6);
1225 info
->total_memory_mb
= total_mb
[0] + total_mb
[1];
1227 info
->interleaved_part_mb
=
1228 pre_jedec
? 0 : 2 * min(total_mb
[0], total_mb
[1]);
1229 info
->non_interleaved_part_mb
=
1230 total_mb
[0] + total_mb
[1] - info
->interleaved_part_mb
;
1231 channel_0_non_interleaved
= total_mb
[0] - info
->interleaved_part_mb
/ 2;
1232 write_mchbar32(0x100,
1233 channel_0_non_interleaved
| (info
->
1234 non_interleaved_part_mb
<<
1237 write_mchbar16(0x104, info
->interleaved_part_mb
);
1240 static void program_board_delay(struct raminfo
*info
)
1242 int cas_latency_shift
;
1244 int some_delay_3_half_cycles
;
1246 unsigned channel
, i
;
1247 int high_multiplier
;
1249 int cas_latency_derived
;
1251 high_multiplier
= 0;
1252 some_delay_ns
= 200;
1253 some_delay_3_half_cycles
= 4;
1254 cas_latency_shift
= info
->silicon_revision
== 0
1255 || info
->silicon_revision
== 1 ? 1 : 0;
1256 if (info
->revision
< 8) {
1257 some_delay_ns
= 600;
1258 cas_latency_shift
= 0;
1263 ((info
->clock_speed_index
> 1
1264 || (info
->silicon_revision
!= 2
1265 && info
->silicon_revision
!= 3))) ^ (info
->revision
>=
1267 write_500(info
, 0, speed_bit
| ((!info
->use_ecc
) << 1), 0x60e,
1269 write_500(info
, 1, speed_bit
| ((!info
->use_ecc
) << 1), 0x60e,
1271 if (info
->revision
>= 0x10 && info
->clock_speed_index
<= 1
1272 && (info
->silicon_revision
== 2
1273 || info
->silicon_revision
== 3))
1274 rmw_1d0(0x116, 5, 2, 4, 1);
1276 write_mchbar32(0x120,
1277 (1 << (info
->max_slots_used_in_channel
+ 28)) |
1280 write_mchbar8(0x124,
1281 info
->board_lane_delay
[4] +
1282 ((frequency_01(info
) + 999) / 1000));
1283 write_mchbar16(0x125, 0x1360);
1284 write_mchbar8(0x127, 0x40);
1285 if (info
->fsb_frequency
< frequency_11(info
) / 2) {
1286 unsigned some_delay_2_half_cycles
;
1287 high_multiplier
= 1;
1288 some_delay_2_half_cycles
= ps_to_halfcycles(info
,
1301 some_delay_3_half_cycles
=
1302 min((some_delay_2_half_cycles
+
1303 (frequency_11(info
) * 2) * (28 -
1304 some_delay_2_half_cycles
) /
1305 (frequency_11(info
) * 2 -
1306 4 * (info
->fsb_frequency
))) >> 3, 7);
1308 if (read_mchbar8(0x2ca9) & 1)
1309 some_delay_3_half_cycles
= 3;
1310 for (channel
= 0; channel
< NUM_CHANNELS
; channel
++) {
1311 write_mchbar32(0x220 + (channel
<< 10),
1312 read_mchbar32(0x220 +
1313 (channel
<< 10)) | 0x18001117);
1314 write_mchbar32(0x224 + (channel
<< 10),
1315 (info
->max_slots_used_in_channel
- 1)
1317 ((info
->cas_latency
- 5 -
1318 info
->clock_speed_index
) << 21)
1320 ((info
->max_slots_used_in_channel
+
1321 info
->cas_latency
- cas_latency_shift
-
1323 | ((info
->cas_latency
- cas_latency_shift
- 4) <<
1326 ((info
->cas_latency
- info
->clock_speed_index
+
1327 info
->max_slots_used_in_channel
- 6) << 8));
1328 write_mchbar32(0x228 + (channel
<< 10),
1329 info
->max_slots_used_in_channel
);
1330 write_mchbar8(0x239 + (channel
<< 10), 32);
1331 write_mchbar32(0x248 + (channel
<< 10),
1332 (high_multiplier
<< 24) |
1333 (some_delay_3_half_cycles
<< 25) | 0x840000);
1334 write_mchbar32(0x278 + (channel
<< 10), 0xc362042);
1335 write_mchbar32(0x27c + (channel
<< 10), 0x8b000062);
1336 write_mchbar32(0x24c + (channel
<< 10),
1338 clock_speed_index
) << 17) | (((2 +
1343 clock_speed_index
)))
1346 write_mchbar8(0x267 + (channel
<< 10), 0x4);
1347 write_mchbar16(0x272 + (channel
<< 10), 0x155);
1348 write_mchbar32(0x2bc + (channel
<< 10),
1349 (read_mchbar32(0x2bc + (channel
<< 10)) &
1353 write_500(info
, channel
,
1354 ((!info
->populated_ranks
[channel
][1][1])
1355 | (!info
->populated_ranks
[channel
][1][0] << 1)
1356 | (!info
->populated_ranks
[channel
][0][1] << 2)
1357 | (!info
->populated_ranks
[channel
][0][0] << 3)),
1361 write_mchbar8(0x2c4, ((1 + (info
->clock_speed_index
!= 0)) << 6) | 0xC);
1363 u8 freq_divisor
= 2;
1364 if (info
->fsb_frequency
== frequency_11(info
))
1366 else if (2 * info
->fsb_frequency
< 3 * (frequency_11(info
) / 2))
1370 write_mchbar32(0x2c0, (freq_divisor
<< 11) | 0x6009c400);
1373 if (info
->board_lane_delay
[3] <= 10) {
1374 if (info
->board_lane_delay
[3] <= 8)
1375 lane_3_delay
= info
->board_lane_delay
[3];
1381 cas_latency_derived
= info
->cas_latency
- info
->clock_speed_index
+ 2;
1382 if (info
->clock_speed_index
> 1)
1383 cas_latency_derived
++;
1384 for (channel
= 0; channel
< NUM_CHANNELS
; channel
++) {
1385 write_mchbar32(0x240 + (channel
<< 10),
1386 ((info
->clock_speed_index
==
1387 0) * 0x11000) | 0x1002100 | ((2 +
1393 write_500(info
, channel
, (info
->clock_speed_index
<< 1) | 1,
1395 write_500(info
, channel
,
1396 info
->clock_speed_index
+ 2 * info
->cas_latency
- 7,
1399 write_mchbar32(0x250 + (channel
<< 10),
1400 ((lane_3_delay
+ info
->clock_speed_index
+
1402 | (info
->board_lane_delay
[7] << 2) | (info
->
1405 | (info
->board_lane_delay
[1] << 25) | (info
->
1409 write_mchbar32(0x254 + (channel
<< 10),
1411 board_lane_delay
[1] >> 3) | ((info
->
1417 0x80 | (info
->board_lane_delay
[6] << 1) | (info
->
1421 (cas_latency_derived
<< 16) | 0x4700000);
1422 write_mchbar32(0x258 + (channel
<< 10),
1423 ((info
->board_lane_delay
[5] +
1424 info
->clock_speed_index
+
1425 9) << 12) | ((info
->clock_speed_index
-
1426 info
->cas_latency
+ 12) << 8)
1427 | (info
->board_lane_delay
[2] << 17) | (info
->
1431 write_mchbar32(0x25c + (channel
<< 10),
1432 (info
->board_lane_delay
[1] << 1) | (info
->
1436 write_mchbar8(0x264 + (channel
<< 10), 0xff);
1437 write_mchbar8(0x5f8 + (channel
<< 10),
1438 (cas_latency_shift
<< 3) | info
->use_ecc
);
1441 program_modules_memory_map(info
, 1);
1443 write_mchbar16(0x610,
1444 (min(ns_to_cycles(info
, some_delay_ns
) / 2, 127) << 9)
1445 | (read_mchbar16(0x610) & 0x1C3) | 0x3C);
1446 write_mchbar16(0x612, read_mchbar16(0x612) | 0x100);
1447 write_mchbar16(0x214, read_mchbar16(0x214) | 0x3E00);
1448 for (i
= 0; i
< 8; i
++) {
1449 pci_write_config32(PCI_DEV (QUICKPATH_BUS
, 0, 1), 0x80 + 4 * i
,
1450 (info
->total_memory_mb
- 64) | !i
| 2);
1451 pci_write_config32(PCI_DEV (QUICKPATH_BUS
, 0, 1), 0xc0 + 4 * i
, 0);
1455 #define DEFAULT_PCI_MMIO_SIZE 2048
1456 #define HOST_BRIDGE PCI_DEVFN(0, 0)
1458 static unsigned int get_mmio_size(void)
1460 const struct device
*dev
;
1461 const struct northbridge_intel_nehalem_config
*cfg
= NULL
;
1463 dev
= dev_find_slot(0, HOST_BRIDGE
);
1465 cfg
= dev
->chip_info
;
1467 /* If this is zero, it just means devicetree.cb didn't set it */
1468 if (!cfg
|| cfg
->pci_mmio_size
== 0)
1469 return DEFAULT_PCI_MMIO_SIZE
;
1471 return cfg
->pci_mmio_size
;
1474 #define BETTER_MEMORY_MAP 0
1476 static void program_total_memory_map(struct raminfo
*info
)
1478 unsigned int TOM
, TOLUD
, TOUUD
;
1479 unsigned int quickpath_reserved
;
1480 unsigned int REMAPbase
;
1481 unsigned int uma_base_igd
;
1482 unsigned int uma_base_gtt
;
1483 unsigned int mmio_size
;
1485 unsigned int memory_map
[8];
1487 unsigned int current_limit
;
1488 unsigned int tseg_base
;
1489 int uma_size_igd
= 0, uma_size_gtt
= 0;
1491 memset(memory_map
, 0, sizeof(memory_map
));
1494 if (info
->uma_enabled
) {
1495 u16 t
= pci_read_config16(NORTHBRIDGE
, D0F0_GGC
);
1497 const int uma_sizes_gtt
[16] =
1498 { 0, 1, 0, 2, 0, 0, 0, 0, 0, 2, 3, 4, 42, 42, 42, 42 };
1500 const int uma_sizes_igd
[16] = {
1501 0, 0, 0, 0, 0, 32, 48, 64, 128, 256, 96, 160, 224, 352,
1505 uma_size_igd
= uma_sizes_igd
[(t
>> 4) & 0xF];
1506 uma_size_gtt
= uma_sizes_gtt
[(t
>> 8) & 0xF];
1510 mmio_size
= get_mmio_size();
1512 TOM
= info
->total_memory_mb
;
1515 TOUUD
= ALIGN_DOWN(TOM
- info
->memory_reserved_for_heci_mb
, 64);
1516 TOLUD
= ALIGN_DOWN(min(4096 - mmio_size
+ ALIGN_UP(uma_size_igd
+ uma_size_gtt
, 64)
1519 if (TOUUD
- TOLUD
> 64) {
1521 REMAPbase
= max(4096, TOUUD
);
1522 TOUUD
= TOUUD
- TOLUD
+ 4096;
1525 memory_map
[2] = TOUUD
| 1;
1526 quickpath_reserved
= 0;
1531 gav(t
= pci_read_config32(PCI_DEV(QUICKPATH_BUS
, 0, 1), 0x68));
1533 quickpath_reserved
=
1534 (1 << find_lowest_bit_set32(t
>> 20));
1537 TOUUD
-= quickpath_reserved
;
1540 if (info
->uma_enabled
) {
1541 u16 t
= pci_read_config16(NORTHBRIDGE
, D0F0_GGC
);
1543 const int uma_sizes_gtt
[16] =
1544 { 0, 1, 0, 2, 0, 0, 0, 0, 0, 2, 3, 4, 42, 42, 42, 42 };
1546 const int uma_sizes_igd
[16] = {
1547 0, 0, 0, 0, 0, 32, 48, 64, 128, 256, 96, 160, 224, 352,
1551 uma_size_igd
= uma_sizes_igd
[(t
>> 4) & 0xF];
1552 uma_size_gtt
= uma_sizes_gtt
[(t
>> 8) & 0xF];
1556 uma_base_igd
= TOLUD
- uma_size_igd
;
1557 uma_base_gtt
= uma_base_igd
- uma_size_gtt
;
1558 tseg_base
= ALIGN_DOWN(uma_base_gtt
, 64) - (CONFIG_SMM_TSEG_SIZE
>> 20);
1560 tseg_base
-= quickpath_reserved
;
1561 tseg_base
= ALIGN_DOWN(tseg_base
, 8);
1563 pci_write_config16(NORTHBRIDGE
, D0F0_TOLUD
, TOLUD
<< 4);
1564 pci_write_config16(NORTHBRIDGE
, D0F0_TOM
, TOM
>> 6);
1566 pci_write_config16(NORTHBRIDGE
, D0F0_REMAPBASE
, REMAPbase
>> 6);
1567 pci_write_config16(NORTHBRIDGE
, D0F0_REMAPLIMIT
, (TOUUD
- 64) >> 6);
1569 pci_write_config16(NORTHBRIDGE
, D0F0_TOUUD
, TOUUD
);
1571 if (info
->uma_enabled
) {
1572 pci_write_config32(NORTHBRIDGE
, D0F0_IGD_BASE
, uma_base_igd
<< 20);
1573 pci_write_config32(NORTHBRIDGE
, D0F0_GTT_BASE
, uma_base_gtt
<< 20);
1575 pci_write_config32(NORTHBRIDGE
, TSEG
, tseg_base
<< 20);
1578 memory_map
[0] = ALIGN_DOWN(uma_base_gtt
, 64) | 1;
1579 memory_map
[1] = 4096;
1580 for (i
= 0; i
< ARRAY_SIZE(memory_map
); i
++) {
1581 current_limit
= max(current_limit
, memory_map
[i
] & ~1);
1582 pci_write_config32(PCI_DEV(QUICKPATH_BUS
, 0, 1), 4 * i
+ 0x80,
1583 (memory_map
[i
] & 1) | ALIGN_DOWN(current_limit
-
1585 pci_write_config32(PCI_DEV(QUICKPATH_BUS
, 0, 1), 4 * i
+ 0xc0, 0);
1589 static void collect_system_info(struct raminfo
*info
)
1595 /* Wait for some bit, maybe TXT clear. */
1596 while (!(read8((u8
*)0xfed40000) & (1 << 7)));
1598 if (!info
->heci_bar
)
1599 gav(info
->heci_bar
=
1600 pci_read_config32(HECIDEV
, HECIBAR
) & 0xFFFFFFF8);
1601 if (!info
->memory_reserved_for_heci_mb
) {
1602 /* Wait for ME to be ready */
1603 intel_early_me_init();
1604 info
->memory_reserved_for_heci_mb
= intel_early_me_uma_size();
1607 for (i
= 0; i
< 3; i
++)
1609 pci_read_config32(NORTHBRIDGE
, D0F0_CAPID0
| (i
<< 2)));
1610 gav(info
->revision
= pci_read_config8(NORTHBRIDGE
, PCI_REVISION_ID
));
1611 info
->max_supported_clock_speed_index
= (~capid0
[1] & 7);
1613 if ((capid0
[1] >> 11) & 1)
1614 info
->uma_enabled
= 0;
1616 gav(info
->uma_enabled
=
1617 pci_read_config8(NORTHBRIDGE
, D0F0_DEVEN
) & 8);
1618 /* Unrecognised: [0000:fffd3d2d] 37f81.37f82 ! CPUID: eax: 00000001; ecx: 00000e00 => 00020655.00010800.029ae3ff.bfebfbff */
1619 info
->silicon_revision
= 0;
1621 if (capid0
[2] & 2) {
1622 info
->silicon_revision
= 0;
1623 info
->max_supported_clock_speed_index
= 2;
1624 for (channel
= 0; channel
< NUM_CHANNELS
; channel
++)
1625 if (info
->populated_ranks
[channel
][0][0]
1626 && (info
->spd
[channel
][0][MODULE_TYPE
] & 0xf) ==
1628 info
->silicon_revision
= 2;
1629 info
->max_supported_clock_speed_index
= 1;
1632 switch (((capid0
[2] >> 18) & 1) + 2 * ((capid0
[1] >> 3) & 1)) {
1635 info
->silicon_revision
= 3;
1638 info
->silicon_revision
= 0;
1641 info
->silicon_revision
= 2;
1644 switch (pci_read_config16(NORTHBRIDGE
, PCI_DEVICE_ID
)) {
1646 info
->silicon_revision
= 0;
1649 info
->silicon_revision
= 1;
1655 static void write_training_data(struct raminfo
*info
)
1657 int tm
, channel
, slot
, rank
, lane
;
1658 if (info
->revision
< 8)
1661 for (tm
= 0; tm
< 4; tm
++)
1662 for (channel
= 0; channel
< NUM_CHANNELS
; channel
++)
1663 for (slot
= 0; slot
< NUM_SLOTS
; slot
++)
1664 for (rank
= 0; rank
< NUM_RANKS
; rank
++)
1665 for (lane
= 0; lane
< 9; lane
++)
1666 write_500(info
, channel
,
1670 [channel
][slot
][rank
]
1672 get_timing_register_addr
1675 write_1d0(info
->cached_training
->reg_178
, 0x178, 7, 1);
1676 write_1d0(info
->cached_training
->reg_10b
, 0x10b, 6, 1);
1679 static void dump_timings(struct raminfo
*info
)
1682 int channel
, slot
, rank
, lane
, i
;
1683 printk(BIOS_DEBUG
, "Timings:\n");
1684 FOR_POPULATED_RANKS
{
1685 printk(BIOS_DEBUG
, "channel %d, slot %d, rank %d\n", channel
,
1687 for (lane
= 0; lane
< 9; lane
++) {
1688 printk(BIOS_DEBUG
, "lane %d: ", lane
);
1689 for (i
= 0; i
< 4; i
++) {
1690 printk(BIOS_DEBUG
, "%x (%x) ",
1691 read_500(info
, channel
,
1692 get_timing_register_addr
1693 (lane
, i
, slot
, rank
),
1696 lane_timings
[i
][channel
][slot
][rank
]
1699 printk(BIOS_DEBUG
, "\n");
1702 printk(BIOS_DEBUG
, "[178] = %x (%x)\n", read_1d0(0x178, 7),
1703 info
->training
.reg_178
);
1704 printk(BIOS_DEBUG
, "[10b] = %x (%x)\n", read_1d0(0x10b, 6),
1705 info
->training
.reg_10b
);
1709 /* Read timings and other registers that need to be restored verbatim and
1712 static void save_timings(struct raminfo
*info
)
1714 struct ram_training train
;
1715 int channel
, slot
, rank
, lane
, i
;
1717 train
= info
->training
;
1718 FOR_POPULATED_RANKS
for (lane
= 0; lane
< 9; lane
++)
1719 for (i
= 0; i
< 4; i
++)
1720 train
.lane_timings
[i
][channel
][slot
][rank
][lane
] =
1721 read_500(info
, channel
,
1722 get_timing_register_addr(lane
, i
, slot
,
1724 train
.reg_178
= read_1d0(0x178, 7);
1725 train
.reg_10b
= read_1d0(0x10b, 6);
1727 for (channel
= 0; channel
< NUM_CHANNELS
; channel
++) {
1729 reg32
= read_mchbar32 ((channel
<< 10) + 0x274);
1730 train
.reg274265
[channel
][0] = reg32
>> 16;
1731 train
.reg274265
[channel
][1] = reg32
& 0xffff;
1732 train
.reg274265
[channel
][2] = read_mchbar16 ((channel
<< 10) + 0x265) >> 8;
1734 train
.reg2ca9_bit0
= read_mchbar8(0x2ca9) & 1;
1735 train
.reg_6dc
= read_mchbar32 (0x6dc);
1736 train
.reg_6e8
= read_mchbar32 (0x6e8);
1738 printk (BIOS_SPEW
, "[6dc] = %x\n", train
.reg_6dc
);
1739 printk (BIOS_SPEW
, "[6e8] = %x\n", train
.reg_6e8
);
1741 /* Save the MRC S3 restore data to cbmem */
1742 store_current_mrc_cache(&train
, sizeof(train
));
1746 static const struct ram_training
*get_cached_training(void)
1748 struct mrc_data_container
*cont
;
1749 cont
= find_current_mrc_cache();
1752 return (void *)cont
->mrc_data
;
1756 /* FIXME: add timeout. */
1757 static void wait_heci_ready(void)
1759 while (!(read32(DEFAULT_HECIBAR
+ 0xc) & 8)); // = 0x8000000c
1760 write32((DEFAULT_HECIBAR
+ 0x4),
1761 (read32(DEFAULT_HECIBAR
+ 0x4) & ~0x10) | 0xc);
1764 /* FIXME: add timeout. */
1765 static void wait_heci_cb_avail(int len
)
1772 while (!(read32(DEFAULT_HECIBAR
+ 0xc) & 8));
1775 csr
.raw
= read32(DEFAULT_HECIBAR
+ 0x4);
1777 csr
.csr
.buffer_depth
- (csr
.csr
.buffer_write_ptr
-
1778 csr
.csr
.buffer_read_ptr
));
1781 static void send_heci_packet(struct mei_header
*head
, u32
* payload
)
1783 int len
= (head
->length
+ 3) / 4;
1786 wait_heci_cb_avail(len
+ 1);
1788 /* FIXME: handle leftovers correctly. */
1789 write32(DEFAULT_HECIBAR
+ 0, *(u32
*) head
);
1790 for (i
= 0; i
< len
- 1; i
++)
1791 write32(DEFAULT_HECIBAR
+ 0, payload
[i
]);
1793 write32(DEFAULT_HECIBAR
+ 0, payload
[i
] & ((1 << (8 * len
)) - 1));
1794 write32(DEFAULT_HECIBAR
+ 0x4, read32(DEFAULT_HECIBAR
+ 0x4) | 0x4);
1798 send_heci_message(u8
* msg
, int len
, u8 hostaddress
, u8 clientaddress
)
1800 struct mei_header head
;
1804 maxlen
= (read32(DEFAULT_HECIBAR
+ 0x4) >> 24) * 4 - 4;
1810 head
.is_complete
= 0;
1812 head
.is_complete
= 1;
1815 head
.client_address
= clientaddress
;
1816 head
.host_address
= hostaddress
;
1817 send_heci_packet(&head
, (u32
*) msg
);
1823 /* FIXME: Add timeout. */
1825 recv_heci_packet(struct raminfo
*info
, struct mei_header
*head
, u32
* packet
,
1834 write32(DEFAULT_HECIBAR
+ 0x4, read32(DEFAULT_HECIBAR
+ 0x4) | 2);
1836 csr
.raw
= read32(DEFAULT_HECIBAR
+ 0xc);
1842 while (csr
.csr
.buffer_write_ptr
== csr
.csr
.buffer_read_ptr
);
1843 *(u32
*) head
= read32(DEFAULT_HECIBAR
+ 0x8);
1844 if (!head
->length
) {
1845 write32(DEFAULT_HECIBAR
+ 0x4,
1846 read32(DEFAULT_HECIBAR
+ 0x4) | 2);
1850 if (head
->length
+ 4 > 4 * csr
.csr
.buffer_depth
1851 || head
->length
> *packet_size
) {
1857 csr
.raw
= read32(DEFAULT_HECIBAR
+ 0xc);
1858 while ((head
->length
+ 3) >> 2 >
1859 csr
.csr
.buffer_write_ptr
- csr
.csr
.buffer_read_ptr
);
1861 for (i
= 0; i
< (head
->length
+ 3) >> 2; i
++)
1862 packet
[i
++] = read32(DEFAULT_HECIBAR
+ 0x8);
1863 *packet_size
= head
->length
;
1866 write32(DEFAULT_HECIBAR
+ 0x4, read32(DEFAULT_HECIBAR
+ 0x4) | 4);
1870 /* FIXME: Add timeout. */
1872 recv_heci_message(struct raminfo
*info
, u32
* message
, u32
* message_size
)
1874 struct mei_header head
;
1875 int current_position
;
1877 current_position
= 0;
1880 current_size
= *message_size
- current_position
;
1881 if (recv_heci_packet
1882 (info
, &head
, message
+ (current_position
>> 2),
1883 ¤t_size
) == -1)
1887 current_position
+= current_size
;
1888 if (head
.is_complete
) {
1889 *message_size
= current_position
;
1893 if (current_position
>= *message_size
)
1900 static void send_heci_uma_message(struct raminfo
*info
)
1908 u8 unk3
[0x48 - 4 - 1];
1909 } __attribute__ ((packed
)) reply
;
1910 struct uma_message
{
1917 u32 memory_reserved_for_heci_mb
;
1919 } __attribute__ ((packed
)) msg
= {
1920 0, MKHI_SET_UMA
, 0, 0,
1922 info
->heci_uma_addr
, info
->memory_reserved_for_heci_mb
, 0};
1925 send_heci_message((u8
*) & msg
, sizeof(msg
), 0, 7);
1927 reply_size
= sizeof(reply
);
1928 if (recv_heci_message(info
, (u32
*) & reply
, &reply_size
) == -1)
1931 if (reply
.command
!= (MKHI_SET_UMA
| (1 << 7)))
1932 die("HECI init failed\n");
1935 static void setup_heci_uma(struct raminfo
*info
)
1939 reg44
= pci_read_config32(HECIDEV
, 0x44); // = 0x80010020
1940 info
->memory_reserved_for_heci_mb
= 0;
1941 info
->heci_uma_addr
= 0;
1942 if (!((reg44
& 0x10000) && !(pci_read_config32(HECIDEV
, 0x40) & 0x20)))
1945 info
->heci_bar
= pci_read_config32(HECIDEV
, 0x10) & 0xFFFFFFF0;
1946 info
->memory_reserved_for_heci_mb
= reg44
& 0x3f;
1947 info
->heci_uma_addr
=
1949 ((((u64
) pci_read_config16(NORTHBRIDGE
, D0F0_TOM
)) << 6) -
1950 info
->memory_reserved_for_heci_mb
)) << 20;
1952 pci_read_config32(NORTHBRIDGE
, DMIBAR
);
1953 if (info
->memory_reserved_for_heci_mb
) {
1954 write32(DEFAULT_DMIBAR
+ 0x14,
1955 read32(DEFAULT_DMIBAR
+ 0x14) & ~0x80);
1956 write32(DEFAULT_RCBA
+ 0x14,
1957 read32(DEFAULT_RCBA
+ 0x14) & ~0x80);
1958 write32(DEFAULT_DMIBAR
+ 0x20,
1959 read32(DEFAULT_DMIBAR
+ 0x20) & ~0x80);
1960 write32(DEFAULT_RCBA
+ 0x20,
1961 read32(DEFAULT_RCBA
+ 0x20) & ~0x80);
1962 write32(DEFAULT_DMIBAR
+ 0x2c,
1963 read32(DEFAULT_DMIBAR
+ 0x2c) & ~0x80);
1964 write32(DEFAULT_RCBA
+ 0x30,
1965 read32(DEFAULT_RCBA
+ 0x30) & ~0x80);
1966 write32(DEFAULT_DMIBAR
+ 0x38,
1967 read32(DEFAULT_DMIBAR
+ 0x38) & ~0x80);
1968 write32(DEFAULT_RCBA
+ 0x40,
1969 read32(DEFAULT_RCBA
+ 0x40) & ~0x80);
1971 write32(DEFAULT_RCBA
+ 0x40, 0x87000080); // OK
1972 write32(DEFAULT_DMIBAR
+ 0x38, 0x87000080); // OK
1973 while (read16(DEFAULT_RCBA
+ 0x46) & 2
1974 && read16(DEFAULT_DMIBAR
+ 0x3e) & 2);
1977 write_mchbar32(0x24, 0x10000 + info
->memory_reserved_for_heci_mb
);
1979 send_heci_uma_message(info
);
1981 pci_write_config32(HECIDEV
, 0x10, 0x0);
1982 pci_write_config8(HECIDEV
, 0x4, 0x0);
1986 static int have_match_ranks(struct raminfo
*info
, int channel
, int ranks
)
1988 int ranks_in_channel
;
1989 ranks_in_channel
= info
->populated_ranks
[channel
][0][0]
1990 + info
->populated_ranks
[channel
][0][1]
1991 + info
->populated_ranks
[channel
][1][0]
1992 + info
->populated_ranks
[channel
][1][1];
1995 if (ranks_in_channel
== 0)
1998 if (ranks_in_channel
!= ranks
)
2001 if (info
->populated_ranks
[channel
][0][0] !=
2002 info
->populated_ranks
[channel
][1][0])
2004 if (info
->populated_ranks
[channel
][0][1] !=
2005 info
->populated_ranks
[channel
][1][1])
2007 if (info
->is_x16_module
[channel
][0] != info
->is_x16_module
[channel
][1])
2009 if (info
->density
[channel
][0] != info
->density
[channel
][1])
2014 static void read_4090(struct raminfo
*info
)
2016 int i
, channel
, slot
, rank
, lane
;
2017 for (i
= 0; i
< 2; i
++)
2018 for (slot
= 0; slot
< NUM_SLOTS
; slot
++)
2019 for (rank
= 0; rank
< NUM_RANKS
; rank
++)
2020 for (lane
= 0; lane
< 9; lane
++)
2022 lane_timings
[0][i
][slot
][rank
][lane
]
2025 for (i
= 1; i
< 4; i
++)
2026 for (channel
= 0; channel
< NUM_CHANNELS
; channel
++)
2027 for (slot
= 0; slot
< NUM_SLOTS
; slot
++)
2028 for (rank
= 0; rank
< NUM_RANKS
; rank
++)
2029 for (lane
= 0; lane
< 9; lane
++) {
2031 lane_timings
[i
][channel
]
2032 [slot
][rank
][lane
] =
2033 read_500(info
, channel
,
2034 get_timing_register_addr
2037 + (i
== 1) * 11; // !!!!
2042 static u32
get_etalon2(int flip
, u32 addr
)
2044 const u16 invmask
[] = {
2045 0xaaaa, 0x6db6, 0x4924, 0xeeee, 0xcccc, 0x8888, 0x7bde, 0x739c,
2046 0x6318, 0x4210, 0xefbe, 0xcf3c, 0x8e38, 0x0c30, 0x0820
2049 u32 comp4
= addr
/ 480;
2051 u32 comp1
= addr
& 0xf;
2052 u32 comp2
= (addr
>> 4) & 1;
2053 u32 comp3
= addr
>> 5;
2056 ret
= 0x1010101 << (comp4
- 1);
2059 if (flip
^ (((invmask
[comp3
] >> comp1
) ^ comp2
) & 1))
2065 static void disable_cache(void)
2067 msr_t msr
= {.lo
= 0, .hi
= 0 };
2069 wrmsr(MTRR_PHYS_BASE(3), msr
);
2070 wrmsr(MTRR_PHYS_MASK(3), msr
);
2073 static void enable_cache(unsigned int base
, unsigned int size
)
2076 msr
.lo
= base
| MTRR_TYPE_WRPROT
;
2078 wrmsr(MTRR_PHYS_BASE(3), msr
);
2079 msr
.lo
= ((~(ALIGN_DOWN(size
+ 4096, 4096) - 1) | MTRR_DEF_TYPE_EN
)
2081 msr
.hi
= 0x0000000f;
2082 wrmsr(MTRR_PHYS_MASK(3), msr
);
2085 static void flush_cache(u32 start
, u32 size
)
2090 end
= start
+ (ALIGN_DOWN(size
+ 4096, 4096));
2091 for (addr
= start
; addr
< end
; addr
+= 64)
2095 static void clear_errors(void)
2097 pci_write_config8(NORTHBRIDGE
, 0xc0, 0x01);
2100 static void write_testing(struct raminfo
*info
, int totalrank
, int flip
)
2103 /* in 8-byte units. */
2107 base
= (u8
*)(totalrank
<< 28);
2108 for (offset
= 0; offset
< 9 * 480; offset
+= 2) {
2109 write32(base
+ offset
* 8, get_etalon2(flip
, offset
));
2110 write32(base
+ offset
* 8 + 4, get_etalon2(flip
, offset
));
2111 write32(base
+ offset
* 8 + 8, get_etalon2(flip
, offset
+ 1));
2112 write32(base
+ offset
* 8 + 12, get_etalon2(flip
, offset
+ 1));
2114 if (nwrites
>= 320) {
2121 static u8
check_testing(struct raminfo
*info
, u8 total_rank
, int flip
)
2125 int comp1
, comp2
, comp3
;
2126 u32 failxor
[2] = { 0, 0 };
2128 enable_cache((total_rank
<< 28), 1728 * 5 * 4);
2130 for (comp3
= 0; comp3
< 9 && failmask
!= 0xff; comp3
++) {
2131 for (comp1
= 0; comp1
< 4; comp1
++)
2132 for (comp2
= 0; comp2
< 60; comp2
++) {
2135 comp3
* 8 * 60 + 2 * comp1
+ 8 * comp2
;
2136 read128((total_rank
<< 28) | (curroffset
<< 3),
2139 get_etalon2(flip
, curroffset
) ^ re
[0];
2141 get_etalon2(flip
, curroffset
) ^ re
[1];
2143 get_etalon2(flip
, curroffset
| 1) ^ re
[2];
2145 get_etalon2(flip
, curroffset
| 1) ^ re
[3];
2147 for (i
= 0; i
< 8; i
++)
2148 if ((0xff << (8 * (i
% 4))) & failxor
[i
/ 4])
2152 flush_cache((total_rank
<< 28), 1728 * 5 * 4);
2156 const u32 seed1
[0x18] = {
2157 0x3a9d5ab5, 0x576cb65b, 0x555773b6, 0x2ab772ee,
2158 0x555556ee, 0x3a9d5ab5, 0x576cb65b, 0x555773b6,
2159 0x2ab772ee, 0x555556ee, 0x5155a555, 0x5155a555,
2160 0x5155a555, 0x5155a555, 0x3a9d5ab5, 0x576cb65b,
2161 0x555773b6, 0x2ab772ee, 0x555556ee, 0x55d6b4a5,
2162 0x366d6b3a, 0x2ae5ddbb, 0x3b9ddbb7, 0x55d6b4a5,
2165 static u32
get_seed2(int a
, int b
)
2167 const u32 seed2
[5] = {
2168 0x55555555, 0x33333333, 0x2e555a55, 0x55555555,
2172 r
= seed2
[(a
+ (a
>= 10)) / 5];
2176 static int make_shift(int comp2
, int comp5
, int x
)
2178 const u8 seed3
[32] = {
2179 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
2180 0x00, 0x00, 0x38, 0x1c, 0x3c, 0x18, 0x38, 0x38,
2181 0x38, 0x38, 0x38, 0x38, 0x0f, 0x0f, 0x0f, 0x0f,
2182 0x0f, 0x0f, 0x0f, 0x0f, 0x0f, 0x0f, 0x0f, 0x0f,
2185 return (comp2
- ((seed3
[comp5
] >> (x
& 7)) & 1)) & 0x1f;
2188 static u32
get_etalon(int flip
, u32 addr
)
2191 int comp1
= (addr
>> 1) & 1;
2192 int comp2
= (addr
>> 3) & 0x1f;
2193 int comp3
= (addr
>> 8) & 0xf;
2194 int comp4
= (addr
>> 12) & 0xf;
2195 int comp5
= (addr
>> 16) & 0x1f;
2196 u32 mask_bit
= ~(0x10001 << comp3
);
2203 make_shift(comp2
, comp5
,
2204 (comp3
>> 3) | (comp1
<< 2) | 2)) & 1) ^ flip
;
2207 make_shift(comp2
, comp5
,
2208 (comp3
>> 3) | (comp1
<< 2) | 0)) & 1) ^ flip
;
2210 for (byte
= 0; byte
< 4; byte
++)
2211 if ((get_seed2(comp5
, comp4
) >>
2212 make_shift(comp2
, comp5
, (byte
| (comp1
<< 2)))) & 1)
2213 mask_byte
|= 0xff << (8 * byte
);
2215 return (mask_bit
& mask_byte
) | (part1
<< comp3
) | (part2
<<
2220 write_testing_type2(struct raminfo
*info
, u8 totalrank
, u8 region
, u8 block
,
2224 for (i
= 0; i
< 2048; i
++)
2225 write32p((totalrank
<< 28) | (region
<< 25) | (block
<< 16) |
2226 (i
<< 2), get_etalon(flip
, (block
<< 16) | (i
<< 2)));
2230 check_testing_type2(struct raminfo
*info
, u8 totalrank
, u8 region
, u8 block
,
2236 int comp1
, comp2
, comp3
;
2241 enable_cache(totalrank
<< 28, 134217728);
2242 for (comp3
= 0; comp3
< 2 && failmask
!= 0xff; comp3
++) {
2243 for (comp1
= 0; comp1
< 16; comp1
++)
2244 for (comp2
= 0; comp2
< 64; comp2
++) {
2246 (totalrank
<< 28) | (region
<< 25) | (block
2248 | (comp3
<< 12) | (comp2
<< 6) | (comp1
<<
2250 failxor
[comp1
& 1] |=
2251 read32p(addr
) ^ get_etalon(flip
, addr
);
2253 for (i
= 0; i
< 8; i
++)
2254 if ((0xff << (8 * (i
% 4))) & failxor
[i
/ 4])
2258 flush_cache((totalrank
<< 28) | (region
<< 25) | (block
<< 16), 16384);
2262 static int check_bounded(unsigned short *vals
, u16 bound
)
2266 for (i
= 0; i
< 8; i
++)
2267 if (vals
[i
] < bound
)
2273 BEFORE_USABLE
= 0, AT_USABLE
= 1, AT_MARGIN
= 2, COMPLETE
= 3
2276 static int validate_state(enum state
*in
)
2279 for (i
= 0; i
< 8; i
++)
2280 if (in
[i
] != COMPLETE
)
2286 do_fsm(enum state
*state
, u16
* counter
,
2287 u8 fail_mask
, int margin
, int uplimit
,
2288 u8
* res_low
, u8
* res_high
, u8 val
)
2292 for (lane
= 0; lane
< 8; lane
++) {
2293 int is_fail
= (fail_mask
>> lane
) & 1;
2294 switch (state
[lane
]) {
2298 state
[lane
] = AT_USABLE
;
2302 state
[lane
] = BEFORE_USABLE
;
2307 if (counter
[lane
] >= margin
) {
2308 state
[lane
] = AT_MARGIN
;
2309 res_low
[lane
] = val
- margin
+ 1;
2316 state
[lane
] = BEFORE_USABLE
;
2320 state
[lane
] = COMPLETE
;
2321 res_high
[lane
] = val
- 1;
2324 state
[lane
] = AT_MARGIN
;
2325 if (val
== uplimit
) {
2326 state
[lane
] = COMPLETE
;
2327 res_high
[lane
] = uplimit
;
2338 train_ram_at_178(struct raminfo
*info
, u8 channel
, int slot
, int rank
,
2339 u8 total_rank
, u8 reg_178
, int first_run
, int niter
,
2340 timing_bounds_t
* timings
)
2343 enum state state
[8];
2347 unsigned short num_sucessfully_checked
[8];
2348 u8 secondary_total_rank
;
2351 if (info
->populated_ranks_mask
[1]) {
2353 secondary_total_rank
=
2354 info
->populated_ranks
[1][0][0] +
2355 info
->populated_ranks
[1][0][1]
2356 + info
->populated_ranks
[1][1][0] +
2357 info
->populated_ranks
[1][1][1];
2359 secondary_total_rank
= 0;
2361 secondary_total_rank
= total_rank
;
2365 for (i
= 0; i
< 8; i
++)
2366 state
[i
] = BEFORE_USABLE
;
2371 for (lane
= 0; lane
< 8; lane
++)
2372 if (timings
[reg_178
][channel
][slot
][rank
][lane
].
2374 timings
[reg_178
][channel
][slot
][rank
][lane
].
2376 timings
[reg_178
][channel
][slot
][rank
][lane
].
2378 timings
[reg_178
][channel
][slot
][rank
][lane
].
2384 for (i
= 0; i
< 8; i
++)
2385 state
[i
] = COMPLETE
;
2389 for (reg1b3
= 0; reg1b3
< 0x30 && !validate_state(state
); reg1b3
++) {
2391 write_1d0(reg1b3
^ 32, 0x1b3, 6, 1);
2392 write_1d0(reg1b3
^ 32, 0x1a3, 6, 1);
2393 failmask
= check_testing(info
, total_rank
, 0);
2394 write_mchbar32(0xfb0, read_mchbar32(0xfb0) | 0x00030000);
2395 do_fsm(state
, count
, failmask
, 5, 47, lower_usable
,
2396 upper_usable
, reg1b3
);
2400 write_1d0(0, 0x1b3, 6, 1);
2401 write_1d0(0, 0x1a3, 6, 1);
2402 for (lane
= 0; lane
< 8; lane
++) {
2403 if (state
[lane
] == COMPLETE
) {
2404 timings
[reg_178
][channel
][slot
][rank
][lane
].
2406 lower_usable
[lane
] +
2408 lane_timings
[0][channel
][slot
][rank
][lane
]
2410 timings
[reg_178
][channel
][slot
][rank
][lane
].
2412 upper_usable
[lane
] +
2414 lane_timings
[0][channel
][slot
][rank
][lane
]
2421 for (lane
= 0; lane
< 8; lane
++)
2422 if (state
[lane
] == COMPLETE
) {
2423 write_500(info
, channel
,
2424 timings
[reg_178
][channel
][slot
][rank
]
2426 get_timing_register_addr(lane
, 0,
2429 write_500(info
, channel
,
2430 timings
[reg_178
][channel
][slot
][rank
]
2433 lane_timings
[1][channel
][slot
][rank
]
2437 lane_timings
[0][channel
][slot
][rank
]
2438 [lane
], get_timing_register_addr(lane
,
2443 num_sucessfully_checked
[lane
] = 0;
2445 num_sucessfully_checked
[lane
] = -1;
2450 for (i
= 0; i
< niter
; i
++) {
2451 if (failmask
== 0xFF)
2454 check_testing_type2(info
, total_rank
, 2, i
,
2457 check_testing_type2(info
, total_rank
, 3, i
,
2460 write_mchbar32(0xfb0,
2461 read_mchbar32(0xfb0) | 0x00030000);
2462 for (lane
= 0; lane
< 8; lane
++)
2463 if (num_sucessfully_checked
[lane
] != 0xffff) {
2464 if ((1 << lane
) & failmask
) {
2465 if (timings
[reg_178
][channel
]
2468 timings
[reg_178
][channel
]
2469 [slot
][rank
][lane
].smallest
)
2470 num_sucessfully_checked
2473 num_sucessfully_checked
2479 write_500(info
, channel
,
2486 get_timing_register_addr
2490 write_500(info
, channel
,
2510 get_timing_register_addr
2516 num_sucessfully_checked
[lane
]++;
2519 while (!check_bounded(num_sucessfully_checked
, 2));
2521 for (lane
= 0; lane
< 8; lane
++)
2522 if (state
[lane
] == COMPLETE
) {
2523 write_500(info
, channel
,
2524 timings
[reg_178
][channel
][slot
][rank
]
2526 get_timing_register_addr(lane
, 0,
2529 write_500(info
, channel
,
2530 timings
[reg_178
][channel
][slot
][rank
]
2533 lane_timings
[1][channel
][slot
][rank
]
2537 lane_timings
[0][channel
][slot
][rank
]
2538 [lane
], get_timing_register_addr(lane
,
2543 num_sucessfully_checked
[lane
] = 0;
2545 num_sucessfully_checked
[lane
] = -1;
2550 for (i
= 0; i
< niter
; i
++) {
2551 if (failmask
== 0xFF)
2554 check_testing_type2(info
, total_rank
, 2, i
,
2557 check_testing_type2(info
, total_rank
, 3, i
,
2561 write_mchbar32(0xfb0,
2562 read_mchbar32(0xfb0) | 0x00030000);
2563 for (lane
= 0; lane
< 8; lane
++) {
2564 if (num_sucessfully_checked
[lane
] != 0xffff) {
2565 if ((1 << lane
) & failmask
) {
2566 if (timings
[reg_178
][channel
]
2569 timings
[reg_178
][channel
]
2572 num_sucessfully_checked
2575 num_sucessfully_checked
2581 write_500(info
, channel
,
2588 get_timing_register_addr
2592 write_500(info
, channel
,
2612 get_timing_register_addr
2618 num_sucessfully_checked
[lane
]++;
2622 while (!check_bounded(num_sucessfully_checked
, 3));
2624 for (lane
= 0; lane
< 8; lane
++) {
2625 write_500(info
, channel
,
2627 lane_timings
[0][channel
][slot
][rank
][lane
],
2628 get_timing_register_addr(lane
, 0, slot
, rank
),
2630 write_500(info
, channel
,
2632 lane_timings
[1][channel
][slot
][rank
][lane
],
2633 get_timing_register_addr(lane
, 1, slot
, rank
),
2635 if (timings
[reg_178
][channel
][slot
][rank
][lane
].
2637 timings
[reg_178
][channel
][slot
][rank
][lane
].
2639 timings
[reg_178
][channel
][slot
][rank
][lane
].
2641 timings
[reg_178
][channel
][slot
][rank
][lane
].
2648 static void set_10b(struct raminfo
*info
, u8 val
)
2654 if (read_1d0(0x10b, 6) == val
)
2657 write_1d0(val
, 0x10b, 6, 1);
2659 FOR_POPULATED_RANKS_BACKWARDS
for (lane
= 0; lane
< 9; lane
++) {
2661 reg_500
= read_500(info
, channel
,
2662 get_timing_register_addr(lane
, 0, slot
,
2665 if (lut16
[info
->clock_speed_index
] <= reg_500
)
2666 reg_500
-= lut16
[info
->clock_speed_index
];
2670 reg_500
+= lut16
[info
->clock_speed_index
];
2672 write_500(info
, channel
, reg_500
,
2673 get_timing_register_addr(lane
, 0, slot
, rank
), 9, 1);
2677 static void set_ecc(int onoff
)
2680 for (channel
= 0; channel
< NUM_CHANNELS
; channel
++) {
2682 t
= read_mchbar8((channel
<< 10) + 0x5f8);
2687 write_mchbar8((channel
<< 10) + 0x5f8, t
);
2691 static void set_178(u8 val
)
2698 write_1d0(2 * val
, 0x178, 7, 1);
2702 write_500_timings_type(struct raminfo
*info
, int channel
, int slot
, int rank
,
2707 for (lane
= 0; lane
< 8; lane
++)
2708 write_500(info
, channel
,
2710 lane_timings
[type
][channel
][slot
][rank
][lane
],
2711 get_timing_register_addr(lane
, type
, slot
, rank
), 9,
2716 try_timing_offsets(struct raminfo
*info
, int channel
,
2717 int slot
, int rank
, int totalrank
)
2720 enum state state
[8];
2721 u8 lower_usable
[8], upper_usable
[8];
2727 for (i
= 0; i
< 8; i
++)
2728 state
[i
] = BEFORE_USABLE
;
2730 memset(count
, 0, sizeof(count
));
2732 for (lane
= 0; lane
< 8; lane
++)
2733 write_500(info
, channel
,
2735 lane_timings
[2][channel
][slot
][rank
][lane
] + 32,
2736 get_timing_register_addr(lane
, 3, slot
, rank
), 9, 1);
2738 for (timing_offset
= 0; !validate_state(state
) && timing_offset
< 64;
2741 write_1d0(timing_offset
^ 32, 0x1bb, 6, 1);
2743 for (i
= 0; i
< 2 && failmask
!= 0xff; i
++) {
2745 write_testing(info
, totalrank
, flip
);
2746 failmask
|= check_testing(info
, totalrank
, flip
);
2748 do_fsm(state
, count
, failmask
, 10, 63, lower_usable
,
2749 upper_usable
, timing_offset
);
2751 write_1d0(0, 0x1bb, 6, 1);
2753 if (!validate_state(state
))
2754 die("Couldn't discover DRAM timings (1)\n");
2756 for (lane
= 0; lane
< 8; lane
++) {
2759 if (info
->silicon_revision
) {
2762 usable_length
= upper_usable
[lane
] - lower_usable
[lane
];
2763 if (usable_length
>= 20) {
2764 bias
= usable_length
/ 2 - 10;
2769 write_500(info
, channel
,
2771 lane_timings
[2][channel
][slot
][rank
][lane
] +
2772 (upper_usable
[lane
] + lower_usable
[lane
]) / 2 - bias
,
2773 get_timing_register_addr(lane
, 3, slot
, rank
), 9, 1);
2774 info
->training
.timing2_bounds
[channel
][slot
][rank
][lane
][0] =
2775 info
->training
.lane_timings
[2][channel
][slot
][rank
][lane
] +
2777 info
->training
.timing2_bounds
[channel
][slot
][rank
][lane
][1] =
2778 info
->training
.lane_timings
[2][channel
][slot
][rank
][lane
] +
2780 info
->training
.timing2_offset
[channel
][slot
][rank
][lane
] =
2781 info
->training
.lane_timings
[2][channel
][slot
][rank
][lane
];
2786 choose_training(struct raminfo
*info
, int channel
, int slot
, int rank
,
2787 int lane
, timing_bounds_t
* timings
, u8 center_178
)
2791 unsigned int sum
= 0, count
= 0;
2793 u8 lower_margin
, upper_margin
;
2798 central_weight
= 20;
2800 if (info
->silicon_revision
== 1 && channel
== 1) {
2804 populated_ranks_mask
[1] ^ (info
->
2805 populated_ranks_mask
[1] >> 2)) &
2809 if ((info
->populated_ranks_mask
[0] & 5) == 5) {
2810 central_weight
= 20;
2813 if (info
->clock_speed_index
>= 2
2814 && (info
->populated_ranks_mask
[0] & 5) == 5 && slot
== 1) {
2815 if (info
->silicon_revision
== 1) {
2819 central_weight
= 10;
2826 central_weight
= 20;
2831 if (info
->silicon_revision
== 0 && channel
== 0 && lane
== 0) {
2833 central_weight
= 20;
2836 for (reg_178
= center_178
- span
; reg_178
<= center_178
+ span
;
2840 largest
= timings
[reg_178
][channel
][slot
][rank
][lane
].largest
;
2841 smallest
= timings
[reg_178
][channel
][slot
][rank
][lane
].smallest
;
2842 if (largest
- smallest
+ 1 >= 5) {
2843 unsigned int weight
;
2844 if (reg_178
== center_178
)
2845 weight
= central_weight
;
2847 weight
= side_weight
;
2848 sum
+= weight
* (largest
+ smallest
);
2854 die("Couldn't discover DRAM timings (2)\n");
2855 result
= sum
/ (2 * count
);
2857 result
- timings
[center_178
][channel
][slot
][rank
][lane
].smallest
;
2859 timings
[center_178
][channel
][slot
][rank
][lane
].largest
- result
;
2860 if (upper_margin
< 10 && lower_margin
> 10)
2861 result
-= min(lower_margin
- 10, 10 - upper_margin
);
2862 if (upper_margin
> 10 && lower_margin
< 10)
2863 result
+= min(upper_margin
- 10, 10 - lower_margin
);
2867 #define STANDARD_MIN_MARGIN 5
2869 static u8
choose_reg178(struct raminfo
*info
, timing_bounds_t
* timings
)
2872 int lane
, rank
, slot
, channel
;
2874 int count
= 0, sum
= 0;
2876 for (reg178
= reg178_min
[info
->clock_speed_index
];
2877 reg178
< reg178_max
[info
->clock_speed_index
];
2878 reg178
+= reg178_step
[info
->clock_speed_index
]) {
2879 margin
[reg178
] = -1;
2880 FOR_POPULATED_RANKS_BACKWARDS
for (lane
= 0; lane
< 8; lane
++) {
2882 timings
[reg178
][channel
][slot
][rank
][lane
].largest
-
2883 timings
[reg178
][channel
][slot
][rank
][lane
].
2885 if (curmargin
< margin
[reg178
])
2886 margin
[reg178
] = curmargin
;
2888 if (margin
[reg178
] >= STANDARD_MIN_MARGIN
) {
2890 weight
= margin
[reg178
] - STANDARD_MIN_MARGIN
;
2891 sum
+= weight
* reg178
;
2897 die("Couldn't discover DRAM timings (3)\n");
2901 for (threshold
= 30; threshold
>= 5; threshold
--) {
2902 int usable_length
= 0;
2903 int smallest_fount
= 0;
2904 for (reg178
= reg178_min
[info
->clock_speed_index
];
2905 reg178
< reg178_max
[info
->clock_speed_index
];
2906 reg178
+= reg178_step
[info
->clock_speed_index
])
2907 if (margin
[reg178
] >= threshold
) {
2909 reg178_step
[info
->clock_speed_index
];
2910 info
->training
.reg178_largest
=
2912 2 * reg178_step
[info
->clock_speed_index
];
2914 if (!smallest_fount
) {
2916 info
->training
.reg178_smallest
=
2922 if (usable_length
>= 0x21)
2929 static int check_cached_sanity(struct raminfo
*info
)
2935 if (!info
->cached_training
)
2938 for (channel
= 0; channel
< NUM_CHANNELS
; channel
++)
2939 for (slot
= 0; slot
< NUM_SLOTS
; slot
++)
2940 for (rank
= 0; rank
< NUM_RANKS
; rank
++)
2941 for (lane
= 0; lane
< 8 + info
->use_ecc
; lane
++) {
2942 u16 cached_value
, estimation_value
;
2944 info
->cached_training
->
2945 lane_timings
[1][channel
][slot
][rank
]
2947 if (cached_value
>= 0x18
2948 && cached_value
<= 0x1E7) {
2951 lane_timings
[1][channel
]
2953 if (estimation_value
<
2956 if (estimation_value
>
2964 static int try_cached_training(struct raminfo
*info
)
2969 int channel
, slot
, rank
, lane
;
2973 if (!check_cached_sanity(info
))
2976 info
->training
.reg178_center
= info
->cached_training
->reg178_center
;
2977 info
->training
.reg178_smallest
= info
->cached_training
->reg178_smallest
;
2978 info
->training
.reg178_largest
= info
->cached_training
->reg178_largest
;
2979 memcpy(&info
->training
.timing_bounds
,
2980 &info
->cached_training
->timing_bounds
,
2981 sizeof(info
->training
.timing_bounds
));
2982 memcpy(&info
->training
.timing_offset
,
2983 &info
->cached_training
->timing_offset
,
2984 sizeof(info
->training
.timing_offset
));
2986 write_1d0(2, 0x142, 3, 1);
2987 saved_243
[0] = read_mchbar8(0x243);
2988 saved_243
[1] = read_mchbar8(0x643);
2989 write_mchbar8(0x243, saved_243
[0] | 2);
2990 write_mchbar8(0x643, saved_243
[1] | 2);
2992 pci_write_config16(NORTHBRIDGE
, 0xc8, 3);
2993 if (read_1d0(0x10b, 6) & 1)
2995 for (tm
= 0; tm
< 2; tm
++) {
2998 set_178(tm
? info
->cached_training
->reg178_largest
: info
->
2999 cached_training
->reg178_smallest
);
3002 /* Check timing ranges. With i == 0 we check smallest one and with
3003 i == 1 the largest bound. With j == 0 we check that on the bound
3004 it still works whereas with j == 1 we check that just outside of
3007 FOR_POPULATED_RANKS_BACKWARDS
{
3008 for (i
= 0; i
< 2; i
++) {
3009 for (lane
= 0; lane
< 8; lane
++) {
3010 write_500(info
, channel
,
3011 info
->cached_training
->
3012 timing2_bounds
[channel
][slot
]
3014 get_timing_register_addr(lane
,
3021 write_500(info
, channel
,
3025 [channel
][slot
][rank
]
3027 get_timing_register_addr
3028 (lane
, 2, slot
, rank
),
3030 write_500(info
, channel
,
3031 i
? info
->cached_training
->
3032 timing_bounds
[tm
][channel
]
3036 timing_bounds
[tm
][channel
]
3037 [slot
][rank
][lane
].smallest
,
3038 get_timing_register_addr(lane
,
3043 write_500(info
, channel
,
3044 info
->cached_training
->
3045 timing_offset
[channel
][slot
]
3047 (i
? info
->cached_training
->
3048 timing_bounds
[tm
][channel
]
3052 timing_bounds
[tm
][channel
]
3055 get_timing_register_addr(lane
,
3061 for (j
= 0; j
< 2; j
++) {
3063 u8 expected_failmask
;
3066 reg1b3
= (j
== 1) + 4;
3068 j
== i
? reg1b3
: (-reg1b3
) & 0x3f;
3069 write_1d0(reg1b3
, 0x1bb, 6, 1);
3070 write_1d0(reg1b3
, 0x1b3, 6, 1);
3071 write_1d0(reg1b3
, 0x1a3, 6, 1);
3074 write_testing(info
, totalrank
, flip
);
3076 check_testing(info
, totalrank
,
3079 j
== 0 ? 0x00 : 0xff;
3080 if (failmask
!= expected_failmask
)
3088 set_178(info
->cached_training
->reg178_center
);
3091 write_training_data(info
);
3092 write_1d0(0, 322, 3, 1);
3093 info
->training
= *info
->cached_training
;
3095 write_1d0(0, 0x1bb, 6, 1);
3096 write_1d0(0, 0x1b3, 6, 1);
3097 write_1d0(0, 0x1a3, 6, 1);
3098 write_mchbar8(0x243, saved_243
[0]);
3099 write_mchbar8(0x643, saved_243
[1]);
3104 FOR_POPULATED_RANKS
{
3105 write_500_timings_type(info
, channel
, slot
, rank
, 1);
3106 write_500_timings_type(info
, channel
, slot
, rank
, 2);
3107 write_500_timings_type(info
, channel
, slot
, rank
, 3);
3110 write_1d0(0, 0x1bb, 6, 1);
3111 write_1d0(0, 0x1b3, 6, 1);
3112 write_1d0(0, 0x1a3, 6, 1);
3113 write_mchbar8(0x243, saved_243
[0]);
3114 write_mchbar8(0x643, saved_243
[1]);
3119 static void do_ram_training(struct raminfo
*info
)
3126 timing_bounds_t timings
[64];
3127 int lane
, rank
, slot
, channel
;
3130 write_1d0(2, 0x142, 3, 1);
3131 saved_243
[0] = read_mchbar8(0x243);
3132 saved_243
[1] = read_mchbar8(0x643);
3133 write_mchbar8(0x243, saved_243
[0] | 2);
3134 write_mchbar8(0x643, saved_243
[1] | 2);
3135 switch (info
->clock_speed_index
) {
3148 FOR_POPULATED_RANKS_BACKWARDS
{
3151 write_500_timings_type(info
, channel
, slot
, rank
, 0);
3153 write_testing(info
, totalrank
, 0);
3154 for (i
= 0; i
< niter
; i
++) {
3155 write_testing_type2(info
, totalrank
, 2, i
, 0);
3156 write_testing_type2(info
, totalrank
, 3, i
, 1);
3158 pci_write_config8(NORTHBRIDGE
, 0xc0, 0x01);
3162 if (reg178_min
[info
->clock_speed_index
] <
3163 reg178_max
[info
->clock_speed_index
])
3164 memset(timings
[reg178_min
[info
->clock_speed_index
]], 0,
3165 sizeof(timings
[0]) *
3166 (reg178_max
[info
->clock_speed_index
] -
3167 reg178_min
[info
->clock_speed_index
]));
3168 for (reg_178
= reg178_min
[info
->clock_speed_index
];
3169 reg_178
< reg178_max
[info
->clock_speed_index
];
3170 reg_178
+= reg178_step
[info
->clock_speed_index
]) {
3173 for (channel
= NUM_CHANNELS
- 1; channel
>= 0; channel
--)
3174 for (slot
= 0; slot
< NUM_SLOTS
; slot
++)
3175 for (rank
= 0; rank
< NUM_RANKS
; rank
++) {
3176 memset(&timings
[reg_178
][channel
][slot
]
3177 [rank
][0].smallest
, 0, 16);
3179 populated_ranks
[channel
][slot
]
3181 train_ram_at_178(info
, channel
,
3192 reg178_center
= choose_reg178(info
, timings
);
3194 FOR_POPULATED_RANKS_BACKWARDS
for (lane
= 0; lane
< 8; lane
++) {
3195 info
->training
.timing_bounds
[0][channel
][slot
][rank
][lane
].
3197 timings
[info
->training
.
3198 reg178_smallest
][channel
][slot
][rank
][lane
].
3200 info
->training
.timing_bounds
[0][channel
][slot
][rank
][lane
].
3202 timings
[info
->training
.
3203 reg178_smallest
][channel
][slot
][rank
][lane
].largest
;
3204 info
->training
.timing_bounds
[1][channel
][slot
][rank
][lane
].
3206 timings
[info
->training
.
3207 reg178_largest
][channel
][slot
][rank
][lane
].smallest
;
3208 info
->training
.timing_bounds
[1][channel
][slot
][rank
][lane
].
3210 timings
[info
->training
.
3211 reg178_largest
][channel
][slot
][rank
][lane
].largest
;
3212 info
->training
.timing_offset
[channel
][slot
][rank
][lane
] =
3213 info
->training
.lane_timings
[1][channel
][slot
][rank
][lane
]
3215 info
->training
.lane_timings
[0][channel
][slot
][rank
][lane
] +
3219 if (info
->silicon_revision
== 1
3221 populated_ranks_mask
[1] ^ (info
->
3222 populated_ranks_mask
[1] >> 2)) & 1) {
3223 int ranks_after_channel1
;
3226 for (reg_178
= reg178_center
- 18;
3227 reg_178
<= reg178_center
+ 18; reg_178
+= 18) {
3230 for (slot
= 0; slot
< NUM_SLOTS
; slot
++)
3231 for (rank
= 0; rank
< NUM_RANKS
; rank
++) {
3233 populated_ranks
[1][slot
][rank
]) {
3234 train_ram_at_178(info
, 1, slot
,
3244 ranks_after_channel1
= totalrank
;
3246 for (reg_178
= reg178_center
- 12;
3247 reg_178
<= reg178_center
+ 12; reg_178
+= 12) {
3248 totalrank
= ranks_after_channel1
;
3250 for (slot
= 0; slot
< NUM_SLOTS
; slot
++)
3251 for (rank
= 0; rank
< NUM_RANKS
; rank
++)
3253 populated_ranks
[0][slot
][rank
]) {
3254 train_ram_at_178(info
, 0, slot
,
3265 for (reg_178
= reg178_center
- 12;
3266 reg_178
<= reg178_center
+ 12; reg_178
+= 12) {
3269 FOR_POPULATED_RANKS_BACKWARDS
{
3270 train_ram_at_178(info
, channel
, slot
, rank
,
3271 totalrank
, reg_178
, 0, niter
,
3278 set_178(reg178_center
);
3279 FOR_POPULATED_RANKS_BACKWARDS
for (lane
= 0; lane
< 8; lane
++) {
3283 choose_training(info
, channel
, slot
, rank
, lane
, timings
,
3285 write_500(info
, channel
, tm0
,
3286 get_timing_register_addr(lane
, 0, slot
, rank
), 9, 1);
3287 write_500(info
, channel
,
3290 lane_timings
[1][channel
][slot
][rank
][lane
] -
3292 lane_timings
[0][channel
][slot
][rank
][lane
],
3293 get_timing_register_addr(lane
, 1, slot
, rank
), 9, 1);
3297 FOR_POPULATED_RANKS_BACKWARDS
{
3298 try_timing_offsets(info
, channel
, slot
, rank
, totalrank
);
3301 write_mchbar8(0x243, saved_243
[0]);
3302 write_mchbar8(0x643, saved_243
[1]);
3303 write_1d0(0, 0x142, 3, 1);
3304 info
->training
.reg178_center
= reg178_center
;
3307 static void ram_training(struct raminfo
*info
)
3311 saved_fc4
= read_mchbar16(0xfc4);
3312 write_mchbar16(0xfc4, 0xffff);
3314 if (info
->revision
>= 8)
3317 if (!try_cached_training(info
))
3318 do_ram_training(info
);
3319 if ((info
->silicon_revision
== 2 || info
->silicon_revision
== 3)
3320 && info
->clock_speed_index
< 2)
3322 write_mchbar16(0xfc4, saved_fc4
);
3325 static unsigned gcd(unsigned a
, unsigned b
)
3333 /* invariant a < b. */
3342 static inline int div_roundup(int a
, int b
)
3344 return CEIL_DIV(a
, b
);
3347 static unsigned lcm(unsigned a
, unsigned b
)
3349 return (a
* b
) / gcd(a
, b
);
3354 u8 freq_diff_reduced
;
3355 u8 freq_min_reduced
;
3356 u8 divisor_f4_to_fmax
;
3357 u8 divisor_f3_to_fmax
;
3358 u8 freq4_to_max_remainder
;
3359 u8 freq3_to_2_remainder
;
3360 u8 freq3_to_2_remaindera
;
3361 u8 freq4_to_2_remainder
;
3362 int divisor_f3_to_f1
, divisor_f4_to_f2
;
3363 int common_time_unit_ps
;
3364 int freq_max_reduced
;
3368 compute_frequence_ratios(struct raminfo
*info
, u16 freq1
, u16 freq2
,
3369 int num_cycles_2
, int num_cycles_1
, int round_it
,
3370 int add_freqs
, struct stru1
*result
)
3373 int common_time_unit_ps
;
3374 int freq1_reduced
, freq2_reduced
;
3375 int freq_min_reduced
;
3376 int freq_max_reduced
;
3379 g
= gcd(freq1
, freq2
);
3380 freq1_reduced
= freq1
/ g
;
3381 freq2_reduced
= freq2
/ g
;
3382 freq_min_reduced
= min(freq1_reduced
, freq2_reduced
);
3383 freq_max_reduced
= max(freq1_reduced
, freq2_reduced
);
3385 common_time_unit_ps
= div_roundup(900000, lcm(freq1
, freq2
));
3386 freq3
= div_roundup(num_cycles_2
, common_time_unit_ps
) - 1;
3387 freq4
= div_roundup(num_cycles_1
, common_time_unit_ps
) - 1;
3389 freq3
+= freq2_reduced
;
3390 freq4
+= freq1_reduced
;
3394 result
->freq3_to_2_remainder
= 0;
3395 result
->freq3_to_2_remaindera
= 0;
3396 result
->freq4_to_max_remainder
= 0;
3397 result
->divisor_f4_to_f2
= 0;
3398 result
->divisor_f3_to_f1
= 0;
3400 if (freq2_reduced
< freq1_reduced
) {
3401 result
->freq3_to_2_remainder
=
3402 result
->freq3_to_2_remaindera
=
3403 freq3
% freq1_reduced
- freq1_reduced
+ 1;
3404 result
->freq4_to_max_remainder
=
3405 -(freq4
% freq1_reduced
);
3406 result
->divisor_f3_to_f1
= freq3
/ freq1_reduced
;
3407 result
->divisor_f4_to_f2
=
3409 (freq1_reduced
- freq2_reduced
)) / freq2_reduced
;
3410 result
->freq4_to_2_remainder
=
3411 -(char)((freq1_reduced
- freq2_reduced
) +
3414 freq2_reduced
)) % (u8
) freq2_reduced
);
3416 if (freq2_reduced
> freq1_reduced
) {
3417 result
->freq4_to_max_remainder
=
3418 (freq4
% freq2_reduced
) - freq2_reduced
+ 1;
3419 result
->freq4_to_2_remainder
=
3420 freq4
% freq_max_reduced
-
3421 freq_max_reduced
+ 1;
3423 result
->freq4_to_max_remainder
=
3424 -(freq4
% freq2_reduced
);
3425 result
->freq4_to_2_remainder
=
3426 -(char)(freq4
% freq_max_reduced
);
3428 result
->divisor_f4_to_f2
= freq4
/ freq2_reduced
;
3429 result
->divisor_f3_to_f1
=
3431 (freq2_reduced
- freq1_reduced
)) / freq1_reduced
;
3432 result
->freq3_to_2_remainder
= -(freq3
% freq2_reduced
);
3433 result
->freq3_to_2_remaindera
=
3434 -(char)((freq_max_reduced
- freq_min_reduced
) +
3437 freq_min_reduced
)) % freq1_reduced
);
3440 result
->divisor_f3_to_fmax
= freq3
/ freq_max_reduced
;
3441 result
->divisor_f4_to_fmax
= freq4
/ freq_max_reduced
;
3443 if (freq2_reduced
> freq1_reduced
) {
3444 if (freq3
% freq_max_reduced
)
3445 result
->divisor_f3_to_fmax
++;
3447 if (freq2_reduced
< freq1_reduced
) {
3448 if (freq4
% freq_max_reduced
)
3449 result
->divisor_f4_to_fmax
++;
3452 result
->freqs_reversed
= (freq2_reduced
< freq1_reduced
);
3453 result
->freq_diff_reduced
= freq_max_reduced
- freq_min_reduced
;
3454 result
->freq_min_reduced
= freq_min_reduced
;
3455 result
->common_time_unit_ps
= common_time_unit_ps
;
3456 result
->freq_max_reduced
= freq_max_reduced
;
3460 set_2d5x_reg(struct raminfo
*info
, u16 reg
, u16 freq1
, u16 freq2
,
3461 int num_cycles_2
, int num_cycles_1
, int num_cycles_3
,
3462 int num_cycles_4
, int reverse
)
3467 compute_frequence_ratios(info
, freq1
, freq2
, num_cycles_2
, num_cycles_1
,
3472 (div_roundup(num_cycles_2
, vv
.common_time_unit_ps
) +
3473 div_roundup(num_cycles_3
, vv
.common_time_unit_ps
),
3474 div_roundup(num_cycles_1
,
3475 vv
.common_time_unit_ps
) +
3476 div_roundup(num_cycles_4
, vv
.common_time_unit_ps
))
3477 + vv
.freq_min_reduced
- 1, vv
.freq_max_reduced
) - 1;
3480 (u8
) ((vv
.freq_max_reduced
- vv
.freq_min_reduced
) +
3481 vv
.freq_max_reduced
* multiplier
)
3483 freqs_reversed
<< 8) | ((u8
) (vv
.freq_min_reduced
*
3484 multiplier
) << 16) | ((u8
) (vv
.
3490 vv
.freq3_to_2_remaindera
| (vv
.freq4_to_2_remainder
<< 8) | (vv
.
3493 | (vv
.divisor_f4_to_f2
<< 20) | (vv
.freq_min_reduced
<< 24);
3495 write_mchbar32(reg
, y
);
3496 write_mchbar32(reg
+ 4, x
);
3498 write_mchbar32(reg
+ 4, y
);
3499 write_mchbar32(reg
, x
);
3504 set_6d_reg(struct raminfo
*info
, u16 reg
, u16 freq1
, u16 freq2
,
3505 int num_cycles_1
, int num_cycles_2
, int num_cycles_3
,
3508 struct stru1 ratios1
;
3509 struct stru1 ratios2
;
3511 compute_frequence_ratios(info
, freq1
, freq2
, num_cycles_1
, num_cycles_2
,
3513 compute_frequence_ratios(info
, freq1
, freq2
, num_cycles_3
, num_cycles_4
,
3515 printk (BIOS_SPEW
, "[%x] <= %x\n", reg
,
3516 ratios1
.freq4_to_max_remainder
| (ratios2
.
3517 freq4_to_max_remainder
3519 | (ratios1
.divisor_f4_to_fmax
<< 16) | (ratios2
.
3523 ratios1
.freq4_to_max_remainder
| (ratios2
.
3524 freq4_to_max_remainder
3526 | (ratios1
.divisor_f4_to_fmax
<< 16) | (ratios2
.
3532 set_2dx8_reg(struct raminfo
*info
, u16 reg
, u8 mode
, u16 freq1
, u16 freq2
,
3533 int num_cycles_2
, int num_cycles_1
, int round_it
, int add_freqs
)
3535 struct stru1 ratios
;
3537 compute_frequence_ratios(info
, freq1
, freq2
, num_cycles_2
, num_cycles_1
,
3538 round_it
, add_freqs
, &ratios
);
3541 write_mchbar32(reg
+ 4,
3542 ratios
.freq_diff_reduced
| (ratios
.
3546 ratios
.freq3_to_2_remainder
| (ratios
.
3547 freq4_to_max_remainder
3549 | (ratios
.divisor_f3_to_fmax
<< 16) | (ratios
.
3552 (ratios
.freq_min_reduced
<< 24));
3557 ratios
.freq3_to_2_remainder
| (ratios
.
3564 ratios
.freq3_to_2_remainder
| (ratios
.
3565 freq4_to_max_remainder
3569 (ratios
.divisor_f4_to_fmax
<< 20));
3573 write_mchbar32(reg
, (ratios
.divisor_f3_to_fmax
<< 4)
3574 | (ratios
.divisor_f4_to_fmax
<< 8) | (ratios
.
3577 (ratios
.freq_min_reduced
<< 16) | (ratios
.
3584 static void set_2dxx_series(struct raminfo
*info
, int s3resume
)
3586 set_2dx8_reg(info
, 0x2d00, 0, 0x78, frequency_11(info
) / 2, 1359, 1005,
3588 set_2dx8_reg(info
, 0x2d08, 0, 0x78, 0x78, 3273, 5033, 1, 1);
3589 set_2dx8_reg(info
, 0x2d10, 0, 0x78, info
->fsb_frequency
, 1475, 1131, 0,
3591 set_2dx8_reg(info
, 0x2d18, 0, 2 * info
->fsb_frequency
,
3592 frequency_11(info
), 1231, 1524, 0, 1);
3593 set_2dx8_reg(info
, 0x2d20, 0, 2 * info
->fsb_frequency
,
3594 frequency_11(info
) / 2, 1278, 2008, 0, 1);
3595 set_2dx8_reg(info
, 0x2d28, 0, info
->fsb_frequency
, frequency_11(info
),
3597 set_2dx8_reg(info
, 0x2d30, 0, info
->fsb_frequency
,
3598 frequency_11(info
) / 2, 1403, 1318, 0, 1);
3599 set_2dx8_reg(info
, 0x2d38, 0, info
->fsb_frequency
, 0x78, 3460, 5363, 1,
3601 set_2dx8_reg(info
, 0x2d40, 0, info
->fsb_frequency
, 0x3c, 2792, 5178, 1,
3603 set_2dx8_reg(info
, 0x2d48, 0, 2 * info
->fsb_frequency
, 0x78, 2738, 4610,
3605 set_2dx8_reg(info
, 0x2d50, 0, info
->fsb_frequency
, 0x78, 2819, 5932, 1,
3607 set_2dx8_reg(info
, 0x6d4, 1, info
->fsb_frequency
,
3608 frequency_11(info
) / 2, 4000, 0, 0, 0);
3609 set_2dx8_reg(info
, 0x6d8, 2, info
->fsb_frequency
,
3610 frequency_11(info
) / 2, 4000, 4000, 0, 0);
3613 printk (BIOS_SPEW
, "[6dc] <= %x\n", info
->cached_training
->reg_6dc
);
3614 write_mchbar32(0x6dc, info
->cached_training
->reg_6dc
);
3616 set_6d_reg(info
, 0x6dc, 2 * info
->fsb_frequency
, frequency_11(info
), 0,
3617 info
->delay46_ps
[0], 0,
3618 info
->delay54_ps
[0]);
3619 set_2dx8_reg(info
, 0x6e0, 1, 2 * info
->fsb_frequency
,
3620 frequency_11(info
), 2500, 0, 0, 0);
3621 set_2dx8_reg(info
, 0x6e4, 1, 2 * info
->fsb_frequency
,
3622 frequency_11(info
) / 2, 3500, 0, 0, 0);
3624 printk (BIOS_SPEW
, "[6e8] <= %x\n", info
->cached_training
->reg_6e8
);
3625 write_mchbar32(0x6e8, info
->cached_training
->reg_6e8
);
3627 set_6d_reg(info
, 0x6e8, 2 * info
->fsb_frequency
, frequency_11(info
), 0,
3628 info
->delay46_ps
[1], 0,
3629 info
->delay54_ps
[1]);
3630 set_2d5x_reg(info
, 0x2d58, 0x78, 0x78, 864, 1195, 762, 786, 0);
3631 set_2d5x_reg(info
, 0x2d60, 0x195, info
->fsb_frequency
, 1352, 725, 455,
3633 set_2d5x_reg(info
, 0x2d68, 0x195, 0x3c, 2707, 5632, 3277, 2207, 0);
3634 set_2d5x_reg(info
, 0x2d70, 0x195, frequency_11(info
) / 2, 1276, 758,
3636 set_2d5x_reg(info
, 0x2d78, 0x195, 0x78, 1021, 799, 510, 513, 0);
3637 set_2d5x_reg(info
, 0x2d80, info
->fsb_frequency
, 0xe1, 0, 2862, 2579,
3639 set_2d5x_reg(info
, 0x2d88, info
->fsb_frequency
, 0xe1, 0, 2690, 2405,
3641 set_2d5x_reg(info
, 0x2da0, 0x78, 0xe1, 0, 2560, 2264, 2251, 0);
3642 set_2d5x_reg(info
, 0x2da8, 0x195, frequency_11(info
), 1060, 775, 484,
3644 set_2d5x_reg(info
, 0x2db0, 0x195, 0x78, 4183, 6023, 2217, 2048, 0);
3645 write_mchbar32(0x2dbc, ((frequency_11(info
) / 2) - 1) | 0xe00000);
3646 write_mchbar32(0x2db8, ((info
->fsb_frequency
- 1) << 16) | 0x77);
3649 static u16
get_max_timing(struct raminfo
*info
, int channel
)
3651 int slot
, rank
, lane
;
3654 if ((read_mchbar8(0x2ca8) >> 2) < 1)
3657 if (info
->revision
< 8)
3660 for (slot
= 0; slot
< NUM_SLOTS
; slot
++)
3661 for (rank
= 0; rank
< NUM_RANKS
; rank
++)
3662 if (info
->populated_ranks
[channel
][slot
][rank
])
3663 for (lane
= 0; lane
< 8 + info
->use_ecc
; lane
++)
3664 ret
= max(ret
, read_500(info
, channel
,
3665 get_timing_register_addr
3671 static void set_274265(struct raminfo
*info
)
3673 int delay_a_ps
, delay_b_ps
, delay_c_ps
, delay_d_ps
;
3674 int delay_e_ps
, delay_e_cycles
, delay_f_cycles
;
3675 int delay_e_over_cycle_ps
;
3679 delay_a_ps
= 4 * halfcycle_ps(info
) + 6 * fsbcycle_ps(info
);
3680 info
->training
.reg2ca9_bit0
= 0;
3681 for (channel
= 0; channel
< NUM_CHANNELS
; channel
++) {
3683 900000 / lcm(2 * info
->fsb_frequency
, frequency_11(info
));
3685 (halfcycle_ps(info
) * get_max_timing(info
, channel
) >> 6)
3686 - info
->some_delay_3_ps_rounded
+ 200;
3688 ((info
->silicon_revision
== 0
3689 || info
->silicon_revision
== 1)
3690 && (info
->revision
>= 8)))
3691 delay_d_ps
+= halfcycle_ps(info
) * 2;
3693 halfcycle_ps(info
) * (!info
->revision_flag_1
+
3694 info
->some_delay_2_halfcycles_ceil
+
3695 2 * info
->some_delay_1_cycle_floor
+
3696 info
->clock_speed_index
+
3697 2 * info
->cas_latency
- 7 + 11);
3698 delay_d_ps
+= info
->revision
>= 8 ? 2758 : 4428;
3700 write_mchbar32(0x140,
3701 (read_mchbar32(0x140) & 0xfaffffff) | 0x2000000);
3702 write_mchbar32(0x138,
3703 (read_mchbar32(0x138) & 0xfaffffff) | 0x2000000);
3704 if ((read_mchbar8(0x144) & 0x1f) > 0x13)
3706 delay_c_ps
= delay_d_ps
+ 1800;
3707 if (delay_c_ps
<= delay_a_ps
)
3711 cycletime_ps
* div_roundup(delay_c_ps
- delay_a_ps
,
3714 delay_e_over_cycle_ps
= delay_e_ps
% (2 * halfcycle_ps(info
));
3715 delay_e_cycles
= delay_e_ps
/ (2 * halfcycle_ps(info
));
3717 div_roundup(2500 - delay_e_over_cycle_ps
,
3718 2 * halfcycle_ps(info
));
3719 if (delay_f_cycles
> delay_e_cycles
) {
3720 info
->delay46_ps
[channel
] = delay_e_ps
;
3723 info
->delay46_ps
[channel
] =
3724 delay_e_over_cycle_ps
+
3725 2 * halfcycle_ps(info
) * delay_f_cycles
;
3726 delay_e_cycles
-= delay_f_cycles
;
3729 if (info
->delay46_ps
[channel
] < 2500) {
3730 info
->delay46_ps
[channel
] = 2500;
3731 info
->training
.reg2ca9_bit0
= 1;
3733 delay_b_ps
= halfcycle_ps(info
) + delay_c_ps
;
3734 if (delay_b_ps
<= delay_a_ps
)
3737 delay_b_ps
-= delay_a_ps
;
3738 info
->delay54_ps
[channel
] =
3739 cycletime_ps
* div_roundup(delay_b_ps
,
3741 2 * halfcycle_ps(info
) * delay_e_cycles
;
3742 if (info
->delay54_ps
[channel
] < 2500)
3743 info
->delay54_ps
[channel
] = 2500;
3744 info
->training
.reg274265
[channel
][0] = delay_e_cycles
;
3745 if (delay_d_ps
+ 7 * halfcycle_ps(info
) <=
3746 24 * halfcycle_ps(info
))
3747 info
->training
.reg274265
[channel
][1] = 0;
3749 info
->training
.reg274265
[channel
][1] =
3750 div_roundup(delay_d_ps
+ 7 * halfcycle_ps(info
),
3751 4 * halfcycle_ps(info
)) - 6;
3752 write_mchbar32((channel
<< 10) + 0x274,
3753 info
->training
.reg274265
[channel
][1]
3754 | (info
->training
.reg274265
[channel
][0] << 16));
3755 info
->training
.reg274265
[channel
][2] =
3756 div_roundup(delay_c_ps
+ 3 * fsbcycle_ps(info
),
3757 4 * halfcycle_ps(info
)) + 1;
3758 write_mchbar16((channel
<< 10) + 0x265,
3759 info
->training
.reg274265
[channel
][2] << 8);
3761 if (info
->training
.reg2ca9_bit0
)
3762 write_mchbar8(0x2ca9, read_mchbar8(0x2ca9) | 1);
3764 write_mchbar8(0x2ca9, read_mchbar8(0x2ca9) & ~1);
3767 static void restore_274265(struct raminfo
*info
)
3771 for (channel
= 0; channel
< NUM_CHANNELS
; channel
++) {
3772 write_mchbar32((channel
<< 10) + 0x274,
3773 (info
->cached_training
->reg274265
[channel
][0] << 16)
3774 | info
->cached_training
->reg274265
[channel
][1]);
3775 write_mchbar16((channel
<< 10) + 0x265,
3776 info
->cached_training
->reg274265
[channel
][2] << 8);
3778 if (info
->cached_training
->reg2ca9_bit0
)
3779 write_mchbar8(0x2ca9, read_mchbar8(0x2ca9) | 1);
3781 write_mchbar8(0x2ca9, read_mchbar8(0x2ca9) & ~1);
3785 static void dmi_setup(void)
3787 gav(read8(DEFAULT_DMIBAR
+ 0x254));
3788 write8(DEFAULT_DMIBAR
+ 0x254, 0x1);
3789 write16(DEFAULT_DMIBAR
+ 0x1b8, 0x18f2);
3790 read_mchbar16(0x48);
3791 write_mchbar16(0x48, 0x2);
3793 write32(DEFAULT_DMIBAR
+ 0xd68, read32(DEFAULT_DMIBAR
+ 0xd68) | 0x08000000);
3795 outl((gav(inl(DEFAULT_GPIOBASE
| 0x38)) & ~0x140000) | 0x400000,
3796 DEFAULT_GPIOBASE
| 0x38);
3797 gav(inb(DEFAULT_GPIOBASE
| 0xe)); // = 0xfdcaff6e
3801 void chipset_init(const int s3resume
)
3807 x2ca8
= read_mchbar8(0x2ca8);
3808 if ((x2ca8
& 1) || (x2ca8
== 8 && !s3resume
)) {
3809 printk(BIOS_DEBUG
, "soft reset detected, rebooting properly\n");
3810 write_mchbar8(0x2ca8, 0);
3821 pre_raminit_3(x2ca8
);
3823 pre_raminit_4a(x2ca8
);
3828 write_mchbar16(0x1170, 0xa880);
3829 write_mchbar8(0x11c1, 0x1);
3830 write_mchbar16(0x1170, 0xb880);
3831 read_mchbar8(0x1210);
3832 write_mchbar8(0x1210, 0x84);
3834 if (get_option(&gfxsize
, "gfx_uma_size") != CB_SUCCESS
) {
3839 ggc
= 0xb00 | ((gfxsize
+ 5) << 4);
3841 pci_write_config16(NORTHBRIDGE
, D0F0_GGC
, ggc
| 2);
3844 deven
= pci_read_config16(NORTHBRIDGE
, D0F0_DEVEN
); // = 0x3
3847 write_mchbar8(0x2c30, 0x20);
3848 pci_read_config8(NORTHBRIDGE
, 0x8); // = 0x18
3849 write_mchbar16(0x2c30, read_mchbar16(0x2c30) | 0x200);
3850 write_mchbar16(0x2c32, 0x434);
3851 read_mchbar32(0x2c44);
3852 write_mchbar32(0x2c44, 0x1053687);
3853 pci_read_config8(GMA
, 0x62); // = 0x2
3854 pci_write_config8(GMA
, 0x62, 0x2);
3855 read8(DEFAULT_RCBA
+ 0x2318);
3856 write8(DEFAULT_RCBA
+ 0x2318, 0x47);
3857 read8(DEFAULT_RCBA
+ 0x2320);
3858 write8(DEFAULT_RCBA
+ 0x2320, 0xfc);
3861 read_mchbar32(0x30);
3862 write_mchbar32(0x30, 0x40);
3864 pci_write_config16(NORTHBRIDGE
, D0F0_GGC
, ggc
);
3865 gav(read32(DEFAULT_RCBA
+ 0x3428));
3866 write32(DEFAULT_RCBA
+ 0x3428, 0x1d);
3869 void raminit(const int s3resume
, const u8
*spd_addrmap
)
3871 unsigned channel
, slot
, lane
, rank
;
3873 struct raminfo info
;
3876 int cbmem_wasnot_inited
;
3878 x2ca8
= read_mchbar8(0x2ca8);
3879 deven
= pci_read_config16(NORTHBRIDGE
, D0F0_DEVEN
);
3881 memset(&info
, 0x5a, sizeof(info
));
3883 info
.last_500_command
[0] = 0;
3884 info
.last_500_command
[1] = 0;
3886 info
.fsb_frequency
= 135 * 2;
3887 info
.board_lane_delay
[0] = 0x14;
3888 info
.board_lane_delay
[1] = 0x07;
3889 info
.board_lane_delay
[2] = 0x07;
3890 info
.board_lane_delay
[3] = 0x08;
3891 info
.board_lane_delay
[4] = 0x56;
3892 info
.board_lane_delay
[5] = 0x04;
3893 info
.board_lane_delay
[6] = 0x04;
3894 info
.board_lane_delay
[7] = 0x05;
3895 info
.board_lane_delay
[8] = 0x10;
3897 info
.training
.reg_178
= 0;
3898 info
.training
.reg_10b
= 0;
3901 info
.memory_reserved_for_heci_mb
= 0;
3904 timestamp_add_now(101);
3906 if (!s3resume
|| REAL
) {
3907 pci_read_config8(SOUTHBRIDGE
, GEN_PMCON_2
); // = 0x80
3909 collect_system_info(&info
);
3916 memset(&info
.populated_ranks
, 0, sizeof(info
.populated_ranks
));
3919 for (channel
= 0; channel
< NUM_CHANNELS
; channel
++)
3920 for (slot
= 0; slot
< NUM_SLOTS
; slot
++) {
3924 const u8 useful_addresses
[] = {
3936 0x11, 0x12, 0x13, 0x14, 0x15,
3937 0x16, 0x17, 0x18, 0x19, 0x1a, 0x1b,
3939 THERMAL_AND_REFRESH
,
3941 REFERENCE_RAW_CARD_USED
,
3942 RANK1_ADDRESS_MAPPING
,
3943 0x75, 0x76, 0x77, 0x78,
3944 0x79, 0x7a, 0x7b, 0x7c, 0x7d, 0x7e,
3945 0x7f, 0x80, 0x81, 0x82, 0x83, 0x84,
3946 0x85, 0x86, 0x87, 0x88,
3947 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e,
3948 0x8f, 0x90, 0x91, 0x92, 0x93, 0x94,
3951 if (!spd_addrmap
[2 * channel
+ slot
])
3953 for (try = 0; try < 5; try++) {
3954 v
= smbus_read_byte(spd_addrmap
[2 * channel
+ slot
],
3963 sizeof(useful_addresses
) /
3964 sizeof(useful_addresses
[0]); addr
++)
3966 spd
[channel
][0][useful_addresses
3968 smbus_read_byte(spd_addrmap
[2 * channel
+ slot
],
3971 if (info
.spd
[channel
][0][DEVICE_TYPE
] != 11)
3972 die("Only DDR3 is supported");
3974 v
= info
.spd
[channel
][0][RANKS_AND_DQ
];
3975 info
.populated_ranks
[channel
][0][0] = 1;
3976 info
.populated_ranks
[channel
][0][1] =
3978 if (((v
>> 3) & 7) > 1)
3979 die("At most 2 ranks are supported");
3980 if ((v
& 7) == 0 || (v
& 7) > 2)
3981 die("Only x8 and x16 modules are supported");
3983 spd
[channel
][slot
][MODULE_TYPE
] & 0xF) != 2
3985 spd
[channel
][slot
][MODULE_TYPE
] & 0xF)
3987 die("Registered memory is not supported");
3988 info
.is_x16_module
[channel
][0] = (v
& 7) - 1;
3989 info
.density
[channel
][slot
] =
3990 info
.spd
[channel
][slot
][DENSITY
] & 0xF;
3993 spd
[channel
][slot
][MEMORY_BUS_WIDTH
] &
4000 for (channel
= 0; channel
< NUM_CHANNELS
; channel
++) {
4002 for (slot
= 0; slot
< NUM_SLOTS
; slot
++)
4003 for (rank
= 0; rank
< NUM_RANKS
; rank
++)
4005 populated_ranks
[channel
][slot
][rank
]
4006 << (2 * slot
+ rank
);
4007 info
.populated_ranks_mask
[channel
] = v
;
4012 gav(pci_read_config32(NORTHBRIDGE
, D0F0_CAPID0
+ 4));
4016 timestamp_add_now(102);
4018 write_mchbar8(0x2ca8, read_mchbar8(0x2ca8) & 0xfc);
4020 rdmsr (MTRR_PHYS_MASK (3));
4023 collect_system_info(&info
);
4024 calculate_timings(&info
);
4027 pci_write_config8(NORTHBRIDGE
, 0xdf, 0x82);
4031 u8 reg8
= pci_read_config8(SOUTHBRIDGE
, GEN_PMCON_2
);
4032 if (x2ca8
== 0 && (reg8
& 0x80)) {
4033 /* Don't enable S4-assertion stretch. Makes trouble on roda/rk9.
4034 reg8 = pci_read_config8(PCI_DEV(0, 0x1f, 0), 0xa4);
4035 pci_write_config8(PCI_DEV(0, 0x1f, 0), 0xa4, reg8 | 0x08);
4040 pci_write_config8(SOUTHBRIDGE
, GEN_PMCON_2
,
4041 (reg8
& ~(1 << 7)));
4044 "Interrupted RAM init, reset required.\n");
4052 gav(read_mchbar8(0x2ca8)); ///!!!!
4055 if (!s3resume
&& x2ca8
== 0)
4056 pci_write_config8(SOUTHBRIDGE
, GEN_PMCON_2
,
4057 pci_read_config8(SOUTHBRIDGE
, GEN_PMCON_2
) | 0x80);
4059 compute_derived_timings(&info
);
4062 gav(read_mchbar8(0x164));
4063 write_mchbar8(0x164, 0x26);
4064 write_mchbar16(0x2c20, 0x10);
4067 write_mchbar32(0x18b4, read_mchbar32(0x18b4) | 0x210000); /* OK */
4068 write_mchbar32(0x1890, read_mchbar32(0x1890) | 0x2000000); /* OK */
4069 write_mchbar32(0x18b4, read_mchbar32(0x18b4) | 0x8000);
4071 gav(pci_read_config32(PCI_DEV(0xff, 2, 1), 0x50)); // !!!!
4072 pci_write_config8(PCI_DEV(0xff, 2, 1), 0x54, 0x12);
4074 gav(read_mchbar16(0x2c10)); // !!!!
4075 write_mchbar16(0x2c10, 0x412);
4076 gav(read_mchbar16(0x2c10)); // !!!!
4077 write_mchbar16(0x2c12, read_mchbar16(0x2c12) | 0x100); /* OK */
4079 gav(read_mchbar8(0x2ca8)); // !!!!
4080 write_mchbar32(0x1804,
4081 (read_mchbar32(0x1804) & 0xfffffffc) | 0x8400080);
4083 pci_read_config32(PCI_DEV(0xff, 2, 1), 0x6c); // !!!!
4084 pci_write_config32(PCI_DEV(0xff, 2, 1), 0x6c, 0x40a0a0);
4085 gav(read_mchbar32(0x1c04)); // !!!!
4086 gav(read_mchbar32(0x1804)); // !!!!
4089 write_mchbar8(0x2ca8, read_mchbar8(0x2ca8) | 1);
4092 write_mchbar32(0x18d8, 0x120000);
4093 write_mchbar32(0x18dc, 0x30a484a);
4094 pci_write_config32(PCI_DEV(0xff, 2, 1), 0xe0, 0x0);
4095 pci_write_config32(PCI_DEV(0xff, 2, 1), 0xf4, 0x9444a);
4096 write_mchbar32(0x18d8, 0x40000);
4097 write_mchbar32(0x18dc, 0xb000000);
4098 pci_write_config32(PCI_DEV(0xff, 2, 1), 0xe0, 0x60000);
4099 pci_write_config32(PCI_DEV(0xff, 2, 1), 0xf4, 0x0);
4100 write_mchbar32(0x18d8, 0x180000);
4101 write_mchbar32(0x18dc, 0xc0000142);
4102 pci_write_config32(PCI_DEV(0xff, 2, 1), 0xe0, 0x20000);
4103 pci_write_config32(PCI_DEV(0xff, 2, 1), 0xf4, 0x142);
4104 write_mchbar32(0x18d8, 0x1e0000);
4106 gav(read_mchbar32(0x18dc)); // !!!!
4107 write_mchbar32(0x18dc, 0x3);
4108 gav(read_mchbar32(0x18dc)); // !!!!
4111 write_mchbar8(0x2ca8, read_mchbar8(0x2ca8) | 1); // guess
4114 write_mchbar32(0x188c, 0x20bc09);
4115 pci_write_config32(PCI_DEV(0xff, 2, 1), 0xd0, 0x40b0c09);
4116 write_mchbar32(0x1a10, 0x4200010e);
4117 write_mchbar32(0x18b8, read_mchbar32(0x18b8) | 0x200);
4118 gav(read_mchbar32(0x1918)); // !!!!
4119 write_mchbar32(0x1918, 0x332);
4121 gav(read_mchbar32(0x18b8)); // !!!!
4122 write_mchbar32(0x18b8, 0xe00);
4123 gav(read_mchbar32(0x182c)); // !!!!
4124 write_mchbar32(0x182c, 0x10202);
4125 gav(pci_read_config32(PCI_DEV(0xff, 2, 1), 0x94)); // !!!!
4126 pci_write_config32(PCI_DEV(0xff, 2, 1), 0x94, 0x10202);
4127 write_mchbar32(0x1a1c, read_mchbar32(0x1a1c) & 0x8fffffff);
4128 write_mchbar32(0x1a70, read_mchbar32(0x1a70) | 0x100000);
4130 write_mchbar32(0x18b4, read_mchbar32(0x18b4) & 0xffff7fff);
4131 gav(read_mchbar32(0x1a68)); // !!!!
4132 write_mchbar32(0x1a68, 0x343800);
4133 gav(read_mchbar32(0x1e68)); // !!!!
4134 gav(read_mchbar32(0x1a68)); // !!!!
4137 write_mchbar8(0x2ca8, read_mchbar8(0x2ca8) | 1); // guess
4140 pci_read_config32(PCI_DEV(0xff, 2, 0), 0x048); // !!!!
4141 pci_write_config32(PCI_DEV(0xff, 2, 0), 0x048, 0x140000);
4142 pci_read_config32(PCI_DEV(0xff, 2, 0), 0x058); // !!!!
4143 pci_write_config32(PCI_DEV(0xff, 2, 0), 0x058, 0x64555);
4144 pci_read_config32(PCI_DEV(0xff, 2, 0), 0x058); // !!!!
4145 pci_read_config32(PCI_DEV (0xff, 0, 0), 0xd0); // !!!!
4146 pci_write_config32(PCI_DEV (0xff, 0, 0), 0xd0, 0x180);
4147 gav(read_mchbar32(0x1af0)); // !!!!
4148 gav(read_mchbar32(0x1af0)); // !!!!
4149 write_mchbar32(0x1af0, 0x1f020003);
4150 gav(read_mchbar32(0x1af0)); // !!!!
4153 write_mchbar8(0x2ca8, read_mchbar8(0x2ca8) | 1); // guess
4156 gav(read_mchbar32(0x1890)); // !!!!
4157 write_mchbar32(0x1890, 0x80102);
4158 gav(read_mchbar32(0x18b4)); // !!!!
4159 write_mchbar32(0x18b4, 0x216000);
4160 write_mchbar32(0x18a4, 0x22222222);
4161 write_mchbar32(0x18a8, 0x22222222);
4162 write_mchbar32(0x18ac, 0x22222);
4166 info
.cached_training
= get_cached_training();
4170 if (s3resume
&& info
.cached_training
) {
4171 restore_274265(&info
);
4172 printk(BIOS_DEBUG
, "reg2ca9_bit0 = %x\n",
4173 info
.cached_training
->reg2ca9_bit0
);
4174 for (i
= 0; i
< 2; i
++)
4175 for (j
= 0; j
< 3; j
++)
4176 printk(BIOS_DEBUG
, "reg274265[%d][%d] = %x\n",
4177 i
, j
, info
.cached_training
->reg274265
[i
][j
]);
4180 printk(BIOS_DEBUG
, "reg2ca9_bit0 = %x\n",
4181 info
.training
.reg2ca9_bit0
);
4182 for (i
= 0; i
< 2; i
++)
4183 for (j
= 0; j
< 3; j
++)
4184 printk(BIOS_DEBUG
, "reg274265[%d][%d] = %x\n",
4185 i
, j
, info
.training
.reg274265
[i
][j
]);
4188 set_2dxx_series(&info
, s3resume
);
4191 read_mchbar32(0x2cb0);
4192 write_mchbar32(0x2cb0, 0x40);
4198 write_mchbar32(0xff8, 0x1800 | read_mchbar32(0xff8));
4199 read_mchbar32(0x2cb0);
4200 write_mchbar32(0x2cb0, 0x00);
4201 pci_read_config8(PCI_DEV (0, 0x2, 0x0), 0x4c);
4202 pci_read_config8(PCI_DEV (0, 0x2, 0x0), 0x4c);
4203 pci_read_config8(PCI_DEV (0, 0x2, 0x0), 0x4e);
4205 read_mchbar8(0x1150);
4206 read_mchbar8(0x1151);
4207 read_mchbar8(0x1022);
4208 read_mchbar8(0x16d0);
4209 write_mchbar32(0x1300, 0x60606060);
4210 write_mchbar32(0x1304, 0x60606060);
4211 write_mchbar32(0x1308, 0x78797a7b);
4212 write_mchbar32(0x130c, 0x7c7d7e7f);
4213 write_mchbar32(0x1310, 0x60606060);
4214 write_mchbar32(0x1314, 0x60606060);
4215 write_mchbar32(0x1318, 0x60606060);
4216 write_mchbar32(0x131c, 0x60606060);
4217 write_mchbar32(0x1320, 0x50515253);
4218 write_mchbar32(0x1324, 0x54555657);
4219 write_mchbar32(0x1328, 0x58595a5b);
4220 write_mchbar32(0x132c, 0x5c5d5e5f);
4221 write_mchbar32(0x1330, 0x40414243);
4222 write_mchbar32(0x1334, 0x44454647);
4223 write_mchbar32(0x1338, 0x48494a4b);
4224 write_mchbar32(0x133c, 0x4c4d4e4f);
4225 write_mchbar32(0x1340, 0x30313233);
4226 write_mchbar32(0x1344, 0x34353637);
4227 write_mchbar32(0x1348, 0x38393a3b);
4228 write_mchbar32(0x134c, 0x3c3d3e3f);
4229 write_mchbar32(0x1350, 0x20212223);
4230 write_mchbar32(0x1354, 0x24252627);
4231 write_mchbar32(0x1358, 0x28292a2b);
4232 write_mchbar32(0x135c, 0x2c2d2e2f);
4233 write_mchbar32(0x1360, 0x10111213);
4234 write_mchbar32(0x1364, 0x14151617);
4235 write_mchbar32(0x1368, 0x18191a1b);
4236 write_mchbar32(0x136c, 0x1c1d1e1f);
4237 write_mchbar32(0x1370, 0x10203);
4238 write_mchbar32(0x1374, 0x4050607);
4239 write_mchbar32(0x1378, 0x8090a0b);
4240 write_mchbar32(0x137c, 0xc0d0e0f);
4241 write_mchbar8(0x11cc, 0x4e);
4242 write_mchbar32(0x1110, 0x73970404);
4243 write_mchbar32(0x1114, 0x72960404);
4244 write_mchbar32(0x1118, 0x6f950404);
4245 write_mchbar32(0x111c, 0x6d940404);
4246 write_mchbar32(0x1120, 0x6a930404);
4247 write_mchbar32(0x1124, 0x68a41404);
4248 write_mchbar32(0x1128, 0x66a21404);
4249 write_mchbar32(0x112c, 0x63a01404);
4250 write_mchbar32(0x1130, 0x609e1404);
4251 write_mchbar32(0x1134, 0x5f9c1404);
4252 write_mchbar32(0x1138, 0x5c961404);
4253 write_mchbar32(0x113c, 0x58a02404);
4254 write_mchbar32(0x1140, 0x54942404);
4255 write_mchbar32(0x1190, 0x900080a);
4256 write_mchbar16(0x11c0, 0xc40b);
4257 write_mchbar16(0x11c2, 0x303);
4258 write_mchbar16(0x11c4, 0x301);
4259 read_mchbar32(0x1190);
4260 write_mchbar32(0x1190, 0x8900080a);
4261 write_mchbar32(0x11b8, 0x70c3000);
4262 write_mchbar8(0x11ec, 0xa);
4263 write_mchbar16(0x1100, 0x800);
4264 read_mchbar32(0x11bc);
4265 write_mchbar32(0x11bc, 0x1e84800);
4266 write_mchbar16(0x11ca, 0xfa);
4267 write_mchbar32(0x11e4, 0x4e20);
4268 write_mchbar8(0x11bc, 0xf);
4269 write_mchbar16(0x11da, 0x19);
4270 write_mchbar16(0x11ba, 0x470c);
4271 write_mchbar32(0x1680, 0xe6ffe4ff);
4272 write_mchbar32(0x1684, 0xdeffdaff);
4273 write_mchbar32(0x1688, 0xd4ffd0ff);
4274 write_mchbar32(0x168c, 0xccffc6ff);
4275 write_mchbar32(0x1690, 0xc0ffbeff);
4276 write_mchbar32(0x1694, 0xb8ffb0ff);
4277 write_mchbar32(0x1698, 0xa8ff0000);
4278 write_mchbar32(0x169c, 0xc00);
4279 write_mchbar32(0x1290, 0x5000000);
4282 write_mchbar32(0x124c, 0x15040d00);
4283 write_mchbar32(0x1250, 0x7f0000);
4284 write_mchbar32(0x1254, 0x1e220004);
4285 write_mchbar32(0x1258, 0x4000004);
4286 write_mchbar32(0x1278, 0x0);
4287 write_mchbar32(0x125c, 0x0);
4288 write_mchbar32(0x1260, 0x0);
4289 write_mchbar32(0x1264, 0x0);
4290 write_mchbar32(0x1268, 0x0);
4291 write_mchbar32(0x126c, 0x0);
4292 write_mchbar32(0x1270, 0x0);
4293 write_mchbar32(0x1274, 0x0);
4296 if ((deven
& 8) && x2ca8
== 0) {
4297 write_mchbar16(0x1214, 0x320);
4298 write_mchbar32(0x1600, 0x40000000);
4299 read_mchbar32(0x11f4);
4300 write_mchbar32(0x11f4, 0x10000000);
4301 read_mchbar16(0x1230);
4302 write_mchbar16(0x1230, 0x8000);
4303 write_mchbar32(0x1400, 0x13040020);
4304 write_mchbar32(0x1404, 0xe090120);
4305 write_mchbar32(0x1408, 0x5120220);
4306 write_mchbar32(0x140c, 0x5120330);
4307 write_mchbar32(0x1410, 0xe090220);
4308 write_mchbar32(0x1414, 0x1010001);
4309 write_mchbar32(0x1418, 0x1110000);
4310 write_mchbar32(0x141c, 0x9020020);
4311 write_mchbar32(0x1420, 0xd090220);
4312 write_mchbar32(0x1424, 0x2090220);
4313 write_mchbar32(0x1428, 0x2090330);
4314 write_mchbar32(0x142c, 0xd090220);
4315 write_mchbar32(0x1430, 0x1010001);
4316 write_mchbar32(0x1434, 0x1110000);
4317 write_mchbar32(0x1438, 0x11040020);
4318 write_mchbar32(0x143c, 0x4030220);
4319 write_mchbar32(0x1440, 0x1060220);
4320 write_mchbar32(0x1444, 0x1060330);
4321 write_mchbar32(0x1448, 0x4030220);
4322 write_mchbar32(0x144c, 0x1010001);
4323 write_mchbar32(0x1450, 0x1110000);
4324 write_mchbar32(0x1454, 0x4010020);
4325 write_mchbar32(0x1458, 0xb090220);
4326 write_mchbar32(0x145c, 0x1090220);
4327 write_mchbar32(0x1460, 0x1090330);
4328 write_mchbar32(0x1464, 0xb090220);
4329 write_mchbar32(0x1468, 0x1010001);
4330 write_mchbar32(0x146c, 0x1110000);
4331 write_mchbar32(0x1470, 0xf040020);
4332 write_mchbar32(0x1474, 0xa090220);
4333 write_mchbar32(0x1478, 0x1120220);
4334 write_mchbar32(0x147c, 0x1120330);
4335 write_mchbar32(0x1480, 0xa090220);
4336 write_mchbar32(0x1484, 0x1010001);
4337 write_mchbar32(0x1488, 0x1110000);
4338 write_mchbar32(0x148c, 0x7020020);
4339 write_mchbar32(0x1490, 0x1010220);
4340 write_mchbar32(0x1494, 0x10210);
4341 write_mchbar32(0x1498, 0x10320);
4342 write_mchbar32(0x149c, 0x1010220);
4343 write_mchbar32(0x14a0, 0x1010001);
4344 write_mchbar32(0x14a4, 0x1110000);
4345 write_mchbar32(0x14a8, 0xd040020);
4346 write_mchbar32(0x14ac, 0x8090220);
4347 write_mchbar32(0x14b0, 0x1111310);
4348 write_mchbar32(0x14b4, 0x1111420);
4349 write_mchbar32(0x14b8, 0x8090220);
4350 write_mchbar32(0x14bc, 0x1010001);
4351 write_mchbar32(0x14c0, 0x1110000);
4352 write_mchbar32(0x14c4, 0x3010020);
4353 write_mchbar32(0x14c8, 0x7090220);
4354 write_mchbar32(0x14cc, 0x1081310);
4355 write_mchbar32(0x14d0, 0x1081420);
4356 write_mchbar32(0x14d4, 0x7090220);
4357 write_mchbar32(0x14d8, 0x1010001);
4358 write_mchbar32(0x14dc, 0x1110000);
4359 write_mchbar32(0x14e0, 0xb040020);
4360 write_mchbar32(0x14e4, 0x2030220);
4361 write_mchbar32(0x14e8, 0x1051310);
4362 write_mchbar32(0x14ec, 0x1051420);
4363 write_mchbar32(0x14f0, 0x2030220);
4364 write_mchbar32(0x14f4, 0x1010001);
4365 write_mchbar32(0x14f8, 0x1110000);
4366 write_mchbar32(0x14fc, 0x5020020);
4367 write_mchbar32(0x1500, 0x5090220);
4368 write_mchbar32(0x1504, 0x2071310);
4369 write_mchbar32(0x1508, 0x2071420);
4370 write_mchbar32(0x150c, 0x5090220);
4371 write_mchbar32(0x1510, 0x1010001);
4372 write_mchbar32(0x1514, 0x1110000);
4373 write_mchbar32(0x1518, 0x7040120);
4374 write_mchbar32(0x151c, 0x2090220);
4375 write_mchbar32(0x1520, 0x70b1210);
4376 write_mchbar32(0x1524, 0x70b1310);
4377 write_mchbar32(0x1528, 0x2090220);
4378 write_mchbar32(0x152c, 0x1010001);
4379 write_mchbar32(0x1530, 0x1110000);
4380 write_mchbar32(0x1534, 0x1010110);
4381 write_mchbar32(0x1538, 0x1081310);
4382 write_mchbar32(0x153c, 0x5041200);
4383 write_mchbar32(0x1540, 0x5041310);
4384 write_mchbar32(0x1544, 0x1081310);
4385 write_mchbar32(0x1548, 0x1010001);
4386 write_mchbar32(0x154c, 0x1110000);
4387 write_mchbar32(0x1550, 0x1040120);
4388 write_mchbar32(0x1554, 0x4051210);
4389 write_mchbar32(0x1558, 0xd051200);
4390 write_mchbar32(0x155c, 0xd051200);
4391 write_mchbar32(0x1560, 0x4051210);
4392 write_mchbar32(0x1564, 0x1010001);
4393 write_mchbar32(0x1568, 0x1110000);
4394 write_mchbar16(0x1222, 0x220a);
4395 write_mchbar16(0x123c, 0x1fc0);
4396 write_mchbar16(0x1220, 0x1388);
4399 read_mchbar32(0x2c80); // !!!!
4400 write_mchbar32(0x2c80, 0x1053688);
4401 read_mchbar32(0x1c04); // !!!!
4402 write_mchbar32(0x1804, 0x406080);
4404 read_mchbar8(0x2ca8);
4407 write_mchbar8(0x2ca8, read_mchbar8(0x2ca8) & ~3);
4408 write_mchbar8(0x2ca8, read_mchbar8(0x2ca8) + 4);
4409 write_mchbar32(0x1af0, read_mchbar32(0x1af0) | 0x10);
4418 write_mchbar8(0x2ca8, read_mchbar8(0x2ca8));
4419 read_mchbar32(0x2c80); // !!!!
4420 write_mchbar32(0x2c80, 0x53688);
4421 pci_write_config32(PCI_DEV (0xff, 0, 0), 0x60, 0x20220);
4422 read_mchbar16(0x2c20); // !!!!
4423 read_mchbar16(0x2c10); // !!!!
4424 read_mchbar16(0x2c00); // !!!!
4425 write_mchbar16(0x2c00, 0x8c0);
4427 write_1d0(0, 0x33d, 0, 0);
4428 write_500(&info
, 0, 0, 0xb61, 0, 0);
4429 write_500(&info
, 1, 0, 0xb61, 0, 0);
4430 write_mchbar32(0x1a30, 0x0);
4431 write_mchbar32(0x1a34, 0x0);
4432 write_mchbar16(0x614,
4433 0xb5b | (info
.populated_ranks
[1][0][0] *
4434 0x404) | (info
.populated_ranks
[0][0][0] *
4436 write_mchbar16(0x616, 0x26a);
4437 write_mchbar32(0x134, 0x856000);
4438 write_mchbar32(0x160, 0x5ffffff);
4439 read_mchbar32(0x114); // !!!!
4440 write_mchbar32(0x114, 0xc2024440);
4441 read_mchbar32(0x118); // !!!!
4442 write_mchbar32(0x118, 0x4);
4443 for (channel
= 0; channel
< NUM_CHANNELS
; channel
++)
4444 write_mchbar32(0x260 + (channel
<< 10),
4447 populated_ranks_mask
[channel
] & 3) << 20));
4448 for (channel
= 0; channel
< NUM_CHANNELS
; channel
++) {
4449 write_mchbar16(0x31c + (channel
<< 10), 0x101);
4450 write_mchbar16(0x360 + (channel
<< 10), 0x909);
4451 write_mchbar16(0x3a4 + (channel
<< 10), 0x101);
4452 write_mchbar16(0x3e8 + (channel
<< 10), 0x101);
4453 write_mchbar32(0x320 + (channel
<< 10), 0x29002900);
4454 write_mchbar32(0x324 + (channel
<< 10), 0x0);
4455 write_mchbar32(0x368 + (channel
<< 10), 0x32003200);
4456 write_mchbar16(0x352 + (channel
<< 10), 0x505);
4457 write_mchbar16(0x354 + (channel
<< 10), 0x3c3c);
4458 write_mchbar16(0x356 + (channel
<< 10), 0x1040);
4459 write_mchbar16(0x39a + (channel
<< 10), 0x73e4);
4460 write_mchbar16(0x3de + (channel
<< 10), 0x77ed);
4461 write_mchbar16(0x422 + (channel
<< 10), 0x1040);
4464 write_1d0(0x4, 0x151, 4, 1);
4465 write_1d0(0, 0x142, 3, 1);
4466 rdmsr(0x1ac); // !!!!
4467 write_500(&info
, 1, 1, 0x6b3, 4, 1);
4468 write_500(&info
, 1, 1, 0x6cf, 4, 1);
4470 rmw_1d0(0x21c, 0x38, 0, 6, 1);
4472 write_1d0(((!info
.populated_ranks
[1][0][0]) << 1) | ((!info
.
4476 for (channel
= 0; channel
< NUM_CHANNELS
; channel
++) {
4477 write_mchbar16(0x38e + (channel
<< 10), 0x5f5f);
4478 write_mchbar16(0x3d2 + (channel
<< 10), 0x5f5f);
4483 program_base_timings(&info
);
4485 write_mchbar8(0x5ff, read_mchbar8(0x5ff) | 0x80); /* OK */
4487 write_1d0(0x2, 0x1d5, 2, 1);
4488 write_1d0(0x20, 0x166, 7, 1);
4489 write_1d0(0x0, 0xeb, 3, 1);
4490 write_1d0(0x0, 0xf3, 6, 1);
4492 for (channel
= 0; channel
< NUM_CHANNELS
; channel
++)
4493 for (lane
= 0; lane
< 9; lane
++) {
4494 u16 addr
= 0x125 + get_lane_offset(0, 0, lane
);
4496 a
= read_500(&info
, channel
, addr
, 6); // = 0x20040080 //!!!!
4497 write_500(&info
, channel
, a
, addr
, 6, 1);
4503 if (info
.cached_training
== NULL
) {
4506 "Couldn't find training data. Rebooting\n");
4507 reg32
= inl(DEFAULT_PMBASE
+ 0x04);
4508 outl(reg32
& ~(7 << 10), DEFAULT_PMBASE
+ 0x04);
4519 info
.training
= *info
.cached_training
;
4520 for (tm
= 0; tm
< 4; tm
++)
4521 for (channel
= 0; channel
< NUM_CHANNELS
; channel
++)
4522 for (slot
= 0; slot
< NUM_SLOTS
; slot
++)
4523 for (rank
= 0; rank
< NUM_RANKS
; rank
++)
4524 for (lane
= 0; lane
< 9; lane
++)
4532 get_timing_register_addr
4536 write_1d0(info
.cached_training
->reg_178
, 0x178, 7, 1);
4537 write_1d0(info
.cached_training
->reg_10b
, 0x10b, 6, 1);
4540 read_mchbar32(0x1f4); // !!!!
4541 write_mchbar32(0x1f4, 0x20000);
4542 write_mchbar32(0x1f0, 0x1d000200);
4543 read_mchbar8(0x1f0); // !!!!
4544 write_mchbar8(0x1f0, 0x1);
4545 read_mchbar8(0x1f0); // !!!!
4547 program_board_delay(&info
);
4549 write_mchbar8(0x5ff, 0x0); /* OK */
4550 write_mchbar8(0x5ff, 0x80); /* OK */
4551 write_mchbar8(0x5f4, 0x1); /* OK */
4553 write_mchbar32(0x130, read_mchbar32(0x130) & 0xfffffffd); // | 2 when ?
4554 while (read_mchbar32(0x130) & 1);
4555 gav(read_1d0(0x14b, 7)); // = 0x81023100
4556 write_1d0(0x30, 0x14b, 7, 1);
4557 read_1d0(0xd6, 6); // = 0xfa008080 // !!!!
4558 write_1d0(7, 0xd6, 6, 1);
4559 read_1d0(0x328, 6); // = 0xfa018080 // !!!!
4560 write_1d0(7, 0x328, 6, 1);
4562 for (channel
= 0; channel
< NUM_CHANNELS
; channel
++)
4563 set_4cf(&info
, channel
,
4564 info
.populated_ranks
[channel
][0][0] ? 8 : 0);
4566 read_1d0(0x116, 4); // = 0x4040432 // !!!!
4567 write_1d0(2, 0x116, 4, 1);
4568 read_1d0(0xae, 6); // = 0xe8088080 // !!!!
4569 write_1d0(0, 0xae, 6, 1);
4570 read_1d0(0x300, 4); // = 0x48088080 // !!!!
4571 write_1d0(0, 0x300, 6, 1);
4572 read_mchbar16(0x356); // !!!!
4573 write_mchbar16(0x356, 0x1040);
4574 read_mchbar16(0x756); // !!!!
4575 write_mchbar16(0x756, 0x1040);
4576 write_mchbar32(0x140, read_mchbar32(0x140) & ~0x07000000);
4577 write_mchbar32(0x138, read_mchbar32(0x138) & ~0x07000000);
4578 write_mchbar32(0x130, 0x31111301);
4579 /* Wait until REG130b0 is 1. */
4580 while (read_mchbar32(0x130) & 1)
4586 val_a1
= read_1d0(0xa1, 6); // = 0x1cf4040 // !!!!
4587 t
= read_1d0(0x2f3, 6); // = 0x10a4040 // !!!!
4588 rmw_1d0(0x320, 0x07,
4589 (t
& 4) | ((t
& 8) >> 2) | ((t
& 0x10) >> 4), 6, 1);
4590 rmw_1d0(0x14b, 0x78,
4591 ((((val_a1
>> 2) & 4) | (val_a1
& 8)) >> 2) | (val_a1
&
4595 ((((val_a1
>> 2) & 4) | (val_a1
& 8)) >> 2) | (val_a1
&
4600 for (channel
= 0; channel
< NUM_CHANNELS
; channel
++)
4601 set_4cf(&info
, channel
,
4602 info
.populated_ranks
[channel
][0][0] ? 9 : 1);
4604 rmw_1d0(0x116, 0xe, 1, 4, 1); // = 0x4040432 // !!!!
4605 read_mchbar32(0x144); // !!!!
4606 write_1d0(2, 0xae, 6, 1);
4607 write_1d0(2, 0x300, 6, 1);
4608 write_1d0(2, 0x121, 3, 1);
4609 read_1d0(0xd6, 6); // = 0xfa00c0c7 // !!!!
4610 write_1d0(4, 0xd6, 6, 1);
4611 read_1d0(0x328, 6); // = 0xfa00c0c7 // !!!!
4612 write_1d0(4, 0x328, 6, 1);
4614 for (channel
= 0; channel
< NUM_CHANNELS
; channel
++)
4615 set_4cf(&info
, channel
,
4616 info
.populated_ranks
[channel
][0][0] ? 9 : 0);
4618 write_mchbar32(0x130,
4620 populated_ranks
[1][0][0] << 30) | (info
.
4625 while (read_mchbar8(0x130) & 1); // !!!!
4626 read_1d0(0xa1, 6); // = 0x1cf4054 // !!!!
4627 read_1d0(0x2f3, 6); // = 0x10a4054 // !!!!
4628 read_1d0(0x21c, 6); // = 0xafa00c0 // !!!!
4629 write_1d0(0, 0x21c, 6, 1);
4630 read_1d0(0x14b, 7); // = 0x810231b0 // !!!!
4631 write_1d0(0x35, 0x14b, 7, 1);
4633 for (channel
= 0; channel
< NUM_CHANNELS
; channel
++)
4634 set_4cf(&info
, channel
,
4635 info
.populated_ranks
[channel
][0][0] ? 0xb : 0x2);
4639 write_mchbar8(0x1e8, 0x4); /* OK */
4641 for (channel
= 0; channel
< NUM_CHANNELS
; channel
++) {
4642 write_500(&info
, channel
,
4643 0x3 & ~(info
.populated_ranks_mask
[channel
]), 0x6b7, 2,
4645 write_500(&info
, channel
, 0x3, 0x69b, 2, 1);
4647 write_mchbar32(0x2d0, (read_mchbar32(0x2d0) & 0xff2c01ff) | 0x200000); /* OK */
4648 write_mchbar16(0x6c0, 0x14a0); /* OK */
4649 write_mchbar32(0x6d0, (read_mchbar32(0x6d0) & 0xff0080ff) | 0x8000); /* OK */
4650 write_mchbar16(0x232, 0x8);
4651 write_mchbar32(0x234, (read_mchbar32(0x234) & 0xfffbfffb) | 0x40004); /* 0x40004 or 0 depending on ? */
4652 write_mchbar32(0x34, (read_mchbar32(0x34) & 0xfffffffd) | 5); /* OK */
4653 write_mchbar32(0x128, 0x2150d05);
4654 write_mchbar8(0x12c, 0x1f); /* OK */
4655 write_mchbar8(0x12d, 0x56); /* OK */
4656 write_mchbar8(0x12e, 0x31);
4657 write_mchbar8(0x12f, 0x0); /* OK */
4658 write_mchbar8(0x271, 0x2); /* OK */
4659 write_mchbar8(0x671, 0x2); /* OK */
4660 write_mchbar8(0x1e8, 0x4); /* OK */
4661 for (channel
= 0; channel
< NUM_CHANNELS
; channel
++)
4662 write_mchbar32(0x294 + (channel
<< 10),
4663 (info
.populated_ranks_mask
[channel
] & 3) << 16);
4664 write_mchbar32(0x134, (read_mchbar32(0x134) & 0xfc01ffff) | 0x10000); /* OK */
4665 write_mchbar32(0x134, (read_mchbar32(0x134) & 0xfc85ffff) | 0x850000); /* OK */
4666 for (channel
= 0; channel
< NUM_CHANNELS
; channel
++)
4667 write_mchbar32(0x260 + (channel
<< 10),
4668 (read_mchbar32(0x260 + (channel
<< 10)) &
4669 ~0xf00000) | 0x8000000 | ((info
.
4670 populated_ranks_mask
4678 for (channel
= 0; channel
< NUM_CHANNELS
; channel
++)
4679 for (slot
= 0; slot
< NUM_SLOTS
; slot
++)
4680 for (rank
= 0; rank
< NUM_RANKS
; rank
++)
4681 if (info
.populated_ranks
[channel
][slot
][rank
]) {
4682 jedec_read(&info
, channel
, slot
, rank
,
4683 totalrank
, 0xa, 0x400);
4687 write_mchbar8(0x12c, 0x9f);
4689 read_mchbar8(0x271); // 2 // !!!!
4690 write_mchbar8(0x271, 0xe);
4691 read_mchbar8(0x671); // !!!!
4692 write_mchbar8(0x671, 0xe);
4695 for (channel
= 0; channel
< NUM_CHANNELS
; channel
++) {
4696 write_mchbar32(0x294 + (channel
<< 10),
4698 populated_ranks_mask
[channel
] & 3) <<
4700 write_mchbar16(0x298 + (channel
<< 10),
4702 populated_ranks
[channel
][0][0]) | (info
.
4709 write_mchbar32(0x29c + (channel
<< 10), 0x77a);
4711 read_mchbar32(0x2c0); /// !!!
4712 write_mchbar32(0x2c0, 0x6009cc00);
4716 a
= read_mchbar8(0x243); // !!!!
4717 b
= read_mchbar8(0x643); // !!!!
4718 write_mchbar8(0x243, a
| 2);
4719 write_mchbar8(0x643, b
| 2);
4722 write_1d0(7, 0x19b, 3, 1);
4723 write_1d0(7, 0x1c0, 3, 1);
4724 write_1d0(4, 0x1c6, 4, 1);
4725 write_1d0(4, 0x1cc, 4, 1);
4726 read_1d0(0x151, 4); // = 0x408c6d74 // !!!!
4727 write_1d0(4, 0x151, 4, 1);
4728 write_mchbar32(0x584, 0xfffff);
4729 write_mchbar32(0x984, 0xfffff);
4731 for (channel
= 0; channel
< NUM_CHANNELS
; channel
++)
4732 for (slot
= 0; slot
< NUM_SLOTS
; slot
++)
4733 for (rank
= 0; rank
< NUM_RANKS
; rank
++)
4735 populated_ranks
[channel
][slot
]
4737 config_rank(&info
, s3resume
,
4741 write_mchbar8(0x243, 0x1);
4742 write_mchbar8(0x643, 0x1);
4745 /* was == 1 but is common */
4746 pci_write_config16(NORTHBRIDGE
, 0xc8, 3);
4747 write_26c(0, 0x820);
4748 write_26c(1, 0x820);
4749 write_mchbar32(0x130, read_mchbar32(0x130) | 2);
4753 for (channel
= 0; channel
< NUM_CHANNELS
; channel
++) {
4754 write_mchbar32(0x294 + (channel
<< 10),
4756 populated_ranks_mask
[channel
] & 3) <<
4758 write_mchbar16(0x298 + (channel
<< 10),
4760 populated_ranks
[channel
][0][0]) | (info
.
4767 write_mchbar32(0x29c + (channel
<< 10), 0x77a);
4769 read_mchbar32(0x2c0); /// !!!
4770 write_mchbar32(0x2c0, 0x6009cc00);
4773 write_mchbar32(0xfa4, read_mchbar32(0xfa4) & ~0x01000002);
4774 write_mchbar32(0xfb0, 0x2000e019);
4780 /* Before training. */
4781 timestamp_add_now(103);
4784 ram_training(&info
);
4786 /* After training. */
4787 timestamp_add_now(104);
4789 dump_timings(&info
);
4791 program_modules_memory_map(&info
, 0);
4792 program_total_memory_map(&info
);
4794 if (info
.non_interleaved_part_mb
!= 0 && info
.interleaved_part_mb
!= 0)
4795 write_mchbar8(0x111, 0x20 | (0 << 2) | (1 << 6) | (0 << 7));
4796 else if (have_match_ranks(&info
, 0, 4) && have_match_ranks(&info
, 1, 4))
4797 write_mchbar8(0x111, 0x20 | (3 << 2) | (0 << 6) | (1 << 7));
4798 else if (have_match_ranks(&info
, 0, 2) && have_match_ranks(&info
, 1, 2))
4799 write_mchbar8(0x111, 0x20 | (3 << 2) | (0 << 6) | (0 << 7));
4801 write_mchbar8(0x111, 0x20 | (3 << 2) | (1 << 6) | (0 << 7));
4803 write_mchbar32(0xfac, read_mchbar32(0xfac) & ~0x80000000); // OK
4804 write_mchbar32(0xfb4, 0x4800); // OK
4805 write_mchbar32(0xfb8, (info
.revision
< 8) ? 0x20 : 0x0); // OK
4806 write_mchbar32(0xe94, 0x7ffff); // OK
4807 write_mchbar32(0xfc0, 0x80002040); // OK
4808 write_mchbar32(0xfc4, 0x701246); // OK
4809 write_mchbar8(0xfc8, read_mchbar8(0xfc8) & ~0x70); // OK
4810 write_mchbar32(0xe5c, 0x1000000 | read_mchbar32(0xe5c)); // OK
4811 write_mchbar32(0x1a70, (read_mchbar32(0x1a70) | 0x00200000) & ~0x00100000); // OK
4812 write_mchbar32(0x50, 0x700b0); // OK
4813 write_mchbar32(0x3c, 0x10); // OK
4814 write_mchbar8(0x1aa8, (read_mchbar8(0x1aa8) & ~0x35) | 0xa); // OK
4815 write_mchbar8(0xff4, read_mchbar8(0xff4) | 0x2); // OK
4816 write_mchbar32(0xff8, (read_mchbar32(0xff8) & ~0xe008) | 0x1020); // OK
4819 write_mchbar32(0xd00, IOMMU_BASE2
| 1);
4820 write_mchbar32(0xd40, IOMMU_BASE1
| 1);
4821 write_mchbar32(0xdc0, IOMMU_BASE4
| 1);
4823 write32p(IOMMU_BASE1
| 0xffc, 0x80000000);
4824 write32p(IOMMU_BASE2
| 0xffc, 0xc0000000);
4825 write32p(IOMMU_BASE4
| 0xffc, 0x80000000);
4830 eax
= read32p(0xffc + (read_mchbar32(0xd00) & ~1)) | 0x08000000; // = 0xe911714b// OK
4831 write32p(0xffc + (read_mchbar32(0xd00) & ~1), eax
); // OK
4832 eax
= read32p(0xffc + (read_mchbar32(0xdc0) & ~1)) | 0x40000000; // = 0xe911714b// OK
4833 write32p(0xffc + (read_mchbar32(0xdc0) & ~1), eax
); // OK
4840 eax
= info
.fsb_frequency
/ 9;
4841 write_mchbar32(0xfcc, (read_mchbar32(0xfcc) & 0xfffc0000) | (eax
* 0x280) | (eax
* 0x5000) | eax
| 0x40000); // OK
4842 write_mchbar32(0x20, 0x33001); //OK
4845 for (channel
= 0; channel
< NUM_CHANNELS
; channel
++) {
4846 write_mchbar32(0x220 + (channel
<< 10), read_mchbar32(0x220 + (channel
<< 10)) & ~0x7770); //OK
4847 if (info
.max_slots_used_in_channel
== 1)
4848 write_mchbar16(0x237 + (channel
<< 10), (read_mchbar16(0x237 + (channel
<< 10)) | 0x0201)); //OK
4850 write_mchbar16(0x237 + (channel
<< 10), (read_mchbar16(0x237 + (channel
<< 10)) & ~0x0201)); //OK
4852 write_mchbar8(0x241 + (channel
<< 10), read_mchbar8(0x241 + (channel
<< 10)) | 1); // OK
4854 if (info
.clock_speed_index
<= 1
4855 && (info
.silicon_revision
== 2
4856 || info
.silicon_revision
== 3))
4857 write_mchbar32(0x248 + (channel
<< 10), (read_mchbar32(0x248 + (channel
<< 10)) | 0x00102000)); // OK
4859 write_mchbar32(0x248 + (channel
<< 10), (read_mchbar32(0x248 + (channel
<< 10)) & ~0x00102000)); // OK
4862 write_mchbar32(0x115, read_mchbar32(0x115) | 0x1000000); // OK
4867 if (!(info
.silicon_revision
== 0 || info
.silicon_revision
== 1))
4869 al
|= ((1 << (info
.max_slots_used_in_channel
- 1)) - 1) << 4;
4870 write_mchbar32(0x210, (al
<< 16) | 0x20); // OK
4873 for (channel
= 0; channel
< NUM_CHANNELS
; channel
++) {
4874 write_mchbar32(0x288 + (channel
<< 10), 0x70605040); // OK
4875 write_mchbar32(0x28c + (channel
<< 10), 0xfffec080); // OK
4876 write_mchbar32(0x290 + (channel
<< 10), 0x282091c | ((info
.max_slots_used_in_channel
- 1) << 0x16)); // OK
4879 pci_read_config32(NORTHBRIDGE
, 0x40); // = DEFAULT_EPBAR | 0x001 // OK
4880 reg1c
= read32p(DEFAULT_EPBAR
| 0x01c); // = 0x8001 // OK
4881 pci_read_config32(NORTHBRIDGE
, 0x40); // = DEFAULT_EPBAR | 0x001 // OK
4882 write32p(DEFAULT_EPBAR
| 0x01c, reg1c
); // OK
4883 read_mchbar8(0xe08); // = 0x0
4884 pci_read_config32(NORTHBRIDGE
, 0xe4); // = 0x316126
4885 write_mchbar8(0x1210, read_mchbar8(0x1210) | 2); // OK
4886 write_mchbar32(0x1200, 0x8800440); // OK
4887 write_mchbar32(0x1204, 0x53ff0453); // OK
4888 write_mchbar32(0x1208, 0x19002043); // OK
4889 write_mchbar16(0x1214, 0x320); // OK
4891 if (info
.revision
== 0x10 || info
.revision
== 0x11) {
4892 write_mchbar16(0x1214, 0x220); // OK
4893 write_mchbar8(0x1210, read_mchbar8(0x1210) | 0x40); // OK
4896 write_mchbar8(0x1214, read_mchbar8(0x1214) | 0x4); // OK
4897 write_mchbar8(0x120c, 0x1); // OK
4898 write_mchbar8(0x1218, 0x3); // OK
4899 write_mchbar8(0x121a, 0x3); // OK
4900 write_mchbar8(0x121c, 0x3); // OK
4901 write_mchbar16(0xc14, 0x0); // OK
4902 write_mchbar16(0xc20, 0x0); // OK
4903 write_mchbar32(0x1c, 0x0); // OK
4905 /* revision dependent here. */
4907 write_mchbar16(0x1230, read_mchbar16(0x1230) | 0x1f07); // OK
4909 if (info
.uma_enabled
)
4910 write_mchbar32(0x11f4, read_mchbar32(0x11f4) | 0x10000000); // OK
4912 write_mchbar16(0x1230, read_mchbar16(0x1230) | 0x8000); // OK
4913 write_mchbar8(0x1214, read_mchbar8(0x1214) | 1); // OK
4918 reg_1020
= read_mchbar32(0x1020); // = 0x6c733c // OK
4919 write_mchbar8(0x1070, 0x1); // OK
4921 write_mchbar32(0x1000, 0x100); // OK
4922 write_mchbar8(0x1007, 0x0); // OK
4924 if (reg_1020
!= 0) {
4925 write_mchbar16(0x1018, 0x0); // OK
4927 ebpb
= reg_1020
& 0xff;
4935 write_mchbar32(0x1014, 0xffffffff); // OK
4937 write_mchbar32(0x1010, ((((ebpb
+ 0x7d) << 7) / bl
) & 0xff) * (! !reg_1020
)); // OK
4939 write_mchbar8(0x101c, 0xb8); // OK
4941 write_mchbar8(0x123e, (read_mchbar8(0x123e) & 0xf) | 0x60); // OK
4942 if (reg_1020
!= 0) {
4943 write_mchbar32(0x123c, (read_mchbar32(0x123c) & ~0x00900000) | 0x600000); // OK
4944 write_mchbar8(0x101c, 0xb8); // OK
4947 setup_heci_uma(&info
);
4949 if (info
.uma_enabled
) {
4951 write_mchbar32(0x11b0, read_mchbar32(0x11b0) | 0x4000); // OK
4952 write_mchbar32(0x11b4, read_mchbar32(0x11b4) | 0x4000); // OK
4953 write_mchbar16(0x1190, read_mchbar16(0x1190) | 0x4000); // OK
4955 ax
= read_mchbar16(0x1190) & 0xf00; // = 0x480a // OK
4956 write_mchbar16(0x1170, ax
| (read_mchbar16(0x1170) & 0x107f) | 0x4080); // OK
4957 write_mchbar16(0x1170, read_mchbar16(0x1170) | 0x1000); // OK
4962 for (ecx
= 0xffff; ecx
&& (read_mchbar16(0x1170) & 0x1000); ecx
--); // OK
4963 write_mchbar16(0x1190, read_mchbar16(0x1190) & ~0x4000); // OK
4966 pci_write_config8(SOUTHBRIDGE
, GEN_PMCON_2
,
4967 pci_read_config8(SOUTHBRIDGE
, GEN_PMCON_2
) & ~0x80);
4969 write_mchbar16(0x2ca8, 0x8);
4973 dump_timings(&info
);
4974 cbmem_wasnot_inited
= cbmem_recovery(s3resume
);
4977 save_timings(&info
);
4978 if (s3resume
&& cbmem_wasnot_inited
) {
4980 printk(BIOS_ERR
, "Failed S3 resume.\n");
4981 ram_check(0x100000, 0x200000);
4983 /* Clear SLP_TYPE. */
4984 reg32
= inl(DEFAULT_PMBASE
+ 0x04);
4985 outl(reg32
& ~(7 << 10), DEFAULT_PMBASE
+ 0x04);
4987 /* Failed S3 resume, reset to come up cleanly */