#include <linux/init.h>
#include <linux/mm.h>

#include <asm/mtrr.h>
#include <asm/msr.h>
#include <asm/io.h>
#include <asm/processor-cyrix.h>

#include "mtrr.h"
12 cyrix_get_arr(unsigned int reg
, unsigned long *base
,
13 unsigned long *size
, mtrr_type
* type
)
16 unsigned char arr
, ccr3
, rcr
, shift
;
18 arr
= CX86_ARR_BASE
+ (reg
<< 1) + reg
; /* avoid multiplication by 3 */
20 /* Save flags and disable interrupts */
21 local_irq_save(flags
);
23 ccr3
= getCx86(CX86_CCR3
);
24 setCx86(CX86_CCR3
, (ccr3
& 0x0f) | 0x10); /* enable MAPEN */
25 ((unsigned char *) base
)[3] = getCx86(arr
);
26 ((unsigned char *) base
)[2] = getCx86(arr
+ 1);
27 ((unsigned char *) base
)[1] = getCx86(arr
+ 2);
28 rcr
= getCx86(CX86_RCR_BASE
+ reg
);
29 setCx86(CX86_CCR3
, ccr3
); /* disable MAPEN */
31 /* Enable interrupts if it was enabled previously */
32 local_irq_restore(flags
);
33 shift
= ((unsigned char *) base
)[1] & 0x0f;
36 /* Power of two, at least 4K on ARR0-ARR6, 256K on ARR7
37 * Note: shift==0xf means 4G, this is unsupported.
40 *size
= (reg
< 7 ? 0x1UL
: 0x40UL
) << (shift
- 1);
44 /* Bit 0 is Cache Enable on ARR7, Cache Disable on ARR0-ARR6 */
48 *type
= MTRR_TYPE_UNCACHABLE
;
51 *type
= MTRR_TYPE_WRBACK
;
54 *type
= MTRR_TYPE_WRCOMB
;
58 *type
= MTRR_TYPE_WRTHROUGH
;
64 *type
= MTRR_TYPE_UNCACHABLE
;
67 *type
= MTRR_TYPE_WRCOMB
;
70 *type
= MTRR_TYPE_WRBACK
;
74 *type
= MTRR_TYPE_WRTHROUGH
;
81 cyrix_get_free_region(unsigned long base
, unsigned long size
, int replace_reg
)
82 /* [SUMMARY] Get a free ARR.
83 <base> The starting (base) address of the region.
84 <size> The size (in bytes) of the region.
85 [RETURNS] The index of the region on success, else -1 on error.
90 unsigned long lbase
, lsize
;
92 switch (replace_reg
) {
108 /* If we are to set up a region >32M then look at ARR7 immediately */
110 cyrix_get_arr(7, &lbase
, &lsize
, <ype
);
113 /* Else try ARR0-ARR6 first */
115 for (i
= 0; i
< 7; i
++) {
116 cyrix_get_arr(i
, &lbase
, &lsize
, <ype
);
117 if ((i
== 3) && arr3_protected
)
122 /* ARR0-ARR6 isn't free, try ARR7 but its size must be at least 256K */
123 cyrix_get_arr(i
, &lbase
, &lsize
, <ype
);
124 if ((lsize
== 0) && (size
>= 0x40))
133 static void prepare_set(void)
137 /* Save value of CR4 and clear Page Global Enable (bit 7) */
140 write_cr4(cr4
& ~X86_CR4_PGE
);
143 /* Disable and flush caches. Note that wbinvd flushes the TLBs as
145 cr0
= read_cr0() | 0x40000000;
150 /* Cyrix ARRs - everything else were excluded at the top */
151 ccr3
= getCx86(CX86_CCR3
);
153 /* Cyrix ARRs - everything else were excluded at the top */
154 setCx86(CX86_CCR3
, (ccr3
& 0x0f) | 0x10);
158 static void post_set(void)
160 /* Flush caches and TLBs */
163 /* Cyrix ARRs - everything else was excluded at the top */
164 setCx86(CX86_CCR3
, ccr3
);
167 write_cr0(read_cr0() & 0xbfffffff);
169 /* Restore value of CR4 */
174 static void cyrix_set_arr(unsigned int reg
, unsigned long base
,
175 unsigned long size
, mtrr_type type
)
177 unsigned char arr
, arr_type
, arr_size
;
179 arr
= CX86_ARR_BASE
+ (reg
<< 1) + reg
; /* avoid multiplication by 3 */
181 /* count down from 32M (ARR0-ARR6) or from 2G (ARR7) */
185 size
&= 0x7fff; /* make sure arr_size <= 14 */
186 for (arr_size
= 0; size
; arr_size
++, size
>>= 1) ;
190 case MTRR_TYPE_UNCACHABLE
:
193 case MTRR_TYPE_WRCOMB
:
196 case MTRR_TYPE_WRTHROUGH
:
205 case MTRR_TYPE_UNCACHABLE
:
208 case MTRR_TYPE_WRCOMB
:
211 case MTRR_TYPE_WRTHROUGH
:
223 setCx86(arr
, ((unsigned char *) &base
)[3]);
224 setCx86(arr
+ 1, ((unsigned char *) &base
)[2]);
225 setCx86(arr
+ 2, (((unsigned char *) &base
)[1]) | arr_size
);
226 setCx86(CX86_RCR_BASE
+ reg
, arr_type
);
237 static arr_state_t arr_state
[8] = {
238 {0UL, 0UL, 0UL}, {0UL, 0UL, 0UL}, {0UL, 0UL, 0UL}, {0UL, 0UL, 0UL},
239 {0UL, 0UL, 0UL}, {0UL, 0UL, 0UL}, {0UL, 0UL, 0UL}, {0UL, 0UL, 0UL}
242 static unsigned char ccr_state
[7] = { 0, 0, 0, 0, 0, 0, 0 };
244 static void cyrix_set_all(void)
250 /* the CCRs are not contiguous */
251 for (i
= 0; i
< 4; i
++)
252 setCx86(CX86_CCR0
+ i
, ccr_state
[i
]);
254 setCx86(CX86_CCR4
+ i
, ccr_state
[i
]);
255 for (i
= 0; i
< 8; i
++)
256 cyrix_set_arr(i
, arr_state
[i
].base
,
257 arr_state
[i
].size
, arr_state
[i
].type
);
264 * On Cyrix 6x86(MX) and M II the ARR3 is special: it has connection
265 * with the SMM (System Management Mode) mode. So we need the following:
266 * Check whether SMI_LOCK (CCR3 bit 0) is set
267 * if it is set, write a warning message: ARR3 cannot be changed!
268 * (it cannot be changed until the next processor reset)
269 * if it is reset, then we can change it, set all the needed bits:
270 * - disable access to SMM memory through ARR3 range (CCR1 bit 7 reset)
271 * - disable access to SMM memory (CCR1 bit 2 reset)
272 * - disable SMM mode (CCR1 bit 1 reset)
273 * - disable write protection of ARR3 (CCR6 bit 1 reset)
274 * - (maybe) disable ARR3
275 * Just to be sure, we enable ARR usage by the processor (CCR5 bit 5 set)
280 struct set_mtrr_context ctxt
;
281 unsigned char ccr
[7];
282 int ccrc
[7] = { 0, 0, 0, 0, 0, 0, 0 };
287 /* flush cache and enable MAPEN */
288 set_mtrr_prepare_save(&ctxt
);
289 set_mtrr_cache_disable(&ctxt
);
291 /* Save all CCRs locally */
292 ccr
[0] = getCx86(CX86_CCR0
);
293 ccr
[1] = getCx86(CX86_CCR1
);
294 ccr
[2] = getCx86(CX86_CCR2
);
296 ccr
[4] = getCx86(CX86_CCR4
);
297 ccr
[5] = getCx86(CX86_CCR5
);
298 ccr
[6] = getCx86(CX86_CCR6
);
304 /* Disable SMM mode (bit 1), access to SMM memory (bit 2) and
305 * access to SMM memory through ARR3 (bit 7).
322 ccrc
[6] = 1; /* Disable write protection of ARR3 */
323 setCx86(CX86_CCR6
, ccr
[6]);
325 /* Disable ARR3. This is safe now that we disabled SMM. */
326 /* cyrix_set_arr_up (3, 0, 0, 0, FALSE); */
328 /* If we changed CCR1 in memory, change it in the processor, too. */
330 setCx86(CX86_CCR1
, ccr
[1]);
332 /* Enable ARR usage by the processor */
333 if (!(ccr
[5] & 0x20)) {
336 setCx86(CX86_CCR5
, ccr
[5]);
339 for (i
= 0; i
< 7; i
++)
340 ccr_state
[i
] = ccr
[i
];
341 for (i
= 0; i
< 8; i
++)
343 &arr_state
[i
].base
, &arr_state
[i
].size
,
347 set_mtrr_done(&ctxt
); /* flush cache and disable MAPEN */
350 printk(KERN_INFO
"mtrr: ARR usage was not enabled, enabled manually\n");
352 printk(KERN_INFO
"mtrr: ARR3 cannot be changed\n");
354 if ( ccrc[1] & 0x80) printk ("mtrr: SMM memory access through ARR3 disabled\n");
355 if ( ccrc[1] & 0x04) printk ("mtrr: SMM memory access disabled\n");
356 if ( ccrc[1] & 0x02) printk ("mtrr: SMM mode disabled\n");
359 printk(KERN_INFO
"mtrr: ARR3 was write protected, unprotected\n");
363 static struct mtrr_ops cyrix_mtrr_ops
= {
364 .vendor
= X86_VENDOR_CYRIX
,
365 // .init = cyrix_arr_init,
366 .set_all
= cyrix_set_all
,
367 .set
= cyrix_set_arr
,
368 .get
= cyrix_get_arr
,
369 .get_free_region
= cyrix_get_free_region
,
370 .validate_add_page
= generic_validate_add_page
,
371 .have_wrcomb
= positive_have_wrcomb
,
374 int __init
cyrix_init_mtrr(void)
376 set_mtrr_ops(&cyrix_mtrr_ops
);
380 //arch_initcall(cyrix_init_mtrr);