release/src-rt-6.x/linux/linux-2.6/arch/powerpc/mm/ppc_mmu_32.c (tomato.git, RT-AC66 3.0.0.4.374.130)
/*
 * This file contains the routines for handling the MMU on those
 * PowerPC implementations where the MMU substantially follows the
 * architecture specification.  This includes the 6xx, 7xx, 7xxx,
 * 8260, and POWER3 implementations but excludes the 8xx and 4xx.
 *  -- paulus
 *
 *  Derived from arch/ppc/mm/init.c:
 *    Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 *
 *  Modifications by Paul Mackerras (PowerMac) (paulus@cs.anu.edu.au)
 *  and Cort Dougan (PReP) (cort@cs.nmt.edu)
 *    Copyright (C) 1996 Paul Mackerras
 *  Amiga/APUS changes by Jesper Skov (jskov@cygnus.co.uk).
 *
 *  Derived from "arch/i386/mm/init.c"
 *    Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
 *
 *  This program is free software; you can redistribute it and/or
 *  modify it under the terms of the GNU General Public License
 *  as published by the Free Software Foundation; either version
 *  2 of the License, or (at your option) any later version.
 */
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/init.h>
#include <linux/highmem.h>

#include <asm/prom.h>
#include <asm/mmu.h>
#include <asm/machdep.h>
#include <asm/lmb.h>

#include "mmu_decl.h"
PTE *Hash, *Hash_end;
unsigned long Hash_size, Hash_mask;
unsigned long _SDR1;
union ubat {                    /* BAT register values to be loaded */
        BAT     bat;
        u32     word[2];
} BATS[8][2];                   /* 8 pairs of IBAT, DBAT */

struct batrange {               /* stores address ranges mapped by BATs */
        unsigned long start;
        unsigned long limit;
        unsigned long phys;
} bat_addrs[8];
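/*
 * Note added for clarity (not in the original source): each BAT entry
 * maps one naturally aligned, power-of-two sized block by simple base
 * substitution.  For example, bat_addrs[2] = { .start = 0xc0000000,
 * .limit = 0xcfffffff, .phys = 0 } would describe a 256MB BAT mapping
 * of low RAM at KERNELBASE, translated without any page-table walk.
 */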
/*
 * Return PA for this VA if it is mapped by a BAT, or 0
 */
unsigned long v_mapped_by_bats(unsigned long va)
{
        int b;
        for (b = 0; b < 4; ++b)
                if (va >= bat_addrs[b].start && va < bat_addrs[b].limit)
                        return bat_addrs[b].phys + (va - bat_addrs[b].start);
        return 0;
}
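/*
 * Example added for clarity (not in the original source): with the
 * bat_addrs[2] entry sketched above, v_mapped_by_bats(0xc0001000)
 * returns 0x1000; a VA outside every BAT range returns 0, which
 * callers treat as "not BAT-mapped, consult the page tables".
 */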
/*
 * Return VA for a given PA or 0 if not mapped
 */
unsigned long p_mapped_by_bats(unsigned long pa)
{
        int b;
        for (b = 0; b < 4; ++b)
                if (pa >= bat_addrs[b].phys
                    && pa < (bat_addrs[b].limit - bat_addrs[b].start)
                            + bat_addrs[b].phys)
                        return bat_addrs[b].start + (pa - bat_addrs[b].phys);
        return 0;
}
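/*
 * Note added for clarity (not in the original source): this is the
 * inverse of v_mapped_by_bats(); with the entry sketched above,
 * p_mapped_by_bats(0x1000) would return 0xc0001000, letting callers
 * reuse an existing BAT mapping rather than building a new one.
 */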
unsigned long __init mmu_mapin_ram(void)
{
#ifdef CONFIG_POWER4
        return 0;
#else
        unsigned long tot, bl, done;
        unsigned long max_size = (256<<20);
        unsigned long align;

        if (__map_without_bats) {
                printk(KERN_DEBUG "RAM mapped without BATs\n");
                return 0;
        }

        /* Set up BAT2 and if necessary BAT3 to cover RAM. */

        /* Make sure we don't map a block larger than the
           smallest alignment of the physical address. */
        /* alignment of PPC_MEMSTART */
        align = ~(PPC_MEMSTART-1) & PPC_MEMSTART;
        /* set BAT block size to MIN(max_size, align) */
        if (align && align < max_size)
                max_size = align;
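        /*
         * Worked example added for clarity (not in the original
         * source): ~(x-1) & x isolates the lowest set bit of x, so
         * PPC_MEMSTART == 0x18000000 would give align == 0x08000000
         * (128MB); BAT blocks must be naturally aligned, so no larger
         * block may start at that physical address.
         */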
        tot = total_lowmem;
        for (bl = 128<<10; bl < max_size; bl <<= 1) {
                if (bl * 2 > tot)
                        break;
        }
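        /*
         * Example added for clarity (not in the original source):
         * with total_lowmem == 96MB the loop stops at bl == 64MB,
         * the first power-of-two size whose double exceeds 96MB, so
         * BAT2 maps 64MB and the remaining 32MB is left for BAT3.
         */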
        setbat(2, KERNELBASE, PPC_MEMSTART, bl, _PAGE_RAM);
        done = (unsigned long)bat_addrs[2].limit - KERNELBASE + 1;
        if ((done < tot) && !bat_addrs[3].limit) {
                /* use BAT3 to cover a bit more */
                tot -= done;
                for (bl = 128<<10; bl < max_size; bl <<= 1)
                        if (bl * 2 > tot)
                                break;
                setbat(3, KERNELBASE+done, PPC_MEMSTART+done, bl, _PAGE_RAM);
                done = (unsigned long)bat_addrs[3].limit - KERNELBASE + 1;
        }

        return done;
#endif
}
/*
 * Set up one of the I/D BAT (block address translation) register pairs.
 * The parameters are not checked; in particular size must be a power
 * of 2 between 128k and 256M.
 */
void __init setbat(int index, unsigned long virt, unsigned long phys,
                   unsigned int size, int flags)
{
        unsigned int bl;
        int wimgxpp;
        union ubat *bat = BATS[index];

        if (((flags & _PAGE_NO_CACHE) == 0) &&
            cpu_has_feature(CPU_FTR_NEED_COHERENT))
                flags |= _PAGE_COHERENT;

        bl = (size >> 17) - 1;
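        /*
         * Note added for clarity (not in the original source): the BL
         * field encodes the block size as (size / 128K) - 1, i.e. a
         * run of ones; size == 256M gives bl == 0x7ff and size ==
         * 128K gives bl == 0.
         */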
        if (PVR_VER(mfspr(SPRN_PVR)) != 1) {
                /* 603, 604, etc. */
                /* Do DBAT first */
                wimgxpp = flags & (_PAGE_WRITETHRU | _PAGE_NO_CACHE
                                   | _PAGE_COHERENT | _PAGE_GUARDED);
                wimgxpp |= (flags & _PAGE_RW)? BPP_RW: BPP_RX;
                bat[1].word[0] = virt | (bl << 2) | 2; /* Vs=1, Vp=0 */
                bat[1].word[1] = phys | wimgxpp;
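                /*
                 * Example added for clarity (not in the original
                 * source): a writable 256MB mapping at KERNELBASE
                 * yields word[0] == 0xc0001ffe (BEPI | BL<<2 | Vs)
                 * and word[1] == phys | BPP_RW.
                 */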
#ifndef CONFIG_KGDB /* want user access for breakpoints */
                if (flags & _PAGE_USER)
#endif
                        bat[1].bat.batu.vp = 1;
                if (flags & _PAGE_GUARDED) {
                        /* G bit must be zero in IBATs */
                        bat[0].word[0] = bat[0].word[1] = 0;
                } else {
                        /* make IBAT same as DBAT */
                        bat[0] = bat[1];
                }
        } else {
                /* 601 cpu */
                if (bl > BL_8M)
                        bl = BL_8M;
                wimgxpp = flags & (_PAGE_WRITETHRU | _PAGE_NO_CACHE
                                   | _PAGE_COHERENT);
                wimgxpp |= (flags & _PAGE_RW)?
                        ((flags & _PAGE_USER)? PP_RWRW: PP_RWXX): PP_RXRX;
                bat->word[0] = virt | wimgxpp | 4;      /* Ks=0, Ku=1 */
                bat->word[1] = phys | bl | 0x40;        /* V=1 */
        }

        bat_addrs[index].start = virt;
        bat_addrs[index].limit = virt + ((bl + 1) << 17) - 1;
        bat_addrs[index].phys = phys;
}
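/*
 * Usage sketch added for clarity (mirrors the call in mmu_mapin_ram()
 * above, not a new API):
 *      setbat(2, KERNELBASE, PPC_MEMSTART, 0x10000000, _PAGE_RAM);
 * installs a 256MB cacheable, writable BAT pair for low RAM; an I/O
 * window would instead pass _PAGE_NO_CACHE | _PAGE_GUARDED.
 */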
/*
 * Preload a translation in the hash table
 */
void hash_preload(struct mm_struct *mm, unsigned long ea,
                  unsigned long access, unsigned long trap)
{
        pmd_t *pmd;

        if (Hash == 0)
                return;
        pmd = pmd_offset(pud_offset(pgd_offset(mm, ea), ea), ea);
        if (!pmd_none(*pmd))
                add_hash_page(mm->context.id, ea, pmd_val(*pmd));
}
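/*
 * Note added for clarity (not in the original source): walking
 * pgd->pud->pmd locates the Linux PTE page for ea; the assembly
 * helper add_hash_page() then inserts the translation into the hash
 * table up front, so the first access need not fault through
 * hash_page.
 */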
/*
 * Initialize the hash table and patch the instructions in hashtable.S.
 */
void __init MMU_init_hw(void)
{
        unsigned int hmask, mb, mb2;
        unsigned int n_hpteg, lg_n_hpteg;

        extern unsigned int hash_page_patch_A[];
        extern unsigned int hash_page_patch_B[], hash_page_patch_C[];
        extern unsigned int hash_page[];
        extern unsigned int flush_hash_patch_A[], flush_hash_patch_B[];

        if (!cpu_has_feature(CPU_FTR_HPTE_TABLE)) {
                /*
                 * Put a blr (procedure return) instruction at the
                 * start of hash_page, since we can still get DSI
                 * exceptions on a 603.
                 */
                hash_page[0] = 0x4e800020;
                flush_icache_range((unsigned long) &hash_page[0],
                                   (unsigned long) &hash_page[1]);
                return;
        }

        if ( ppc_md.progress ) ppc_md.progress("hash:enter", 0x105);

#define LG_HPTEG_SIZE   6               /* 64 bytes per HPTEG */
#define SDR1_LOW_BITS   ((n_hpteg - 1) >> 10)
#define MIN_N_HPTEG     1024            /* min 64kB hash table */
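        /*
         * Note added for clarity (not in the original source): SDR1
         * holds the hash table's physical base in its upper bits and
         * a size mask in its low bits.  For example, n_hpteg == 16384
         * (a 1MB table of 64-byte HPTEGs) gives SDR1_LOW_BITS == 0xf.
         */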
        /*
         * Allow 1 HPTE (1/8 HPTEG) for each page of memory.
         * This is less than the recommended amount, but then
         * Linux ain't AIX.
         */
        n_hpteg = total_memory / (PAGE_SIZE * 8);
        if (n_hpteg < MIN_N_HPTEG)
                n_hpteg = MIN_N_HPTEG;
        lg_n_hpteg = __ilog2(n_hpteg);
        if (n_hpteg & (n_hpteg - 1)) {
                ++lg_n_hpteg;           /* round up if not power of 2 */
                n_hpteg = 1 << lg_n_hpteg;
        }
        Hash_size = n_hpteg << LG_HPTEG_SIZE;
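        /*
         * Worked example added for clarity (not in the original
         * source): 128MB of RAM with 4kB pages gives n_hpteg ==
         * 128M / (4096 * 8) == 4096 (already a power of 2), so
         * Hash_size == 4096 << 6 == 256kB.
         */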
        /*
         * Find some memory for the hash table.
         */
        if ( ppc_md.progress ) ppc_md.progress("hash:find piece", 0x322);
        Hash = __va(lmb_alloc_base(Hash_size, Hash_size,
                                   __initial_memory_limit));
        cacheable_memzero(Hash, Hash_size);
        _SDR1 = __pa(Hash) | SDR1_LOW_BITS;

        Hash_end = (PTE *) ((unsigned long)Hash + Hash_size);

        printk("Total memory = %ldMB; using %ldkB for hash table (at %p)\n",
               total_memory >> 20, Hash_size >> 10, Hash);
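        /*
         * Note added for clarity (not in the original source): the
         * table is allocated aligned to its own size (second argument
         * to lmb_alloc_base) because the architecture requires the
         * hash table base in SDR1 to be naturally aligned.
         */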
        /*
         * Patch up the instructions in hashtable.S:create_hpte
         */
        if ( ppc_md.progress ) ppc_md.progress("hash:patch", 0x345);
        Hash_mask = n_hpteg - 1;
        hmask = Hash_mask >> (16 - LG_HPTEG_SIZE);
        mb2 = mb = 32 - LG_HPTEG_SIZE - lg_n_hpteg;
        if (lg_n_hpteg > 16)
                mb2 = 16 - LG_HPTEG_SIZE;
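        /*
         * Note added for clarity (not in the original source): mb/mb2
         * become the mask-begin fields of patched rlwinm instructions
         * (hence the << 6 into the 0x7c0 field below), while hmask is
         * or-ed into 16-bit immediate fields; e.g. a 256kB table
         * (lg_n_hpteg == 12) gives mb == mb2 == 32 - 6 - 12 == 14.
         */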
        hash_page_patch_A[0] = (hash_page_patch_A[0] & ~0xffff)
                | ((unsigned int)(Hash) >> 16);
        hash_page_patch_A[1] = (hash_page_patch_A[1] & ~0x7c0) | (mb << 6);
        hash_page_patch_A[2] = (hash_page_patch_A[2] & ~0x7c0) | (mb2 << 6);
        hash_page_patch_B[0] = (hash_page_patch_B[0] & ~0xffff) | hmask;
        hash_page_patch_C[0] = (hash_page_patch_C[0] & ~0xffff) | hmask;

        /*
         * Ensure that the locations we've patched have been written
         * out from the data cache and invalidated in the instruction
         * cache, on those machines with split caches.
         */
        flush_icache_range((unsigned long) &hash_page_patch_A[0],
                           (unsigned long) &hash_page_patch_C[1]);

        /*
         * Patch up the instructions in hashtable.S:flush_hash_page
         */
        flush_hash_patch_A[0] = (flush_hash_patch_A[0] & ~0xffff)
                | ((unsigned int)(Hash) >> 16);
        flush_hash_patch_A[1] = (flush_hash_patch_A[1] & ~0x7c0) | (mb << 6);
        flush_hash_patch_A[2] = (flush_hash_patch_A[2] & ~0x7c0) | (mb2 << 6);
        flush_hash_patch_B[0] = (flush_hash_patch_B[0] & ~0xffff) | hmask;
        flush_icache_range((unsigned long) &flush_hash_patch_A[0],
                           (unsigned long) &flush_hash_patch_B[1]);

        if ( ppc_md.progress ) ppc_md.progress("hash:done", 0x205);
}