/*
 * Copyright (c) 1999 Grant Erickson <grant@lcse.umn.edu>
 *
 * Module name: 4xx_tlb.c
 *
 * Description:
 *   Routines for manipulating the TLB on PowerPC 400-class processors.
 */

#include <asm/processor.h>
#include <asm/mmu.h>
#include <asm/pgtable.h>
#include <asm/system.h>

/* Preprocessor Defines */

#if !defined(TRUE) || TRUE != 1
#define TRUE    1
#endif

#if !defined(FALSE) || FALSE != 0
#define FALSE   0
#endif

/* Function Macros */

/* Type Definitions */

typedef struct pin_entry_s {
        unsigned int    e_pinned: 1,    /* This TLB entry is pinned down. */
                        e_used: 23;     /* Number of users for this mapping. */
} pin_entry_t;

/* Global Variables */

static pin_entry_t pin_table[PPC4XX_TLB_SIZE];

/* Function Prototypes */

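/*
 * Pin a virtual-to-physical translation into a free TLB entry so that it
 * survives later flushes. The entry is written under PID 0 and its slot is
 * marked as pinned in pin_table[].
 */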
void
PPC4xx_tlb_pin(unsigned long va, unsigned long pa, int pagesz, int cache)
{
        int i, found = FALSE;
        unsigned long tag, data;
        unsigned long opid;

        opid = mfspr(SPRN_PID);
        mtspr(SPRN_PID, 0);

        data = (pa & TLB_RPN_MASK) | TLB_WR;

        if (cache)
                data |= TLB_EX;                 /* cacheable, executable */
        else
                data |= (TLB_G | TLB_I);        /* guarded, cache-inhibited */

        tag = (va & TLB_EPN_MASK) | TLB_VALID | pagesz;

        for (i = 0; i < PPC4XX_TLB_SIZE; i++) {
                if (pin_table[i].e_pinned == FALSE) {
                        found = TRUE;
                        break;
                }
        }

        if (found) {
                /* printk("Pinning %#x -> %#x in entry %d...\n", va, pa, i); */
                asm("tlbwe %0,%1,1" : : "r" (data), "r" (i));
                asm("tlbwe %0,%1,0" : : "r" (tag), "r" (i));
                asm("isync");
                pin_table[i].e_pinned = found;
        }

        mtspr(SPRN_PID, opid);
        return;
}
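
/*
 * Release a previously pinned TLB entry. Not yet implemented.
 */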
void
PPC4xx_tlb_unpin(unsigned long va, unsigned long pa, int size)
{
        /* XXX - To be implemented. */
}
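
/*
 * Invalidate every TLB entry that is not pinned by writing zeroed tag and
 * data words into each slot. Runs under PID 0 with interrupts disabled.
 */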
void
PPC4xx_tlb_flush_all(void)
{
        int i;
        unsigned long flags, opid;

        save_flags(flags);
        cli();

        opid = mfspr(SPRN_PID);
        mtspr(SPRN_PID, 0);

        for (i = 0; i < PPC4XX_TLB_SIZE; i++) {
                unsigned long ov = 0;

                if (pin_table[i].e_pinned)
                        continue;

                asm("tlbwe %0,%1,0" : : "r" (ov), "r" (i));
                asm("tlbwe %0,%1,1" : : "r" (ov), "r" (i));
        }

        asm("sync;isync");

        mtspr(SPRN_PID, opid);
        restore_flags(flags);
}
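
/*
 * Invalidate the single TLB entry (if any) that maps 'va' in address space
 * 'pid'. The entry is located with tlbsx. and its valid bit is cleared,
 * unless it is pinned.
 */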
void
PPC4xx_tlb_flush(unsigned long va, int pid)
{
        unsigned long i, tag, flags, found = 1, opid;

        save_flags(flags);
        cli();

        opid = mfspr(SPRN_PID);
        mtspr(SPRN_PID, pid);

        asm("tlbsx. %0,0,%2;beq 1f;li %1,0;1:" : "=r" (i), "=r" (found) : "r" (va));

        if (found && pin_table[i].e_pinned == 0) {
                asm("tlbre %0,%1,0" : "=r" (tag) : "r" (i));
                tag &= ~TLB_VALID;
                asm("tlbwe %0,%1,0" : : "r" (tag), "r" (i));
        }

        mtspr(SPRN_PID, opid);

        restore_flags(flags);
}
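
/*
 * Everything below is compiled out (#if 0): a C implementation of the 4xx
 * TLB miss and page fault handlers.
 */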
#if 0

/*
 * TLB miss handling code.
 */

/*
 * Handle TLB faults. We should push this back to assembly code eventually.
 * Caller is responsible for turning off interrupts ...
 */
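
/*
 * Write a new entry into a pseudo-randomly chosen victim slot, avoiding the
 * wired (pinned) entries; the time base low register supplies the entropy.
 */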
static inline void
tlbDropin(unsigned long tlbhi, unsigned long tlblo) {
        /*
         * Avoid the divide at the slight cost of a little too
         * much emphasis on the last few entries.
         */
        unsigned long rand = mfspr(SPRN_TBLO);
        rand &= 0x3f;
        rand += NTLB_WIRED;
        if (rand >= NTLB)
                rand -= NTLB_WIRED;

        asm("tlbwe %0,%1,1" : : "r" (tlblo), "r" (rand));
        asm("tlbwe %0,%1,0" : : "r" (tlbhi), "r" (rand));
        asm("isync;sync");
}
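
/*
 * Build the TLBHI/TLBLO pair for 'addr' from its Linux PTE and install it,
 * reusing an existing matching entry when tlbsx. finds one, otherwise
 * dropping it into a victim slot via tlbDropin().
 */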
static inline void
mkTlbEntry(unsigned long addr, pte_t *pte) {
        unsigned long tlbhi;
        unsigned long tlblo;
        int found = 1;
        int idx;

        /*
         * Construct the TLB entry.
         */
        tlbhi = addr & ~(PAGE_SIZE-1);
        tlblo = virt_to_phys(pte_page(*pte)) & TLBLO_RPN;
        if (pte_val(*pte) & _PAGE_HWWRITE)
                tlblo |= TLBLO_WR;
        if (pte_val(*pte) & _PAGE_NO_CACHE)
                tlblo |= TLBLO_I;
        tlblo |= TLBLO_EX;
        if (addr < KERNELBASE)
                tlblo |= TLBLO_Z_USER;
        tlbhi |= TLBHI_PGSZ_4K;
        tlbhi |= TLBHI_VALID;

        /*
         * See if a match already exists in the TLB.
         */
        asm("tlbsx. %0,0,%2;beq 1f;li %1,0;1:" : "=r" (idx), "=r" (found) : "r" (tlbhi));
        if (found) {
                /*
                 * Found an existing entry. Just reuse the index.
                 */
                asm("tlbwe %0,%1,0" : : "r" (tlbhi), "r" (idx));
                asm("tlbwe %0,%1,1" : : "r" (tlblo), "r" (idx));
        }
        else {
                /*
                 * Do the more expensive operation.
                 */
                tlbDropin(tlbhi, tlblo);
        }
}

/*
 * Mainline of the TLB miss handler. The above inline routines should fold
 * into this one, eliminating most function call overhead.
 */
#ifdef TLBMISS_DEBUG
volatile unsigned long miss_start;
volatile unsigned long miss_end;
#endif
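
/*
 * Walk the page tables for the faulting address and, if a valid, present
 * (and writable, for stores) PTE is found, mark it young/dirty and load a
 * TLB entry for it under the appropriate PID. Returns 0 on success, nonzero
 * if the generic page fault handler must be invoked.
 */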
static inline int tlbMiss(struct pt_regs *regs, unsigned long badaddr, int wasWrite)
{
        int spid, ospid;
        struct mm_struct *mm;
        pgd_t *pgd;
        pmd_t *pmd;
        pte_t *pte;

        if (!user_mode(regs) && (badaddr >= KERNELBASE)) {
                mm = task[0]->mm;
                spid = 0;
#ifdef TLBMISS_DEBUG
                miss_start = 0;
#endif
        }
        else {
                mm = current->mm;
                spid = mfspr(SPRN_PID);
#ifdef TLBMISS_DEBUG
                miss_start = 1;
#endif
        }
#ifdef TLBMISS_DEBUG
        store_cache_range((unsigned long)&miss_start, sizeof(miss_start));
#endif

        pgd = pgd_offset(mm, badaddr);
        if (pgd_none(*pgd))
                goto NOGOOD;

        pmd = pmd_offset(pgd, badaddr);
        if (pmd_none(*pmd))
                goto NOGOOD;

        pte = pte_offset(pmd, badaddr);
        if (pte_none(*pte))
                goto NOGOOD;
        if (!pte_present(*pte))
                goto NOGOOD;
#if 1
        prohibit_if_guarded(badaddr, sizeof(int));
#endif
        if (wasWrite) {
                if (!pte_write(*pte)) {
                        goto NOGOOD;
                }
                set_pte(pte, pte_mkdirty(*pte));
        }
        set_pte(pte, pte_mkyoung(*pte));

        ospid = mfspr(SPRN_PID);
        mtspr(SPRN_PID, spid);
        mkTlbEntry(badaddr, pte);
        mtspr(SPRN_PID, ospid);

#ifdef TLBMISS_DEBUG
        miss_end = 0;
        store_cache_range((unsigned long)&miss_end, sizeof(miss_end));
#endif
        return 0;

NOGOOD:
#ifdef TLBMISS_DEBUG
        miss_end = 1;
        store_cache_range((unsigned long)&miss_end, sizeof(miss_end));
#endif
        return 1;
}

/*
 * End TLB miss handling code.
 */
/* ---------- */

/*
 * Used to flush the TLB if the page fault handler decides to change
 * something.
 */
void update_mmu_cache(struct vm_area_struct *vma, unsigned long addr, pte_t pte) {
        int spid;
        unsigned long flags;

        save_flags(flags);
        cli();

        if (addr >= KERNELBASE)
                spid = 0;
        else
                spid = vma->vm_mm->context;
        tlbFlush1(addr, spid);

        restore_flags(flags);
}

/*
 * Given a virtual address in the current address space, make
 * sure the associated physical page is present in memory,
 * and if the data is to be modified, that any copy-on-write
 * actions have taken place.
 */
unsigned long make_page_present(unsigned long p, int rw) {
        pte_t *pte;
        char c;

        get_user(c, (char *) p);

        pte = findPTE(current->mm, p);
        if (pte_none(*pte) || !pte_present(*pte))
                debug("make_page_present didn't load page", 0);

        if (rw) {
                /*
                 * You have to write-touch the page, so that
                 * zero-filled pages are forced to be copied
                 * rather than still pointing at the zero
                 * page.
                 */
                extern void tlbFlush1(unsigned long, int);
                tlbFlush1(p, get_context());
                put_user(c, (char *) p);
                if (!pte_write(*pte))
                        debug("make_page_present didn't make page writable", 0);
        }
        tlbFlush1(p, get_context());

        return pte_page(*pte);
}
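
/*
 * Data TLB miss: fetch the fault address from DEAR and the store indication
 * from ESR, then fall back to do_page_fault() if the fast path fails.
 */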
void DataTLBMissException(struct pt_regs *regs)
{
        unsigned long badaddr = mfspr(SPRN_DEAR);
        int wasWrite = mfspr(SPRN_ESR) & 0x800000;

        if (tlbMiss(regs, badaddr, wasWrite)) {
                sti();
                do_page_fault(regs, badaddr, wasWrite);
                cli();
        }
}
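
/*
 * Instruction TLB miss: the faulting address is the instruction pointer.
 * Guard against a missing current task before taking the slow path.
 */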
void InstructionTLBMissException(struct pt_regs *regs)
{
        if (!current) {
                debug("ITLB Miss with no current task", regs);
                sti();
                bad_page_fault(regs, regs->nip);
                cli();
                return;
        }
        if (tlbMiss(regs, regs->nip, 0)) {
                sti();
                do_page_fault(regs, regs->nip, 0);
                cli();
        }
}
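
/*
 * Data page fault: no fast path, hand the fault straight to do_page_fault().
 */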
void DataPageFault(struct pt_regs *regs)
{
        unsigned long badaddr = mfspr(SPRN_DEAR);
        int wasWrite = mfspr(SPRN_ESR) & 0x800000;

        sti();
        do_page_fault(regs, badaddr, wasWrite);
        cli();
}
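
/*
 * Instruction page fault: as above, but check for a missing current task
 * before calling the generic fault handler.
 */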
void InstructionPageFault(struct pt_regs *regs)
{
        if (!current) {
                debug("ITLB fault with no current task", regs);
                sti();
                bad_page_fault(regs, regs->nip);
                cli();
                return;
        }

        sti();
        do_page_fault(regs, regs->nip, 0);
        cli();
}

#endif