/*
 *  linux/arch/m68k/mm/memory.c
 *
 *  Copyright (C) 1995  Hamish Macdonald
 */

#include <linux/config.h>
#include <linux/mm.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/malloc.h>
#include <linux/init.h>
#include <linux/pagemap.h>

#include <asm/setup.h>
#include <asm/segment.h>
#include <asm/page.h>
#include <asm/pgalloc.h>
#include <asm/system.h>
#include <asm/traps.h>
#include <asm/io.h>
#include <asm/machdep.h>
#ifdef CONFIG_AMIGA
#include <asm/amigahw.h>
#endif
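
/*
 * Quicklist cache of free page-table pages; presumably kept so freed
 * pgd/pte pages can be handed out again without a trip to the page
 * allocator.
 */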
struct pgtable_cache_struct quicklists;

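/*
 * Called when pte_alloc()/pmd_alloc() find an entry that is already
 * corrupt: report it and point it at the dummy BAD_PAGETABLE so the
 * kernel can keep going.
 */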
void __bad_pte(pmd_t *pmd)
{
	printk("Bad pmd in pte_alloc: %08lx\n", pmd_val(*pmd));
	pmd_set(pmd, BAD_PAGETABLE);
}

void __bad_pmd(pgd_t *pgd)
{
	printk("Bad pgd in pmd_alloc: %08lx\n", pgd_val(*pgd));
	pgd_set(pgd, (pmd_t *)BAD_PAGETABLE);
}

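/*
 * Slow paths of pte_alloc()/pmd_alloc().  A fresh table page is flushed
 * out of the caches and marked noncacheable (presumably because the
 * '040/'060 table-walk hardware reads descriptors straight from memory)
 * before being hooked into the level above; if someone else installed a
 * table in the meantime, ours is freed again and the existing one used.
 */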
pte_t *get_pte_slow(pmd_t *pmd, unsigned long offset)
{
	pte_t *pte;

	pte = (pte_t *) __get_free_page(GFP_KERNEL);
	if (pmd_none(*pmd)) {
		if (pte) {
			clear_page(pte);
			__flush_page_to_ram((unsigned long)pte);
			flush_tlb_kernel_page((unsigned long)pte);
			nocache_page((unsigned long)pte);
			pmd_set(pmd, pte);
			return pte + offset;
		}
		pmd_set(pmd, BAD_PAGETABLE);
		return NULL;
	}
	free_page((unsigned long)pte);
	if (pmd_bad(*pmd)) {
		__bad_pte(pmd);
		return NULL;
	}
	return (pte_t *)__pmd_page(*pmd) + offset;
}

pmd_t *get_pmd_slow(pgd_t *pgd, unsigned long offset)
{
	pmd_t *pmd;

	pmd = get_pointer_table();
	if (pgd_none(*pgd)) {
		if (pmd) {
			pgd_set(pgd, pmd);
			return pmd + offset;
		}
		pgd_set(pgd, (pmd_t *)BAD_PAGETABLE);
		return NULL;
	}
	free_pointer_table(pmd);
	if (pgd_bad(*pgd)) {
		__bad_pmd(pgd);
		return NULL;
	}
	return (pmd_t *)__pgd_page(*pgd) + offset;
}

/* ++andreas: {get,free}_pointer_table rewritten to use unused fields from
   struct page instead of separately kmalloced struct.  Stolen from
   arch/sparc/mm/srmmu.c ... */

typedef struct list_head ptable_desc;
static LIST_HEAD(ptable_list);

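/*
 * Each page on ptable_list holds several pointer tables (8, per the
 * comment in get_pointer_table() below); PD_MARKBITS, stashed in the
 * otherwise unused page->index field, keeps one "free" bit per table
 * slot in that page.
 */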
#define PD_PTABLE(page) ((ptable_desc *)&mem_map[MAP_NR(page)])
#define PD_PAGE(ptable) (list_entry(ptable, struct page, list))
#define PD_MARKBITS(dp) (*(unsigned char *)&PD_PAGE(dp)->index)

#define PTABLE_SIZE (PTRS_PER_PMD * sizeof(pmd_t))

void __init init_pointer_table(unsigned long ptable)
{
	ptable_desc *dp;
	unsigned long page = ptable & PAGE_MASK;
	unsigned char mask = 1 << ((ptable - page)/PTABLE_SIZE);

	dp = PD_PTABLE(page);
	if (!(PD_MARKBITS(dp) & mask)) {
		PD_MARKBITS(dp) = 0xff;
		list_add(dp, &ptable_list);
	}

	PD_MARKBITS(dp) &= ~mask;
#ifdef DEBUG
	printk("init_pointer_table: %lx, %x\n", ptable, PD_MARKBITS(dp));
#endif

	/* unreserve the page so it's possible to free that page */
	PD_PAGE(dp)->flags &= ~(1 << PG_reserved);
	atomic_set(&PD_PAGE(dp)->count, 1);

	return;
}

pmd_t *get_pointer_table (void)
{
	ptable_desc *dp = ptable_list.next;
	unsigned char mask = PD_MARKBITS (dp);
	unsigned char tmp;
	unsigned int off;

	/*
	 * For a pointer table for a user process address space, a
	 * table is taken from a page allocated for the purpose.  Each
	 * page can hold 8 pointer tables.  The page is remapped in
	 * virtual address space to be noncacheable.
	 */
	if (mask == 0) {
		unsigned long page;
		ptable_desc *new;

		if (!(page = get_free_page (GFP_KERNEL)))
			return 0;

		flush_tlb_kernel_page(page);
		nocache_page (page);

		new = PD_PTABLE(page);
		PD_MARKBITS(new) = 0xfe;
		list_add_tail(new, dp);

		return (pmd_t *)page;
	}

	for (tmp = 1, off = 0; (mask & tmp) == 0; tmp <<= 1, off += PTABLE_SIZE)
		;
	PD_MARKBITS(dp) = mask & ~tmp;
	if (!PD_MARKBITS(dp)) {
		/* move to end of list */
		list_del(dp);
		list_add_tail(dp, &ptable_list);
	}
	return (pmd_t *) (page_address(PD_PAGE(dp)) + off);
}

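/*
 * Release one pointer table: set its mark bit again and, once every
 * slot in the page is free, flip the page back to cacheable and give
 * it back to the page allocator.  Returns 1 if the whole page was
 * freed, 0 otherwise.
 */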
int free_pointer_table (pmd_t *ptable)
{
	ptable_desc *dp;
	unsigned long page = (unsigned long)ptable & PAGE_MASK;
	unsigned char mask = 1 << (((unsigned long)ptable - page)/PTABLE_SIZE);

	dp = PD_PTABLE(page);
	if (PD_MARKBITS (dp) & mask)
		panic ("table already free!");

	PD_MARKBITS (dp) |= mask;

	if (PD_MARKBITS(dp) == 0xff) {
		/* all tables in page are free, free page */
		list_del(dp);
		cache_page (page);
		free_page (page);
		return 1;
	} else if (ptable_list.next != dp) {
		/*
		 * move this descriptor to the front of the list, since
		 * it has one or more free tables.
		 */
		list_del(dp);
		list_add(dp, &ptable_list);
	}
	return 0;
}

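/*
 * Check whether a transparent-translation register (TT0/TT1 on the '030,
 * DTT0/DTT1 on the '040/'060) is enabled and covers vaddr for supervisor
 * data accesses; if so, the virtual address is also the physical one.
 */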
static unsigned long transp_transl_matches( unsigned long regval,
					    unsigned long vaddr )
{
	unsigned long base, mask;

	/* enabled? */
	if (!(regval & 0x8000))
		return( 0 );

	if (CPU_IS_030) {
		/* function code match? */
		base = (regval >> 4) & 7;
		mask = ~(regval & 7);
		if (((SUPER_DATA ^ base) & mask) != 0)
			return 0;
	}
	else {
		/* must not be user-only */
		if ((regval & 0x6000) == 0)
			return( 0 );
	}

	/* address match? */
	base = regval & 0xff000000;
	mask = ~(regval << 8) & 0xff000000;
	return (((unsigned long)vaddr ^ base) & mask) == 0;
}

#if DEBUG_INVALID_PTOV
int mm_inv_cnt = 5;
#endif

#ifndef CONFIG_SINGLE_MEMORY_CHUNK
/*
 * The following two routines map from a physical address to a kernel
 * virtual address and vice versa.
 */
unsigned long mm_vtop(unsigned long vaddr)
{
	int i = 0;
	unsigned long voff = (unsigned long)vaddr - PAGE_OFFSET;

	do {
		if (voff < m68k_memory[i].size) {
#ifdef DEBUGPV
			printk ("VTOP(%lx)=%lx\n", vaddr,
				m68k_memory[i].addr + voff);
#endif
			return m68k_memory[i].addr + voff;
		}
		voff -= m68k_memory[i].size;
	} while (++i < m68k_num_memory);

	return mm_vtop_fallback(vaddr);
}
#endif

/* Separate function to make the common case faster (needs to save fewer
   registers) */
unsigned long mm_vtop_fallback(unsigned long vaddr)
{
	/* not in one of the memory chunks; test for applying transparent
	 * translation */

	if (CPU_IS_030) {
		unsigned long ttreg;

		asm volatile( ".chip 68030\n\t"
			      "pmove %/tt0,%0@\n\t"
			      ".chip 68k"
			      : : "a" (&ttreg) );
		if (transp_transl_matches( ttreg, vaddr ))
			return (unsigned long)vaddr;
		asm volatile( ".chip 68030\n\t"
			      "pmove %/tt1,%0@\n\t"
			      ".chip 68k"
			      : : "a" (&ttreg) );
		if (transp_transl_matches( ttreg, vaddr ))
			return (unsigned long)vaddr;
	}
	else if (CPU_IS_040_OR_060) {
		unsigned long ttreg;

		asm volatile( ".chip 68040\n\t"
			      "movec %%dtt0,%0\n\t"
			      ".chip 68k"
			      : "=d" (ttreg) );
		if (transp_transl_matches( ttreg, vaddr ))
			return (unsigned long)vaddr;
		asm volatile( ".chip 68040\n\t"
			      "movec %%dtt1,%0\n\t"
			      ".chip 68k"
			      : "=d" (ttreg) );
		if (transp_transl_matches( ttreg, vaddr ))
			return (unsigned long)vaddr;
	}

	/* no match there either, so get the actual physical address from the MMU. */

	if (CPU_IS_060) {
		mm_segment_t fs = get_fs();
		unsigned long paddr;

		set_fs (MAKE_MM_SEG(SUPER_DATA));

		/* The PLPAR instruction causes an access error if the translation
		 * is not possible. To catch this we use the same exception mechanism
		 * as for user space accesses in <asm/uaccess.h>. */
		asm volatile (".chip 68060\n"
			      "1: plpar (%0)\n"
			      ".chip 68k\n"
			      "2:\n"
			      ".section .fixup,\"ax\"\n"
			      "   .even\n"
			      "3: lea -1,%0\n"
			      "   jra 2b\n"
			      ".previous\n"
			      ".section __ex_table,\"a\"\n"
			      "   .align 4\n"
			      "   .long 1b,3b\n"
			      ".previous"
			      : "=a" (paddr)
			      : "0" (vaddr));
		set_fs (fs);

		return paddr;

	} else if (CPU_IS_040) {
		unsigned long mmusr;
		mm_segment_t fs = get_fs();

		set_fs (MAKE_MM_SEG(SUPER_DATA));

		asm volatile (".chip 68040\n\t"
			      "ptestr (%1)\n\t"
			      "movec %%mmusr, %0\n\t"
			      ".chip 68k"
			      : "=r" (mmusr)
			      : "a" (vaddr));
		set_fs (fs);

		if (mmusr & MMU_T_040) {
			return (unsigned long)vaddr;	/* Transparent translation */
		}
		if (mmusr & MMU_R_040)
			return (mmusr & PAGE_MASK) | ((unsigned long)vaddr & (PAGE_SIZE-1));

		printk("VTOP040: bad virtual address %lx (%lx)\n", vaddr, mmusr);
		return -1;
	} else {
		volatile unsigned short temp;
		unsigned short mmusr;
		unsigned long *descaddr;

		asm volatile ("ptestr #5,%2@,#7,%0\n\t"
			      "pmove %/psr,%1@"
			      : "=a&" (descaddr)
			      : "a" (&temp), "a" (vaddr));
		mmusr = temp;

		if (mmusr & (MMU_I|MMU_B|MMU_L))
			printk("VTOP030: bad virtual address %lx (%x)\n", vaddr, mmusr);

		descaddr = phys_to_virt((unsigned long)descaddr);

		switch (mmusr & MMU_NUM) {
		case 1:
			return (*descaddr & 0xfe000000) | ((unsigned long)vaddr & 0x01ffffff);
		case 2:
			return (*descaddr & 0xfffc0000) | ((unsigned long)vaddr & 0x0003ffff);
		case 3:
			return (*descaddr & PAGE_MASK) | ((unsigned long)vaddr & (PAGE_SIZE-1));
		default:
			printk("VTOP: bad levels (%u) for virtual address %lx\n",
			       mmusr & MMU_NUM, vaddr);
		}
	}

	printk("VTOP: bad virtual address %lx\n", vaddr);
	return -1;
}

#ifndef CONFIG_SINGLE_MEMORY_CHUNK
unsigned long mm_ptov (unsigned long paddr)
{
	int i = 0;
	unsigned long poff, voff = PAGE_OFFSET;

	do {
		poff = paddr - m68k_memory[i].addr;
		if (poff < m68k_memory[i].size) {
#ifdef DEBUGPV
			printk ("PTOV(%lx)=%lx\n", paddr, poff + voff);
#endif
			return poff + voff;
		}
		voff += m68k_memory[i].size;
	} while (++i < m68k_num_memory);

#if DEBUG_INVALID_PTOV
	if (mm_inv_cnt > 0) {
		mm_inv_cnt--;
		printk("Invalid use of phys_to_virt(0x%lx) at 0x%p!\n",
		       paddr, __builtin_return_address(0));
	}
#endif
	/*
	 * assume that the kernel virtual address is the same as the
	 * physical address.
	 *
	 * This should be reasonable in most situations:
	 *  1) They shouldn't be dereferencing the virtual address
	 *     unless they are sure that it is valid from kernel space.
	 *  2) The only usage I see so far is converting a page table
	 *     reference to some non-FASTMEM address space when freeing
	 *     mmaped "/dev/mem" pages.  These addresses are just passed
	 *     to "free_page", which ignores addresses that aren't in
	 *     the memory list anyway.
	 */

#ifdef CONFIG_AMIGA
	/*
	 * if on an amiga and address is in first 16M, move it
	 * to the ZTWO_VADDR range
	 */
	if (MACH_IS_AMIGA && paddr < 16*1024*1024)
		return ZTWO_VADDR(paddr);
#endif
	return -1;
}
#endif

/* invalidate page in both caches */
#define clear040(paddr)					\
	__asm__ __volatile__ ("nop\n\t"			\
			      ".chip 68040\n\t"		\
			      "cinvp %%bc,(%0)\n\t"	\
			      ".chip 68k"		\
			      : : "a" (paddr))

/* invalidate page in i-cache */
#define cleari040(paddr)				\
	__asm__ __volatile__ ("nop\n\t"			\
			      ".chip 68040\n\t"		\
			      "cinvp %%ic,(%0)\n\t"	\
			      ".chip 68k"		\
			      : : "a" (paddr))

/* push page in both caches */
#define push040(paddr)					\
	__asm__ __volatile__ ("nop\n\t"			\
			      ".chip 68040\n\t"		\
			      "cpushp %%bc,(%0)\n\t"	\
			      ".chip 68k"		\
			      : : "a" (paddr))

/* push and invalidate page in both caches */
#define pushcl040(paddr)			\
	do { push040(paddr);			\
	     if (CPU_IS_060) clear040(paddr);	\
	} while(0)

/* push page in both caches, invalidate in i-cache */
#define pushcli040(paddr)			\
	do { push040(paddr);			\
	     if (CPU_IS_060) cleari040(paddr);	\
	} while(0)

/*
 * 040: Hit every page containing an address in the range paddr..paddr+len-1.
 * (Low order bits of the ea of a CINVP/CPUSHP are "don't care"s).
 * Hit every page until there is a page or less to go. Hit the next page,
 * and the one after that if the range hits it.
 */
/* ++roman: A little bit more care is required here: The CINVP instruction
 * invalidates cache entries WITHOUT WRITING DIRTY DATA BACK! So the beginning
 * and the end of the region must be treated differently if they are not
 * exactly at the beginning or end of a page boundary. Else, maybe too much
 * data becomes invalidated and thus lost forever. CPUSHP does what we need:
 * it invalidates the page after pushing dirty data to memory. (Thanks to Jes
 * for discovering the problem!)
 */
/* ... but on the '060, CPUSH doesn't invalidate (for us, since we have set
 * the DPI bit in the CACR; would it cause problems with temporarily changing
 * this?). So we have to push first and then additionally invalidate.
 */

/*
 * cache_clear() semantics: Clear any cache entries for the area in question,
 * without writing back dirty entries first. This is useful if the data will
 * be overwritten anyway, e.g. by DMA to memory. The range is defined by a
 * _physical_ address.
 */

void cache_clear (unsigned long paddr, int len)
{
	if (CPU_IS_040_OR_060) {
		int tmp;

		/*
		 * We need special treatment for the first page, in case it
		 * is not page-aligned. Page align the addresses to work
		 * around bug I17 in the 68060.
		 */
		if ((tmp = -paddr & (PAGE_SIZE - 1))) {
			pushcl040(paddr & PAGE_MASK);
			if ((len -= tmp) <= 0)
				return;
			paddr += tmp;
		}
		tmp = PAGE_SIZE;
		paddr &= PAGE_MASK;
		while ((len -= tmp) >= 0) {
			clear040(paddr);
			paddr += tmp;
		}
		if ((len += tmp))
			/* a page boundary gets crossed at the end */
			pushcl040(paddr);
	}
	else /* 68030 or 68020 */
		asm volatile ("movec %/cacr,%/d0\n\t"
			      "oriw %0,%/d0\n\t"
			      "movec %/d0,%/cacr"
			      : : "i" (FLUSH_I_AND_D)
			      : "d0");
#ifdef CONFIG_M68K_L2_CACHE
	if (mach_l2_flush)
		mach_l2_flush(0);
#endif
}

/*
 * cache_push() semantics: Write back any dirty cache data in the given area,
 * and invalidate the range in the instruction cache. It need not (but may)
 * also invalidate those entries in the data cache. The range is defined by a
 * _physical_ address.
 */

void cache_push (unsigned long paddr, int len)
{
	if (CPU_IS_040_OR_060) {
		int tmp = PAGE_SIZE;

		/*
		 * on 68040 or 68060, push cache lines for pages in the range;
		 * on the '040 this also invalidates the pushed lines, but not
		 * on the '060!
		 */
		len += paddr & (PAGE_SIZE - 1);

		/*
		 * Work around bug I17 in the 68060 affecting some instruction
		 * lines not being invalidated properly.
		 */
		paddr &= PAGE_MASK;

		do {
			pushcli040(paddr);
			paddr += tmp;
		} while ((len -= tmp) > 0);
	}
	/*
	 * 68030/68020 have no writeback cache. On the other hand,
	 * cache_push is actually a superset of cache_clear (the lines
	 * get written back and invalidated), so we should make sure
	 * to perform the corresponding actions. After all, this is getting
	 * called in places where we've just loaded code, or whatever, so
	 * flushing the icache is appropriate; flushing the dcache shouldn't
	 * be required.
	 */
	else /* 68030 or 68020 */
		asm volatile ("movec %/cacr,%/d0\n\t"
			      "oriw %0,%/d0\n\t"
			      "movec %/d0,%/cacr"
			      : : "i" (FLUSH_I)
			      : "d0");
#ifdef CONFIG_M68K_L2_CACHE
	if (mach_l2_flush)
		mach_l2_flush(1);
#endif
}

#undef clear040
#undef cleari040
#undef push040
#undef pushcl040
#undef pushcli040

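/*
 * Return 1 if the range [addr, addr+len) ends exactly at the end of one
 * of the memory chunks, 0 otherwise.
 */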
#ifndef CONFIG_SINGLE_MEMORY_CHUNK
int mm_end_of_chunk (unsigned long addr, int len)
{
	int i;

	for (i = 0; i < m68k_num_memory; i++)
		if (m68k_memory[i].addr + m68k_memory[i].size == addr + len)
			return 1;
	return 0;
}
#endif