/*
 *  linux/arch/m68k/mm/memory.c
 *
 *  Copyright (C) 1995  Hamish Macdonald
 */
#include <linux/config.h>
#include <linux/mm.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/malloc.h>
#include <linux/init.h>
#include <linux/pagemap.h>

#include <asm/setup.h>
#include <asm/segment.h>
#include <asm/page.h>
#include <asm/pgalloc.h>
#include <asm/system.h>
#include <asm/traps.h>
#include <asm/io.h>
#include <asm/machdep.h>
#ifdef CONFIG_AMIGA
#include <asm/amigahw.h>
#endif
struct pgtable_cache_struct quicklists;
void __bad_pte(pmd_t *pmd)
{
	printk("Bad pmd in pte_alloc: %08lx\n", pmd_val(*pmd));
	pmd_set(pmd, BAD_PAGETABLE);
}
void __bad_pmd(pgd_t *pgd)
{
	printk("Bad pgd in pmd_alloc: %08lx\n", pgd_val(*pgd));
	pgd_set(pgd, (pmd_t *)BAD_PAGETABLE);
}
pte_t *get_pte_slow(pmd_t *pmd, unsigned long offset)
{
	pte_t *pte;

	pte = (pte_t *) __get_free_page(GFP_KERNEL);
	if (pmd_none(*pmd)) {
		if (pte) {
			clear_page(pte);
			__flush_page_to_ram((unsigned long)pte);
			flush_tlb_kernel_page((unsigned long)pte);
			nocache_page((unsigned long)pte);
			pmd_set(pmd, pte);
			return pte + offset;
		}
		pmd_set(pmd, BAD_PAGETABLE);
		return NULL;
	}
	free_page((unsigned long)pte);
	if (pmd_bad(*pmd)) {
		__bad_pte(pmd);
		return NULL;
	}
	return (pte_t *)__pmd_page(*pmd) + offset;
}
pmd_t *get_pmd_slow(pgd_t *pgd, unsigned long offset)
{
	pmd_t *pmd;

	pmd = get_pointer_table();
	if (pgd_none(*pgd)) {
		if (pmd) {
			pgd_set(pgd, pmd);
			return pmd + offset;
		}
		pgd_set(pgd, (pmd_t *)BAD_PAGETABLE);
		return NULL;
	}
	free_pointer_table(pmd);
	if (pgd_bad(*pgd)) {
		__bad_pmd(pgd);
		return NULL;
	}
	return (pmd_t *)__pgd_page(*pgd) + offset;
}
/* ++andreas: {get,free}_pointer_table rewritten to use unused fields from
   struct page instead of separately kmalloced struct.  Stolen from
   arch/sparc/mm/srmmu.c ... */
typedef struct list_head ptable_desc;
static LIST_HEAD(ptable_list);
#define PD_PTABLE(page) ((ptable_desc *)&mem_map[MAP_NR(page)])
#define PD_PAGE(ptable) (list_entry(ptable, struct page, list))
#define PD_MARKBITS(dp) (*(unsigned char *)&PD_PAGE(dp)->index)

#define PTABLE_SIZE (PTRS_PER_PMD * sizeof(pmd_t))
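/*
 * A worked sizing example (assuming the usual m68k definitions,
 * PTRS_PER_PMD == 8 and sizeof(pmd_t) == 64, i.e. 16 longs per entry):
 *
 *	PTABLE_SIZE = 8 * 64 = 512 bytes
 *	PAGE_SIZE / PTABLE_SIZE = 4096 / 512 = 8 tables per page
 *
 * so the 8-bit PD_MARKBITS field holds exactly one "this table is free"
 * bit per pointer table in the page.
 */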
void __init init_pointer_table(unsigned long ptable)
{
	ptable_desc *dp;
	unsigned long page = ptable & PAGE_MASK;
	unsigned char mask = 1 << ((ptable - page)/PTABLE_SIZE);

	dp = PD_PTABLE(page);
	if (!(PD_MARKBITS(dp) & mask)) {
		PD_MARKBITS(dp) = 0xff;
		list_add(dp, &ptable_list);
	}

	PD_MARKBITS(dp) &= ~mask;
#ifdef DEBUG
	printk("init_pointer_table: %lx, %x\n", ptable, PD_MARKBITS(dp));
#endif

	/* unreserve the page so it's possible to free that page */
	PD_PAGE(dp)->flags &= ~(1 << PG_reserved);
	atomic_set(&PD_PAGE(dp)->count, 1);
}
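/*
 * Illustrative arithmetic for the mask above (values hypothetical): a
 * table at offset 3*PTABLE_SIZE in its page, e.g. ptable = page + 0x600
 * with PTABLE_SIZE == 512 (0x200), gives (ptable - page)/PTABLE_SIZE == 3,
 * so mask == 1 << 3 == 0x08; clearing that bit in PD_MARKBITS marks
 * table #3 of the page as in use.
 */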
pmd_t *get_pointer_table (void)
{
	ptable_desc *dp = ptable_list.next;
	unsigned char mask = PD_MARKBITS (dp);
	unsigned char tmp;
	unsigned int off;

	/*
	 * For a pointer table for a user process address space, a
	 * table is taken from a page allocated for the purpose.  Each
	 * page can hold 8 pointer tables.  The page is remapped in
	 * virtual address space to be noncacheable.
	 */
	if (mask == 0) {
		/* no free table in any page: allocate a fresh page */
		unsigned long page;
		ptable_desc *new;

		if (!(page = get_free_page (GFP_KERNEL)))
			return 0;

		flush_tlb_kernel_page(page);
		nocache_page (page);

		new = PD_PTABLE(page);
		PD_MARKBITS(new) = 0xfe;	/* table 0 is handed out below */
		list_add_tail(new, dp);

		return (pmd_t *)page;
	}

	/* find the first free table in the page and claim its mark bit */
	for (tmp = 1, off = 0; (mask & tmp) == 0; tmp <<= 1, off += PTABLE_SIZE)
		;
	PD_MARKBITS(dp) = mask & ~tmp;
	if (!PD_MARKBITS(dp)) {
		/* move to end of list */
		list_del(dp);
		list_add_tail(dp, &ptable_list);
	}
	return (pmd_t *) (page_address(PD_PAGE(dp)) + off);
}
int free_pointer_table (pmd_t *ptable)
{
	ptable_desc *dp;
	unsigned long page = (unsigned long)ptable & PAGE_MASK;
	unsigned char mask = 1 << (((unsigned long)ptable - page)/PTABLE_SIZE);

	dp = PD_PTABLE(page);
	if (PD_MARKBITS (dp) & mask)
		panic ("table already free!");

	PD_MARKBITS (dp) |= mask;

	if (PD_MARKBITS(dp) == 0xff) {
		/* all tables in page are free, free page */
		list_del(dp);
		cache_page (page);
		free_page (page);
		return 1;
	} else if (ptable_list.next != dp) {
		/*
		 * move this descriptor to the front of the list, since
		 * it has one or more free tables.
		 */
		list_del(dp);
		list_add(dp, &ptable_list);
	}
	return 0;
}
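/*
 * A minimal usage sketch of this allocator (hypothetical caller, kept
 * under #if 0 so it is never compiled; "example_pointer_table_use" is
 * not a name from this file):
 */
#if 0
static void example_pointer_table_use(pgd_t *pgd)
{
	pmd_t *pmd = get_pointer_table();	/* one 512-byte pointer table */

	if (!pmd)
		return;				/* allocation failed */
	pgd_set(pgd, pmd);			/* install in a root table entry */
	/* ... later, after the pgd entry has been torn down ... */
	free_pointer_table(pmd);		/* returns 1 if the whole page was freed */
}
#endif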
static unsigned long transp_transl_matches( unsigned long regval,
					    unsigned long vaddr )
{
    unsigned long base, mask;

    /* enabled? */
    if (!(regval & 0x8000))
	return 0;

    if (CPU_IS_030) {
	/* function code match? */
	base = (regval >> 4) & 7;
	mask = ~(regval & 7);
	if (((SUPER_DATA ^ base) & mask) != 0)
	    return 0;
    }
    else {
	/* must not be user-only */
	if ((regval & 0x6000) == 0)
	    return 0;
    }

    /* address match? */
    base = regval & 0xff000000;
    mask = ~(regval << 8) & 0xff000000;
    return (((unsigned long)vaddr ^ base) & mask) == 0;
}
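/*
 * Register layout assumed above (per the 680x0 manuals): bits 31-24 hold
 * the logical address base, bits 23-16 the logical address mask (a set
 * mask bit means "ignore this base bit", hence the inversion after the
 * << 8 shift), and bit 15 is the enable bit.  On the '030, bits 6-4 and
 * 2-0 are the function code base and mask; on the '040/'060, bits 14-13
 * (the S-field) select user-only, supervisor-only, or both, so S == 00
 * (user-only) can never match our SUPER_DATA accesses.
 */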
#if DEBUG_INVALID_PTOV
int mm_inv_cnt = 5;
#endif
#ifndef CONFIG_SINGLE_MEMORY_CHUNK
/*
 * The following two routines map from a physical address to a kernel
 * virtual address and vice versa.
 */
unsigned long mm_vtop(unsigned long vaddr)
{
	int i = 0;
	unsigned long voff = (unsigned long)vaddr - PAGE_OFFSET;

	do {
		if (voff < m68k_memory[i].size) {
#ifdef DEBUGPV
			printk ("VTOP(%p)=%lx\n", vaddr,
				m68k_memory[i].addr + voff);
#endif
			return m68k_memory[i].addr + voff;
		}
		voff -= m68k_memory[i].size;
	} while (++i < m68k_num_memory);

	return mm_vtop_fallback(vaddr);
}
#endif
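/*
 * Worked example with hypothetical chunk values (not from this file):
 * with m68k_memory[0] = { .addr = 0x08000000, .size = 0x00800000 } (8MB)
 * and m68k_memory[1] = { .addr = 0x01000000, .size = 0x00400000 } (4MB),
 * vaddr = PAGE_OFFSET + 0x00900000 gives voff = 0x00900000; that is not
 * below 8MB, so 8MB is subtracted (voff = 0x00100000), which does fall
 * inside chunk 1, and the result is 0x01000000 + 0x00100000 = 0x01100000.
 */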
/* Separate function to make the common case faster (needs to save less
   registers) */
unsigned long mm_vtop_fallback(unsigned long vaddr)
{
	/* not in one of the memory chunks; test for applying transparent
	 * translation */

	if (CPU_IS_030) {
	    unsigned long ttreg;

	    asm volatile( ".chip 68030\n\t"
			  "pmove %/tt0,%0@\n\t"
			  ".chip 68k"
			  : : "a" (&ttreg) );
	    if (transp_transl_matches( ttreg, vaddr ))
		return (unsigned long)vaddr;
	    asm volatile( ".chip 68030\n\t"
			  "pmove %/tt1,%0@\n\t"
			  ".chip 68k"
			  : : "a" (&ttreg) );
	    if (transp_transl_matches( ttreg, vaddr ))
		return (unsigned long)vaddr;
	}
	else if (CPU_IS_040_OR_060) {
	    unsigned long ttreg;

	    asm volatile( ".chip 68040\n\t"
			  "movec %%dtt0,%0\n\t"
			  ".chip 68k"
			  : "=d" (ttreg) );
	    if (transp_transl_matches( ttreg, vaddr ))
		return (unsigned long)vaddr;
	    asm volatile( ".chip 68040\n\t"
			  "movec %%dtt1,%0\n\t"
			  ".chip 68k"
			  : "=d" (ttreg) );
	    if (transp_transl_matches( ttreg, vaddr ))
		return (unsigned long)vaddr;
	}

	/* no match, too, so get the actual physical address from the MMU. */

	if (CPU_IS_060) {
	  mm_segment_t fs = get_fs();
	  unsigned long paddr;

	  set_fs (MAKE_MM_SEG(SUPER_DATA));

	  /* The PLPAR instruction causes an access error if the translation
	   * is not possible. To catch this we use the same exception mechanism
	   * as for user space accesses in <asm/uaccess.h>. */
	  asm volatile (".chip 68060\n"
			"1: plpar (%0)\n"
			".chip 68k\n"
			"2:\n"
			".section .fixup,\"ax\"\n"
			"   .even\n"
			"3: sub.l %0,%0\n"
			"   jra 2b\n"
			".previous\n"
			".section __ex_table,\"a\"\n"
			"   .align 4\n"
			"   .long 1b,3b\n"
			".previous"
			: "=a" (paddr)
			: "0" (vaddr));
	  set_fs (fs);
	  return paddr;
	} else if (CPU_IS_040) {
	  unsigned long mmusr;
	  mm_segment_t fs = get_fs();

	  set_fs (MAKE_MM_SEG(SUPER_DATA));

	  asm volatile (".chip 68040\n\t"
			"ptestr (%1)\n\t"
			"movec %%mmusr, %0\n\t"
			".chip 68k"
			: "=r" (mmusr)
			: "a" (vaddr));
	  set_fs (fs);

	  if (mmusr & MMU_T_040) {
	    return (unsigned long)vaddr;	/* Transparent translation */
	  }
	  if (mmusr & MMU_R_040)
	    return (mmusr & PAGE_MASK) | ((unsigned long)vaddr & (PAGE_SIZE-1));

	  printk("VTOP040: bad virtual address %lx (%lx)", vaddr, mmusr);
	  return -1;
	} else {
	  volatile unsigned short temp;
	  unsigned short mmusr;
	  unsigned long *descaddr;

	  asm volatile ("ptestr #5,%2@,#7,%0\n\t"
			"pmove %/psr,%1@"
			: "=&a" (descaddr)
			: "a" (&temp), "a" (vaddr));
	  mmusr = temp;

	  if (mmusr & (MMU_I|MMU_B|MMU_L))
	    printk("VTOP030: bad virtual address %lx (%x)\n", vaddr, mmusr);

	  descaddr = phys_to_virt((unsigned long)descaddr);

	  switch (mmusr & MMU_NUM) {
	  case 1:
	    return (*descaddr & 0xfe000000) | ((unsigned long)vaddr & 0x01ffffff);
	  case 2:
	    return (*descaddr & 0xfffc0000) | ((unsigned long)vaddr & 0x0003ffff);
	  case 3:
	    return (*descaddr & PAGE_MASK) | ((unsigned long)vaddr & (PAGE_SIZE-1));
	  default:
	    printk("VTOP: bad levels (%u) for virtual address %lx\n",
		   mmusr & MMU_NUM, vaddr);
	  }
	}

	printk("VTOP: bad virtual address %lx\n", vaddr);
	return -1;
}
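/*
 * The three switch cases above correspond to translations that terminated
 * after 1, 2 or 3 table levels; the masks imply the mapped region sizes:
 * a level-1 (early-terminating) descriptor covers 25 offset bits
 * (0x01ffffff, a 32MB region), a level-2 descriptor covers 18 bits
 * (0x0003ffff, 256KB), and level 3 is an ordinary page descriptor.
 */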
#ifndef CONFIG_SINGLE_MEMORY_CHUNK
unsigned long mm_ptov (unsigned long paddr)
{
	int i = 0;
	unsigned long poff, voff = PAGE_OFFSET;

	do {
		poff = paddr - m68k_memory[i].addr;
		if (poff < m68k_memory[i].size) {
#ifdef DEBUGPV
			printk ("PTOV(%lx)=%lx\n", paddr, poff + voff);
#endif
			return poff + voff;
		}
		voff += m68k_memory[i].size;
	} while (++i < m68k_num_memory);

#if DEBUG_INVALID_PTOV
	if (mm_inv_cnt > 0) {
		mm_inv_cnt--;
		printk("Invalid use of phys_to_virt(0x%lx) at 0x%p!\n",
			paddr, __builtin_return_address(0));
	}
#endif
	/*
	 * assume that the kernel virtual address is the same as the
	 * physical address.
	 *
	 * This should be reasonable in most situations:
	 *  1) They shouldn't be dereferencing the virtual address
	 *     unless they are sure that it is valid from kernel space.
	 *  2) The only usage I see so far is converting a page table
	 *     reference to some non-FASTMEM address space when freeing
	 *     mmaped "/dev/mem" pages.  These addresses are just passed
	 *     to "free_page", which ignores addresses that aren't in
	 *     the memory list anyway.
	 */

#ifdef CONFIG_AMIGA
	/*
	 * if on an amiga and address is in first 16M, move it
	 * to the ZTWO_VADDR range
	 */
	if (MACH_IS_AMIGA && paddr < 16*1024*1024)
		return ZTWO_VADDR(paddr);
#endif
	return paddr;
}
#endif
/* invalidate page in both caches */
#define clear040(paddr)					\
	__asm__ __volatile__ ("nop\n\t"			\
			      ".chip 68040\n\t"		\
			      "cinvp %%bc,(%0)\n\t"	\
			      ".chip 68k"		\
			      : : "a" (paddr))

/* invalidate page in i-cache */
#define cleari040(paddr)				\
	__asm__ __volatile__ ("nop\n\t"			\
			      ".chip 68040\n\t"		\
			      "cinvp %%ic,(%0)\n\t"	\
			      ".chip 68k"		\
			      : : "a" (paddr))

/* push page in both caches */
#define push040(paddr)					\
	__asm__ __volatile__ ("nop\n\t"			\
			      ".chip 68040\n\t"		\
			      "cpushp %%bc,(%0)\n\t"	\
			      ".chip 68k"		\
			      : : "a" (paddr))

/* push and invalidate page in both caches */
#define pushcl040(paddr)			\
	do { push040(paddr);			\
	     if (CPU_IS_060) clear040(paddr);	\
	} while(0)

/* push page in both caches, invalidate in i-cache */
#define pushcli040(paddr)			\
	do { push040(paddr);			\
	     if (CPU_IS_060) cleari040(paddr);	\
	} while(0)
/*
 * 040: Hit every page containing an address in the range paddr..paddr+len-1.
 * (Low order bits of the ea of a CINVP/CPUSHP are "don't care"s).
 * Hit every page until there is a page or less to go. Hit the next page,
 * and the one after that if the range hits it.
 */
/* ++roman: A little bit more care is required here: The CINVP instruction
 * invalidates cache entries WITHOUT WRITING DIRTY DATA BACK! So the beginning
 * and the end of the region must be treated differently if they are not
 * exactly at the beginning or end of a page boundary. Else, maybe too much
 * data becomes invalidated and thus lost forever. CPUSHP does what we need:
 * it invalidates the page after pushing dirty data to memory. (Thanks to Jes
 * for discovering the problem!)
 */
/* ... but on the '060, CPUSH doesn't invalidate (for us, since we have set
 * the DPI bit in the CACR; would it cause problems with temporarily changing
 * this?). So we have to push first and then additionally to invalidate.
 */
/*
 * cache_clear() semantics: Clear any cache entries for the area in question,
 * without writing back dirty entries first. This is useful if the data will
 * be overwritten anyway, e.g. by DMA to memory. The range is defined by a
 * _physical_ address.
 */
void cache_clear (unsigned long paddr, int len)
{
    if (CPU_IS_040_OR_060) {
	int tmp;

	/*
	 * We need special treatment for the first page, in case it
	 * is not page-aligned. Page align the addresses to work
	 * around bug I17 in the 68060.
	 */
	if ((tmp = -paddr & (PAGE_SIZE - 1))) {
	    pushcl040(paddr & PAGE_MASK);
	    if ((len -= tmp) <= 0)
		return;
	    paddr += tmp;
	}
	tmp = PAGE_SIZE;
	paddr &= PAGE_MASK;
	while ((len -= tmp) >= 0) {
	    clear040(paddr);
	    paddr += tmp;
	}
	if ((len += tmp))
	    /* a page boundary gets crossed at the end */
	    pushcl040(paddr);
    }
    else /* 68030 or 68020 */
	asm volatile ("movec %/cacr,%/d0\n\t"
		      "oriw %0,%/d0\n\t"
		      "movec %/d0,%/cacr"
		      : : "i" (FLUSH_I_AND_D)
		      : "d0");
#ifdef CONFIG_M68K_L2_CACHE
    if (mach_l2_flush)
	mach_l2_flush(0);
#endif
}
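/*
 * Illustrative arithmetic for the alignment test above (example value
 * hypothetical): -paddr & (PAGE_SIZE - 1) is the distance to the next
 * page boundary, e.g. paddr = 0x12345678 gives
 * tmp = (-0x12345678) & 0xfff = 0x988, so the partial first page gets
 * pushed (never blindly invalidated, per the ++roman note above) before
 * the fully covered pages are handled with clear040().
 */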
/*
 * cache_push() semantics: Write back any dirty cache data in the given area,
 * and invalidate the range in the instruction cache. It need not (but may)
 * invalidate those entries also in the data cache. The range is defined by a
 * _physical_ address.
 */
void cache_push (unsigned long paddr, int len)
{
    if (CPU_IS_040_OR_060) {
	int tmp = PAGE_SIZE;

	/*
	 * on 68040 or 68060, push cache lines for pages in the range;
	 * on the '040 this also invalidates the pushed lines, but not on
	 * the '060!
	 */
	len += paddr & (PAGE_SIZE - 1);

	/*
	 * Work around bug I17 in the 68060 affecting some instruction
	 * lines not being invalidated properly.
	 */
	paddr &= PAGE_MASK;

	do {
	    pushcli040(paddr);
	    paddr += tmp;
	} while ((len -= tmp) > 0);
    }
    /*
     * 68030/68020 have no writeback cache. On the other hand,
     * cache_push is actually a superset of cache_clear (the lines
     * get written back and invalidated), so we should make sure
     * to perform the corresponding actions. After all, this is getting
     * called in places where we've just loaded code, or whatever, so
     * flushing the icache is appropriate; flushing the dcache shouldn't
     * be required.
     */
    else /* 68030 or 68020 */
	asm volatile ("movec %/cacr,%/d0\n\t"
		      "oriw %0,%/d0\n\t"
		      "movec %/d0,%/cacr"
		      : : "i" (FLUSH_I)
		      : "d0");
#ifdef CONFIG_M68K_L2_CACHE
    if (mach_l2_flush)
	mach_l2_flush(1);
#endif
}
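/*
 * A minimal usage sketch of the two primitives for a DMA buffer
 * (hypothetical caller, compiled out; "example_dma_cache_ops" is not a
 * name from this file).  Note that both take _physical_ addresses.
 */
#if 0
static void example_dma_cache_ops(void *buf, int len)
{
	unsigned long paddr = virt_to_phys(buf);

	cache_push(paddr, len);		/* CPU wrote buf: flush it out before the device reads */
	/* ... start device-to-memory DMA into buf ... */
	cache_clear(paddr, len);	/* device wrote buf: drop stale lines before the CPU reads */
}
#endif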
#ifndef CONFIG_SINGLE_MEMORY_CHUNK
int mm_end_of_chunk (unsigned long addr, int len)
{
	int i;

	for (i = 0; i < m68k_num_memory; i++)
		if (m68k_memory[i].addr + m68k_memory[i].size == addr + len)
			return 1;
	return 0;
}
#endif