/*
 * linux/arch/m68k/mm/memory.c
 *
 * Copyright (C) 1995 Hamish Macdonald
 */
#include <linux/config.h>
#include <linux/mm.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/malloc.h>

#include <asm/setup.h>
#include <asm/segment.h>
#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/system.h>
#include <asm/traps.h>
#include <asm/io.h>
#include <asm/amigahw.h>
struct pgtable_cache_struct quicklists;
void __bad_pte(pmd_t *pmd)
{
	printk("Bad pmd in pte_alloc: %08lx\n", pmd_val(*pmd));
	pmd_set(pmd, BAD_PAGETABLE);
}
void __bad_pmd(pgd_t *pgd)
{
	printk("Bad pgd in pmd_alloc: %08lx\n", pgd_val(*pgd));
	pgd_set(pgd, (pmd_t *)BAD_PAGETABLE);
}
pte_t *get_pte_slow(pmd_t *pmd, unsigned long offset)
{
	pte_t *pte;

	pte = (pte_t *) __get_free_page(GFP_KERNEL);
	if (pmd_none(*pmd)) {
		if (pte) {
			clear_page((unsigned long)pte);
			flush_page_to_ram((unsigned long)pte);
			flush_tlb_kernel_page((unsigned long)pte);
			nocache_page((unsigned long)pte);
			pmd_set(pmd, pte);
			return pte + offset;
		}
		pmd_set(pmd, BAD_PAGETABLE);
		return NULL;
	}
	free_page((unsigned long)pte);
	if (pmd_bad(*pmd)) {
		__bad_pte(pmd);
		return NULL;
	}
	return (pte_t *) pmd_page(*pmd) + offset;
}
pmd_t *get_pmd_slow(pgd_t *pgd, unsigned long offset)
{
	pmd_t *pmd;

	pmd = get_pointer_table();
	if (pgd_none(*pgd)) {
		if (pmd) {
			pgd_set(pgd, pmd);
			return pmd + offset;
		}
		pgd_set(pgd, (pmd_t *)BAD_PAGETABLE);
		return NULL;
	}
	free_pointer_table(pmd);
	if (pgd_bad(*pgd)) {
		__bad_pmd(pgd);
		return NULL;
	}
	return (pmd_t *) pgd_page(*pgd) + offset;
}
/* ++andreas: {get,free}_pointer_table rewritten to use unused fields from
   struct page instead of a separately kmalloced struct.  Stolen from
   arch/sparc/mm/srmmu.c ... */
typedef struct page ptable_desc;
static ptable_desc ptable_list = { &ptable_list, &ptable_list };

#define PD_MARKBITS(dp) (*(unsigned char *)&(dp)->offset)
#define PD_PAGE(dp) (PAGE_OFFSET + ((dp)->map_nr << PAGE_SHIFT))
#define PAGE_PD(page) ((ptable_desc *)&mem_map[MAP_NR(page)])

#define PTABLE_SIZE (PTRS_PER_PMD * sizeof(pmd_t))
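
/*
 * Bookkeeping sketch (illustrative, not part of the original code):
 * each page on the list carries a free-table bitmap in the otherwise
 * unused `offset' field of its struct page.  A set bit means that
 * table of the page is free.  With 4 KB pages and 512-byte pointer
 * tables a page holds 4096/512 = 8 tables, so the bitmap fits in one
 * byte:
 *
 *	PD_MARKBITS(dp) == 0xff  ->  all 8 tables free
 *	PD_MARKBITS(dp) == 0xfe  ->  table 0 in use, tables 1..7 free
 *	PD_MARKBITS(dp) == 0x00  ->  page completely allocated
 */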
pmd_t *get_pointer_table (void)
{
	ptable_desc *dp = ptable_list.next;
	unsigned char mask = PD_MARKBITS (dp);
	unsigned char tmp;
	unsigned int off;

	/*
	 * For a pointer table for a user process address space, a
	 * table is taken from a page allocated for the purpose.  Each
	 * page can hold 8 pointer tables.  The page is remapped in
	 * virtual address space to be noncacheable.
	 */
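	/*
	 * Worked example (illustrative): with 4 KB pages and 512-byte
	 * tables, a fresh page is carved up as
	 *
	 *	table 0: page + 0x000	table 4: page + 0x800
	 *	table 1: page + 0x200	table 5: page + 0xa00
	 *	...
	 *
	 * The first caller gets table 0 and the mark bits become 0xfe;
	 * the bit scan below then hands out table 1 (off = 0x200), and
	 * so on until the mark bits reach zero.
	 */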
	if (mask == 0) {
		unsigned long page;
		ptable_desc *new;

		if (!(page = get_free_page (GFP_KERNEL)))
			return 0;

		flush_tlb_kernel_page(page);
		nocache_page (page);

		new = PAGE_PD(page);
		PD_MARKBITS(new) = 0xfe;
		(new->prev = dp->prev)->next = new;
		(new->next = dp)->prev = new;
		return (pmd_t *)page;
	}
	for (tmp = 1, off = 0; (mask & tmp) == 0; tmp <<= 1, off += PTABLE_SIZE)
		;
	PD_MARKBITS(dp) = mask & ~tmp;
	if (!PD_MARKBITS(dp)) {
		ptable_desc *last, *next;

		/* move to end of list */
		next = dp->next;
		(next->prev = dp->prev)->next = next;

		last = ptable_list.prev;
		(dp->next = last->next)->prev = dp;
		(dp->prev = last)->next = dp;
	}
	return (pmd_t *) (PD_PAGE(dp) + off);
}
int free_pointer_table (pmd_t *ptable)
{
	ptable_desc *dp, *first;
	unsigned long page = (unsigned long)ptable & PAGE_MASK;
	unsigned char mask = 1 << (((unsigned long)ptable - page)/PTABLE_SIZE);

	dp = PAGE_PD(page);
	if (PD_MARKBITS (dp) & mask)
		panic ("table already free!");

	PD_MARKBITS (dp) |= mask;

	if (PD_MARKBITS(dp) == 0xff) {
		/* all tables in page are free, free page */
		ptable_desc *next = dp->next;
		(next->prev = dp->prev)->next = next;
		cache_page (page);
		free_page (page);
		return 1;
	} else if ((first = ptable_list.next) != dp) {
		/*
		 * move this descriptor to the front of the list, since
		 * it has one or more free tables.
		 */
		ptable_desc *next = dp->next;
		(next->prev = dp->prev)->next = next;

		(dp->prev = first->prev)->next = dp;
		(dp->next = first)->prev = dp;
	}
	return 0;
}
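
/*
 * Worked example (illustrative): freeing the table at page + 0x600
 * gives 0x600/PTABLE_SIZE = 3, i.e. mask = 1 << 3 = 0x08; that bit is
 * OR-ed back into the mark bits.  Only when all eight bits are set
 * (0xff) is the backing page itself returned to the allocator, after
 * being switched back to cacheable with cache_page().
 */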
/* maximum pages used for kpointer tables */
#define KPTR_PAGES	4
/* # of reserved slots */
#define RESERVED_KPTR	4
extern pmd_tablepage kernel_pmd_table; /* reserved in head.S */

static struct kpointer_pages {
	pmd_tablepage *page[KPTR_PAGES];
	u_char alloced[KPTR_PAGES];
} kptr_pages;
void init_kpointer_table(void) {
	short i = KPTR_PAGES-1;

	/* first page is reserved in head.S */
	kptr_pages.page[i] = &kernel_pmd_table;
	kptr_pages.alloced[i] = ~(0xff>>RESERVED_KPTR);
	for (i--; i>=0; i--) {
		kptr_pages.page[i] = NULL;
		kptr_pages.alloced[i] = 0;
	}
}
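
/*
 * Worked example (illustrative): with RESERVED_KPTR = 4, the last page
 * starts out with alloced = ~(0xff>>4) = ~0x0f = 0xf0, i.e. the four
 * reserved slots 0-3 (tracked in the high four bits) are pre-marked as
 * allocated and the remaining four are free; all other pages start out
 * empty (alloced = 0).
 */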
pmd_t *get_kpointer_table (void)
{
	/* For pointer tables for the kernel virtual address space,
	 * use the page that is reserved in head.S and can hold up to
	 * 8 pointer tables.  3 of these tables are always reserved
	 * (kernel_pg_dir, swapper_pg_dir and the kernel pointer table
	 * for the first 16 MB of RAM).  In addition, the 4th pointer
	 * table in this page is reserved.  On Amiga and Atari, it is
	 * used to map in the hardware registers.  It may be used for
	 * other purposes on other 68k machines.  This leaves 4 pointer
	 * tables available for use by the kernel.  One of them is
	 * usually used for the vmalloc tables.  This allows mapping of
	 * 3 * 32 = 96 MB of physical memory.  But these pointer tables
	 * are also used for other purposes, like kernel_map(), so
	 * further pages can be used here, too.
	 */
	pmd_tablepage *page;
	pmd_table *table;
	long nr, offset = -8;
	short i;
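
	/*
	 * Illustrative note on the bfffo search below (assuming the
	 * standard 680x0 bitfield semantics): bfffo scans the 8-bit
	 * field at offset -8, i.e. the low byte of ~alloced[i], from
	 * its most significant bit, and returns the bit offset of the
	 * first set bit -- a value in -8..-1 when a free slot exists,
	 * or offset+width = 0 when the byte is all zero.  Hence
	 * `if (nr)' means "found", and nr-offset turns the result into
	 * a slot index 0..7.  E.g. alloced[i] = 0xf0 gives
	 * ~alloced[i] = 0x0f, first set bit at nr = -4, slot 4.
	 */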
	for (i=KPTR_PAGES-1; i>=0; i--) {
		asm volatile("bfffo %1{%2,#8},%0"
			: "=d" (nr)
			: "d" ((u_char)~kptr_pages.alloced[i]), "d" (offset));
		if (nr)
			break;
	}
	if (i < 0) {
		printk("No space for kernel pointer table!\n");
		return NULL;
	}
	if (!(page = kptr_pages.page[i])) {
		if (!(page = (pmd_tablepage *)get_free_page(GFP_KERNEL))) {
			printk("No space for kernel pointer table!\n");
			return NULL;
		}
		flush_tlb_kernel_page((unsigned long) page);
		nocache_page((u_long)(kptr_pages.page[i] = page));
	}
	asm volatile("bfset %0@{%1,#1}"
		:
		: "a" (&kptr_pages.alloced[i]), "d" (nr-offset));
	table = &(*page)[nr-offset];
	memset(table, 0, sizeof(pmd_table));
	return ((pmd_t *)table);
}
void free_kpointer_table (pmd_t *pmdp)
{
	pmd_table *table = (pmd_table *)pmdp;
	pmd_tablepage *page = (pmd_tablepage *)((u_long)table & PAGE_MASK);
	long nr;
	short i;

	for (i=KPTR_PAGES-1; i>=0; i--) {
		if (kptr_pages.page[i] == page)
			break;
	}
	nr = ((u_long)table - (u_long)page) / sizeof(pmd_table);
	if (!table || i < 0 || (i == KPTR_PAGES-1 && nr < RESERVED_KPTR)) {
		printk("Attempt to free invalid kernel pointer table: %p\n", table);
		return;
	}
	asm volatile("bfclr %0@{%1,#1}"
		:
		: "a" (&kptr_pages.alloced[i]), "d" (nr));
	if (!kptr_pages.alloced[i]) {
		kptr_pages.page[i] = 0;
		cache_page ((u_long)page);
		free_page ((u_long)page);
	}
}
static unsigned long transp_transl_matches( unsigned long regval,
					    unsigned long vaddr )
{
    unsigned long base, mask;

    /* enabled? */
    if (!(regval & 0x8000))
	return( 0 );

    if (CPU_IS_030) {
	/* function code match? */
	base = (regval >> 4) & 7;
	mask = ~(regval & 7);
	if ((SUPER_DATA & mask) != (base & mask))
	    return( 0 );
    }
    else {
	/* must not be user-only */
	if ((regval & 0x6000) == 0)
	    return( 0 );
    }

    /* address match? */
    base = regval & 0xff000000;
    mask = ~(regval << 8) & 0xff000000;
    return( (vaddr & mask) == (base & mask) );
}
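
/*
 * Worked example for the address check above (illustrative; the enable
 * and mode bits are tested separately): a TT register value of
 * 0x807f8000 has base address field 0x80 and address mask field 0x7f,
 * so base = 0x80000000 and mask = ~0x7f000000 & 0xff000000 =
 * 0x80000000.  Only the top address bit is compared, i.e. any vaddr in
 * the upper 2 GB passes the address match.
 */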
static unsigned long mm_vtop_fallback (unsigned long);
/*
 * The following two routines map from a physical address to a kernel
 * virtual address and vice versa.
 */
unsigned long mm_vtop (unsigned long vaddr)
{
	int i = 0;
	unsigned long voff = vaddr;
	unsigned long offset = 0;

	do {
		if (voff < offset + m68k_memory[i].size) {
#ifdef DEBUGPV
			printk ("VTOP(%lx)=%lx\n", vaddr,
				m68k_memory[i].addr + voff - offset);
#endif
			return m68k_memory[i].addr + voff - offset;
		} else
			offset += m68k_memory[i].size;
		i++;
	} while (i < m68k_num_memory);
	return mm_vtop_fallback(vaddr);
}
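
/*
 * Worked example (hypothetical chunk layout): with two chunks
 * m68k_memory[0] = { addr 0x07000000, size 0x00200000 } and
 * m68k_memory[1] = { addr 0x07800000, size 0x00200000 }, the kernel
 * virtual range is linear, so vaddr 0x00300000 falls in the second
 * chunk (offset 0x00200000) and translates to
 * 0x07800000 + 0x00300000 - 0x00200000 = 0x07900000.
 */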
/* Separate function to make the common case faster (needs to save
   fewer registers) */
static unsigned long mm_vtop_fallback (unsigned long vaddr)
{
	/* not in one of the memory chunks; test for applying transparent
	 * translation */
	if (CPU_IS_030) {
	    unsigned long ttreg;

	    asm volatile( ".chip 68030\n\t"
			  "pmove %/tt0,%0@\n\t"
			  ".chip 68k"
			  : : "a" (&ttreg) );
	    if (transp_transl_matches( ttreg, vaddr ))
		return vaddr;
	    asm volatile( ".chip 68030\n\t"
			  "pmove %/tt1,%0@\n\t"
			  ".chip 68k"
			  : : "a" (&ttreg) );
	    if (transp_transl_matches( ttreg, vaddr ))
		return vaddr;
	}
	else if (CPU_IS_040_OR_060) {
	    unsigned long ttreg;

	    asm volatile( ".chip 68040\n\t"
			  "movec %%dtt0,%0\n\t"
			  ".chip 68k"
			  : "=d" (ttreg) );
	    if (transp_transl_matches( ttreg, vaddr ))
		return vaddr;
	    asm volatile( ".chip 68040\n\t"
			  "movec %%dtt1,%0\n\t"
			  ".chip 68k"
			  : "=d" (ttreg) );
	    if (transp_transl_matches( ttreg, vaddr ))
		return vaddr;
	}

	/* no match there either, so get the actual physical address from the MMU. */
	if (CPU_IS_060) {
	  mm_segment_t fs = get_fs();
	  unsigned long paddr;

	  set_fs (MAKE_MM_SEG(SUPER_DATA));

	  /* The PLPAR instruction causes an access error if the translation
	   * is not possible.  We don't catch that here, so a bad kernel trap
	   * will be reported in this case. */
	  asm volatile (".chip 68060\n\t"
			"plpar (%0)\n\t"
			".chip 68k"
			: "=a" (paddr)
			: "0" (vaddr));
	  set_fs (fs);

	  return paddr;
	} else if (CPU_IS_040) {
	  unsigned long mmusr;
	  mm_segment_t fs = get_fs();

	  set_fs (MAKE_MM_SEG(SUPER_DATA));

	  asm volatile (".chip 68040\n\t"
			"ptestr (%1)\n\t"
			"movec %%mmusr, %0\n\t"
			".chip 68k"
			: "=r" (mmusr)
			: "a" (vaddr));
	  set_fs (fs);

	  if (mmusr & MMU_T_040) {
	    return (vaddr);	/* Transparent translation */
	  }
	  if (mmusr & MMU_R_040)
	    return (mmusr & PAGE_MASK) | (vaddr & (PAGE_SIZE-1));

	  panic ("VTOP040: bad virtual address %08lx (%lx)", vaddr, mmusr);
	} else {
	  volatile unsigned short temp;
	  unsigned short mmusr;
	  unsigned long *descaddr;

	  asm volatile ("ptestr #5,%2@,#7,%0\n\t"
			"pmove %/psr,%1@"
			: "=a&" (descaddr)
			: "a" (&temp), "a" (vaddr));
	  mmusr = temp;

	  if (mmusr & (MMU_I|MMU_B|MMU_L))
	    panic ("VTOP030: bad virtual address %08lx (%x)", vaddr, mmusr);

	  descaddr = (unsigned long *)PTOV(descaddr);

	  switch (mmusr & MMU_NUM) {
	  case 1:
	    return (*descaddr & 0xfe000000) | (vaddr & 0x01ffffff);
	  case 2:
	    return (*descaddr & 0xfffc0000) | (vaddr & 0x0003ffff);
	  case 3:
	    return (*descaddr & PAGE_MASK) | (vaddr & (PAGE_SIZE-1));
	  default:
	    panic ("VTOP: bad levels (%u) for virtual address %08lx",
		   mmusr & MMU_NUM, vaddr);
	  }
	}

	panic ("VTOP: bad virtual address %08lx", vaddr);
}
unsigned long mm_ptov (unsigned long paddr)
{
	int i = 0;
	unsigned long offset = 0;

	do {
		if (paddr >= m68k_memory[i].addr &&
		    paddr < (m68k_memory[i].addr
			     + m68k_memory[i].size)) {
#ifdef DEBUGPV
			printk ("PTOV(%lx)=%lx\n", paddr,
				(paddr - m68k_memory[i].addr) + offset);
#endif
			return (paddr - m68k_memory[i].addr) + offset;
		} else
			offset += m68k_memory[i].size;
		i++;
	} while (i < m68k_num_memory);
	/*
	 * assume that the kernel virtual address is the same as the
	 * physical address.
	 *
	 * This should be reasonable in most situations:
	 * 1) They shouldn't be dereferencing the virtual address
	 *    unless they are sure that it is valid from kernel space.
	 * 2) The only usage I see so far is converting a page table
	 *    reference to some non-FASTMEM address space when freeing
	 *    mmaped "/dev/mem" pages.  These addresses are just passed
	 *    to "free_page", which ignores addresses that aren't in
	 *    the memory list anyway.
	 */

#ifdef CONFIG_AMIGA
	/*
	 * if on an amiga and address is in first 16M, move it
	 * to the ZTWO_VADDR range
	 */
	if (MACH_IS_AMIGA && paddr < 16*1024*1024)
		return ZTWO_VADDR(paddr);
#endif
	return paddr;
}
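
/*
 * Illustrative note (assuming the usual Amiga layout, where the first
 * 16 MB of physical address space -- Zorro II space, chip RAM and the
 * custom registers -- is statically mapped at a fixed kernel virtual
 * base): a physical address below 16 MB such as the custom register
 * block at 0x00dff000 is returned as ZTWO_VADDR(0x00dff000), i.e. its
 * alias in that static mapping rather than a memory-chunk address.
 */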
/* invalidate page in both caches */
#define	clear040(paddr)					\
	__asm__ __volatile__ ("nop\n\t"			\
			      ".chip 68040\n\t"		\
			      "cinvp %%bc,(%0)\n\t"	\
			      ".chip 68k"		\
			      : : "a" (paddr))

/* invalidate page in i-cache */
#define	cleari040(paddr)				\
	__asm__ __volatile__ ("nop\n\t"			\
			      ".chip 68040\n\t"		\
			      "cinvp %%ic,(%0)\n\t"	\
			      ".chip 68k"		\
			      : : "a" (paddr))

/* push page in both caches */
#define	push040(paddr)					\
	__asm__ __volatile__ ("nop\n\t"			\
			      ".chip 68040\n\t"		\
			      "cpushp %%bc,(%0)\n\t"	\
			      ".chip 68k"		\
			      : : "a" (paddr))

/* push and invalidate page in both caches */
#define	pushcl040(paddr)			\
	do { push040(paddr);			\
	     if (CPU_IS_060) clear040(paddr);	\
	} while(0)

/* push page in both caches, invalidate in i-cache */
#define	pushcli040(paddr)			\
	do { push040(paddr);			\
	     if (CPU_IS_060) cleari040(paddr);	\
	} while(0)
/*
 * 040: Hit every page containing an address in the range paddr..paddr+len-1.
 * (Low order bits of the ea of a CINVP/CPUSHP are "don't care"s).
 * Hit every page until there is a page or less to go.  Hit the next page,
 * and the one after that if the range hits it.
 */
/* ++roman: A little bit more care is required here: The CINVP instruction
 * invalidates cache entries WITHOUT WRITING DIRTY DATA BACK! So the beginning
 * and the end of the region must be treated differently if they are not
 * aligned exactly to a page boundary.  Otherwise too much data may be
 * invalidated and thus lost forever.  CPUSHP does what we need: it
 * invalidates the page only after pushing dirty data to memory.  (Thanks to
 * Jes for discovering the problem!)
 */
/* ... but on the '060, CPUSH doesn't invalidate (for us, since we have set
 * the DPI bit in the CACR; would it cause problems to change this
 * temporarily?).  So we have to push first and then invalidate separately.
 */

/*
 * cache_clear() semantics: Clear any cache entries for the area in question,
 * without writing back dirty entries first.  This is useful if the data will
 * be overwritten anyway, e.g. by DMA to memory.  The range is defined by a
 * _physical_ address.
 */
void cache_clear (unsigned long paddr, int len)
{
    if (CPU_IS_040_OR_060) {
	int tmp;

	/*
	 * We need special treatment for the first page, in case it
	 * is not page-aligned.  Page align the addresses to work
	 * around bug I17 in the 68060.
	 */
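	/*
	 * Worked example (illustrative): paddr = 0x10234, len = 0x2000.
	 * tmp = -0x10234 & 0xfff = 0xdcc bytes remain up to the next
	 * page boundary, so the partially covered page 0x10000 is
	 * pushed-and-invalidated (it may hold dirty data outside the
	 * range).  That leaves len = 0x1234: page 0x11000 lies fully
	 * inside the range and is simply invalidated, and the
	 * partially covered page 0x12000 at the end is pushed again.
	 */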
	if ((tmp = -paddr & (PAGE_SIZE - 1))) {
	    pushcl040(paddr & PAGE_MASK);
	    if ((len -= tmp) <= 0)
		return;
	    paddr += tmp;
	}
	tmp = PAGE_SIZE;
	paddr &= PAGE_MASK;
	while ((len -= tmp) >= 0) {
	    clear040(paddr);
	    paddr += tmp;
	}
	if ((len += tmp))
	    /* a page boundary gets crossed at the end */
	    pushcl040(paddr);
    }
    else /* 68030 or 68020 */
	asm volatile ("movec %/cacr,%/d0\n\t"
		      "oriw %0,%/d0\n\t"
		      "movec %/d0,%/cacr"
		      : : "i" (FLUSH_I_AND_D)
		      : "d0");
}
/*
 * cache_push() semantics: Write back any dirty cache data in the given area,
 * and invalidate the range in the instruction cache.  It need not (but may)
 * also invalidate those entries in the data cache.  The range is defined by
 * a _physical_ address.
 */

void cache_push (unsigned long paddr, int len)
{
    if (CPU_IS_040_OR_060) {
	int tmp = PAGE_SIZE;

	/*
	 * on 68040 or 68060, push cache lines for pages in the range;
	 * on the '040 this also invalidates the pushed lines, but not on
	 * the '060!
	 */
	len += paddr & (PAGE_SIZE - 1);

	/*
	 * Work around bug I17 in the 68060 affecting some instruction
	 * lines not being invalidated properly.
	 */
	paddr &= PAGE_MASK;

	do {
	    pushcli040(paddr);
	    paddr += tmp;
	} while ((len -= tmp) > 0);
    }
    /*
     * 68030/68020 have no writeback cache.  On the other hand,
     * cache_push is actually a superset of cache_clear (the lines
     * get written back and invalidated), so we should make sure
     * to perform the corresponding actions.  After all, this is getting
     * called in places where we've just loaded code, or whatever, so
     * flushing the icache is appropriate; flushing the dcache shouldn't
     * be required.
     */
    else /* 68030 or 68020 */
	asm volatile ("movec %/cacr,%/d0\n\t"
		      "oriw %0,%/d0\n\t"
		      "movec %/d0,%/cacr"
		      : : "i" (FLUSH_I)
		      : "d0");
}
int mm_end_of_chunk (unsigned long addr, int len)
{
	int i;

	for (i = 0; i < m68k_num_memory; i++)
		if (m68k_memory[i].addr + m68k_memory[i].size == addr + len)
			return 1;
	return 0;
}