[davej-history.git] arch/m68k/mm/memory.c (Import 2.1.116pre2)

/*
 *  linux/arch/m68k/mm/memory.c
 *
 *  Copyright (C) 1995  Hamish Macdonald
 */

#include <linux/config.h>
#include <linux/mm.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/malloc.h>

#include <asm/setup.h>
#include <asm/segment.h>
#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/system.h>
#include <asm/traps.h>
#ifdef CONFIG_AMIGA
#include <asm/amigahw.h>
#endif

struct pgtable_cache_struct quicklists;

void __bad_pte(pmd_t *pmd)
{
	printk("Bad pmd in pte_alloc: %08lx\n", pmd_val(*pmd));
	pmd_set(pmd, BAD_PAGETABLE);
}

void __bad_pmd(pgd_t *pgd)
{
	printk("Bad pgd in pmd_alloc: %08lx\n", pgd_val(*pgd));
	pgd_set(pgd, (pmd_t *)BAD_PAGETABLE);
}

pte_t *get_pte_slow(pmd_t *pmd, unsigned long offset)
{
	pte_t *pte;

	pte = (pte_t *) __get_free_page(GFP_KERNEL);
	if (pmd_none(*pmd)) {
		if (pte) {
			clear_page((unsigned long)pte);
			flush_page_to_ram((unsigned long)pte);
			flush_tlb_kernel_page((unsigned long)pte);
			nocache_page((unsigned long)pte);
			pmd_set(pmd, pte);
			return pte + offset;
		}
		pmd_set(pmd, BAD_PAGETABLE);
		return NULL;
	}
	free_page((unsigned long)pte);
	if (pmd_bad(*pmd)) {
		__bad_pte(pmd);
		return NULL;
	}
	return (pte_t *) pmd_page(*pmd) + offset;
}
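
/*
 * Illustrative sketch (hypothetical caller, not part of the original file):
 * a pte_alloc()-style wrapper would try the pmd entry first and only fall
 * back to get_pte_slow() above when the entry is empty or bad.  The helper
 * name and the exact fast-path check are assumptions for illustration only.
 */
#if 0
static pte_t *example_pte_alloc(pmd_t *pmd, unsigned long address)
{
	unsigned long offset = (address >> PAGE_SHIFT) & (PTRS_PER_PTE - 1);

	if (pmd_none(*pmd) || pmd_bad(*pmd))
		return get_pte_slow(pmd, offset);	/* slow path above */
	return (pte_t *) pmd_page(*pmd) + offset;	/* fast path */
}
#endif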

pmd_t *get_pmd_slow(pgd_t *pgd, unsigned long offset)
{
	pmd_t *pmd;

	pmd = get_pointer_table();
	if (pgd_none(*pgd)) {
		if (pmd) {
			pgd_set(pgd, pmd);
			return pmd + offset;
		}
		pgd_set(pgd, (pmd_t *)BAD_PAGETABLE);
		return NULL;
	}
	free_pointer_table(pmd);
	if (pgd_bad(*pgd)) {
		__bad_pmd(pgd);
		return NULL;
	}
	return (pmd_t *) pgd_page(*pgd) + offset;
}

/* ++andreas: {get,free}_pointer_table rewritten to use unused fields from
   struct page instead of separately kmalloced struct.  Stolen from
   arch/sparc/mm/srmmu.c ... */

typedef struct page ptable_desc;
static ptable_desc ptable_list = { &ptable_list, &ptable_list };

#define PD_MARKBITS(dp) (*(unsigned char *)&(dp)->offset)
#define PD_PAGE(dp) (PAGE_OFFSET + ((dp)->map_nr << PAGE_SHIFT))
#define PAGE_PD(page) ((ptable_desc *)&mem_map[MAP_NR(page)])

#define PTABLE_SIZE (PTRS_PER_PMD * sizeof(pmd_t))

pmd_t *get_pointer_table (void)
{
	ptable_desc *dp = ptable_list.next;
	unsigned char mask = PD_MARKBITS (dp);
	unsigned char tmp;
	unsigned int off;

	/*
	 * For a pointer table for a user process address space, a
	 * table is taken from a page allocated for the purpose.  Each
	 * page can hold 8 pointer tables.  The page is remapped in
	 * virtual address space to be noncacheable.
	 */
	if (mask == 0) {
		unsigned long page;
		ptable_desc *new;

		if (!(page = get_free_page (GFP_KERNEL)))
			return 0;

		flush_tlb_kernel_page(page);
		nocache_page (page);

		new = PAGE_PD(page);
		PD_MARKBITS(new) = 0xfe;
		(new->prev = dp->prev)->next = new;
		(new->next = dp)->prev = new;
		return (pmd_t *)page;
	}

	for (tmp = 1, off = 0; (mask & tmp) == 0; tmp <<= 1, off += PTABLE_SIZE);
	PD_MARKBITS(dp) = mask & ~tmp;
	if (!PD_MARKBITS(dp)) {
		ptable_desc *last, *next;

		/* move to end of list */
		next = dp->next;
		(next->prev = dp->prev)->next = next;

		last = ptable_list.prev;
		(dp->next = last->next)->prev = dp;
		(dp->prev = last)->next = dp;
	}
	return (pmd_t *) (PD_PAGE(dp) + off);
}
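
/*
 * Illustrative sketch (assumption, not part of the original file): a C
 * rendering of the mark-bit bookkeeping used above.  One byte per page,
 * one bit per PTABLE_SIZE-byte slot, a set bit meaning "free"; e.g. the
 * 0xfe written for a fresh page marks slot 0 (just handed out) as used
 * and slots 1-7 as free.
 */
#if 0
static int example_first_free_slot(unsigned char markbits)
{
	int slot;

	for (slot = 0; slot < 8; slot++)
		if (markbits & (1 << slot))
			return slot;	/* table lives at PD_PAGE(dp) + slot * PTABLE_SIZE */
	return -1;			/* page full, a new page must be allocated */
}
#endif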

int free_pointer_table (pmd_t *ptable)
{
	ptable_desc *dp, *first;
	unsigned long page = (unsigned long)ptable & PAGE_MASK;
	unsigned char mask = 1 << (((unsigned long)ptable - page)/PTABLE_SIZE);

	dp = PAGE_PD(page);
	if (PD_MARKBITS (dp) & mask)
		panic ("table already free!");

	PD_MARKBITS (dp) |= mask;

	if (PD_MARKBITS(dp) == 0xff) {
		/* all tables in page are free, free page */
		ptable_desc *next = dp->next;
		(next->prev = dp->prev)->next = next;
		cache_page (page);
		free_page (page);
		return 1;
	} else if ((first = ptable_list.next) != dp) {
		/*
		 * move this descriptor to the front of the list, since
		 * it has one or more free tables.
		 */
		ptable_desc *next = dp->next;
		(next->prev = dp->prev)->next = next;

		(dp->prev = first->prev)->next = dp;
		(dp->next = first)->prev = dp;
	}
	return 0;
}

/* maximum pages used for kpointer tables */
#define KPTR_PAGES	4
/* # of reserved slots */
#define RESERVED_KPTR	4

extern pmd_tablepage kernel_pmd_table; /* reserved in head.S */

static struct kpointer_pages {
	pmd_tablepage *page[KPTR_PAGES];
	u_char alloced[KPTR_PAGES];
} kptr_pages;

void init_kpointer_table(void) {
	short i = KPTR_PAGES-1;

	/* first page is reserved in head.S */
	kptr_pages.page[i] = &kernel_pmd_table;
	kptr_pages.alloced[i] = ~(0xff>>RESERVED_KPTR);
	for (i--; i>=0; i--) {
		kptr_pages.page[i] = NULL;
		kptr_pages.alloced[i] = 0;
	}
}

pmd_t *get_kpointer_table (void)
{
	/* For pointer tables for the kernel virtual address space,
	 * use the page that is reserved in head.S that can hold up to
	 * 8 pointer tables. 3 of these tables are always reserved
	 * (kernel_pg_dir, swapper_pg_dir and kernel pointer table for
	 * the first 16 MB of RAM). In addition, the 4th pointer table
	 * in this page is reserved. On Amiga and Atari, it is used to
	 * map in the hardware registers. It may be used for other
	 * purposes on other 68k machines. This leaves 4 pointer tables
	 * available for use by the kernel. 1 of them is usually used
	 * for the vmalloc tables. This allows mapping of 3 * 32 = 96 MB
	 * of physical memory. But these pointer tables are also used
	 * for other purposes, like kernel_map(), so further pages can
	 * now be allocated.
	 */
	pmd_tablepage *page;
	pmd_table *table;
	long nr, offset = -8;
	short i;

	/* find a page that still has a free slot, i.e. a clear bit in alloced[i] */
	for (i=KPTR_PAGES-1; i>=0; i--) {
		asm volatile("bfffo %1{%2,#8},%0"
			: "=d" (nr)
			: "d" ((u_char)~kptr_pages.alloced[i]), "d" (offset));
		if (nr)
			break;
	}
	if (i < 0) {
		printk("No space for kernel pointer table!\n");
		return NULL;
	}
	if (!(page = kptr_pages.page[i])) {
		if (!(page = (pmd_tablepage *)get_free_page(GFP_KERNEL))) {
			printk("No space for kernel pointer table!\n");
			return NULL;
		}
		flush_tlb_kernel_page((unsigned long) page);
		nocache_page((u_long)(kptr_pages.page[i] = page));
	}
	/* mark slot nr-offset of this page as allocated and hand it out */
	asm volatile("bfset %0@{%1,#1}"
		: /* no output */
		: "a" (&kptr_pages.alloced[i]), "d" (nr-offset));
	table = &(*page)[nr-offset];
	memset(table, 0, sizeof(pmd_table));
	return ((pmd_t *)table);
}
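
/*
 * Illustrative sketch (hypothetical, not part of the original file): what
 * the bfffo scan in get_kpointer_table() computes in plain C.  bfffo
 * searches the 8-bit field of ~alloced[i] for the first set bit, i.e. the
 * first slot whose "allocated" bit is still clear, counting from the most
 * significant bit; with the base offset of -8, the result is nonzero
 * exactly when such a slot exists, and nr-offset recovers the slot index
 * used for bfset and for indexing the table page.
 */
#if 0
static long example_first_free_kptr_slot(unsigned char alloced)
{
	long slot;

	for (slot = 0; slot < 8; slot++)	/* slot 0 = MSB, as bfffo counts */
		if (!(alloced & (0x80 >> slot)))
			return slot;
	return -1;				/* page full */
}
#endif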

void free_kpointer_table (pmd_t *pmdp)
{
	pmd_table *table = (pmd_table *)pmdp;
	pmd_tablepage *page = (pmd_tablepage *)((u_long)table & PAGE_MASK);
	long nr;
	short i;

	for (i=KPTR_PAGES-1; i>=0; i--) {
		if (kptr_pages.page[i] == page)
			break;
	}
	nr = ((u_long)table - (u_long)page) / sizeof(pmd_table);
	if (!table || i < 0 || (i == KPTR_PAGES-1 && nr < RESERVED_KPTR)) {
		printk("Attempt to free invalid kernel pointer table: %p\n", table);
		return;
	}
	asm volatile("bfclr %0@{%1,#1}"
		: /* no output */
		: "a" (&kptr_pages.alloced[i]), "d" (nr));
	if (!kptr_pages.alloced[i]) {
		kptr_pages.page[i] = 0;
		cache_page ((u_long)page);
		free_page ((u_long)page);
	}
}

static unsigned long transp_transl_matches( unsigned long regval,
					    unsigned long vaddr )
{
	unsigned long base, mask;

	/* enabled? */
	if (!(regval & 0x8000))
		return( 0 );

	if (CPU_IS_030) {
		/* function code match? */
		base = (regval >> 4) & 7;
		mask = ~(regval & 7);
		if ((SUPER_DATA & mask) != (base & mask))
			return( 0 );
	}
	else {
		/* must not be user-only */
		if ((regval & 0x6000) == 0)
			return( 0 );
	}

	/* address match? */
	base = regval & 0xff000000;
	mask = ~(regval << 8) & 0xff000000;
	return( (vaddr & mask) == (base & mask) );
}
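
/*
 * Worked example (hypothetical register values, for illustration only):
 * an enabled '040/'060 data TTR of 0x00ffe060 has address base 0x00 and
 * address mask 0xff, so every base comparison bit is ignored and any
 * address matches; with a mask of 0x7f only the top address bit is
 * compared against the base.
 */
#if 0
static void example_ttr_match(void)
{
	/* on a '040/'060, i.e. the else branch above: */
	transp_transl_matches(0x00ffe060, 0x12345678);	/* 1: mask 0xff ignores all base bits */
	transp_transl_matches(0x007fe060, 0x92345678);	/* 0: top address bit 1 != base bit 0 */
}
#endif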

static unsigned long mm_vtop_fallback (unsigned long);

/*
 * The following two routines map from a physical address to a kernel
 * virtual address and vice versa.
 */
unsigned long mm_vtop (unsigned long vaddr)
{
	int i=0;
	unsigned long voff = vaddr;
	unsigned long offset = 0;

	do{
		if (voff < offset + m68k_memory[i].size) {
#ifdef DEBUGPV
			printk ("VTOP(%lx)=%lx\n", vaddr,
				m68k_memory[i].addr + voff - offset);
#endif
			return m68k_memory[i].addr + voff - offset;
		} else
			offset += m68k_memory[i].size;
		i++;
	}while (i < m68k_num_memory);

	return mm_vtop_fallback(vaddr);
}

/* Separate function to make the common case faster (needs to save less
   registers) */
static unsigned long mm_vtop_fallback (unsigned long vaddr)
{
	/* not in one of the memory chunks; test for applying transparent
	 * translation */

	if (CPU_IS_030) {
		unsigned long ttreg;

		asm volatile( ".chip 68030\n\t"
			      "pmove %/tt0,%0@\n\t"
			      ".chip 68k"
			      : : "a" (&ttreg) );
		if (transp_transl_matches( ttreg, vaddr ))
			return vaddr;
		asm volatile( ".chip 68030\n\t"
			      "pmove %/tt1,%0@\n\t"
			      ".chip 68k"
			      : : "a" (&ttreg) );
		if (transp_transl_matches( ttreg, vaddr ))
			return vaddr;
	}
	else if (CPU_IS_040_OR_060) {
		unsigned long ttreg;

		asm volatile( ".chip 68040\n\t"
			      "movec %%dtt0,%0\n\t"
			      ".chip 68k"
			      : "=d" (ttreg) );
		if (transp_transl_matches( ttreg, vaddr ))
			return vaddr;
		asm volatile( ".chip 68040\n\t"
			      "movec %%dtt1,%0\n\t"
			      ".chip 68k"
			      : "=d" (ttreg) );
		if (transp_transl_matches( ttreg, vaddr ))
			return vaddr;
	}

	/* no match, too, so get the actual physical address from the MMU. */

	if (CPU_IS_060) {
		mm_segment_t fs = get_fs();
		unsigned long paddr;

		set_fs (MAKE_MM_SEG(SUPER_DATA));

		/* The PLPAR instruction causes an access error if the translation
		 * is not possible. We don't catch that here, so a bad kernel trap
		 * will be reported in this case. */
		asm volatile (".chip 68060\n\t"
			      "plpar (%0)\n\t"
			      ".chip 68k"
			      : "=a" (paddr)
			      : "0" (vaddr));
		set_fs (fs);

		return paddr;

	} else if (CPU_IS_040) {
		unsigned long mmusr;
		mm_segment_t fs = get_fs();

		set_fs (MAKE_MM_SEG(SUPER_DATA));

		asm volatile (".chip 68040\n\t"
			      "ptestr (%1)\n\t"
			      "movec %%mmusr, %0\n\t"
			      ".chip 68k"
			      : "=r" (mmusr)
			      : "a" (vaddr));
		set_fs (fs);

		if (mmusr & MMU_T_040) {
			return (vaddr);	/* Transparent translation */
		}
		if (mmusr & MMU_R_040)
			return (mmusr & PAGE_MASK) | (vaddr & (PAGE_SIZE-1));

		panic ("VTOP040: bad virtual address %08lx (%lx)", vaddr, mmusr);
	} else {
		volatile unsigned short temp;
		unsigned short mmusr;
		unsigned long *descaddr;

		asm volatile ("ptestr #5,%2@,#7,%0\n\t"
			      "pmove %/psr,%1@"
			      : "=a&" (descaddr)
			      : "a" (&temp), "a" (vaddr));
		mmusr = temp;

		if (mmusr & (MMU_I|MMU_B|MMU_L))
			panic ("VTOP030: bad virtual address %08lx (%x)", vaddr, mmusr);

		descaddr = (unsigned long *)PTOV(descaddr);

		switch (mmusr & MMU_NUM) {
		case 1:
			return (*descaddr & 0xfe000000) | (vaddr & 0x01ffffff);
		case 2:
			return (*descaddr & 0xfffc0000) | (vaddr & 0x0003ffff);
		case 3:
			return (*descaddr & PAGE_MASK) | (vaddr & (PAGE_SIZE-1));
		default:
			panic ("VTOP: bad levels (%u) for virtual address %08lx",
			       mmusr & MMU_NUM, vaddr);
		}
	}

	panic ("VTOP: bad virtual address %08lx", vaddr);
}
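
/*
 * Note on the 68030 descriptor levels above, derived from the masks in the
 * switch: a translation that ends after 1 table level uses an
 * early-termination descriptor covering 0x02000000 bytes (32 MB, hence the
 * 0xfe000000/0x01ffffff split), after 2 levels a 0x00040000-byte (256 KB)
 * region, and after 3 levels an ordinary PAGE_SIZE page.
 */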

unsigned long mm_ptov (unsigned long paddr)
{
	int i = 0;
	unsigned long offset = 0;

	do{
		if (paddr >= m68k_memory[i].addr &&
		    paddr < (m68k_memory[i].addr
			     + m68k_memory[i].size)) {
#ifdef DEBUGPV
			printk ("PTOV(%lx)=%lx\n", paddr,
				(paddr - m68k_memory[i].addr) + offset);
#endif
			return (paddr - m68k_memory[i].addr) + offset;
		} else
			offset += m68k_memory[i].size;
		i++;
	}while (i < m68k_num_memory);

	/*
	 * assume that the kernel virtual address is the same as the
	 * physical address.
	 *
	 * This should be reasonable in most situations:
	 *  1) They shouldn't be dereferencing the virtual address
	 *     unless they are sure that it is valid from kernel space.
	 *  2) The only usage I see so far is converting a page table
	 *     reference to some non-FASTMEM address space when freeing
	 *     mmaped "/dev/mem" pages.  These addresses are just passed
	 *     to "free_page", which ignores addresses that aren't in
	 *     the memory list anyway.
	 */

#ifdef CONFIG_AMIGA
	/*
	 * if on an amiga and address is in first 16M, move it
	 * to the ZTWO_VADDR range
	 */
	if (MACH_IS_AMIGA && paddr < 16*1024*1024)
		return ZTWO_VADDR(paddr);
#endif
	return paddr;
}
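
/*
 * Worked example (hypothetical two-chunk layout, for illustration only):
 * with m68k_memory[0] = { addr 0x07000000, size 4 MB } and
 * m68k_memory[1] = { addr 0x06000000, size 2 MB }, virtual 0x00500000 lies
 * 1 MB into the second chunk, so mm_vtop(0x00500000) == 0x06100000 and
 * mm_ptov(0x06100000) == 0x00500000: the two routines are inverses for
 * addresses inside the chunk list.
 */
#if 0
static void example_vtop_ptov_roundtrip(void)
{
	unsigned long vaddr = 0x00500000;
	unsigned long paddr = mm_vtop(vaddr);	/* 0x06100000 with the layout above */

	if (mm_ptov(paddr) != vaddr)
		printk("vtop/ptov mismatch\n");
}
#endif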

/* invalidate page in both caches */
#define clear040(paddr)					\
	__asm__ __volatile__ ("nop\n\t"			\
			      ".chip 68040\n\t"		\
			      "cinvp %%bc,(%0)\n\t"	\
			      ".chip 68k"		\
			      : : "a" (paddr))

/* invalidate page in i-cache */
#define cleari040(paddr)				\
	__asm__ __volatile__ ("nop\n\t"			\
			      ".chip 68040\n\t"		\
			      "cinvp %%ic,(%0)\n\t"	\
			      ".chip 68k"		\
			      : : "a" (paddr))

/* push page in both caches */
#define push040(paddr)					\
	__asm__ __volatile__ ("nop\n\t"			\
			      ".chip 68040\n\t"		\
			      "cpushp %%bc,(%0)\n\t"	\
			      ".chip 68k"		\
			      : : "a" (paddr))

/* push and invalidate page in both caches */
#define pushcl040(paddr)			\
	do { push040(paddr);			\
	     if (CPU_IS_060) clear040(paddr);	\
	} while(0)

/* push page in both caches, invalidate in i-cache */
#define pushcli040(paddr)			\
	do { push040(paddr);			\
	     if (CPU_IS_060) cleari040(paddr);	\
	} while(0)

/*
 * 040: Hit every page containing an address in the range paddr..paddr+len-1.
 * (Low order bits of the ea of a CINVP/CPUSHP are "don't care"s).
 * Hit every page until there is a page or less to go. Hit the next page,
 * and the one after that if the range hits it.
 */
/* ++roman: A little bit more care is required here: The CINVP instruction
 * invalidates cache entries WITHOUT WRITING DIRTY DATA BACK! So the beginning
 * and the end of the region must be treated differently if they are not
 * exactly at the beginning or end of a page boundary. Else, maybe too much
 * data becomes invalidated and thus lost forever. CPUSHP does what we need:
 * it invalidates the page after pushing dirty data to memory. (Thanks to Jes
 * for discovering the problem!)
 */
/* ... but on the '060, CPUSH doesn't invalidate (for us, since we have set
 * the DPI bit in the CACR; would it cause problems with temporarily changing
 * this?). So we have to push first and then additionally to invalidate.
 */

/*
 * cache_clear() semantics: Clear any cache entries for the area in question,
 * without writing back dirty entries first. This is useful if the data will
 * be overwritten anyway, e.g. by DMA to memory. The range is defined by a
 * _physical_ address.
 */

void cache_clear (unsigned long paddr, int len)
{
	if (CPU_IS_040_OR_060) {
		int tmp;

		/*
		 * We need special treatment for the first page, in case it
		 * is not page-aligned. Page align the addresses to work
		 * around bug I17 in the 68060.
		 */
		if ((tmp = -paddr & (PAGE_SIZE - 1))) {
			pushcl040(paddr & PAGE_MASK);
			if ((len -= tmp) <= 0)
				return;
			paddr += tmp;
		}
		tmp = PAGE_SIZE;
		paddr &= PAGE_MASK;
		while ((len -= tmp) >= 0) {
			clear040(paddr);
			paddr += tmp;
		}
		if ((len += tmp))
			/* a page boundary gets crossed at the end */
			pushcl040(paddr);
	}
	else /* 68030 or 68020 */
		asm volatile ("movec %/cacr,%/d0\n\t"
			      "oriw %0,%/d0\n\t"
			      "movec %/d0,%/cacr"
			      : : "i" (FLUSH_I_AND_D)
			      : "d0");
}

/*
 * cache_push() semantics: Write back any dirty cache data in the given area,
 * and invalidate the range in the instruction cache. It needs not (but may)
 * invalidate those entries also in the data cache. The range is defined by a
 * _physical_ address.
 */

void cache_push (unsigned long paddr, int len)
{
	if (CPU_IS_040_OR_060) {
		int tmp = PAGE_SIZE;

		/*
		 * on 68040 or 68060, push cache lines for pages in the range;
		 * on the '040 this also invalidates the pushed lines, but not on
		 * the '060!
		 */
		len += paddr & (PAGE_SIZE - 1);

		/*
		 * Work around bug I17 in the 68060 affecting some instruction
		 * lines not being invalidated properly.
		 */
		paddr &= PAGE_MASK;

		do {
			pushcli040(paddr);
			paddr += tmp;
		} while ((len -= tmp) > 0);
	}
	/*
	 * 68030/68020 have no writeback cache. On the other hand,
	 * cache_push is actually a superset of cache_clear (the lines
	 * get written back and invalidated), so we should make sure
	 * to perform the corresponding actions. After all, this is getting
	 * called in places where we've just loaded code, or whatever, so
	 * flushing the icache is appropriate; flushing the dcache shouldn't
	 * be required.
	 */
	else /* 68030 or 68020 */
		asm volatile ("movec %/cacr,%/d0\n\t"
			      "oriw %0,%/d0\n\t"
			      "movec %/d0,%/cacr"
			      : : "i" (FLUSH_I)
			      : "d0");
}
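
/*
 * Usage sketch (hypothetical caller, not part of the original file): per
 * the semantics described above, cache_clear() suits buffers about to be
 * overwritten by a device (DMA to memory), cache_push() suits buffers the
 * device is about to read.  Both operate on *physical* addresses, hence
 * the mm_vtop() call here.
 */
#if 0
static void example_dma_prepare(void *buf, int len, int device_writes)
{
	unsigned long paddr = mm_vtop((unsigned long) buf);

	if (device_writes)
		cache_clear(paddr, len);	/* stale lines may be dropped, data is dead anyway */
	else
		cache_push(paddr, len);		/* make RAM match the dirty cache lines first */
}
#endif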

#undef clear040
#undef cleari040
#undef push040
#undef pushcl040
#undef pushcli040

int mm_end_of_chunk (unsigned long addr, int len)
{
	int i;

	for (i = 0; i < m68k_num_memory; i++)
		if (m68k_memory[i].addr + m68k_memory[i].size == addr + len)
			return 1;
	return 0;
}
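
/*
 * Illustrative sketch (hypothetical helper, not part of the original
 * file): the m68k_memory[]/m68k_num_memory chunk list that mm_vtop(),
 * mm_ptov() and mm_end_of_chunk() all walk can also give the total amount
 * of chunked physical memory.
 */
#if 0
static unsigned long example_total_chunk_memory(void)
{
	unsigned long total = 0;
	int i;

	for (i = 0; i < m68k_num_memory; i++)
		total += m68k_memory[i].size;
	return total;
}
#endif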