/*
 *  linux/arch/m68k/mm/memory.c
 *
 *  Copyright (C) 1995  Hamish Macdonald
 */

#include <linux/config.h>
#include <linux/mm.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/malloc.h>
#include <linux/init.h>

#include <asm/setup.h>
#include <asm/segment.h>
#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/system.h>
#include <asm/traps.h>
#include <asm/io.h>
#include <asm/machdep.h>
#ifdef CONFIG_AMIGA
#include <asm/amigahw.h>
#endif

struct pgtable_cache_struct quicklists;

void __bad_pte(pmd_t *pmd)
{
	printk("Bad pmd in pte_alloc: %08lx\n", pmd_val(*pmd));
	pmd_set(pmd, BAD_PAGETABLE);
}

void __bad_pmd(pgd_t *pgd)
{
	printk("Bad pgd in pmd_alloc: %08lx\n", pgd_val(*pgd));
	pgd_set(pgd, (pmd_t *)BAD_PAGETABLE);
}

pte_t *get_pte_slow(pmd_t *pmd, unsigned long offset)
{
	pte_t *pte;

	pte = (pte_t *) __get_free_page(GFP_KERNEL);
	if (pmd_none(*pmd)) {
		if (pte) {
			clear_page((unsigned long)pte);
			flush_page_to_ram((unsigned long)pte);
			flush_tlb_kernel_page((unsigned long)pte);
			nocache_page((unsigned long)pte);
			pmd_set(pmd, pte);
			return pte + offset;
		}
		pmd_set(pmd, BAD_PAGETABLE);
		return NULL;
	}
	free_page((unsigned long)pte);
	if (pmd_bad(*pmd)) {
		__bad_pte(pmd);
		return NULL;
	}
	return (pte_t *) pmd_page(*pmd) + offset;
}

pmd_t *get_pmd_slow(pgd_t *pgd, unsigned long offset)
{
	pmd_t *pmd;

	pmd = get_pointer_table();
	if (pgd_none(*pgd)) {
		if (pmd) {
			pgd_set(pgd, pmd);
			return pmd + offset;
		}
		pgd_set(pgd, (pmd_t *)BAD_PAGETABLE);
		return NULL;
	}
	free_pointer_table(pmd);
	if (pgd_bad(*pgd)) {
		__bad_pmd(pgd);
		return NULL;
	}
	return (pmd_t *) pgd_page(*pgd) + offset;
}

/* ++andreas: {get,free}_pointer_table rewritten to use unused fields from
   struct page instead of separately kmalloced struct.  Stolen from
   arch/sparc/mm/srmmu.c ... */

typedef struct page ptable_desc;
static ptable_desc ptable_list = { &ptable_list, &ptable_list };

#define PD_MARKBITS(dp)	(*(unsigned char *)&(dp)->offset)
#define PD_PAGE(dp)	(PAGE_OFFSET + ((dp)->map_nr << PAGE_SHIFT))
#define PAGE_PD(page)	((ptable_desc *)&mem_map[MAP_NR(page)])

#define PTABLE_SIZE (PTRS_PER_PMD * sizeof(pmd_t))
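
/*
 * Worked example of the mark-bit scheme (an illustrative sketch; the
 * concrete numbers assume the usual m68k values PTRS_PER_PMD == 128,
 * sizeof(pmd_t) == 4 and PAGE_SIZE == 4096, i.e. PTABLE_SIZE == 512
 * and 8 pointer tables per page): each bit of PD_MARKBITS(dp) marks
 * one 512-byte slot in the page as free, so 0xff means all 8 slots
 * are free, and after slot 0 is handed out the mask becomes 0xfe.
 * A table at offset 0x600 in its page corresponds to mark bit
 * 1 << (0x600 / 512) == 0x08.
 */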

void __init init_pointer_table(unsigned long ptable)
{
	ptable_desc *dp;
	unsigned long page = ptable & PAGE_MASK;
	unsigned char mask = 1 << ((ptable - page)/PTABLE_SIZE);

	dp = PAGE_PD(page);
	if (!(PD_MARKBITS(dp) & mask)) {
		PD_MARKBITS(dp) = 0xff;
		(dp->prev = ptable_list.prev)->next = dp;
		(dp->next = &ptable_list)->prev = dp;
	}

	PD_MARKBITS(dp) &= ~mask;
#ifdef DEBUG
	printk("init_pointer_table: %lx, %x\n", ptable, PD_MARKBITS(dp));
#endif

	/* unreserve the page so it's possible to free that page */
	dp->flags &= ~(1 << PG_reserved);
	atomic_set(&dp->count, 1);

	return;
}

pmd_t *get_pointer_table (void)
{
	ptable_desc *dp = ptable_list.next;
	unsigned char mask = PD_MARKBITS (dp);
	unsigned char tmp;
	unsigned int off;

	/*
	 * For a pointer table for a user process address space, a
	 * table is taken from a page allocated for the purpose.  Each
	 * page can hold 8 pointer tables.  The page is remapped in
	 * virtual address space to be noncacheable.
	 */
	if (mask == 0) {
		unsigned long page;
		ptable_desc *new;

		if (!(page = get_free_page (GFP_KERNEL)))
			return 0;

		flush_tlb_kernel_page(page);
		nocache_page (page);

		new = PAGE_PD(page);
		PD_MARKBITS(new) = 0xfe;
		(new->prev = dp->prev)->next = new;
		(new->next = dp)->prev = new;
		return (pmd_t *)page;
	}

	for (tmp = 1, off = 0; (mask & tmp) == 0; tmp <<= 1, off += PTABLE_SIZE)
		;
	PD_MARKBITS(dp) = mask & ~tmp;
	if (!PD_MARKBITS(dp)) {
		ptable_desc *last, *next;

		/* move to end of list */
		next = dp->next;
		(next->prev = dp->prev)->next = next;

		last = ptable_list.prev;
		(dp->next = last->next)->prev = dp;
		(dp->prev = last)->next = dp;
	}
	return (pmd_t *) (PD_PAGE(dp) + off);
}

int free_pointer_table (pmd_t *ptable)
{
	ptable_desc *dp, *first;
	unsigned long page = (unsigned long)ptable & PAGE_MASK;
	unsigned char mask = 1 << (((unsigned long)ptable - page)/PTABLE_SIZE);

	dp = PAGE_PD(page);
	if (PD_MARKBITS (dp) & mask)
		panic ("table already free!");

	PD_MARKBITS (dp) |= mask;

	if (PD_MARKBITS(dp) == 0xff) {
		/* all tables in page are free, free page */
		ptable_desc *next = dp->next;
		(next->prev = dp->prev)->next = next;
		cache_page (page);
		free_page (page);
		return 1;
	} else if ((first = ptable_list.next) != dp) {
		/*
		 * move this descriptor to the front of the list, since
		 * it has one or more free tables.
		 */
		ptable_desc *next = dp->next;
		(next->prev = dp->prev)->next = next;

		(dp->prev = first->prev)->next = dp;
		(dp->next = first)->prev = dp;
	}
	return 0;
}

static unsigned long transp_transl_matches( unsigned long regval,
					    unsigned long vaddr )
{
	unsigned long base, mask;

	/* enabled? */
	if (!(regval & 0x8000))
		return( 0 );

	if (CPU_IS_030) {
		/* function code match? */
		base = (regval >> 4) & 7;
		mask = ~(regval & 7);
		if ((SUPER_DATA & mask) != (base & mask))
			return( 0 );
	}
	else {
		/* must not be user-only */
		if ((regval & 0x6000) == 0)
			return( 0 );
	}

	/* address match? (only the top byte is compared; the mask field
	 * must not leak into the low 24 bits, or no range ever matches) */
	base = regval & 0xff000000;
	mask = ~(regval << 8) & 0xff000000;
	return( (vaddr & mask) == (base & mask) );
}
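
/*
 * Field layout assumed by the checks above (a sketch derived from the
 * code, not a full datasheet description): bit 15 enables the TT
 * register; bits 31-24 hold the logical address base and bits 23-16
 * the logical address mask, where a set mask bit means "don't compare
 * that address bit".  On the '030, bits 6-4 and 2-0 hold the function
 * code base and mask; on the '040/'060, bits 14-13 select which of
 * user/supervisor accesses are matched, and a user-only setting is
 * rejected here since we translate supervisor-space addresses.
 */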

#ifndef CONFIG_SINGLE_MEMORY_CHUNK
/*
 * The following two routines map from a physical address to a kernel
 * virtual address and vice versa.
 */
unsigned long mm_vtop (unsigned long vaddr)
{
	int i = 0;
	unsigned long voff = vaddr;
	unsigned long offset = 0;

	do {
		if (voff < offset + m68k_memory[i].size) {
#ifdef DEBUGPV
			printk ("VTOP(%lx)=%lx\n", vaddr,
				m68k_memory[i].addr + voff - offset);
#endif
			return m68k_memory[i].addr + voff - offset;
		} else
			offset += m68k_memory[i].size;
		i++;
	} while (i < m68k_num_memory);

	return mm_vtop_fallback(vaddr);
}
#endif
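
/*
 * Worked example for the chunk walk above (hypothetical chunk layout,
 * purely for illustration): with two 16MB chunks at physical
 * 0x01000000 and 0x05000000, a kernel virtual address of 0x01800000
 * lies 0x00800000 into the second chunk (whose virtual offset is
 * 0x01000000), so mm_vtop returns 0x05000000 + 0x00800000, i.e.
 * 0x05800000.
 */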

/* Separate function to make the common case faster (needs to save less
   registers) */
unsigned long mm_vtop_fallback (unsigned long vaddr)
{
	/* not in one of the memory chunks; test for applying transparent
	 * translation */

	if (CPU_IS_030) {
		unsigned long ttreg;

		asm volatile( ".chip 68030\n\t"
			      "pmove %/tt0,%0@\n\t"
			      ".chip 68k"
			      : : "a" (&ttreg) );
		if (transp_transl_matches( ttreg, vaddr ))
			return vaddr;
		asm volatile( ".chip 68030\n\t"
			      "pmove %/tt1,%0@\n\t"
			      ".chip 68k"
			      : : "a" (&ttreg) );
		if (transp_transl_matches( ttreg, vaddr ))
			return vaddr;
	}
	else if (CPU_IS_040_OR_060) {
		unsigned long ttreg;

		asm volatile( ".chip 68040\n\t"
			      "movec %%dtt0,%0\n\t"
			      ".chip 68k"
			      : "=d" (ttreg) );
		if (transp_transl_matches( ttreg, vaddr ))
			return vaddr;
		asm volatile( ".chip 68040\n\t"
			      "movec %%dtt1,%0\n\t"
			      ".chip 68k"
			      : "=d" (ttreg) );
		if (transp_transl_matches( ttreg, vaddr ))
			return vaddr;
	}

	/* No TT register matched either, so get the actual physical
	 * address from the MMU. */

	if (CPU_IS_060) {
		mm_segment_t fs = get_fs();
		unsigned long paddr;

		set_fs (MAKE_MM_SEG(SUPER_DATA));

		/* The PLPAR instruction causes an access error if the translation
		 * is not possible. We don't catch that here, so a bad kernel trap
		 * will be reported in this case. */
		asm volatile (".chip 68060\n\t"
			      "plpar (%0)\n\t"
			      ".chip 68k"
			      : "=a" (paddr)
			      : "0" (vaddr));
		set_fs (fs);

		return paddr;

	} else if (CPU_IS_040) {
		unsigned long mmusr;
		mm_segment_t fs = get_fs();

		set_fs (MAKE_MM_SEG(SUPER_DATA));

		asm volatile (".chip 68040\n\t"
			      "ptestr (%1)\n\t"
			      "movec %%mmusr, %0\n\t"
			      ".chip 68k"
			      : "=r" (mmusr)
			      : "a" (vaddr));
		set_fs (fs);

		if (mmusr & MMU_T_040) {
			return (vaddr);	/* Transparent translation */
		}
		if (mmusr & MMU_R_040)
			return (mmusr & PAGE_MASK) | (vaddr & (PAGE_SIZE-1));

		panic ("VTOP040: bad virtual address %08lx (%lx)", vaddr, mmusr);
	} else {
		volatile unsigned short temp;
		unsigned short mmusr;
		unsigned long *descaddr;

		asm volatile ("ptestr #5,%2@,#7,%0\n\t"
			      "pmove %/psr,%1@"
			      : "=a&" (descaddr)
			      : "a" (&temp), "a" (vaddr));
		mmusr = temp;

		if (mmusr & (MMU_I|MMU_B|MMU_L))
			panic ("VTOP030: bad virtual address %08lx (%x)", vaddr, mmusr);

		descaddr = phys_to_virt((unsigned long)descaddr);

		switch (mmusr & MMU_NUM) {
		case 1:
			return (*descaddr & 0xfe000000) | (vaddr & 0x01ffffff);
		case 2:
			return (*descaddr & 0xfffc0000) | (vaddr & 0x0003ffff);
		case 3:
			return (*descaddr & PAGE_MASK) | (vaddr & (PAGE_SIZE-1));
		default:
			panic ("VTOP: bad levels (%u) for virtual address %08lx",
			       mmusr & MMU_NUM, vaddr);
		}
	}

	panic ("VTOP: bad virtual address %08lx", vaddr);
}
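
/*
 * Note on the switch above (an interpretation of the masks used, not a
 * claim about every '030 setup): MMU_NUM is the number of table levels
 * the search walked, so a descriptor found after one level maps a 32MB
 * region (offset mask 0x01ffffff), after two levels a 256KB region
 * (mask 0x0003ffff), and after three levels a single page.
 */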

#ifndef CONFIG_SINGLE_MEMORY_CHUNK
unsigned long mm_ptov (unsigned long paddr)
{
	int i = 0;
	unsigned long offset = 0;

	do {
		if (paddr >= m68k_memory[i].addr &&
		    paddr < (m68k_memory[i].addr
			     + m68k_memory[i].size)) {
#ifdef DEBUGPV
			printk ("PTOV(%lx)=%lx\n", paddr,
				(paddr - m68k_memory[i].addr) + offset);
#endif
			return (paddr - m68k_memory[i].addr) + offset;
		} else
			offset += m68k_memory[i].size;
		i++;
	} while (i < m68k_num_memory);

	/*
	 * assume that the kernel virtual address is the same as the
	 * physical address.
	 *
	 * This should be reasonable in most situations:
	 *  1) They shouldn't be dereferencing the virtual address
	 *     unless they are sure that it is valid from kernel space.
	 *  2) The only usage I see so far is converting a page table
	 *     reference to some non-FASTMEM address space when freeing
	 *     mmaped "/dev/mem" pages.  These addresses are just passed
	 *     to "free_page", which ignores addresses that aren't in
	 *     the memory list anyway.
	 */

#ifdef CONFIG_AMIGA
	/*
	 * if on an amiga and address is in first 16M, move it
	 * to the ZTWO_VADDR range
	 */
	if (MACH_IS_AMIGA && paddr < 16*1024*1024)
		return ZTWO_VADDR(paddr);
#endif
	return paddr;
}
#endif

/* invalidate page in both caches */
#define clear040(paddr)					\
	__asm__ __volatile__ ("nop\n\t"			\
			      ".chip 68040\n\t"		\
			      "cinvp %%bc,(%0)\n\t"	\
			      ".chip 68k"		\
			      : : "a" (paddr))

/* invalidate page in i-cache */
#define cleari040(paddr)				\
	__asm__ __volatile__ ("nop\n\t"			\
			      ".chip 68040\n\t"		\
			      "cinvp %%ic,(%0)\n\t"	\
			      ".chip 68k"		\
			      : : "a" (paddr))

/* push page in both caches */
#define push040(paddr)					\
	__asm__ __volatile__ ("nop\n\t"			\
			      ".chip 68040\n\t"		\
			      "cpushp %%bc,(%0)\n\t"	\
			      ".chip 68k"		\
			      : : "a" (paddr))

/* push and invalidate page in both caches */
#define pushcl040(paddr)			\
	do { push040(paddr);			\
	     if (CPU_IS_060) clear040(paddr);	\
	} while(0)

/* push page in both caches, invalidate in i-cache */
#define pushcli040(paddr)			\
	do { push040(paddr);			\
	     if (CPU_IS_060) cleari040(paddr);	\
	} while(0)
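
/*
 * Summary of what the composite macros do per CPU (a reading aid that
 * restates the definitions above): on the '040, CPUSHP already
 * invalidates what it pushes, so pushcl040()/pushcli040() reduce to a
 * plain push; on the '060, where we run with the DPI bit set in the
 * CACR so CPUSH does not invalidate, they additionally run CINVP on
 * both caches or on the i-cache only, respectively.
 */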

/*
 * 040: Hit every page containing an address in the range paddr..paddr+len-1.
 * (Low order bits of the ea of a CINVP/CPUSHP are "don't care"s).
 * Hit every page until there is a page or less to go. Hit the next page,
 * and the one after that if the range hits it.
 */
/* ++roman: A little bit more care is required here: The CINVP instruction
 * invalidates cache entries WITHOUT WRITING DIRTY DATA BACK! So the beginning
 * and the end of the region must be treated differently if they are not
 * exactly at the beginning or end of a page boundary. Else, maybe too much
 * data becomes invalidated and thus lost forever. CPUSHP does what we need:
 * it invalidates the page after pushing dirty data to memory. (Thanks to Jes
 * for discovering the problem!)
 */
/* ... but on the '060, CPUSH doesn't invalidate (for us, since we have set
 * the DPI bit in the CACR; would it cause problems with temporarily changing
 * this?). So we have to push first and then additionally invalidate.
 */

#ifdef CONFIG_M68K_L2_CACHE
/*
 * Jes was worried about performance (urhh ???) so it's optional
 */

void (*mach_l2_flush)(int) = NULL;
#endif

/*
 * cache_clear() semantics: Clear any cache entries for the area in question,
 * without writing back dirty entries first. This is useful if the data will
 * be overwritten anyway, e.g. by DMA to memory. The range is defined by a
 * _physical_ address.
 */

void cache_clear (unsigned long paddr, int len)
{
	if (CPU_IS_040_OR_060) {
		int tmp;

		/*
		 * We need special treatment for the first page, in case it
		 * is not page-aligned. Page align the addresses to work
		 * around bug I17 in the 68060.
		 */
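		/*
		 * Hedged arithmetic example (PAGE_SIZE assumed 4096): for
		 * paddr == 0x1234, tmp = -0x1234 & 0xfff == 0xdcc, the
		 * number of bytes up to the next page boundary, so the
		 * partial first page is pushed and invalidated as a whole
		 * via pushcl040() rather than blindly CINVPed, which would
		 * discard dirty data outside the range.
		 */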
		if ((tmp = -paddr & (PAGE_SIZE - 1))) {
			pushcl040(paddr & PAGE_MASK);
			if ((len -= tmp) <= 0)
				return;
			paddr += tmp;
		}
		tmp = PAGE_SIZE;
		paddr &= PAGE_MASK;
		while ((len -= tmp) >= 0) {
			clear040(paddr);
			paddr += tmp;
		}
		if ((len += tmp))
			/* a page boundary gets crossed at the end */
			pushcl040(paddr);
	}
	else /* 68030 or 68020 */
		asm volatile ("movec %/cacr,%/d0\n\t"
			      "oriw %0,%/d0\n\t"
			      "movec %/d0,%/cacr"
			      : : "i" (FLUSH_I_AND_D)
			      : "d0");
#ifdef CONFIG_M68K_L2_CACHE
	if (mach_l2_flush)
		mach_l2_flush(0);
#endif
}

/*
 * cache_push() semantics: Write back any dirty cache data in the given area,
 * and invalidate the range in the instruction cache. It need not (but may)
 * invalidate those entries also in the data cache. The range is defined by a
 * _physical_ address.
 */

void cache_push (unsigned long paddr, int len)
{
	if (CPU_IS_040_OR_060) {
		int tmp = PAGE_SIZE;

		/*
		 * on 68040 or 68060, push cache lines for pages in the range;
		 * on the '040 this also invalidates the pushed lines, but not on
		 * the '060!
		 */
		len += paddr & (PAGE_SIZE - 1);
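
		/*
		 * Hedged example of the length adjustment (PAGE_SIZE assumed
		 * 4096): paddr == 0x1234, len == 0x100 becomes len == 0x334
		 * once the in-page offset is added; after paddr is rounded
		 * down below, a single pushcli040() pass covers the whole
		 * page containing the original range.
		 */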
		/*
		 * Work around bug I17 in the 68060 affecting some instruction
		 * lines not being invalidated properly.
		 */
		paddr &= PAGE_MASK;

		do {
			pushcli040(paddr);
			paddr += tmp;
		} while ((len -= tmp) > 0);
	}
	/*
	 * 68030/68020 have no writeback cache. On the other hand,
	 * cache_push is actually a superset of cache_clear (the lines
	 * get written back and invalidated), so we should make sure
	 * to perform the corresponding actions. After all, this is getting
	 * called in places where we've just loaded code, or whatever, so
	 * flushing the icache is appropriate; flushing the dcache shouldn't
	 * be required.
	 */
	else /* 68030 or 68020 */
		asm volatile ("movec %/cacr,%/d0\n\t"
			      "oriw %0,%/d0\n\t"
			      "movec %/d0,%/cacr"
			      : : "i" (FLUSH_I)
			      : "d0");
#ifdef CONFIG_M68K_L2_CACHE
	if (mach_l2_flush)
		mach_l2_flush(1);
#endif
}

#undef clear040
#undef cleari040
#undef push040
#undef pushcl040
#undef pushcli040

#ifndef CONFIG_SINGLE_MEMORY_CHUNK
int mm_end_of_chunk (unsigned long addr, int len)
{
	int i;

	for (i = 0; i < m68k_num_memory; i++)
		if (m68k_memory[i].addr + m68k_memory[i].size == addr + len)
			return 1;
	return 0;
}
#endif