/*
 *  $Id: init.c,v 1.183 1999/09/05 19:29:44 cort Exp $
 *
 *  Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 *
 *  Modifications by Paul Mackerras (PowerMac) (paulus@cs.anu.edu.au)
 *  and Cort Dougan (PReP) (cort@cs.nmt.edu)
 *  Copyright (C) 1996 Paul Mackerras
 *  Amiga/APUS changes by Jesper Skov (jskov@cygnus.co.uk).
 *
 *  Derived from "arch/i386/mm/init.c"
 *    Copyright (C) 1991, 1992, 1993, 1994 Linus Torvalds
 *
 *  This program is free software; you can redistribute it and/or
 *  modify it under the terms of the GNU General Public License
 *  as published by the Free Software Foundation; either version
 *  2 of the License, or (at your option) any later version.
 */
#include <linux/config.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/ptrace.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/stddef.h>
#include <linux/vmalloc.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/openpic.h>
#ifdef CONFIG_BLK_DEV_INITRD
#include <linux/blk.h>		/* for initrd_* */
#endif
#include <asm/prom.h>
#include <asm/io.h>
#include <asm/mmu_context.h>
#include <asm/pgtable.h>
#include <asm/mmu.h>
#include <asm/residual.h>
#include <asm/uaccess.h>
#include <asm/8xx_immap.h>
#include <asm/mbx.h>
#include <asm/bootx.h>
#include <asm/machdep.h>
#include <asm/setup.h>
#include <asm/amigahw.h>
#include <asm/gemini.h>
atomic_t next_mmu_context;
unsigned long *end_of_DRAM;
int mem_init_done;
extern pgd_t swapper_pg_dir[];
extern char _start[], _end[];
extern char etext[], _stext[];
extern char *klimit;
extern char __init_begin, __init_end;
extern char __prep_begin, __prep_end;
extern char __pmac_begin, __pmac_end;
extern char __apus_begin, __apus_end;
extern char __openfirmware_begin, __openfirmware_end;

struct device_node *memory_node;
unsigned long ioremap_base;
unsigned long ioremap_bot;
unsigned long avail_start;
extern int num_memory;
extern struct mem_info memory[NUM_MEMINFO];
extern boot_infos_t *boot_infos;

struct pgtable_cache_struct quicklists;
static void *MMU_get_page(void);
unsigned long *prep_find_end_of_memory(void);
unsigned long *pmac_find_end_of_memory(void);
unsigned long *apus_find_end_of_memory(void);
unsigned long *gemini_find_end_of_memory(void);
extern unsigned long *find_end_of_memory(void);
#ifdef CONFIG_MBX
unsigned long *mbx_find_end_of_memory(void);
#endif /* CONFIG_MBX */
static void mapin_ram(void);
void map_page(unsigned long va, unsigned long pa, int flags);
extern void die_if_kernel(char *, struct pt_regs *, long);
extern void show_net_buffers(void);
/*
 * The following stuff defines a data structure for representing
 * areas of memory as an array of (address, length) pairs, and
 * procedures for manipulating them.
 */
#define MAX_MEM_REGIONS 32

struct mem_pieces {
	int n_regions;
	struct reg_property regions[MAX_MEM_REGIONS];
};

struct mem_pieces phys_mem;
struct mem_pieces phys_avail;
struct mem_pieces prom_mem;

static void remove_mem_piece(struct mem_pieces *, unsigned, unsigned, int);
void *find_mem_piece(unsigned, unsigned);
static void print_mem_pieces(struct mem_pieces *);
static void append_mem_piece(struct mem_pieces *, unsigned, unsigned);
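
/*
 * Illustrative sketch (not part of the original code): how the mem_pieces
 * routines above compose.  The region sizes here are hypothetical.
 */
#if 0
static void __init example_mem_pieces(void)
{
	struct mem_pieces avail;

	avail.n_regions = 0;
	append_mem_piece(&avail, 0, 64 << 20);	/* one piece: [0, 64MB) */
	/* carve out a hypothetical 2MB kernel image at physical 0 */
	remove_mem_piece(&avail, 0, 2 << 20, 1);
	print_mem_pieces(&avail);		/* prints " [200000, 4000000)" */
}
#endif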
extern struct task_struct *current_set[NR_CPUS];

PTE *Hash, *Hash_end;
unsigned long Hash_size, Hash_mask;

#ifndef CONFIG_8xx
#ifdef CONFIG_PPC64
unsigned long long _SDR1;
#else
unsigned long _SDR1;
#endif

static void hash_init(void);
union ubat {			/* BAT register values to be loaded */
	BAT	bat;
#ifdef CONFIG_PPC64
	u64	word[2];
#else
	u32	word[2];
#endif
} BATS[4][2];			/* 4 pairs of IBAT, DBAT */

struct batrange {		/* stores address ranges mapped by BATs */
	unsigned long start;
	unsigned long limit;
	unsigned long phys;
} bat_addrs[4];
/*
 * Return PA for this VA if it is mapped by a BAT, or 0
 */
static inline unsigned long v_mapped_by_bats(unsigned long va)
{
	int b;

	for (b = 0; b < 4; ++b)
		if (va >= bat_addrs[b].start && va < bat_addrs[b].limit)
			return bat_addrs[b].phys + (va - bat_addrs[b].start);
	return 0;
}
/*
 * Return VA for a given PA or 0 if not mapped
 */
static inline unsigned long p_mapped_by_bats(unsigned long pa)
{
	int b;

	for (b = 0; b < 4; ++b)
		if (pa >= bat_addrs[b].phys
		    && pa < (bat_addrs[b].limit - bat_addrs[b].start)
			    + bat_addrs[b].phys)
			return bat_addrs[b].start + (pa - bat_addrs[b].phys);
	return 0;
}

#else /* CONFIG_8xx */

/* 8xx doesn't have BATs */
#define v_mapped_by_bats(x)	(0UL)
#define p_mapped_by_bats(x)	(0UL)
#endif /* CONFIG_8xx */
/*
 * this tells the system to map all of ram with the segregs
 * (i.e. page tables) instead of the bats.
 * -- Cort
 */
int __map_without_bats = 0;
void __bad_pte(pmd_t *pmd)
{
	printk("Bad pmd in pte_alloc: %08lx\n", pmd_val(*pmd));
	pmd_val(*pmd) = (unsigned long) BAD_PAGETABLE;
}
pte_t *get_pte_slow(pmd_t *pmd, unsigned long offset)
{
	pte_t *pte;

	if (pmd_none(*pmd)) {
		if (!mem_init_done)
			pte = (pte_t *) MMU_get_page();
		else if ((pte = (pte_t *) get_zero_page_fast()) == NULL)
			if ((pte = (pte_t *) __get_free_page(GFP_KERNEL)))
				clear_page((unsigned long)pte);
		if (pte) {
			pmd_val(*pmd) = (unsigned long)pte;
			return pte + offset;
		}
		pmd_val(*pmd) = (unsigned long)BAD_PAGETABLE;
		return NULL;
	}
	if (pmd_bad(*pmd)) {
		__bad_pte(pmd);
		return NULL;
	}
	return (pte_t *) pmd_page(*pmd) + offset;
}
int do_check_pgt_cache(int low, int high)
{
	int freed = 0;

	if (pgtable_cache_size > high) {
		do {
			if (pgd_quicklist)
				free_pgd_slow(get_pgd_fast()), freed++;
			if (pmd_quicklist)
				free_pmd_slow(get_pmd_fast()), freed++;
			if (pte_quicklist)
				free_pte_slow(get_pte_fast()), freed++;
		} while (pgtable_cache_size > low);
	}
	return freed;
}
/*
 * BAD_PAGE is the page that is used for page faults when linux
 * is out-of-memory. Older versions of linux just did a
 * do_exit(), but using this instead means there is less risk
 * for a process dying in kernel mode, possibly leaving an inode
 * unused etc..
 *
 * BAD_PAGETABLE is the accompanying page-table: it is initialized
 * to point to BAD_PAGE entries.
 *
 * ZERO_PAGE is a special page that is used for zero-initialized
 * data and COW.
 */
unsigned long empty_bad_page_table;
pte_t * __bad_pagetable(void)
{
	__clear_user((void *)empty_bad_page_table, PAGE_SIZE);
	return (pte_t *) empty_bad_page_table;
}
unsigned long empty_bad_page;

pte_t __bad_page(void)
{
	__clear_user((void *)empty_bad_page, PAGE_SIZE);
	return pte_mkdirty(mk_pte(empty_bad_page, PAGE_SHARED));
}
void show_mem(void)
{
	int i, free = 0, total = 0, reserved = 0;
	int shared = 0, cached = 0;
	struct task_struct *p;

	printk("Mem-info:\n");
	show_free_areas();
	printk("Free swap: %6dkB\n", nr_swap_pages<<(PAGE_SHIFT-10));
	i = max_mapnr;
	while (i-- > 0) {
		total++;
		if (PageReserved(mem_map+i))
			reserved++;
		else if (PageSwapCache(mem_map+i))
			cached++;
		else if (!atomic_read(&mem_map[i].count))
			free++;
		else
			shared += atomic_read(&mem_map[i].count) - 1;
	}
	printk("%d pages of RAM\n", total);
	printk("%d free pages\n", free);
	printk("%d reserved pages\n", reserved);
	printk("%d pages shared\n", shared);
	printk("%d pages swap cached\n", cached);
	printk("%d pages in page table cache\n", (int)pgtable_cache_size);
	show_buffers();
	printk("%-8s %3s %8s %8s %8s %9s %8s", "Process", "Pid",
	       "Ctx", "Ctx<<4", "Last Sys", "pc", "task");
#ifdef __SMP__
	printk(" %3s", "CPU");
#endif /* __SMP__ */
	printk("\n");
	for_each_task(p)
	{
		int iscur = 0;

		printk("%-8.8s %3d %8ld %8ld %8ld %c%08lx %08lx ",
		       p->comm, p->pid,
		       (p->mm) ? p->mm->context : 0,
		       (p->mm) ? (p->mm->context<<4) : 0,
		       p->thread.last_syscall,
		       (p->thread.regs) ? (user_mode(p->thread.regs) ? 'u' : 'k') : '?',
		       (p->thread.regs) ? p->thread.regs->nip : 0,
		       (ulong)p);
#ifdef __SMP__
		printk("%3d ", p->processor);
		if ( (p->processor != NO_PROC_ID) &&
		     (p == current_set[p->processor]) )
		{
			iscur = 1;
			printk("current");
		}
#else
		if ( p == current )
		{
			iscur = 1;
			printk("current");
		}
		if ( p == last_task_used_math )
		{
			if ( iscur )
				printk(",");
			printk("last math");
		}
#endif /* __SMP__ */
		printk("\n");
	}
}
void si_meminfo(struct sysinfo *val)
{
	int i;

	i = max_mapnr;
	val->totalram = 0;
	val->sharedram = 0;
	val->freeram = nr_free_pages << PAGE_SHIFT;
	val->bufferram = atomic_read(&buffermem);
	while (i-- > 0) {
		if (PageReserved(mem_map+i))
			continue;
		val->totalram++;
		if (!atomic_read(&mem_map[i].count))
			continue;
		val->sharedram += atomic_read(&mem_map[i].count) - 1;
	}
	val->totalram <<= PAGE_SHIFT;
	val->sharedram <<= PAGE_SHIFT;
}
void *
ioremap(unsigned long addr, unsigned long size)
{
	return __ioremap(addr, size, _PAGE_NO_CACHE);
}
void *
__ioremap(unsigned long addr, unsigned long size, unsigned long flags)
{
	unsigned long p, v, i;

	/*
	 * Choose an address to map it to.
	 * Once the vmalloc system is running, we use it.
	 * Before then, we map addresses >= ioremap_base
	 * virt == phys; for addresses below this we use
	 * space going down from ioremap_base (ioremap_bot
	 * records where we're up to).
	 */
	p = addr & PAGE_MASK;
	size = PAGE_ALIGN(addr + size) - p;

	/*
	 * If the address lies within the first 16 MB, assume it's in ISA
	 * memory space
	 */
	if (p < 16*1024*1024)
		p += _ISA_MEM_BASE;

	/*
	 * Don't allow anybody to remap normal RAM that we're using.
	 * mem_init() sets high_memory so only do the check after that.
	 */
	if ( mem_init_done && (p < virt_to_phys(high_memory)) )
	{
		printk("__ioremap(): phys addr %0lx is RAM lr %p\n", p,
		       __builtin_return_address(0));
		return NULL;
	}

	if (size == 0)
		return NULL;

	/*
	 * Is it already mapped? Perhaps overlapped by a previous
	 * BAT mapping. If the whole area is mapped then we're done,
	 * otherwise remap it since we want to keep the virt addrs for
	 * each request contiguous.
	 *
	 * We make the assumption here that if the bottom and top
	 * of the range we want are mapped then it's mapped to the
	 * same virt address (and this is contiguous).
	 * -- Cort
	 */
	if ((v = p_mapped_by_bats(p)) /*&& p_mapped_by_bats(p+size-1)*/ )
		goto out;

	if (mem_init_done) {
		struct vm_struct *area;
		area = get_vm_area(size);
		if (area == 0)
			return NULL;
		v = VMALLOC_VMADDR(area->addr);
	} else {
		if (p >= ioremap_base)
			v = p;
		else
			v = (ioremap_bot -= size);
	}

	if ((flags & _PAGE_PRESENT) == 0)
		flags |= pgprot_val(PAGE_KERNEL);
	if (flags & (_PAGE_NO_CACHE | _PAGE_WRITETHRU))
		flags |= _PAGE_GUARDED;

	/*
	 * Is it a candidate for a BAT mapping?
	 */
	for (i = 0; i < size; i += PAGE_SIZE)
		map_page(v+i, p+i, flags);
out:
	return (void *) (v + (addr & ~PAGE_MASK));
}
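
/*
 * Illustrative sketch (not part of the original code): a typical use of
 * ioremap() from a driver.  The physical address and register layout
 * below are hypothetical.
 */
#if 0
static void example_ioremap(void)
{
	volatile unsigned int *regs;

	/* map 4kB of device registers, uncached and guarded */
	regs = (volatile unsigned int *) ioremap(0xf3000000, 0x1000);
	if (regs != NULL) {
		unsigned int id = regs[0];	/* read a (hypothetical) ID register */
		printk("device id: %08x\n", id);
	}
}
#endif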
void iounmap(void *addr)
{
	/* XXX todo */
}
unsigned long iopa(unsigned long addr)
{
	unsigned long pa;
	pmd_t *pd;
	pte_t *pg;

	/* Check the BATs */
	pa = v_mapped_by_bats(addr);
	if (pa)
		return pa;

	/* Do we have a page table? */
	if (init_mm.pgd == NULL)
		return 0;

	/* Use upper 10 bits of addr to index the first level map */
	pd = (pmd_t *) (init_mm.pgd + (addr >> PGDIR_SHIFT));
	if (pmd_none(*pd))
		return 0;

	/* Use middle 10 bits of addr to index the second-level map */
	pg = pte_offset(pd, addr);
	return (pte_val(*pg) & PAGE_MASK) | (addr & ~PAGE_MASK);
}
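
/*
 * Illustrative sketch (not part of the original code): iopa() recovers
 * the physical address behind a mapped virtual address, e.g. for handing
 * a buffer to a DMA engine.  The address below is hypothetical.
 */
#if 0
static void example_iopa(void)
{
	void *v = ioremap(0xf3000000, 0x1000);
	unsigned long phys = iopa((unsigned long) v);	/* == 0xf3000000 */
}
#endif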
void
map_page(unsigned long va, unsigned long pa, int flags)
{
	pmd_t *pd, oldpd;
	pte_t *pg;

	/* Use upper 10 bits of VA to index the first level map */
	pd = pmd_offset(pgd_offset_k(va), va);
	oldpd = *pd;
	/* Use middle 10 bits of VA to index the second-level map */
	pg = pte_alloc(pd, va);
	if (pmd_none(oldpd) && mem_init_done)
		set_pgdir(va, *(pgd_t *)pd);
	set_pte(pg, mk_pte_phys(pa & PAGE_MASK, __pgprot(flags)));
	flush_hash_page(0, va);
}
#ifndef CONFIG_8xx
/*
 * TLB flushing:
 *
 *  - flush_tlb_all() flushes all processes TLBs
 *  - flush_tlb_mm(mm) flushes the specified mm context TLB's
 *  - flush_tlb_page(vma, vmaddr) flushes one page
 *  - flush_tlb_range(mm, start, end) flushes a range of pages
 *
 * since the hardware hash table functions as an extension of the
 * tlb as far as the linux tables are concerned, flush it too.
 * -- Cort
 */

/*
 * Flush all tlb/hash table entries (except perhaps for those
 * mapping RAM starting at PAGE_OFFSET, since they never change).
 */
void
local_flush_tlb_all(void)
{
	__clear_user(Hash, Hash_size);
	_tlbia();
}
/*
 * Flush all the (user) entries for the address space described
 * by mm. We can't rely on mm->mmap describing all the entries
 * that might be in the hash table.
 */
void
local_flush_tlb_mm(struct mm_struct *mm)
{
	mm->context = NO_CONTEXT;
	if (mm == current->mm)
		activate_context(current);
}
void
local_flush_tlb_page(struct vm_area_struct *vma, unsigned long vmaddr)
{
	if (vmaddr < TASK_SIZE)
		flush_hash_page(vma->vm_mm->context, vmaddr);
	else
		flush_hash_page(0, vmaddr);
}
/*
 * for each page addr in the range, call MMU_invalidate_page()
 * if the range is very large and the hash table is small it might be
 * faster to do a search of the hash table and just invalidate pages
 * that are in the range but that's for study later.
 * -- Cort
 */
void
local_flush_tlb_range(struct mm_struct *mm, unsigned long start, unsigned long end)
{
	start &= PAGE_MASK;

	if (end - start > 20 * PAGE_SIZE)
	{
		flush_tlb_mm(mm);
		return;
	}

	for (; start < end && start < TASK_SIZE; start += PAGE_SIZE)
	{
		flush_hash_page(mm->context, start);
	}
}
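
/*
 * Illustrative sketch (not part of the original code): how the flush
 * routines above are meant to be used.  After page table entries for a
 * user range change, the range variant drops stale hash/TLB entries one
 * page at a time; very large ranges and whole address spaces are cheaper
 * to invalidate wholesale.  The addresses below are hypothetical.
 */
#if 0
static void example_tlb_flush(struct mm_struct *mm)
{
	/* small range: flushed page by page via flush_hash_page() */
	local_flush_tlb_range(mm, 0x10000000, 0x10000000 + 4 * PAGE_SIZE);

	/* whole address space: invalidate the context instead */
	local_flush_tlb_mm(mm);
}
#endif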
/*
 * The context counter has overflowed.
 * We set mm->context to NO_CONTEXT for all mm's in the system.
 * We assume we can get to all mm's by looking at tsk->mm for
 * all tasks in the system.
 */
void
mmu_context_overflow(void)
{
	struct task_struct *tsk;

	printk(KERN_DEBUG "mmu_context_overflow\n");
	read_lock(&tasklist_lock);
	for_each_task(tsk) {
		if (tsk->mm)
			tsk->mm->context = NO_CONTEXT;
	}
	read_unlock(&tasklist_lock);
	flush_hash_segments(0x10, 0xffffff);
	atomic_set(&next_mmu_context, 0);
	/* make sure current always has a context */
	current->mm->context = MUNGE_CONTEXT(atomic_inc_return(&next_mmu_context));
	set_context(current->mm->context);
}
#endif /* CONFIG_8xx */
/*
 * Scan a region for a piece of a given size with the required alignment.
 */
void __init *find_mem_piece(unsigned size, unsigned align)
{
	int i;
	unsigned a, e;
	struct mem_pieces *mp = &phys_avail;

	for (i = 0; i < mp->n_regions; ++i) {
		a = mp->regions[i].address;
		e = a + mp->regions[i].size;
		a = (a + align - 1) & -align;
		if (a + size <= e) {
			remove_mem_piece(mp, a, size, 1);
			return __va(a);
		}
	}
	printk("Couldn't find %u bytes at %u alignment\n", size, align);
	abort();
	return NULL;
}
/*
 * Remove some memory from an array of pieces
 */
static void __init
remove_mem_piece(struct mem_pieces *mp, unsigned start, unsigned size,
		 int must_exist)
{
	int i, j;
	unsigned end, rs, re;
	struct reg_property *rp;

	end = start + size;
	for (i = 0, rp = mp->regions; i < mp->n_regions; ++i, ++rp) {
		if (end > rp->address && start < rp->address + rp->size)
			break;
	}
	if (i >= mp->n_regions) {
		if (must_exist)
			printk("remove_mem_piece: [%x,%x) not in any region\n",
			       start, end);
		return;
	}
	for (; i < mp->n_regions && end > rp->address; ++i, ++rp) {
		rs = rp->address;
		re = rs + rp->size;
		if (must_exist && (start < rs || end > re)) {
			printk("remove_mem_piece: bad overlap [%x,%x) with",
			       start, end);
			print_mem_pieces(mp);
			must_exist = 0;
		}
		if (start > rs) {
			rp->size = start - rs;
			if (end < re) {
				/* need to split this entry */
				if (mp->n_regions >= MAX_MEM_REGIONS)
					panic("eek... mem_pieces overflow");
				for (j = mp->n_regions; j > i + 1; --j)
					mp->regions[j] = mp->regions[j-1];
				++mp->n_regions;
				rp[1].address = end;
				rp[1].size = re - end;
			}
		} else {
			if (end < re) {
				rp->address = end;
				rp->size = re - end;
			} else {
				/* need to delete this entry */
				for (j = i; j < mp->n_regions - 1; ++j)
					mp->regions[j] = mp->regions[j+1];
				--mp->n_regions;
				--i;
				--rp;
			}
		}
	}
}
static void __init
print_mem_pieces(struct mem_pieces *mp)
{
	int i;

	for (i = 0; i < mp->n_regions; ++i)
		printk(" [%x, %x)", mp->regions[i].address,
		       mp->regions[i].address + mp->regions[i].size);
	printk("\n");
}
/*
 * Add some memory to an array of pieces
 */
static void __init
append_mem_piece(struct mem_pieces *mp, unsigned start, unsigned size)
{
	struct reg_property *rp;

	if (mp->n_regions >= MAX_MEM_REGIONS)
		return;
	rp = &mp->regions[mp->n_regions++];
	rp->address = start;
	rp->size = size;
}
#ifndef CONFIG_8xx
static void hash_init(void);
static void get_mem_prop(char *, struct mem_pieces *);
static void sort_mem_pieces(struct mem_pieces *);
static void coalesce_mem_pieces(struct mem_pieces *);
static void __init
sort_mem_pieces(struct mem_pieces *mp)
{
	unsigned long a, s;
	int i, j;

	for (i = 1; i < mp->n_regions; ++i) {
		a = mp->regions[i].address;
		s = mp->regions[i].size;
		for (j = i - 1; j >= 0; --j) {
			if (a >= mp->regions[j].address)
				break;
			mp->regions[j+1] = mp->regions[j];
		}
		mp->regions[j+1].address = a;
		mp->regions[j+1].size = s;
	}
}
static void __init
coalesce_mem_pieces(struct mem_pieces *mp)
{
	unsigned long a, s, ns;
	int i, j, d;

	d = 0;
	for (i = 0; i < mp->n_regions; i = j) {
		a = mp->regions[i].address;
		s = mp->regions[i].size;
		for (j = i + 1; j < mp->n_regions
			     && mp->regions[j].address - a <= s; ++j) {
			ns = mp->regions[j].address + mp->regions[j].size - a;
			if (ns > s)
				s = ns;
		}
		mp->regions[d].address = a;
		mp->regions[d].size = s;
		++d;
	}
	mp->n_regions = d;
}
/*
 * Read in a property describing some pieces of memory.
 */
static void __init
get_mem_prop(char *name, struct mem_pieces *mp)
{
	struct reg_property *rp;
	int s;

	rp = (struct reg_property *) get_property(memory_node, name, &s);
	if (rp == NULL) {
		printk(KERN_ERR "error: couldn't get %s property on /memory\n",
		       name);
		abort();
	}
	mp->n_regions = s / sizeof(mp->regions[0]);
	memcpy(mp->regions, rp, s);

	/* Make sure the pieces are sorted. */
	sort_mem_pieces(mp);
	coalesce_mem_pieces(mp);
}
/*
 * Set up one of the I/D BAT (block address translation) register pairs.
 * The parameters are not checked; in particular size must be a power
 * of 2 between 128k and 256M.
 */
void __init setbat(int index, unsigned long virt, unsigned long phys,
		   unsigned int size, int flags)
{
	unsigned int bl;
	int wimgxpp;
	union ubat *bat = BATS[index];

	bl = (size >> 17) - 1;
	if ((_get_PVR() >> 16) != 1) {
		/* 603, 604, etc. */
		/* Do DBAT first */
		wimgxpp = flags & (_PAGE_WRITETHRU | _PAGE_NO_CACHE
				   | _PAGE_COHERENT | _PAGE_GUARDED);
		wimgxpp |= (flags & _PAGE_RW)? BPP_RW: BPP_RX;
		bat[1].word[0] = virt | (bl << 2) | 2; /* Vs=1, Vp=0 */
		bat[1].word[1] = phys | wimgxpp;
		if (flags & _PAGE_USER)
			bat[1].bat.batu.vp = 1;
		if (flags & _PAGE_GUARDED) {
			/* G bit must be zero in IBATs */
			bat[0].word[0] = bat[0].word[1] = 0;
		} else {
			/* make IBAT same as DBAT */
			bat[0] = bat[1];
		}
	} else {
		/* 601 cpu */
		if (bl > BL_8M)
			bl = BL_8M;
		wimgxpp = flags & (_PAGE_WRITETHRU | _PAGE_NO_CACHE
				   | _PAGE_COHERENT);
		wimgxpp |= (flags & _PAGE_RW)?
			((flags & _PAGE_USER)? PP_RWRW: PP_RWXX): PP_RXRX;
		bat->word[0] = virt | wimgxpp | 4;	/* Ks=0, Ku=1 */
		bat->word[1] = phys | bl | 0x40;	/* V=1 */
	}

	bat_addrs[index].start = virt;
	bat_addrs[index].limit = virt + ((bl + 1) << 17) - 1;
	bat_addrs[index].phys = phys;
}

#define IO_PAGE	(_PAGE_NO_CACHE | _PAGE_GUARDED | _PAGE_RW)
#ifdef __SMP__
#define RAM_PAGE (_PAGE_RW|_PAGE_COHERENT)
#else
#define RAM_PAGE (_PAGE_RW)
#endif
#endif /* CONFIG_8xx */
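
/*
 * Illustrative example (not part of the original code): a hypothetical
 * 16MB I/O region at 0xfe000000 could be mapped 1:1 with BAT pair 2 by
 *	setbat(2, 0xfe000000, 0xfe000000, 0x01000000, IO_PAGE);
 * As noted above, the size must be a power of 2 between 128k and 256M.
 */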
/*
 * Map in all of physical memory starting at KERNELBASE.
 */
#define PAGE_KERNEL_RO	__pgprot(_PAGE_PRESENT | _PAGE_ACCESSED)

static void __init
mapin_ram(void)
{
	int i;
	unsigned long v, p, s, f;
#ifndef CONFIG_8xx

	if (!__map_without_bats) {
		unsigned long tot, mem_base, bl, done;
		unsigned long max_size = (256<<20);
		unsigned long align;

		/* Set up BAT2 and if necessary BAT3 to cover RAM. */
		mem_base = __pa(KERNELBASE);

		/* Make sure we don't map a block larger than the
		   smallest alignment of the physical address. */
		/* alignment of mem_base */
		align = ~(mem_base-1) & mem_base;
		/* set BAT block size to MIN(max_size, align) */
		if (align && align < max_size)
			max_size = align;

		tot = (unsigned long)end_of_DRAM - KERNELBASE;
		for (bl = 128<<10; bl < max_size; bl <<= 1) {
			if (bl * 2 > tot)
				break;
		}

		setbat(2, KERNELBASE, mem_base, bl, RAM_PAGE);
		done = (unsigned long)bat_addrs[2].limit - KERNELBASE + 1;
		if ((done < tot) && !bat_addrs[3].limit) {
			/* use BAT3 to cover a bit more */
			tot -= done;
			for (bl = 128<<10; bl < max_size; bl <<= 1)
				if (bl * 2 > tot)
					break;
			setbat(3, KERNELBASE+done, mem_base+done, bl,
			       RAM_PAGE);
		}
	}

	v = KERNELBASE;
	for (i = 0; i < phys_mem.n_regions; ++i) {
		p = phys_mem.regions[i].address;
		for (s = 0; s < phys_mem.regions[i].size; s += PAGE_SIZE) {
			f = _PAGE_PRESENT | _PAGE_ACCESSED;
			if ((char *) v < _stext || (char *) v >= etext)
				f |= _PAGE_RW | _PAGE_DIRTY | _PAGE_HWWRITE;
			else
				/* On the powerpc, no user access
				   forces R/W kernel access */
				f |= _PAGE_USER;
			map_page(v, p, f);
			v += PAGE_SIZE;
			p += PAGE_SIZE;
		}
	}
#else /* CONFIG_8xx */
	for (i = 0; i < phys_mem.n_regions; ++i) {
		v = (ulong)__va(phys_mem.regions[i].address);
		p = phys_mem.regions[i].address;
		for (s = 0; s < phys_mem.regions[i].size; s += PAGE_SIZE) {
			/* On the MPC8xx, we want the page shared so we
			 * don't get ASID compares on kernel space.
			 */
			f = _PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_SHARED;

			/* I don't really need the rest of this code, but
			 * I grabbed it because I think the line:
			 *	f |= _PAGE_USER
			 * is incorrect. It needs to be set to bits we
			 * don't define to cause a kernel read-only. On
			 * the MPC8xx, the PAGE_DIRTY takes care of that
			 * for us (along with the RW software state).
			 */
			if ((char *) v < _stext || (char *) v >= etext)
				f |= _PAGE_RW | _PAGE_DIRTY | _PAGE_HWWRITE;
			map_page(v, p, f);
			v += PAGE_SIZE;
			p += PAGE_SIZE;
		}
	}
#endif /* CONFIG_8xx */
}
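
/*
 * Worked example for the BAT sizing in mapin_ram() (illustrative, with
 * hypothetical numbers): if mem_base = 0x00800000 (8MB), then
 * align = ~(mem_base-1) & mem_base = 0x00800000 (the lowest set bit),
 * so max_size shrinks from 256MB to 8MB and BAT2 covers at most an 8MB
 * block; any RAM left over is mapped through the page tables instead.
 */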
/* This can get called from ioremap, so don't make it an __init, OK? */
static void *MMU_get_page(void)
{
	void *p;

	if (mem_init_done) {
		p = (void *) __get_free_page(GFP_KERNEL);
		if (p == 0)
			panic("couldn't get a page in MMU_get_page");
	} else {
		p = find_mem_piece(PAGE_SIZE, PAGE_SIZE);
	}
	__clear_user(p, PAGE_SIZE);
	return p;
}
void __init free_initmem(void)
{
	unsigned long a;
	unsigned long num_freed_pages = 0, num_prep_pages = 0,
		num_pmac_pages = 0, num_openfirmware_pages = 0,
		num_apus_pages = 0;
#define FREESEC(START,END,CNT) do { \
	a = (unsigned long)(&START); \
	for (; a < (unsigned long)(&END); a += PAGE_SIZE) { \
		clear_bit(PG_reserved, &mem_map[MAP_NR(a)].flags); \
		atomic_set(&mem_map[MAP_NR(a)].count, 1); \
		free_page(a); \
		CNT++; \
	} \
} while (0)

	FREESEC(__init_begin,__init_end,num_freed_pages);
	switch (_machine)
	{
	case _MACH_Pmac:
		FREESEC(__apus_begin,__apus_end,num_apus_pages);
		FREESEC(__prep_begin,__prep_end,num_prep_pages);
		break;
	case _MACH_chrp:
		FREESEC(__apus_begin,__apus_end,num_apus_pages);
		FREESEC(__pmac_begin,__pmac_end,num_pmac_pages);
		FREESEC(__prep_begin,__prep_end,num_prep_pages);
		break;
	case _MACH_prep:
		FREESEC(__apus_begin,__apus_end,num_apus_pages);
		FREESEC(__pmac_begin,__pmac_end,num_pmac_pages);
		break;
	case _MACH_mbx:
		FREESEC(__apus_begin,__apus_end,num_apus_pages);
		FREESEC(__pmac_begin,__pmac_end,num_pmac_pages);
		FREESEC(__prep_begin,__prep_end,num_prep_pages);
		break;
	case _MACH_apus:
		FREESEC(__pmac_begin,__pmac_end,num_pmac_pages);
		FREESEC(__prep_begin,__prep_end,num_prep_pages);
		break;
	case _MACH_gemini:
		FREESEC(__apus_begin,__apus_end,num_apus_pages);
		FREESEC(__pmac_begin,__pmac_end,num_pmac_pages);
		FREESEC(__prep_begin,__prep_end,num_prep_pages);
		break;
	}

	if ( !have_of )
		FREESEC( __openfirmware_begin, __openfirmware_end,
			 num_openfirmware_pages );

	printk ("Freeing unused kernel memory: %ldk init",
		(num_freed_pages * PAGE_SIZE) >> 10);
	if ( num_prep_pages )
		printk(" %ldk prep", (num_prep_pages*PAGE_SIZE)>>10);
	if ( num_pmac_pages )
		printk(" %ldk pmac", (num_pmac_pages*PAGE_SIZE)>>10);
	if ( num_openfirmware_pages )
		printk(" %ldk open firmware", (num_openfirmware_pages*PAGE_SIZE)>>10);
	if ( num_apus_pages )
		printk(" %ldk apus", (num_apus_pages*PAGE_SIZE)>>10);
	printk("\n");
}
/*
 * Do very early mm setup such as finding the size of memory
 * and setting up the hash table.
 * A lot of this is prep/pmac specific but a lot of it could
 * still be merged.
 * -- Cort
 */
void __init MMU_init(void)
{
#ifdef __SMP__
	if ( first_cpu_booted ) return;
#endif /* __SMP__ */
	if ( ppc_md.progress ) ppc_md.progress("MMU:enter", 0x111);
#ifndef CONFIG_8xx
	if (have_of)
		end_of_DRAM = pmac_find_end_of_memory();
#ifdef CONFIG_APUS
	else if (_machine == _MACH_apus)
		end_of_DRAM = apus_find_end_of_memory();
#endif
	else if ( _machine == _MACH_gemini )
		end_of_DRAM = gemini_find_end_of_memory();
	else /* prep */
		end_of_DRAM = prep_find_end_of_memory();

	if ( ppc_md.progress ) ppc_md.progress("MMU:hash init", 0x300);
	hash_init();
	_SDR1 = __pa(Hash) | (Hash_mask >> 10);
	ioremap_base = 0xf8000000;

	if ( ppc_md.progress ) ppc_md.progress("MMU:mapin", 0x301);
	/* Map in all of RAM starting at KERNELBASE */
	mapin_ram();

	/*
	 * Setup the bat mappings we're going to load that cover
	 * the io areas. RAM was mapped by mapin_ram().
	 * -- Cort
	 */
	if ( ppc_md.progress ) ppc_md.progress("MMU:setbat", 0x302);
	switch (_machine) {
	case _MACH_prep:
		setbat(0, 0x80000000, 0x80000000, 0x10000000, IO_PAGE);
		setbat(1, 0xf0000000, 0xc0000000, 0x08000000, IO_PAGE);
		ioremap_base = 0xf0000000;
		break;
	case _MACH_chrp:
		setbat(0, 0xf8000000, 0xf8000000, 0x08000000, IO_PAGE);
		setbat(1, 0x80000000, 0x80000000, 0x10000000, IO_PAGE);
		setbat(3, 0x90000000, 0x90000000, 0x10000000, IO_PAGE);
		break;
	case _MACH_Pmac:
		{
			unsigned long base = 0xf3000000;
			struct device_node *macio = find_devices("mac-io");
			if (macio && macio->n_addrs)
				base = macio->addrs[0].address;
			setbat(0, base, base, 0x100000, IO_PAGE);
			ioremap_base = 0xf0000000;
		}
		break;
	case _MACH_apus:
		/* Map PPC exception vectors. */
		setbat(0, 0xfff00000, 0xfff00000, 0x00020000, RAM_PAGE);
		/* Map chip and ZorroII memory */
		setbat(1, zTwoBase, 0x00000000, 0x01000000, IO_PAGE);
		/* Note: a temporary hack in arch/ppc/amiga/setup.c
		   (kernel_map) remaps individual IO regions to
		   0x90000000. */
		break;
	case _MACH_gemini:
		setbat(0, 0xf0000000, 0xf0000000, 0x10000000, IO_PAGE);
		setbat(1, 0x80000000, 0x80000000, 0x10000000, IO_PAGE);
		break;
	}
	ioremap_bot = ioremap_base;
#else /* CONFIG_8xx */
#ifdef CONFIG_MBX
	end_of_DRAM = mbx_find_end_of_memory();
#endif /* CONFIG_MBX */
	/* Map in all of RAM starting at KERNELBASE */
	mapin_ram();

	/* Now map in some of the I/O space that is generically needed
	 * or shared with multiple devices.
	 * All of this fits into the same 4Mbyte region, so it only
	 * requires one page table page.
	 */
	ioremap(NVRAM_ADDR, NVRAM_SIZE);
	ioremap(MBX_CSR_ADDR, MBX_CSR_SIZE);
	ioremap(IMAP_ADDR, IMAP_SIZE);
	ioremap(PCI_CSR_ADDR, PCI_CSR_SIZE);
	/* ide needs to be able to get at PCI space -- Cort */
	ioremap(0x80000000, 0x4000);
	ioremap(0x81000000, 0x4000);
#endif /* CONFIG_8xx */
	if ( ppc_md.progress ) ppc_md.progress("MMU:exit", 0x211);
}
/*
 * Find some memory for setup_arch to return.
 * We use the largest chunk of available memory as the area
 * that setup_arch returns, making sure that there are at
 * least 32 pages unused before this for MMU_get_page to use.
 */
unsigned long __init find_available_memory(void)
{
	int i, rn;
	unsigned long a, free;
	unsigned long start, end;

	if (_machine == _MACH_mbx) {
		/* Return the first, not the last region, because we
		 * may not yet have properly initialized the additional
		 * memory DIMM.
		 */
		a = PAGE_ALIGN(phys_avail.regions[0].address);
		avail_start = (unsigned long) __va(a);
		return avail_start;
	}

	rn = 0;
	for (i = 1; i < phys_avail.n_regions; ++i)
		if (phys_avail.regions[i].size > phys_avail.regions[rn].size)
			rn = i;

	free = 0;
	for (i = 0; i < rn; ++i) {
		start = phys_avail.regions[i].address;
		end = start + phys_avail.regions[i].size;
		free += (end & PAGE_MASK) - PAGE_ALIGN(start);
	}
	a = PAGE_ALIGN(phys_avail.regions[rn].address);
	if (free < 32 * PAGE_SIZE)
		a += 32 * PAGE_SIZE - free;
	avail_start = (unsigned long) __va(a);
	return avail_start;
}
/*
 * paging_init() sets up the page tables - in fact we've already done this.
 */
unsigned long __init paging_init(unsigned long start_mem, unsigned long end_mem)
{
	extern unsigned long free_area_init(unsigned long, unsigned long);
	/*
	 * Grab some memory for bad_page and bad_pagetable to use.
	 */
	empty_bad_page = PAGE_ALIGN(start_mem);
	empty_bad_page_table = empty_bad_page + PAGE_SIZE;
	start_mem = empty_bad_page + 2 * PAGE_SIZE;

	/* note: free_area_init uses its second argument
	   to size the mem_map array. */
	start_mem = free_area_init(start_mem, end_mem);
	return start_mem;
}
void __init mem_init(unsigned long start_mem, unsigned long end_mem)
{
	unsigned long addr;
	int i;
	unsigned long a, lim;
	int codepages = 0;
	int datapages = 0;
	int initpages = 0;
	extern unsigned int rtas_data, rtas_size;

	end_mem &= PAGE_MASK;
	high_memory = (void *) end_mem;
	max_mapnr = MAP_NR(high_memory);

	/* mark usable pages in the mem_map[] */
	start_mem = PAGE_ALIGN(start_mem);

	num_physpages = max_mapnr;	/* RAM is assumed contiguous */
	remove_mem_piece(&phys_avail, __pa(avail_start),
			 start_mem - avail_start, 1);

	for (addr = PAGE_OFFSET; addr < end_mem; addr += PAGE_SIZE)
		set_bit(PG_reserved, &mem_map[MAP_NR(addr)].flags);

	for (i = 0; i < phys_avail.n_regions; ++i) {
		a = (unsigned long) __va(phys_avail.regions[i].address);
		lim = a + phys_avail.regions[i].size;
		a = PAGE_ALIGN(a);
		for (; a < lim; a += PAGE_SIZE)
			clear_bit(PG_reserved, &mem_map[MAP_NR(a)].flags);
	}
	phys_avail.n_regions = 0;

#ifdef CONFIG_BLK_DEV_INITRD
	/* if we are booted from BootX with an initial ramdisk,
	   make sure the ramdisk pages aren't reserved. */
	if (initrd_start) {
		for (a = initrd_start; a < initrd_end; a += PAGE_SIZE)
			clear_bit(PG_reserved, &mem_map[MAP_NR(a)].flags);
	}
#endif /* CONFIG_BLK_DEV_INITRD */

	/* free the prom's memory - no-op on prep */
	for (i = 0; i < prom_mem.n_regions; ++i) {
		a = (unsigned long) __va(prom_mem.regions[i].address);
		lim = a + prom_mem.regions[i].size;
		a = PAGE_ALIGN(a);
		for (; a < lim; a += PAGE_SIZE)
			clear_bit(PG_reserved, &mem_map[MAP_NR(a)].flags);
	}

	for (addr = PAGE_OFFSET; addr < end_mem; addr += PAGE_SIZE) {
		if (PageReserved(mem_map + MAP_NR(addr))) {
			if (addr < (ulong) etext)
				codepages++;
			else if (addr >= (unsigned long)&__init_begin
				 && addr < (unsigned long)&__init_end)
				initpages++;
			else if (addr < (ulong) start_mem)
				datapages++;
			continue;
		}
		atomic_set(&mem_map[MAP_NR(addr)].count, 1);
#ifdef CONFIG_BLK_DEV_INITRD
		if (!initrd_start ||
		    addr < (initrd_start & PAGE_MASK) || addr >= initrd_end)
#endif /* CONFIG_BLK_DEV_INITRD */
#ifndef CONFIG_8xx
			if ( !rtas_data ||
			     addr < (rtas_data & PAGE_MASK) ||
			     addr >= (rtas_data+rtas_size) )
#endif /* CONFIG_8xx */
				free_page(addr);
	}

	printk("Memory: %luk available (%dk kernel code, %dk data, %dk init) [%08x,%08lx]\n",
	       (unsigned long) nr_free_pages << (PAGE_SHIFT-10),
	       codepages << (PAGE_SHIFT-10),
	       datapages << (PAGE_SHIFT-10),
	       initpages << (PAGE_SHIFT-10),
	       PAGE_OFFSET, end_mem);
	mem_init_done = 1;
}
#ifdef CONFIG_MBX
/*
 * This is a big hack right now, but it may turn into something real
 * someday.
 *
 * For the MBX860 (at this time anyway), there is nothing to initialize
 * associated with the PROM. Rather than include all of the prom.c
 * functions in the image just to get prom_init, all we really need right
 * now is the initialization of the physical memory region.
 */
unsigned long __init *mbx_find_end_of_memory(void)
{
	unsigned long kstart, ksize;
	bd_t *binfo;
	volatile memctl8xx_t *mcp;
	unsigned long *ret;

	binfo = (bd_t *)res;

	/*
	 * The MBX does weird things with the mmaps for ram.
	 * If there's no DIMM, it puts the onboard DRAM at
	 * 0, if there is a DIMM it sticks it at 0 and puts
	 * the DRAM at the end of the DIMM.
	 *
	 * In fact, it might be the best idea to just read the DRAM
	 * config registers and set the mem areas accordingly.
	 */
	mcp = (memctl8xx_t *)(&(((immap_t *)IMAP_ADDR)->im_memctl));
	append_mem_piece(&phys_mem, 0, binfo->bi_memsize);
#if 0
	phys_mem.regions[0].address = 0;
	phys_mem.regions[0].size = binfo->bi_memsize;
	phys_mem.n_regions = 1;
#endif

	ret = __va(phys_mem.regions[0].address +
		   phys_mem.regions[0].size);

	phys_avail = phys_mem;

	kstart = __pa(_stext);	/* should be 0 */
	ksize = PAGE_ALIGN(_end - _stext);
	remove_mem_piece(&phys_avail, kstart, ksize, 0);

	return ret;
}
#endif /* CONFIG_MBX */
#ifndef CONFIG_8xx
/*
 * On systems with Open Firmware, collect information about
 * physical RAM and which pieces are already in use.
 * At this point, we have (at least) the first 8MB mapped with a BAT.
 * Our text, data, bss use something over 1MB, starting at 0.
 * Open Firmware may be using 1MB at the 4MB point.
 */
unsigned long __init *pmac_find_end_of_memory(void)
{
	unsigned long a, total;
	unsigned long kstart, ksize;
	int i;

	/* max amount of RAM we allow -- Cort */
#define RAM_LIMIT (256<<20)

	memory_node = find_devices("memory");
	if (memory_node == NULL) {
		printk(KERN_ERR "can't find memory node\n");
		abort();
	}

	/*
	 * Find out where physical memory is, and check that it
	 * starts at 0 and is contiguous. It seems that RAM is
	 * always physically contiguous on Power Macintoshes,
	 * because MacOS can't cope if it isn't.
	 *
	 * Supporting discontiguous physical memory isn't hard,
	 * it just makes the virtual <-> physical mapping functions
	 * more complicated (or else you end up wasting space
	 * in mem_map).
	 */
	get_mem_prop("reg", &phys_mem);
	if (phys_mem.n_regions == 0)
		panic("No RAM??");
	a = phys_mem.regions[0].address;
	if (a != 0)
		panic("RAM doesn't start at physical address 0");
	/*
	 * Make sure ram mappings don't stomp on IO space
	 * This is a temporary hack to keep this from happening
	 * until we move the KERNELBASE and can allocate RAM up
	 * to our nearest IO area.
	 * -- Cort
	 */
	if ( phys_mem.regions[0].size >= RAM_LIMIT )
		phys_mem.regions[0].size = RAM_LIMIT;
	total = phys_mem.regions[0].size;

	if (phys_mem.n_regions > 1) {
		printk("RAM starting at 0x%x is not contiguous\n",
		       phys_mem.regions[1].address);
		printk("Using RAM from 0 to 0x%lx\n", total-1);
		phys_mem.n_regions = 1;
	}

	if (boot_infos == 0) {
		/* record which bits the prom is using */
		get_mem_prop("available", &phys_avail);
	} else {
		/* booted from BootX - it's all available (after klimit) */
		phys_avail = phys_mem;
	}
	prom_mem = phys_mem;
	for (i = 0; i < phys_avail.n_regions; ++i)
	{
		if ( phys_avail.regions[i].address >= RAM_LIMIT )
			continue;
		if ( (phys_avail.regions[i].address+phys_avail.regions[i].size)
		     >= RAM_LIMIT )
			phys_avail.regions[i].size = RAM_LIMIT - phys_avail.regions[i].address;
		remove_mem_piece(&prom_mem, phys_avail.regions[i].address,
				 phys_avail.regions[i].size, 1);
	}

	/*
	 * phys_avail records memory we can use now.
	 * prom_mem records memory allocated by the prom that we
	 * don't want to use now, but we'll reclaim later.
	 * Make sure the kernel text/data/bss is in neither.
	 */
	kstart = __pa(_stext);	/* should be 0 */
	ksize = PAGE_ALIGN(klimit - _stext);
	remove_mem_piece(&phys_avail, kstart, ksize, 0);
	remove_mem_piece(&prom_mem, kstart, ksize, 0);
	remove_mem_piece(&phys_avail, 0, 0x4000, 0);
	remove_mem_piece(&prom_mem, 0, 0x4000, 0);

	return __va(total);
}
/*
 * This finds the amount of physical ram and does necessary
 * setup for prep. This is pretty architecture specific so
 * this will likely stay separate from the pmac.
 * -- Cort
 */
unsigned long __init *prep_find_end_of_memory(void)
{
	unsigned long kstart, ksize;
	unsigned long total;
	total = res->TotalMemory;

	if (total == 0)
	{
		/*
		 * I need a way to probe the amount of memory if the residual
		 * data doesn't contain it. -- Cort
		 */
		printk("Ramsize from residual data was 0 -- Probing for value\n");
		total = 0x02000000;
		printk("Ramsize default to be %ldM\n", total>>20);
	}
	append_mem_piece(&phys_mem, 0, total);
	phys_avail = phys_mem;
	kstart = __pa(_stext);	/* should be 0 */
	ksize = PAGE_ALIGN(klimit - _stext);
	remove_mem_piece(&phys_avail, kstart, ksize, 0);
	remove_mem_piece(&phys_avail, 0, 0x4000, 0);

	return (__va(total));
}
unsigned long __init *gemini_find_end_of_memory(void)
{
	unsigned long total, kstart, ksize, *ret;
	unsigned char reg;

	reg = readb(GEMINI_MEMCFG);
	total = ((1<<((reg & 0x7) - 1)) *
		 (8<<((reg >> 3) & 0x7)));
	total *= (1024*1024);
	phys_mem.regions[0].address = 0;
	phys_mem.regions[0].size = total;
	phys_mem.n_regions = 1;

	ret = __va(phys_mem.regions[0].size);
	phys_avail = phys_mem;
	kstart = __pa(_stext);
	ksize = PAGE_ALIGN( _end - _stext );
	remove_mem_piece( &phys_avail, kstart, ksize, 0 );

	return ret;
}
#ifdef CONFIG_APUS
#define HARDWARE_MAPPED_SIZE (512*1024)
unsigned long __init *apus_find_end_of_memory(void)
{
	int shadow = 0;

	/* The memory size reported by ADOS excludes the 512KB
	   reserved for PPC exception registers and possibly 512KB
	   containing a shadow of the ADOS ROM. */
	{
		unsigned long size = memory[0].size;

		/* If 2MB aligned, size was probably user
		   specified. We can't tell anything about shadowing
		   in this case so skip shadow assignment. */
		if (0 != (size & 0x1fffff)){
			/* Align to 512KB to ensure correct handling
			   of both memfile and system specified
			   sizes. */
			size = ((size+0x0007ffff) & 0xfff80000);
			/* If memory is 1MB aligned, assume
			   shadowing. */
			shadow = !(size & 0x80000);
		}

		/* Add the chunk that ADOS does not see by aligning
		   the size to the nearest 2MB limit upwards. */
		memory[0].size = ((size+0x001fffff) & 0xffe00000);
	}

	/* Now register the memory block. */
	{
		unsigned long kstart, ksize;

		append_mem_piece(&phys_mem, memory[0].addr, memory[0].size);
		phys_avail = phys_mem;
		kstart = __pa(_stext);
		ksize = PAGE_ALIGN(klimit - _stext);
		remove_mem_piece(&phys_avail, kstart, ksize, 0);
	}

	/* Remove the memory chunks that are controlled by special
	   Phase5 hardware. */
	{
		unsigned long top = memory[0].addr + memory[0].size;

		/* Remove the upper 512KB if it contains a shadow of
		   the ADOS ROM. FIXME: It might be possible to
		   disable this shadow HW. Check the booter
		   (ppc_boot.c) */
		if (shadow) {
			top -= HARDWARE_MAPPED_SIZE;
			remove_mem_piece(&phys_avail, top,
					 HARDWARE_MAPPED_SIZE, 0);
		}

		/* Remove the upper 512KB where the PPC exception
		   vectors are mapped. */
		top -= HARDWARE_MAPPED_SIZE;
#if 0
		/* This would be neat, but it breaks on A3000 machines!? */
		remove_mem_piece(&phys_avail, top, 16384, 0);
#else
		remove_mem_piece(&phys_avail, top, HARDWARE_MAPPED_SIZE, 0);
#endif
	}

	/* Linux/APUS only handles one block of memory -- the one on
	   the PowerUP board. Other system memory is horribly slow in
	   comparison. The user can use other memory for swapping
	   using the z2ram device. */
	return __va(memory[0].addr + memory[0].size);
}
#endif /* CONFIG_APUS */
/*
 * Initialize the hash table and patch the instructions in head.S.
 */
static void __init hash_init(void)
{
	int Hash_bits;
	unsigned long h, ramsize;
	extern unsigned int hash_page_patch_A[], hash_page_patch_B[],
		hash_page_patch_C[], hash_page[];

	if ( ppc_md.progress ) ppc_md.progress("hash:enter", 0x105);
	/*
	 * Allow 64k of hash table for every 16MB of memory,
	 * up to a maximum of 2MB.
	 */
	ramsize = (ulong)end_of_DRAM - KERNELBASE;
#ifdef CONFIG_PPC64
	Hash_mask = 0;
	for (h = 256<<10; h < ramsize / 256 && h < 4<<20; h *= 2, Hash_mask++)
		;
	Hash_size = h;
	Hash_mask <<= 10;	/* so setting _SDR1 works the same -- Cort */
#else
	for (h = 64<<10; h < ramsize / 256 && h < 2<<20; h *= 2)
		;
	Hash_size = h;
	Hash_mask = (h >> 6) - 1;
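	/*
	 * Worked example (illustrative, not from the original file): with
	 * ramsize = 64MB the loop above stops at h = 64MB/256 = 256kB of
	 * hash table (i.e. 64kB per 16MB of RAM), and
	 * Hash_mask = (256k >> 6) - 1 = 0xfff.
	 */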
#endif /* CONFIG_PPC64 */

	/* shrink the htab since we don't use it on 603's -- Cort */
	switch (_get_PVR()>>16) {
	case 3: /* 603 */
	case 6: /* 603e */
	case 7: /* 603ev */
		Hash_size = 0;
		Hash_mask = 0;
		break;
	default:
		/* on 601/4 let things be */
		break;
	}

	if ( ppc_md.progress ) ppc_md.progress("hash:find piece", 0x322);
	/* Find some memory for the hash table. */
	if ( Hash_size )
		Hash = find_mem_piece(Hash_size, Hash_size);
	else
		Hash = 0;

	printk("Total memory = %ldMB; using %ldkB for hash table (at %p)\n",
	       ramsize >> 20, Hash_size >> 10, Hash);

	if ( Hash_size )
	{
		Hash_end = (PTE *) ((unsigned long)Hash + Hash_size);
		__clear_user(Hash, Hash_size);

		if ( ppc_md.progress ) ppc_md.progress("hash:patch", 0x345);
		/*
		 * Patch up the instructions in head.S:hash_page
		 */
		Hash_bits = ffz(~Hash_size) - 6;
		hash_page_patch_A[0] = (hash_page_patch_A[0] & ~0xffff)
			| (__pa(Hash) >> 16);
		hash_page_patch_A[1] = (hash_page_patch_A[1] & ~0x7c0)
			| ((26 - Hash_bits) << 6);
		if (Hash_bits > 16)
			Hash_bits = 16;
		hash_page_patch_A[2] = (hash_page_patch_A[2] & ~0x7c0)
			| ((26 - Hash_bits) << 6);
		hash_page_patch_B[0] = (hash_page_patch_B[0] & ~0xffff)
			| (Hash_mask >> 10);
		hash_page_patch_C[0] = (hash_page_patch_C[0] & ~0xffff)
			| (Hash_mask >> 10);
#if 0	/* see hash_page in head.S, note also patch_C ref below */
		hash_page_patch_D[0] = (hash_page_patch_D[0] & ~0xffff)
			| (Hash_mask >> 10);
#endif
		/*
		 * Ensure that the locations we've patched have been written
		 * out from the data cache and invalidated in the instruction
		 * cache, on those machines with split caches.
		 */
		flush_icache_range((unsigned long) &hash_page_patch_A[0],
				   (unsigned long) &hash_page_patch_C[1]);
	}
	else {
		Hash_end = 0;
		/*
		 * Put a blr (procedure return) instruction at the
		 * start of hash_page, since we can still get DSI
		 * exceptions on a 603.
		 */
		hash_page[0] = 0x4e800020;
		flush_icache_range((unsigned long) &hash_page[0],
				   (unsigned long) &hash_page[1]);
	}

	if ( ppc_md.progress ) ppc_md.progress("hash:done", 0x205);
}
#endif /* ndef CONFIG_8xx */