/*
 * $Id: init.c,v 1.195 1999/10/15 16:39:39 cort Exp $
 *
 * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 *
 * Modifications by Paul Mackerras (PowerMac) (paulus@cs.anu.edu.au)
 * and Cort Dougan (PReP) (cort@cs.nmt.edu)
 * Copyright (C) 1996 Paul Mackerras
 * Amiga/APUS changes by Jesper Skov (jskov@cygnus.co.uk).
 *
 * Derived from "arch/i386/mm/init.c"
 * Copyright (C) 1991, 1992, 1993, 1994 Linus Torvalds
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */
#include <linux/config.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/ptrace.h>
#include <linux/mman.h>
#include <linux/swap.h>
#include <linux/stddef.h>
#include <linux/vmalloc.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/openpic.h>
#include <linux/bootmem.h>
#include <linux/highmem.h>
#ifdef CONFIG_BLK_DEV_INITRD
#include <linux/blk.h>		/* for initrd_* */
#endif

#include <asm/pgalloc.h>
#include <asm/mmu_context.h>
#include <asm/pgtable.h>
#include <asm/residual.h>
#include <asm/uaccess.h>
#ifdef CONFIG_8xx
#include <asm/8xx_immap.h>
#include <asm/mpc8xx.h>
#endif
#ifdef CONFIG_8260
#include <asm/immap_8260.h>
#include <asm/mpc8260.h>
#endif
#include <asm/bootx.h>
#include <asm/machdep.h>
#include <asm/setup.h>
#include <asm/amigahw.h>
#include <asm/gemini.h>

#include "mem_pieces.h"
#if defined(CONFIG_4xx)
#include "4xx_tlb.h"
#endif

#define MAX_LOW_MEM	(640 << 20)

#define PGTOKB(pages)	(((pages) * PAGE_SIZE) >> 10)
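/* Worked example (illustrative): with 4kB pages, PGTOKB(32) is
 * (32 * 4096) >> 10 = 128, i.e. 32 pages report as 128kB. */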
atomic_t next_mmu_context;
unsigned long *end_of_DRAM;
unsigned long total_memory;
unsigned long total_lowmem;
int mem_init_done;
int init_bootmem_done;
int boot_mapsize;
unsigned long totalram_pages;
unsigned long totalhigh_pages;
extern pgd_t swapper_pg_dir[];
extern char _start[], _end[];
extern char etext[], _stext[];
extern char __init_begin, __init_end;
extern char __prep_begin, __prep_end;
extern char __chrp_begin, __chrp_end;
extern char __pmac_begin, __pmac_end;
extern char __apus_begin, __apus_end;
extern char __openfirmware_begin, __openfirmware_end;
struct device_node *memory_node;
unsigned long ioremap_base;
unsigned long ioremap_bot;
unsigned long avail_start;
extern int num_memory;
extern struct mem_info memory[];
extern boot_infos_t *boot_infos;
extern unsigned int rtas_data, rtas_size;

struct pgtable_cache_struct quicklists;
#ifdef CONFIG_HIGHMEM
pte_t *kmap_pte;
pgprot_t kmap_prot;
#endif /* CONFIG_HIGHMEM */
static void *MMU_get_page(void);
unsigned long prep_find_end_of_memory(void);
unsigned long pmac_find_end_of_memory(void);
unsigned long apus_find_end_of_memory(void);
unsigned long gemini_find_end_of_memory(void);
extern unsigned long find_end_of_memory(void);
#ifdef CONFIG_8xx
unsigned long m8xx_find_end_of_memory(void);
#endif /* CONFIG_8xx */
#ifdef CONFIG_4xx
unsigned long oak_find_end_of_memory(void);
#endif /* CONFIG_4xx */
#ifdef CONFIG_8260
unsigned long m8260_find_end_of_memory(void);
#endif /* CONFIG_8260 */
static void mapin_ram(void);
void map_page(unsigned long va, unsigned long pa, int flags);
void set_phys_avail(struct mem_pieces *mp);
extern void die_if_kernel(char *, struct pt_regs *, long);
extern char _start[], _end[];
extern char _stext[], etext[];
extern char *klimit;
extern struct task_struct *current_set[NR_CPUS];
struct mem_pieces phys_mem;
struct mem_pieces phys_avail;

PTE *Hash, *Hash_end;
unsigned long Hash_size, Hash_mask;
#if !defined(CONFIG_4xx) && !defined(CONFIG_8xx)

static void hash_init(void);

union ubat {			/* BAT register values to be loaded */
	BAT	bat;
#ifdef CONFIG_PPC64BRIDGE
	u64	word[2];
#else
	u32	word[2];
#endif
} BATS[4][2];			/* 4 pairs of IBAT, DBAT */

struct batrange {		/* stores address ranges mapped by BATs */
	unsigned long start;
	unsigned long limit;
	unsigned long phys;
} bat_addrs[4];
/*
 * Return PA for this VA if it is mapped by a BAT, or 0
 */
static inline unsigned long v_mapped_by_bats(unsigned long va)
{
	int b;

	for (b = 0; b < 4; ++b)
		if (va >= bat_addrs[b].start && va < bat_addrs[b].limit)
			return bat_addrs[b].phys + (va - bat_addrs[b].start);
	return 0;
}
/*
 * Return VA for a given PA or 0 if not mapped
 */
static inline unsigned long p_mapped_by_bats(unsigned long pa)
{
	int b;

	for (b = 0; b < 4; ++b)
		if (pa >= bat_addrs[b].phys
		    && pa < (bat_addrs[b].limit - bat_addrs[b].start)
			    + bat_addrs[b].phys)
			return bat_addrs[b].start + (pa - bat_addrs[b].phys);
	return 0;
}
#else /* CONFIG_4xx || CONFIG_8xx */
#define v_mapped_by_bats(x)	(0UL)
#define p_mapped_by_bats(x)	(0UL)
#endif /* !CONFIG_4xx && !CONFIG_8xx */
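/*
 * Illustrative example (not from the original source): if BAT2 covers
 * a 16MB block with bat_addrs[2] = { start = 0xc0000000, limit =
 * 0xc0ffffff, phys = 0x00000000 }, then v_mapped_by_bats(0xc0001000)
 * returns 0x00001000 and p_mapped_by_bats(0x00001000) returns
 * 0xc0001000.
 */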
/*
 * this tells the system to map all of ram with the segregs
 * (i.e. page tables) instead of the bats.
 */
int __map_without_bats;

/* max amount of RAM to use */
unsigned long __max_memory;
void __bad_pte(pmd_t *pmd)
{
	printk("Bad pmd in pte_alloc: %08lx\n", pmd_val(*pmd));
	pmd_val(*pmd) = (unsigned long) BAD_PAGETABLE;
}
pte_t *get_pte_slow(pmd_t *pmd, unsigned long offset)
{
	pte_t *pte;

	if (pmd_none(*pmd)) {
		if (!mem_init_done)
			pte = (pte_t *) MMU_get_page();
		else if ((pte = (pte_t *) __get_free_page(GFP_KERNEL)))
			clear_page(pte);
		if (pte) {
			pmd_val(*pmd) = (unsigned long)pte;
			return pte + offset;
		}
		pmd_val(*pmd) = (unsigned long)BAD_PAGETABLE;
		return NULL;
	}
	if (pmd_bad(*pmd)) {
		__bad_pte(pmd);
		return NULL;
	}
	return (pte_t *) pmd_page(*pmd) + offset;
}
int do_check_pgt_cache(int low, int high)
{
	int freed = 0;

	if (pgtable_cache_size > high) {
		do {
			if (pgd_quicklist)
				free_pgd_slow(get_pgd_fast()), freed++;
			if (pmd_quicklist)
				free_pmd_slow(get_pmd_fast()), freed++;
			if (pte_quicklist)
				free_pte_slow(get_pte_fast()), freed++;
		} while (pgtable_cache_size > low);
	}
	return freed;
}
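/*
 * Usage sketch (hypothetical numbers): the VM might call
 * do_check_pgt_cache(25, 50), so trimming only starts once more than
 * 50 pages sit in the quicklists and stops once it drops back to 25.
 */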
/*
 * BAD_PAGE is the page that is used for page faults when linux
 * is out-of-memory. Older versions of linux just did a
 * do_exit(), but using this instead means there is less risk
 * for a process dying in kernel mode, possibly leaving an inode
 * unused etc.
 *
 * BAD_PAGETABLE is the accompanying page-table: it is initialized
 * to point to BAD_PAGE entries.
 *
 * ZERO_PAGE is a special page that is used for zero-initialized
 * data and COW.
 */
pte_t *empty_bad_page_table;

pte_t * __bad_pagetable(void)
{
	clear_page(empty_bad_page_table);
	return empty_bad_page_table;
}

void *empty_bad_page;

pte_t __bad_page(void)
{
	clear_page(empty_bad_page);
	return pte_mkdirty(mk_pte_phys(__pa(empty_bad_page), PAGE_SHARED));
}
void show_mem(void)
{
	int i, free = 0, total = 0, reserved = 0;
	int shared = 0, cached = 0;
	int highmem = 0;
	struct task_struct *p;

	printk("Mem-info:\n");
	show_free_areas();
	printk("Free swap:       %6dkB\n", nr_swap_pages<<(PAGE_SHIFT-10));
	i = max_mapnr;
	while (i-- > 0) {
		total++;
		if (PageHighMem(mem_map+i))
			highmem++;
		if (PageReserved(mem_map+i))
			reserved++;
		else if (PageSwapCache(mem_map+i))
			cached++;
		else if (!page_count(mem_map+i))
			free++;
		else
			shared += atomic_read(&mem_map[i].count) - 1;
	}
	printk("%d pages of RAM\n", total);
	printk("%d pages of HIGHMEM\n", highmem);
	printk("%d free pages\n", free);
	printk("%d reserved pages\n", reserved);
	printk("%d pages shared\n", shared);
	printk("%d pages swap cached\n", cached);
	printk("%d pages in page table cache\n", (int)pgtable_cache_size);
	show_buffers();
	printk("%-8s %3s %8s %8s %8s %9s %8s", "Process", "Pid",
	       "Ctx", "Ctx<<4", "Last Sys", "pc", "task");
#ifdef CONFIG_SMP
	printk(" %3s", "CPU");
#endif /* CONFIG_SMP */
	printk("\n");
	for_each_task(p)
	{
		printk("%-8.8s %3d %8ld %8ld %8ld %c%08lx %08lx ",
		       p->comm, p->pid,
		       (p->mm)?p->mm->context:0,
		       (p->mm)?(p->mm->context<<4):0,
		       p->thread.last_syscall,
		       (p->thread.regs)?user_mode(p->thread.regs) ? 'u' : 'k' : '?',
		       (p->thread.regs)?p->thread.regs->nip:0,
		       (ulong)p);
		{
			int iscur = 0;
#ifdef CONFIG_SMP
			printk("%3d ", p->processor);
			if ( (p->processor != NO_PROC_ID) &&
			     (p == current_set[p->processor]) )
			{
				iscur = 1;
				printk("current");
			}
#else
			if ( p == current )
			{
				iscur = 1;
				printk("current");
			}

			if ( p == last_task_used_math )
			{
				if ( iscur )
					printk(",");
				printk("last math");
			}
#endif /* CONFIG_SMP */
			printk("\n");
		}
	}
}
void si_meminfo(struct sysinfo *val)
{
	int i;

	i = max_mapnr;
	val->totalram = 0;
	val->sharedram = 0;
	val->freeram = nr_free_pages();
	val->bufferram = atomic_read(&buffermem_pages);
	while (i-- > 0) {
		if (PageReserved(mem_map+i))
			continue;
		val->totalram++;
		if (!atomic_read(&mem_map[i].count))
			continue;
		val->sharedram += atomic_read(&mem_map[i].count) - 1;
	}
	val->totalhigh = totalhigh_pages;
	val->freehigh = nr_free_highpages();
	val->mem_unit = PAGE_SIZE;
}
void *
ioremap(unsigned long addr, unsigned long size)
{
	return __ioremap(addr, size, _PAGE_NO_CACHE);
}
void *
__ioremap(unsigned long addr, unsigned long size, unsigned long flags)
{
	unsigned long p, v, i;

	/*
	 * Choose an address to map it to.
	 * Once the vmalloc system is running, we use it.
	 * Before then, we map addresses >= ioremap_base
	 * virt == phys; for addresses below this we use
	 * space going down from ioremap_base (ioremap_bot
	 * records where we're up to).
	 */
	p = addr & PAGE_MASK;
	size = PAGE_ALIGN(addr + size) - p;

	/*
	 * If the address lies within the first 16 MB, assume it's in ISA
	 * memory space
	 */
	if (p < 16*1024*1024)
		p += _ISA_MEM_BASE;

	/*
	 * Don't allow anybody to remap normal RAM that we're using.
	 * mem_init() sets high_memory so only do the check after that.
	 */
	if ( mem_init_done && (p < virt_to_phys(high_memory)) )
	{
		printk("__ioremap(): phys addr %0lx is RAM lr %p\n", p,
		       __builtin_return_address(0));
		return NULL;
	}

	if (size == 0)
		return NULL;

	/*
	 * Is it already mapped?  Perhaps overlapped by a previous
	 * BAT mapping.  If the whole area is mapped then we're done,
	 * otherwise remap it since we want to keep the virt addrs for
	 * each request contiguous.
	 *
	 * We make the assumption here that if the bottom and top
	 * of the range we want are mapped then it's mapped to the
	 * same virt address (and this is contiguous).
	 */
	if ((v = p_mapped_by_bats(p)) /*&& p_mapped_by_bats(p+size-1)*/ )
		goto out;

	if (mem_init_done) {
		struct vm_struct *area;
		area = get_vm_area(size, VM_IOREMAP);
		if (area == 0)
			return NULL;
		v = VMALLOC_VMADDR(area->addr);
	} else {
		if (p >= ioremap_base)
			v = p;
		else
			v = (ioremap_bot -= size);
	}

	if ((flags & _PAGE_PRESENT) == 0)
		flags |= pgprot_val(PAGE_KERNEL);
	if (flags & (_PAGE_NO_CACHE | _PAGE_WRITETHRU))
		flags |= _PAGE_GUARDED;

	/*
	 * Is it a candidate for a BAT mapping?
	 */
	for (i = 0; i < size; i += PAGE_SIZE)
		map_page(v+i, p+i, flags);
out:
	return (void *) (v + (addr & ~PAGE_MASK));
}
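#if 0
/*
 * Illustrative driver-side use of the ioremap()/iounmap() pair above.
 * A sketch only, not part of the original file: MYDEV_REGS_PHYS is a
 * made-up physical address for some device's register block.
 */
#define MYDEV_REGS_PHYS	0xf0001000
static void mydev_probe_example(void)
{
	volatile unsigned char *regs;

	/* Mapped uncached+guarded via __ioremap(_PAGE_NO_CACHE). */
	regs = (volatile unsigned char *) ioremap(MYDEV_REGS_PHYS, 0x1000);
	if (regs == NULL)
		return;
	(void) readb((unsigned long) regs);	/* device register access */
	iounmap((void *) regs);
}
#endif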
void iounmap(void *addr)
{
	if (addr > high_memory && (unsigned long) addr < ioremap_bot)
		vfree((void *) (PAGE_MASK & (unsigned long) addr));
}
unsigned long iopa(unsigned long addr)
{
	unsigned long pa;
	pmd_t *pd;
	pte_t *pg;

	/* Check the BATs */
	pa = v_mapped_by_bats(addr);
	if (pa)
		return pa;

	/* Do we have a page table? */
	if (init_mm.pgd == NULL)
		return 0;

	/* Use upper 10 bits of addr to index the first level map */
	pd = (pmd_t *) (init_mm.pgd + (addr >> PGDIR_SHIFT));
	if (pmd_none(*pd))
		return 0;

	/* Use middle 10 bits of addr to index the second-level map */
	pg = pte_offset(pd, addr);
	return (pte_val(*pg) & PAGE_MASK) | (addr & ~PAGE_MASK);
}
void
map_page(unsigned long va, unsigned long pa, int flags)
{
	pmd_t *pd, oldpd;
	pte_t *pg;

	/* Use upper 10 bits of VA to index the first level map */
	pd = pmd_offset(pgd_offset_k(va), va);
	oldpd = *pd;
	/* Use middle 10 bits of VA to index the second-level map */
	pg = pte_alloc(pd, va);
	if (pmd_none(oldpd) && mem_init_done)
		set_pgdir(va, *(pgd_t *)pd);
	set_pte(pg, mk_pte_phys(pa & PAGE_MASK, __pgprot(flags)));
	if (mem_init_done)
		flush_hash_page(0, va);
}
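/*
 * Example (a sketch, not in the original): mapping one uncached,
 * guarded I/O page at its own physical address would look like
 *
 *	map_page(0xf8000000, 0xf8000000,
 *		 _PAGE_NO_CACHE | _PAGE_GUARDED | pgprot_val(PAGE_KERNEL));
 *
 * which is essentially what __ioremap() above does for each page.
 */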
/*
 * TLB flushing:
 *
 *  - flush_tlb_all() flushes all processes TLBs
 *  - flush_tlb_mm(mm) flushes the specified mm context TLB's
 *  - flush_tlb_page(vma, vmaddr) flushes one page
 *  - flush_tlb_range(mm, start, end) flushes a range of pages
 *
 * since the hardware hash table functions as an extension of the
 * tlb as far as the linux tables are concerned, flush it too.
 */

/*
 * Flush all tlb/hash table entries (except perhaps for those
 * mapping RAM starting at PAGE_OFFSET, since they never change).
 */
void
local_flush_tlb_all(void)
{
#ifdef CONFIG_PPC64BRIDGE
	/* XXX this assumes that the vmalloc arena starts no lower than
	 * 0xd0000000 on 64-bit machines. */
	flush_hash_segments(0xd, 0xffffff);
#else
	__clear_user(Hash, Hash_size);
	_tlbia();
#ifdef CONFIG_SMP
	smp_send_tlb_invalidate(0);
#endif /* CONFIG_SMP */
#endif /* CONFIG_PPC64BRIDGE */
}
/*
 * Flush all the (user) entries for the address space described
 * by mm.  We can't rely on mm->mmap describing all the entries
 * that might be in the hash table.
 */
void
local_flush_tlb_mm(struct mm_struct *mm)
{
	mm->context = NO_CONTEXT;
	if (mm == current->mm)
		activate_mm(mm, mm);
#ifdef CONFIG_SMP
	smp_send_tlb_invalidate(0);
#endif
}
void
local_flush_tlb_page(struct vm_area_struct *vma, unsigned long vmaddr)
{
	if (vmaddr < TASK_SIZE)
		flush_hash_page(vma->vm_mm->context, vmaddr);
	else
		flush_hash_page(0, vmaddr);
#ifdef CONFIG_SMP
	smp_send_tlb_invalidate(0);
#endif
}
/*
 * for each page addr in the range, call MMU_invalidate_page()
 * if the range is very large and the hash table is small it might be
 * faster to do a search of the hash table and just invalidate pages
 * that are in the range but that's for study later.
 */
void
local_flush_tlb_range(struct mm_struct *mm, unsigned long start, unsigned long end)
{
	start &= PAGE_MASK;

	if (end - start > 20 * PAGE_SIZE) {
		flush_tlb_mm(mm);
		return;
	}

	for (; start < end && start < TASK_SIZE; start += PAGE_SIZE)
	{
		flush_hash_page(mm->context, start);
	}
#ifdef CONFIG_SMP
	smp_send_tlb_invalidate(0);
#endif
}
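/*
 * Note on the 20-page cutoff above: each flush_hash_page() call costs
 * a hash-table search, so past roughly 20 pages it is assumed cheaper
 * to invalidate the whole context than to flush page by page.
 */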
/*
 * The context counter has overflowed.
 * We set mm->context to NO_CONTEXT for all mm's in the system.
 * We assume we can get to all mm's by looking at tsk->mm for
 * all tasks in the system.
 */
void
mmu_context_overflow(void)
{
	struct task_struct *tsk;

	printk(KERN_DEBUG "mmu_context_overflow\n");
	read_lock(&tasklist_lock);
	for_each_task(tsk) {
		if (tsk->mm)
			tsk->mm->context = NO_CONTEXT;
	}
	read_unlock(&tasklist_lock);
	flush_hash_segments(0x10, 0xffffff);
#ifdef CONFIG_SMP
	smp_send_tlb_invalidate(0);
#endif
	atomic_set(&next_mmu_context, 0);
	/* make sure current always has a context */
	current->mm->context = MUNGE_CONTEXT(atomic_inc_return(&next_mmu_context));
	/* The PGD is only a placeholder.  It is only used on
	 * 8xx processors.
	 */
	set_context(current->mm->context, current->mm->pgd);
}
#endif /* CONFIG_8xx */
void flush_page_to_ram(struct page *page)
{
	unsigned long vaddr = (unsigned long) kmap(page);
	__flush_page_to_ram(vaddr);
	kunmap(page);
}
#if !defined(CONFIG_4xx) && !defined(CONFIG_8xx)
static void get_mem_prop(char *, struct mem_pieces *);

#if defined(CONFIG_ALL_PPC)
/*
 * Read in a property describing some pieces of memory.
 */
static void __init get_mem_prop(char *name, struct mem_pieces *mp)
{
	struct reg_property *rp;
	int s;

	rp = (struct reg_property *) get_property(memory_node, name, &s);
	if (rp == NULL) {
		printk(KERN_ERR "error: couldn't get %s property on /memory\n",
		       name);
		abort();
	}
	mp->n_regions = s / sizeof(mp->regions[0]);
	memcpy(mp->regions, rp, s);

	/* Make sure the pieces are sorted. */
	mem_pieces_sort(mp);
	mem_pieces_coalesce(mp);
}
#endif /* CONFIG_ALL_PPC */
/*
 * Set up one of the I/D BAT (block address translation) register pairs.
 * The parameters are not checked; in particular size must be a power
 * of 2 between 128k and 256M.
 */
void __init setbat(int index, unsigned long virt, unsigned long phys,
		   unsigned int size, int flags)
{
	unsigned int bl;
	int wimgxpp;
	union ubat *bat = BATS[index];

	bl = (size >> 17) - 1;
	if ((_get_PVR() >> 16) != 1) {
		/* 603, 604, etc. */
		/* Do DBAT first */
		wimgxpp = flags & (_PAGE_WRITETHRU | _PAGE_NO_CACHE
				   | _PAGE_COHERENT | _PAGE_GUARDED);
		wimgxpp |= (flags & _PAGE_RW)? BPP_RW: BPP_RX;
		bat[1].word[0] = virt | (bl << 2) | 2;	/* Vs=1, Vp=0 */
		bat[1].word[1] = phys | wimgxpp;
#ifndef CONFIG_KGDB	/* want user access for breakpoints */
		if (flags & _PAGE_USER)
#endif
			bat[1].bat.batu.vp = 1;
		if (flags & _PAGE_GUARDED) {
			/* G bit must be zero in IBATs */
			bat[0].word[0] = bat[0].word[1] = 0;
		} else {
			/* make IBAT same as DBAT */
			bat[0] = bat[1];
		}
	} else {
		/* 601 cpu */
		wimgxpp = flags & (_PAGE_WRITETHRU | _PAGE_NO_CACHE
				   | _PAGE_COHERENT);
		wimgxpp |= (flags & _PAGE_RW)?
			((flags & _PAGE_USER)? PP_RWRW: PP_RWXX): PP_RXRX;
		bat->word[0] = virt | wimgxpp | 4;	/* Ks=0, Ku=1 */
		bat->word[1] = phys | bl | 0x40;	/* V=1 */
	}

	bat_addrs[index].start = virt;
	bat_addrs[index].limit = virt + ((bl + 1) << 17) - 1;
	bat_addrs[index].phys = phys;
}
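/*
 * Worked example of the BL encoding above: for a 256MB block,
 * bl = (0x10000000 >> 17) - 1 = 0x7ff, and the mapped range is
 * virt .. virt + ((bl + 1) << 17) - 1 = virt + 0x0fffffff.
 */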
#define IO_PAGE	(_PAGE_NO_CACHE | _PAGE_GUARDED | _PAGE_RW)
#ifdef CONFIG_SMP
#define RAM_PAGE (_PAGE_RW|_PAGE_COHERENT)
#else
#define RAM_PAGE (_PAGE_RW)
#endif
#endif /* CONFIG_8xx */
/*
 * Map in all of physical memory starting at KERNELBASE.
 */
#define PAGE_KERNEL_RO	__pgprot(_PAGE_PRESENT | _PAGE_ACCESSED)

static void __init mapin_ram(void)
{
	int i;
	unsigned long v, p, s, f;
#if !defined(CONFIG_4xx) && !defined(CONFIG_8xx) && !defined(CONFIG_POWER4)
	if (!__map_without_bats) {
		unsigned long tot, mem_base, bl, done;
		unsigned long max_size = (256<<20);
		unsigned long align;

		/* Set up BAT2 and if necessary BAT3 to cover RAM. */
		mem_base = __pa(KERNELBASE);

		/* Make sure we don't map a block larger than the
		   smallest alignment of the physical address. */
		/* alignment of mem_base */
		align = ~(mem_base-1) & mem_base;
		/* set BAT block size to MIN(max_size, align) */
		if (align && align < max_size)
			max_size = align;

		tot = total_lowmem;
		for (bl = 128<<10; bl < max_size; bl <<= 1) {
			if (bl * 2 > tot)
				break;
		}

		setbat(2, KERNELBASE, mem_base, bl, RAM_PAGE);
		done = (unsigned long)bat_addrs[2].limit - KERNELBASE + 1;
		if ((done < tot) && !bat_addrs[3].limit) {
			/* use BAT3 to cover a bit more */
			tot -= done;
			for (bl = 128<<10; bl < max_size; bl <<= 1)
				if (bl * 2 > tot)
					break;
			setbat(3, KERNELBASE+done, mem_base+done, bl,
			       RAM_PAGE);
		}
	}
#endif /* !CONFIG_4xx && !CONFIG_8xx && !CONFIG_POWER4 */
	for (i = 0; i < phys_mem.n_regions; ++i) {
		v = (ulong)__va(phys_mem.regions[i].address);
		p = phys_mem.regions[i].address;
		if (p >= total_lowmem)
			break;
		for (s = 0; s < phys_mem.regions[i].size; s += PAGE_SIZE) {
			/* On the MPC8xx, we want the page shared so we
			 * don't get ASID compares on kernel space.
			 */
			f = _PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_SHARED;
#if defined(CONFIG_KGDB) || defined(CONFIG_XMON)
			/* Allows stub to set breakpoints everywhere */
			f |= _PAGE_RW | _PAGE_DIRTY | _PAGE_HWWRITE;
#else
			if ((char *) v < _stext || (char *) v >= etext)
				f |= _PAGE_RW | _PAGE_DIRTY | _PAGE_HWWRITE;
#ifndef CONFIG_8xx
			else
				/* On the powerpc (not 8xx), no user access
				   forces R/W kernel access */
				f |= _PAGE_USER;
#endif /* CONFIG_8xx */
#endif /* CONFIG_KGDB */
			map_page(v, p, f);
			v += PAGE_SIZE;
			p += PAGE_SIZE;
			if (p >= total_lowmem)
				break;
		}
	}
}
/* In fact this is only called until mem_init is done. */
static void __init *MMU_get_page(void)
{
	void *p;

	if (mem_init_done) {
		p = (void *) __get_free_page(GFP_KERNEL);
	} else if (init_bootmem_done) {
		p = alloc_bootmem_pages(PAGE_SIZE);
	} else {
		p = mem_pieces_find(PAGE_SIZE, PAGE_SIZE);
	}
	if (p == 0)
		panic("couldn't get a page in MMU_get_page");
	__clear_user(p, PAGE_SIZE);
	return p;
}
static void free_sec(unsigned long start, unsigned long end, const char *name)
{
	unsigned long cnt = 0;

	while (start < end) {
		clear_bit(PG_reserved, &virt_to_page(start)->flags);
		set_page_count(virt_to_page(start), 1);
		free_page(start);
		cnt++;
		start += PAGE_SIZE;
	}
	if (cnt) {
		printk(" %ldk %s", PGTOKB(cnt), name);
		totalram_pages += cnt;
	}
}
void free_initmem(void)
{
#define FREESEC(TYPE) \
	free_sec((unsigned long)(&__ ## TYPE ## _begin), \
		 (unsigned long)(&__ ## TYPE ## _end), \
		 #TYPE);

	printk ("Freeing unused kernel memory:");
	FREESEC(init);
	if (_machine != _MACH_Pmac)
		FREESEC(pmac);
	if (_machine != _MACH_chrp)
		FREESEC(chrp);
	if (_machine != _MACH_prep)
		FREESEC(prep);
	if (_machine != _MACH_apus)
		FREESEC(apus);
	if (!have_of)
		FREESEC(openfirmware);
	printk("\n");
#undef FREESEC
}
859 void free_initrd_mem(unsigned long start
, unsigned long end
)
861 for (; start
< end
; start
+= PAGE_SIZE
) {
862 ClearPageReserved(virt_to_page(start
));
863 set_page_count(virt_to_page(start
), 1);
867 printk ("Freeing initrd memory: %ldk freed\n", (end
- start
) >> 10);
extern boot_infos_t *disp_bi;

/*
 * Do very early mm setup such as finding the size of memory
 * and setting up the hash table.
 * A lot of this is prep/pmac specific but a lot of it could
 * still be merged.
 */
#if defined(CONFIG_4xx)
void __init MMU_init(void)
{
	/*
	 * The Zone Protection Register (ZPR) defines how protection will
	 * be applied to every page which is a member of a given zone. At
	 * present, we utilize only two of the 4xx's zones. The first, zone
	 * 0, is set at '00b and only allows access in supervisor-mode based
	 * on the EX and WR bits. No user-mode access is allowed. The second,
	 * zone 1, is set at '10b and in supervisor-mode allows access
	 * without regard to the EX and WR bits. In user-mode, access is
	 * allowed based on the EX and WR bits.
	 */
	mtspr(SPRN_ZPR, 0x2aaaaaaa);

	/* Hardwire any TLB entries necessary here. */
	PPC4xx_tlb_pin(KERNELBASE, 0, TLB_PAGESZ(PAGESZ_16M), 1);

	/*
	 * Find the top of physical memory and map all of it in starting
	 * at KERNELBASE.
	 */
	total_memory = total_lowmem = oak_find_end_of_memory();
	end_of_DRAM = __va(total_memory);
	mapin_ram();

	/*
	 * Set up the real-mode cache parameters for the exception vector
	 * handlers (which are run in real-mode).
	 */
	mtspr(SPRN_DCWR, 0x00000000);	/* All caching is write-back */

	/*
	 * Cache instruction and data space where the exception
	 * vectors and the kernel live in real-mode.
	 */
	mtspr(SPRN_DCCR, 0x80000000);	/* 128 MB of data space at 0x0. */
	mtspr(SPRN_ICCR, 0x80000000);	/* 128 MB of instr. space at 0x0. */
}
#else /* CONFIG_4xx */

/* How about ppc_md.md_find_end_of_memory instead of these
 * hardcoded pmac/prep/apus functions?
 */
#ifdef CONFIG_BOOTX_TEXT
extern boot_infos_t *disp_bi;
#endif

void __init MMU_init(void)
{
	if ( ppc_md.progress ) ppc_md.progress("MMU:enter", 0x111);
#ifndef CONFIG_8xx
	if (have_of)
		total_memory = pmac_find_end_of_memory();
#ifdef CONFIG_APUS
	else if (_machine == _MACH_apus)
		total_memory = apus_find_end_of_memory();
#endif
#ifdef CONFIG_GEMINI
	else if ( _machine == _MACH_gemini )
		total_memory = gemini_find_end_of_memory();
#endif /* CONFIG_GEMINI */
#if defined(CONFIG_8260)
	else
		total_memory = m8260_find_end_of_memory();
#else
	else /* prep */
		total_memory = prep_find_end_of_memory();
#endif
	total_lowmem = total_memory;
#ifdef CONFIG_HIGHMEM
	if (total_lowmem > MAX_LOW_MEM) {
		total_lowmem = MAX_LOW_MEM;
		mem_pieces_remove(&phys_avail, total_lowmem,
				  total_memory - total_lowmem, 0);
	}
#endif /* CONFIG_HIGHMEM */
	end_of_DRAM = __va(total_lowmem);

	if ( ppc_md.progress ) ppc_md.progress("MMU:hash init", 0x300);
	hash_init();
#ifndef CONFIG_PPC64BRIDGE
	_SDR1 = __pa(Hash) | (Hash_mask >> 10);
#endif

	ioremap_base = 0xf8000000;

	if ( ppc_md.progress ) ppc_md.progress("MMU:mapin", 0x301);
	/* Map in all of RAM starting at KERNELBASE */
	mapin_ram();

#ifdef CONFIG_POWER4
	ioremap_base = ioremap_bot = 0xfffff000;
	isa_io_base = (unsigned long) ioremap(0xffd00000, 0x200000) + 0x100000;

#else /* CONFIG_POWER4 */
	/*
	 * Setup the bat mappings we're going to load that cover
	 * the io areas.  RAM was mapped by mapin_ram().
	 */
	if ( ppc_md.progress ) ppc_md.progress("MMU:setbat", 0x302);
	switch (_machine) {
	case _MACH_prep:
		setbat(0, 0x80000000, 0x80000000, 0x10000000, IO_PAGE);
		setbat(1, 0xf0000000, 0xc0000000, 0x08000000, IO_PAGE);
		ioremap_base = 0xf0000000;
		break;
	case _MACH_chrp:
		setbat(0, 0xf8000000, 0xf8000000, 0x08000000, IO_PAGE);
#ifdef CONFIG_PPC64BRIDGE
		setbat(1, 0x80000000, 0xc0000000, 0x10000000, IO_PAGE);
#else
		setbat(1, 0x80000000, 0x80000000, 0x10000000, IO_PAGE);
		setbat(3, 0x90000000, 0x90000000, 0x10000000, IO_PAGE);
#endif
		break;
	case _MACH_Pmac:
		ioremap_base = 0xfe000000;
		break;
	case _MACH_apus:
		/* Map PPC exception vectors. */
		setbat(0, 0xfff00000, 0xfff00000, 0x00020000, RAM_PAGE);
		/* Map chip and ZorroII memory */
		setbat(1, zTwoBase, 0x00000000, 0x01000000, IO_PAGE);
		break;
	case _MACH_gemini:
		setbat(0, 0xf0000000, 0xf0000000, 0x10000000, IO_PAGE);
		setbat(1, 0x80000000, 0x80000000, 0x10000000, IO_PAGE);
		break;
	case _MACH_8260:
		/* Map the IMMR, plus anything else we can cover
		 * in that upper space according to the memory controller
		 * chip select mapping.  Grab another bunch of space
		 * below that for stuff we can't cover in the upper.
		 */
		setbat(0, 0xf0000000, 0xf0000000, 0x10000000, IO_PAGE);
		setbat(1, 0xe0000000, 0xe0000000, 0x10000000, IO_PAGE);
		ioremap_base = 0xe0000000;
		break;
	}
	ioremap_bot = ioremap_base;
#endif /* CONFIG_POWER4 */
#else /* CONFIG_8xx */
	total_memory = total_lowmem = m8xx_find_end_of_memory();
#ifdef CONFIG_HIGHMEM
	if (total_lowmem > MAX_LOW_MEM) {
		total_lowmem = MAX_LOW_MEM;
		mem_pieces_remove(&phys_avail, total_lowmem,
				  total_memory - total_lowmem, 0);
	}
#endif /* CONFIG_HIGHMEM */
	end_of_DRAM = __va(total_lowmem);

	/* Map in all of RAM starting at KERNELBASE */
	mapin_ram();

	/* Now map in some of the I/O space that is generically needed
	 * or shared with multiple devices.
	 * All of this fits into the same 4Mbyte region, so it only
	 * requires one page table page.
	 */
	ioremap(IMAP_ADDR, IMAP_SIZE);
#ifdef CONFIG_MBX
	ioremap(NVRAM_ADDR, NVRAM_SIZE);
	ioremap(MBX_CSR_ADDR, MBX_CSR_SIZE);
	ioremap(PCI_CSR_ADDR, PCI_CSR_SIZE);

	/* Map some of the PCI/ISA I/O space to get the IDE interface.
	 */
	ioremap(PCI_ISA_IO_ADDR, 0x4000);
	ioremap(PCI_IDE_ADDR, 0x4000);
#endif
#ifdef CONFIG_RPXLITE
	ioremap(RPX_CSR_ADDR, RPX_CSR_SIZE);
	ioremap(HIOX_CSR_ADDR, HIOX_CSR_SIZE);
#endif
#ifdef CONFIG_RPXCLASSIC
	ioremap(PCI_CSR_ADDR, PCI_CSR_SIZE);
	ioremap(RPX_CSR_ADDR, RPX_CSR_SIZE);
#endif
#endif /* CONFIG_8xx */
	if ( ppc_md.progress ) ppc_md.progress("MMU:exit", 0x211);
#ifdef CONFIG_BOOTX_TEXT
	/* Must be done last, or ppc_md.progress will die */
	if (_machine == _MACH_Pmac || _machine == _MACH_chrp)
		map_bootx_text();
#endif
}
#endif /* CONFIG_4xx */
/*
 * Initialize the bootmem system and give it all the memory we
 * have available.
 */
void __init do_init_bootmem(void)
{
	unsigned long start, size;
	int i;

	/*
	 * Find an area to use for the bootmem bitmap.
	 * We look for the first area which is at least
	 * 128kB in length (128kB is enough for a bitmap
	 * for 4GB of memory, using 4kB pages), plus 1 page
	 * (in case the address isn't page-aligned).
	 */
	start = 0;
	size = 0;
	for (i = 0; i < phys_avail.n_regions; ++i) {
		unsigned long a = phys_avail.regions[i].address;
		unsigned long s = phys_avail.regions[i].size;
		if (s <= size)
			continue;
		start = a;
		size = s;
		if (s >= 33 * PAGE_SIZE)
			break;
	}
	start = PAGE_ALIGN(start);

	boot_mapsize = init_bootmem(start >> PAGE_SHIFT,
				    total_lowmem >> PAGE_SHIFT);

	/* remove the bootmem bitmap from the available memory */
	mem_pieces_remove(&phys_avail, start, boot_mapsize, 1);

	/* add everything in phys_avail into the bootmem map */
	for (i = 0; i < phys_avail.n_regions; ++i)
		free_bootmem(phys_avail.regions[i].address,
			     phys_avail.regions[i].size);

	init_bootmem_done = 1;
}
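/*
 * Sizing note for the search above: 33 * PAGE_SIZE is 132kB with 4kB
 * pages, i.e. the 128kB bitmap (one bit per 4kB page of a 4GB space)
 * plus one page of slack for alignment.
 */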
/*
 * paging_init() sets up the page tables - in fact we've already done this.
 */
void __init paging_init(void)
{
	unsigned long zones_size[MAX_NR_ZONES], i;

#ifdef CONFIG_HIGHMEM
	map_page(PKMAP_BASE, 0, 0);	/* XXX gross */
	pkmap_page_table = pte_offset(pmd_offset(pgd_offset_k(PKMAP_BASE), PKMAP_BASE), PKMAP_BASE);
	map_page(KMAP_FIX_BEGIN, 0, 0);	/* XXX gross */
	kmap_pte = pte_offset(pmd_offset(pgd_offset_k(KMAP_FIX_BEGIN), KMAP_FIX_BEGIN), KMAP_FIX_BEGIN);
	kmap_prot = PAGE_KERNEL;
#endif /* CONFIG_HIGHMEM */

	/*
	 * Grab some memory for bad_page and bad_pagetable to use.
	 */
	empty_bad_page = alloc_bootmem_pages(PAGE_SIZE);
	empty_bad_page_table = alloc_bootmem_pages(PAGE_SIZE);

	/*
	 * All pages are DMA-able so we put them all in the DMA zone.
	 */
	zones_size[ZONE_DMA] = total_lowmem >> PAGE_SHIFT;
	for (i = 1; i < MAX_NR_ZONES; i++)
		zones_size[i] = 0;

#ifdef CONFIG_HIGHMEM
	zones_size[ZONE_HIGHMEM] = (total_memory - total_lowmem) >> PAGE_SHIFT;
#endif /* CONFIG_HIGHMEM */

	free_area_init(zones_size);
}
void __init mem_init(void)
{
	extern char *sysmap;
	extern unsigned long sysmap_size;
	unsigned long addr;
	int codepages = 0;
	int datapages = 0;
	int initpages = 0;
#ifdef CONFIG_HIGHMEM
	unsigned long highmem_mapnr;

	highmem_mapnr = total_lowmem >> PAGE_SHIFT;
	highmem_start_page = mem_map + highmem_mapnr;
	max_mapnr = total_memory >> PAGE_SHIFT;
	totalram_pages += max_mapnr - highmem_mapnr;
#else
	max_mapnr = max_low_pfn;
#endif /* CONFIG_HIGHMEM */

	high_memory = (void *) __va(max_low_pfn * PAGE_SIZE);
	num_physpages = max_mapnr;	/* RAM is assumed contiguous */

	totalram_pages += free_all_bootmem();

#ifdef CONFIG_BLK_DEV_INITRD
	/* if we are booted from BootX with an initial ramdisk,
	   make sure the ramdisk pages aren't reserved. */
	if (initrd_start) {
		for (addr = initrd_start; addr < initrd_end; addr += PAGE_SIZE)
			clear_bit(PG_reserved, &virt_to_page(addr)->flags);
	}
#endif /* CONFIG_BLK_DEV_INITRD */

#if defined(CONFIG_ALL_PPC)
	/* mark the RTAS pages as reserved */
	if ( rtas_data )
		for (addr = rtas_data; addr < PAGE_ALIGN(rtas_data+rtas_size) ;
		     addr += PAGE_SIZE)
			SetPageReserved(virt_to_page(addr));
#endif /* defined(CONFIG_ALL_PPC) */

	/* mark the sysmap pages as reserved */
	if ( sysmap )
		for (addr = (unsigned long)sysmap;
		     addr < PAGE_ALIGN((unsigned long)sysmap+sysmap_size) ;
		     addr += PAGE_SIZE)
			SetPageReserved(virt_to_page(addr));

	for (addr = PAGE_OFFSET; addr < (unsigned long)end_of_DRAM;
	     addr += PAGE_SIZE) {
		if (!PageReserved(virt_to_page(addr)))
			continue;
		if (addr < (ulong) etext)
			codepages++;
		else if (addr >= (unsigned long)&__init_begin
			 && addr < (unsigned long)&__init_end)
			initpages++;
		else if (addr < (ulong) klimit)
			datapages++;
	}

#ifdef CONFIG_HIGHMEM
	{
		unsigned long pfn;

		for (pfn = highmem_mapnr; pfn < max_mapnr; ++pfn) {
			struct page *page = mem_map + pfn;

			ClearPageReserved(page);
			set_bit(PG_highmem, &page->flags);
			atomic_set(&page->count, 1);
			__free_page(page);
			totalhigh_pages++;
		}
		totalram_pages += totalhigh_pages;
	}
#endif /* CONFIG_HIGHMEM */

	printk("Memory: %luk available (%dk kernel code, %dk data, %dk init, %ldk highmem)\n",
	       (unsigned long)nr_free_pages()<< (PAGE_SHIFT-10),
	       codepages<< (PAGE_SHIFT-10), datapages<< (PAGE_SHIFT-10),
	       initpages<< (PAGE_SHIFT-10),
	       (unsigned long) (totalhigh_pages << (PAGE_SHIFT-10)));
	mem_init_done = 1;
}
#if !defined(CONFIG_4xx) && !defined(CONFIG_8xx)
#if defined(CONFIG_ALL_PPC)
/*
 * On systems with Open Firmware, collect information about
 * physical RAM and which pieces are already in use.
 * At this point, we have (at least) the first 8MB mapped with a BAT.
 * Our text, data, bss use something over 1MB, starting at 0.
 * Open Firmware may be using 1MB at the 4MB point.
 */
unsigned long __init pmac_find_end_of_memory(void)
{
	unsigned long a, total;
	unsigned long ram_limit = 0xe0000000 - KERNELBASE;

	memory_node = find_devices("memory");
	if (memory_node == NULL) {
		printk(KERN_ERR "can't find memory node\n");
		abort();
	}

	/*
	 * Find out where physical memory is, and check that it
	 * starts at 0 and is contiguous.  It seems that RAM is
	 * always physically contiguous on Power Macintoshes,
	 * because MacOS can't cope if it isn't.
	 *
	 * Supporting discontiguous physical memory isn't hard,
	 * it just makes the virtual <-> physical mapping functions
	 * more complicated (or else you end up wasting space
	 * in mem_map).
	 */
	get_mem_prop("reg", &phys_mem);
	if (phys_mem.n_regions == 0)
		panic("No RAM??");
	a = phys_mem.regions[0].address;
	if (a != 0)
		panic("RAM doesn't start at physical address 0");
	if (__max_memory == 0 || __max_memory > ram_limit)
		__max_memory = ram_limit;
	if (phys_mem.regions[0].size >= __max_memory) {
		phys_mem.regions[0].size = __max_memory;
		phys_mem.n_regions = 1;
	}
	total = phys_mem.regions[0].size;

	if (phys_mem.n_regions > 1) {
		printk("RAM starting at 0x%x is not contiguous\n",
		       phys_mem.regions[1].address);
		printk("Using RAM from 0 to 0x%lx\n", total-1);
		phys_mem.n_regions = 1;
	}

	set_phys_avail(&phys_mem);
	return total;
}
#endif /* CONFIG_ALL_PPC */
#if defined(CONFIG_ALL_PPC)
/*
 * This finds the amount of physical ram and does necessary
 * setup for prep.  This is pretty architecture specific so
 * this will likely stay separate from the pmac.
 */
unsigned long __init prep_find_end_of_memory(void)
{
	unsigned long total;

	total = res->TotalMemory;

	if (total == 0) {
		/*
		 * I need a way to probe the amount of memory if the residual
		 * data doesn't contain it. -- Cort
		 */
		printk("Ramsize from residual data was 0 -- Probing for value\n");
		total = 0x02000000;
		printk("Ramsize default to be %ldM\n", total>>20);
	}
	mem_pieces_append(&phys_mem, 0, total);
	set_phys_avail(&phys_mem);

	return total;
}
#endif /* defined(CONFIG_ALL_PPC) */
#if defined(CONFIG_GEMINI)
unsigned long __init gemini_find_end_of_memory(void)
{
	unsigned long total;
	unsigned char reg;

	reg = readb(GEMINI_MEMCFG);
	total = ((1<<((reg & 0x7) - 1)) *
		 (8<<((reg >> 3) & 0x7)));
	total *= (1024*1024);
	phys_mem.regions[0].address = 0;
	phys_mem.regions[0].size = total;
	phys_mem.n_regions = 1;

	set_phys_avail(&phys_mem);
	return phys_mem.regions[0].size;
}
#endif /* defined(CONFIG_GEMINI) */
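/*
 * Worked example of the GEMINI_MEMCFG decode above, with a made-up
 * register value: reg = 0x0b gives (1 << ((0x0b & 7) - 1)) = 4 times
 * (8 << ((0x0b >> 3) & 7)) = 16, i.e. total = 64, scaled to 64MB.
 */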
#ifdef CONFIG_8260
unsigned long __init m8260_find_end_of_memory(void)
{
	bd_t *binfo;
	extern unsigned char __res[];

	binfo = (bd_t *)__res;

	phys_mem.regions[0].address = 0;
	phys_mem.regions[0].size = binfo->bi_memsize;
	phys_mem.n_regions = 1;

	set_phys_avail(&phys_mem);
	return phys_mem.regions[0].size;
}
#endif /* CONFIG_8260 */
#ifdef CONFIG_APUS
#define HARDWARE_MAPPED_SIZE	(512*1024)
unsigned long __init apus_find_end_of_memory(void)
{
	int shadow = 0;

	/* The memory size reported by ADOS excludes the 512KB
	   reserved for PPC exception registers and possibly 512KB
	   containing a shadow of the ADOS ROM. */
	{
		unsigned long size = memory[0].size;

		/* If 2MB aligned, size was probably user
		   specified. We can't tell anything about shadowing
		   in this case so skip shadow assignment. */
		if (0 != (size & 0x1fffff)) {
			/* Align to 512KB to ensure correct handling
			   of both memfile and system specified
			   sizes. */
			size = ((size+0x0007ffff) & 0xfff80000);
			/* If memory is 1MB aligned, assume
			   shadowing. */
			shadow = !(size & 0x80000);
		}

		/* Add the chunk that ADOS does not see, by aligning
		   the size to the nearest 2MB limit upwards. */
		memory[0].size = ((size+0x001fffff) & 0xffe00000);
	}

	/* Now register the memory block. */
	mem_pieces_append(&phys_mem, memory[0].addr, memory[0].size);
	set_phys_avail(&phys_mem);

	/* Remove the memory chunks that are controlled by special
	   hardware. */
	{
		unsigned long top = memory[0].addr + memory[0].size;

		/* Remove the upper 512KB if it contains a shadow of
		   the ADOS ROM. FIXME: It might be possible to
		   disable this shadow HW. Check the booter
		   (ppc_boot.c) */
		if (shadow) {
			top -= HARDWARE_MAPPED_SIZE;
			mem_pieces_remove(&phys_avail, top,
					  HARDWARE_MAPPED_SIZE, 0);
		}

		/* Remove the upper 512KB where the PPC exception
		   vectors are mapped. */
		top -= HARDWARE_MAPPED_SIZE;
#if 0
		/* This would be neat, but it breaks on A3000 machines!? */
		mem_pieces_remove(&phys_avail, top, 16384, 0);
#else
		mem_pieces_remove(&phys_avail, top, HARDWARE_MAPPED_SIZE, 0);
#endif
	}

	/* Linux/APUS only handles one block of memory -- the one on
	   the PowerUP board. Other system memory is horribly slow in
	   comparison. The user can use other memory for swapping
	   using the z2ram device. */
	return memory[0].addr + memory[0].size;
}
#endif /* CONFIG_APUS */
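/*
 * Worked example of the alignment logic above (made-up size): if ADOS
 * reports 0x3f00000 (63MB), then size & 0x1fffff != 0, rounding to a
 * 512KB boundary leaves 0x3f00000; bit 0x80000 is clear, so a ROM
 * shadow is assumed and memory[0].size becomes
 * (0x3f00000 + 0x1fffff) & 0xffe00000 = 0x4000000 (64MB).
 */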
/*
 * Initialize the hash table and patch the instructions in head.S.
 */
static void __init hash_init(void)
{
	int Hash_bits, mb, mb2;
	unsigned int hmask, ramsize, h;

	extern unsigned int hash_page_patch_A[], hash_page_patch_B[],
		hash_page_patch_C[], hash_page[];

	ramsize = (ulong)end_of_DRAM - KERNELBASE;
#ifdef CONFIG_PPC64BRIDGE
	/* The hash table has already been allocated and initialized
	   in prom.c */
	Hash_mask = (Hash_size >> 7) - 1;
	hmask = Hash_mask >> 9;
	Hash_bits = __ilog2(Hash_size) - 7;
	mb = 25 - Hash_bits;
	if (Hash_bits > 16)
		Hash_bits = 16;
	mb2 = 25 - Hash_bits;

#else /* CONFIG_PPC64BRIDGE */

	if ( ppc_md.progress ) ppc_md.progress("hash:enter", 0x105);
	/*
	 * Allow 64k of hash table for every 16MB of memory,
	 * up to a maximum of 2MB.
	 */
	for (h = 64<<10; h < ramsize / 256 && h < (2<<20); h *= 2)
		;
	Hash_size = h;
	Hash_mask = (h >> 6) - 1;
	hmask = Hash_mask >> 10;
	Hash_bits = __ilog2(h) - 6;
	mb = 26 - Hash_bits;
	if (Hash_bits > 16)
		Hash_bits = 16;
	mb2 = 26 - Hash_bits;

	/* shrink the htab since we don't use it on 603's -- Cort */
	switch (_get_PVR()>>16) {
	case 3: /* 603 */
	case 6: /* 603e */
	case 7: /* 603ev */
	case 0x0081: /* 82xx */
		Hash_size = 0;
		Hash_mask = 0;
		break;
	default:
		/* on 601/4 let things be */
		break;
	}

	if ( ppc_md.progress ) ppc_md.progress("hash:find piece", 0x322);
	/* Find some memory for the hash table. */
	if ( Hash_size ) {
		Hash = mem_pieces_find(Hash_size, Hash_size);
		cacheable_memzero(Hash, Hash_size);
	} else
		Hash = 0;
#endif /* CONFIG_PPC64BRIDGE */

	printk("Total memory = %dMB; using %ldkB for hash table (at %p)\n",
	       ramsize >> 20, Hash_size >> 10, Hash);
	if ( Hash_size )
	{
		if ( ppc_md.progress ) ppc_md.progress("hash:patch", 0x345);
		Hash_end = (PTE *) ((unsigned long)Hash + Hash_size);

		/*
		 * Patch up the instructions in head.S:hash_page
		 */
		hash_page_patch_A[0] = (hash_page_patch_A[0] & ~0xffff)
			| (__pa(Hash) >> 16);
		hash_page_patch_A[1] = (hash_page_patch_A[1] & ~0x7c0)
			| (mb << 6);
		hash_page_patch_A[2] = (hash_page_patch_A[2] & ~0x7c0)
			| (mb2 << 6);
		hash_page_patch_B[0] = (hash_page_patch_B[0] & ~0xffff)
			| hmask;
		hash_page_patch_C[0] = (hash_page_patch_C[0] & ~0xffff)
			| hmask;
#if 0	/* see hash_page in head.S, note also patch_C ref below */
		hash_page_patch_D[0] = (hash_page_patch_D[0] & ~0xffff)
			| hmask;
#endif
		/*
		 * Ensure that the locations we've patched have been written
		 * out from the data cache and invalidated in the instruction
		 * cache, on those machines with split caches.
		 */
		flush_icache_range((unsigned long) &hash_page_patch_A[0],
				   (unsigned long) &hash_page_patch_C[1]);
	}
	else {
		Hash_end = 0;
		/*
		 * Put a blr (procedure return) instruction at the
		 * start of hash_page, since we can still get DSI
		 * exceptions on a 603.
		 */
		hash_page[0] = 0x4e800020;
		flush_icache_range((unsigned long) &hash_page[0],
				   (unsigned long) &hash_page[1]);
	}
	if ( ppc_md.progress ) ppc_md.progress("hash:done", 0x205);
}
#elif defined(CONFIG_8xx)
/*
 * This is a big hack right now, but it may turn into something real
 * someday.
 *
 * For the 8xx boards (at this time anyway), there is nothing to initialize
 * associated with the PROM.  Rather than include all of the prom.c
 * functions in the image just to get prom_init, all we really need right
 * now is the initialization of the physical memory region.
 */
unsigned long __init m8xx_find_end_of_memory(void)
{
	bd_t *binfo;
	extern unsigned char __res[];

	binfo = (bd_t *)__res;

	phys_mem.regions[0].address = 0;
	phys_mem.regions[0].size = binfo->bi_memsize;
	phys_mem.n_regions = 1;

	set_phys_avail(&phys_mem);
	return phys_mem.regions[0].address + phys_mem.regions[0].size;
}
#endif /* !CONFIG_4xx && !CONFIG_8xx */
#ifdef CONFIG_4xx
/*
 * Return the virtual address representing the top of physical RAM.
 */
unsigned long __init
oak_find_end_of_memory(void)
{
	extern unsigned char __res[];

	bd_t *bip = (bd_t *)__res;

	phys_mem.regions[0].address = 0;
	phys_mem.regions[0].size = bip->bi_memsize;
	phys_mem.n_regions = 1;

	set_phys_avail(&phys_mem);
	return (phys_mem.regions[0].address + phys_mem.regions[0].size);
}
#endif /* CONFIG_4xx */
/*
 * Set phys_avail to phys_mem less the kernel text/data/bss.
 */
void __init
set_phys_avail(struct mem_pieces *mp)
{
	unsigned long kstart, ksize;

	/*
	 * Initially, available physical memory is equivalent to all
	 * physical memory.
	 */
	phys_avail = *mp;

	/*
	 * Map out the kernel text/data/bss from the available physical
	 * memory.
	 */
	kstart = __pa(_stext);	/* should be 0 */
	ksize = PAGE_ALIGN(klimit - _stext);
	mem_pieces_remove(&phys_avail, kstart, ksize, 0);
	mem_pieces_remove(&phys_avail, 0, 0x4000, 0);

#if defined(CONFIG_BLK_DEV_INITRD)
	/* Remove the init RAM disk from the available memory. */
	if (initrd_start) {
		mem_pieces_remove(&phys_avail, __pa(initrd_start),
				  initrd_end - initrd_start, 1);
	}
#endif /* CONFIG_BLK_DEV_INITRD */
#ifdef CONFIG_ALL_PPC
	/* remove the RTAS pages from the available memory */
	if (rtas_data)
		mem_pieces_remove(&phys_avail, rtas_data, rtas_size, 1);
#endif /* CONFIG_ALL_PPC */
#ifdef CONFIG_PPC64BRIDGE
	/* Remove the hash table from the available memory */
	if (Hash)
		mem_pieces_remove(&phys_avail, __pa(Hash), Hash_size, 1);
#endif /* CONFIG_PPC64BRIDGE */
}