/*
 * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 *
 * Modifications by Paul Mackerras (PowerMac) (paulus@cs.anu.edu.au)
 * and Cort Dougan (PReP) (cort@cs.nmt.edu)
 * Copyright (C) 1996 Paul Mackerras
 * Amiga/APUS changes by Jesper Skov (jskov@cygnus.co.uk).
 * PPC44x/36-bit changes by Matt Porter (mporter@mvista.com)
 *
 * Derived from "arch/i386/mm/init.c"
 * Copyright (C) 1991, 1992, 1993, 1994 Linus Torvalds
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */
#include <linux/config.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/mm.h>
#include <linux/stddef.h>
#include <linux/init.h>
#include <linux/bootmem.h>
#include <linux/highmem.h>
#include <linux/initrd.h>
#include <linux/pagemap.h>

#include <asm/pgalloc.h>
#include <asm/mmu_context.h>
#include <asm/pgtable.h>
#include <asm/machdep.h>
#include <asm/btext.h>
#include <asm/lmb.h>
#include <asm/sections.h>
#ifndef CPU_FTR_COHERENT_ICACHE
#define CPU_FTR_COHERENT_ICACHE	0	/* XXX for now */
#define CPU_FTR_NOEXECUTE	0
#endif
int init_bootmem_done;
unsigned long memory_limit;
/*
 * This is called by /dev/mem to know if a given address has to
 * be mapped non-cacheable or not.
 */
int page_is_ram(unsigned long pfn)
{
	unsigned long paddr = (pfn << PAGE_SHIFT);
#ifndef CONFIG_PPC64	/* XXX for now */
	return paddr < __pa(high_memory);
#else
	int i;

	for (i = 0; i < lmb.memory.cnt; i++) {
		unsigned long base = lmb.memory.region[i].base;

		if ((paddr >= base) &&
		    (paddr < (base + lmb.memory.region[i].size)))
			return 1;
	}

	return 0;
#endif
}
EXPORT_SYMBOL(page_is_ram);
pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
			      unsigned long size, pgprot_t vma_prot)
{
	if (ppc_md.phys_mem_access_prot)
		return ppc_md.phys_mem_access_prot(file, pfn, size, vma_prot);

	if (!page_is_ram(pfn))
		vma_prot = __pgprot(pgprot_val(vma_prot)
				    | _PAGE_GUARDED | _PAGE_NO_CACHE);
	return vma_prot;
}
EXPORT_SYMBOL(phys_mem_access_prot);
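
/*
 * Illustrative sketch, not part of this file: a /dev/mem style mmap()
 * handler is the intended caller of the hook above, roughly
 *
 *	vma->vm_page_prot = phys_mem_access_prot(file, vma->vm_pgoff,
 *					vma->vm_end - vma->vm_start,
 *					vma->vm_page_prot);
 *
 * so that mappings of physical addresses that are not RAM come back
 * guarded and non-cacheable.
 */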
#ifdef CONFIG_MEMORY_HOTPLUG

void online_page(struct page *page)
{
	ClearPageReserved(page);
	free_cold_page(page);
	totalram_pages++;
	num_physpages++;
}
/*
 * This works only for the non-NUMA case.  Later, we'll need a lookup
 * to convert from real physical addresses to nid, that doesn't use
 * ioremap & friends.
 */
int __devinit add_memory(u64 start, u64 size)
{
	struct pglist_data *pgdata = NODE_DATA(0);
	struct zone *zone;
	unsigned long start_pfn = start >> PAGE_SHIFT;
	unsigned long nr_pages = size >> PAGE_SHIFT;

	/* this should work for most non-highmem platforms */
	zone = pgdata->node_zones;

	return __add_pages(zone, start_pfn, nr_pages);
}
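
/*
 * Hedged usage sketch (not from this file): a platform hotplug path
 * would hand a newly probed, section-aligned range to add_memory()
 * above, then rely on the generic hotplug core to call online_page()
 * on each new struct page once the range is brought online.
 */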
/*
 * First pass at this code will check to determine if the remove
 * request is within the RMO.  Do not allow removal within the RMO.
 */
int __devinit remove_memory(u64 start, u64 size)
{
	struct zone *zone;
	unsigned long start_pfn, end_pfn, nr_pages;

	start_pfn = start >> PAGE_SHIFT;
	nr_pages = size >> PAGE_SHIFT;
	end_pfn = start_pfn + nr_pages;

	printk("%s(): Attempting to remove memory in range "
			"%lx to %lx\n", __func__, start, start + size);

	/*
	 * check for range within RMO
	 */
	zone = page_zone(pfn_to_page(start_pfn));

	printk("%s(): memory will be removed from "
			"the %s zone\n", __func__, zone->name);

	/*
	 * not handling removing memory ranges that
	 * overlap multiple zones yet
	 */
	if (end_pfn > (zone->zone_start_pfn + zone->spanned_pages))
		goto overlap;

	/* make sure it is NOT in RMO */
	if ((start < lmb.rmo_size) || ((start + size) < lmb.rmo_size)) {
		printk("%s(): range to be removed must NOT be in RMO!\n",
			__func__);
		goto in_rmo;
	}

	return __remove_pages(zone, start_pfn, nr_pages);

overlap:
	printk("%s(): memory range to be removed overlaps "
		"multiple zones!!!\n", __func__);
in_rmo:
	return -1;
}
#endif /* CONFIG_MEMORY_HOTPLUG */
void show_mem(void)
{
	unsigned long total = 0, reserved = 0;
	unsigned long shared = 0, cached = 0;
	unsigned long highmem = 0;
	struct page *page;
	pg_data_t *pgdat;
	unsigned long i;

	printk("Mem-info:\n");
	show_free_areas();
	printk("Free swap: %6ldkB\n", nr_swap_pages<<(PAGE_SHIFT-10));
	for_each_pgdat(pgdat) {
		unsigned long flags;

		pgdat_resize_lock(pgdat, &flags);
		for (i = 0; i < pgdat->node_spanned_pages; i++) {
			page = pgdat_page_nr(pgdat, i);
			total++;
			if (PageHighMem(page))
				highmem++;
			if (PageReserved(page))
				reserved++;
			else if (PageSwapCache(page))
				cached++;
			else if (page_count(page))
				shared += page_count(page) - 1;
		}
		pgdat_resize_unlock(pgdat, &flags);
	}
	printk("%ld pages of RAM\n", total);
#ifdef CONFIG_HIGHMEM
	printk("%ld pages of HIGHMEM\n", highmem);
#endif
	printk("%ld reserved pages\n", reserved);
	printk("%ld pages shared\n", shared);
	printk("%ld pages swap cached\n", cached);
}
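
/*
 * show_mem() is normally reached from the SysRq 'm' handler or from
 * out-of-memory reporting; the walk above is only a snapshot,
 * serialised per node by pgdat_resize_lock() and nothing stronger.
 */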
/*
 * Initialize the bootmem system and give it all the memory we
 * have available.  If we are using highmem, we only put the
 * lowmem into the bootmem system.
 */
#ifndef CONFIG_NEED_MULTIPLE_NODES
void __init do_init_bootmem(void)
{
	unsigned long i;
	unsigned long start, bootmap_pages;
	unsigned long total_pages;
	int boot_mapsize;

	max_pfn = total_pages = lmb_end_of_DRAM() >> PAGE_SHIFT;
#ifdef CONFIG_HIGHMEM
	total_pages = total_lowmem >> PAGE_SHIFT;
#endif

	/*
	 * Find an area to use for the bootmem bitmap.  Calculate the size of
	 * bitmap required as (Total Memory) / PAGE_SIZE / BITS_PER_BYTE.
	 * Add 1 additional page in case the address isn't page-aligned.
	 */
	bootmap_pages = bootmem_bootmap_pages(total_pages);
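
	/*
	 * Worked example (illustrative numbers): with 4kB pages, 512MB
	 * of lowmem is 131072 page frames; at one bit per frame the
	 * bitmap is 131072 / 8 = 16kB, i.e. four pages, plus the one
	 * page of slack described above.
	 */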
	start = lmb_alloc(bootmap_pages << PAGE_SHIFT, PAGE_SIZE);
	BUG_ON(!start);

	boot_mapsize = init_bootmem(start >> PAGE_SHIFT, total_pages);

	/* Add all physical memory to the bootmem map, mark each area
	 * present.
	 */
	for (i = 0; i < lmb.memory.cnt; i++) {
		unsigned long base = lmb.memory.region[i].base;
		unsigned long size = lmb_size_bytes(&lmb.memory, i);
#ifdef CONFIG_HIGHMEM
		if (base >= total_lowmem)
			continue;
		if (base + size > total_lowmem)
			size = total_lowmem - base;
#endif
		free_bootmem(base, size);
	}

	/* reserve the sections we're already using */
	for (i = 0; i < lmb.reserved.cnt; i++)
		reserve_bootmem(lmb.reserved.region[i].base,
				lmb_size_bytes(&lmb.reserved, i));

	/* XXX need to clip this if using highmem? */
	for (i = 0; i < lmb.memory.cnt; i++)
		memory_present(0, lmb_start_pfn(&lmb.memory, i),
			       lmb_end_pfn(&lmb.memory, i));

	init_bootmem_done = 1;
}
/*
 * paging_init() sets up the page tables - in fact we've already done this.
 */
void __init paging_init(void)
{
	unsigned long zones_size[MAX_NR_ZONES];
	unsigned long zholes_size[MAX_NR_ZONES];
	unsigned long total_ram = lmb_phys_mem_size();
	unsigned long top_of_ram = lmb_end_of_DRAM();

#ifdef CONFIG_HIGHMEM
	map_page(PKMAP_BASE, 0, 0);	/* XXX gross */
	pkmap_page_table = pte_offset_kernel(pmd_offset(pgd_offset_k
			(PKMAP_BASE), PKMAP_BASE), PKMAP_BASE);
	map_page(KMAP_FIX_BEGIN, 0, 0);	/* XXX gross */
	kmap_pte = pte_offset_kernel(pmd_offset(pgd_offset_k
			(KMAP_FIX_BEGIN), KMAP_FIX_BEGIN), KMAP_FIX_BEGIN);
	kmap_prot = PAGE_KERNEL;
#endif /* CONFIG_HIGHMEM */

	printk(KERN_INFO "Top of RAM: 0x%lx, Total RAM: 0x%lx\n",
	       top_of_ram, total_ram);
	printk(KERN_INFO "Memory hole size: %ldMB\n",
	       (top_of_ram - total_ram) >> 20);
	/*
	 * All pages are DMA-able so we put them all in the DMA zone.
	 */
	memset(zones_size, 0, sizeof(zones_size));
	memset(zholes_size, 0, sizeof(zholes_size));

	zones_size[ZONE_DMA] = top_of_ram >> PAGE_SHIFT;
	zholes_size[ZONE_DMA] = (top_of_ram - total_ram) >> PAGE_SHIFT;

#ifdef CONFIG_HIGHMEM
	zones_size[ZONE_DMA] = total_lowmem >> PAGE_SHIFT;
	zones_size[ZONE_HIGHMEM] = (total_memory - total_lowmem) >> PAGE_SHIFT;
	zholes_size[ZONE_HIGHMEM] = (top_of_ram - total_ram) >> PAGE_SHIFT;
#else
	zones_size[ZONE_DMA] = top_of_ram >> PAGE_SHIFT;
	zholes_size[ZONE_DMA] = (top_of_ram - total_ram) >> PAGE_SHIFT;
#endif /* CONFIG_HIGHMEM */

	free_area_init_node(0, NODE_DATA(0), zones_size,
			    __pa(PAGE_OFFSET) >> PAGE_SHIFT, zholes_size);
}
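
/*
 * Illustrative numbers: a flat 512MB machine without highmem gets
 * zones_size[ZONE_DMA] = 131072 pages and zholes_size[ZONE_DMA] = 0;
 * if the same 512MB were split around a 256MB I/O hole, top_of_ram
 * would be 768MB and zholes_size[ZONE_DMA] would be 65536 pages.
 */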
#endif /* ! CONFIG_NEED_MULTIPLE_NODES */
void __init mem_init(void)
{
#ifdef CONFIG_NEED_MULTIPLE_NODES
	int nid;
#endif
	pg_data_t *pgdat;
	unsigned long i;
	struct page *page;
	unsigned long reservedpages = 0, codesize, initsize, datasize, bsssize;

	num_physpages = max_pfn;	/* RAM is assumed contiguous */
	high_memory = (void *) __va(max_low_pfn * PAGE_SIZE);

#ifdef CONFIG_NEED_MULTIPLE_NODES
	for_each_online_node(nid) {
		if (NODE_DATA(nid)->node_spanned_pages != 0) {
			printk("freeing bootmem node %x\n", nid);
			totalram_pages +=
				free_all_bootmem_node(NODE_DATA(nid));
		}
	}
#else
	max_mapnr = num_physpages;
	totalram_pages += free_all_bootmem();
#endif
	for_each_pgdat(pgdat) {
		for (i = 0; i < pgdat->node_spanned_pages; i++) {
			page = pgdat_page_nr(pgdat, i);
			if (PageReserved(page))
				reservedpages++;
		}
	}

	codesize = (unsigned long)&_sdata - (unsigned long)&_stext;
	datasize = (unsigned long)&__init_begin - (unsigned long)&_sdata;
	initsize = (unsigned long)&__init_end - (unsigned long)&__init_begin;
	bsssize = (unsigned long)&__bss_stop - (unsigned long)&__bss_start;
#ifdef CONFIG_HIGHMEM
	{
		unsigned long pfn, highmem_mapnr;

		highmem_mapnr = total_lowmem >> PAGE_SHIFT;
		for (pfn = highmem_mapnr; pfn < max_mapnr; ++pfn) {
			struct page *page = pfn_to_page(pfn);

			ClearPageReserved(page);
			set_page_count(page, 1);
			__free_page(page);
			totalhigh_pages++;
		}
		totalram_pages += totalhigh_pages;
		printk(KERN_INFO "High memory: %luk\n",
		       totalhigh_pages << (PAGE_SHIFT-10));
	}
#endif /* CONFIG_HIGHMEM */
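
	/*
	 * The loop above is the usual idiom for handing highmem to the
	 * buddy allocator at boot: each page still carries PG_reserved
	 * from setup, so it is unreserved, given a reference count of
	 * one, and then __free_page() drops that reference, putting
	 * the page on the free lists.
	 */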
	printk(KERN_INFO "Memory: %luk/%luk available (%luk kernel code, "
	       "%luk reserved, %luk data, %luk bss, %luk init)\n",
		(unsigned long)nr_free_pages() << (PAGE_SHIFT-10),
		num_physpages << (PAGE_SHIFT-10),
		codesize >> 10,
		reservedpages << (PAGE_SHIFT-10),
		datasize >> 10,
		bsssize >> 10,
		initsize >> 10);

#ifdef CONFIG_PPC64
	/* Initialize the vDSO */
	vdso_init();
#endif
}
/*
 * This is called when a page has been modified by the kernel.
 * It just marks the page as not i-cache clean.  We do the i-cache
 * flush later when the page is given to a user process, if necessary.
 */
void flush_dcache_page(struct page *page)
{
	if (cpu_has_feature(CPU_FTR_COHERENT_ICACHE))
		return;
	/* avoid an atomic op if possible */
	if (test_bit(PG_arch_1, &page->flags))
		clear_bit(PG_arch_1, &page->flags);
}
EXPORT_SYMBOL(flush_dcache_page);
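
/*
 * How the PG_arch_1 handshake fits together (summary of the code in
 * this file): PG_arch_1 set means "i-cache is clean for this page".
 * flush_dcache_page() and clear_user_page() clear it whenever the
 * kernel dirties a page; update_mmu_cache() below flushes and re-sets
 * it the first time the page is mapped into a user process on a CPU
 * without a coherent i-cache.
 */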
void flush_dcache_icache_page(struct page *page)
{
#ifdef CONFIG_BOOKE
	void *start = kmap_atomic(page, KM_PPC_SYNC_ICACHE);
	__flush_dcache_icache(start);
	kunmap_atomic(start, KM_PPC_SYNC_ICACHE);
#elif defined(CONFIG_8xx) || defined(CONFIG_PPC64)
	/* On 8xx there is no need to kmap since highmem is not supported */
	__flush_dcache_icache(page_address(page));
#else
	__flush_dcache_icache_phys(page_to_pfn(page) << PAGE_SHIFT);
#endif
}
void clear_user_page(void *page, unsigned long vaddr, struct page *pg)
{
	clear_page(page);

	if (cpu_has_feature(CPU_FTR_COHERENT_ICACHE))
		return;
	/*
	 * We shouldn't have to do this, but some versions of glibc
	 * require it (ld.so assumes zero-filled pages are icache clean)
	 */

	/* avoid an atomic op if possible */
	if (test_bit(PG_arch_1, &pg->flags))
		clear_bit(PG_arch_1, &pg->flags);
}
EXPORT_SYMBOL(clear_user_page);
void copy_user_page(void *vto, void *vfrom, unsigned long vaddr,
		    struct page *pg)
{
	copy_page(vto, vfrom);

	/*
	 * We should be able to use the following optimisation, however
	 * there are two problems.
	 * Firstly a bug in some versions of binutils meant PLT sections
	 * were not marked executable.
	 * Secondly the first word in the GOT section is blrl, used
	 * to establish the GOT address.  Until recently the GOT was
	 * not marked executable.
	 */
#if 0
	if (!vma->vm_file && ((vma->vm_flags & VM_EXEC) == 0))
		return;
#endif

	if (cpu_has_feature(CPU_FTR_COHERENT_ICACHE))
		return;

	/* avoid an atomic op if possible */
	if (test_bit(PG_arch_1, &pg->flags))
		clear_bit(PG_arch_1, &pg->flags);
}
void flush_icache_user_range(struct vm_area_struct *vma, struct page *page,
			     unsigned long addr, int len)
{
	unsigned long maddr;

	maddr = (unsigned long) kmap(page) + (addr & ~PAGE_MASK);
	flush_icache_range(maddr, maddr + len);
	kunmap(page);
}
EXPORT_SYMBOL(flush_icache_user_range);
/*
 * This is called at the end of handling a user page fault, when the
 * fault has been handled by updating a PTE in the linux page tables.
 * We use it to preload an HPTE into the hash table corresponding to
 * the updated linux PTE.
 *
 * This must always be called with the mm->page_table_lock held.
 */
void update_mmu_cache(struct vm_area_struct *vma, unsigned long address,
		      pte_t pte)
{
	/* handle i-cache coherency */
	unsigned long pfn = pte_pfn(pte);
#ifdef CONFIG_PPC32
	pmd_t *pmd;
#else
	unsigned long vsid, flags;
	pgd_t *pgdir;
	pte_t *ptep;
	int local = 0;
	cpumask_t tmp;
#endif

	/* handle i-cache coherency */
	if (!cpu_has_feature(CPU_FTR_COHERENT_ICACHE) &&
	    !cpu_has_feature(CPU_FTR_NOEXECUTE) &&
	    pfn_valid(pfn)) {
		struct page *page = pfn_to_page(pfn);

		if (!PageReserved(page)
		    && !test_bit(PG_arch_1, &page->flags)) {
			if (vma->vm_mm == current->active_mm) {
#ifdef CONFIG_8xx
			/* On 8xx, cache control instructions (particularly
			 * "dcbst" from flush_dcache_icache) fault as write
			 * operation if there is an unpopulated TLB entry
			 * for the address in question. To workaround that,
			 * we invalidate the TLB here, thus avoiding dcbst
			 * misbehaviour.
			 */
				_tlbie(address);
#endif
				__flush_dcache_icache((void *) address);
			} else
				flush_dcache_icache_page(page);
			set_bit(PG_arch_1, &page->flags);
		}
	}

#ifdef CONFIG_PPC_STD_MMU
	/* We only want HPTEs for linux PTEs that have _PAGE_ACCESSED set */
	if (!pte_young(pte) || address >= TASK_SIZE)
		return;
#ifdef CONFIG_PPC32
	pmd = pmd_offset(pgd_offset(vma->vm_mm, address), address);
	if (!pmd_none(*pmd))
		add_hash_page(vma->vm_mm->context, address, pmd_val(*pmd));
#else
	pgdir = vma->vm_mm->pgd;
	if (pgdir == NULL)
		return;

	ptep = find_linux_pte(pgdir, address);
	if (!ptep)
		return;

	vsid = get_vsid(vma->vm_mm->context.id, address);

	local_irq_save(flags);
	tmp = cpumask_of_cpu(smp_processor_id());
	if (cpus_equal(vma->vm_mm->cpu_vm_mask, tmp))
		local = 1;

	__hash_page(address, 0, vsid, ptep, 0x300, local);
	local_irq_restore(flags);
#endif
#endif /* CONFIG_PPC_STD_MMU */
}
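
/*
 * Rough call sequence for the preload above (a sketch inferred from
 * this file rather than a contract): handle_mm_fault() installs or
 * updates the linux PTE, then calls update_mmu_cache(), which on
 * hash-MMU machines pushes the new translation straight into the
 * hash table via add_hash_page()/__hash_page(), so the first user
 * access does not take an extra hash-miss fault.
 */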