/*
 *  PowerPC version
 *    Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 *
 *  Modifications by Paul Mackerras (PowerMac) (paulus@cs.anu.edu.au)
 *  and Cort Dougan (PReP) (cort@cs.nmt.edu)
 *    Copyright (C) 1996 Paul Mackerras
 *  PPC44x/36-bit changes by Matt Porter (mporter@mvista.com)
 *
 *  Derived from "arch/i386/mm/init.c"
 *    Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
 *
 *  This program is free software; you can redistribute it and/or
 *  modify it under the terms of the GNU General Public License
 *  as published by the Free Software Foundation; either version
 *  2 of the License, or (at your option) any later version.
 *
 */
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/mm.h>
#include <linux/stddef.h>
#include <linux/init.h>
#include <linux/bootmem.h>
#include <linux/highmem.h>
#include <linux/initrd.h>
#include <linux/pagemap.h>
#include <linux/suspend.h>
#include <linux/lmb.h>

#include <asm/pgalloc.h>
#include <asm/prom.h>
#include <asm/io.h>
#include <asm/mmu_context.h>
#include <asm/pgtable.h>
#include <asm/mmu.h>
#include <asm/smp.h>
#include <asm/machdep.h>
#include <asm/btext.h>
#include <asm/tlb.h>
#include <asm/sections.h>

#include "mmu_decl.h"
#ifndef CPU_FTR_COHERENT_ICACHE
#define CPU_FTR_COHERENT_ICACHE	0	/* XXX for now */
#define CPU_FTR_NOEXECUTE	0
#endif
int init_bootmem_done;
int mem_init_done;
unsigned long memory_limit;
int page_is_ram(unsigned long pfn)
{
	unsigned long paddr = (pfn << PAGE_SHIFT);

#ifndef CONFIG_PPC64	/* XXX for now */
	return paddr < __pa(high_memory);
#else
	int i;

	for (i = 0; i < lmb.memory.cnt; i++) {
		unsigned long base;

		base = lmb.memory.region[i].base;

		if ((paddr >= base) &&
		    (paddr < (base + lmb.memory.region[i].size))) {
			return 1;
		}
	}

	return 0;
#endif
}
pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
			      unsigned long size, pgprot_t vma_prot)
{
	if (ppc_md.phys_mem_access_prot)
		return ppc_md.phys_mem_access_prot(file, pfn, size, vma_prot);

	if (!page_is_ram(pfn))
		vma_prot = __pgprot(pgprot_val(vma_prot)
				    | _PAGE_GUARDED | _PAGE_NO_CACHE);
	return vma_prot;
}
EXPORT_SYMBOL(phys_mem_access_prot);
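
/*
 * Illustration (an assumed scenario, not part of the original file): when
 * user space mmap()s /dev/mem over a non-RAM range such as MMIO registers,
 * page_is_ram() fails and the mapping above picks up _PAGE_GUARDED |
 * _PAGE_NO_CACHE, i.e. the guarded, cache-inhibited access that device
 * memory requires on PowerPC; RAM mappings keep the caller's protection.
 */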
#ifdef CONFIG_MEMORY_HOTPLUG

void online_page(struct page *page)
{
	ClearPageReserved(page);
	init_page_count(page);
	__free_page(page);
	totalram_pages++;
	num_physpages++;
}
#ifdef CONFIG_NUMA
int memory_add_physaddr_to_nid(u64 start)
{
	return hot_add_scn_to_nid(start);
}
#endif
int arch_add_memory(int nid, u64 start, u64 size)
{
	struct pglist_data *pgdata;
	struct zone *zone;
	unsigned long start_pfn = start >> PAGE_SHIFT;
	unsigned long nr_pages = size >> PAGE_SHIFT;

	pgdata = NODE_DATA(nid);

	start = (unsigned long)__va(start);
	create_section_mapping(start, start + size);

	/* this should work for most non-highmem platforms */
	zone = pgdata->node_zones;

	return __add_pages(zone, start_pfn, nr_pages);
}
#ifdef CONFIG_MEMORY_HOTREMOVE
int remove_memory(u64 start, u64 size)
{
	unsigned long start_pfn, end_pfn;
	int ret;

	start_pfn = start >> PAGE_SHIFT;
	end_pfn = start_pfn + (size >> PAGE_SHIFT);
	ret = offline_pages(start_pfn, end_pfn, 120 * HZ);
	if (ret)
		goto out;
	/* Arch-specific calls go here - next patch */
out:
	return ret;
}
#endif /* CONFIG_MEMORY_HOTREMOVE */
/*
 * walk_memory_resource() needs to make sure there are no holes in a given
 * memory range.  On PPC64, since this range comes from /sysfs, the range
 * is guaranteed to be valid, non-overlapping and cannot contain any
 * holes.  By the time we get here (memory add or remove), /proc/device-tree
 * is updated and correct.  The only reason we would need to check against
 * the device tree is if we allowed user-land to specify a memory range
 * through a system call/ioctl etc. instead of doing offline/online through
 * /sysfs.
 */
int
walk_memory_resource(unsigned long start_pfn, unsigned long nr_pages, void *arg,
			int (*func)(unsigned long, unsigned long, void *))
{
	return (*func)(start_pfn, nr_pages, arg);
}
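
/*
 * Usage sketch (a hypothetical callback, not part of the original file):
 * because the range is known to be hole-free, the whole span can be handed
 * to a single callback invocation, e.g.
 *
 *	static int count_pages(unsigned long pfn, unsigned long nr, void *arg)
 *	{
 *		*(unsigned long *)arg += nr;	// whole range, no holes
 *		return 0;
 *	}
 *
 *	walk_memory_resource(start_pfn, nr_pages, &total, count_pages);
 */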
#endif /* CONFIG_MEMORY_HOTPLUG */
void show_mem(void)
{
	unsigned long total = 0, reserved = 0;
	unsigned long shared = 0, cached = 0;
	unsigned long highmem = 0;
	struct page *page;
	pg_data_t *pgdat;
	unsigned long i;

	printk("Mem-info:\n");
	show_free_areas();
	for_each_online_pgdat(pgdat) {
		unsigned long flags;
		pgdat_resize_lock(pgdat, &flags);
		for (i = 0; i < pgdat->node_spanned_pages; i++) {
			if (!pfn_valid(pgdat->node_start_pfn + i))
				continue;
			page = pgdat_page_nr(pgdat, i);
			total++;
			if (PageHighMem(page))
				highmem++;
			if (PageReserved(page))
				reserved++;
			else if (PageSwapCache(page))
				cached++;
			else if (page_count(page))
				shared += page_count(page) - 1;
		}
		pgdat_resize_unlock(pgdat, &flags);
	}
	printk("%ld pages of RAM\n", total);
#ifdef CONFIG_HIGHMEM
	printk("%ld pages of HIGHMEM\n", highmem);
#endif
	printk("%ld reserved pages\n", reserved);
	printk("%ld pages shared\n", shared);
	printk("%ld pages swap cached\n", cached);
}
/*
 * Initialize the bootmem system and give it all the memory we
 * have available.  If we are using highmem, we only put the
 * lowmem into the bootmem system.
 */
#ifndef CONFIG_NEED_MULTIPLE_NODES
void __init do_init_bootmem(void)
{
	unsigned long i;
	unsigned long start, bootmap_pages;
	unsigned long total_pages;
	int boot_mapsize;

	max_pfn = lmb_end_of_DRAM() >> PAGE_SHIFT;
	total_pages = (lmb_end_of_DRAM() - memstart_addr) >> PAGE_SHIFT;
#ifdef CONFIG_HIGHMEM
	total_pages = total_lowmem >> PAGE_SHIFT;
	max_low_pfn = lowmem_end_addr >> PAGE_SHIFT;
#endif
	/*
	 * Find an area to use for the bootmem bitmap.  Calculate the size of
	 * bitmap required as (Total Memory) / PAGE_SIZE / BITS_PER_BYTE.
	 * Add 1 additional page in case the address isn't page-aligned.
	 */
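	/*
	 * Worked example (illustrative numbers, not from this file): with
	 * 512MB of memory and 4KB pages, total_pages = 131072.  One bit per
	 * page gives 131072 / 8 = 16KB of bitmap, i.e. 4 pages, with the
	 * extra page above covering a misaligned start address.
	 */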
	bootmap_pages = bootmem_bootmap_pages(total_pages);

	start = lmb_alloc(bootmap_pages << PAGE_SHIFT, PAGE_SIZE);

	boot_mapsize = init_bootmem(start >> PAGE_SHIFT, total_pages);

	/* Add active regions with valid PFNs */
	for (i = 0; i < lmb.memory.cnt; i++) {
		unsigned long start_pfn, end_pfn;
		start_pfn = lmb.memory.region[i].base >> PAGE_SHIFT;
		end_pfn = start_pfn + lmb_size_pages(&lmb.memory, i);
		add_active_range(0, start_pfn, end_pfn);
	}
	/* Add all physical memory to the bootmem map, mark each area
	 * present.
	 */
#ifdef CONFIG_HIGHMEM
	free_bootmem_with_active_regions(0, lowmem_end_addr >> PAGE_SHIFT);

	/* reserve the sections we're already using */
	for (i = 0; i < lmb.reserved.cnt; i++) {
		unsigned long addr = lmb.reserved.region[i].base +
				     lmb_size_bytes(&lmb.reserved, i) - 1;
		if (addr < lowmem_end_addr)
			reserve_bootmem(lmb.reserved.region[i].base,
					lmb_size_bytes(&lmb.reserved, i),
					BOOTMEM_DEFAULT);
		else if (lmb.reserved.region[i].base < lowmem_end_addr) {
			unsigned long adjusted_size = lowmem_end_addr -
				lmb.reserved.region[i].base;
			reserve_bootmem(lmb.reserved.region[i].base,
					adjusted_size, BOOTMEM_DEFAULT);
		}
	}
#else
	free_bootmem_with_active_regions(0, max_pfn);

	/* reserve the sections we're already using */
	for (i = 0; i < lmb.reserved.cnt; i++)
		reserve_bootmem(lmb.reserved.region[i].base,
				lmb_size_bytes(&lmb.reserved, i),
				BOOTMEM_DEFAULT);
#endif

	/* XXX need to clip this if using highmem? */
	sparse_memory_present_with_active_regions(0);

	init_bootmem_done = 1;
}
/* mark pages that don't exist as nosave */
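/*
 * Example (a hypothetical layout, for illustration only): with LMB regions
 * [0, 256MB) and [512MB, 1GB), the loop below sees a gap between region 0's
 * last PFN and region 1's first PFN and registers the PFN range covering
 * [256MB, 512MB) via register_nosave_region(), so hibernation never tries
 * to save pages that don't exist.
 */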
static int __init mark_nonram_nosave(void)
{
	unsigned long lmb_next_region_start_pfn,
		      lmb_region_max_pfn;
	int i;

	for (i = 0; i < lmb.memory.cnt - 1; i++) {
		lmb_region_max_pfn =
			(lmb.memory.region[i].base >> PAGE_SHIFT) +
			(lmb.memory.region[i].size >> PAGE_SHIFT);
		lmb_next_region_start_pfn =
			lmb.memory.region[i+1].base >> PAGE_SHIFT;

		if (lmb_region_max_pfn < lmb_next_region_start_pfn)
			register_nosave_region(lmb_region_max_pfn,
					       lmb_next_region_start_pfn);
	}

	return 0;
}
/*
 * paging_init() sets up the page tables - in fact we've already done this.
 */
void __init paging_init(void)
{
	unsigned long total_ram = lmb_phys_mem_size();
	unsigned long top_of_ram = lmb_end_of_DRAM();
	unsigned long max_zone_pfns[MAX_NR_ZONES];

#ifdef CONFIG_HIGHMEM
	map_page(PKMAP_BASE, 0, 0);	/* XXX gross */
	pkmap_page_table = pte_offset_kernel(pmd_offset(pud_offset(pgd_offset_k
			(PKMAP_BASE), PKMAP_BASE), PKMAP_BASE), PKMAP_BASE);
	map_page(KMAP_FIX_BEGIN, 0, 0);	/* XXX gross */
	kmap_pte = pte_offset_kernel(pmd_offset(pud_offset(pgd_offset_k
			(KMAP_FIX_BEGIN), KMAP_FIX_BEGIN), KMAP_FIX_BEGIN),
			KMAP_FIX_BEGIN);
	kmap_prot = PAGE_KERNEL;
#endif /* CONFIG_HIGHMEM */

	printk(KERN_DEBUG "Top of RAM: 0x%lx, Total RAM: 0x%lx\n",
	       top_of_ram, total_ram);
	printk(KERN_DEBUG "Memory hole size: %ldMB\n",
	       (top_of_ram - total_ram) >> 20);
	memset(max_zone_pfns, 0, sizeof(max_zone_pfns));
#ifdef CONFIG_HIGHMEM
	max_zone_pfns[ZONE_DMA] = lowmem_end_addr >> PAGE_SHIFT;
	max_zone_pfns[ZONE_HIGHMEM] = top_of_ram >> PAGE_SHIFT;
#else
	max_zone_pfns[ZONE_DMA] = top_of_ram >> PAGE_SHIFT;
#endif
	free_area_init_nodes(max_zone_pfns);

	mark_nonram_nosave();
}
#endif /* ! CONFIG_NEED_MULTIPLE_NODES */
void __init mem_init(void)
{
#ifdef CONFIG_NEED_MULTIPLE_NODES
	int nid;
#endif
	pg_data_t *pgdat;
	unsigned long i;
	struct page *page;
	unsigned long reservedpages = 0, codesize, initsize, datasize, bsssize;

	num_physpages = lmb.memory.size >> PAGE_SHIFT;
	high_memory = (void *) __va(max_low_pfn * PAGE_SIZE);

#ifdef CONFIG_NEED_MULTIPLE_NODES
	for_each_online_node(nid) {
		if (NODE_DATA(nid)->node_spanned_pages != 0) {
			printk("freeing bootmem node %d\n", nid);
			totalram_pages +=
				free_all_bootmem_node(NODE_DATA(nid));
		}
	}
#else
	max_mapnr = max_pfn;
	totalram_pages += free_all_bootmem();
#endif
	for_each_online_pgdat(pgdat) {
		for (i = 0; i < pgdat->node_spanned_pages; i++) {
			if (!pfn_valid(pgdat->node_start_pfn + i))
				continue;
			page = pgdat_page_nr(pgdat, i);
			if (PageReserved(page))
				reservedpages++;
		}
	}

	codesize = (unsigned long)&_sdata - (unsigned long)&_stext;
	datasize = (unsigned long)&_edata - (unsigned long)&_sdata;
	initsize = (unsigned long)&__init_end - (unsigned long)&__init_begin;
	bsssize = (unsigned long)&__bss_stop - (unsigned long)&__bss_start;

#ifdef CONFIG_HIGHMEM
	{
		unsigned long pfn, highmem_mapnr;

		highmem_mapnr = lowmem_end_addr >> PAGE_SHIFT;
		for (pfn = highmem_mapnr; pfn < max_mapnr; ++pfn) {
			struct page *page = pfn_to_page(pfn);
			if (lmb_is_reserved(pfn << PAGE_SHIFT))
				continue;
			ClearPageReserved(page);
			init_page_count(page);
			__free_page(page);
			totalhigh_pages++;
		}
		totalram_pages += totalhigh_pages;
		printk(KERN_DEBUG "High memory: %luk\n",
		       totalhigh_pages << (PAGE_SHIFT-10));
	}
#endif /* CONFIG_HIGHMEM */

	/* shifting by (PAGE_SHIFT-10) converts a page count to kilobytes */
	printk(KERN_INFO "Memory: %luk/%luk available (%luk kernel code, "
	       "%luk reserved, %luk data, %luk bss, %luk init)\n",
	       (unsigned long)nr_free_pages() << (PAGE_SHIFT-10),
	       num_physpages << (PAGE_SHIFT-10),
	       codesize >> 10,
	       reservedpages << (PAGE_SHIFT-10),
	       datasize >> 10,
	       bsssize >> 10,
	       initsize >> 10);

	mem_init_done = 1;
}
/*
 * This is called when a page has been modified by the kernel.
 * It just marks the page as not i-cache clean.  We do the i-cache
 * flush later when the page is given to a user process, if necessary.
 */
void flush_dcache_page(struct page *page)
{
	if (cpu_has_feature(CPU_FTR_COHERENT_ICACHE))
		return;
	/* avoid an atomic op if possible */
	if (test_bit(PG_arch_1, &page->flags))
		clear_bit(PG_arch_1, &page->flags);
}
EXPORT_SYMBOL(flush_dcache_page);
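
/*
 * Sketch of the flag protocol (a summary of the code above and of
 * update_mmu_cache() below, not new behaviour): PG_arch_1 means "i-cache
 * clean".  A kernel write clears the bit cheaply here; the first time the
 * page is then mapped into a user process, update_mmu_cache() performs the
 * real __flush_dcache_icache() and sets the bit again, so pages that are
 * already clean are never flushed twice.
 */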
void flush_dcache_icache_page(struct page *page)
{
#ifdef CONFIG_BOOKE
	void *start = kmap_atomic(page, KM_PPC_SYNC_ICACHE);
	__flush_dcache_icache(start);
	kunmap_atomic(start, KM_PPC_SYNC_ICACHE);
#elif defined(CONFIG_8xx) || defined(CONFIG_PPC64)
	/* On 8xx there is no need to kmap since highmem is not supported */
	__flush_dcache_icache(page_address(page));
#else
	__flush_dcache_icache_phys(page_to_pfn(page) << PAGE_SHIFT);
#endif
}
void clear_user_page(void *page, unsigned long vaddr, struct page *pg)
{
	clear_page(page);

	/*
	 * We shouldn't have to do this, but some versions of glibc
	 * require it (ld.so assumes zero filled pages are icache clean)
	 * - Anton
	 */
	flush_dcache_page(pg);
}
EXPORT_SYMBOL(clear_user_page);
void copy_user_page(void *vto, void *vfrom, unsigned long vaddr,
		    struct page *pg)
{
	copy_page(vto, vfrom);

	/*
	 * We should be able to use the following optimisation, however
	 * there are two problems.
	 * Firstly a bug in some versions of binutils meant PLT sections
	 * were not marked executable.
	 * Secondly the first word in the GOT section is blrl, used
	 * to establish the GOT address.  Until recently the GOT was
	 * not marked executable.
	 * - Anton
	 */
#if 0
	if (!vma->vm_file && ((vma->vm_flags & VM_EXEC) == 0))
		return;
#endif

	flush_dcache_page(pg);
}
void flush_icache_user_range(struct vm_area_struct *vma, struct page *page,
			     unsigned long addr, int len)
{
	unsigned long maddr;

	maddr = (unsigned long) kmap(page) + (addr & ~PAGE_MASK);
	flush_icache_range(maddr, maddr + len);
	kunmap(page);
}
EXPORT_SYMBOL(flush_icache_user_range);
/*
 * This is called at the end of handling a user page fault, when the
 * fault has been handled by updating a PTE in the linux page tables.
 * We use it to preload an HPTE into the hash table corresponding to
 * the updated linux PTE.
 *
 * This must always be called with the pte lock held.
 */
void update_mmu_cache(struct vm_area_struct *vma, unsigned long address,
		      pte_t pte)
{
#ifdef CONFIG_PPC_STD_MMU
	unsigned long access = 0, trap;
#endif
	unsigned long pfn = pte_pfn(pte);

	/* handle i-cache coherency */
	if (!cpu_has_feature(CPU_FTR_COHERENT_ICACHE) &&
	    !cpu_has_feature(CPU_FTR_NOEXECUTE) &&
	    pfn_valid(pfn)) {
		struct page *page = pfn_to_page(pfn);
#ifdef CONFIG_8xx
		/* On 8xx, cache control instructions (particularly
		 * "dcbst" from flush_dcache_icache) fault as write
		 * operation if there is an unpopulated TLB entry
		 * for the address in question. To workaround that,
		 * we invalidate the TLB here, thus avoiding dcbst
		 * misbehaviour.
		 */
		_tlbie(address, 0 /* 8xx doesn't care about PID */);
#endif
		/* The _PAGE_USER test should really be _PAGE_EXEC, but
		 * older glibc versions execute some code from no-exec
		 * pages, which for now we are supporting.  If exec-only
		 * pages are ever implemented, this will have to change.
		 */
		if (!PageReserved(page) && (pte_val(pte) & _PAGE_USER)
		    && !test_bit(PG_arch_1, &page->flags)) {
			if (vma->vm_mm == current->active_mm) {
				__flush_dcache_icache((void *) address);
			} else
				flush_dcache_icache_page(page);
			set_bit(PG_arch_1, &page->flags);
		}
	}

#ifdef CONFIG_PPC_STD_MMU
	/* We only want HPTEs for linux PTEs that have _PAGE_ACCESSED set */
	if (!pte_young(pte) || address >= TASK_SIZE)
		return;

	/* We try to figure out if we are coming from an instruction
	 * access fault and pass that down to __hash_page so we avoid
	 * double-faulting on execution of fresh text.  We have to test
	 * for regs NULL since init will get here first thing at boot.
	 *
	 * We also avoid filling the hash if not coming from a fault.
	 */
	if (current->thread.regs == NULL)
		return;
	trap = TRAP(current->thread.regs);
	if (trap == 0x400)		/* instruction access fault */
		access |= _PAGE_EXEC;
	else if (trap != 0x300)		/* not a data access fault */
		return;
	hash_preload(vma->vm_mm, address, access, trap);
#endif /* CONFIG_PPC_STD_MMU */
}