/*
 * Initialize MMU support.
 *
 * Copyright (C) 1998, 1999 Hewlett-Packard Co
 * Copyright (C) 1998, 1999 David Mosberger-Tang <davidm@hpl.hp.com>
 */
#include <linux/config.h>
#include <linux/kernel.h>
#include <linux/init.h>

#include <linux/bootmem.h>
#include <linux/mm.h>
#include <linux/reboot.h>
#include <linux/slab.h>
#include <linux/swap.h>

#include <asm/bitops.h>
#include <asm/efi.h>
#include <asm/pgalloc.h>
#include <asm/system.h>
/* References to section boundaries: */
extern char _stext, _etext, _edata, __init_begin, __init_end;

/*
 * These are allocated in head.S so that we get proper page alignment.
 * If you change the size of these then change head.S as well.
 */
extern char empty_bad_page[PAGE_SIZE];
extern pmd_t empty_bad_pmd_table[PTRS_PER_PMD];
extern pte_t empty_bad_pte_table[PTRS_PER_PTE];

extern void ia64_tlb_init (void);

static unsigned long totalram_pages;
/*
 * Fill in empty_bad_pmd_table with entries pointing to
 * empty_bad_pte_table and return the address of this PMD table.
 */
static pmd_t *
get_bad_pmd_table (void)
{
	pmd_t v;
	int i;

	pmd_set(&v, empty_bad_pte_table);

	for (i = 0; i < PTRS_PER_PMD; ++i)
		empty_bad_pmd_table[i] = v;

	return empty_bad_pmd_table;
}
/*
 * Fill in empty_bad_pte_table with PTEs pointing to empty_bad_page
 * and return the address of this PTE table.
 */
static pte_t *
get_bad_pte_table (void)
{
	pte_t v;
	int i;

	set_pte(&v, pte_mkdirty(mk_pte_phys(__pa(empty_bad_page), PAGE_SHARED)));

	for (i = 0; i < PTRS_PER_PTE; ++i)
		empty_bad_pte_table[i] = v;

	return empty_bad_pte_table;
}
void
__handle_bad_pgd (pgd_t *pgd)
{
	pgd_ERROR(*pgd);
	pgd_set(pgd, get_bad_pmd_table());
}

void
__handle_bad_pmd (pmd_t *pmd)
{
	pmd_ERROR(*pmd);
	pmd_set(pmd, get_bad_pte_table());
}
/*
 * Allocate and initialize an L3 directory page and set
 * the L2 directory entry PMD to the newly allocated page.
 */
pte_t *
get_pte_slow (pmd_t *pmd, unsigned long offset)
{
	pte_t *pte;

	pte = (pte_t *) __get_free_page(GFP_KERNEL);
	if (pmd_none(*pmd)) {
		if (pte) {
			/* everything A-OK */
			clear_page(pte);
			pmd_set(pmd, pte);
			return pte + offset;
		}
		pmd_set(pmd, get_bad_pte_table());
		return NULL;
	}
	free_page((unsigned long) pte);
	if (pmd_bad(*pmd)) {
		__handle_bad_pmd(pmd);
		return NULL;
	}
	return (pte_t *) pmd_page(*pmd) + offset;
}
int
do_check_pgt_cache (int low, int high)
{
	int freed = 0;

	if (pgtable_cache_size > high) {
		do {
			if (pgd_quicklist)
				free_page((unsigned long)get_pgd_fast()), ++freed;
			if (pmd_quicklist)
				free_page((unsigned long)get_pmd_fast()), ++freed;
			if (pte_quicklist)
				free_page((unsigned long)get_pte_fast()), ++freed;
		} while (pgtable_cache_size > low);
	}
	return freed;
}
/*
 * This performs some platform-dependent address space initialization.
 * On IA-64, we want to set up the VM area for the register backing
 * store (which grows upwards) and install the gateway page which is
 * used for signal trampolines, etc.
 */
void
ia64_init_addr_space (void)
{
	struct vm_area_struct *vma;

	/*
	 * If we're out of memory and kmem_cache_alloc() returns NULL,
	 * we simply ignore the problem.  When the process attempts to
	 * write to the register backing store for the first time, it
	 * will get a SEGFAULT in this case.
	 */
	vma = kmem_cache_alloc(vm_area_cachep, SLAB_KERNEL);
	if (vma) {
		vma->vm_mm = current->mm;
		vma->vm_start = IA64_RBS_BOT;
		vma->vm_end = vma->vm_start + PAGE_SIZE;
		vma->vm_page_prot = PAGE_COPY;
		vma->vm_flags = VM_READ|VM_WRITE|VM_MAYREAD|VM_MAYWRITE|VM_GROWSUP;
		vma->vm_ops = NULL;
		vma->vm_pgoff = 0;
		vma->vm_file = NULL;
		vma->vm_private_data = NULL;
		insert_vm_struct(current->mm, vma);
	}
}
void
free_initmem (void)
{
	unsigned long addr;

	addr = (unsigned long) &__init_begin;
	for (; addr < (unsigned long) &__init_end; addr += PAGE_SIZE) {
		clear_bit(PG_reserved, &virt_to_page(addr)->flags);
		set_page_count(virt_to_page(addr), 1);
		free_page(addr);
		++totalram_pages;
	}
	printk ("Freeing unused kernel memory: %ldkB freed\n",
		(&__init_end - &__init_begin) >> 10);
}
void
free_initrd_mem (unsigned long start, unsigned long end)
{
	/*
	 * EFI uses 4KB pages while the kernel can use 4KB or bigger.
	 * Thus EFI and the kernel may have different page sizes. It is
	 * therefore possible to have the initrd share the same page as
	 * the end of the kernel (given current setup).
	 *
	 * To avoid freeing/using the wrong page (kernel sized) we:
	 *	- align up the beginning of initrd
	 *	- keep the end untouched
	 *
	 *  |             |
	 *  |=============| a000
	 *  |             |
	 *  |             |
	 *  |             | 9000
	 *  |/////////////|
	 *  |/////////////|
	 *  |=============| 8000
	 *  |///INITRD////|
	 *  |/////////////|
	 *  |/////////////| 7000
	 *  |             |
	 *  |KKKKKKKKKKKKK|
	 *  |=============| 6000
	 *  |KKKKKKKKKKKKK|
	 *  |KKKKKKKKKKKKK|
	 *  K=kernel using 8KB pages
	 *
	 * In this example, we must free page 8000 ONLY. So we must align up
	 * initrd_start and keep initrd_end as is.
	 */
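	/*
	 * Worked example (editor's sketch, based on the diagram above):
	 * with initrd_start = 0x7000 and initrd_end = 0x9000 on an
	 * 8KB-page kernel, PAGE_ALIGN(0x7000) rounds up to 0x8000, so
	 * the loop below frees only the kernel-sized page at 0x8000 and
	 * never touches the page that the kernel image ends in.
	 */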
	start = PAGE_ALIGN(start);

	if (start < end)
		printk ("Freeing initrd memory: %ldkB freed\n", (end - start) >> 10);

	for (; start < end; start += PAGE_SIZE) {
		clear_bit(PG_reserved, &virt_to_page(start)->flags);
		set_page_count(virt_to_page(start), 1);
		free_page(start);
		++totalram_pages;
	}
}
void
si_meminfo (struct sysinfo *val)
{
	val->totalram = totalram_pages;
	val->sharedram = 0;
	val->freeram = nr_free_pages();
	val->bufferram = atomic_read(&buffermem_pages);
	val->totalhigh = 0;
	val->freehigh = 0;
	val->mem_unit = PAGE_SIZE;
}
void
show_mem (void)
{
	int i, total = 0, reserved = 0;
	int shared = 0, cached = 0;

	printk("Mem-info:\n");
	show_free_areas();
	printk("Free swap: %6dkB\n", nr_swap_pages<<(PAGE_SHIFT-10));
	i = max_mapnr;
	while (i-- > 0) {
		total++;
		if (PageReserved(mem_map+i))
			reserved++;
		else if (PageSwapCache(mem_map+i))
			cached++;
		else if (page_count(mem_map + i))
			shared += page_count(mem_map + i) - 1;
	}
	printk("%d pages of RAM\n", total);
	printk("%d reserved pages\n", reserved);
	printk("%d pages shared\n", shared);
	printk("%d pages swap cached\n", cached);
	printk("%ld pages in page table cache\n", pgtable_cache_size);
	show_buffers();
}
/*
 * This is like put_dirty_page() but installs a clean page with PAGE_GATE protection
 * (execute-only, typically).
 */
struct page *
put_gate_page (struct page *page, unsigned long address)
{
	pgd_t *pgd;
	pmd_t *pmd;
	pte_t *pte;

	if (!PageReserved(page))
		printk("put_gate_page: gate page at 0x%p not in reserved memory\n",
		       page_address(page));

	pgd = pgd_offset_k(address);	/* note: this is NOT pgd_offset()! */
	pmd = pmd_alloc(pgd, address);
	if (!pmd) {
		__free_page(page);
		panic("Out of memory.");
		return 0;
	}
	pte = pte_alloc(pmd, address);
	if (!pte) {
		__free_page(page);
		panic("Out of memory.");
		return 0;
	}
	if (!pte_none(*pte)) {
		pte_ERROR(*pte);
		__free_page(page);
		return 0;
	}
	flush_page_to_ram(page);
	set_pte(pte, page_pte_prot(page, PAGE_GATE));
	/* no need for flush_tlb */
	return page;
}
void __init
ia64_mmu_init (void)
{
	unsigned long flags, rid, pta, impl_va_msb;

	/* Set up the kernel identity mappings (regions 6 & 7) and the vmalloc area (region 5): */
	ia64_clear_ic(flags);
	rid = ia64_rid(IA64_REGION_ID_KERNEL, __IA64_UNCACHED_OFFSET);
	ia64_set_rr(__IA64_UNCACHED_OFFSET, (rid << 8) | (_PAGE_SIZE_256M << 2));

	rid = ia64_rid(IA64_REGION_ID_KERNEL, PAGE_OFFSET);
	ia64_set_rr(PAGE_OFFSET, (rid << 8) | (_PAGE_SIZE_256M << 2));

	rid = ia64_rid(IA64_REGION_ID_KERNEL, VMALLOC_START);
	ia64_set_rr(VMALLOC_START, (rid << 8) | (PAGE_SHIFT << 2) | 1);

	__restore_flags(flags);
	/*
	 * Check if the virtually mapped linear page table (VMLPT)
	 * overlaps with a mapped address space.  The IA-64
	 * architecture guarantees that at least 50 bits of virtual
	 * address space are implemented but if we pick a large enough
	 * page size (e.g., 64KB), the VMLPT is big enough that it
	 * will overlap with the upper half of the kernel mapped
	 * region.  I assume that once we run on machines big enough
	 * to warrant 64KB pages, IMPL_VA_MSB will be significantly
	 * bigger, so we can just adjust the number below to get
	 * things going.  Alternatively, we could truncate the upper
	 * half of each region's address space to not permit mappings
	 * that would overlap with the VMLPT.  --davidm 99/11/13
	 */
#	define ld_pte_size		3
#	define ld_max_addr_space_pages	3*(PAGE_SHIFT - ld_pte_size)	/* max # of mappable pages */
#	define ld_max_addr_space_size	(ld_max_addr_space_pages + PAGE_SHIFT)
#	define ld_max_vpt_size		(ld_max_addr_space_pages + ld_pte_size)
#	define POW2(n)			(1ULL << (n))
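	/*
	 * Editor's sketch of the arithmetic, assuming 8KB pages
	 * (PAGE_SHIFT = 13): a three-level page table can map at most
	 * 2^(3*(13-3)) = 2^30 pages, i.e., 2^43 bytes of address space,
	 * and the VMLPT mirroring it occupies 2^(30+3) = 2^33 bytes.
	 * The overlap check below then verifies that half an address
	 * space (2^42 bytes) plus the VMLPT fits below 2^impl_va_msb.
	 */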
	impl_va_msb = ffz(~my_cpu_data.unimpl_va_mask) - 1;

	if (impl_va_msb < 50 || impl_va_msb > 60)
		panic("Bogus impl_va_msb value of %lu!\n", impl_va_msb);

	if (POW2(ld_max_addr_space_size - 1) + POW2(ld_max_vpt_size) > POW2(impl_va_msb))
		panic("mm/init: overlap between virtually mapped linear page table and "
		      "mapped kernel space!");
	pta = POW2(61) - POW2(impl_va_msb);
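	/*
	 * Editor's note (an interpretation, not from the original text):
	 * this choice of pta places the VMLPT at the start of the
	 * implemented upper half of each region's sign-extended address
	 * space, away from the lower-half mappings checked for above.
	 */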
#ifndef CONFIG_DISABLE_VHPT
	/*
	 * Set the (virtually mapped linear) page table address.  Bit
	 * 8 selects between the short and long format, bits 2-7 the
	 * size of the table, and bit 0 whether the VHPT walker is
	 * enabled.
	 */
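	/*
	 * Editor's sketch, again assuming 8KB pages (PAGE_SHIFT = 13):
	 * the size field below evaluates to 3*(13-3)+3 = 33, i.e., a
	 * 2^33-byte VMLPT, matching ld_max_vpt_size above; bit 8 = 0
	 * selects the short format.
	 */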
	ia64_set_pta(pta | (0<<8) | ((3*(PAGE_SHIFT-3)+3)<<2) | 1);
#else
	ia64_set_pta(pta | (0<<8) | ((3*(PAGE_SHIFT-3)+3)<<2) | 0);
#endif

	ia64_tlb_init();
}
/*
 * Set up the page tables.
 */
void
paging_init (void)
{
	unsigned long max_dma, zones_size[MAX_NR_ZONES];

	clear_page((void *) ZERO_PAGE_ADDR);

	/* initialize mem_map[] */

	memset(zones_size, 0, sizeof(zones_size));

	max_dma = (PAGE_ALIGN(MAX_DMA_ADDRESS) >> PAGE_SHIFT);
	if (max_low_pfn < max_dma)
		zones_size[ZONE_DMA] = max_low_pfn;
	else {
		zones_size[ZONE_DMA] = max_dma;
		zones_size[ZONE_NORMAL] = max_low_pfn - max_dma;
	}
	free_area_init(zones_size);
}
static int
count_pages (u64 start, u64 end, void *arg)
{
	unsigned long *count = arg;

	*count += (end - start) >> PAGE_SHIFT;
	return 0;
}
static int
count_reserved_pages (u64 start, u64 end, void *arg)
{
	unsigned long num_reserved = 0;
	unsigned long *count = arg;
	struct page *pg;

	for (pg = virt_to_page(start); pg < virt_to_page(end); ++pg)
		if (PageReserved(pg))
			++num_reserved;
	*count += num_reserved;
	return 0;
}
void
mem_init (void)
{
	extern char __start_gate_section[];
	long reserved_pages, codesize, datasize, initsize;

	num_physpages = 0;
	efi_memmap_walk(count_pages, &num_physpages);

	max_mapnr = max_low_pfn;
	high_memory = __va(max_low_pfn * PAGE_SIZE);

	totalram_pages += free_all_bootmem();

	reserved_pages = 0;
	efi_memmap_walk(count_reserved_pages, &reserved_pages);

	codesize = (unsigned long) &_etext - (unsigned long) &_stext;
	datasize = (unsigned long) &_edata - (unsigned long) &_etext;
	initsize = (unsigned long) &__init_end - (unsigned long) &__init_begin;

	printk("Memory: %luk/%luk available (%luk code, %luk reserved, %luk data, %luk init)\n",
	       (unsigned long) nr_free_pages() << (PAGE_SHIFT - 10),
	       max_mapnr << (PAGE_SHIFT - 10), codesize >> 10, reserved_pages << (PAGE_SHIFT - 10),
	       datasize >> 10, initsize >> 10);

	/* install the gate page in the global page table: */
	put_gate_page(virt_to_page(__start_gate_section), GATE_ADDR);

#ifdef CONFIG_IA32_SUPPORT