/*
 * arch/ia64/mm/init.c
 *
 * Initialize MMU support.
 *
 * Copyright (C) 1998, 1999 Hewlett-Packard Co
 * Copyright (C) 1998, 1999 David Mosberger-Tang <davidm@hpl.hp.com>
 */
#include <linux/config.h>
#include <linux/kernel.h>
#include <linux/init.h>

#include <linux/bootmem.h>
#include <linux/mm.h>
#include <linux/reboot.h>
#include <linux/slab.h>
#include <linux/swap.h>

#include <asm/bitops.h>
#include <asm/dma.h>
#include <asm/efi.h>
#include <asm/ia32.h>
#include <asm/io.h>
#include <asm/pgalloc.h>
#include <asm/sal.h>
#include <asm/system.h>
/* References to section boundaries: */
extern char _stext, _etext, _edata, __init_begin, __init_end;

/*
 * These are allocated in head.S so that we get proper page alignment.
 * If you change the size of these then change head.S as well.
 */
extern char empty_bad_page[PAGE_SIZE];
extern pmd_t empty_bad_pmd_table[PTRS_PER_PMD];
extern pte_t empty_bad_pte_table[PTRS_PER_PTE];

extern void ia64_tlb_init (void);

static unsigned long totalram_pages;
/*
 * Fill in empty_bad_pmd_table with entries pointing to
 * empty_bad_pte_table and return the address of this PMD table.
 */
static pmd_t *
get_bad_pmd_table (void)
{
	pmd_t v;
	int i;

	pmd_set(&v, empty_bad_pte_table);

	for (i = 0; i < PTRS_PER_PMD; ++i)
		empty_bad_pmd_table[i] = v;

	return empty_bad_pmd_table;
}
/*
 * Fill in empty_bad_pte_table with PTEs pointing to empty_bad_page
 * and return the address of this PTE table.
 */
static pte_t *
get_bad_pte_table (void)
{
	pte_t v;
	int i;

	set_pte(&v, pte_mkdirty(mk_pte_phys(__pa(empty_bad_page), PAGE_SHARED)));

	for (i = 0; i < PTRS_PER_PTE; ++i)
		empty_bad_pte_table[i] = v;

	return empty_bad_pte_table;
}
void
__handle_bad_pgd (pgd_t *pgd)
{
	pgd_ERROR(*pgd);
	pgd_set(pgd, get_bad_pmd_table());
}

void
__handle_bad_pmd (pmd_t *pmd)
{
	pmd_ERROR(*pmd);
	pmd_set(pmd, get_bad_pte_table());
}
/*
 * Allocate and initialize an L3 directory page and set
 * the L2 directory entry PMD to the newly allocated page.
 */
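/*
 * Presumably this is the slow path behind pte_alloc(): it should only be
 * reached when the pte quicklist (drained by do_check_pgt_cache() below)
 * cannot supply a cached page, so a fresh one is taken from the page
 * allocator.
 */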
pte_t *
get_pte_slow (pmd_t *pmd, unsigned long offset)
{
	pte_t *pte;

	pte = (pte_t *) __get_free_page(GFP_KERNEL);
	if (pmd_none(*pmd)) {
		if (pte) {
			/* everything A-OK */
			clear_page(pte);
			pmd_set(pmd, pte);
			return pte + offset;
		}
		pmd_set(pmd, get_bad_pte_table());
		return NULL;
	}
	free_page((unsigned long) pte);
	if (pmd_bad(*pmd)) {
		__handle_bad_pmd(pmd);
		return NULL;
	}
	return (pte_t *) pmd_page(*pmd) + offset;
}
int
do_check_pgt_cache (int low, int high)
{
	int freed = 0;

	if (pgtable_cache_size > high) {
		do {
			if (pgd_quicklist)
				free_page((unsigned long)get_pgd_fast()), ++freed;
			if (pmd_quicklist)
				free_page((unsigned long)get_pmd_fast()), ++freed;
			if (pte_quicklist)
				free_page((unsigned long)get_pte_fast()), ++freed;
		} while (pgtable_cache_size > low);
	}
	return freed;
}
/*
 * This performs some platform-dependent address space initialization.
 * On IA-64, we want to set up the VM area for the register backing
 * store (which grows upwards) and install the gateway page which is
 * used for signal trampolines, etc.
 */
void
ia64_init_addr_space (void)
{
	struct vm_area_struct *vma;

	/*
	 * If we're out of memory and kmem_cache_alloc() returns NULL,
	 * we simply ignore the problem.  When the process attempts to
	 * write to the register backing store for the first time, it
	 * will get a SEGFAULT in this case.
	 */
	vma = kmem_cache_alloc(vm_area_cachep, SLAB_KERNEL);
	if (vma) {
		vma->vm_mm = current->mm;
		vma->vm_start = IA64_RBS_BOT;
		vma->vm_end = vma->vm_start + PAGE_SIZE;
		vma->vm_page_prot = PAGE_COPY;
		vma->vm_flags = VM_READ|VM_WRITE|VM_MAYREAD|VM_MAYWRITE|VM_GROWSUP;
		vma->vm_ops = NULL;
		vma->vm_pgoff = 0;
		vma->vm_file = NULL;
		vma->vm_private_data = NULL;
		insert_vm_struct(current->mm, vma);
	}
}
void
free_initmem (void)
{
	unsigned long addr;

	addr = (unsigned long) &__init_begin;
	for (; addr < (unsigned long) &__init_end; addr += PAGE_SIZE) {
		clear_bit(PG_reserved, &virt_to_page(addr)->flags);
		set_page_count(virt_to_page(addr), 1);
		free_page(addr);
		++totalram_pages;
	}
	printk("Freeing unused kernel memory: %ldkB freed\n",
	       (&__init_end - &__init_begin) >> 10);
}
void
free_initrd_mem (unsigned long start, unsigned long end)
{
	/*
	 * EFI uses 4KB pages while the kernel can use 4KB or bigger.
	 * Thus EFI and the kernel may have different page sizes. It is
	 * therefore possible to have the initrd share the same page as
	 * the end of the kernel (given current setup).
	 *
	 * To avoid freeing/using the wrong page (kernel sized) we:
	 *	- align up the beginning of initrd
	 *	- keep the end untouched
	 *
	 *  |             |
	 *  |=============| a000
	 *  |             |
	 *  |             |
	 *  |             | 9000
	 *  |/////////////|
	 *  |/////////////|
	 *  |=============| 8000
	 *  |///INITRD////|
	 *  |/////////////|
	 *  |/////////////| 7000
	 *  |             |
	 *  |KKKKKKKKKKKKK|
	 *  |=============| 6000
	 *  |KKKKKKKKKKKKK|
	 *  |KKKKKKKKKKKKK|
	 *  K=kernel using 8KB pages
	 *
	 * In this example, we must free page 8000 ONLY. So we must align up
	 * initrd_start and keep initrd_end as is.
	 */
	start = PAGE_ALIGN(start);

	if (start < end)
		printk("Freeing initrd memory: %ldkB freed\n", (end - start) >> 10);

	for (; start < end; start += PAGE_SIZE) {
		clear_bit(PG_reserved, &virt_to_page(start)->flags);
		set_page_count(virt_to_page(start), 1);
		free_page(start);
		++totalram_pages;
	}
}
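/*
 * Illustrative, user-level sketch of the alignment rule above, using the
 * numbers from the diagram (8KB kernel pages, an initrd assumed to run from
 * 0x6800 to 0x9000; the 0x6800 start is an example value, not taken from the
 * code).  It is compiled out and is not part of the kernel build.
 */
#if 0
#include <stdio.h>

#define EX_PAGE_SIZE		0x2000UL	/* 8KB kernel pages */
#define EX_PAGE_ALIGN(addr)	(((addr) + EX_PAGE_SIZE - 1) & ~(EX_PAGE_SIZE - 1))

int
main (void)
{
	unsigned long start = 0x6800, end = 0x9000;

	/* align up the beginning of the initrd, keep the end untouched: */
	for (start = EX_PAGE_ALIGN(start); start < end; start += EX_PAGE_SIZE)
		printf("would free kernel page at 0x%lx\n", start);	/* prints 0x8000 only */
	return 0;
}
#endif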
void
si_meminfo (struct sysinfo *val)
{
	val->totalram = totalram_pages;
	val->sharedram = 0;
	val->freeram = nr_free_pages();
	val->bufferram = atomic_read(&buffermem_pages);
	val->totalhigh = 0;
	val->freehigh = 0;
	val->mem_unit = PAGE_SIZE;
	return;
}
void
show_mem (void)
{
	int i, total = 0, reserved = 0;
	int shared = 0, cached = 0;

	printk("Mem-info:\n");
	show_free_areas();
	printk("Free swap: %6dkB\n", nr_swap_pages << (PAGE_SHIFT - 10));
	i = max_mapnr;
	while (i-- > 0) {
		total++;
		if (PageReserved(mem_map + i))
			reserved++;
		else if (PageSwapCache(mem_map + i))
			cached++;
		else if (page_count(mem_map + i))
			shared += page_count(mem_map + i) - 1;
	}
	printk("%d pages of RAM\n", total);
	printk("%d reserved pages\n", reserved);
	printk("%d pages shared\n", shared);
	printk("%d pages swap cached\n", cached);
	printk("%ld pages in page table cache\n", pgtable_cache_size);
	show_buffers();
}
/*
 * This is like put_dirty_page() but installs a clean page with PAGE_GATE
 * protection (execute-only, typically).
 */
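/* In this file it is called once, from mem_init() below, to map __start_gate_section at GATE_ADDR. */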
struct page *
put_gate_page (struct page *page, unsigned long address)
{
	pgd_t *pgd;
	pmd_t *pmd;
	pte_t *pte;

	if (!PageReserved(page))
		printk("put_gate_page: gate page at 0x%p not in reserved memory\n",
		       page_address(page));

	pgd = pgd_offset_k(address);	/* note: this is NOT pgd_offset()! */
	pmd = pmd_alloc(pgd, address);
	if (!pmd) {
		__free_page(page);
		panic("Out of memory.");
		return 0;
	}
	pte = pte_alloc(pmd, address);
	if (!pte) {
		__free_page(page);
		panic("Out of memory.");
		return 0;
	}
	if (!pte_none(*pte)) {
		pte_ERROR(*pte);
		__free_page(page);
		return 0;
	}
	flush_page_to_ram(page);
	set_pte(pte, page_pte_prot(page, PAGE_GATE));
	/* no need for flush_tlb */
	return page;
}
void __init
ia64_rid_init (void)
{
	unsigned long flags, rid, pta, impl_va_msb;

	/* Set up the kernel identity mappings (regions 6 & 7) and the vmalloc area (region 5): */
	ia64_clear_ic(flags);

	rid = ia64_rid(IA64_REGION_ID_KERNEL, __IA64_UNCACHED_OFFSET);
	ia64_set_rr(__IA64_UNCACHED_OFFSET, (rid << 8) | (_PAGE_SIZE_256M << 2));

	rid = ia64_rid(IA64_REGION_ID_KERNEL, PAGE_OFFSET);
	ia64_set_rr(PAGE_OFFSET, (rid << 8) | (_PAGE_SIZE_256M << 2));

	rid = ia64_rid(IA64_REGION_ID_KERNEL, VMALLOC_START);
	ia64_set_rr(VMALLOC_START, (rid << 8) | (PAGE_SHIFT << 2) | 1);

	__restore_flags(flags);

	/*
	 * Check if the virtually mapped linear page table (VMLPT)
	 * overlaps with a mapped address space.  The IA-64
	 * architecture guarantees that at least 50 bits of virtual
	 * address space are implemented, but if we pick a large enough
	 * page size (e.g., 64KB), the VMLPT is big enough that it
	 * will overlap with the upper half of the kernel mapped
	 * region.  I assume that once we run on machines big enough
	 * to warrant 64KB pages, IMPL_VA_MSB will be significantly
	 * bigger, so we can just adjust the number below to get
	 * things going.  Alternatively, we could truncate the upper
	 * half of each region's address space to not permit mappings
	 * that would overlap with the VMLPT.  --davidm 99/11/13
	 */
#	define ld_pte_size		3
#	define ld_max_addr_space_pages	3*(PAGE_SHIFT - ld_pte_size)	/* max # of mappable pages */
#	define ld_max_addr_space_size	(ld_max_addr_space_pages + PAGE_SHIFT)
#	define ld_max_vpt_size		(ld_max_addr_space_pages + ld_pte_size)
#	define POW2(n)			(1ULL << (n))

	impl_va_msb = ffz(~my_cpu_data.unimpl_va_mask) - 1;

	if (impl_va_msb < 50 || impl_va_msb > 60)
		panic("Bogus impl_va_msb value of %lu!\n", impl_va_msb);

	if (POW2(ld_max_addr_space_size - 1) + POW2(ld_max_vpt_size) > POW2(impl_va_msb))
		panic("mm/init: overlap between virtually mapped linear page table and "
		      "mapped kernel space!");
	pta = POW2(61) - POW2(impl_va_msb);
#ifndef CONFIG_DISABLE_VHPT
	/*
	 * Set the (virtually mapped linear) page table address.  Bit
	 * 8 selects between the short and long format, bits 2-7 the
	 * size of the table, and bit 0 whether the VHPT walker is
	 * enabled.
	 */
	ia64_set_pta(pta | (0 << 8) | ((3*(PAGE_SHIFT-3)+3) << 2) | 1);
#else
	ia64_set_pta(pta | (0 << 8) | ((3*(PAGE_SHIFT-3)+3) << 2) | 0);
#endif
}
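/*
 * Illustrative, user-level sketch of the overlap check and PTA size field
 * above, assuming 16KB pages (PAGE_SHIFT == 14), 8-byte PTEs and
 * impl_va_msb == 50 (the smallest value the check above accepts).  The
 * assumed values are examples only; the sketch is compiled out and is not
 * part of the kernel build.
 */
#if 0
#include <stdio.h>

int
main (void)
{
	unsigned long page_shift = 14, ld_pte = 3, impl_va_msb = 50;
	unsigned long ld_pages = 3*(page_shift - ld_pte);	/* 33: three levels, 11 bits each */
	unsigned long ld_space = ld_pages + page_shift;		/* 47: 128TB mappable per region */
	unsigned long ld_vpt = ld_pages + ld_pte;		/* 36: 64GB of PTEs in the VMLPT */

	if ((1UL << (ld_space - 1)) + (1UL << ld_vpt) > (1UL << impl_va_msb))
		printf("VMLPT overlaps the mapped kernel space\n");
	else
		printf("no overlap; PTA base = 2^61 - 2^%lu, size field = %lu\n",
		       impl_va_msb, 3*(page_shift - 3) + 3);	/* size field = 36 */
	return 0;
}
#endif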
/*
 * Set up the page tables.
 */
void
paging_init (void)
{
	unsigned long max_dma, zones_size[MAX_NR_ZONES];

	clear_page((void *) ZERO_PAGE_ADDR);

	/* initialize mem_map[] */

	memset(zones_size, 0, sizeof(zones_size));

	max_dma = (PAGE_ALIGN(MAX_DMA_ADDRESS) >> PAGE_SHIFT);
	if (max_low_pfn < max_dma)
		zones_size[ZONE_DMA] = max_low_pfn;
	else {
		zones_size[ZONE_DMA] = max_dma;
		zones_size[ZONE_NORMAL] = max_low_pfn - max_dma;
	}
	free_area_init(zones_size);
}
static int
count_pages (u64 start, u64 end, void *arg)
{
	unsigned long *count = arg;

	*count += (end - start) >> PAGE_SHIFT;
	return 0;
}

static int
count_reserved_pages (u64 start, u64 end, void *arg)
{
	unsigned long num_reserved = 0;
	unsigned long *count = arg;
	struct page *pg;

	for (pg = virt_to_page(start); pg < virt_to_page(end); ++pg)
		if (PageReserved(pg))
			++num_reserved;
	*count += num_reserved;
	return 0;
}
void
mem_init (void)
{
	extern char __start_gate_section[];
	long reserved_pages, codesize, datasize, initsize;

	if (!mem_map)
		BUG();

	num_physpages = 0;
	efi_memmap_walk(count_pages, &num_physpages);

	max_mapnr = max_low_pfn;
	high_memory = __va(max_low_pfn * PAGE_SIZE);

	totalram_pages += free_all_bootmem();

	reserved_pages = 0;
	efi_memmap_walk(count_reserved_pages, &reserved_pages);

	codesize = (unsigned long) &_etext - (unsigned long) &_stext;
	datasize = (unsigned long) &_edata - (unsigned long) &_etext;
	initsize = (unsigned long) &__init_end - (unsigned long) &__init_begin;

	printk("Memory: %luk/%luk available (%luk code, %luk reserved, %luk data, %luk init)\n",
	       (unsigned long) nr_free_pages() << (PAGE_SHIFT - 10),
	       max_mapnr << (PAGE_SHIFT - 10), codesize >> 10, reserved_pages << (PAGE_SHIFT - 10),
	       datasize >> 10, initsize >> 10);

	/* install the gate page in the global page table: */
	put_gate_page(virt_to_page(__start_gate_section), GATE_ADDR);

#ifdef CONFIG_IA32_SUPPORT
	ia32_gdt_init();
#endif
}