/*
 *  linux/arch/parisc/mm/init.c
 *
 *  Copyright (C) 1995	Linus Torvalds
 *  Copyright 1999 SuSE GmbH
 *    changed by Philipp Rumpf
 *  Copyright 1999 Philipp Rumpf (prumpf@tux.org)
 *  Copyright 2004 Randolph Chung (tausq@debian.org)
 *  Copyright 2006-2007 Helge Deller (deller@gmx.de)
 */

#include <linux/module.h>
#include <linux/mm.h>
#include <linux/bootmem.h>
#include <linux/gfp.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/pci.h>		/* for hppa_dma_ops and pcxl_dma_ops */
#include <linux/initrd.h>
#include <linux/swap.h>
#include <linux/unistd.h>
#include <linux/nodemask.h>	/* for node_online_map */
#include <linux/pagemap.h>	/* for release_pages and page_cache_release */

#include <asm/pgalloc.h>
#include <asm/pgtable.h>
#include <asm/tlb.h>
#include <asm/pdc_chassis.h>
#include <asm/mmzone.h>
#include <asm/sections.h>

DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);

extern int  data_start;

#ifdef CONFIG_DISCONTIGMEM
struct node_map_data node_data[MAX_NUMNODES] __read_mostly;
unsigned char pfnnid_map[PFNNID_MAP_MAX] __read_mostly;
#endif

static struct resource data_resource = {
	.name	= "Kernel data",
	.flags	= IORESOURCE_BUSY | IORESOURCE_MEM,
};

static struct resource code_resource = {
	.name	= "Kernel code",
	.flags	= IORESOURCE_BUSY | IORESOURCE_MEM,
};

static struct resource pdcdata_resource = {
	.name	= "PDC data (Page Zero)",
	.start	= 0,
	.end	= 0x9ff,
	.flags	= IORESOURCE_BUSY | IORESOURCE_MEM,
};

static struct resource sysram_resources[MAX_PHYSMEM_RANGES] __read_mostly;

/* The following array is initialized from the firmware specific
 * information retrieved in kernel/inventory.c.
 */

physmem_range_t pmem_ranges[MAX_PHYSMEM_RANGES] __read_mostly;
int npmem_ranges __read_mostly;

#ifdef CONFIG_64BIT
#define MAX_MEM         (~0UL)
#else /* !CONFIG_64BIT */
#define MAX_MEM         (3584U*1024U*1024U)
#endif /* !CONFIG_64BIT */

static unsigned long mem_limit __read_mostly = MAX_MEM;

static void __init mem_limit_func(void)
{
	char *cp, *end;
	unsigned long limit;

	/* We need this before __setup() functions are called */

	limit = MAX_MEM;
	for (cp = boot_command_line; *cp; ) {
		if (memcmp(cp, "mem=", 4) == 0) {
			cp += 4;
			limit = memparse(cp, &end);
			if (end != cp)
				break;
			cp = end;
		} else {
			while (*cp != ' ' && *cp)
				++cp;
			while (*cp == ' ')
				++cp;
		}
	}

	if (limit < mem_limit)
		mem_limit = limit;
}

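/* memparse() accepts the usual k/M/G suffixes, so "mem=512M" on the
 * command line is equivalent to "mem=536870912". */
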
#define MAX_GAP (0x40000000UL >> PAGE_SHIFT)
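/* MAX_GAP is 0x40000000 bytes expressed in pages: ranges separated by
 * more than 1 GB are considered too far apart to merge below. */
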
static void __init setup_bootmem(void)
{
	unsigned long bootmap_size;
	unsigned long mem_max;
	unsigned long bootmap_pages;
	unsigned long bootmap_start_pfn;
	unsigned long bootmap_pfn;
#ifndef CONFIG_DISCONTIGMEM
	physmem_range_t pmem_holes[MAX_PHYSMEM_RANGES - 1];
	int npmem_holes;
#endif
	int i, sysram_resource_count;

	disable_sr_hashing(); /* Turn off space register hashing */

	/*
	 * Sort the ranges. Since the number of ranges is typically
	 * small, and performance is not an issue here, just do
	 * a simple insertion sort.
	 */

	for (i = 1; i < npmem_ranges; i++) {
		int j;

		for (j = i; j > 0; j--) {
			unsigned long tmp;

			if (pmem_ranges[j-1].start_pfn <
			    pmem_ranges[j].start_pfn) {
				break;
			}
			tmp = pmem_ranges[j-1].start_pfn;
			pmem_ranges[j-1].start_pfn = pmem_ranges[j].start_pfn;
			pmem_ranges[j].start_pfn = tmp;
			tmp = pmem_ranges[j-1].pages;
			pmem_ranges[j-1].pages = pmem_ranges[j].pages;
			pmem_ranges[j].pages = tmp;
		}
	}

#ifndef CONFIG_DISCONTIGMEM
	/*
	 * Throw out ranges that are too far apart (controlled by
	 * MAX_GAP).
	 */
	for (i = 1; i < npmem_ranges; i++) {
		if (pmem_ranges[i].start_pfn -
			(pmem_ranges[i-1].start_pfn +
			 pmem_ranges[i-1].pages) > MAX_GAP) {
			npmem_ranges = i;
			printk("Large gap in memory detected (%ld pages). "
			       "Consider turning on CONFIG_DISCONTIGMEM\n",
			       pmem_ranges[i].start_pfn -
			       (pmem_ranges[i-1].start_pfn +
				pmem_ranges[i-1].pages));
			break;
		}
	}
#endif

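	/* With a flat memory model, mem_map[] must span every page from
	 * the first range to the last, so RAM on the far side of a huge
	 * hole would cost more in struct pages for the hole than the
	 * range itself is worth. */
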
	if (npmem_ranges > 1) {

		/* Print the memory ranges */

		printk(KERN_INFO "Memory Ranges:\n");

		for (i = 0; i < npmem_ranges; i++) {
			unsigned long start;
			unsigned long size;

			size = (pmem_ranges[i].pages << PAGE_SHIFT);
			start = (pmem_ranges[i].start_pfn << PAGE_SHIFT);
			printk(KERN_INFO "%2d) Start 0x%016lx End 0x%016lx Size %6ld MB\n",
				i, start, start + (size - 1), size >> 20);
		}
	}

	sysram_resource_count = npmem_ranges;
	for (i = 0; i < sysram_resource_count; i++) {
		struct resource *res = &sysram_resources[i];
		res->name = "System RAM";
		res->start = pmem_ranges[i].start_pfn << PAGE_SHIFT;
		res->end = res->start + (pmem_ranges[i].pages << PAGE_SHIFT)-1;
		res->flags = IORESOURCE_MEM | IORESOURCE_BUSY;
		request_resource(&iomem_resource, res);
	}

	/*
	 * For 32 bit kernels we limit the amount of memory we can
	 * support, in order to preserve enough kernel address space
	 * for other purposes. For 64 bit kernels we don't normally
	 * limit the memory, but this mechanism can be used to
	 * artificially limit the amount of memory (and it is written
	 * to work with multiple memory ranges).
	 */

	mem_limit_func();       /* check for "mem=" argument */

	mem_max = 0;
	num_physpages = 0;
	for (i = 0; i < npmem_ranges; i++) {
		unsigned long rsize;

		rsize = pmem_ranges[i].pages << PAGE_SHIFT;
		if ((mem_max + rsize) > mem_limit) {
			printk(KERN_WARNING "Memory truncated to %ld MB\n", mem_limit >> 20);
			if (mem_max == mem_limit)
				npmem_ranges = i;
			else {
				pmem_ranges[i].pages =   (mem_limit >> PAGE_SHIFT)
						       - (mem_max >> PAGE_SHIFT);
				npmem_ranges = i + 1;
				mem_max = mem_limit;
			}
			num_physpages += pmem_ranges[i].pages;
			break;
		}
		num_physpages += pmem_ranges[i].pages;
		mem_max += rsize;
	}

	printk(KERN_INFO "Total Memory: %ld MB\n",mem_max >> 20);

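	/* Example: with one 1 GB range and "mem=512M" on the command
	 * line, pmem_ranges[0].pages is halved, npmem_ranges stays 1,
	 * and mem_max ends up at the 512 MB limit. */
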
#ifndef CONFIG_DISCONTIGMEM
	/* Merge the ranges, keeping track of the holes */

	{
		unsigned long end_pfn;
		unsigned long hole_pages;

		npmem_holes = 0;
		end_pfn = pmem_ranges[0].start_pfn + pmem_ranges[0].pages;
		for (i = 1; i < npmem_ranges; i++) {

			hole_pages = pmem_ranges[i].start_pfn - end_pfn;
			if (hole_pages) {
				pmem_holes[npmem_holes].start_pfn = end_pfn;
				pmem_holes[npmem_holes++].pages = hole_pages;
				end_pfn += hole_pages;
			}
			end_pfn += pmem_ranges[i].pages;
		}

		pmem_ranges[0].pages = end_pfn - pmem_ranges[0].start_pfn;
		npmem_ranges = 1;
	}
#endif

	bootmap_pages = 0;
	for (i = 0; i < npmem_ranges; i++)
		bootmap_pages += bootmem_bootmap_pages(pmem_ranges[i].pages);

	bootmap_start_pfn = PAGE_ALIGN(__pa((unsigned long) &_end)) >> PAGE_SHIFT;

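	/* bootmem_bootmap_pages() returns how many whole pages are
	 * needed for a one-bit-per-page bootmem bitmap; the bitmaps for
	 * all nodes are placed consecutively, starting just past _end. */
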
#ifdef CONFIG_DISCONTIGMEM
	for (i = 0; i < MAX_PHYSMEM_RANGES; i++) {
		memset(NODE_DATA(i), 0, sizeof(pg_data_t));
		NODE_DATA(i)->bdata = &bootmem_node_data[i];
	}
	memset(pfnnid_map, 0xff, sizeof(pfnnid_map));

	for (i = 0; i < npmem_ranges; i++) {
		node_set_state(i, N_NORMAL_MEMORY);
		node_set_online(i);
	}
#endif

	/*
	 * Initialize and free the full range of memory in each range.
	 * Note that the only writing these routines do are to the bootmap,
	 * and we've made sure to locate the bootmap properly so that they
	 * won't be writing over anything important.
	 */

	bootmap_pfn = bootmap_start_pfn;
	max_pfn = 0;

	for (i = 0; i < npmem_ranges; i++) {
		unsigned long start_pfn;
		unsigned long npages;

		start_pfn = pmem_ranges[i].start_pfn;
		npages = pmem_ranges[i].pages;

		bootmap_size = init_bootmem_node(NODE_DATA(i),
						bootmap_pfn,
						start_pfn,
						(start_pfn + npages) );
		free_bootmem_node(NODE_DATA(i),
				(start_pfn << PAGE_SHIFT),
				(npages << PAGE_SHIFT) );
		bootmap_pfn += (bootmap_size + PAGE_SIZE - 1) >> PAGE_SHIFT;
		if ((start_pfn + npages) > max_pfn)
			max_pfn = start_pfn + npages;
	}

	/* IOMMU is always used to access "high mem" on those boxes
	 * that can support enough mem that a PCI device couldn't
	 * directly DMA to any physical addresses.
	 * ISA DMA support will need to revisit this.
	 */
	max_low_pfn = max_pfn;

	/* bootmap sizing messed up? */
	BUG_ON((bootmap_pfn - bootmap_start_pfn) != bootmap_pages);

	/* reserve PAGE0 pdc memory, kernel text/data/bss & bootmap */

#define PDC_CONSOLE_IO_IODC_SIZE 32768

	reserve_bootmem_node(NODE_DATA(0), 0UL,
			(unsigned long)(PAGE0->mem_free +
				PDC_CONSOLE_IO_IODC_SIZE), BOOTMEM_DEFAULT);
	reserve_bootmem_node(NODE_DATA(0), __pa((unsigned long)_text),
			(unsigned long)(_end - _text), BOOTMEM_DEFAULT);
	reserve_bootmem_node(NODE_DATA(0), (bootmap_start_pfn << PAGE_SHIFT),
			((bootmap_pfn - bootmap_start_pfn) << PAGE_SHIFT),
			BOOTMEM_DEFAULT);

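	/* The three reservations above protect, in order: the PDC/IODC
	 * area at the bottom of physical memory, the kernel image from
	 * _text to _end, and the bootmem bitmaps themselves. */
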
#ifndef CONFIG_DISCONTIGMEM

	/* reserve the holes */

	for (i = 0; i < npmem_holes; i++) {
		reserve_bootmem_node(NODE_DATA(0),
				(pmem_holes[i].start_pfn << PAGE_SHIFT),
				(pmem_holes[i].pages << PAGE_SHIFT),
				BOOTMEM_DEFAULT);
	}
#endif

#ifdef CONFIG_BLK_DEV_INITRD
	if (initrd_start) {
		printk(KERN_INFO "initrd: %08lx-%08lx\n", initrd_start, initrd_end);
		if (__pa(initrd_start) < mem_max) {
			unsigned long initrd_reserve;

			if (__pa(initrd_end) > mem_max) {
				initrd_reserve = mem_max - __pa(initrd_start);
			} else {
				initrd_reserve = initrd_end - initrd_start;
			}
			initrd_below_start_ok = 1;
			printk(KERN_INFO "initrd: reserving %08lx-%08lx (mem_max %08lx)\n", __pa(initrd_start), __pa(initrd_start) + initrd_reserve, mem_max);

			reserve_bootmem_node(NODE_DATA(0), __pa(initrd_start),
					initrd_reserve, BOOTMEM_DEFAULT);
		}
	}
#endif

	data_resource.start =  virt_to_phys(&data_start);
	data_resource.end = virt_to_phys(_end) - 1;
	code_resource.start = virt_to_phys(_text);
	code_resource.end = virt_to_phys(&data_start)-1;

	/* We don't know which region the kernel will be in, so try
	 * all of them.
	 */
	for (i = 0; i < sysram_resource_count; i++) {
		struct resource *res = &sysram_resources[i];
		request_resource(res, &code_resource);
		request_resource(res, &data_resource);
	}
	request_resource(&sysram_resources[0], &pdcdata_resource);
}

void free_initmem(void)
{
	unsigned long addr;
	unsigned long init_begin = (unsigned long)__init_begin;
	unsigned long init_end = (unsigned long)__init_end;

#ifdef CONFIG_DEBUG_KERNEL
	/* Attempt to catch anyone trying to execute code here
	 * by filling the page with BRK insns.
	 */
	memset((void *)init_begin, 0x00, init_end - init_begin);
	flush_icache_range(init_begin, init_end);
#endif

	/* align __init_begin and __init_end to page size,
	   ignoring linker script where we might have tried to save RAM */
	init_begin = PAGE_ALIGN(init_begin);
	init_end = PAGE_ALIGN(init_end);
	for (addr = init_begin; addr < init_end; addr += PAGE_SIZE) {
		ClearPageReserved(virt_to_page(addr));
		init_page_count(virt_to_page(addr));
		free_page(addr);
		num_physpages++;
		totalram_pages++;
	}

	/* set up a new led state on systems shipped with an LED State panel */
	pdc_chassis_send_status(PDC_CHASSIS_DIRECT_BCOMPLETE);

	printk(KERN_INFO "Freeing unused kernel memory: %luk freed\n",
		(init_end - init_begin) >> 10);
}

#ifdef CONFIG_DEBUG_RODATA
void mark_rodata_ro(void)
{
	/* rodata memory was already mapped with KERNEL_RO access rights by
	   pagetable_init() and map_pages(). No need to do additional stuff here */
	printk (KERN_INFO "Write protecting the kernel read-only data: %luk\n",
		(unsigned long)(__end_rodata - __start_rodata) >> 10);
}
#endif

/*
 * Just an arbitrary offset to serve as a "hole" between mapping areas
 * (between top of physical memory and a potential pcxl dma mapping
 * area, and below the vmalloc mapping area).
 *
 * The current 32K value just means that there will be a 32K "hole"
 * between mapping areas. That means that any out-of-bounds memory
 * accesses will hopefully be caught. The vmalloc() routines leave
 * a hole of 4kB between each vmalloced area for the same reason.
 */

/* Leave room for gateway page expansion */
#if KERNEL_MAP_START < GATEWAY_PAGE_SIZE
#error KERNEL_MAP_START is in gateway reserved region
#endif
#define MAP_START (KERNEL_MAP_START)

#define VM_MAP_OFFSET  (32*1024)
#define SET_MAP_OFFSET(x) ((void *)(((unsigned long)(x) + VM_MAP_OFFSET) \
				     & ~(VM_MAP_OFFSET-1)))

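/* SET_MAP_OFFSET() advances an address by at least one byte and at
 * most 32K, landing on the next 32K boundary; e.g.
 * SET_MAP_OFFSET(0x10000001) yields (void *)0x10008000. This is what
 * creates the "hole" described above. */
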
void *parisc_vmalloc_start __read_mostly;
EXPORT_SYMBOL(parisc_vmalloc_start);

#ifdef CONFIG_PA11
unsigned long pcxl_dma_start __read_mostly;
#endif

void __init mem_init(void)
{
	int codesize, reservedpages, datasize, initsize;

	/* Do sanity checks on page table constants */
	BUILD_BUG_ON(PTE_ENTRY_SIZE != sizeof(pte_t));
	BUILD_BUG_ON(PMD_ENTRY_SIZE != sizeof(pmd_t));
	BUILD_BUG_ON(PGD_ENTRY_SIZE != sizeof(pgd_t));
	BUILD_BUG_ON(PAGE_SHIFT + BITS_PER_PTE + BITS_PER_PMD + BITS_PER_PGD
			> BITS_PER_LONG);

	high_memory = __va((max_pfn << PAGE_SHIFT));

#ifndef CONFIG_DISCONTIGMEM
	max_mapnr = page_to_pfn(virt_to_page(high_memory - 1)) + 1;
	totalram_pages += free_all_bootmem();
#else
	{
		int i;

		for (i = 0; i < npmem_ranges; i++)
			totalram_pages += free_all_bootmem_node(NODE_DATA(i));
	}
#endif

	codesize = (unsigned long)_etext - (unsigned long)_text;
	datasize = (unsigned long)_edata - (unsigned long)_etext;
	initsize = (unsigned long)__init_end - (unsigned long)__init_begin;

	reservedpages = 0;
	{
		unsigned long pfn;
#ifdef CONFIG_DISCONTIGMEM
		int i;

		for (i = 0; i < npmem_ranges; i++) {
			for (pfn = node_start_pfn(i); pfn < node_end_pfn(i); pfn++) {
				if (PageReserved(pfn_to_page(pfn)))
					reservedpages++;
			}
		}
#else /* !CONFIG_DISCONTIGMEM */
		for (pfn = 0; pfn < max_pfn; pfn++) {
			/*
			 * Only count reserved RAM pages
			 */
			if (PageReserved(pfn_to_page(pfn)))
				reservedpages++;
		}
#endif
	}

#ifdef CONFIG_PA11
	if (hppa_dma_ops == &pcxl_dma_ops) {
		pcxl_dma_start = (unsigned long)SET_MAP_OFFSET(MAP_START);
		parisc_vmalloc_start = SET_MAP_OFFSET(pcxl_dma_start
						+ PCXL_DMA_MAP_SIZE);
	} else {
		pcxl_dma_start = 0;
		parisc_vmalloc_start = SET_MAP_OFFSET(MAP_START);
	}
#else
	parisc_vmalloc_start = SET_MAP_OFFSET(MAP_START);
#endif

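	/* On PA1.1 machines using the pcxl DMA ops, the PCXL DMA map is
	 * carved out just above MAP_START and the vmalloc area starts
	 * above it; in every other configuration vmalloc starts right
	 * after MAP_START. */
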
	printk(KERN_INFO "Memory: %luk/%luk available (%dk kernel code, %dk reserved, %dk data, %dk init)\n",
		nr_free_pages() << (PAGE_SHIFT-10),
		num_physpages << (PAGE_SHIFT-10),
		codesize >> 10,
		reservedpages << (PAGE_SHIFT-10),
		datasize >> 10,
		initsize >> 10
	);

#ifdef CONFIG_DEBUG_KERNEL /* double-sanity-check paranoia */
	printk("virtual kernel memory layout:\n"
	       "    vmalloc : 0x%p - 0x%p   (%4ld MB)\n"
	       "    memory  : 0x%p - 0x%p   (%4ld MB)\n"
	       "      .init : 0x%p - 0x%p   (%4ld kB)\n"
	       "      .data : 0x%p - 0x%p   (%4ld kB)\n"
	       "      .text : 0x%p - 0x%p   (%4ld kB)\n",

	       (void*)VMALLOC_START, (void*)VMALLOC_END,
	       (VMALLOC_END - VMALLOC_START) >> 20,

	       __va(0), high_memory,
	       ((unsigned long)high_memory - (unsigned long)__va(0)) >> 20,

	       __init_begin, __init_end,
	       ((unsigned long)__init_end - (unsigned long)__init_begin) >> 10,

	       _etext, _edata,
	       ((unsigned long)_edata - (unsigned long)_etext) >> 10,

	       _text, _etext,
	       ((unsigned long)_etext - (unsigned long)_text) >> 10);
#endif
}

unsigned long *empty_zero_page __read_mostly;
EXPORT_SYMBOL(empty_zero_page);

void show_mem(unsigned int filter)
{
	int i,free = 0,total = 0,reserved = 0;
	int shared = 0, cached = 0;

	printk(KERN_INFO "Mem-info:\n");
	show_free_areas();
#ifndef CONFIG_DISCONTIGMEM
	i = max_mapnr;
	while (i-- > 0) {
		total++;
		if (PageReserved(mem_map+i))
			reserved++;
		else if (PageSwapCache(mem_map+i))
			cached++;
		else if (!page_count(&mem_map[i]))
			free++;
		else
			shared += page_count(&mem_map[i]) - 1;
	}
#else
	for (i = 0; i < npmem_ranges; i++) {
		int j;

		for (j = node_start_pfn(i); j < node_end_pfn(i); j++) {
			struct page *p;
			unsigned long flags;

			pgdat_resize_lock(NODE_DATA(i), &flags);
			p = nid_page_nr(i, j) - node_start_pfn(i);

			total++;
			if (PageReserved(p))
				reserved++;
			else if (PageSwapCache(p))
				cached++;
			else if (!page_count(p))
				free++;
			else
				shared += page_count(p) - 1;
			pgdat_resize_unlock(NODE_DATA(i), &flags);
		}
	}
#endif
	printk(KERN_INFO "%d pages of RAM\n", total);
	printk(KERN_INFO "%d reserved pages\n", reserved);
	printk(KERN_INFO "%d pages shared\n", shared);
	printk(KERN_INFO "%d pages swap cached\n", cached);

#ifdef CONFIG_DISCONTIGMEM
	{
		struct zonelist *zl;
		int i, j;

		for (i = 0; i < npmem_ranges; i++) {
			zl = node_zonelist(i, 0);
			for (j = 0; j < MAX_NR_ZONES; j++) {
				struct zoneref *z;
				struct zone *zone;

				printk("Zone list for zone %d on node %d: ", j, i);
				for_each_zone_zonelist(zone, z, zl, j)
					printk("[%d/%s] ", zone_to_nid(zone),
							   zone->name);
				printk("\n");
			}
		}
	}
#endif
}

static void __init map_pages(unsigned long start_vaddr, unsigned long start_paddr, unsigned long size, pgprot_t pgprot)
{
	pgd_t *pg_dir;
	pmd_t *pmd;
	pte_t *pg_table;
	unsigned long end_paddr;
	unsigned long start_pmd;
	unsigned long start_pte;
	unsigned long tmp1;
	unsigned long tmp2;
	unsigned long address;
	unsigned long ro_start;
	unsigned long ro_end;
	unsigned long fv_addr;
	unsigned long gw_addr;
	extern const unsigned long fault_vector_20;
	extern void * const linux_gateway_page;

	ro_start = __pa((unsigned long)_text);
	ro_end   = __pa((unsigned long)&data_start);
	fv_addr  = __pa((unsigned long)&fault_vector_20) & PAGE_MASK;
	gw_addr  = __pa((unsigned long)&linux_gateway_page) & PAGE_MASK;

	end_paddr = start_paddr + size;

	pg_dir = pgd_offset_k(start_vaddr);

#if PTRS_PER_PMD == 1
	start_pmd = 0;
#else
	start_pmd = ((start_vaddr >> PMD_SHIFT) & (PTRS_PER_PMD - 1));
#endif
	start_pte = ((start_vaddr >> PAGE_SHIFT) & (PTRS_PER_PTE - 1));

	address = start_paddr;
	while (address < end_paddr) {
#if PTRS_PER_PMD == 1
		pmd = (pmd_t *)__pa(pg_dir);
#else
		pmd = (pmd_t *)pgd_address(*pg_dir);

		/*
		 * pmd is physical at this point
		 */

		if (!pmd) {
			pmd = (pmd_t *) alloc_bootmem_low_pages_node(NODE_DATA(0),PAGE_SIZE << PMD_ORDER);
			pmd = (pmd_t *) __pa(pmd);
		}

		pgd_populate(NULL, pg_dir, __va(pmd));
#endif
		pg_dir++;

		/* now change pmd to kernel virtual addresses */

		pmd = (pmd_t *)__va(pmd) + start_pmd;
		for (tmp1 = start_pmd; tmp1 < PTRS_PER_PMD; tmp1++,pmd++) {

			/*
			 * pg_table is physical at this point
			 */

			pg_table = (pte_t *)pmd_address(*pmd);
			if (!pg_table) {
				pg_table = (pte_t *)
					alloc_bootmem_low_pages_node(NODE_DATA(0),PAGE_SIZE);
				pg_table = (pte_t *) __pa(pg_table);
			}

			pmd_populate_kernel(NULL, pmd, __va(pg_table));

			/* now change pg_table to kernel virtual addresses */

			pg_table = (pte_t *) __va(pg_table) + start_pte;
			for (tmp2 = start_pte; tmp2 < PTRS_PER_PTE; tmp2++,pg_table++) {
				pte_t pte;

				/*
				 * Map the fault vector writable so we can
				 * write the HPMC checksum.
				 */
#if defined(CONFIG_PARISC_PAGE_SIZE_4KB)
				if (address >= ro_start && address < ro_end
							&& address != fv_addr
							&& address != gw_addr)
					pte = __mk_pte(address, PAGE_KERNEL_RO);
				else
#endif
					pte = __mk_pte(address, pgprot);

				if (address >= end_paddr)
					pte_val(pte) = 0;

				set_pte(pg_table, pte);

				address += PAGE_SIZE;
			}
			start_pte = 0;

			if (address >= end_paddr)
				break;
		}
		start_pmd = 0;
	}
}

/*
 * pagetable_init() sets up the page tables
 *
 * Note that gateway_init() places the Linux gateway page at page 0.
 * Since gateway pages cannot be dereferenced this has the desirable
 * side effect of trapping those pesky NULL-reference errors in the
 * kernel.
 */
static void __init pagetable_init(void)
{
	int range;

	/* Map each physical memory range to its kernel vaddr */

	for (range = 0; range < npmem_ranges; range++) {
		unsigned long start_paddr;
		unsigned long end_paddr;
		unsigned long size;

		start_paddr = pmem_ranges[range].start_pfn << PAGE_SHIFT;
		end_paddr = start_paddr + (pmem_ranges[range].pages << PAGE_SHIFT);
		size = pmem_ranges[range].pages << PAGE_SHIFT;

		map_pages((unsigned long)__va(start_paddr), start_paddr,
			size, PAGE_KERNEL);
	}

#ifdef CONFIG_BLK_DEV_INITRD
	if (initrd_end && initrd_end > mem_limit) {
		printk(KERN_INFO "initrd: mapping %08lx-%08lx\n", initrd_start, initrd_end);
		map_pages(initrd_start, __pa(initrd_start),
			initrd_end - initrd_start, PAGE_KERNEL);
	}
#endif

	empty_zero_page = alloc_bootmem_pages(PAGE_SIZE);
	memset(empty_zero_page, 0, PAGE_SIZE);
}

static void __init gateway_init(void)
{
	unsigned long linux_gateway_page_addr;
	/* FIXME: This is 'const' in order to trick the compiler
	   into not treating it as DP-relative data. */
	extern void * const linux_gateway_page;

	linux_gateway_page_addr = LINUX_GATEWAY_ADDR & PAGE_MASK;

	/*
	 * Setup Linux Gateway page.
	 *
	 * The Linux gateway page will reside in kernel space (on virtual
	 * page 0), so it doesn't need to be aliased into user space.
	 */

	map_pages(linux_gateway_page_addr, __pa(&linux_gateway_page),
		PAGE_SIZE, PAGE_GATEWAY);
}

#ifdef CONFIG_HPUX
void
map_hpux_gateway_page(struct task_struct *tsk, struct mm_struct *mm)
{
	pgd_t *pg_dir;
	pmd_t *pmd;
	pte_t *pg_table;
	unsigned long start_pmd;
	unsigned long start_pte;
	unsigned long address;
	unsigned long hpux_gw_page_addr;
	/* FIXME: This is 'const' in order to trick the compiler
	   into not treating it as DP-relative data. */
	extern void * const hpux_gateway_page;

	hpux_gw_page_addr = HPUX_GATEWAY_ADDR & PAGE_MASK;

	/*
	 * Setup HP-UX Gateway page.
	 *
	 * The HP-UX gateway page resides in the user address space,
	 * so it needs to be aliased into each process.
	 */

	pg_dir = pgd_offset(mm,hpux_gw_page_addr);

#if PTRS_PER_PMD == 1
	start_pmd = 0;
#else
	start_pmd = ((hpux_gw_page_addr >> PMD_SHIFT) & (PTRS_PER_PMD - 1));
#endif
	start_pte = ((hpux_gw_page_addr >> PAGE_SHIFT) & (PTRS_PER_PTE - 1));

	address = __pa(&hpux_gateway_page);
#if PTRS_PER_PMD == 1
	pmd = (pmd_t *)__pa(pg_dir);
#else
	pmd = (pmd_t *) pgd_address(*pg_dir);

	/*
	 * pmd is physical at this point
	 */

	if (!pmd) {
		pmd = (pmd_t *) get_zeroed_page(GFP_KERNEL);
		pmd = (pmd_t *) __pa(pmd);
	}

	__pgd_val_set(*pg_dir, PxD_FLAG_PRESENT | PxD_FLAG_VALID | (unsigned long) pmd);
#endif
	/* now change pmd to kernel virtual addresses */

	pmd = (pmd_t *)__va(pmd) + start_pmd;

	/*
	 * pg_table is physical at this point
	 */

	pg_table = (pte_t *) pmd_address(*pmd);
	if (!pg_table)
		pg_table = (pte_t *) __pa(get_zeroed_page(GFP_KERNEL));

	__pmd_val_set(*pmd, PxD_FLAG_PRESENT | PxD_FLAG_VALID | (unsigned long) pg_table);

	/* now change pg_table to kernel virtual addresses */

	pg_table = (pte_t *) __va(pg_table) + start_pte;
	set_pte(pg_table, __mk_pte(address, PAGE_GATEWAY));
}
EXPORT_SYMBOL(map_hpux_gateway_page);
#endif

void __init paging_init(void)
{
	int i;

	setup_bootmem();
	pagetable_init();
	gateway_init();
	flush_cache_all_local(); /* start with known state */
	flush_tlb_all_local(NULL);

	for (i = 0; i < npmem_ranges; i++) {
		unsigned long zones_size[MAX_NR_ZONES] = { 0, };

		zones_size[ZONE_NORMAL] = pmem_ranges[i].pages;

#ifdef CONFIG_DISCONTIGMEM
		/* Need to initialize the pfnnid_map before we can initialize
		   the zone */
		{
			int j;
			for (j = (pmem_ranges[i].start_pfn >> PFNNID_SHIFT);
			     j <= ((pmem_ranges[i].start_pfn + pmem_ranges[i].pages) >> PFNNID_SHIFT);
			     j++) {
				pfnnid_map[j] = i;
			}
		}
#endif

		free_area_init_node(i, zones_size,
				pmem_ranges[i].start_pfn, NULL);
	}
}

#ifdef CONFIG_PA20

/*
 * Currently, all PA20 chips have 18 bit protection IDs, which is the
 * limiting factor (space ids are 32 bits).
 */

#define NR_SPACE_IDS 262144

#else

/*
 * Currently we have a one-to-one relationship between space IDs and
 * protection IDs. Older parisc chips (PCXS, PCXT, PCXL, PCXL2) only
 * support 15 bit protection IDs, so that is the limiting factor.
 * PCXT' has 18 bit protection IDs, but only 16 bit spaceids, so it's
 * probably not worth the effort for a special case here.
 */

#define NR_SPACE_IDS 32768

#endif  /* !CONFIG_PA20 */

#define RECYCLE_THRESHOLD (NR_SPACE_IDS / 2)
#define SID_ARRAY_SIZE (NR_SPACE_IDS / (8 * sizeof(long)))

static unsigned long space_id[SID_ARRAY_SIZE] = { 1 }; /* disallow space 0 */
static unsigned long dirty_space_id[SID_ARRAY_SIZE];
static unsigned long space_id_index;
static unsigned long free_space_ids = NR_SPACE_IDS - 1;
static unsigned long dirty_space_ids = 0;

static DEFINE_SPINLOCK(sid_lock);
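
/* space_id[] is a bitmap with one bit per space id; a set bit means
 * "in use" (bit 0 is pre-set so space 0 is never handed out).
 * dirty_space_id[] collects ids that were freed but whose translations
 * may still sit in a TLB somewhere; they only become allocatable again
 * once a full TLB flush recycles them. */
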
unsigned long alloc_sid(void)
{
	unsigned long index;

	spin_lock(&sid_lock);

	if (free_space_ids == 0) {
		if (dirty_space_ids != 0) {
			spin_unlock(&sid_lock);
			flush_tlb_all(); /* flush_tlb_all() calls recycle_sids() */
			spin_lock(&sid_lock);
		}
		BUG_ON(free_space_ids == 0);
	}

	free_space_ids--;

	index = find_next_zero_bit(space_id, NR_SPACE_IDS, space_id_index);
	space_id[index >> SHIFT_PER_LONG] |= (1L << (index & (BITS_PER_LONG - 1)));
	space_id_index = index;

	spin_unlock(&sid_lock);

	return index << SPACEID_SHIFT;
}

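/* The value returned above encodes the bitmap index in space-register
 * format; free_sid() below recovers the index by shifting
 * SPACEID_SHIFT back out. */
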
void free_sid(unsigned long spaceid)
{
	unsigned long index = spaceid >> SPACEID_SHIFT;
	unsigned long *dirty_space_offset;

	dirty_space_offset = dirty_space_id + (index >> SHIFT_PER_LONG);
	index &= (BITS_PER_LONG - 1);

	spin_lock(&sid_lock);

	BUG_ON(*dirty_space_offset & (1L << index)); /* attempt to free space id twice */

	*dirty_space_offset |= (1L << index);
	dirty_space_ids++;

	spin_unlock(&sid_lock);
}

#ifdef CONFIG_SMP
static void get_dirty_sids(unsigned long *ndirtyptr,unsigned long *dirty_array)
{
	int i;

	/* NOTE: sid_lock must be held upon entry */

	*ndirtyptr = dirty_space_ids;
	if (dirty_space_ids != 0) {
		for (i = 0; i < SID_ARRAY_SIZE; i++) {
			dirty_array[i] = dirty_space_id[i];
			dirty_space_id[i] = 0;
		}
		dirty_space_ids = 0;
	}

	return;
}

static void recycle_sids(unsigned long ndirty,unsigned long *dirty_array)
{
	int i;

	/* NOTE: sid_lock must be held upon entry */

	if (ndirty != 0) {
		for (i = 0; i < SID_ARRAY_SIZE; i++) {
			space_id[i] ^= dirty_array[i];
		}

		free_space_ids += ndirty;
		space_id_index = 0;
	}
}

#else /* CONFIG_SMP */

static void recycle_sids(void)
{
	int i;

	/* NOTE: sid_lock must be held upon entry */

	if (dirty_space_ids != 0) {
		for (i = 0; i < SID_ARRAY_SIZE; i++) {
			space_id[i] ^= dirty_space_id[i];
			dirty_space_id[i] = 0;
		}

		free_space_ids += dirty_space_ids;
		dirty_space_ids = 0;
		space_id_index = 0;
	}
}
#endif

/*
 * flush_tlb_all() calls recycle_sids(), since whenever the entire tlb is
 * purged, we can safely reuse the space ids that were released but
 * not flushed from the tlb.
 */

#ifdef CONFIG_SMP

static unsigned long recycle_ndirty;
static unsigned long recycle_dirty_array[SID_ARRAY_SIZE];
static unsigned int recycle_inuse;

void flush_tlb_all(void)
{
	int do_recycle;

	do_recycle = 0;
	spin_lock(&sid_lock);
	if (dirty_space_ids > RECYCLE_THRESHOLD) {
		BUG_ON(recycle_inuse);  /* FIXME: Use a semaphore/wait queue here */
		get_dirty_sids(&recycle_ndirty,recycle_dirty_array);
		recycle_inuse++;
		do_recycle++;
	}
	spin_unlock(&sid_lock);
	on_each_cpu(flush_tlb_all_local, NULL, 1);
	if (do_recycle) {
		spin_lock(&sid_lock);
		recycle_sids(recycle_ndirty,recycle_dirty_array);
		recycle_inuse = 0;
		spin_unlock(&sid_lock);
	}
}
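
/* The dirty ids are snapshotted into recycle_dirty_array while the
 * lock is held, the cross-CPU flush then runs unlocked, and only after
 * it completes are the ids folded back into space_id[]; recycle_inuse
 * guards the single static snapshot buffer across that window. */
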
#else
void flush_tlb_all(void)
{
	spin_lock(&sid_lock);
	flush_tlb_all_local(NULL);
	recycle_sids();
	spin_unlock(&sid_lock);
}
#endif

#ifdef CONFIG_BLK_DEV_INITRD
void free_initrd_mem(unsigned long start, unsigned long end)
{
	if (start >= end)
		return;
	printk(KERN_INFO "Freeing initrd memory: %ldk freed\n", (end - start) >> 10);
	for (; start < end; start += PAGE_SIZE) {
		ClearPageReserved(virt_to_page(start));
		init_page_count(virt_to_page(start));
		free_page(start);
		num_physpages++;
		totalram_pages++;
	}
}
#endif