/*
 *  linux/arch/parisc/mm/init.c
 *
 *  Copyright (C) 1995	Linus Torvalds
 *  Copyright 1999 SuSE GmbH
 *    changed by Philipp Rumpf
 *  Copyright 1999 Philipp Rumpf (prumpf@tux.org)
 *  Copyright 2004 Randolph Chung (tausq@debian.org)
 *
 */
#include <linux/config.h>

#include <linux/module.h>
#include <linux/mm.h>
#include <linux/bootmem.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/pci.h>		/* for hppa_dma_ops and pcxl_dma_ops */
#include <linux/initrd.h>
#include <linux/swap.h>
#include <linux/unistd.h>

#include <asm/pgalloc.h>
#include <asm/tlb.h>
#include <asm/pdc_chassis.h>
#include <asm/mmzone.h>
DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);

extern char _text;		/* start of kernel code, defined by linker */
extern int  data_start;
extern char _end;		/* end of BSS, defined by linker */
extern char __init_begin, __init_end;
#ifdef CONFIG_DISCONTIGMEM
struct node_map_data node_data[MAX_NUMNODES];
bootmem_data_t bmem_data[MAX_NUMNODES];
unsigned char pfnnid_map[PFNNID_MAP_MAX];
#endif
static struct resource data_resource = {
	.name	= "Kernel data",
	.flags	= IORESOURCE_BUSY | IORESOURCE_MEM,
};

static struct resource code_resource = {
	.name	= "Kernel code",
	.flags	= IORESOURCE_BUSY | IORESOURCE_MEM,
};

static struct resource pdcdata_resource = {
	.name	= "PDC data (Page Zero)",
	.flags	= IORESOURCE_BUSY | IORESOURCE_MEM,
};

static struct resource sysram_resources[MAX_PHYSMEM_RANGES];
static unsigned long max_pfn;

/* The following array is initialized from the firmware specific
 * information retrieved in kernel/inventory.c.
 */

physmem_range_t pmem_ranges[MAX_PHYSMEM_RANGES];
int npmem_ranges;

#ifdef __LP64__
#define MAX_MEM         (~0UL)
#else /* !__LP64__ */
#define MAX_MEM         (3584U*1024U*1024U)
#endif /* !__LP64__ */

static unsigned long mem_limit = MAX_MEM;
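
/*
 * mem_limit_func(): scan the kernel command line for a "mem=" argument
 * and lower mem_limit accordingly.  It is called from setup_bootmem(),
 * i.e. before the regular __setup() handlers run.
 */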
static void __init mem_limit_func(void)
{
	char *cp, *end;
	unsigned long limit;
	extern char saved_command_line[];

	/* We need this before __setup() functions are called */

	limit = MAX_MEM;
	for (cp = saved_command_line; *cp; ) {
		if (memcmp(cp, "mem=", 4) == 0) {
			cp += 4;
			limit = memparse(cp, &end);
			if (end != cp)
				break;
			cp = end;
		} else {
			while (*cp != ' ' && *cp)
				++cp;
			while (*cp == ' ')
				++cp;
		}
	}

	if (limit < mem_limit)
		mem_limit = limit;
}
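
/*
 * setup_bootmem(): sort and sanity-check the firmware-reported physical
 * memory ranges, apply any "mem=" limit, register the System RAM
 * resources, initialize the bootmem allocator for each range, and
 * reserve PDC/page-zero memory, the kernel image, the bootmaps, the
 * memory holes and (if present) the initrd.
 */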
#define MAX_GAP (0x40000000UL >> PAGE_SHIFT)

static void __init setup_bootmem(void)
{
	unsigned long bootmap_size;
	unsigned long mem_max;
	unsigned long bootmap_pages;
	unsigned long bootmap_start_pfn;
	unsigned long bootmap_pfn;
#ifndef CONFIG_DISCONTIGMEM
	physmem_range_t pmem_holes[MAX_PHYSMEM_RANGES - 1];
	int npmem_holes;
#endif
	int i, sysram_resource_count;
	disable_sr_hashing(); /* Turn off space register hashing */

	/*
	 * Sort the ranges. Since the number of ranges is typically
	 * small, and performance is not an issue here, just do
	 * a simple insertion sort.
	 */

	for (i = 1; i < npmem_ranges; i++) {
		int j;

		for (j = i; j > 0; j--) {
			unsigned long tmp;

			if (pmem_ranges[j-1].start_pfn <
			    pmem_ranges[j].start_pfn) {
				break;
			}
			tmp = pmem_ranges[j-1].start_pfn;
			pmem_ranges[j-1].start_pfn = pmem_ranges[j].start_pfn;
			pmem_ranges[j].start_pfn = tmp;
			tmp = pmem_ranges[j-1].pages;
			pmem_ranges[j-1].pages = pmem_ranges[j].pages;
			pmem_ranges[j].pages = tmp;
		}
	}
#ifndef CONFIG_DISCONTIGMEM
	/*
	 * Throw out ranges that are too far apart (controlled by
	 * MAX_GAP).
	 */
	for (i = 1; i < npmem_ranges; i++) {
		if (pmem_ranges[i].start_pfn -
			(pmem_ranges[i-1].start_pfn +
			 pmem_ranges[i-1].pages) > MAX_GAP) {
			npmem_ranges = i;
			printk("Large gap in memory detected (%ld pages). "
			       "Consider turning on CONFIG_DISCONTIGMEM\n",
			       pmem_ranges[i].start_pfn -
				(pmem_ranges[i-1].start_pfn +
				 pmem_ranges[i-1].pages));
			break;
		}
	}
#endif
	if (npmem_ranges > 1) {

		/* Print the memory ranges */

		printk(KERN_INFO "Memory Ranges:\n");

		for (i = 0; i < npmem_ranges; i++) {
			unsigned long start;
			unsigned long size;

			size = (pmem_ranges[i].pages << PAGE_SHIFT);
			start = (pmem_ranges[i].start_pfn << PAGE_SHIFT);
			printk(KERN_INFO "%2d) Start 0x%016lx End 0x%016lx Size %6ld Mb\n",
				i, start, start + (size - 1), size >> 20);
		}
	}
	sysram_resource_count = npmem_ranges;
	for (i = 0; i < sysram_resource_count; i++) {
		struct resource *res = &sysram_resources[i];

		res->name = "System RAM";
		res->start = pmem_ranges[i].start_pfn << PAGE_SHIFT;
		res->end = res->start + (pmem_ranges[i].pages << PAGE_SHIFT) - 1;
		res->flags = IORESOURCE_MEM | IORESOURCE_BUSY;
		request_resource(&iomem_resource, res);
	}
	/*
	 * For 32 bit kernels we limit the amount of memory we can
	 * support, in order to preserve enough kernel address space
	 * for other purposes. For 64 bit kernels we don't normally
	 * limit the memory, but this mechanism can be used to
	 * artificially limit the amount of memory (and it is written
	 * to work with multiple memory ranges).
	 */

	mem_limit_func();	/* check for "mem=" argument */
	mem_max = 0;
	num_physpages = 0;
	for (i = 0; i < npmem_ranges; i++) {
		unsigned long rsize;

		rsize = pmem_ranges[i].pages << PAGE_SHIFT;
		if ((mem_max + rsize) > mem_limit) {
			printk(KERN_WARNING "Memory truncated to %ld Mb\n", mem_limit >> 20);
			if (mem_max == mem_limit)
				npmem_ranges = i;
			else {
				pmem_ranges[i].pages =   (mem_limit >> PAGE_SHIFT)
						       - (mem_max >> PAGE_SHIFT);
				npmem_ranges = i + 1;
				mem_max = mem_limit;
			}
			num_physpages += pmem_ranges[i].pages;
			break;
		}
		num_physpages += pmem_ranges[i].pages;
		mem_max += rsize;
	}

	printk(KERN_INFO "Total Memory: %ld Mb\n", mem_max >> 20);
#ifndef CONFIG_DISCONTIGMEM
	/* Merge the ranges, keeping track of the holes */

	{
		unsigned long end_pfn;
		unsigned long hole_pages;

		npmem_holes = 0;
		end_pfn = pmem_ranges[0].start_pfn + pmem_ranges[0].pages;
		for (i = 1; i < npmem_ranges; i++) {

			hole_pages = pmem_ranges[i].start_pfn - end_pfn;
			if (hole_pages) {
				pmem_holes[npmem_holes].start_pfn = end_pfn;
				pmem_holes[npmem_holes++].pages = hole_pages;
				end_pfn += hole_pages;
			}
			end_pfn += pmem_ranges[i].pages;
		}

		pmem_ranges[0].pages = end_pfn - pmem_ranges[0].start_pfn;
		npmem_ranges = 1;
	}
#endif
	bootmap_pages = 0;
	for (i = 0; i < npmem_ranges; i++)
		bootmap_pages += bootmem_bootmap_pages(pmem_ranges[i].pages);

	bootmap_start_pfn = PAGE_ALIGN(__pa((unsigned long) &_end)) >> PAGE_SHIFT;
#ifdef CONFIG_DISCONTIGMEM
	for (i = 0; i < MAX_PHYSMEM_RANGES; i++) {
		memset(NODE_DATA(i), 0, sizeof(pg_data_t));
		NODE_DATA(i)->bdata = &bmem_data[i];
	}
	memset(pfnnid_map, 0xff, sizeof(pfnnid_map));

	numnodes = npmem_ranges;

	for (i = 0; i < npmem_ranges; i++)
		node_set_online(i);
#endif
	/*
	 * Initialize and free the full range of memory in each range.
	 * Note that the only writing these routines do is to the bootmap,
	 * and we've made sure to locate the bootmap properly so that they
	 * won't be writing over anything important.
	 */

	bootmap_pfn = bootmap_start_pfn;
	for (i = 0; i < npmem_ranges; i++) {
		unsigned long start_pfn;
		unsigned long npages;

		start_pfn = pmem_ranges[i].start_pfn;
		npages = pmem_ranges[i].pages;

		bootmap_size = init_bootmem_node(NODE_DATA(i),
						bootmap_pfn,
						start_pfn,
						(start_pfn + npages) );
		free_bootmem_node(NODE_DATA(i),
				  (start_pfn << PAGE_SHIFT),
				  (npages << PAGE_SHIFT) );
		bootmap_pfn += (bootmap_size + PAGE_SIZE - 1) >> PAGE_SHIFT;
		if ((start_pfn + npages) > max_pfn)
			max_pfn = start_pfn + npages;
	}

	if ((bootmap_pfn - bootmap_start_pfn) != bootmap_pages) {
		printk(KERN_WARNING "WARNING! bootmap sizing is messed up!\n");
		BUG();
	}
	/* reserve PAGE0 pdc memory, kernel text/data/bss & bootmap */

#define PDC_CONSOLE_IO_IODC_SIZE 32768

	reserve_bootmem_node(NODE_DATA(0), 0UL,
			(unsigned long)(PAGE0->mem_free + PDC_CONSOLE_IO_IODC_SIZE));
	reserve_bootmem_node(NODE_DATA(0), __pa((unsigned long)&_text),
			(unsigned long)(&_end - &_text));
	reserve_bootmem_node(NODE_DATA(0), (bootmap_start_pfn << PAGE_SHIFT),
			((bootmap_pfn - bootmap_start_pfn) << PAGE_SHIFT));
#ifndef CONFIG_DISCONTIGMEM

	/* reserve the holes */

	for (i = 0; i < npmem_holes; i++) {
		reserve_bootmem_node(NODE_DATA(0),
				(pmem_holes[i].start_pfn << PAGE_SHIFT),
				(pmem_holes[i].pages << PAGE_SHIFT));
	}
#endif
#ifdef CONFIG_BLK_DEV_INITRD
	if (initrd_start) {
		printk(KERN_INFO "initrd: %08lx-%08lx\n", initrd_start, initrd_end);
		if (__pa(initrd_start) < mem_max) {
			unsigned long initrd_reserve;

			if (__pa(initrd_end) > mem_max) {
				initrd_reserve = mem_max - __pa(initrd_start);
			} else {
				initrd_reserve = initrd_end - initrd_start;
			}
			initrd_below_start_ok = 1;
			printk(KERN_INFO "initrd: reserving %08lx-%08lx (mem_max %08lx)\n",
				__pa(initrd_start), __pa(initrd_start) + initrd_reserve, mem_max);

			reserve_bootmem_node(NODE_DATA(0), __pa(initrd_start), initrd_reserve);
		}
	}
#endif
	data_resource.start = virt_to_phys(&data_start);
	data_resource.end = virt_to_phys(&_end) - 1;
	code_resource.start = virt_to_phys(&_text);
	code_resource.end = virt_to_phys(&data_start) - 1;

	/* We don't know which region the kernel will be in, so try
	 * all of them.
	 */
	for (i = 0; i < sysram_resource_count; i++) {
		struct resource *res = &sysram_resources[i];

		request_resource(res, &code_resource);
		request_resource(res, &data_resource);
	}
	request_resource(&sysram_resources[0], &pdcdata_resource);
}
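
/*
 * free_initmem(): poison the init sections and hand their pages back to
 * the page allocator once booting is complete (or just report their size
 * when freeing is compiled out).
 */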
void free_initmem(void)
{
	/* FIXME: */
#if 0
	printk(KERN_INFO "NOT FREEING INITMEM (%dk)\n",
			(&__init_end - &__init_begin) >> 10);
	return;
#else
	unsigned long addr;

	printk(KERN_INFO "Freeing unused kernel memory: ");
	/* Attempt to catch anyone trying to execute code here
	 * by filling the page with BRK insns.
	 *
	 * If we disable interrupts for all CPUs, then IPI stops working.
	 * Kinda breaks the global cache flushing.
	 */
	local_irq_disable();

	memset(&__init_begin, 0x00,
		(unsigned long)&__init_end - (unsigned long)&__init_begin);

	asm volatile("sync" : : );
	flush_icache_range((unsigned long)&__init_begin, (unsigned long)&__init_end);
	asm volatile("sync" : : );

	local_irq_enable();
	addr = (unsigned long)(&__init_begin);
	for (; addr < (unsigned long)(&__init_end); addr += PAGE_SIZE) {
		ClearPageReserved(virt_to_page(addr));
		set_page_count(virt_to_page(addr), 1);
		free_page(addr);
		num_physpages++;
		totalram_pages++;
	}

	/* set up a new led state on systems shipped with an LED state panel */
	pdc_chassis_send_status(PDC_CHASSIS_DIRECT_BCOMPLETE);

	printk("%luk freed\n", (unsigned long)(&__init_end - &__init_begin) >> 10);
#endif
}
/*
 * Just an arbitrary offset to serve as a "hole" between mapping areas
 * (between top of physical memory and a potential pcxl dma mapping
 * area, and below the vmalloc mapping area).
 *
 * The current 32K value just means that there will be a 32K "hole"
 * between mapping areas. That means that any out-of-bounds memory
 * accesses will hopefully be caught. The vmalloc() routines leave
 * a hole of 4kB between each vmalloced area for the same reason.
 */

/* Leave room for gateway page expansion */
#if KERNEL_MAP_START < GATEWAY_PAGE_SIZE
#error KERNEL_MAP_START is in gateway reserved region
#endif
#define MAP_START (KERNEL_MAP_START)

#define VM_MAP_OFFSET  (32*1024)
#define SET_MAP_OFFSET(x) ((void *)(((unsigned long)(x) + VM_MAP_OFFSET) \
				     & ~(VM_MAP_OFFSET-1)))
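
/*
 * Illustrative example (values not from the original source): with
 * VM_MAP_OFFSET = 32K = 0x8000, SET_MAP_OFFSET(0x40012345) yields
 * 0x40018000, i.e. the address is rounded up to the next 32K boundary;
 * an already aligned address such as 0x40010000 is bumped by a full
 * 32K to 0x40018000, which is what creates the "hole".
 */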
void *vmalloc_start;
EXPORT_SYMBOL(vmalloc_start);

unsigned long pcxl_dma_start;
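
/*
 * mem_init(): hand the bootmem pages over to the page allocator, report
 * the available memory, and place the vmalloc area (and, when the pcxl
 * DMA ops are in use, the PCXL DMA mapping area) above the top of the
 * kernel mapping.
 */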
void __init mem_init(void)
{
	high_memory = __va((max_pfn << PAGE_SHIFT));

#ifndef CONFIG_DISCONTIGMEM
	max_mapnr = page_to_pfn(virt_to_page(high_memory - 1)) + 1;
	mem_map = zone_table[ZONE_DMA]->zone_mem_map;
	totalram_pages += free_all_bootmem();
#else
	{
		int i;

		for (i = 0; i < npmem_ranges; i++)
			totalram_pages += free_all_bootmem_node(NODE_DATA(i));
	}
#endif

	printk(KERN_INFO "Memory: %luk available\n", num_physpages << (PAGE_SHIFT-10));

#ifdef CONFIG_PA11
	if (hppa_dma_ops == &pcxl_dma_ops) {
		pcxl_dma_start = (unsigned long)SET_MAP_OFFSET(MAP_START);
		vmalloc_start = SET_MAP_OFFSET(pcxl_dma_start + PCXL_DMA_MAP_SIZE);
	} else {
		pcxl_dma_start = 0;
		vmalloc_start = SET_MAP_OFFSET(MAP_START);
	}
#else
	vmalloc_start = SET_MAP_OFFSET(MAP_START);
#endif
}
int do_check_pgt_cache(int low, int high)
{
	return 0;
}

unsigned long *empty_zero_page;
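
/*
 * show_mem(): dump a summary of memory state (free swap plus counts of
 * total, reserved, shared and swap-cached pages), walking mem_map
 * directly or per node when DISCONTIGMEM is enabled.
 */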
void show_mem(void)
{
	int i, free = 0, total = 0, reserved = 0;
	int shared = 0, cached = 0;

	printk(KERN_INFO "Mem-info:\n");
	show_free_areas();
	printk(KERN_INFO "Free swap: %6ldkB\n",
			nr_swap_pages << (PAGE_SHIFT-10));
#ifndef CONFIG_DISCONTIGMEM
	i = max_mapnr;
	while (i-- > 0) {
		total++;
		if (PageReserved(mem_map+i))
			reserved++;
		else if (PageSwapCache(mem_map+i))
			cached++;
		else if (!page_count(&mem_map[i]))
			free++;
		else
			shared += page_count(&mem_map[i]) - 1;
	}
#else
	for (i = 0; i < npmem_ranges; i++) {
		int j;

		for (j = node_start_pfn(i); j < node_end_pfn(i); j++) {
			struct page *p;

			p = node_mem_map(i) + j - node_start_pfn(i);

			total++;
			if (PageReserved(p))
				reserved++;
			else if (PageSwapCache(p))
				cached++;
			else if (!page_count(p))
				free++;
			else
				shared += page_count(p) - 1;
		}
	}
#endif
	printk(KERN_INFO "%d pages of RAM\n", total);
	printk(KERN_INFO "%d reserved pages\n", reserved);
	printk(KERN_INFO "%d pages shared\n", shared);
	printk(KERN_INFO "%d pages swap cached\n", cached);

#ifdef CONFIG_DISCONTIGMEM
	{
		struct zonelist *zl;
		int j, k;

		for (i = 0; i < npmem_ranges; i++) {
			for (j = 0; j < MAX_NR_ZONES; j++) {
				zl = NODE_DATA(i)->node_zonelists + j;

				printk("Zone list for zone %d on node %d: ", j, i);
				for (k = 0; zl->zones[k] != NULL; k++)
					printk("[%d/%s] ", zl->zones[k]->zone_pgdat->node_id, zl->zones[k]->name);
				printk("\n");
			}
		}
	}
#endif
}
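
/*
 * map_pages(): build kernel page tables so that the physical range
 * [start_paddr, start_paddr + size) is mapped at start_vaddr with the
 * given protection.  Kernel text is mapped read-only, except for the
 * fault vector (whose HPMC checksum must stay writable) and the gateway
 * page.
 */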
static void __init map_pages(unsigned long start_vaddr, unsigned long start_paddr, unsigned long size, pgprot_t pgprot)
{
	pgd_t *pg_dir;
	pmd_t *pmd;
	pte_t *pg_table;
	unsigned long end_paddr;
	unsigned long start_pmd;
	unsigned long start_pte;
	unsigned long tmp1;
	unsigned long tmp2;
	unsigned long address;
	unsigned long ro_start;
	unsigned long ro_end;
	unsigned long fv_addr;
	unsigned long gw_addr;
	extern const unsigned long fault_vector_20;
	extern void * const linux_gateway_page;

	ro_start = __pa((unsigned long)&_text);
	ro_end   = __pa((unsigned long)&data_start);
	fv_addr  = __pa((unsigned long)&fault_vector_20) & PAGE_MASK;
	gw_addr  = __pa((unsigned long)&linux_gateway_page) & PAGE_MASK;

	end_paddr = start_paddr + size;

	pg_dir = pgd_offset_k(start_vaddr);
#if PTRS_PER_PMD == 1
	start_pmd = 0;
#else
	start_pmd = ((start_vaddr >> PMD_SHIFT) & (PTRS_PER_PMD - 1));
#endif
	start_pte = ((start_vaddr >> PAGE_SHIFT) & (PTRS_PER_PTE - 1));

	address = start_paddr;
	while (address < end_paddr) {
#if PTRS_PER_PMD == 1
		pmd = (pmd_t *)__pa(pg_dir);
#else
		pmd = (pmd_t *)pgd_address(*pg_dir);

		/*
		 * pmd is physical at this point
		 */

		if (!pmd) {
			pmd = (pmd_t *) alloc_bootmem_low_pages_node(NODE_DATA(0), PAGE_SIZE << PMD_ORDER);
			pmd = (pmd_t *) __pa(pmd);
		}

		pgd_populate(NULL, pg_dir, __va(pmd));
#endif
		pg_dir++;

		/* now change pmd to kernel virtual addresses */

		pmd = (pmd_t *)__va(pmd) + start_pmd;
		for (tmp1 = start_pmd; tmp1 < PTRS_PER_PMD; tmp1++, pmd++) {
			/*
			 * pg_table is physical at this point
			 */

			pg_table = (pte_t *)pmd_address(*pmd);
			if (!pg_table) {
				pg_table = (pte_t *)
					alloc_bootmem_low_pages_node(NODE_DATA(0), PAGE_SIZE);
				pg_table = (pte_t *) __pa(pg_table);
			}

			pmd_populate_kernel(NULL, pmd, __va(pg_table));

			/* now change pg_table to kernel virtual addresses */

			pg_table = (pte_t *) __va(pg_table) + start_pte;
			for (tmp2 = start_pte; tmp2 < PTRS_PER_PTE; tmp2++, pg_table++) {
				pte_t pte;
				/*
				 * Map the fault vector writable so we can
				 * write the HPMC checksum.
				 */
				if (address >= ro_start && address < ro_end
							&& address != fv_addr
							&& address != gw_addr)
					pte = __mk_pte(address, PAGE_KERNEL_RO);
				else
					pte = __mk_pte(address, pgprot);

				if (address >= end_paddr)
					pte_val(pte) = 0;

				set_pte(pg_table, pte);

				address += PAGE_SIZE;
			}
			start_pte = 0;

			if (address >= end_paddr)
				break;
		}
		start_pmd = 0;
	}
}
/*
 * pagetable_init() sets up the page tables
 *
 * Note that gateway_init() places the Linux gateway page at page 0.
 * Since gateway pages cannot be dereferenced this has the desirable
 * side effect of trapping those pesky NULL-reference errors in the
 * kernel.
 */
static void __init pagetable_init(void)
{
	int range;
	/* Map each physical memory range to its kernel vaddr */

	for (range = 0; range < npmem_ranges; range++) {
		unsigned long start_paddr;
		unsigned long end_paddr;
		unsigned long size;

		start_paddr = pmem_ranges[range].start_pfn << PAGE_SHIFT;
		end_paddr = start_paddr + (pmem_ranges[range].pages << PAGE_SHIFT);
		size = pmem_ranges[range].pages << PAGE_SHIFT;

		map_pages((unsigned long)__va(start_paddr), start_paddr,
			size, PAGE_KERNEL);
	}

#ifdef CONFIG_BLK_DEV_INITRD
	if (initrd_end && initrd_end > mem_limit) {
		printk("initrd: mapping %08lx-%08lx\n", initrd_start, initrd_end);
		map_pages(initrd_start, __pa(initrd_start),
			initrd_end - initrd_start, PAGE_KERNEL);
	}
#endif

	empty_zero_page = alloc_bootmem_pages(PAGE_SIZE);
	memset(empty_zero_page, 0, PAGE_SIZE);
}
static void __init gateway_init(void)
{
	unsigned long linux_gateway_page_addr;
	/* FIXME: This is 'const' in order to trick the compiler
	   into not treating it as DP-relative data. */
	extern void * const linux_gateway_page;

	linux_gateway_page_addr = LINUX_GATEWAY_ADDR & PAGE_MASK;

	/*
	 * Setup Linux Gateway page.
	 *
	 * The Linux gateway page will reside in kernel space (on virtual
	 * page 0), so it doesn't need to be aliased into user space.
	 */

	map_pages(linux_gateway_page_addr, __pa(&linux_gateway_page),
		PAGE_SIZE, PAGE_GATEWAY);
}
#ifdef CONFIG_HPUX
void
map_hpux_gateway_page(struct task_struct *tsk, struct mm_struct *mm)
{
	pgd_t *pg_dir;
	pmd_t *pmd;
	pte_t *pg_table;
	unsigned long start_pmd;
	unsigned long start_pte;
	unsigned long address;
	unsigned long hpux_gw_page_addr;
	/* FIXME: This is 'const' in order to trick the compiler
	   into not treating it as DP-relative data. */
	extern void * const hpux_gateway_page;

	hpux_gw_page_addr = HPUX_GATEWAY_ADDR & PAGE_MASK;

	/*
	 * Setup HP-UX Gateway page.
	 *
	 * The HP-UX gateway page resides in the user address space,
	 * so it needs to be aliased into each process.
	 */

	pg_dir = pgd_offset(mm, hpux_gw_page_addr);

#if PTRS_PER_PMD == 1
	start_pmd = 0;
#else
	start_pmd = ((hpux_gw_page_addr >> PMD_SHIFT) & (PTRS_PER_PMD - 1));
#endif
	start_pte = ((hpux_gw_page_addr >> PAGE_SHIFT) & (PTRS_PER_PTE - 1));

	address = __pa(&hpux_gateway_page);
#if PTRS_PER_PMD == 1
	pmd = (pmd_t *)__pa(pg_dir);
#else
	pmd = (pmd_t *) (PAGE_MASK & pgd_val(*pg_dir));

	/*
	 * pmd is physical at this point
	 */

	if (!pmd) {
		pmd = (pmd_t *) get_zeroed_page(GFP_KERNEL);
		pmd = (pmd_t *) __pa(pmd);
	}

	pgd_val(*pg_dir) = _PAGE_TABLE | (unsigned long) pmd;
#endif
	/* now change pmd to kernel virtual addresses */

	pmd = (pmd_t *)__va(pmd) + start_pmd;

	/*
	 * pg_table is physical at this point
	 */

	pg_table = (pte_t *) (PAGE_MASK & pmd_val(*pmd));
	if (!pg_table)
		pg_table = (pte_t *) __pa(get_zeroed_page(GFP_KERNEL));

	pmd_val(*pmd) = _PAGE_TABLE | (unsigned long) pg_table;

	/* now change pg_table to kernel virtual addresses */

	pg_table = (pte_t *) __va(pg_table) + start_pte;
	set_pte(pg_table, __mk_pte(address, PAGE_GATEWAY));
}
EXPORT_SYMBOL(map_hpux_gateway_page);
#endif
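
/*
 * paging_init(): called once at boot.  It sets up the bootmem allocator,
 * the kernel page tables and the gateway page, flushes the local caches
 * and TLB to a known state, and then calls free_area_init_node() for
 * every memory range, with all pages placed in ZONE_DMA.
 */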
extern void flush_tlb_all_local(void);

void __init paging_init(void)
{
	int i;

	setup_bootmem();
	pagetable_init();
	gateway_init();
	flush_cache_all_local(); /* start with known state */
	flush_tlb_all_local();

	for (i = 0; i < npmem_ranges; i++) {
		unsigned long zones_size[MAX_NR_ZONES] = { 0, 0, 0 };

		/* We have an IOMMU, so all memory can go into a single
		   ZONE_DMA zone. */
		zones_size[ZONE_DMA] = pmem_ranges[i].pages;

		free_area_init_node(i, NODE_DATA(i), zones_size,
				pmem_ranges[i].start_pfn, 0);

#ifdef CONFIG_DISCONTIGMEM
		{
			int j;

			for (j = (node_start_pfn(i) >> PFNNID_SHIFT);
			     j <= (node_end_pfn(i) >> PFNNID_SHIFT);
			     j++) {
				pfnnid_map[j] = i;
			}
		}
#endif
	}
}
#ifdef CONFIG_PA20

/*
 * Currently, all PA20 chips have 18 bit protection id's, which is the
 * limiting factor (space ids are 32 bits).
 */

#define NR_SPACE_IDS 262144

#else

/*
 * Currently we have a one-to-one relationship between space id's and
 * protection id's. Older parisc chips (PCXS, PCXT, PCXL, PCXL2) only
 * support 15 bit protection id's, so that is the limiting factor.
 * PCXT' has 18 bit protection id's, but only 16 bit spaceids, so it's
 * probably not worth the effort for a special case here.
 */

#define NR_SPACE_IDS 32768

#endif  /* !CONFIG_PA20 */

#define RECYCLE_THRESHOLD (NR_SPACE_IDS / 2)
#define SID_ARRAY_SIZE  (NR_SPACE_IDS / (8 * sizeof(long)))
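
/*
 * Worked example (illustrative arithmetic only): with CONFIG_PA20,
 * NR_SPACE_IDS = 262144, so on a 64-bit kernel SID_ARRAY_SIZE is
 * 262144 / (8 * 8) = 4096 longs, one bit per space id, and
 * RECYCLE_THRESHOLD is 131072 dirty ids.
 */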
static unsigned long space_id[SID_ARRAY_SIZE] = { 1 }; /* disallow space 0 */
static unsigned long dirty_space_id[SID_ARRAY_SIZE];
static unsigned long space_id_index;
static unsigned long free_space_ids = NR_SPACE_IDS - 1;
static unsigned long dirty_space_ids = 0;

static spinlock_t sid_lock = SPIN_LOCK_UNLOCKED;
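
/*
 * Space id (and, with it, protection id) allocation.  Ids are handed out
 * from the space_id bitmap; free_sid() only marks an id "dirty", and the
 * dirty ids are returned to the free pool once the TLB has been flushed
 * on all CPUs (see flush_tlb_all() below).
 */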
unsigned long alloc_sid(void)
{
	unsigned long index;

	spin_lock(&sid_lock);

	if (free_space_ids == 0) {
		if (dirty_space_ids != 0) {
			spin_unlock(&sid_lock);
			flush_tlb_all(); /* flush_tlb_all() calls recycle_sids() */
			spin_lock(&sid_lock);
		}
		if (free_space_ids == 0)
			BUG();
	}

	free_space_ids--;

	index = find_next_zero_bit(space_id, NR_SPACE_IDS, space_id_index);
	space_id[index >> SHIFT_PER_LONG] |= (1L << (index & (BITS_PER_LONG - 1)));
	space_id_index = index;

	spin_unlock(&sid_lock);

	return index << SPACEID_SHIFT;
}
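
/*
 * free_sid(): a released space id is not returned to the free pool
 * immediately; it is only marked dirty, because stale translations for
 * it may still be sitting in some TLB until the next global flush.
 */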
void free_sid(unsigned long spaceid)
{
	unsigned long index = spaceid >> SPACEID_SHIFT;
	unsigned long *dirty_space_offset;

	dirty_space_offset = dirty_space_id + (index >> SHIFT_PER_LONG);
	index &= (BITS_PER_LONG - 1);

	spin_lock(&sid_lock);

	if (*dirty_space_offset & (1L << index))
		BUG(); /* attempt to free space id twice */

	*dirty_space_offset |= (1L << index);
	dirty_space_ids++;

	spin_unlock(&sid_lock);
}
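
/*
 * On SMP the recycling is done in two steps: get_dirty_sids() snapshots
 * and clears the dirty bitmap under sid_lock, the TLB is then flushed on
 * every CPU, and recycle_sids() finally clears those bits in space_id so
 * the ids can be handed out again.
 */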
#ifdef CONFIG_SMP
static void get_dirty_sids(unsigned long *ndirtyptr, unsigned long *dirty_array)
{
	int i;

	/* NOTE: sid_lock must be held upon entry */

	*ndirtyptr = dirty_space_ids;
	if (dirty_space_ids != 0) {
		for (i = 0; i < SID_ARRAY_SIZE; i++) {
			dirty_array[i] = dirty_space_id[i];
			dirty_space_id[i] = 0;
		}
		dirty_space_ids = 0;
	}
}
static void recycle_sids(unsigned long ndirty, unsigned long *dirty_array)
{
	int i;

	/* NOTE: sid_lock must be held upon entry */

	if (ndirty != 0) {
		for (i = 0; i < SID_ARRAY_SIZE; i++) {
			space_id[i] ^= dirty_array[i];
		}

		free_space_ids += ndirty;
		space_id_index = 0;
	}
}

#else /* CONFIG_SMP */
static void recycle_sids(void)
{
	int i;

	/* NOTE: sid_lock must be held upon entry */

	if (dirty_space_ids != 0) {
		for (i = 0; i < SID_ARRAY_SIZE; i++) {
			space_id[i] ^= dirty_space_id[i];
			dirty_space_id[i] = 0;
		}

		free_space_ids += dirty_space_ids;
		dirty_space_ids = 0;
		space_id_index = 0;
	}
}
#endif /* CONFIG_SMP */
/*
 * flush_tlb_all() calls recycle_sids(), since whenever the entire tlb is
 * purged, we can safely reuse the space ids that were released but
 * not flushed from the tlb.
 */

#ifdef CONFIG_SMP

static unsigned long recycle_ndirty;
static unsigned long recycle_dirty_array[SID_ARRAY_SIZE];
static unsigned int recycle_inuse = 0;
void flush_tlb_all(void)
{
	int do_recycle;

	do_recycle = 0;
	spin_lock(&sid_lock);
	if (dirty_space_ids > RECYCLE_THRESHOLD) {
		if (recycle_inuse) {
			BUG();  /* FIXME: Use a semaphore/wait queue here */
		}
		get_dirty_sids(&recycle_ndirty, recycle_dirty_array);
		recycle_inuse++;
		do_recycle++;
	}
	spin_unlock(&sid_lock);
	on_each_cpu((void (*)(void *))flush_tlb_all_local, NULL, 1, 1);
	if (do_recycle) {
		spin_lock(&sid_lock);
		recycle_sids(recycle_ndirty, recycle_dirty_array);
		recycle_inuse = 0;
		spin_unlock(&sid_lock);
	}
}
#else
void flush_tlb_all(void)
{
	spin_lock(&sid_lock);
	flush_tlb_all_local();
	recycle_sids();
	spin_unlock(&sid_lock);
}
#endif
#ifdef CONFIG_BLK_DEV_INITRD
void free_initrd_mem(unsigned long start, unsigned long end)
{
	printk(KERN_INFO "Freeing initrd memory: %ldk freed\n", (end - start) >> 10);
	for (; start < end; start += PAGE_SIZE) {
		ClearPageReserved(virt_to_page(start));
		set_page_count(virt_to_page(start), 1);
		free_page(start);
		num_physpages++;
		totalram_pages++;
	}
}
#endif