/*
 *  Copyright (C) 1999 IBM Deutschland Entwicklung GmbH, IBM Corporation
 *  Author(s): Hartmut Penner (hp@de.ibm.com)
 *
 *  Derived from "arch/i386/mm/init.c"
 *  Copyright (C) 1995 Linus Torvalds
 */
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/ptrace.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/smp.h>
#include <linux/init.h>
#include <linux/pagemap.h>
#include <linux/bootmem.h>
#include <linux/pfn.h>
#include <linux/poison.h>
#include <linux/initrd.h>
#include <linux/export.h>
#include <linux/gfp.h>
#include <asm/processor.h>
#include <asm/system.h>
#include <asm/uaccess.h>
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/dma.h>
#include <asm/lowcore.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>
#include <asm/sections.h>
pgd_t swapper_pg_dir[PTRS_PER_PGD] __attribute__((__aligned__(PAGE_SIZE)));

unsigned long empty_zero_page, zero_page_mask;
EXPORT_SYMBOL(empty_zero_page);
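
/*
 * Allocate and reserve a block of 2^order zeroed pages and compute
 * zero_page_mask, so that ZERO_PAGE() can hand out differently "colored"
 * zero pages and avoid cache synonym problems on newer machines.
 * Returns the number of pages reserved.
 */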
static unsigned long setup_zero_pages(void)
{
	struct cpuid cpu_id;
	unsigned int order;
	unsigned long size;
	struct page *page;
	int i;

	get_cpu_id(&cpu_id);
	switch (cpu_id.machine) {
	case 0x2064:	/* z900 */
	case 0x2066:	/* z900 */
	case 0x2084:	/* z990 */
	case 0x2086:	/* z990 */
	case 0x2094:	/* z9-109 */
	case 0x2096:	/* z9-109 */
		/* assumed: a single 4K zero page suffices on pre-z10 machines */
		order = 0;
		break;
	case 0x2097:	/* z10 */
	case 0x2098:	/* z10 */
	default:
		/* assumed: several zero pages for cache coloring on z10 and newer */
		order = 2;
		break;
	}

	empty_zero_page = __get_free_pages(GFP_KERNEL | __GFP_ZERO, order);
	if (!empty_zero_page)
		panic("Out of memory in setup_zero_pages");

	page = virt_to_page((void *) empty_zero_page);
	split_page(page, order);
	for (i = 1 << order; i > 0; i--) {
		SetPageReserved(page);
		page++;
	}

	size = PAGE_SIZE << order;
	zero_page_mask = (size - 1) & PAGE_MASK;

	return 1UL << order;
}
/*
 * paging_init() sets up the page tables
 */
void __init paging_init(void)
{
	unsigned long max_zone_pfns[MAX_NR_ZONES];
	unsigned long pgd_type;

	init_mm.pgd = swapper_pg_dir;
	S390_lowcore.kernel_asce = __pa(init_mm.pgd) & PAGE_MASK;
#ifdef CONFIG_64BIT
	/* A three level page table (4TB) is enough for the kernel space. */
	S390_lowcore.kernel_asce |= _ASCE_TYPE_REGION3 | _ASCE_TABLE_LENGTH;
	pgd_type = _REGION3_ENTRY_EMPTY;
#else
	S390_lowcore.kernel_asce |= _ASCE_TABLE_LENGTH;
	pgd_type = _SEGMENT_ENTRY_EMPTY;
#endif
	clear_table((unsigned long *) init_mm.pgd, pgd_type,
		    sizeof(unsigned long) * 2048);

	/* enable virtual mapping in kernel mode */
	__ctl_load(S390_lowcore.kernel_asce, 1, 1);
	__ctl_load(S390_lowcore.kernel_asce, 7, 7);
	__ctl_load(S390_lowcore.kernel_asce, 13, 13);
	arch_local_irq_restore(4UL << (BITS_PER_LONG - 8));

	atomic_set(&init_mm.context.attach_count, 1);

	sparse_memory_present_with_active_regions(MAX_NUMNODES);

	memset(max_zone_pfns, 0, sizeof(max_zone_pfns));
	max_zone_pfns[ZONE_DMA] = PFN_DOWN(MAX_DMA_ADDRESS);
	max_zone_pfns[ZONE_NORMAL] = max_low_pfn;
	free_area_init_nodes(max_zone_pfns);
}
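
/*
 * mem_init() reports the final memory layout: it releases all bootmem
 * pages to the page allocator, sets aside the zero page(s), and prints
 * the sizes of the kernel text, data and init sections.
 */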
void __init mem_init(void)
{
	unsigned long codesize, reservedpages, datasize, initsize;

	max_mapnr = num_physpages = max_low_pfn;
	high_memory = (void *) __va(max_low_pfn * PAGE_SIZE);

	/* Setup guest page hinting */

	/* this will put all low memory onto the freelists */
	totalram_pages += free_all_bootmem();
	totalram_pages -= setup_zero_pages();	/* Setup zeroed pages. */

	reservedpages = 0;

	codesize = (unsigned long) &_etext - (unsigned long) &_text;
	datasize = (unsigned long) &_edata - (unsigned long) &_etext;
	initsize = (unsigned long) &__init_end - (unsigned long) &__init_begin;
	printk("Memory: %luk/%luk available (%ldk kernel code, %ldk reserved, %ldk data, %ldk init)\n",
	       nr_free_pages() << (PAGE_SHIFT - 10),
	       max_mapnr << (PAGE_SHIFT - 10),
	       codesize >> 10,
	       reservedpages << (PAGE_SHIFT - 10),
	       datasize >> 10,
	       initsize >> 10);
	printk("Write protected kernel read-only data: %#lx - %#lx\n",
	       (unsigned long)&_stext,
	       PFN_ALIGN((unsigned long)&_eshared) - 1);
}
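
/*
 * With CONFIG_DEBUG_PAGEALLOC, kernel_map_pages() invalidates the kernel
 * mapping of pages as they are freed and re-establishes it when they are
 * allocated again, so stray accesses to free pages fault immediately.
 */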
#ifdef CONFIG_DEBUG_PAGEALLOC
void kernel_map_pages(struct page *page, int numpages, int enable)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;
	unsigned long address;
	int i;

	for (i = 0; i < numpages; i++) {
		address = page_to_phys(page + i);
		pgd = pgd_offset_k(address);
		pud = pud_offset(pgd, address);
		pmd = pmd_offset(pud, address);
		pte = pte_offset_kernel(pmd, address);
		if (!enable) {
			/* invalidate the pte and flush it from the TLB */
			__ptep_ipte(address, pte);
			pte_val(*pte) = _PAGE_TYPE_EMPTY;
			continue;
		}
		*pte = mk_pte_phys(address, __pgprot(_PAGE_TYPE_RW));
		/* Flush cpu write queue. */
		mb();
	}
}
#endif
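
/*
 * Poison a range of init pages with POISON_FREE_INITMEM and hand them
 * back to the page allocator.
 */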
void free_init_pages(char *what, unsigned long begin, unsigned long end)
{
	unsigned long addr = begin;

	for (; addr < end; addr += PAGE_SIZE) {
		ClearPageReserved(virt_to_page(addr));
		init_page_count(virt_to_page(addr));
		memset((void *)(addr & PAGE_MASK), POISON_FREE_INITMEM,
		       PAGE_SIZE);
		free_page(addr);
		totalram_pages++;
	}
	printk(KERN_INFO "Freeing %s: %luk freed\n", what, (end - begin) >> 10);
}
void free_initmem(void)
{
	free_init_pages("unused kernel memory",
			(unsigned long)&__init_begin,
			(unsigned long)&__init_end);
}
#ifdef CONFIG_BLK_DEV_INITRD
void free_initrd_mem(unsigned long start, unsigned long end)
{
	free_init_pages("initrd memory", start, end);
}
#endif
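
/*
 * Memory hotplug: create the kernel mapping for the new memory range
 * first, then add its pages to ZONE_MOVABLE; undo the mapping again if
 * adding the pages fails.
 */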
#ifdef CONFIG_MEMORY_HOTPLUG
int arch_add_memory(int nid, u64 start, u64 size)
{
	struct pglist_data *pgdat;
	struct zone *zone;
	int rc;

	pgdat = NODE_DATA(nid);
	zone = pgdat->node_zones + ZONE_MOVABLE;
	rc = vmem_add_mapping(start, size);
	if (rc)
		return rc;
	rc = __add_pages(nid, zone, PFN_DOWN(start), PFN_DOWN(size));
	if (rc)
		vmem_remove_mapping(start, size);
	return rc;
}
#endif /* CONFIG_MEMORY_HOTPLUG */