#include <linux/initrd.h>
#include <linux/ioport.h>
#include <linux/swap.h>
#include <linux/memblock.h>

#include <asm/cacheflush.h>
#include <asm/page_types.h>
#include <asm/sections.h>
#include <asm/setup.h>
#include <asm/system.h>
#include <asm/tlbflush.h>
#include <asm/proto.h>
DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);

unsigned long __initdata pgt_buf_start;
unsigned long __meminitdata pgt_buf_end;
unsigned long __meminitdata pgt_buf_top;
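/*
 * Note: pgt_buf_start, pgt_buf_end and pgt_buf_top (all page-frame numbers)
 * describe the contiguous buffer that early page-table allocations are
 * carved from: pgt_buf_end is the bump pointer advanced as tables are
 * handed out, and pgt_buf_top is the end of the window reserved by
 * find_early_table_space() below.
 */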
int direct_gbpages
#ifdef CONFIG_DIRECT_GBPAGES
				= 1
#endif
;

static void __init find_early_table_space(unsigned long end, int use_pse,
					  int use_gbpages)
{
	unsigned long puds, pmds, ptes, tables, start = 0, good_end = end;
	phys_addr_t base;

	puds = (end + PUD_SIZE - 1) >> PUD_SHIFT;
	tables = roundup(puds * sizeof(pud_t), PAGE_SIZE);
	if (use_gbpages) {
		unsigned long extra;

		extra = end - ((end >> PUD_SHIFT) << PUD_SHIFT);
		pmds = (extra + PMD_SIZE - 1) >> PMD_SHIFT;
	} else
		pmds = (end + PMD_SIZE - 1) >> PMD_SHIFT;

	tables += roundup(pmds * sizeof(pmd_t), PAGE_SIZE);
	if (use_pse) {
		unsigned long extra;

		extra = end - ((end >> PMD_SHIFT) << PMD_SHIFT);
#ifdef CONFIG_X86_32
		extra += PMD_SIZE;
#endif
		ptes = (extra + PAGE_SIZE - 1) >> PAGE_SHIFT;
	} else
		ptes = (end + PAGE_SIZE - 1) >> PAGE_SHIFT;

	tables += roundup(ptes * sizeof(pte_t), PAGE_SIZE);

#ifdef CONFIG_X86_32
	/* for fixmap */
	tables += roundup(__end_of_fixed_addresses * sizeof(pte_t), PAGE_SIZE);

	good_end = max_pfn_mapped << PAGE_SHIFT;
#endif
	base = memblock_find_in_range(start, good_end, tables, PAGE_SIZE);
	if (base == MEMBLOCK_ERROR)
		panic("Cannot find space for the kernel page tables");

	pgt_buf_start = base >> PAGE_SHIFT;
	pgt_buf_end = pgt_buf_start;
	pgt_buf_top = pgt_buf_start + (tables >> PAGE_SHIFT);

	printk(KERN_DEBUG "kernel direct mapping tables up to %lx @ %lx-%lx\n",
		end, pgt_buf_start << PAGE_SHIFT, pgt_buf_top << PAGE_SHIFT);
}
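/*
 * Rough sizing example (illustrative): on x86_64 with end = 4 GiB, PSE
 * enabled and gbpages disabled, the estimate above gives puds = 4 (one
 * page of pud_t entries after rounding), pmds = 2048 (16 KiB, i.e. four
 * pages of pmd_t entries) and ptes = 0, so about five 4 KiB pages get
 * reserved for the direct-mapping page tables.
 */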
struct map_range {
	unsigned long start;
	unsigned long end;
	unsigned page_size_mask;
};

#ifdef CONFIG_X86_32
#define NR_RANGE_MR 3
#else /* CONFIG_X86_64 */
#define NR_RANGE_MR 5
#endif
static int __meminit save_mr(struct map_range *mr, int nr_range,
			     unsigned long start_pfn, unsigned long end_pfn,
			     unsigned long page_size_mask)
{
	if (start_pfn < end_pfn) {
		if (nr_range >= NR_RANGE_MR)
			panic("run out of range for init_memory_mapping\n");
		mr[nr_range].start = start_pfn << PAGE_SHIFT;
		mr[nr_range].end   = end_pfn << PAGE_SHIFT;
		mr[nr_range].page_size_mask = page_size_mask;
		nr_range++;
	}

	return nr_range;
}
/*
 * Setup the direct mapping of the physical memory at PAGE_OFFSET.
 * This runs before bootmem is initialized and gets pages directly from
 * the physical memory. To access them they are temporarily mapped.
 */
unsigned long __init_refok init_memory_mapping(unsigned long start,
					       unsigned long end)
{
	unsigned long page_size_mask = 0;
	unsigned long start_pfn, end_pfn;
	unsigned long ret = 0;
	unsigned long pos;

	struct map_range mr[NR_RANGE_MR];
	int nr_range, i;
	int use_pse, use_gbpages;

	printk(KERN_INFO "init_memory_mapping: %016lx-%016lx\n", start, end);
#if defined(CONFIG_DEBUG_PAGEALLOC) || defined(CONFIG_KMEMCHECK)
	/*
	 * For CONFIG_DEBUG_PAGEALLOC, identity mapping will use small pages.
	 * This will simplify cpa(), which otherwise needs to support splitting
	 * large pages into small in interrupt context, etc.
	 */
	use_pse = use_gbpages = 0;
#else
	use_pse = cpu_has_pse;
	use_gbpages = direct_gbpages;
#endif
	/* Enable PSE if available */
	if (cpu_has_pse)
		set_in_cr4(X86_CR4_PSE);

	/* Enable PGE if available */
	if (cpu_has_pge) {
		set_in_cr4(X86_CR4_PGE);
		__supported_pte_mask |= _PAGE_GLOBAL;
	}

	if (use_gbpages)
		page_size_mask |= 1 << PG_LEVEL_1G;
	if (use_pse)
		page_size_mask |= 1 << PG_LEVEL_2M;
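
	/*
	 * The code below splits [start, end) into at most NR_RANGE_MR
	 * map_range entries, using the largest page size alignment allows:
	 * an unaligned 4k head, a 2M-mapped body, a 1G-mapped middle on
	 * 64-bit, then 2M and 4k tails.  Adjacent ranges that end up with
	 * the same page_size_mask are merged afterwards.
	 */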
	memset(mr, 0, sizeof(mr));
	nr_range = 0;

	/* head if not big page alignment ? */
	start_pfn = start >> PAGE_SHIFT;
	pos = start_pfn << PAGE_SHIFT;
#ifdef CONFIG_X86_32
	/*
	 * Don't use a large page for the first 2/4MB of memory
	 * because there are often fixed size MTRRs in there
	 * and overlapping MTRRs into large pages can cause
	 * slowdowns.
	 */
	if (pos == 0)
		end_pfn = 1 << (PMD_SHIFT - PAGE_SHIFT);
	else
		end_pfn = ((pos + (PMD_SIZE - 1)) >> PMD_SHIFT)
				 << (PMD_SHIFT - PAGE_SHIFT);
#else /* CONFIG_X86_64 */
	end_pfn = ((pos + (PMD_SIZE - 1)) >> PMD_SHIFT)
			<< (PMD_SHIFT - PAGE_SHIFT);
#endif
	if (end_pfn > (end >> PAGE_SHIFT))
		end_pfn = end >> PAGE_SHIFT;
	if (start_pfn < end_pfn) {
		nr_range = save_mr(mr, nr_range, start_pfn, end_pfn, 0);
		pos = end_pfn << PAGE_SHIFT;
	}
	/* big page (2M) range */
	start_pfn = ((pos + (PMD_SIZE - 1)) >> PMD_SHIFT)
			 << (PMD_SHIFT - PAGE_SHIFT);
#ifdef CONFIG_X86_32
	end_pfn = (end >> PMD_SHIFT) << (PMD_SHIFT - PAGE_SHIFT);
#else /* CONFIG_X86_64 */
	end_pfn = ((pos + (PUD_SIZE - 1)) >> PUD_SHIFT)
			 << (PUD_SHIFT - PAGE_SHIFT);
	if (end_pfn > ((end >> PMD_SHIFT) << (PMD_SHIFT - PAGE_SHIFT)))
		end_pfn = ((end >> PMD_SHIFT) << (PMD_SHIFT - PAGE_SHIFT));
#endif

	if (start_pfn < end_pfn) {
		nr_range = save_mr(mr, nr_range, start_pfn, end_pfn,
				page_size_mask & (1 << PG_LEVEL_2M));
		pos = end_pfn << PAGE_SHIFT;
	}
#ifdef CONFIG_X86_64
	/* big page (1G) range */
	start_pfn = ((pos + (PUD_SIZE - 1)) >> PUD_SHIFT)
			 << (PUD_SHIFT - PAGE_SHIFT);
	end_pfn = (end >> PUD_SHIFT) << (PUD_SHIFT - PAGE_SHIFT);
	if (start_pfn < end_pfn) {
		nr_range = save_mr(mr, nr_range, start_pfn, end_pfn,
				page_size_mask &
				 ((1 << PG_LEVEL_2M) | (1 << PG_LEVEL_1G)));
		pos = end_pfn << PAGE_SHIFT;
	}
	/* tail is not big page (1G) alignment */
	start_pfn = ((pos + (PMD_SIZE - 1)) >> PMD_SHIFT)
			 << (PMD_SHIFT - PAGE_SHIFT);
	end_pfn = (end >> PMD_SHIFT) << (PMD_SHIFT - PAGE_SHIFT);
	if (start_pfn < end_pfn) {
		nr_range = save_mr(mr, nr_range, start_pfn, end_pfn,
				page_size_mask & (1 << PG_LEVEL_2M));
		pos = end_pfn << PAGE_SHIFT;
	}
#endif
	/* tail is not big page (2M) alignment */
	start_pfn = pos >> PAGE_SHIFT;
	end_pfn = end >> PAGE_SHIFT;
	nr_range = save_mr(mr, nr_range, start_pfn, end_pfn, 0);
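
	/*
	 * mr[0..nr_range) now covers [start, end) contiguously.  The merge
	 * pass below folds neighbouring entries with identical
	 * page_size_mask into one; mr[i--].start keeps the merged entry's
	 * original start and re-checks the same slot against its new
	 * successor on the next iteration.
	 */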
	/* try to merge same page size and continuous */
	for (i = 0; nr_range > 1 && i < nr_range - 1; i++) {
		unsigned long old_start;
		if (mr[i].end != mr[i+1].start ||
		    mr[i].page_size_mask != mr[i+1].page_size_mask)
			continue;
		/* move it */
		old_start = mr[i].start;
		memmove(&mr[i], &mr[i+1],
			(nr_range - 1 - i) * sizeof(struct map_range));
		mr[i--].start = old_start;
		nr_range--;
	}
	for (i = 0; i < nr_range; i++)
		printk(KERN_DEBUG " %010lx - %010lx page %s\n",
				mr[i].start, mr[i].end,
			(mr[i].page_size_mask & (1 << PG_LEVEL_1G)) ? "1G" : (
			 (mr[i].page_size_mask & (1 << PG_LEVEL_2M)) ? "2M" : "4k"));
	/*
	 * Find space for the kernel direct mapping tables.
	 *
	 * Later we should allocate these tables in the local node of the
	 * memory mapped. Unfortunately this is done currently before the
	 * nodes are discovered.
	 */
	if (!after_bootmem)
		find_early_table_space(end, use_pse, use_gbpages);
	for (i = 0; i < nr_range; i++)
		ret = kernel_physical_mapping_init(mr[i].start, mr[i].end,
						   mr[i].page_size_mask);
#ifdef CONFIG_X86_32
	early_ioremap_page_table_range_init();

	load_cr3(swapper_pg_dir);
#endif

	__flush_tlb_all();
	if (!after_bootmem && pgt_buf_end > pgt_buf_start)
		memblock_x86_reserve_range(pgt_buf_start << PAGE_SHIFT,
				pgt_buf_end << PAGE_SHIFT, "PGTABLE");

	if (!after_bootmem)
		early_memtest(start, end);

	return ret >> PAGE_SHIFT;
}
/*
 * devmem_is_allowed() checks to see if /dev/mem access to a certain address
 * is valid. The argument is a physical page number.
 *
 * On x86, access has to be given to the first megabyte of RAM because that
 * area contains BIOS code and data regions used by X and dosemu and similar
 * apps. Access has to be given to non-kernel-RAM areas as well; these contain
 * the PCI mmio resources as well as potential BIOS/ACPI data regions.
 */
int devmem_is_allowed(unsigned long pagenr)
{
	if (pagenr <= 256)
		return 1;
	if (iomem_is_exclusive(pagenr << PAGE_SHIFT))
		return 0;
	if (!page_is_ram(pagenr))
		return 1;
	return 0;
}
void free_init_pages(char *what, unsigned long begin, unsigned long end)
{
	unsigned long addr;
	unsigned long begin_aligned, end_aligned;

	/* Make sure boundaries are page aligned */
	begin_aligned = PAGE_ALIGN(begin);
	end_aligned   = end & PAGE_MASK;

	if (WARN_ON(begin_aligned != begin || end_aligned != end)) {
		begin = begin_aligned;
		end   = end_aligned;
	}

	if (begin >= end)
		return;

	addr = begin;
	/*
	 * If debugging page accesses then do not free this memory but
	 * mark them not present - any buggy init-section access will
	 * create a kernel page fault:
	 */
#ifdef CONFIG_DEBUG_PAGEALLOC
	printk(KERN_INFO "debug: unmapping init memory %08lx..%08lx\n",
		begin, PAGE_ALIGN(end));
	set_memory_np(begin, (end - begin) >> PAGE_SHIFT);
#else
	/*
	 * We just marked the kernel text read only above, now that
	 * we are going to free part of that, we need to make that
	 * writable and non-executable first.
	 */
	set_memory_nx(begin, (end - begin) >> PAGE_SHIFT);
	set_memory_rw(begin, (end - begin) >> PAGE_SHIFT);

	printk(KERN_INFO "Freeing %s: %luk freed\n", what, (end - begin) >> 10);

	for (; addr < end; addr += PAGE_SIZE) {
		ClearPageReserved(virt_to_page(addr));
		init_page_count(virt_to_page(addr));
		memset((void *)addr, POISON_FREE_INITMEM, PAGE_SIZE);
		free_page(addr);
		totalram_pages++;
	}
#endif
}
void free_initmem(void)
{
	free_init_pages("unused kernel memory",
			(unsigned long)(&__init_begin),
			(unsigned long)(&__init_end));
}
#ifdef CONFIG_BLK_DEV_INITRD
void free_initrd_mem(unsigned long start, unsigned long end)
{
	/*
	 * end may not be page aligned, and we cannot align it here:
	 * the decompressor could be confused by an aligned initrd_end.
	 * The trailing partial page was already reserved earlier in
	 *   - i386_start_kernel()
	 *   - x86_64_start_kernel()
	 *   - relocate_initrd()
	 * so here we can safely PAGE_ALIGN() to free that partial page.
	 */
	free_init_pages("initrd memory", start, PAGE_ALIGN(end));
}
#endif