/*
 *  Copyright IBM Corp. 2006
 *  Author(s): Heiko Carstens <heiko.carstens@de.ibm.com>
 */

#include <linux/bootmem.h>
#include <linux/pfn.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/list.h>
#include <asm/pgalloc.h>
#include <asm/pgtable.h>
#include <asm/setup.h>
#include <asm/tlbflush.h>
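
/*
 * vmem_mutex protects the list of memory segments below and serializes
 * hotplug changes to the kernel 1:1 mapping. Each hot-added (shared)
 * memory range is tracked by one struct memory_segment on mem_segs.
 */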
static DEFINE_MUTEX(vmem_mutex);

struct memory_segment {
	struct list_head list;
	unsigned long start;
	unsigned long size;
};

static LIST_HEAD(mem_segs);
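
/*
 * Initialize the struct pages of a zone, restricted to those parts of the
 * mem_map that are backed by an actual memory chunk; holes between chunks
 * are skipped.
 */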
void __meminit memmap_init(unsigned long size, int nid, unsigned long zone,
			   unsigned long start_pfn)
{
	struct page *start, *end;
	struct page *map_start, *map_end;
	int i;

	start = pfn_to_page(start_pfn);
	end = start + size;

	for (i = 0; i < MEMORY_CHUNKS && memory_chunk[i].size > 0; i++) {
		unsigned long cstart, cend;

		cstart = PFN_DOWN(memory_chunk[i].addr);
		cend = cstart + PFN_DOWN(memory_chunk[i].size);

		map_start = mem_map + cstart;
		map_end = mem_map + cend;

		if (map_start < start)
			map_start = start;
		if (map_end > end)
			map_end = end;

		/* extend the range to cover complete mem_map pages */
		map_start -= ((unsigned long) map_start & (PAGE_SIZE - 1))
			/ sizeof(struct page);
		map_end += ((PFN_ALIGN((unsigned long) map_end)
			     - (unsigned long) map_end)
			    / sizeof(struct page));

		if (map_start < map_end)
			memmap_init_zone((unsigned long)(map_end - map_start),
					 nid, zone, page_to_pfn(map_start),
					 MEMMAP_EARLY);
	}
}
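
/*
 * Allocate pages for page tables and mem_map backing: from the buddy
 * allocator once the slab allocator is available, from bootmem during
 * early boot.
 */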
static void __ref *vmem_alloc_pages(unsigned int order)
{
	if (slab_is_available())
		return (void *)__get_free_pages(GFP_KERNEL, order);
	return alloc_bootmem_pages((1 << order) * PAGE_SIZE);
}
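
/*
 * No pud allocations are expected for these mappings; hitting this
 * is a bug.
 */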
#define vmem_pud_alloc()	({ BUG(); ((pud_t *) NULL); })
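
/*
 * Allocate and clear a pmd (segment) table; the full table spans
 * four pages.
 */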
static inline pmd_t *vmem_pmd_alloc(void)
{
	pmd_t *pmd;

	pmd = vmem_alloc_pages(2);
	if (!pmd)
		return NULL;
	clear_table((unsigned long *) pmd, _SEGMENT_ENTRY_EMPTY, PAGE_SIZE * 4);
	return pmd;
}
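
/*
 * Allocate and clear a page table, using the kernel page table allocator
 * once the slab allocator is up and bootmem before that.
 */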
static pte_t __init_refok *vmem_pte_alloc(void)
{
	pte_t *pte;

	if (slab_is_available())
		pte = (pte_t *) page_table_alloc(&init_mm);
	else
		pte = alloc_bootmem(PTRS_PER_PTE * sizeof(pte_t));
	if (!pte)
		return NULL;
	clear_table((unsigned long *) pte, _PAGE_TYPE_EMPTY,
		    PTRS_PER_PTE * sizeof(pte_t));
	return pte;
}
/*
 * Add a physical memory range to the 1:1 mapping.
 */
static int vmem_add_range(unsigned long start, unsigned long size)
{
	unsigned long address;
	pgd_t *pg_dir;
	pud_t *pu_dir;
	pmd_t *pm_dir;
	pte_t *pt_dir;
	pte_t  pte;
	int ret = -ENOMEM;

	for (address = start; address < start + size; address += PAGE_SIZE) {
		pg_dir = pgd_offset_k(address);
		if (pgd_none(*pg_dir)) {
			pu_dir = vmem_pud_alloc();
			if (!pu_dir)
				goto out;
			pgd_populate_kernel(&init_mm, pg_dir, pu_dir);
		}

		pu_dir = pud_offset(pg_dir, address);
		if (pud_none(*pu_dir)) {
			pm_dir = vmem_pmd_alloc();
			if (!pm_dir)
				goto out;
			pud_populate_kernel(&init_mm, pu_dir, pm_dir);
		}

		pm_dir = pmd_offset(pu_dir, address);
		if (pmd_none(*pm_dir)) {
			pt_dir = vmem_pte_alloc();
			if (!pt_dir)
				goto out;
			pmd_populate_kernel(&init_mm, pm_dir, pt_dir);
		}

		pt_dir = pte_offset_kernel(pm_dir, address);
		pte = pfn_pte(address >> PAGE_SHIFT, PAGE_KERNEL);
		*pt_dir = pte;
	}
	ret = 0;
out:
	flush_tlb_kernel_range(start, start + size);
	return ret;
}
/*
 * Remove a physical memory range from the 1:1 mapping.
 * Currently only invalidates page table entries.
 */
static void vmem_remove_range(unsigned long start, unsigned long size)
{
	unsigned long address;
	pgd_t *pg_dir;
	pud_t *pu_dir;
	pmd_t *pm_dir;
	pte_t *pt_dir;
	pte_t  pte;

	pte_val(pte) = _PAGE_TYPE_EMPTY;
	for (address = start; address < start + size; address += PAGE_SIZE) {
		pg_dir = pgd_offset_k(address);
		pu_dir = pud_offset(pg_dir, address);
		if (pud_none(*pu_dir))
			continue;
		pm_dir = pmd_offset(pu_dir, address);
		if (pmd_none(*pm_dir))
			continue;
		pt_dir = pte_offset_kernel(pm_dir, address);
		*pt_dir = pte;
	}
	flush_tlb_kernel_range(start, start + size);
}
/*
 * Add a backed mem_map array to the virtual mem_map array.
 */
static int vmem_add_mem_map(unsigned long start, unsigned long size)
{
	unsigned long address, start_addr, end_addr;
	struct page *map_start, *map_end;
	pgd_t *pg_dir;
	pud_t *pu_dir;
	pmd_t *pm_dir;
	pte_t *pt_dir;
	pte_t  pte;
	int ret = -ENOMEM;

	map_start = VMEM_MAP + PFN_DOWN(start);
	map_end = VMEM_MAP + PFN_DOWN(start + size);

	start_addr = (unsigned long) map_start & PAGE_MASK;
	end_addr = PFN_ALIGN((unsigned long) map_end);

	for (address = start_addr; address < end_addr; address += PAGE_SIZE) {
		pg_dir = pgd_offset_k(address);
		if (pgd_none(*pg_dir)) {
			pu_dir = vmem_pud_alloc();
			if (!pu_dir)
				goto out;
			pgd_populate_kernel(&init_mm, pg_dir, pu_dir);
		}

		pu_dir = pud_offset(pg_dir, address);
		if (pud_none(*pu_dir)) {
			pm_dir = vmem_pmd_alloc();
			if (!pm_dir)
				goto out;
			pud_populate_kernel(&init_mm, pu_dir, pm_dir);
		}

		pm_dir = pmd_offset(pu_dir, address);
		if (pmd_none(*pm_dir)) {
			pt_dir = vmem_pte_alloc();
			if (!pt_dir)
				goto out;
			pmd_populate_kernel(&init_mm, pm_dir, pt_dir);
		}

		pt_dir = pte_offset_kernel(pm_dir, address);
		if (pte_none(*pt_dir)) {
			unsigned long new_page;

			new_page = __pa(vmem_alloc_pages(0));
			if (!new_page)
				goto out;
			pte = pfn_pte(new_page >> PAGE_SHIFT, PAGE_KERNEL);
			*pt_dir = pte;
		}
	}
	ret = 0;
out:
	flush_tlb_kernel_range(start_addr, end_addr);
	return ret;
}
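
/*
 * Back the mem_map entries for a new memory range and then add the range
 * to the kernel 1:1 mapping.
 */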
static int vmem_add_mem(unsigned long start, unsigned long size)
{
	int ret;

	ret = vmem_add_mem_map(start, size);
	if (ret)
		return ret;
	return vmem_add_range(start, size);
}
/*
 * Add memory segment to the segment list if it doesn't overlap with
 * an already present segment.
 */
static int insert_memory_segment(struct memory_segment *seg)
{
	struct memory_segment *tmp;

	if (seg->start + seg->size >= VMEM_MAX_PHYS ||
	    seg->start + seg->size < seg->start)
		return -ERANGE;

	list_for_each_entry(tmp, &mem_segs, list) {
		if (seg->start >= tmp->start + tmp->size)
			continue;
		if (seg->start + seg->size <= tmp->start)
			continue;
		return -ENOSPC;
	}
	list_add(&seg->list, &mem_segs);
	return 0;
}
/*
 * Remove memory segment from the segment list.
 */
static void remove_memory_segment(struct memory_segment *seg)
{
	list_del(&seg->list);
}
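
/*
 * Take a segment off the list and invalidate its pages in the 1:1 mapping.
 * Called with vmem_mutex held.
 */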
static void __remove_shared_memory(struct memory_segment *seg)
{
	remove_memory_segment(seg);
	vmem_remove_range(seg->start, seg->size);
}
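
/*
 * Remove a previously added shared memory segment. The range must exactly
 * match a segment that was registered with add_shared_memory().
 */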
int remove_shared_memory(unsigned long start, unsigned long size)
{
	struct memory_segment *seg;
	int ret;

	mutex_lock(&vmem_mutex);

	ret = -ENOENT;
	list_for_each_entry(seg, &mem_segs, list) {
		if (seg->start == start && seg->size == size)
			break;
	}

	if (seg->start != start || seg->size != size)
		goto out;

	ret = 0;
	__remove_shared_memory(seg);
	kfree(seg);
out:
	mutex_unlock(&vmem_mutex);
	return ret;
}
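
/*
 * Add a shared memory segment: register it in the segment list, create
 * the mem_map backing and the 1:1 mapping, and initialize its struct pages.
 */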
int add_shared_memory(unsigned long start, unsigned long size)
{
	struct memory_segment *seg;
	struct page *page;
	unsigned long pfn, num_pfn, end_pfn;
	int ret;

	mutex_lock(&vmem_mutex);
	ret = -ENOMEM;
	seg = kzalloc(sizeof(*seg), GFP_KERNEL);
	if (!seg)
		goto out;
	seg->start = start;
	seg->size = size;

	ret = insert_memory_segment(seg);
	if (ret)
		goto out_free;

	ret = vmem_add_mem(start, size);
	if (ret)
		goto out_remove;

	pfn = PFN_DOWN(start);
	num_pfn = PFN_DOWN(size);
	end_pfn = pfn + num_pfn;

	page = pfn_to_page(pfn);
	memset(page, 0, num_pfn * sizeof(struct page));

	for (; pfn < end_pfn; pfn++) {
		page = pfn_to_page(pfn);
		init_page_count(page);
		reset_page_mapcount(page);
		SetPageReserved(page);
		INIT_LIST_HEAD(&page->lru);
	}
	goto out;

out_remove:
	__remove_shared_memory(seg);
out_free:
	kfree(seg);
out:
	mutex_unlock(&vmem_mutex);
	return ret;
}
/*
 * Map the whole physical memory to virtual memory (identity mapping).
 * Enough space is reserved in the vmalloc area for the vmemmap so that
 * additional memory segments can be hotplugged later.
 */
void __init vmem_map_init(void)
{
	int i;

	INIT_LIST_HEAD(&init_mm.context.crst_list);
	INIT_LIST_HEAD(&init_mm.context.pgtable_list);
	init_mm.context.noexec = 0;
	NODE_DATA(0)->node_mem_map = VMEM_MAP;
	for (i = 0; i < MEMORY_CHUNKS && memory_chunk[i].size > 0; i++)
		vmem_add_mem(memory_chunk[i].addr, memory_chunk[i].size);
}
/*
 * Convert memory chunk array to a memory segment list so there is a single
 * list that contains both r/w memory and shared memory segments.
 */
static int __init vmem_convert_memory_chunk(void)
{
	struct memory_segment *seg;
	int i;

	mutex_lock(&vmem_mutex);
	for (i = 0; i < MEMORY_CHUNKS; i++) {
		if (!memory_chunk[i].size)
			continue;
		seg = kzalloc(sizeof(*seg), GFP_KERNEL);
		if (!seg)
			panic("Out of memory...\n");
		seg->start = memory_chunk[i].addr;
		seg->size = memory_chunk[i].size;
		insert_memory_segment(seg);
	}
	mutex_unlock(&vmem_mutex);
	return 0;
}

core_initcall(vmem_convert_memory_chunk);