/*
 * sparse memory mappings.
 */
#include <linux/mm.h>
#include <linux/mmzone.h>
#include <linux/bootmem.h>
#include <linux/highmem.h>
#include <linux/module.h>
#include <linux/spinlock.h>
#include <linux/vmalloc.h>
#include "internal.h"
#include <asm/dma.h>
#include <asm/pgalloc.h>
#include <asm/pgtable.h>
/*
 * Permanent SPARSEMEM data:
 *
 * 1) mem_section	- memory sections, mem_map's for valid memory
 */
#ifdef CONFIG_SPARSEMEM_EXTREME
struct mem_section *mem_section[NR_SECTION_ROOTS]
	____cacheline_internodealigned_in_smp;
#else
struct mem_section mem_section[NR_SECTION_ROOTS][SECTIONS_PER_ROOT]
	____cacheline_internodealigned_in_smp;
#endif
EXPORT_SYMBOL(mem_section);
#ifdef NODE_NOT_IN_PAGE_FLAGS
/*
 * If we did not store the node number in the page then we have to
 * do a lookup in the section_to_node_table in order to find which
 * node the page belongs to.
 */
#if MAX_NUMNODES <= 256
static u8 section_to_node_table[NR_MEM_SECTIONS] __cacheline_aligned;
#else
static u16 section_to_node_table[NR_MEM_SECTIONS] __cacheline_aligned;
#endif

int page_to_nid(struct page *page)
{
	return section_to_node_table[page_to_section(page)];
}
EXPORT_SYMBOL(page_to_nid);

static void set_section_nid(unsigned long section_nr, int nid)
{
	section_to_node_table[section_nr] = nid;
}
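/*
 * Editorial sketch (not from the original source): when the node number
 * cannot live in page->flags, the table above is the only link between a
 * page and its node, so the value recorded at boot is exactly what the
 * lookup returns later:
 *
 *	set_section_nid(pfn_to_section_nr(pfn), nid);
 *	...
 *	BUG_ON(page_to_nid(pfn_to_page(pfn)) != nid);
 */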
#else /* !NODE_NOT_IN_PAGE_FLAGS */
static inline void set_section_nid(unsigned long section_nr, int nid)
{
}
#endif
#ifdef CONFIG_SPARSEMEM_EXTREME
static struct mem_section noinline __init_refok *sparse_index_alloc(int nid)
{
	struct mem_section *section = NULL;
	unsigned long array_size = SECTIONS_PER_ROOT *
				   sizeof(struct mem_section);

	if (slab_is_available())
		section = kmalloc_node(array_size, GFP_KERNEL, nid);
	else
		section = alloc_bootmem_node(NODE_DATA(nid), array_size);

	if (section)
		memset(section, 0, array_size);

	return section;
}
static int __meminit sparse_index_init(unsigned long section_nr, int nid)
{
	static DEFINE_SPINLOCK(index_init_lock);
	unsigned long root = SECTION_NR_TO_ROOT(section_nr);
	struct mem_section *section;
	int ret = 0;

	if (mem_section[root])
		return -EEXIST;

	section = sparse_index_alloc(nid);
	if (!section)
		return -ENOMEM;
	/*
	 * This lock keeps two different sections from
	 * reallocating for the same index
	 */
	spin_lock(&index_init_lock);

	if (mem_section[root]) {
		ret = -EEXIST;
		goto out;
	}

	mem_section[root] = section;
out:
	spin_unlock(&index_init_lock);
	return ret;
}
#else /* !SPARSEMEM_EXTREME */
static inline int sparse_index_init(unsigned long section_nr, int nid)
{
	return 0;
}
#endif
/*
 * Although written for the SPARSEMEM_EXTREME case, this happens
 * to also work for the flat array case because
 * NR_SECTION_ROOTS==NR_MEM_SECTIONS.
 */
int __section_nr(struct mem_section *ms)
{
	unsigned long root_nr;
	struct mem_section *root;

	for (root_nr = 0; root_nr < NR_SECTION_ROOTS; root_nr++) {
		root = __nr_to_section(root_nr * SECTIONS_PER_ROOT);
		if (!root)
			continue;

		if ((ms >= root) && (ms < (root + SECTIONS_PER_ROOT)))
			break;
	}

	return (root_nr * SECTIONS_PER_ROOT) + (ms - root);
}
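/*
 * Editorial sketch: __section_nr() is the inverse of __nr_to_section(),
 * so for any section number nr backed by an allocated root,
 *
 *	__section_nr(__nr_to_section(nr)) == nr
 *
 * The linear scan over the roots is the price of the reverse lookup.
 */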
/*
 * During early boot, before section_mem_map is used for an actual
 * mem_map, we use section_mem_map to store the section's NUMA
 * node. This keeps us from having to use another data structure. The
 * node information is cleared just before we store the real mem_map.
 */
static inline unsigned long sparse_encode_early_nid(int nid)
{
	return (nid << SECTION_NID_SHIFT);
}

static inline int sparse_early_nid(struct mem_section *section)
{
	return (section->section_mem_map >> SECTION_NID_SHIFT);
}
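/*
 * Editorial sketch of the early-boot round trip, assuming the nid is
 * stored above the low flag bits (i.e. SECTION_NID_SHIFT clears them):
 *
 *	ms->section_mem_map = sparse_encode_early_nid(nid)
 *					| SECTION_MARKED_PRESENT;
 *	BUG_ON(sparse_early_nid(ms) != nid);
 */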
/* Validate the physical addressing limitations of the model */
void __meminit mminit_validate_memmodel_limits(unsigned long *start_pfn,
						unsigned long *end_pfn)
{
	unsigned long max_sparsemem_pfn = 1UL << (MAX_PHYSMEM_BITS-PAGE_SHIFT);

	/*
	 * Sanity checks - do not allow an architecture to pass
	 * in larger pfns than the maximum scope of sparsemem:
	 */
	if (*start_pfn > max_sparsemem_pfn) {
		mminit_dprintk(MMINIT_WARNING, "pfnvalidation",
			"Start of range %lu -> %lu exceeds SPARSEMEM max %lu\n",
			*start_pfn, *end_pfn, max_sparsemem_pfn);
		WARN_ON_ONCE(1);
		*start_pfn = max_sparsemem_pfn;
		*end_pfn = max_sparsemem_pfn;
	}

	if (*end_pfn > max_sparsemem_pfn) {
		mminit_dprintk(MMINIT_WARNING, "pfnvalidation",
			"End of range %lu -> %lu exceeds SPARSEMEM max %lu\n",
			*start_pfn, *end_pfn, max_sparsemem_pfn);
		WARN_ON_ONCE(1);
		*end_pfn = max_sparsemem_pfn;
	}
}
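/*
 * Editorial example: if, say, MAX_PHYSMEM_BITS == 44 and
 * PAGE_SHIFT == 12 (one plausible 64-bit configuration), then
 * max_sparsemem_pfn is 1UL << 32, i.e. SPARSEMEM can describe up to
 * 2^44 bytes (16TB) of physical address space; any pfn beyond that is
 * clamped above.
 */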
/* Record a memory area against a node. */
void __init memory_present(int nid, unsigned long start, unsigned long end)
{
	unsigned long pfn;

	start &= PAGE_SECTION_MASK;
	mminit_validate_memmodel_limits(&start, &end);
	for (pfn = start; pfn < end; pfn += PAGES_PER_SECTION) {
		unsigned long section = pfn_to_section_nr(pfn);
		struct mem_section *ms;

		sparse_index_init(section, nid);
		set_section_nid(section, nid);

		ms = __nr_to_section(section);
		if (!ms->section_mem_map)
			ms->section_mem_map = sparse_encode_early_nid(nid) |
							SECTION_MARKED_PRESENT;
	}
}
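/*
 * Editorial sketch of the expected call pattern (real call sites live
 * in arch setup code; the names here are illustrative):
 *
 *	for_each_online_node(nid)
 *		memory_present(nid, node_start_pfn, node_end_pfn);
 *
 * Every section touched by a registered range is marked present, so
 * sparse_init() below knows which sections need a mem_map.
 */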
/*
 * Only used by the i386 NUMA architectures, but relatively
 * generic code.
 */
unsigned long __init node_memmap_size_bytes(int nid, unsigned long start_pfn,
						unsigned long end_pfn)
{
	unsigned long pfn;
	unsigned long nr_pages = 0;

	mminit_validate_memmodel_limits(&start_pfn, &end_pfn);
	for (pfn = start_pfn; pfn < end_pfn; pfn += PAGES_PER_SECTION) {
		if (nid != early_pfn_to_nid(pfn))
			continue;

		if (pfn_present(pfn))
			nr_pages += PAGES_PER_SECTION;
	}

	return nr_pages * sizeof(struct page);
}
/*
 * Subtle, we encode the real pfn into the mem_map such that
 * the identity pfn - section_mem_map will return the actual
 * physical page frame number.
 */
static unsigned long sparse_encode_mem_map(struct page *mem_map, unsigned long pnum)
{
	return (unsigned long)(mem_map - (section_nr_to_pfn(pnum)));
}

/*
 * Decode mem_map from the coded memmap
 */
struct page *sparse_decode_mem_map(unsigned long coded_mem_map, unsigned long pnum)
{
	/* mask off the extra low bits of information */
	coded_mem_map &= SECTION_MAP_MASK;
	return ((struct page *)coded_mem_map) + section_nr_to_pfn(pnum);
}
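/*
 * Editorial worked example: let p0 = section_nr_to_pfn(pnum) be the
 * section's first pfn and m its mem_map base. Then
 *
 *	encode:  coded = (unsigned long)(m - p0)
 *	decode:  (struct page *)(coded & SECTION_MAP_MASK) + p0 == m
 *
 * and for any pfn in the section, (struct page *)coded + pfn is that
 * pfn's page descriptor - a single add, with no per-section offset
 * lookup (assuming the low flag bits masked by SECTION_MAP_MASK are
 * clear in coded).
 */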
static int __meminit sparse_init_one_section(struct mem_section *ms,
		unsigned long pnum, struct page *mem_map,
		unsigned long *pageblock_bitmap)
{
	if (!present_section(ms))
		return -EINVAL;

	ms->section_mem_map &= ~SECTION_MAP_MASK;
	ms->section_mem_map |= sparse_encode_mem_map(mem_map, pnum) |
							SECTION_HAS_MEM_MAP;
	ms->pageblock_flags = pageblock_bitmap;

	return 1;
}
unsigned long usemap_size(void)
{
	unsigned long size_bytes;
	size_bytes = roundup(SECTION_BLOCKFLAGS_BITS, 8) / 8;
	size_bytes = roundup(size_bytes, sizeof(unsigned long));
	return size_bytes;
}
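/*
 * Editorial example of the rounding: a hypothetical 130-bit
 * SECTION_BLOCKFLAGS_BITS gives roundup(130, 8) / 8 = 17 bytes, then
 * roundup(17, 8) = 24 bytes with 8-byte longs - the "24 bytes" figure
 * quoted in the sparse_init() comment below.
 */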
#ifdef CONFIG_MEMORY_HOTPLUG
static unsigned long *__kmalloc_section_usemap(void)
{
	return kmalloc(usemap_size(), GFP_KERNEL);
}
#endif /* CONFIG_MEMORY_HOTPLUG */
static unsigned long *__init sparse_early_usemap_alloc(unsigned long pnum)
{
	unsigned long *usemap;
	struct mem_section *ms = __nr_to_section(pnum);
	int nid = sparse_early_nid(ms);

	usemap = alloc_bootmem_node(NODE_DATA(nid), usemap_size());
	if (usemap)
		return usemap;

	/* Stupid: suppress gcc warning for SPARSEMEM && !NUMA */
	nid = 0;

	printk(KERN_WARNING "%s: allocation failed\n", __func__);
	return NULL;
}
#ifndef CONFIG_SPARSEMEM_VMEMMAP
struct page __init *sparse_mem_map_populate(unsigned long pnum, int nid)
{
	struct page *map;

	map = alloc_remap(nid, sizeof(struct page) * PAGES_PER_SECTION);
	if (map)
		return map;

	map = alloc_bootmem_pages_node(NODE_DATA(nid),
			PAGE_ALIGN(sizeof(struct page) * PAGES_PER_SECTION));
	return map;
}
#endif /* !CONFIG_SPARSEMEM_VMEMMAP */
struct page __init *sparse_early_mem_map_alloc(unsigned long pnum)
{
	struct page *map;
	struct mem_section *ms = __nr_to_section(pnum);
	int nid = sparse_early_nid(ms);

	map = sparse_mem_map_populate(pnum, nid);
	if (map)
		return map;

	printk(KERN_ERR "%s: sparsemem memory map backing failed "
		"some memory will not be available.\n", __func__);
	ms->section_mem_map = 0;
	return NULL;
}
void __attribute__((weak)) __meminit vmemmap_populate_print_last(void)
{
}
/*
 * Allocate the accumulated non-linear sections, allocate a mem_map
 * for each and record the physical to section mapping.
 */
void __init sparse_init(void)
{
	unsigned long pnum;
	struct page *map;
	unsigned long *usemap;
	unsigned long **usemap_map;
	int size;

	/*
	 * Each mem_map is backed by a big page (2M on 64-bit x86), while a
	 * usemap is much smaller than a page (about 24 bytes). Allocating
	 * them alternately (2M with 2M alignment, then 24 bytes, ...) makes
	 * each following 2M allocation slip to the next 2M boundary, so a
	 * big system ends up with a lot of holes. Instead, do all the
	 * usemap allocations first so the 2M pages can be allocated
	 * contiguously.
	 *
	 * powerpc needs to call sparse_init_one_section right after each
	 * sparse_early_mem_map_alloc, so allocate usemap_map first.
	 */
	size = sizeof(unsigned long *) * NR_MEM_SECTIONS;
	usemap_map = alloc_bootmem(size);
	if (!usemap_map)
		panic("can not allocate usemap_map\n");

	for (pnum = 0; pnum < NR_MEM_SECTIONS; pnum++) {
		if (!present_section_nr(pnum))
			continue;
		usemap_map[pnum] = sparse_early_usemap_alloc(pnum);
	}

	for (pnum = 0; pnum < NR_MEM_SECTIONS; pnum++) {
		if (!present_section_nr(pnum))
			continue;

		usemap = usemap_map[pnum];
		if (!usemap)
			continue;

		map = sparse_early_mem_map_alloc(pnum);
		if (!map)
			continue;

		sparse_init_one_section(__nr_to_section(pnum), pnum, map,
					usemap);
	}

	vmemmap_populate_print_last();

	free_bootmem(__pa(usemap_map), size);
}
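/*
 * Editorial note: sparse_init() is expected to run once, early in boot,
 * after the architecture has reported all of its memory through
 * memory_present(); only the sections marked present there receive a
 * usemap and mem_map here.
 */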
#ifdef CONFIG_MEMORY_HOTPLUG
#ifdef CONFIG_SPARSEMEM_VMEMMAP
static inline struct page *kmalloc_section_memmap(unsigned long pnum, int nid,
						unsigned long nr_pages)
{
	/* This will make the necessary allocations eventually. */
	return sparse_mem_map_populate(pnum, nid);
}
static void __kfree_section_memmap(struct page *memmap, unsigned long nr_pages)
{
	return; /* XXX: Not implemented yet */
}
static void free_map_bootmem(struct page *page, unsigned long nr_pages)
{
}
#else /* !CONFIG_SPARSEMEM_VMEMMAP */
static struct page *__kmalloc_section_memmap(unsigned long nr_pages)
{
	struct page *page, *ret;
	unsigned long memmap_size = sizeof(struct page) * nr_pages;

	page = alloc_pages(GFP_KERNEL|__GFP_NOWARN, get_order(memmap_size));
	if (page)
		goto got_map_page;

	ret = vmalloc(memmap_size);
	if (ret)
		goto got_map_ptr;

	return NULL;
got_map_page:
	ret = (struct page *)pfn_to_kaddr(page_to_pfn(page));
got_map_ptr:
	memset(ret, 0, memmap_size);

	return ret;
}
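/*
 * Editorial note on the fallback above: a physically contiguous memmap
 * from the page allocator is preferred (__GFP_NOWARN silences the
 * expected large-order failures), with vmalloc() as the fallback;
 * __kfree_section_memmap() below tells the two cases apart with
 * is_vmalloc_addr().
 */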
static inline struct page *kmalloc_section_memmap(unsigned long pnum, int nid,
						unsigned long nr_pages)
{
	return __kmalloc_section_memmap(nr_pages);
}
static void __kfree_section_memmap(struct page *memmap, unsigned long nr_pages)
{
	if (is_vmalloc_addr(memmap))
		vfree(memmap);
	else
		free_pages((unsigned long)memmap,
			   get_order(sizeof(struct page) * nr_pages));
}
static void free_map_bootmem(struct page *page, unsigned long nr_pages)
{
	unsigned long maps_section_nr, removing_section_nr, i;
	int magic;

	for (i = 0; i < nr_pages; i++, page++) {
		magic = atomic_read(&page->_mapcount);

		BUG_ON(magic == NODE_INFO);

		maps_section_nr = pfn_to_section_nr(page_to_pfn(page));
		removing_section_nr = page->private;

		/*
		 * When this function is called, the section being removed
		 * has already been logically offlined, so all of its pages
		 * are isolated from the page allocator. If that section's
		 * memmap is placed on the section itself, it must not be
		 * freed: were it freed, the page allocator could hand it
		 * out again, and it would then be removed physically soon
		 * after.
		 */
		if (maps_section_nr != removing_section_nr)
			put_page_bootmem(page);
	}
}
#endif /* CONFIG_SPARSEMEM_VMEMMAP */
static void free_section_usemap(struct page *memmap, unsigned long *usemap)
{
	struct page *usemap_page;
	unsigned long nr_pages;

	if (!usemap)
		return;

	usemap_page = virt_to_page(usemap);
	/*
	 * Check to see if allocation came from hot-plug-add
	 */
	if (PageSlab(usemap_page)) {
		kfree(usemap);
		if (memmap)
			__kfree_section_memmap(memmap, PAGES_PER_SECTION);
		return;
	}

	/*
	 * The usemap came from bootmem. It is packed together with other
	 * usemaps on the section that holds the pgdat, so just keep it
	 * as is for now.
	 */

	if (memmap) {
		struct page *memmap_page;
		memmap_page = virt_to_page(memmap);

		nr_pages = PAGE_ALIGN(PAGES_PER_SECTION * sizeof(struct page))
			>> PAGE_SHIFT;

		free_map_bootmem(memmap_page, nr_pages);
	}
}
/*
 * Returns the number of sections whose mem_maps were properly set.
 * If this is <= 0, the passed-in map was not consumed and must be
 * freed by the caller.
 */
int sparse_add_one_section(struct zone *zone, unsigned long start_pfn,
			   int nr_pages)
{
	unsigned long section_nr = pfn_to_section_nr(start_pfn);
	struct pglist_data *pgdat = zone->zone_pgdat;
	struct mem_section *ms;
	struct page *memmap;
	unsigned long *usemap;
	unsigned long flags;
	int ret;

	/*
	 * No locking needed here: sparse_index_init() does its own
	 * locking, and it may kmalloc.
	 */
	ret = sparse_index_init(section_nr, pgdat->node_id);
	if (ret < 0 && ret != -EEXIST)
		return ret;
	memmap = kmalloc_section_memmap(section_nr, pgdat->node_id, nr_pages);
	if (!memmap)
		return -ENOMEM;
	usemap = __kmalloc_section_usemap();
	if (!usemap) {
		__kfree_section_memmap(memmap, nr_pages);
		return -ENOMEM;
	}

	pgdat_resize_lock(pgdat, &flags);

	ms = __pfn_to_section(start_pfn);
	if (ms->section_mem_map & SECTION_MARKED_PRESENT) {
		ret = -EEXIST;
		goto out;
	}

	ms->section_mem_map |= SECTION_MARKED_PRESENT;

	ret = sparse_init_one_section(ms, section_nr, memmap, usemap);

out:
	pgdat_resize_unlock(pgdat, &flags);
	if (ret <= 0) {
		kfree(usemap);
		__kfree_section_memmap(memmap, nr_pages);
	}
	return ret;
}
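/*
 * Editorial sketch of the call path (from the memory-hotplug core, not
 * this file): add_memory() eventually reaches __add_pages(), which
 * calls sparse_add_one_section() for each newly added section; removal
 * mirrors this and ends in sparse_remove_one_section() below.
 */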
void sparse_remove_one_section(struct zone *zone, struct mem_section *ms)
{
	struct page *memmap = NULL;
	unsigned long *usemap = NULL;

	if (ms->section_mem_map) {
		usemap = ms->pageblock_flags;
		memmap = sparse_decode_mem_map(ms->section_mem_map,
						__section_nr(ms));
		ms->section_mem_map = 0;
		ms->pageblock_flags = NULL;
	}

	free_section_usemap(memmap, usemap);
}
#endif /* CONFIG_MEMORY_HOTPLUG */