/*
 * sparse memory mappings.
 */
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/mmzone.h>
#include <linux/bootmem.h>
#include <linux/highmem.h>
#include <linux/export.h>
#include <linux/spinlock.h>
#include <linux/vmalloc.h>
#include "internal.h"
#include <asm/dma.h>
#include <asm/pgalloc.h>
#include <asm/pgtable.h>
/*
 * Permanent SPARSEMEM data:
 *
 * 1) mem_section	- memory sections, mem_map's for valid memory
 */
#ifdef CONFIG_SPARSEMEM_EXTREME
struct mem_section *mem_section[NR_SECTION_ROOTS]
	____cacheline_internodealigned_in_smp;
#else
struct mem_section mem_section[NR_SECTION_ROOTS][SECTIONS_PER_ROOT]
	____cacheline_internodealigned_in_smp;
#endif
EXPORT_SYMBOL(mem_section);
#ifdef NODE_NOT_IN_PAGE_FLAGS
/*
 * If we did not store the node number in the page then we have to
 * do a lookup in the section_to_node_table in order to find which
 * node the page belongs to.
 */
#if MAX_NUMNODES <= 256
static u8 section_to_node_table[NR_MEM_SECTIONS] __cacheline_aligned;
#else
static u16 section_to_node_table[NR_MEM_SECTIONS] __cacheline_aligned;
#endif

int page_to_nid(const struct page *page)
{
	return section_to_node_table[page_to_section(page)];
}
EXPORT_SYMBOL(page_to_nid);

static void set_section_nid(unsigned long section_nr, int nid)
{
	section_to_node_table[section_nr] = nid;
}
#else /* !NODE_NOT_IN_PAGE_FLAGS */
static inline void set_section_nid(unsigned long section_nr, int nid)
{
}
#endif
#ifdef CONFIG_SPARSEMEM_EXTREME
static struct mem_section noinline __init_refok *sparse_index_alloc(int nid)
{
	struct mem_section *section = NULL;
	unsigned long array_size = SECTIONS_PER_ROOT *
				   sizeof(struct mem_section);

	if (slab_is_available()) {
		if (node_state(nid, N_HIGH_MEMORY))
			section = kzalloc_node(array_size, GFP_KERNEL, nid);
		else
			section = kzalloc(array_size, GFP_KERNEL);
	} else {
		section = alloc_bootmem_node(NODE_DATA(nid), array_size);
	}

	return section;
}
static int __meminit sparse_index_init(unsigned long section_nr, int nid)
{
	unsigned long root = SECTION_NR_TO_ROOT(section_nr);
	struct mem_section *section;

	if (mem_section[root])
		return -EEXIST;

	section = sparse_index_alloc(nid);
	if (!section)
		return -ENOMEM;

	mem_section[root] = section;

	return 0;
}
#else /* !SPARSEMEM_EXTREME */
static inline int sparse_index_init(unsigned long section_nr, int nid)
{
	return 0;
}
#endif
/*
 * Although written for the SPARSEMEM_EXTREME case, this happens
 * to also work for the flat array case because
 * NR_SECTION_ROOTS==NR_MEM_SECTIONS.
 */
int __section_nr(struct mem_section *ms)
{
	unsigned long root_nr;
	struct mem_section *root;

	for (root_nr = 0; root_nr < NR_SECTION_ROOTS; root_nr++) {
		root = __nr_to_section(root_nr * SECTIONS_PER_ROOT);
		if (!root)
			continue;

		if ((ms >= root) && (ms < (root + SECTIONS_PER_ROOT)))
			break;
	}

	VM_BUG_ON(root_nr == NR_SECTION_ROOTS);

	return (root_nr * SECTIONS_PER_ROOT) + (ms - root);
}
/*
 * During early boot, before section_mem_map is used for an actual
 * mem_map, we use section_mem_map to store the section's NUMA
 * node. This keeps us from having to use another data structure. The
 * node information is cleared just before we store the real mem_map.
 */
static inline unsigned long sparse_encode_early_nid(int nid)
{
	return (nid << SECTION_NID_SHIFT);
}

static inline int sparse_early_nid(struct mem_section *section)
{
	return (section->section_mem_map >> SECTION_NID_SHIFT);
}
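
/*
 * Illustrative sketch (not part of the original source): during early boot
 * the node id round-trips through section_mem_map, e.g. for a hypothetical
 * section 'ms' and node 'nid':
 *
 *	ms->section_mem_map = sparse_encode_early_nid(nid) |
 *				SECTION_MARKED_PRESENT;
 *	BUG_ON(sparse_early_nid(ms) != nid);
 *
 * The flag bits sit below SECTION_NID_SHIFT, so the shift in
 * sparse_early_nid() discards them and recovers the node id.
 */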
/* Validate the physical addressing limitations of the model */
void __meminit mminit_validate_memmodel_limits(unsigned long *start_pfn,
						unsigned long *end_pfn)
{
	unsigned long max_sparsemem_pfn = 1UL << (MAX_PHYSMEM_BITS-PAGE_SHIFT);

	/*
	 * Sanity checks - do not allow an architecture to pass
	 * in larger pfns than the maximum scope of sparsemem:
	 */
	if (*start_pfn > max_sparsemem_pfn) {
		mminit_dprintk(MMINIT_WARNING, "pfnvalidation",
			"Start of range %lu -> %lu exceeds SPARSEMEM max %lu\n",
			*start_pfn, *end_pfn, max_sparsemem_pfn);
		*start_pfn = max_sparsemem_pfn;
		*end_pfn = max_sparsemem_pfn;
	} else if (*end_pfn > max_sparsemem_pfn) {
		mminit_dprintk(MMINIT_WARNING, "pfnvalidation",
			"End of range %lu -> %lu exceeds SPARSEMEM max %lu\n",
			*start_pfn, *end_pfn, max_sparsemem_pfn);
		*end_pfn = max_sparsemem_pfn;
	}
}
/* Record a memory area against a node. */
void __init memory_present(int nid, unsigned long start, unsigned long end)
{
	unsigned long pfn;

	start &= PAGE_SECTION_MASK;
	mminit_validate_memmodel_limits(&start, &end);
	for (pfn = start; pfn < end; pfn += PAGES_PER_SECTION) {
		unsigned long section = pfn_to_section_nr(pfn);
		struct mem_section *ms;

		sparse_index_init(section, nid);
		set_section_nid(section, nid);

		ms = __nr_to_section(section);
		if (!ms->section_mem_map)
			ms->section_mem_map = sparse_encode_early_nid(nid) |
							SECTION_MARKED_PRESENT;
	}
}
/*
 * Only used by the i386 NUMA architectures, but relatively
 * harmless.
 */
unsigned long __init node_memmap_size_bytes(int nid, unsigned long start_pfn,
						unsigned long end_pfn)
{
	unsigned long pfn;
	unsigned long nr_pages = 0;

	mminit_validate_memmodel_limits(&start_pfn, &end_pfn);
	for (pfn = start_pfn; pfn < end_pfn; pfn += PAGES_PER_SECTION) {
		if (nid != early_pfn_to_nid(pfn))
			continue;

		if (pfn_present(pfn))
			nr_pages += PAGES_PER_SECTION;
	}

	return nr_pages * sizeof(struct page);
}
/*
 * Subtle, we encode the real pfn into the mem_map such that
 * the identity pfn - section_mem_map will return the actual
 * physical page frame number.
 */
static unsigned long sparse_encode_mem_map(struct page *mem_map, unsigned long pnum)
{
	return (unsigned long)(mem_map - (section_nr_to_pfn(pnum)));
}

/*
 * Decode mem_map from the coded memmap
 */
struct page *sparse_decode_mem_map(unsigned long coded_mem_map, unsigned long pnum)
{
	/* mask off the extra low bits of information */
	coded_mem_map &= SECTION_MAP_MASK;
	return ((struct page *)coded_mem_map) + section_nr_to_pfn(pnum);
}
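
/*
 * Worked example (not part of the original source): let 'first' be
 * section_nr_to_pfn(pnum), the first pfn covered by section 'pnum'. The
 * encoded value is mem_map - first, so decoding adds 'first' back and
 * returns the original pointer:
 *
 *	coded = sparse_encode_mem_map(mem_map, pnum);
 *	BUG_ON(sparse_decode_mem_map(coded, pnum) != mem_map);
 *
 * Because &mem_map[pfn - first] - (struct page *)coded == pfn, the
 * "pfn - section_mem_map" identity described above holds for every pfn
 * inside the section.
 */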
static int __meminit sparse_init_one_section(struct mem_section *ms,
		unsigned long pnum, struct page *mem_map,
		unsigned long *pageblock_bitmap)
{
	if (!present_section(ms))
		return -EINVAL;

	ms->section_mem_map &= ~SECTION_MAP_MASK;
	ms->section_mem_map |= sparse_encode_mem_map(mem_map, pnum) |
							SECTION_HAS_MEM_MAP;
	ms->pageblock_flags = pageblock_bitmap;

	return 1;
}
unsigned long usemap_size(void)
{
	unsigned long size_bytes;
	size_bytes = roundup(SECTION_BLOCKFLAGS_BITS, 8) / 8;
	size_bytes = roundup(size_bytes, sizeof(unsigned long));
	return size_bytes;
}
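
/*
 * Worked example (hypothetical numbers, not from the original source): if
 * SECTION_BLOCKFLAGS_BITS were 256, the first roundup gives 256 / 8 = 32
 * bytes, and rounding 32 up to sizeof(unsigned long) leaves it at 32, so
 * each section's usemap would occupy 32 bytes.
 */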
#ifdef CONFIG_MEMORY_HOTPLUG
static unsigned long *__kmalloc_section_usemap(void)
{
	return kmalloc(usemap_size(), GFP_KERNEL);
}
#endif /* CONFIG_MEMORY_HOTPLUG */
#ifdef CONFIG_MEMORY_HOTREMOVE
static unsigned long * __init
sparse_early_usemaps_alloc_pgdat_section(struct pglist_data *pgdat,
					 unsigned long size)
{
	unsigned long goal, limit;
	unsigned long *p;
	int nid;
	/*
	 * A page may contain usemaps for other sections preventing the
	 * page being freed and making a section unremovable while
	 * other sections referencing the usemap remain active. Similarly,
	 * a pgdat can prevent a section being removed. If section A
	 * contains a pgdat and section B contains the usemap, both
	 * sections become inter-dependent. This allocates usemaps
	 * from the same section as the pgdat where possible to avoid
	 * this problem.
	 */
	goal = __pa(pgdat) & (PAGE_SECTION_MASK << PAGE_SHIFT);
	limit = goal + (1UL << PA_SECTION_SHIFT);
	nid = early_pfn_to_nid(goal >> PAGE_SHIFT);
again:
	p = ___alloc_bootmem_node_nopanic(NODE_DATA(nid), size,
					  SMP_CACHE_BYTES, goal, limit);
	if (!p && limit) {
		limit = 0;
		goto again;
	}
	return p;
}
static void __init check_usemap_section_nr(int nid, unsigned long *usemap)
{
	unsigned long usemap_snr, pgdat_snr;
	static unsigned long old_usemap_snr = NR_MEM_SECTIONS;
	static unsigned long old_pgdat_snr = NR_MEM_SECTIONS;
	struct pglist_data *pgdat = NODE_DATA(nid);
	int usemap_nid;

	usemap_snr = pfn_to_section_nr(__pa(usemap) >> PAGE_SHIFT);
	pgdat_snr = pfn_to_section_nr(__pa(pgdat) >> PAGE_SHIFT);
	if (usemap_snr == pgdat_snr)
		return;

	if (old_usemap_snr == usemap_snr && old_pgdat_snr == pgdat_snr)
		/* skip redundant message */
		return;

	old_usemap_snr = usemap_snr;
	old_pgdat_snr = pgdat_snr;

	usemap_nid = sparse_early_nid(__nr_to_section(usemap_snr));
	if (usemap_nid != nid) {
		printk(KERN_INFO
		       "node %d must be removed before remove section %ld\n",
		       nid, usemap_snr);
		return;
	}
	/*
	 * There is a circular dependency.
	 * Some platforms allow un-removable sections because they will just
	 * gather other removable sections for dynamic partitioning.
	 * Just notify the un-removable section's number here.
	 */
	printk(KERN_INFO "Section %ld and %ld (node %d)", usemap_snr,
	       pgdat_snr, nid);
	printk(KERN_CONT
	       " have a circular dependency on usemap and pgdat allocations\n");
}
#else
static unsigned long * __init
sparse_early_usemaps_alloc_pgdat_section(struct pglist_data *pgdat,
					 unsigned long size)
{
	return alloc_bootmem_node_nopanic(pgdat, size);
}

static void __init check_usemap_section_nr(int nid, unsigned long *usemap)
{
}
#endif /* CONFIG_MEMORY_HOTREMOVE */
static void __init sparse_early_usemaps_alloc_node(unsigned long **usemap_map,
				 unsigned long pnum_begin,
				 unsigned long pnum_end,
				 unsigned long usemap_count, int nodeid)
{
	void *usemap;
	unsigned long pnum;
	int size = usemap_size();

	usemap = sparse_early_usemaps_alloc_pgdat_section(NODE_DATA(nodeid),
							  size * usemap_count);
	if (!usemap) {
		printk(KERN_WARNING "%s: allocation failed\n", __func__);
		return;
	}

	for (pnum = pnum_begin; pnum < pnum_end; pnum++) {
		if (!present_section_nr(pnum))
			continue;
		usemap_map[pnum] = usemap;
		usemap += size;
		check_usemap_section_nr(nodeid, usemap_map[pnum]);
	}
}
#ifndef CONFIG_SPARSEMEM_VMEMMAP
struct page __init *sparse_mem_map_populate(unsigned long pnum, int nid)
{
	struct page *map;
	unsigned long size;

	map = alloc_remap(nid, sizeof(struct page) * PAGES_PER_SECTION);
	if (map)
		return map;

	size = PAGE_ALIGN(sizeof(struct page) * PAGES_PER_SECTION);
	map = __alloc_bootmem_node_high(NODE_DATA(nid), size,
					 PAGE_SIZE, __pa(MAX_DMA_ADDRESS));
	return map;
}
void __init sparse_mem_maps_populate_node(struct page **map_map,
					  unsigned long pnum_begin,
					  unsigned long pnum_end,
					  unsigned long map_count, int nodeid)
{
	void *map;
	unsigned long pnum;
	unsigned long size = sizeof(struct page) * PAGES_PER_SECTION;

	map = alloc_remap(nodeid, size * map_count);
	if (map) {
		for (pnum = pnum_begin; pnum < pnum_end; pnum++) {
			if (!present_section_nr(pnum))
				continue;
			map_map[pnum] = map;
			map += size;
		}
		return;
	}

	size = PAGE_ALIGN(size);
	map = __alloc_bootmem_node_high(NODE_DATA(nodeid), size * map_count,
					 PAGE_SIZE, __pa(MAX_DMA_ADDRESS));
	if (map) {
		for (pnum = pnum_begin; pnum < pnum_end; pnum++) {
			if (!present_section_nr(pnum))
				continue;
			map_map[pnum] = map;
			map += size;
		}
		return;
	}

	/* fallback: allocate each section's mem_map individually */
	for (pnum = pnum_begin; pnum < pnum_end; pnum++) {
		struct mem_section *ms;

		if (!present_section_nr(pnum))
			continue;
		map_map[pnum] = sparse_mem_map_populate(pnum, nodeid);
		if (map_map[pnum])
			continue;
		ms = __nr_to_section(pnum);
		printk(KERN_ERR "%s: sparsemem memory map backing failed "
			"some memory will not be available.\n", __func__);
		ms->section_mem_map = 0;
	}
}
#endif /* !CONFIG_SPARSEMEM_VMEMMAP */
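
/*
 * Sizing sketch (hypothetical numbers, not from the original source): with a
 * 64-byte struct page and 32768 pages per 128 MB section, one section's
 * mem_map is 64 * 32768 bytes = 2 MB. That is the "2M" the sparse_init()
 * comment below refers to when it tries to keep these allocations contiguous.
 */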
#ifdef CONFIG_SPARSEMEM_ALLOC_MEM_MAP_TOGETHER
static void __init sparse_early_mem_maps_alloc_node(struct page **map_map,
				 unsigned long pnum_begin,
				 unsigned long pnum_end,
				 unsigned long map_count, int nodeid)
{
	sparse_mem_maps_populate_node(map_map, pnum_begin, pnum_end,
					 map_count, nodeid);
}
#else
static struct page __init *sparse_early_mem_map_alloc(unsigned long pnum)
{
	struct page *map;
	struct mem_section *ms = __nr_to_section(pnum);
	int nid = sparse_early_nid(ms);

	map = sparse_mem_map_populate(pnum, nid);
	if (map)
		return map;

	printk(KERN_ERR "%s: sparsemem memory map backing failed "
			"some memory will not be available.\n", __func__);
	ms->section_mem_map = 0;
	return NULL;
}
#endif
void __attribute__((weak)) __meminit vmemmap_populate_print_last(void)
{
}
/*
 * Allocate the accumulated non-linear sections, allocate a mem_map
 * for each and record the physical to section mapping.
 */
void __init sparse_init(void)
{
	unsigned long pnum;
	struct page *map;
	unsigned long *usemap;
	unsigned long **usemap_map;
	unsigned long size;
	int nodeid_begin = 0;
	unsigned long pnum_begin = 0;
	unsigned long usemap_count;
#ifdef CONFIG_SPARSEMEM_ALLOC_MEM_MAP_TOGETHER
	unsigned long map_count;
	int size2;
	struct page **map_map;
#endif

	/* see include/linux/mmzone.h 'struct mem_section' definition */
	BUILD_BUG_ON(!is_power_of_2(sizeof(struct mem_section)));

	/* Setup pageblock_order for HUGETLB_PAGE_SIZE_VARIABLE */
	set_pageblock_order();

	/*
	 * map is using big page (aka 2M in x86 64 bit)
	 * usemap is much less than one page (aka 24 bytes)
	 * so alloc 2M (with 2M align) and 24 bytes in turn will
	 * make the next 2M slip to one more 2M later.
	 * then in a big system, the memory will have a lot of holes...
	 * here try to allocate 2M pages continuously.
	 *
	 * powerpc needs to call sparse_init_one_section right after each
	 * sparse_early_mem_map_alloc, so allocate usemap_map at first.
	 */
	size = sizeof(unsigned long *) * NR_MEM_SECTIONS;
	usemap_map = alloc_bootmem(size);
	if (!usemap_map)
		panic("can not allocate usemap_map\n");

	for (pnum = 0; pnum < NR_MEM_SECTIONS; pnum++) {
		struct mem_section *ms;

		if (!present_section_nr(pnum))
			continue;
		ms = __nr_to_section(pnum);
		nodeid_begin = sparse_early_nid(ms);
		pnum_begin = pnum;
		break;
	}
	usemap_count = 1;
	for (pnum = pnum_begin + 1; pnum < NR_MEM_SECTIONS; pnum++) {
		struct mem_section *ms;
		int nodeid;

		if (!present_section_nr(pnum))
			continue;
		ms = __nr_to_section(pnum);
		nodeid = sparse_early_nid(ms);
		if (nodeid == nodeid_begin) {
			usemap_count++;
			continue;
		}
		/* ok, we need to take care of from pnum_begin to pnum - 1 */
		sparse_early_usemaps_alloc_node(usemap_map, pnum_begin, pnum,
						usemap_count, nodeid_begin);
		/* new start, update count etc. */
		nodeid_begin = nodeid;
		pnum_begin = pnum;
		usemap_count = 1;
	}
	/* ok, last chunk */
	sparse_early_usemaps_alloc_node(usemap_map, pnum_begin, NR_MEM_SECTIONS,
					usemap_count, nodeid_begin);

#ifdef CONFIG_SPARSEMEM_ALLOC_MEM_MAP_TOGETHER
	size2 = sizeof(struct page *) * NR_MEM_SECTIONS;
	map_map = alloc_bootmem(size2);
	if (!map_map)
		panic("can not allocate map_map\n");

	for (pnum = 0; pnum < NR_MEM_SECTIONS; pnum++) {
		struct mem_section *ms;

		if (!present_section_nr(pnum))
			continue;
		ms = __nr_to_section(pnum);
		nodeid_begin = sparse_early_nid(ms);
		pnum_begin = pnum;
		break;
	}
	map_count = 1;
	for (pnum = pnum_begin + 1; pnum < NR_MEM_SECTIONS; pnum++) {
		struct mem_section *ms;
		int nodeid;

		if (!present_section_nr(pnum))
			continue;
		ms = __nr_to_section(pnum);
		nodeid = sparse_early_nid(ms);
		if (nodeid == nodeid_begin) {
			map_count++;
			continue;
		}
		/* ok, we need to take care of from pnum_begin to pnum - 1 */
		sparse_early_mem_maps_alloc_node(map_map, pnum_begin, pnum,
						 map_count, nodeid_begin);
		/* new start, update count etc. */
		nodeid_begin = nodeid;
		pnum_begin = pnum;
		map_count = 1;
	}
	/* ok, last chunk */
	sparse_early_mem_maps_alloc_node(map_map, pnum_begin, NR_MEM_SECTIONS,
					 map_count, nodeid_begin);
#endif

	for (pnum = 0; pnum < NR_MEM_SECTIONS; pnum++) {
		if (!present_section_nr(pnum))
			continue;

		usemap = usemap_map[pnum];
		if (!usemap)
			continue;

#ifdef CONFIG_SPARSEMEM_ALLOC_MEM_MAP_TOGETHER
		map = map_map[pnum];
#else
		map = sparse_early_mem_map_alloc(pnum);
#endif
		if (!map)
			continue;

		sparse_init_one_section(__nr_to_section(pnum), pnum, map,
								usemap);
	}

	vmemmap_populate_print_last();

#ifdef CONFIG_SPARSEMEM_ALLOC_MEM_MAP_TOGETHER
	free_bootmem(__pa(map_map), size2);
#endif
	free_bootmem(__pa(usemap_map), size);
}
#ifdef CONFIG_MEMORY_HOTPLUG
#ifdef CONFIG_SPARSEMEM_VMEMMAP
static inline struct page *kmalloc_section_memmap(unsigned long pnum, int nid,
						  unsigned long nr_pages)
{
	/* This will make the necessary allocations eventually. */
	return sparse_mem_map_populate(pnum, nid);
}
static void __kfree_section_memmap(struct page *memmap, unsigned long nr_pages)
{
	unsigned long start = (unsigned long)memmap;
	unsigned long end = (unsigned long)(memmap + nr_pages);

	vmemmap_free(start, end);
}
#ifdef CONFIG_MEMORY_HOTREMOVE
static void free_map_bootmem(struct page *memmap, unsigned long nr_pages)
{
	unsigned long start = (unsigned long)memmap;
	unsigned long end = (unsigned long)(memmap + nr_pages);

	vmemmap_free(start, end);
}
#endif /* CONFIG_MEMORY_HOTREMOVE */
#else /* !CONFIG_SPARSEMEM_VMEMMAP */
static struct page *__kmalloc_section_memmap(unsigned long nr_pages)
{
	struct page *page, *ret;
	unsigned long memmap_size = sizeof(struct page) * nr_pages;

	page = alloc_pages(GFP_KERNEL|__GFP_NOWARN, get_order(memmap_size));
	if (page)
		goto got_map_page;

	ret = vmalloc(memmap_size);
	if (ret)
		goto got_map_ptr;

	return NULL;
got_map_page:
	ret = (struct page *)pfn_to_kaddr(page_to_pfn(page));
got_map_ptr:
	return ret;
}

static inline struct page *kmalloc_section_memmap(unsigned long pnum, int nid,
						  unsigned long nr_pages)
{
	return __kmalloc_section_memmap(nr_pages);
}

static void __kfree_section_memmap(struct page *memmap, unsigned long nr_pages)
{
	if (is_vmalloc_addr(memmap))
		vfree(memmap);
	else
		free_pages((unsigned long)memmap,
			   get_order(sizeof(struct page) * nr_pages));
}
#ifdef CONFIG_MEMORY_HOTREMOVE
static void free_map_bootmem(struct page *memmap, unsigned long nr_pages)
{
	unsigned long maps_section_nr, removing_section_nr, i;
	unsigned long magic;
	struct page *page = virt_to_page(memmap);

	for (i = 0; i < nr_pages; i++, page++) {
		magic = (unsigned long) page->lru.next;

		BUG_ON(magic == NODE_INFO);

		maps_section_nr = pfn_to_section_nr(page_to_pfn(page));
		removing_section_nr = page->private;

		/*
		 * When this function is called, the removing section is in a
		 * logically offlined state. This means all its pages are
		 * isolated from the page allocator. If the removing section's
		 * memmap is placed on the same section, it must not be freed.
		 * If it is freed, the page allocator may allocate it, and it
		 * will be removed physically soon.
		 */
		if (maps_section_nr != removing_section_nr)
			put_page_bootmem(page);
	}
}
#endif /* CONFIG_MEMORY_HOTREMOVE */
#endif /* CONFIG_SPARSEMEM_VMEMMAP */
/*
 * returns the number of sections whose mem_maps were properly
 * set. If this is <=0, then that means that the passed-in
 * map was not consumed and must be freed.
 */
int __meminit sparse_add_one_section(struct zone *zone, unsigned long start_pfn,
				     int nr_pages)
{
	unsigned long section_nr = pfn_to_section_nr(start_pfn);
	struct pglist_data *pgdat = zone->zone_pgdat;
	struct mem_section *ms;
	struct page *memmap;
	unsigned long *usemap;
	unsigned long flags;
	int ret;

	/*
	 * no locking for this, because it does its own
	 * plus, it does a kmalloc
	 */
	ret = sparse_index_init(section_nr, pgdat->node_id);
	if (ret < 0 && ret != -EEXIST)
		return ret;
	memmap = kmalloc_section_memmap(section_nr, pgdat->node_id, nr_pages);
	if (!memmap)
		return -ENOMEM;
	usemap = __kmalloc_section_usemap();
	if (!usemap) {
		__kfree_section_memmap(memmap, nr_pages);
		return -ENOMEM;
	}

	pgdat_resize_lock(pgdat, &flags);

	ms = __pfn_to_section(start_pfn);
	if (ms->section_mem_map & SECTION_MARKED_PRESENT) {
		ret = -EEXIST;
		goto out;
	}

	memset(memmap, 0, sizeof(struct page) * nr_pages);

	ms->section_mem_map |= SECTION_MARKED_PRESENT;

	ret = sparse_init_one_section(ms, section_nr, memmap, usemap);

out:
	pgdat_resize_unlock(pgdat, &flags);
	if (ret <= 0) {
		kfree(usemap);
		__kfree_section_memmap(memmap, nr_pages);
	}
	return ret;
}
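
/*
 * Usage sketch (not part of the original source): per the comment above
 * sparse_add_one_section(), a hypothetical caller treats a non-positive
 * return value as failure:
 *
 *	ret = sparse_add_one_section(zone, start_pfn, nr_pages);
 *	if (ret <= 0)
 *		return ret < 0 ? ret : -EINVAL;
 *
 * The real caller lives in the memory hotplug code; this only illustrates
 * the return convention.
 */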
#ifdef CONFIG_MEMORY_HOTREMOVE
#ifdef CONFIG_MEMORY_FAILURE
static void clear_hwpoisoned_pages(struct page *memmap, int nr_pages)
{
	int i;

	if (!memmap)
		return;

	for (i = 0; i < PAGES_PER_SECTION; i++) {
		if (PageHWPoison(&memmap[i])) {
			atomic_long_sub(1, &num_poisoned_pages);
			ClearPageHWPoison(&memmap[i]);
		}
	}
}
#else
static inline void clear_hwpoisoned_pages(struct page *memmap, int nr_pages)
{
}
#endif
static void free_section_usemap(struct page *memmap, unsigned long *usemap)
{
	struct page *usemap_page;
	unsigned long nr_pages;

	if (!usemap)
		return;

	usemap_page = virt_to_page(usemap);
	/*
	 * Check to see if allocation came from hot-plug-add
	 */
	if (PageSlab(usemap_page) || PageCompound(usemap_page)) {
		kfree(usemap);
		if (memmap)
			__kfree_section_memmap(memmap, PAGES_PER_SECTION);
		return;
	}

	/*
	 * The usemap came from bootmem. This is packed with other usemaps
	 * on the section which has pgdat at boot time. Just keep it as is now.
	 */

	if (memmap) {
		nr_pages = PAGE_ALIGN(PAGES_PER_SECTION * sizeof(struct page))
			>> PAGE_SHIFT;

		free_map_bootmem(memmap, nr_pages);
	}
}
void sparse_remove_one_section(struct zone *zone, struct mem_section *ms)
{
	struct page *memmap = NULL;
	unsigned long *usemap = NULL, flags;
	struct pglist_data *pgdat = zone->zone_pgdat;

	pgdat_resize_lock(pgdat, &flags);
	if (ms->section_mem_map) {
		usemap = ms->pageblock_flags;
		memmap = sparse_decode_mem_map(ms->section_mem_map,
						__section_nr(ms));
		ms->section_mem_map = 0;
		ms->pageblock_flags = NULL;
	}
	pgdat_resize_unlock(pgdat, &flags);

	clear_hwpoisoned_pages(memmap, PAGES_PER_SECTION);
	free_section_usemap(memmap, usemap);
}
#endif /* CONFIG_MEMORY_HOTREMOVE */
#endif /* CONFIG_MEMORY_HOTPLUG */