/*
 * sparse memory mappings.
 */
#include <linux/mm.h>
#include <linux/mmzone.h>
#include <linux/bootmem.h>
#include <linux/highmem.h>
#include <linux/module.h>
#include <linux/spinlock.h>
#include <linux/vmalloc.h>

/*
 * Permanent SPARSEMEM data:
 *
 * 1) mem_section	- memory sections, mem_map's for valid memory
 */
#ifdef CONFIG_SPARSEMEM_EXTREME
struct mem_section *mem_section[NR_SECTION_ROOTS]
	____cacheline_internodealigned_in_smp;
#else
struct mem_section mem_section[NR_SECTION_ROOTS][SECTIONS_PER_ROOT]
	____cacheline_internodealigned_in_smp;
#endif
EXPORT_SYMBOL(mem_section);

#ifdef NODE_NOT_IN_PAGE_FLAGS
/*
 * If we did not store the node number in the page then we have to
 * do a lookup in the section_to_node_table in order to find which
 * node the page belongs to.
 */
#if MAX_NUMNODES <= 256
static u8 section_to_node_table[NR_MEM_SECTIONS] __cacheline_aligned;
#else
static u16 section_to_node_table[NR_MEM_SECTIONS] __cacheline_aligned;
#endif

int page_to_nid(struct page *page)
{
	return section_to_node_table[page_to_section(page)];
}
EXPORT_SYMBOL(page_to_nid);
#endif

#ifdef CONFIG_SPARSEMEM_EXTREME
static struct mem_section noinline __init_refok *sparse_index_alloc(int nid)
{
	struct mem_section *section = NULL;
	unsigned long array_size = SECTIONS_PER_ROOT *
				   sizeof(struct mem_section);

	if (slab_is_available())
		section = kmalloc_node(array_size, GFP_KERNEL, nid);
	else
		section = alloc_bootmem_node(NODE_DATA(nid), array_size);

	if (section)
		memset(section, 0, array_size);

	return section;
}

static int __meminit sparse_index_init(unsigned long section_nr, int nid)
{
	static DEFINE_SPINLOCK(index_init_lock);
	unsigned long root = SECTION_NR_TO_ROOT(section_nr);
	struct mem_section *section;
	int ret = 0;

#ifdef NODE_NOT_IN_PAGE_FLAGS
	section_to_node_table[section_nr] = nid;
#endif

	if (mem_section[root])
		return -EEXIST;

	section = sparse_index_alloc(nid);
	/*
	 * This lock keeps two racing callers from installing
	 * two different allocations for the same root index.
	 */
	spin_lock(&index_init_lock);

	if (mem_section[root]) {
		ret = -EEXIST;
		goto out;
	}

	mem_section[root] = section;
out:
	spin_unlock(&index_init_lock);
	return ret;
}
#else /* !SPARSEMEM_EXTREME */
static inline int sparse_index_init(unsigned long section_nr, int nid)
{
	return 0;
}
#endif

/*
 * Although written for the SPARSEMEM_EXTREME case, this happens
 * to also work for the flat array case because
 * NR_SECTION_ROOTS == NR_MEM_SECTIONS.
 */
int __section_nr(struct mem_section *ms)
{
	unsigned long root_nr;
	struct mem_section *root;

	for (root_nr = 0; root_nr < NR_SECTION_ROOTS; root_nr++) {
		root = __nr_to_section(root_nr * SECTIONS_PER_ROOT);
		if (!root)
			continue;

		if ((ms >= root) && (ms < (root + SECTIONS_PER_ROOT)))
			break;
	}

	return (root_nr * SECTIONS_PER_ROOT) + (ms - root);
}

/*
 * During early boot, before section_mem_map is used for an actual
 * mem_map, we use section_mem_map to store the section's NUMA
 * node.  This keeps us from having to use another data structure.  The
 * node information is cleared just before we store the real mem_map.
 */
static inline unsigned long sparse_encode_early_nid(int nid)
{
	return (nid << SECTION_NID_SHIFT);
}

static inline int sparse_early_nid(struct mem_section *section)
{
	return (section->section_mem_map >> SECTION_NID_SHIFT);
}

/* Validate the physical addressing limitations of the model */
void __meminit mminit_validate_memmodel_limits(unsigned long *start_pfn,
						unsigned long *end_pfn)
{
	unsigned long max_sparsemem_pfn = 1UL << (MAX_PHYSMEM_BITS - PAGE_SHIFT);

	/*
	 * Sanity checks - do not allow an architecture to pass
	 * in larger pfns than the maximum scope of sparsemem:
	 */
	if (*start_pfn > max_sparsemem_pfn) {
		mminit_dprintk(MMINIT_WARNING, "pfnvalidation",
			"Start of range %lu -> %lu exceeds SPARSEMEM max %lu\n",
			*start_pfn, *end_pfn, max_sparsemem_pfn);
		WARN_ON_ONCE(1);
		*start_pfn = max_sparsemem_pfn;
		*end_pfn = max_sparsemem_pfn;
	}

	if (*end_pfn > max_sparsemem_pfn) {
		mminit_dprintk(MMINIT_WARNING, "pfnvalidation",
			"End of range %lu -> %lu exceeds SPARSEMEM max %lu\n",
			*start_pfn, *end_pfn, max_sparsemem_pfn);
		WARN_ON_ONCE(1);
		*end_pfn = max_sparsemem_pfn;
	}
}

/* Record a memory area against a node. */
void __init memory_present(int nid, unsigned long start, unsigned long end)
{
	unsigned long pfn;

	start &= PAGE_SECTION_MASK;
	mminit_validate_memmodel_limits(&start, &end);
	for (pfn = start; pfn < end; pfn += PAGES_PER_SECTION) {
		unsigned long section = pfn_to_section_nr(pfn);
		struct mem_section *ms;

		sparse_index_init(section, nid);

		ms = __nr_to_section(section);
		if (!ms->section_mem_map)
			ms->section_mem_map = sparse_encode_early_nid(nid) |
							SECTION_MARKED_PRESENT;
	}
}

/*
 * Only used by the i386 NUMA architectures, but relatively
 * generic code.
 */
unsigned long __init node_memmap_size_bytes(int nid, unsigned long start_pfn,
						unsigned long end_pfn)
{
	unsigned long pfn;
	unsigned long nr_pages = 0;

	mminit_validate_memmodel_limits(&start_pfn, &end_pfn);
	for (pfn = start_pfn; pfn < end_pfn; pfn += PAGES_PER_SECTION) {
		if (nid != early_pfn_to_nid(pfn))
			continue;

		if (pfn_present(pfn))
			nr_pages += PAGES_PER_SECTION;
	}

	return nr_pages * sizeof(struct page);
}

/*
 * Subtle, we encode the real pfn into the mem_map such that
 * the identity pfn - section_mem_map will return the actual
 * physical page frame number.
 */
static unsigned long sparse_encode_mem_map(struct page *mem_map, unsigned long pnum)
{
	return (unsigned long)(mem_map - (section_nr_to_pfn(pnum)));
}

/*
 * We need this if we ever free the mem_maps.  While not implemented yet,
 * this function is included for parity with its sibling.
 */
static __attribute((unused))
struct page *sparse_decode_mem_map(unsigned long coded_mem_map, unsigned long pnum)
{
	return ((struct page *)coded_mem_map) + section_nr_to_pfn(pnum);
}

static int __meminit sparse_init_one_section(struct mem_section *ms,
		unsigned long pnum, struct page *mem_map)
{
	if (!valid_section(ms))
		return -EINVAL;

	ms->section_mem_map &= ~SECTION_MAP_MASK;
	ms->section_mem_map |= sparse_encode_mem_map(mem_map, pnum);

	return 1;
}

static struct page __init *sparse_early_mem_map_alloc(unsigned long pnum)
{
	struct page *map;
	struct mem_section *ms = __nr_to_section(pnum);
	int nid = sparse_early_nid(ms);

	map = alloc_remap(nid, sizeof(struct page) * PAGES_PER_SECTION);
	if (map)
		return map;

	map = alloc_bootmem_node(NODE_DATA(nid),
			sizeof(struct page) * PAGES_PER_SECTION);
	if (map)
		return map;

	printk(KERN_WARNING "%s: allocation failed\n", __FUNCTION__);
	ms->section_mem_map = 0;
	return NULL;
}

/*
 * Allocate the accumulated non-linear sections, allocate a mem_map
 * for each and record the physical to section mapping.
 */
void __init sparse_init(void)
{
	unsigned long pnum;
	struct page *map;

	for (pnum = 0; pnum < NR_MEM_SECTIONS; pnum++) {
		if (!valid_section_nr(pnum))
			continue;

		map = sparse_early_mem_map_alloc(pnum);
		if (!map)
			continue;
		sparse_init_one_section(__nr_to_section(pnum), pnum, map);
	}
}

#ifdef CONFIG_MEMORY_HOTPLUG
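
/*
 * For a hot-added section the mem_map must be allocated at runtime:
 * try a physically contiguous page block first (__GFP_NOWARN, since
 * failure is tolerable) and fall back to vmalloc() if the buddy
 * allocator cannot satisfy the order.
 */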
static struct page *__kmalloc_section_memmap(unsigned long nr_pages)
{
	struct page *page, *ret;
	unsigned long memmap_size = sizeof(struct page) * nr_pages;

	page = alloc_pages(GFP_KERNEL|__GFP_NOWARN, get_order(memmap_size));
	if (page)
		goto got_map_page;

	ret = vmalloc(memmap_size);
	if (ret)
		goto got_map_ptr;

	return NULL;
got_map_page:
	ret = (struct page *)pfn_to_kaddr(page_to_pfn(page));
got_map_ptr:
	memset(ret, 0, memmap_size);

	return ret;
}

static int vaddr_in_vmalloc_area(void *addr)
{
	if (addr >= (void *)VMALLOC_START &&
	    addr < (void *)VMALLOC_END)
		return 1;
	return 0;
}

static void __kfree_section_memmap(struct page *memmap, unsigned long nr_pages)
{
	if (vaddr_in_vmalloc_area(memmap))
		vfree(memmap);
	else
		free_pages((unsigned long)memmap,
			   get_order(sizeof(struct page) * nr_pages));
}

/*
 * Returns the number of sections whose mem_maps were properly
 * set.  If this is <= 0, then that means that the passed-in
 * map was not consumed and must be freed.
 */
int sparse_add_one_section(struct zone *zone, unsigned long start_pfn,
			   int nr_pages)
{
	unsigned long section_nr = pfn_to_section_nr(start_pfn);
	struct pglist_data *pgdat = zone->zone_pgdat;
	struct mem_section *ms;
	struct page *memmap;
	unsigned long flags;
	int ret;

	/*
	 * No locking here: sparse_index_init() does its own locking
	 * and may allocate with kmalloc(), so it must run outside the
	 * pgdat resize lock.
	 */
	sparse_index_init(section_nr, pgdat->node_id);
	memmap = __kmalloc_section_memmap(nr_pages);

	pgdat_resize_lock(pgdat, &flags);

	ms = __pfn_to_section(start_pfn);
	if (ms->section_mem_map & SECTION_MARKED_PRESENT) {
		ret = -EEXIST;
		goto out;
	}
352 ms
->section_mem_map
|= SECTION_MARKED_PRESENT
;

	ret = sparse_init_one_section(ms, section_nr, memmap);

out:
	pgdat_resize_unlock(pgdat, &flags);
	if (ret <= 0)
		__kfree_section_memmap(memmap, nr_pages);
	return ret;
}
#endif