/*
 * sparse memory mappings.
 */
#include <linux/mm.h>
#include <linux/mmzone.h>
#include <linux/bootmem.h>
#include <linux/highmem.h>
#include <linux/module.h>
#include <linux/spinlock.h>
#include <linux/vmalloc.h>
#include <asm/dma.h>
/*
 * Permanent SPARSEMEM data:
 *
 * 1) mem_section	- memory sections, mem_map's for valid memory
 */
#ifdef CONFIG_SPARSEMEM_EXTREME
struct mem_section *mem_section[NR_SECTION_ROOTS]
	____cacheline_internodealigned_in_smp;
#else
struct mem_section mem_section[NR_SECTION_ROOTS][SECTIONS_PER_ROOT]
	____cacheline_internodealigned_in_smp;
#endif
EXPORT_SYMBOL(mem_section);
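
/*
 * Layout note (added commentary, not in the original file): with
 * SPARSEMEM_EXTREME the section table is two-level and its roots are
 * allocated on demand, so only roots covering present memory consume
 * RAM; in the flat case the whole
 * [NR_SECTION_ROOTS][SECTIONS_PER_ROOT] array is static.  Per the
 * mmzone.h definitions of this era, SECTIONS_PER_ROOT is sized so one
 * root's worth of struct mem_section fills a single page in the
 * EXTREME case, and is 1 in the flat case.
 */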
#ifdef CONFIG_SPARSEMEM_EXTREME
static struct mem_section *sparse_index_alloc(int nid)
{
	struct mem_section *section = NULL;
	unsigned long array_size = SECTIONS_PER_ROOT *
				   sizeof(struct mem_section);

	if (slab_is_available())
		section = kmalloc_node(array_size, GFP_KERNEL, nid);
	else
		section = alloc_bootmem_node(NODE_DATA(nid), array_size);

	if (section)
		memset(section, 0, array_size);

	return section;
}
static int sparse_index_init(unsigned long section_nr, int nid)
{
	static DEFINE_SPINLOCK(index_init_lock);
	unsigned long root = SECTION_NR_TO_ROOT(section_nr);
	struct mem_section *section;
	int ret = 0;

	if (mem_section[root])
		return -EEXIST;

	section = sparse_index_alloc(nid);
	/*
	 * This lock keeps two different sections from
	 * reallocating for the same index
	 */
	spin_lock(&index_init_lock);

	if (mem_section[root]) {
		ret = -EEXIST;
		goto out;
	}

	mem_section[root] = section;
out:
	spin_unlock(&index_init_lock);
	return ret;
}
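
/*
 * Note (added commentary): the sequence above is a check/alloc/recheck
 * pattern.  The unlocked test skips already-initialized roots cheaply,
 * the allocation runs outside the lock because it may sleep, and the
 * second test under index_init_lock resolves the race where two CPUs
 * initialize the same root at once; the loser's freshly allocated
 * array is simply abandoned.
 */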
#else /* !SPARSEMEM_EXTREME */
static inline int sparse_index_init(unsigned long section_nr, int nid)
{
	return 0;
}
#endif
/*
 * Although written for the SPARSEMEM_EXTREME case, this happens
 * to also work for the flat array case because
 * NR_SECTION_ROOTS==NR_MEM_SECTIONS.
 */
int __section_nr(struct mem_section* ms)
{
	unsigned long root_nr;
	struct mem_section* root;

	for (root_nr = 0; root_nr < NR_SECTION_ROOTS; root_nr++) {
		root = __nr_to_section(root_nr * SECTIONS_PER_ROOT);
		if (!root)
			continue;

		if ((ms >= root) && (ms < (root + SECTIONS_PER_ROOT)))
			break;
	}

	return (root_nr * SECTIONS_PER_ROOT) + (ms - root);
}
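
/*
 * Worked example (added for illustration): if SECTIONS_PER_ROOT were
 * 128, the mem_section for section number 300 would live in root 2 at
 * offset 44.  Given a pointer into that root's array, the arithmetic
 * above recovers 2 * 128 + 44 == 300.
 */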
/*
 * During early boot, before section_mem_map is used for an actual
 * mem_map, we use section_mem_map to store the section's NUMA
 * node.  This keeps us from having to use another data structure.  The
 * node information is cleared just before we store the real mem_map.
 */
static inline unsigned long sparse_encode_early_nid(int nid)
{
	return (nid << SECTION_NID_SHIFT);
}

static inline int sparse_early_nid(struct mem_section *section)
{
	return (section->section_mem_map >> SECTION_NID_SHIFT);
}
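
/*
 * Round-trip example (added for illustration, assuming the mmzone.h
 * layout of this era where the flag bits sit below SECTION_NID_SHIFT):
 * for nid 3, sparse_encode_early_nid(3) | SECTION_MARKED_PRESENT
 * leaves the nid bits untouched, and sparse_early_nid() shifts the
 * flags away to recover 3.
 */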
/* Record a memory area against a node. */
void memory_present(int nid, unsigned long start, unsigned long end)
{
	unsigned long pfn;

	start &= PAGE_SECTION_MASK;
	for (pfn = start; pfn < end; pfn += PAGES_PER_SECTION) {
		unsigned long section = pfn_to_section_nr(pfn);
		struct mem_section *ms;

		sparse_index_init(section, nid);

		ms = __nr_to_section(section);
		if (!ms->section_mem_map)
			ms->section_mem_map = sparse_encode_early_nid(nid) |
							SECTION_MARKED_PRESENT;
	}
}
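
/*
 * Usage sketch (illustrative, not part of this file): architecture
 * setup code calls this once per node while walking the physical
 * memory map, e.g.
 *
 *	memory_present(nid, node_start_pfn, node_end_pfn);
 *
 * so that sparse_init() later knows which sections need a mem_map.
 */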
/*
 * Only used by the i386 NUMA architectures, but relatively
 * generic code.
 */
unsigned long __init node_memmap_size_bytes(int nid, unsigned long start_pfn,
						     unsigned long end_pfn)
{
	unsigned long pfn;
	unsigned long nr_pages = 0;

	for (pfn = start_pfn; pfn < end_pfn; pfn += PAGES_PER_SECTION) {
		if (nid != early_pfn_to_nid(pfn))
			continue;

		if (pfn_valid(pfn))
			nr_pages += PAGES_PER_SECTION;
	}

	return nr_pages * sizeof(struct page);
}
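
/*
 * Example arithmetic (added for illustration): a node whose valid
 * memory spans 4 sections, with a hypothetical PAGES_PER_SECTION of
 * 16384, needs 4 * 16384 * sizeof(struct page) bytes of mem_map;
 * sections owned by other nodes are skipped by the
 * early_pfn_to_nid() check above.
 */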
/*
 * Subtle, we encode the real pfn into the mem_map such that
 * the identity pfn - section_mem_map will return the actual
 * physical page frame number.
 */
static unsigned long sparse_encode_mem_map(struct page *mem_map, unsigned long pnum)
{
	return (unsigned long)(mem_map - (section_nr_to_pfn(pnum)));
}

/*
 * We need this if we ever free the mem_maps.  While not implemented yet,
 * this function is included for parity with its sibling.
 */
static __attribute((unused))
struct page *sparse_decode_mem_map(unsigned long coded_mem_map, unsigned long pnum)
{
	return ((struct page *)coded_mem_map) + section_nr_to_pfn(pnum);
}
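
/*
 * Worked example (added for illustration): if section pnum starts at
 * pfn 0x10000 and its mem_map array sits at address M, the encoded
 * value is M biased down by 0x10000 struct-page slots.  For any page
 * p in that section, p - (struct page *)coded_mem_map then equals p's
 * real pfn, which is exactly the identity the comment above relies
 * on; sparse_decode_mem_map() undoes the bias by adding the section's
 * start pfn back.
 */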
static int sparse_init_one_section(struct mem_section *ms,
		unsigned long pnum, struct page *mem_map)
{
	if (!valid_section(ms))
		return -EINVAL;

	ms->section_mem_map &= ~SECTION_MAP_MASK;
	ms->section_mem_map |= sparse_encode_mem_map(mem_map, pnum);

	return 1;
}
static struct page *sparse_early_mem_map_alloc(unsigned long pnum)
{
	struct page *map;
	struct mem_section *ms = __nr_to_section(pnum);
	int nid = sparse_early_nid(ms);

	map = alloc_remap(nid, sizeof(struct page) * PAGES_PER_SECTION);
	if (map)
		return map;

	map = alloc_bootmem_node(NODE_DATA(nid),
			sizeof(struct page) * PAGES_PER_SECTION);
	if (map)
		return map;

	printk(KERN_WARNING "%s: allocation failed\n", __FUNCTION__);
	ms->section_mem_map = 0;
	return NULL;
}
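
/*
 * Note (added commentary): the fallback order above is deliberate.
 * alloc_remap() lets an architecture place the mem_map in a special
 * remapped region, bootmem is the generic early allocator, and on
 * total failure section_mem_map is zeroed, clearing
 * SECTION_MARKED_PRESENT so the section no longer tests as valid.
 */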
static struct page *__kmalloc_section_memmap(unsigned long nr_pages)
{
	struct page *page, *ret;
	unsigned long memmap_size = sizeof(struct page) * nr_pages;

	page = alloc_pages(GFP_KERNEL, get_order(memmap_size));
	if (page)
		goto got_map_page;

	ret = vmalloc(memmap_size);
	if (ret)
		goto got_map_ptr;

	return NULL;
got_map_page:
	ret = (struct page *)pfn_to_kaddr(page_to_pfn(page));
got_map_ptr:
	memset(ret, 0, memmap_size);

	return ret;
}
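
/*
 * Note (added commentary): physically contiguous pages are preferred,
 * with vmalloc() as the fallback when the buddy allocator cannot
 * satisfy the order; either way the memmap is zeroed before use.
 * Because the two paths return addresses from different ranges,
 * __kfree_section_memmap() below must ask vaddr_in_vmalloc_area() to
 * pick the matching free routine.
 */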
static int vaddr_in_vmalloc_area(void *addr)
{
	if (addr >= (void *)VMALLOC_START &&
	    addr < (void *)VMALLOC_END)
		return 1;
	return 0;
}
static void __kfree_section_memmap(struct page *memmap, unsigned long nr_pages)
{
	if (vaddr_in_vmalloc_area(memmap))
		vfree(memmap);
	else
		free_pages((unsigned long)memmap,
			   get_order(sizeof(struct page) * nr_pages));
}
/*
 * Allocate the accumulated non-linear sections, allocate a mem_map
 * for each and record the physical to section mapping.
 */
void sparse_init(void)
{
	unsigned long pnum;
	struct page *map;

	for (pnum = 0; pnum < NR_MEM_SECTIONS; pnum++) {
		if (!valid_section_nr(pnum))
			continue;

		map = sparse_early_mem_map_alloc(pnum);
		if (!map)
			continue;
		sparse_init_one_section(__nr_to_section(pnum), pnum, map);
	}
}
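
/*
 * Note (added commentary): when sparse_early_mem_map_alloc() fails it
 * zeroes that section's section_mem_map, so a section left without a
 * mem_map no longer tests as present and later valid_section() /
 * pfn_valid() checks treat it as absent.
 */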
/*
 * returns the number of sections whose mem_maps were properly
 * set.  If this is <=0, then that means that the passed-in
 * map was not consumed and must be freed.
 */
int sparse_add_one_section(struct zone *zone, unsigned long start_pfn,
			   int nr_pages)
{
	unsigned long section_nr = pfn_to_section_nr(start_pfn);
	struct pglist_data *pgdat = zone->zone_pgdat;
	struct mem_section *ms;
	struct page *memmap;
	unsigned long flags;
	int ret;

	/*
	 * no locking for this, because it does its own
	 * plus, it does a kmalloc
	 */
	sparse_index_init(section_nr, pgdat->node_id);
	memmap = __kmalloc_section_memmap(nr_pages);

	pgdat_resize_lock(pgdat, &flags);

	ms = __pfn_to_section(start_pfn);
	if (ms->section_mem_map & SECTION_MARKED_PRESENT) {
		ret = -EEXIST;
		goto out;
	}
	ms->section_mem_map |= SECTION_MARKED_PRESENT;

	ret = sparse_init_one_section(ms, section_nr, memmap);

out:
	pgdat_resize_unlock(pgdat, &flags);
	if (ret <= 0)
		__kfree_section_memmap(memmap, nr_pages);
	return ret;
}
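
/*
 * Usage sketch (illustrative, not part of this file): the memory
 * hotplug core is the expected caller, adding one section's worth of
 * pfns at a time under its own hotplug locking, e.g.
 *
 *	ret = sparse_add_one_section(zone, phys_start_pfn, nr_pages);
 *
 * On ret <= 0 (section already present, or init failed) the memmap
 * allocated above is released before returning, matching the comment
 * above the function.
 */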