/*
 * Generic VM initialization for x86-64 NUMA setups.
 * Copyright 2002,2003 Andi Kleen, SuSE Labs.
 */
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/string.h>
#include <linux/init.h>
#include <linux/bootmem.h>
#include <linux/memblock.h>
#include <linux/mmzone.h>
#include <linux/ctype.h>
#include <linux/module.h>
#include <linux/nodemask.h>
#include <linux/sched.h>
#include <linux/acpi.h>

#include <asm/e820.h>
#include <asm/proto.h>
#include <asm/dma.h>
#include <asm/acpi.h>
#include <asm/amd_nb.h>

struct pglist_data *node_data[MAX_NUMNODES] __read_mostly;
EXPORT_SYMBOL(node_data);

struct memnode memnode;

static unsigned long __initdata nodemap_addr;
static unsigned long __initdata nodemap_size;

/*
 * Given a shift value, try to populate memnodemap[].
 * Returns:
 * 1 if OK
 * 0 if memnodemap[] too small (or shift too small)
 * -1 if node overlap or lost ram (shift too big)
 */
static int __init populate_memnodemap(const struct bootnode *nodes,
				      int numnodes, int shift, int *nodeids)
{
	unsigned long addr, end;
	int i, res = -1;

	memset(memnodemap, 0xff, sizeof(s16)*memnodemapsize);
	for (i = 0; i < numnodes; i++) {
		addr = nodes[i].start;
		end = nodes[i].end;
		if (addr >= end)
			continue;
		if ((end >> shift) >= memnodemapsize)
			return 0;
		do {
			if (memnodemap[addr >> shift] != NUMA_NO_NODE)
				return -1;

			if (!nodeids)
				memnodemap[addr >> shift] = i;
			else
				memnodemap[addr >> shift] = nodeids[i];

			addr += (1UL << shift);
		} while (addr < end);
		res = 1;
	}
	return res;
}
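
/*
 * Example: with shift = 24 each memnodemap[] entry covers 16MB, so a node
 * spanning 0x0-0x40000000 fills entries 0-63 with its node id.  If the map
 * has fewer than (end >> shift) entries the function returns 0 (map or
 * shift too small); if two nodes hash to the same entry it returns -1
 * (shift too big).
 */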

static int __init allocate_cachealigned_memnodemap(void)
{
	unsigned long addr;

	memnodemap = memnode.embedded_map;
	if (memnodemapsize <= ARRAY_SIZE(memnode.embedded_map))
		return 0;

	addr = 0x8000;
	nodemap_size = roundup(sizeof(s16) * memnodemapsize, L1_CACHE_BYTES);
	nodemap_addr = memblock_find_in_range(addr, get_max_mapped(),
					      nodemap_size, L1_CACHE_BYTES);
	if (nodemap_addr == MEMBLOCK_ERROR) {
		printk(KERN_ERR
		       "NUMA: Unable to allocate Memory to Node hash map\n");
		nodemap_addr = nodemap_size = 0;
		return -1;
	}
	memnodemap = phys_to_virt(nodemap_addr);
	memblock_x86_reserve_range(nodemap_addr, nodemap_addr + nodemap_size, "MEMNODEMAP");

	printk(KERN_DEBUG "NUMA: Allocated memnodemap from %lx - %lx\n",
	       nodemap_addr, nodemap_addr + nodemap_size);
	return 0;
}

/*
 * The LSB of all start and end addresses in the node map is the value of the
 * maximum possible shift.
 */
static int __init extract_lsb_from_nodes(const struct bootnode *nodes,
					 int numnodes)
{
	int i, nodes_used = 0;
	unsigned long start, end;
	unsigned long bitfield = 0, memtop = 0;

	for (i = 0; i < numnodes; i++) {
		start = nodes[i].start;
		end = nodes[i].end;
		if (start >= end)
			continue;
		bitfield |= start;
		nodes_used++;
		if (end > memtop)
			memtop = end;
	}
	if (nodes_used <= 1)
		i = 63;
	else
		i = find_first_bit(&bitfield, sizeof(unsigned long)*8);
	memnodemapsize = (memtop >> i) + 1;
	return i;
}
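
/*
 * Example: nodes starting at 0x0 and 0x100000000 leave bit 32 as the lowest
 * set bit in the OR of the start addresses, so shift = 32 is the largest
 * value that still distinguishes them; with memtop = 0x200000000 the map
 * needs (0x200000000 >> 32) + 1 = 3 entries.
 */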

int __init compute_hash_shift(struct bootnode *nodes, int numnodes,
			      int *nodeids)
{
	int shift;

	shift = extract_lsb_from_nodes(nodes, numnodes);
	if (allocate_cachealigned_memnodemap())
		return -1;
	printk(KERN_DEBUG "NUMA: Using %d for the hash shift.\n", shift);

	if (populate_memnodemap(nodes, numnodes, shift, nodeids) != 1) {
		printk(KERN_INFO "Your memory is not aligned; you need to "
		       "rebuild your kernel with a bigger NODEMAPSIZE, "
		       "shift=%d\n", shift);
		return -1;
	}
	return shift;
}
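
/*
 * Callers here and in the SRAT/AMD topology code are expected to install the
 * returned value in memnode_shift; a negative return means no shift produced
 * a collision-free map and NUMA setup must fall back.
 */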

int __meminit __early_pfn_to_nid(unsigned long pfn)
{
	return phys_to_nid(pfn << PAGE_SHIFT);
}

static void * __init early_node_mem(int nodeid, unsigned long start,
				    unsigned long end, unsigned long size,
				    unsigned long align)
{
	unsigned long mem;

	/*
	 * Put it as high as possible, since other early allocations
	 * will be placed alongside NODE_DATA.
	 */
	if (start < (MAX_DMA_PFN<<PAGE_SHIFT))
		start = MAX_DMA_PFN<<PAGE_SHIFT;
	if (start < (MAX_DMA32_PFN<<PAGE_SHIFT) &&
	    end > (MAX_DMA32_PFN<<PAGE_SHIFT))
		start = MAX_DMA32_PFN<<PAGE_SHIFT;
	mem = memblock_x86_find_in_range_node(nodeid, start, end, size, align);
	if (mem != MEMBLOCK_ERROR)
		return __va(mem);

	/* extend the search scope */
	end = max_pfn_mapped << PAGE_SHIFT;
	start = MAX_DMA_PFN << PAGE_SHIFT;
	mem = memblock_find_in_range(start, end, size, align);
	if (mem != MEMBLOCK_ERROR)
		return __va(mem);

	printk(KERN_ERR "Cannot find %lu bytes in node %d\n",
	       size, nodeid);

	return NULL;
}

/* Initialize bootmem allocator for a node */
void __init
setup_node_bootmem(int nodeid, unsigned long start, unsigned long end)
{
	unsigned long start_pfn, last_pfn, nodedata_phys;
	const int pgdat_size = roundup(sizeof(pg_data_t), PAGE_SIZE);
	int nid;

	if (!end)
		return;

	/*
	 * Don't confuse VM with a node that doesn't have the
	 * minimum amount of memory:
	 */
	if (end && (end - start) < NODE_MIN_SIZE)
		return;

	start = roundup(start, ZONE_ALIGN);

	printk(KERN_INFO "Initmem setup node %d %016lx-%016lx\n", nodeid,
	       start, end);

	start_pfn = start >> PAGE_SHIFT;
	last_pfn = end >> PAGE_SHIFT;

	node_data[nodeid] = early_node_mem(nodeid, start, end, pgdat_size,
					   SMP_CACHE_BYTES);
	if (node_data[nodeid] == NULL)
		return;
	nodedata_phys = __pa(node_data[nodeid]);
	memblock_x86_reserve_range(nodedata_phys, nodedata_phys + pgdat_size, "NODE_DATA");
	printk(KERN_INFO "  NODE_DATA [%016lx - %016lx]\n", nodedata_phys,
		nodedata_phys + pgdat_size - 1);
	nid = phys_to_nid(nodedata_phys);
	if (nid != nodeid)
		printk(KERN_INFO "    NODE_DATA(%d) on node %d\n", nodeid, nid);

	memset(NODE_DATA(nodeid), 0, sizeof(pg_data_t));
	NODE_DATA(nodeid)->node_id = nodeid;
	NODE_DATA(nodeid)->node_start_pfn = start_pfn;
	NODE_DATA(nodeid)->node_spanned_pages = last_pfn - start_pfn;

	node_set_online(nodeid);
}
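
/*
 * Note that setup_node_bootmem() silently skips nodes smaller than
 * NODE_MIN_SIZE, so the VM is never handed a node too small to hold its own
 * bookkeeping.
 */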

#ifdef CONFIG_NUMA_EMU
/* Numa emulation */
static struct bootnode nodes[MAX_NUMNODES] __initdata;
static struct bootnode physnodes[MAX_NUMNODES] __cpuinitdata;
static char *cmdline __initdata;

void __init numa_emu_cmdline(char *str)
{
	cmdline = str;
}
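
/*
 * numa_emulation() below accepts either a node count or a fixed node size:
 * e.g. "numa=fake=4" splits RAM into four fake nodes, while
 * "numa=fake=512M" creates as many 512MB nodes as fit.
 */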

static int __init setup_physnodes(unsigned long start, unsigned long end,
					int acpi, int amd)
{
	int ret = 0;
	int i;

	memset(physnodes, 0, sizeof(physnodes));
#ifdef CONFIG_ACPI_NUMA
	if (acpi)
		acpi_get_nodes(physnodes, start, end);
#endif
#ifdef CONFIG_AMD_NUMA
	if (amd)
		amd_get_nodes(physnodes);
#endif
	/*
	 * Basic sanity checking on the physical node map: there may be errors
	 * if the SRAT or AMD code incorrectly reported the topology or the
	 * mem= kernel parameter is used.
	 */
	for (i = 0; i < MAX_NUMNODES; i++) {
		if (physnodes[i].start == physnodes[i].end)
			continue;
		if (physnodes[i].start > end) {
			physnodes[i].end = physnodes[i].start;
			continue;
		}
		if (physnodes[i].end < start) {
			physnodes[i].start = physnodes[i].end;
			continue;
		}
		if (physnodes[i].start < start)
			physnodes[i].start = start;
		if (physnodes[i].end > end)
			physnodes[i].end = end;
		ret++;
	}

	/*
	 * If no physical topology was detected, a single node is faked to
	 * cover the entire address space.
	 */
	if (!ret) {
		physnodes[ret].start = start;
		physnodes[ret].end = end;
		ret = 1;
	}
	return ret;
}

static void __init fake_physnodes(int acpi, int amd, int nr_nodes)
{
	int i;

	BUG_ON(acpi && amd);
#ifdef CONFIG_ACPI_NUMA
	if (acpi)
		acpi_fake_nodes(nodes, nr_nodes);
#endif
#ifdef CONFIG_AMD_NUMA
	if (amd)
		amd_fake_nodes(nodes, nr_nodes);
#endif
	if (!acpi && !amd)
		for (i = 0; i < nr_cpu_ids; i++)
			numa_set_node(i, 0);
}

/*
 * Sets up nid to range from *addr to *addr + size.  If the end boundary is
 * greater than max_addr, then max_addr is used instead.  The return value is
 * 0 if there is additional memory left for allocation past *addr and -1
 * otherwise.  *addr is adjusted to be at the end of the node.
 */
static int __init setup_node_range(int nid, u64 *addr, u64 size, u64 max_addr)
{
	int ret = 0;

	nodes[nid].start = *addr;
	*addr += size;
	if (*addr >= max_addr) {
		*addr = max_addr;
		ret = -1;
	}
	nodes[nid].end = *addr;
	node_set(nid, node_possible_map);
	printk(KERN_INFO "Faking node %d at %016Lx-%016Lx (%LuMB)\n", nid,
	       nodes[nid].start, nodes[nid].end,
	       (nodes[nid].end - nodes[nid].start) >> 20);
	return ret;
}
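
/*
 * Example: setup_node_range(0, &addr, 0x20000000, max_addr) with addr = 0x0
 * fakes node 0 at 0x0-0x20000000 (512MB) and advances addr to 0x20000000,
 * returning -1 only if max_addr was reached.
 */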

/*
 * Sets up nr_nodes fake nodes interleaved over physical nodes ranging from
 * addr to max_addr.  The return value is the number of nodes allocated.
 */
static int __init split_nodes_interleave(u64 addr, u64 max_addr, int nr_nodes)
{
	nodemask_t physnode_mask = NODE_MASK_NONE;
	u64 size;
	int big;
	int ret = 0;
	int i;

	if (nr_nodes <= 0)
		return -1;
	if (nr_nodes > MAX_NUMNODES) {
		pr_info("numa=fake=%d too large, reducing to %d\n",
			nr_nodes, MAX_NUMNODES);
		nr_nodes = MAX_NUMNODES;
	}

	size = (max_addr - addr - memblock_x86_hole_size(addr, max_addr)) / nr_nodes;
	/*
	 * Calculate the number of big nodes that can be allocated as a result
	 * of consolidating the remainder.
	 */
	big = ((size & ~FAKE_NODE_MIN_HASH_MASK) * nr_nodes) /
		FAKE_NODE_MIN_SIZE;

	size &= FAKE_NODE_MIN_HASH_MASK;
	if (!size) {
		pr_err("Not enough memory for each node.  "
			"NUMA emulation disabled.\n");
		return -1;
	}

	for (i = 0; i < MAX_NUMNODES; i++)
		if (physnodes[i].start != physnodes[i].end)
			node_set(i, physnode_mask);

	/*
	 * Continue to fill physical nodes with fake nodes until there is no
	 * memory left on any of them.
	 */
	while (nodes_weight(physnode_mask)) {
		for_each_node_mask(i, physnode_mask) {
			u64 end = physnodes[i].start + size;
			u64 dma32_end = PFN_PHYS(MAX_DMA32_PFN);

			if (ret < big)
				end += FAKE_NODE_MIN_SIZE;

			/*
			 * Continue to add memory to this fake node if its
			 * non-reserved memory is less than the per-node size.
			 */
			while (end - physnodes[i].start -
				memblock_x86_hole_size(physnodes[i].start, end) < size) {
				end += FAKE_NODE_MIN_SIZE;
				if (end > physnodes[i].end) {
					end = physnodes[i].end;
					break;
				}
			}

			/*
			 * If there won't be at least FAKE_NODE_MIN_SIZE of
			 * non-reserved memory in ZONE_DMA32 for the next node,
			 * this one must extend to the boundary.
			 */
			if (end < dma32_end && dma32_end - end -
			    memblock_x86_hole_size(end, dma32_end) < FAKE_NODE_MIN_SIZE)
				end = dma32_end;

			/*
			 * If there won't be enough non-reserved memory for the
			 * next node, this one must extend to the end of the
			 * physical node.
			 */
			if (physnodes[i].end - end -
			    memblock_x86_hole_size(end, physnodes[i].end) < size)
				end = physnodes[i].end;

			/*
			 * Avoid allocating more nodes than requested, which can
			 * happen as a result of rounding down each node's size
			 * to FAKE_NODE_MIN_SIZE.
			 */
			if (nodes_weight(physnode_mask) + ret >= nr_nodes)
				end = physnodes[i].end;

			if (setup_node_range(ret++, &physnodes[i].start,
					     end - physnodes[i].start,
					     physnodes[i].end) < 0)
				node_clear(i, physnode_mask);
		}
	}
	return ret;
}
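
/*
 * Example: "numa=fake=3" divides usable (non-hole) memory by three and
 * rounds each node down to a multiple of FAKE_NODE_MIN_SIZE; the remainder
 * is consolidated by giving the first "big" nodes one extra
 * FAKE_NODE_MIN_SIZE chunk each.
 */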

/*
 * Returns the end address of a node so that there is at least `size' amount
 * of non-reserved memory or `max_addr' is reached.
 */
static u64 __init find_end_of_node(u64 start, u64 max_addr, u64 size)
{
	u64 end = start + size;

	while (end - start - memblock_x86_hole_size(start, end) < size) {
		end += FAKE_NODE_MIN_SIZE;
		if (end > max_addr) {
			end = max_addr;
			break;
		}
	}
	return end;
}
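
/*
 * Example: find_end_of_node(start, max_addr, 512MB) over a range containing
 * a 64MB e820 hole keeps stepping end forward in FAKE_NODE_MIN_SIZE
 * increments until 512MB of non-hole memory is spanned or max_addr is hit.
 */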

/*
 * Sets up fake nodes of `size' interleaved over physical nodes ranging from
 * `addr' to `max_addr'.  The return value is the number of nodes allocated.
 */
static int __init split_nodes_size_interleave(u64 addr, u64 max_addr, u64 size)
{
	nodemask_t physnode_mask = NODE_MASK_NONE;
	u64 min_size;
	int ret = 0;
	int i;

	if (!size)
		return -1;
	/*
	 * The limit on emulated nodes is MAX_NUMNODES, so the size per node is
	 * increased accordingly if the requested size is too small.  This
	 * creates a uniform distribution of node sizes across the entire
	 * machine (but not necessarily over physical nodes).
	 */
	min_size = (max_addr - addr - memblock_x86_hole_size(addr, max_addr)) /
						MAX_NUMNODES;
	min_size = max(min_size, FAKE_NODE_MIN_SIZE);
	if ((min_size & FAKE_NODE_MIN_HASH_MASK) < min_size)
		min_size = (min_size + FAKE_NODE_MIN_SIZE) &
						FAKE_NODE_MIN_HASH_MASK;
	if (size < min_size) {
		pr_err("Fake node size %LuMB too small, increasing to %LuMB\n",
			size >> 20, min_size >> 20);
		size = min_size;
	}
	size &= FAKE_NODE_MIN_HASH_MASK;

	for (i = 0; i < MAX_NUMNODES; i++)
		if (physnodes[i].start != physnodes[i].end)
			node_set(i, physnode_mask);
	/*
	 * Fill physical nodes with fake nodes of size until there is no memory
	 * left on any of them.
	 */
	while (nodes_weight(physnode_mask)) {
		for_each_node_mask(i, physnode_mask) {
			u64 dma32_end = MAX_DMA32_PFN << PAGE_SHIFT;
			u64 end;

			end = find_end_of_node(physnodes[i].start,
						physnodes[i].end, size);
			/*
			 * If there won't be at least FAKE_NODE_MIN_SIZE of
			 * non-reserved memory in ZONE_DMA32 for the next node,
			 * this one must extend to the boundary.
			 */
			if (end < dma32_end && dma32_end - end -
			    memblock_x86_hole_size(end, dma32_end) < FAKE_NODE_MIN_SIZE)
				end = dma32_end;

			/*
			 * If there won't be enough non-reserved memory for the
			 * next node, this one must extend to the end of the
			 * physical node.
			 */
			if (physnodes[i].end - end -
			    memblock_x86_hole_size(end, physnodes[i].end) < size)
				end = physnodes[i].end;

			/*
			 * Set up the fake node that will be allocated as
			 * bootmem later.  If setup_node_range() returns
			 * non-zero, there is no more memory available on this
			 * physical node.
			 */
			if (setup_node_range(ret++, &physnodes[i].start,
					     end - physnodes[i].start,
					     physnodes[i].end) < 0)
				node_clear(i, physnode_mask);
		}
	}
	return ret;
}
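
/*
 * Example: "numa=fake=128M" carves each physical node into 128MB fake nodes;
 * if 128MB is below the computed minimum (usable memory spread across
 * MAX_NUMNODES), the size is raised and a message is printed.
 */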

/*
 * Sets up the system RAM area from start_pfn to last_pfn according to the
 * numa=fake command-line option.
 */
static int __init numa_emulation(unsigned long start_pfn,
			unsigned long last_pfn, int acpi, int amd)
{
	u64 addr = start_pfn << PAGE_SHIFT;
	u64 max_addr = last_pfn << PAGE_SHIFT;
	int num_nodes;
	int i;

	/*
	 * If the numa=fake command-line contains a 'M' or 'G', it represents
	 * the fixed node size.  Otherwise, if it is just a single number N,
	 * split the system RAM into N fake nodes.
	 */
	if (strchr(cmdline, 'M') || strchr(cmdline, 'G')) {
		u64 size;

		size = memparse(cmdline, &cmdline);
		num_nodes = split_nodes_size_interleave(addr, max_addr, size);
	} else {
		unsigned long n;

		n = simple_strtoul(cmdline, NULL, 0);
		num_nodes = split_nodes_interleave(addr, max_addr, n);
	}

	if (num_nodes < 0)
		return num_nodes;
	memnode_shift = compute_hash_shift(nodes, num_nodes, NULL);
	if (memnode_shift < 0) {
		memnode_shift = 0;
		printk(KERN_ERR "No NUMA hash function found.  NUMA emulation "
		       "disabled.\n");
		return -1;
	}

	/*
	 * We need to vacate all active ranges that may have been registered
	 * for the e820 memory map.
	 */
	remove_all_active_ranges();
	for_each_node_mask(i, node_possible_map)
		memblock_x86_register_active_regions(i, nodes[i].start >> PAGE_SHIFT,
						nodes[i].end >> PAGE_SHIFT);
	init_memory_mapping_high();
	for_each_node_mask(i, node_possible_map)
		setup_node_bootmem(i, nodes[i].start, nodes[i].end);
	setup_physnodes(addr, max_addr, acpi, amd);
	fake_physnodes(acpi, amd, num_nodes);
	return 0;
}
#endif /* CONFIG_NUMA_EMU */
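
/*
 * Node detection order in initmem_init() below: firmware tables first (ACPI
 * SRAT, then the AMD northbridge scan), optionally overridden by numa=fake
 * emulation; if every method fails, a single dummy node covering all of
 * memory is set up.
 */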

void __init initmem_init(void)
{
	int acpi = 0, amd = 0;
	int i;

#ifdef CONFIG_ACPI_NUMA
	/*
	 * Parse SRAT to discover nodes.
	 */
	acpi = !x86_acpi_numa_init();
#endif

#ifdef CONFIG_AMD_NUMA
	if (!acpi)
		amd = !amd_numa_init();
#endif

	nodes_clear(node_possible_map);
	nodes_clear(node_online_map);

#ifdef CONFIG_NUMA_EMU
	setup_physnodes(0, max_pfn << PAGE_SHIFT, acpi, amd);
	if (cmdline && !numa_emulation(0, max_pfn, acpi, amd))
		return;
	setup_physnodes(0, max_pfn << PAGE_SHIFT, acpi, amd);
	nodes_clear(node_possible_map);
	nodes_clear(node_online_map);
#endif

#ifdef CONFIG_ACPI_NUMA
	if (!numa_off && acpi && !acpi_scan_nodes())
		return;
	nodes_clear(node_possible_map);
	nodes_clear(node_online_map);
#endif

#ifdef CONFIG_AMD_NUMA
	if (!numa_off && amd && !amd_scan_nodes())
		return;
	nodes_clear(node_possible_map);
	nodes_clear(node_online_map);
#endif

	printk(KERN_INFO "%s\n",
	       numa_off ? "NUMA turned off" : "No NUMA configuration found");

	printk(KERN_INFO "Faking a node at %016lx-%016lx\n",
	       0LU, max_pfn << PAGE_SHIFT);
	/* setup dummy node covering all memory */
	memnode_shift = 63;
	memnodemap = memnode.embedded_map;
	memnodemap[0] = 0;
	node_set_online(0);
	node_set(0, node_possible_map);
	for (i = 0; i < MAX_LOCAL_APIC; i++)
		set_apicid_to_node(i, NUMA_NO_NODE);
	memblock_x86_register_active_regions(0, 0, max_pfn);
	init_memory_mapping_high();
	setup_node_bootmem(0, 0, max_pfn << PAGE_SHIFT);
}

unsigned long __init numa_free_all_bootmem(void)
{
	unsigned long pages = 0;
	int i;

	for_each_online_node(i)
		pages += free_all_bootmem_node(NODE_DATA(i));

	pages += free_all_memory_core_early(MAX_NUMNODES);

	return pages;
}

int __cpuinit numa_cpu_node(int cpu)
{
	int apicid = early_per_cpu(x86_cpu_to_apicid, cpu);

	if (apicid != BAD_APICID)
		return __apicid_to_node[apicid];
	return NUMA_NO_NODE;
}

/*
 * UGLINESS AHEAD: Currently, CONFIG_NUMA_EMU is 64bit only and makes use
 * of 64bit specific data structures.  The distinction is artificial and
 * should be removed.  numa_{add|remove}_cpu() are implemented in numa.c
 * for both 32 and 64bit when CONFIG_NUMA_EMU is disabled but here when
 * it is enabled.
 *
 * NUMA emulation is planned to be made generic and the following and other
 * related code should be moved to numa.c.
 */
#ifdef CONFIG_NUMA_EMU
# ifndef CONFIG_DEBUG_PER_CPU_MAPS
void __cpuinit numa_add_cpu(int cpu)
{
	unsigned long addr;
	int physnid, nid;

	nid = numa_cpu_node(cpu);
	if (nid == NUMA_NO_NODE)
		nid = early_cpu_to_node(cpu);
	BUG_ON(nid == NUMA_NO_NODE || !node_online(nid));

	/*
	 * Use the starting address of the emulated node to find which physical
	 * node it is allocated on.
	 */
	addr = node_start_pfn(nid) << PAGE_SHIFT;
	for (physnid = 0; physnid < MAX_NUMNODES; physnid++)
		if (addr >= physnodes[physnid].start &&
		    addr < physnodes[physnid].end)
			break;

	/*
	 * Map the cpu to each emulated node that is allocated on the physical
	 * node of the cpu's apic id.
	 */
	for_each_online_node(nid) {
		addr = node_start_pfn(nid) << PAGE_SHIFT;
		if (addr >= physnodes[physnid].start &&
		    addr < physnodes[physnid].end)
			cpumask_set_cpu(cpu, node_to_cpumask_map[nid]);
	}
}

void __cpuinit numa_remove_cpu(int cpu)
{
	int i;

	for_each_online_node(i)
		cpumask_clear_cpu(cpu, node_to_cpumask_map[i]);
}
# else	/* !CONFIG_DEBUG_PER_CPU_MAPS */
static void __cpuinit numa_set_cpumask(int cpu, int enable)
{
	int node = early_cpu_to_node(cpu);
	struct cpumask *mask;
	int i;

	if (node == NUMA_NO_NODE) {
		/* early_cpu_to_node() already emits a warning and trace */
		return;
	}
	for_each_online_node(i) {
		unsigned long addr;

		addr = node_start_pfn(i) << PAGE_SHIFT;
		if (addr < physnodes[node].start ||
		    addr >= physnodes[node].end)
			continue;
		mask = debug_cpumask_set_cpu(cpu, enable);
		if (!mask)
			return;

		if (enable)
			cpumask_set_cpu(cpu, mask);
		else
			cpumask_clear_cpu(cpu, mask);
	}
}

void __cpuinit numa_add_cpu(int cpu)
{
	numa_set_cpumask(cpu, 1);
}

void __cpuinit numa_remove_cpu(int cpu)
{
	numa_set_cpumask(cpu, 0);
}
# endif	/* !CONFIG_DEBUG_PER_CPU_MAPS */
#endif	/* CONFIG_NUMA_EMU */