/*
 * Generic VM initialization for x86-64 NUMA setups.
 * Copyright 2002,2003 Andi Kleen, SuSE Labs.
 */
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/string.h>
#include <linux/init.h>
#include <linux/bootmem.h>
#include <linux/mmzone.h>
#include <linux/ctype.h>
#include <linux/module.h>
#include <linux/nodemask.h>

#include <asm/e820.h>
#include <asm/proto.h>
#include <asm/dma.h>
#include <asm/numa.h>
#include <asm/acpi.h>

#ifndef Dprintk
#define Dprintk(x...)
#endif
struct pglist_data *node_data[MAX_NUMNODES] __read_mostly;
EXPORT_SYMBOL(node_data);

bootmem_data_t plat_node_bdata[MAX_NUMNODES];

struct memnode memnode;

int cpu_to_node_map[NR_CPUS] __read_mostly = {
        [0 ... NR_CPUS-1] = NUMA_NO_NODE
};
EXPORT_SYMBOL(cpu_to_node_map);

unsigned char apicid_to_node[MAX_LOCAL_APIC] __cpuinitdata = {
        [0 ... MAX_LOCAL_APIC-1] = NUMA_NO_NODE
};

cpumask_t node_to_cpumask_map[MAX_NUMNODES] __read_mostly;
EXPORT_SYMBOL(node_to_cpumask_map);

int numa_off __initdata;
unsigned long __initdata nodemap_addr;
unsigned long __initdata nodemap_size;
/*
 * Given a shift value, try to populate memnodemap[].
 * Returns:
 * 1 if OK
 * 0 if memnodemap[] too small (or shift too small)
 * -1 if node overlap or lost ram (shift too big)
 */
static int __init populate_memnodemap(const struct bootnode *nodes,
                                      int numnodes, int shift)
{
        unsigned long addr, end;
        int i, res = -1;

        memset(memnodemap, 0xff, memnodemapsize);
        for (i = 0; i < numnodes; i++) {
                addr = nodes[i].start;
                end = nodes[i].end;
                if (addr >= end)
                        continue;
                if ((end >> shift) >= memnodemapsize)
                        return 0;
                do {
                        if (memnodemap[addr >> shift] != 0xff)
                                return -1;
                        memnodemap[addr >> shift] = i;
                        addr += (1UL << shift);
                } while (addr < end);
                res = 1;
        }
        return res;
}
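/*
 * Worked example (illustrative, values assumed): with shift == 24 each
 * memnodemap[] slot covers 16MB of physical address space, so a node
 * spanning 0x40000000-0x80000000 fills slots 0x40 through 0x7f with
 * its node id, and a later lookup of an address in that range, e.g.
 * 0x50000000 >> 24 == 0x50, resolves to that node in O(1).
 */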
static int __init allocate_cachealigned_memnodemap(void)
{
        unsigned long pad, pad_addr;

        memnodemap = memnode.embedded_map;
        if (memnodemapsize <= 48)
                return 0;

        pad = L1_CACHE_BYTES - 1;
        pad_addr = 0x8000;
        nodemap_size = pad + memnodemapsize;
        nodemap_addr = find_e820_area(pad_addr, end_pfn<<PAGE_SHIFT,
                                      nodemap_size);
        if (nodemap_addr == -1UL) {
                printk(KERN_ERR
                       "NUMA: Unable to allocate Memory to Node hash map\n");
                nodemap_addr = nodemap_size = 0;
                return -1;
        }
        pad_addr = (nodemap_addr + pad) & ~pad;
        memnodemap = phys_to_virt(pad_addr);

        printk(KERN_DEBUG "NUMA: Allocated memnodemap from %lx - %lx\n",
               nodemap_addr, nodemap_addr + nodemap_size);
        return 0;
}
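/*
 * Alignment sketch (illustrative): with L1_CACHE_BYTES == 64, pad is
 * 63, so a nodemap_addr of 0x10027 rounds up to the next cache line
 * via (0x10027 + 63) & ~63 == 0x10040; the extra pad bytes are the
 * price for keeping every hash-table access cache-line aligned.
 */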
/*
 * The LSB of all start and end addresses in the node map is the value of the
 * maximum possible shift.
 */
static int __init extract_lsb_from_nodes(const struct bootnode *nodes,
                                         int numnodes)
{
        int i, nodes_used = 0;
        unsigned long start, end;
        unsigned long bitfield = 0, memtop = 0;

        for (i = 0; i < numnodes; i++) {
                start = nodes[i].start;
                end = nodes[i].end;
                if (start >= end)
                        continue;
                bitfield |= start;
                nodes_used++;
                if (end > memtop)
                        memtop = end;
        }
        if (nodes_used <= 1)
                i = 63;
        else
                i = find_first_bit(&bitfield, sizeof(unsigned long)*8);
        memnodemapsize = (memtop >> i) + 1;
        return i;
}
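/*
 * Example (illustrative): two nodes starting at 0x0 and 0x80000000
 * give bitfield == 0x80000000, so find_first_bit() returns 31 and
 * the 2GB-granular hash is the finest one whose slots never straddle
 * a node boundary; memnodemapsize then becomes (memtop >> 31) + 1.
 */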
int __init compute_hash_shift(struct bootnode *nodes, int numnodes)
{
        int shift;

        shift = extract_lsb_from_nodes(nodes, numnodes);
        if (allocate_cachealigned_memnodemap())
                return -1;
        printk(KERN_DEBUG "NUMA: Using %d for the hash shift.\n",
               shift);

        if (populate_memnodemap(nodes, numnodes, shift) != 1) {
                printk(KERN_INFO "Your memory is not aligned; you need to "
                       "rebuild your kernel with a bigger NODEMAPSIZE, "
                       "shift=%d\n", shift);
                return -1;
        }
        return shift;
}

#ifdef CONFIG_SPARSEMEM
int early_pfn_to_nid(unsigned long pfn)
{
        return phys_to_nid(pfn << PAGE_SHIFT);
}
#endif
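/*
 * Usage sketch (assumes the phys_to_nid() definition from the x86-64
 * mmzone header): once compute_hash_shift() has succeeded,
 * early_pfn_to_nid(pfn) is effectively
 * memnodemap[(pfn << PAGE_SHIFT) >> memnode_shift].
 */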
static void * __init early_node_mem(int nodeid, unsigned long start,
                                    unsigned long end, unsigned long size)
{
        unsigned long mem = find_e820_area(start, end, size);
        void *ptr;

        if (mem != -1L)
                return __va(mem);
        ptr = __alloc_bootmem_nopanic(size,
                                      SMP_CACHE_BYTES, __pa(MAX_DMA_ADDRESS));
        if (ptr == NULL) {
                printk(KERN_ERR "Cannot find %lu bytes in node %d\n",
                       size, nodeid);
                return NULL;
        }
        return ptr;
}
/* Initialize bootmem allocator for a node */
void __init setup_node_bootmem(int nodeid, unsigned long start,
                               unsigned long end)
{
        unsigned long start_pfn, end_pfn, bootmap_pages, bootmap_size;
        unsigned long bootmap_start, nodedata_phys;
        void *bootmap;
        const int pgdat_size = round_up(sizeof(pg_data_t), PAGE_SIZE);

        start = round_up(start, ZONE_ALIGN);

        printk(KERN_INFO "Bootmem setup node %d %016lx-%016lx\n", nodeid,
               start, end);

        start_pfn = start >> PAGE_SHIFT;
        end_pfn = end >> PAGE_SHIFT;

        node_data[nodeid] = early_node_mem(nodeid, start, end, pgdat_size);
        if (node_data[nodeid] == NULL)
                return;
        nodedata_phys = __pa(node_data[nodeid]);

        memset(NODE_DATA(nodeid), 0, sizeof(pg_data_t));
        NODE_DATA(nodeid)->bdata = &plat_node_bdata[nodeid];
        NODE_DATA(nodeid)->node_start_pfn = start_pfn;
        NODE_DATA(nodeid)->node_spanned_pages = end_pfn - start_pfn;

        /* Find a place for the bootmem map */
        bootmap_pages = bootmem_bootmap_pages(end_pfn - start_pfn);
        bootmap_start = round_up(nodedata_phys + pgdat_size, PAGE_SIZE);
        bootmap = early_node_mem(nodeid, bootmap_start, end,
                                 bootmap_pages<<PAGE_SHIFT);
        if (bootmap == NULL) {
                if (nodedata_phys < start || nodedata_phys >= end)
                        free_bootmem((unsigned long)node_data[nodeid],
                                     pgdat_size);
                node_data[nodeid] = NULL;
                return;
        }
        bootmap_start = __pa(bootmap);
        Dprintk("bootmap start %lu pages %lu\n", bootmap_start, bootmap_pages);

        bootmap_size = init_bootmem_node(NODE_DATA(nodeid),
                                         bootmap_start >> PAGE_SHIFT,
                                         start_pfn, end_pfn);

        free_bootmem_with_active_regions(nodeid, end);

        reserve_bootmem_node(NODE_DATA(nodeid), nodedata_phys, pgdat_size);
        reserve_bootmem_node(NODE_DATA(nodeid), bootmap_start,
                             bootmap_pages<<PAGE_SHIFT);
#ifdef CONFIG_ACPI_NUMA
        srat_reserve_add_area(nodeid);
#endif
        node_set_online(nodeid);
}
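/*
 * Layout sketch (illustrative): for a node spanning 1GB, the page
 * aligned pg_data_t is placed first, the bootmem bitmap follows at
 * the next page boundary (one bit per page, i.e. 32KB for 256K
 * pages), and both are reserved again via reserve_bootmem_node() so
 * the fresh allocator cannot hand its own metadata out.
 */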
#ifdef CONFIG_FLAT_NODE_MEM_MAP
/* Initialize final allocator for a zone */
static void __init flat_setup_node_zones(int nodeid)
{
        unsigned long start_pfn, end_pfn, memmapsize, limit;

        start_pfn = node_start_pfn(nodeid);
        end_pfn = node_end_pfn(nodeid);

        Dprintk(KERN_INFO "Setting up memmap for node %d %lx-%lx\n",
                nodeid, start_pfn, end_pfn);

        /*
         * Try to allocate mem_map at end to not fill up precious <4GB
         * memory.
         */
        memmapsize = sizeof(struct page) * (end_pfn - start_pfn);
        limit = end_pfn << PAGE_SHIFT;

        NODE_DATA(nodeid)->node_mem_map =
                __alloc_bootmem_core(NODE_DATA(nodeid)->bdata,
                                     memmapsize, SMP_CACHE_BYTES,
                                     round_down(limit - memmapsize, PAGE_SIZE),
                                     limit);
}
#else
#define flat_setup_node_zones(i) do {} while (0)
#endif
/*
 * There are unfortunately some poorly designed mainboards around that
 * only connect memory to a single CPU. This breaks the 1:1 cpu->node
 * mapping. To avoid this, fill in the mapping for all possible CPUs,
 * since the number of CPUs is not known yet. We round robin the
 * existing nodes.
 */
void __init numa_init_array(void)
{
        int rr, i;

        rr = first_node(node_online_map);
        for (i = 0; i < NR_CPUS; i++) {
                if (cpu_to_node(i) != NUMA_NO_NODE)
                        continue;
                numa_set_node(i, rr);
                rr = next_node(rr, node_online_map);
                if (rr == MAX_NUMNODES)
                        rr = first_node(node_online_map);
        }
}
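/*
 * Example (illustrative): with only nodes 0 and 1 online, CPUs that
 * lack a firmware-provided node are assigned 0, 1, 0, 1, ... in turn;
 * CPUs whose cpu_to_node() is already valid are left alone.
 */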
#ifdef CONFIG_NUMA_EMU
/* Numa emulation */
char *cmdline __initdata;

/*
 * Sets up nid to range from addr to addr + size.  If the end boundary
 * is greater than max_addr, then max_addr is used instead.  The return
 * value is 0 if there is additional memory left for allocation past
 * addr and -1 otherwise.  addr is adjusted to be at the end of the
 * node.
 */
static int __init setup_node_range(int nid, struct bootnode *nodes, u64 *addr,
                                   u64 size, u64 max_addr)
{
        int ret = 0;

        nodes[nid].start = *addr;
        *addr += size;
        if (*addr >= max_addr) {
                *addr = max_addr;
                ret = -1;
        }
        nodes[nid].end = *addr;
        node_set(nid, node_possible_map);
        printk(KERN_INFO "Faking node %d at %016Lx-%016Lx (%LuMB)\n", nid,
               nodes[nid].start, nodes[nid].end,
               (nodes[nid].end - nodes[nid].start) >> 20);
        return ret;
}
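/*
 * Example (illustrative): with *addr == 0, size == 512MB and
 * max_addr == 1GB, the node becomes [0, 512MB), *addr advances to
 * 512MB and 0 is returned; a following 768MB request is clamped at
 * 1GB and returns -1, signalling that the address space is used up.
 */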
/*
 * Splits num_nodes nodes up equally starting at node_start.  The return value
 * is the number of nodes split up and addr is adjusted to be at the end of the
 * last node allocated.
 */
static int __init split_nodes_equally(struct bootnode *nodes, u64 *addr,
                                      u64 max_addr, int node_start,
                                      int num_nodes)
{
        unsigned int big;
        u64 size;
        int i;

        if (num_nodes <= 0)
                return -1;
        if (num_nodes > MAX_NUMNODES)
                num_nodes = MAX_NUMNODES;
        size = (max_addr - *addr - e820_hole_size(*addr, max_addr)) /
               num_nodes;
        /*
         * Calculate the number of big nodes that can be allocated as a result
         * of consolidating the leftovers.
         */
        big = ((size & ~FAKE_NODE_MIN_HASH_MASK) * num_nodes) /
              FAKE_NODE_MIN_SIZE;

        /* Round down to nearest FAKE_NODE_MIN_SIZE. */
        size &= FAKE_NODE_MIN_HASH_MASK;
        if (!size) {
                printk(KERN_ERR "Not enough memory for each node.  "
                       "NUMA emulation disabled.\n");
                return -1;
        }

        for (i = node_start; i < num_nodes + node_start; i++) {
                u64 end = *addr + size;

                if (i < big)
                        end += FAKE_NODE_MIN_SIZE;
                /*
                 * The final node can have the remaining system RAM.  Other
                 * nodes receive roughly the same amount of available pages.
                 */
                if (i == num_nodes + node_start - 1)
                        end = max_addr;
                else
                        while (end - *addr - e820_hole_size(*addr, end) <
                               size) {
                                end += FAKE_NODE_MIN_SIZE;
                                if (end > max_addr) {
                                        end = max_addr;
                                        break;
                                }
                        }
                if (setup_node_range(i, nodes, addr, end - *addr, max_addr) < 0)
                        break;
        }
        return i - node_start + 1;
}
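/*
 * Example (illustrative, assumes FAKE_NODE_MIN_SIZE == 64MB):
 * splitting 2GB of hole-free RAM three ways gives size == 682MB,
 * rounded down to 640MB; the 3 * 42MB of leftover makes big == 1, so
 * node 0 gets 704MB, node 1 gets 640MB, and the final node absorbs
 * the remaining 704MB up to max_addr.
 */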
/*
 * Splits the remaining system RAM into chunks of size.  The remaining memory
 * is always assigned to a final node and can be asymmetric.  Returns the
 * number of nodes split.
 */
static int __init split_nodes_by_size(struct bootnode *nodes, u64 *addr,
                                      u64 max_addr, int node_start, u64 size)
{
        int i = node_start;

        size = (size << 20) & FAKE_NODE_MIN_HASH_MASK;
        while (!setup_node_range(i++, nodes, addr, size, max_addr))
                ;
        return i - node_start;
}
/*
 * Sets up the system RAM area from start_pfn to end_pfn according to the
 * numa=fake command-line option.
 */
static int __init numa_emulation(unsigned long start_pfn, unsigned long end_pfn)
{
        struct bootnode nodes[MAX_NUMNODES];
        u64 size, addr = start_pfn << PAGE_SHIFT;
        u64 max_addr = end_pfn << PAGE_SHIFT;
        int num_nodes = 0, num = 0, coeff_flag, coeff = -1, i;

        memset(&nodes, 0, sizeof(nodes));
        /*
         * If the numa=fake command-line is just a single number N, split the
         * system RAM into N fake nodes.
         */
        if (!strchr(cmdline, '*') && !strchr(cmdline, ',')) {
                long n = simple_strtol(cmdline, NULL, 0);

                num_nodes = split_nodes_equally(nodes, &addr, max_addr, 0, n);
                if (num_nodes < 0)
                        return num_nodes;
                goto out;
        }
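        /*
         * Command-line examples (illustrative): "numa=fake=4" splits
         * RAM into four equal nodes, while "numa=fake=2*512,4*1024"
         * creates two 512MB nodes followed by four 1024MB nodes; the
         * parser below reads megabyte sizes, '*' coefficients and ','
         * separators, and the code after it decides what to do with
         * any remaining RAM.
         */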
        /* Parse the command line. */
        for (coeff_flag = 0; ; cmdline++) {
                if (*cmdline && isdigit(*cmdline)) {
                        num = num * 10 + *cmdline - '0';
                        continue;
                }
                if (*cmdline == '*') {
                        if (num > 0)
                                coeff = num;
                        coeff_flag = 1;
                }
                if (!*cmdline || *cmdline == ',') {
                        if (!coeff_flag)
                                coeff = 1;
                        /*
                         * Round down to the nearest FAKE_NODE_MIN_SIZE.
                         * Command-line coefficients are in megabytes.
                         */
                        size = ((u64)num << 20) & FAKE_NODE_MIN_HASH_MASK;
                        if (size)
                                for (i = 0; i < coeff; i++, num_nodes++)
                                        if (setup_node_range(num_nodes, nodes,
                                                &addr, size, max_addr) < 0)
                                                goto done;
                        if (!*cmdline)
                                break;
                        coeff_flag = 0;
                        coeff = -1;
                }
                num = 0;
        }
done:
        if (!num_nodes)
                return -1;
        /* Fill remainder of system RAM, if appropriate. */
        if (addr < max_addr) {
                if (coeff_flag && coeff < 0) {
                        /* Split remaining nodes into num-sized chunks */
                        num_nodes += split_nodes_by_size(nodes, &addr, max_addr,
                                                         num_nodes, num);
                        goto out;
                }
                switch (*(cmdline - 1)) {
                case '*':
                        /* Split remaining nodes into coeff chunks */
                        if (coeff <= 0)
                                break;
                        num_nodes += split_nodes_equally(nodes, &addr, max_addr,
                                                         num_nodes, coeff);
                        break;
                case ',':
                        /* Do not allocate remaining system RAM */
                        break;
                default:
                        /* Give one final node */
                        setup_node_range(num_nodes, nodes, &addr,
                                         max_addr - addr, max_addr);
                        num_nodes++;
                }
        }
out:
        memnode_shift = compute_hash_shift(nodes, num_nodes);
        if (memnode_shift < 0) {
                memnode_shift = 0;
                printk(KERN_ERR "No NUMA hash function found.  NUMA emulation "
                       "disabled.\n");
                return -1;
        }

        /*
         * We need to vacate all active ranges that may have been registered by
         * SRAT and set acpi_numa to -1 so that srat_disabled() always returns
         * true.  NUMA emulation has succeeded so we will not scan ACPI nodes.
         */
        remove_all_active_ranges();
#ifdef CONFIG_ACPI_NUMA
        acpi_numa = -1;
#endif
        for_each_node_mask(i, node_possible_map) {
                e820_register_active_regions(i, nodes[i].start >> PAGE_SHIFT,
                                             nodes[i].end >> PAGE_SHIFT);
                setup_node_bootmem(i, nodes[i].start, nodes[i].end);
        }
        acpi_fake_nodes(nodes, num_nodes);
        numa_init_array();
        return 0;
}
#endif /* CONFIG_NUMA_EMU */
void __init numa_initmem_init(unsigned long start_pfn, unsigned long end_pfn)
{
        int i;

        nodes_clear(node_possible_map);

#ifdef CONFIG_NUMA_EMU
        if (cmdline && !numa_emulation(start_pfn, end_pfn))
                return;
        nodes_clear(node_possible_map);
#endif

#ifdef CONFIG_ACPI_NUMA
        if (!numa_off && !acpi_scan_nodes(start_pfn << PAGE_SHIFT,
                                          end_pfn << PAGE_SHIFT))
                return;
        nodes_clear(node_possible_map);
#endif

#ifdef CONFIG_K8_NUMA
        if (!numa_off && !k8_scan_nodes(start_pfn<<PAGE_SHIFT,
                                        end_pfn<<PAGE_SHIFT))
                return;
        nodes_clear(node_possible_map);
#endif
        printk(KERN_INFO "%s\n",
               numa_off ? "NUMA turned off" : "No NUMA configuration found");

        printk(KERN_INFO "Faking a node at %016lx-%016lx\n",
               start_pfn << PAGE_SHIFT,
               end_pfn << PAGE_SHIFT);
        /* setup dummy node covering all memory */
        memnode_shift = 63;
        memnodemap = memnode.embedded_map;
        memnodemap[0] = 0;
        nodes_clear(node_online_map);
        node_set_online(0);
        node_set(0, node_possible_map);
        for (i = 0; i < NR_CPUS; i++)
                numa_set_node(i, 0);
        node_to_cpumask_map[0] = cpumask_of_cpu(0);
        e820_register_active_regions(0, start_pfn, end_pfn);
        setup_node_bootmem(0, start_pfn << PAGE_SHIFT, end_pfn << PAGE_SHIFT);
}
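/*
 * Discovery order (summary): fake-NUMA emulation if "numa=fake=" was
 * given, then ACPI SRAT, then the K8 northbridge registers; if all of
 * them fail (or numa=off was set), the dummy node above covers all
 * memory and memnode_shift == 63 hashes every address to slot 0.
 */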
__cpuinit void numa_add_cpu(int cpu)
{
        set_bit(cpu, (unsigned long *)&node_to_cpumask_map[cpu_to_node(cpu)]);
}

void __cpuinit numa_set_node(int cpu, int node)
{
        cpu_pda(cpu)->nodenumber = node;
        cpu_to_node_map[cpu] = node;
}
unsigned long __init numa_free_all_bootmem(void)
{
        unsigned long pages = 0;
        int i;

        for_each_online_node(i)
                pages += free_all_bootmem_node(NODE_DATA(i));

        return pages;
}
void __init paging_init(void)
{
        unsigned long max_zone_pfns[MAX_NR_ZONES];
        int i;

        memset(max_zone_pfns, 0, sizeof(max_zone_pfns));
        max_zone_pfns[ZONE_DMA] = MAX_DMA_PFN;
        max_zone_pfns[ZONE_DMA32] = MAX_DMA32_PFN;
        max_zone_pfns[ZONE_NORMAL] = end_pfn;

        sparse_memory_present_with_active_regions(MAX_NUMNODES);
        sparse_init();

        for_each_online_node(i)
                flat_setup_node_zones(i);

        free_area_init_nodes(max_zone_pfns);
}
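/*
 * Zone boundary example (illustrative, 4KB pages): ZONE_DMA covers
 * pfns below MAX_DMA_PFN (the legacy 16MB ISA window), ZONE_DMA32
 * covers pfns below MAX_DMA32_PFN (4GB), and ZONE_NORMAL runs from
 * there up to end_pfn.
 */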
static __init int numa_setup(char *opt)
{
        if (!opt)
                return -EINVAL;
        if (!strncmp(opt, "off", 3))
                numa_off = 1;
#ifdef CONFIG_NUMA_EMU
        if (!strncmp(opt, "fake=", 5))
                cmdline = opt + 5;
#endif
#ifdef CONFIG_ACPI_NUMA
        if (!strncmp(opt, "noacpi", 6))
                acpi_numa = -1;
        if (!strncmp(opt, "hotadd=", 7))
                hotadd_percent = simple_strtoul(opt+7, NULL, 10);
#endif
        return 0;
}

early_param("numa", numa_setup);
/*
 * Setup early cpu_to_node.
 *
 * Populate cpu_to_node[] only if the x86_cpu_to_apicid[] and
 * apicid_to_node[] tables have valid entries for a CPU.
 * This means we skip cpu_to_node[] initialisation for NUMA
 * emulation and the faked-node case (when running a kernel compiled
 * for NUMA on a non-NUMA box), which is OK, as cpu_to_node[] is
 * already initialized in a round robin manner at numa_init_array(),
 * prior to this call, and that initialization is good enough for the
 * fake NUMA cases.
 */
void __init init_cpu_to_node(void)
{
        int i;

        for (i = 0; i < NR_CPUS; i++) {
                u8 apicid = x86_cpu_to_apicid_init[i];

                if (apicid == BAD_APICID)
                        continue;
                if (apicid_to_node[apicid] == NUMA_NO_NODE)
                        continue;
                numa_set_node(i, apicid_to_node[apicid]);
        }
}