/*
 * Generic VM initialization for x86-64 NUMA setups.
 * Copyright 2002,2003 Andi Kleen, SuSE Labs.
 */
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/init.h>
#include <linux/bootmem.h>
#include <linux/memblock.h>
#include <linux/mmzone.h>
#include <linux/ctype.h>
#include <linux/module.h>
#include <linux/nodemask.h>
#include <linux/sched.h>
#include <linux/acpi.h>

#include <asm/proto.h>
#include <asm/amd_nb.h>
struct pglist_data *node_data[MAX_NUMNODES] __read_mostly;
EXPORT_SYMBOL(node_data);

nodemask_t cpu_nodes_parsed __initdata;
nodemask_t mem_nodes_parsed __initdata;

struct memnode memnode;

static unsigned long __initdata nodemap_addr;
static unsigned long __initdata nodemap_size;

static int num_node_memblks __initdata;
static struct bootnode node_memblk_range[NR_NODE_MEMBLKS] __initdata;
static int memblk_nodeid[NR_NODE_MEMBLKS] __initdata;

struct bootnode numa_nodes[MAX_NUMNODES] __initdata;
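
/*
 * memnodemap[] is a compact physical-address-to-node table: entry
 * (addr >> memnode_shift) holds the id of the node owning that physical
 * range, so phys_to_nid() can resolve an address with a single lookup.
 * compute_hash_shift() below picks the shift and sizes the table.
 */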
/*
 * Given a shift value, try to populate memnodemap[].
 * Returns:
 * 1 if OK
 * 0 if memnodemap[] is too small (or the shift is too small)
 * -1 if nodes overlap or RAM is lost (shift too big)
 */
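/*
 * Example: with shift = 27 (128MB per entry), a node spanning
 * 0x100000000-0x200000000 fills memnodemap[32..63] with its node id.
 */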
static int __init populate_memnodemap(const struct bootnode *nodes,
				      int numnodes, int shift, int *nodeids)
{
	unsigned long addr, end;

	memset(memnodemap, 0xff, sizeof(s16)*memnodemapsize);
	for (i = 0; i < numnodes; i++) {
		addr = nodes[i].start;
		if ((end >> shift) >= memnodemapsize)
		if (memnodemap[addr >> shift] != NUMA_NO_NODE)
			memnodemap[addr >> shift] = i;
			memnodemap[addr >> shift] = nodeids[i];
		addr += (1UL << shift);
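
/*
 * allocate_cachealigned_memnodemap() below keeps small maps in
 * memnode.embedded_map and only carves a cache-line aligned buffer out of
 * memblock (and reserves it) when the map does not fit there.
 */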
static int __init allocate_cachealigned_memnodemap(void)
{
	memnodemap = memnode.embedded_map;
	if (memnodemapsize <= ARRAY_SIZE(memnode.embedded_map))

	nodemap_size = roundup(sizeof(s16) * memnodemapsize, L1_CACHE_BYTES);
	nodemap_addr = memblock_find_in_range(addr, get_max_mapped(),
					      nodemap_size, L1_CACHE_BYTES);
	if (nodemap_addr == MEMBLOCK_ERROR) {
		printk(KERN_ERR
		       "NUMA: Unable to allocate Memory to Node hash map\n");
		nodemap_addr = nodemap_size = 0;

	memnodemap = phys_to_virt(nodemap_addr);
	memblock_x86_reserve_range(nodemap_addr, nodemap_addr + nodemap_size, "MEMNODEMAP");

	printk(KERN_DEBUG "NUMA: Allocated memnodemap from %lx - %lx\n",
	       nodemap_addr, nodemap_addr + nodemap_size);
/*
 * The LSB of all start and end addresses in the node map is the value of the
 * maximum possible shift.
 */
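/*
 * Example: with node boundaries at 0, 0x240000000 and 0x440000000, the
 * lowest set bit across the non-zero boundaries is bit 30, so a shift of
 * 30 (1GB granularity) is the largest that still distinguishes the nodes.
 */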
static int __init extract_lsb_from_nodes(const struct bootnode *nodes,
					 int numnodes)
{
	int i, nodes_used = 0;
	unsigned long start, end;
	unsigned long bitfield = 0, memtop = 0;

	for (i = 0; i < numnodes; i++) {
		start = nodes[i].start;

	i = find_first_bit(&bitfield, sizeof(unsigned long)*8);
	memnodemapsize = (memtop >> i) + 1;
static int __init compute_hash_shift(struct bootnode *nodes, int numnodes,
				     int *nodeids)
{
	shift = extract_lsb_from_nodes(nodes, numnodes);
	if (allocate_cachealigned_memnodemap())
	printk(KERN_DEBUG "NUMA: Using %d for the hash shift.\n",
	       shift);

	if (populate_memnodemap(nodes, numnodes, shift, nodeids) != 1) {
		printk(KERN_INFO "Your memory is not aligned; you need to "
		       "rebuild your kernel with a bigger NODEMAPSIZE "
		       "(shift=%d)\n", shift);
int __meminit __early_pfn_to_nid(unsigned long pfn)
{
	return phys_to_nid(pfn << PAGE_SHIFT);
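
/*
 * early_node_mem() below first tries to allocate node-local memory above
 * the DMA/DMA32 zones and falls back to any mapped memory when the
 * node-local search fails.
 */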
static void * __init early_node_mem(int nodeid, unsigned long start,
				    unsigned long end, unsigned long size,
				    unsigned long align)
{
	/*
	 * Put it as high as possible; NODE_DATA and related per-node data
	 * will go here.
	 */
	if (start < (MAX_DMA_PFN<<PAGE_SHIFT))
		start = MAX_DMA_PFN<<PAGE_SHIFT;
	if (start < (MAX_DMA32_PFN<<PAGE_SHIFT) &&
	    end > (MAX_DMA32_PFN<<PAGE_SHIFT))
		start = MAX_DMA32_PFN<<PAGE_SHIFT;
	mem = memblock_x86_find_in_range_node(nodeid, start, end, size, align);
	if (mem != MEMBLOCK_ERROR)

	/* extend the search scope */
	end = max_pfn_mapped << PAGE_SHIFT;
	start = MAX_DMA_PFN << PAGE_SHIFT;
	mem = memblock_find_in_range(start, end, size, align);
	if (mem != MEMBLOCK_ERROR)

	printk(KERN_ERR "Cannot find %lu bytes in node %d\n",
	       size, nodeid);
static __init int conflicting_memblks(unsigned long start, unsigned long end)
{
	for (i = 0; i < num_node_memblks; i++) {
		struct bootnode *nd = &node_memblk_range[i];
		if (nd->start == nd->end)
		if (nd->end > start && nd->start < end)
			return memblk_nodeid[i];
		if (nd->end == end && nd->start == start)
			return memblk_nodeid[i];
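
/*
 * conflicting_memblks() above treats the recorded blocks as half-open
 * [start, end) intervals, skipping empty blocks and returning the node id
 * of the first block that overlaps or exactly matches the queried range.
 *
 * Example: the dummy setup at the bottom of this file registers all of RAM
 * on node 0 with numa_add_memblk(0, 0, (u64)max_pfn << PAGE_SHIFT).
 */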
int __init numa_add_memblk(int nid, u64 start, u64 end)
{
	i = conflicting_memblks(start, end);
	printk(KERN_WARNING "NUMA: Warning: node %d (%Lx-%Lx) overlaps with itself (%Lx-%Lx)\n",
	       nid, start, end, numa_nodes[i].start, numa_nodes[i].end);
	printk(KERN_ERR "NUMA: node %d (%Lx-%Lx) overlaps with node %d (%Lx-%Lx)\n",
	       nid, start, end, i,
	       numa_nodes[i].start, numa_nodes[i].end);

	node_memblk_range[num_node_memblks].start = start;
	node_memblk_range[num_node_memblks].end = end;
	memblk_nodeid[num_node_memblks] = nid;
static __init void cutoff_node(int i, unsigned long start, unsigned long end)
{
	struct bootnode *nd = &numa_nodes[i];

	if (nd->start < start) {
		if (nd->end < nd->start)
		if (nd->start > nd->end)
/* Initialize bootmem allocator for a node */
void __init
setup_node_bootmem(int nodeid, unsigned long start, unsigned long end)
{
	unsigned long start_pfn, last_pfn, nodedata_phys;
	const int pgdat_size = roundup(sizeof(pg_data_t), PAGE_SIZE);

	/*
	 * Don't confuse VM with a node that doesn't have the
	 * minimum amount of memory:
	 */
	if (end && (end - start) < NODE_MIN_SIZE)

	start = roundup(start, ZONE_ALIGN);

	printk(KERN_INFO "Initmem setup node %d %016lx-%016lx\n", nodeid,
	       start, end);

	start_pfn = start >> PAGE_SHIFT;
	last_pfn = end >> PAGE_SHIFT;

	node_data[nodeid] = early_node_mem(nodeid, start, end, pgdat_size,
	if (node_data[nodeid] == NULL)
	nodedata_phys = __pa(node_data[nodeid]);
	memblock_x86_reserve_range(nodedata_phys, nodedata_phys + pgdat_size, "NODE_DATA");
	printk(KERN_INFO " NODE_DATA [%016lx - %016lx]\n", nodedata_phys,
	       nodedata_phys + pgdat_size - 1);
	nid = phys_to_nid(nodedata_phys);
	printk(KERN_INFO " NODE_DATA(%d) on node %d\n", nodeid, nid);

	memset(NODE_DATA(nodeid), 0, sizeof(pg_data_t));
	NODE_DATA(nodeid)->node_id = nodeid;
	NODE_DATA(nodeid)->node_start_pfn = start_pfn;
	NODE_DATA(nodeid)->node_spanned_pages = last_pfn - start_pfn;

	node_set_online(nodeid);
/*
 * Sanity check to catch more bad NUMA configurations (they are amazingly
 * common).  Make sure the nodes cover all memory.
 */
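/*
 * Example: if e820 reports 4096MB of usable RAM but the parsed nodes only
 * cover 2048MB, the shortfall exceeds the 1MB slack below and the NUMA
 * configuration is rejected.
 */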
static int __init nodes_cover_memory(const struct bootnode *nodes)
{
	unsigned long numaram, e820ram;

	for_each_node_mask(i, mem_nodes_parsed) {
		unsigned long s = nodes[i].start >> PAGE_SHIFT;
		unsigned long e = nodes[i].end >> PAGE_SHIFT;
		numaram -= __absent_pages_in_range(i, s, e);
		if ((long)numaram < 0)

	e820ram = max_pfn -
		(memblock_x86_hole_size(0, max_pfn<<PAGE_SHIFT) >> PAGE_SHIFT);
	/* We seem to lose 3 pages somewhere. Allow 1M of slack. */
	if ((long)(e820ram - numaram) >= (1<<(20 - PAGE_SHIFT))) {
		printk(KERN_ERR "NUMA: nodes only cover %luMB of your %luMB e820 RAM. Not used.\n",
		       (numaram << PAGE_SHIFT) >> 20,
		       (e820ram << PAGE_SHIFT) >> 20);
static int __init numa_register_memblks(void)
{
	/*
	 * Join together blocks on the same node, holes between
	 * which don't overlap with memory on other nodes.
	 */
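	/*
	 * Example: blocks [0,2G) and [3G,4G) on node 0 are merged into [0,4G)
	 * as long as no other node has memory inside the [2G,3G) hole.
	 */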
	for (i = 0; i < num_node_memblks; ++i) {
		for (j = i + 1; j < num_node_memblks; ++j) {
			unsigned long start, end;

			if (memblk_nodeid[i] != memblk_nodeid[j])
			start = min(node_memblk_range[i].end,
				    node_memblk_range[j].end);
			end = max(node_memblk_range[i].start,
				  node_memblk_range[j].start);
			for (k = 0; k < num_node_memblks; ++k) {
				if (memblk_nodeid[i] == memblk_nodeid[k])
				if (start < node_memblk_range[k].end &&
				    end > node_memblk_range[k].start)
			if (k < num_node_memblks)
			start = min(node_memblk_range[i].start,
				    node_memblk_range[j].start);
			end = max(node_memblk_range[i].end,
				  node_memblk_range[j].end);
			printk(KERN_INFO "NUMA: Node %d [%Lx,%Lx) + [%Lx,%Lx) -> [%lx,%lx)\n",
			       node_memblk_range[i].start,
			       node_memblk_range[i].end,
			       node_memblk_range[j].start,
			       node_memblk_range[j].end,
			node_memblk_range[i].start = start;
			node_memblk_range[i].end = end;
			k = --num_node_memblks - j;
			memmove(memblk_nodeid + j, memblk_nodeid + j+1,
				k * sizeof(*memblk_nodeid));
			memmove(node_memblk_range + j, node_memblk_range + j+1,
				k * sizeof(*node_memblk_range));
	memnode_shift = compute_hash_shift(node_memblk_range, num_node_memblks,
					   memblk_nodeid);
	if (memnode_shift < 0) {
		printk(KERN_ERR "NUMA: No NUMA node hash function found. Contact maintainer\n");

	for (i = 0; i < num_node_memblks; i++)
		memblock_x86_register_active_regions(memblk_nodeid[i],
				node_memblk_range[i].start >> PAGE_SHIFT,
				node_memblk_range[i].end >> PAGE_SHIFT);

	/* for out-of-order entries */
	if (!nodes_cover_memory(numa_nodes))

	init_memory_mapping_high();

	/* Finally register nodes. */
	for_each_node_mask(i, node_possible_map)
		setup_node_bootmem(i, numa_nodes[i].start, numa_nodes[i].end);

	/*
	 * Try again in case setup_node_bootmem() missed a node the first
	 * time around.
	 */
	for_each_node_mask(i, node_possible_map)
		setup_node_bootmem(i, numa_nodes[i].start,
				   numa_nodes[i].end);
#ifdef CONFIG_NUMA_EMU
static struct bootnode nodes[MAX_NUMNODES] __initdata;
static struct bootnode physnodes[MAX_NUMNODES] __cpuinitdata;
static char *cmdline __initdata;
void __init numa_emu_cmdline(char *str)
{
static int __init setup_physnodes(unsigned long start, unsigned long end)
{
	memset(physnodes, 0, sizeof(physnodes));

	for_each_node_mask(i, mem_nodes_parsed) {
		physnodes[i].start = numa_nodes[i].start;
		physnodes[i].end = numa_nodes[i].end;
	}
	/*
	 * Basic sanity checking on the physical node map: there may be errors
	 * if the SRAT or AMD code incorrectly reported the topology or the mem=
	 * kernel parameter is used.
	 */
	for (i = 0; i < MAX_NUMNODES; i++) {
		if (physnodes[i].start == physnodes[i].end)
		if (physnodes[i].start > end) {
			physnodes[i].end = physnodes[i].start;
		if (physnodes[i].end < start) {
			physnodes[i].start = physnodes[i].end;
		if (physnodes[i].start < start)
			physnodes[i].start = start;
		if (physnodes[i].end > end)
			physnodes[i].end = end;
	/*
	 * If no physical topology was detected, a single node is faked to cover
	 * the entire address space.
	 */
	physnodes[ret].start = start;
	physnodes[ret].end = end;
static void __init fake_physnodes(int acpi, int amd, int nr_nodes)
{
#ifdef CONFIG_ACPI_NUMA
		acpi_fake_nodes(nodes, nr_nodes);
#endif
#ifdef CONFIG_AMD_NUMA
		amd_fake_nodes(nodes, nr_nodes);
#endif
	for (i = 0; i < nr_cpu_ids; i++)
/*
 * Sets up nid to range from addr to addr + size.  If the end
 * boundary is greater than max_addr, then max_addr is used instead.
 * The return value is 0 if there is additional memory left for
 * allocation past addr and -1 otherwise.  addr is adjusted to be at
 * the end of the node.
 */
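/*
 * Example: with *addr = 0x80000000 and size = 0x20000000 (512MB),
 * setup_node_range(3, &addr, size, max_addr) makes fake node 3 cover
 * [0x80000000, 0xa0000000) and advances *addr to 0xa0000000.
 */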
static int __init setup_node_range(int nid, u64 *addr, u64 size, u64 max_addr)
{
	nodes[nid].start = *addr;
	if (*addr >= max_addr) {
	nodes[nid].end = *addr;
	node_set(nid, node_possible_map);
	printk(KERN_INFO "Faking node %d at %016Lx-%016Lx (%LuMB)\n", nid,
	       nodes[nid].start, nodes[nid].end,
	       (nodes[nid].end - nodes[nid].start) >> 20);
/*
 * Sets up nr_nodes fake nodes interleaved over physical nodes ranging from addr
 * to max_addr.  The return value is the number of nodes allocated.
 */
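/*
 * Example: "numa=fake=8" on a machine with two physical nodes typically
 * carves four fake nodes out of each physical node, each covering roughly
 * 1/8th of the usable memory.
 */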
static int __init split_nodes_interleave(u64 addr, u64 max_addr, int nr_nodes)
{
	nodemask_t physnode_mask = NODE_MASK_NONE;

	if (nr_nodes > MAX_NUMNODES) {
		pr_info("numa=fake=%d too large, reducing to %d\n",
			nr_nodes, MAX_NUMNODES);
		nr_nodes = MAX_NUMNODES;
	}

	size = (max_addr - addr - memblock_x86_hole_size(addr, max_addr)) / nr_nodes;
	/*
	 * Calculate the number of big nodes that can be allocated as a result
	 * of consolidating the remainder.
	 */
	big = ((size & ~FAKE_NODE_MIN_HASH_MASK) * nr_nodes) /
		FAKE_NODE_MIN_SIZE;

	size &= FAKE_NODE_MIN_HASH_MASK;
		pr_err("Not enough memory for each node. "
		       "NUMA emulation disabled.\n");

	for (i = 0; i < MAX_NUMNODES; i++)
		if (physnodes[i].start != physnodes[i].end)
			node_set(i, physnode_mask);

	/*
	 * Continue to fill physical nodes with fake nodes until there is no
	 * memory left on any of them.
	 */
	while (nodes_weight(physnode_mask)) {
		for_each_node_mask(i, physnode_mask) {
			u64 end = physnodes[i].start + size;
			u64 dma32_end = PFN_PHYS(MAX_DMA32_PFN);

				end += FAKE_NODE_MIN_SIZE;

			/*
			 * Continue to add memory to this fake node if its
			 * non-reserved memory is less than the per-node size.
			 */
			while (end - physnodes[i].start -
			       memblock_x86_hole_size(physnodes[i].start, end) < size) {
				end += FAKE_NODE_MIN_SIZE;
				if (end > physnodes[i].end) {
					end = physnodes[i].end;

			/*
			 * If there won't be at least FAKE_NODE_MIN_SIZE of
			 * non-reserved memory in ZONE_DMA32 for the next node,
			 * this one must extend to the boundary.
			 */
			if (end < dma32_end && dma32_end - end -
			    memblock_x86_hole_size(end, dma32_end) < FAKE_NODE_MIN_SIZE)

			/*
			 * If there won't be enough non-reserved memory for the
			 * next node, this one must extend to the end of the
			 * physical node.
			 */
			if (physnodes[i].end - end -
			    memblock_x86_hole_size(end, physnodes[i].end) < size)
				end = physnodes[i].end;

			/*
			 * Avoid allocating more nodes than requested, which can
			 * happen as a result of rounding down each node's size
			 * to FAKE_NODE_MIN_SIZE.
			 */
			if (nodes_weight(physnode_mask) + ret >= nr_nodes)
				end = physnodes[i].end;

			if (setup_node_range(ret++, &physnodes[i].start,
					     end - physnodes[i].start,
					     physnodes[i].end) < 0)
				node_clear(i, physnode_mask);
/*
 * Returns the end address of a node so that there is at least `size' amount of
 * non-reserved memory or `max_addr' is reached.
 */
static u64 __init find_end_of_node(u64 start, u64 max_addr, u64 size)
{
	u64 end = start + size;

	while (end - start - memblock_x86_hole_size(start, end) < size) {
		end += FAKE_NODE_MIN_SIZE;
		if (end > max_addr) {
/*
 * Sets up fake nodes of `size' interleaved over physical nodes ranging from
 * `addr' to `max_addr'.  The return value is the number of nodes allocated.
 */
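/*
 * Example: "numa=fake=512M" ends up here and creates as many 512MB fake
 * nodes as fit in the usable memory between addr and max_addr.
 */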
static int __init split_nodes_size_interleave(u64 addr, u64 max_addr, u64 size)
{
	nodemask_t physnode_mask = NODE_MASK_NONE;

	/*
	 * The limit on emulated nodes is MAX_NUMNODES, so the size per node is
	 * increased accordingly if the requested size is too small.  This
	 * creates a uniform distribution of node sizes across the entire
	 * machine (but not necessarily over physical nodes).
	 */
	min_size = (max_addr - addr - memblock_x86_hole_size(addr, max_addr)) /
						MAX_NUMNODES;
	min_size = max(min_size, FAKE_NODE_MIN_SIZE);
	if ((min_size & FAKE_NODE_MIN_HASH_MASK) < min_size)
		min_size = (min_size + FAKE_NODE_MIN_SIZE) &
			FAKE_NODE_MIN_HASH_MASK;
	if (size < min_size) {
		pr_err("Fake node size %LuMB too small, increasing to %LuMB\n",
			size >> 20, min_size >> 20);
	}
	size &= FAKE_NODE_MIN_HASH_MASK;

	for (i = 0; i < MAX_NUMNODES; i++)
		if (physnodes[i].start != physnodes[i].end)
			node_set(i, physnode_mask);

	/*
	 * Fill physical nodes with fake nodes of size until there is no memory
	 * left on any of them.
	 */
	while (nodes_weight(physnode_mask)) {
		for_each_node_mask(i, physnode_mask) {
			u64 dma32_end = MAX_DMA32_PFN << PAGE_SHIFT;

			end = find_end_of_node(physnodes[i].start,
					       physnodes[i].end, size);
			/*
			 * If there won't be at least FAKE_NODE_MIN_SIZE of
			 * non-reserved memory in ZONE_DMA32 for the next node,
			 * this one must extend to the boundary.
			 */
			if (end < dma32_end && dma32_end - end -
			    memblock_x86_hole_size(end, dma32_end) < FAKE_NODE_MIN_SIZE)

			/*
			 * If there won't be enough non-reserved memory for the
			 * next node, this one must extend to the end of the
			 * physical node.
			 */
			if (physnodes[i].end - end -
			    memblock_x86_hole_size(end, physnodes[i].end) < size)
				end = physnodes[i].end;

			/*
			 * Set up the fake node that will be allocated as bootmem
			 * later.  If setup_node_range() returns non-zero, there
			 * is no more memory available on this physical node.
			 */
			if (setup_node_range(ret++, &physnodes[i].start,
					     end - physnodes[i].start,
					     physnodes[i].end) < 0)
				node_clear(i, physnode_mask);
/*
 * Sets up the system RAM area from start_pfn to last_pfn according to the
 * numa=fake command-line option.
 */
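/*
 * Example: "numa=fake=4" splits RAM into four roughly equal fake nodes,
 * while "numa=fake=512M" creates as many 512MB nodes as fit.
 */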
static int __init numa_emulation(unsigned long start_pfn,
				 unsigned long last_pfn, int acpi, int amd)
{
	u64 addr = start_pfn << PAGE_SHIFT;
	u64 max_addr = last_pfn << PAGE_SHIFT;

	/*
	 * If the numa=fake command-line contains an 'M' or 'G', it represents
	 * the fixed node size.  Otherwise, if it is just a single number N,
	 * split the system RAM into N fake nodes.
	 */
	if (strchr(cmdline, 'M') || strchr(cmdline, 'G')) {
		size = memparse(cmdline, &cmdline);
		num_nodes = split_nodes_size_interleave(addr, max_addr, size);
	} else {
		n = simple_strtoul(cmdline, NULL, 0);
		num_nodes = split_nodes_interleave(addr, max_addr, n);
	}

	memnode_shift = compute_hash_shift(nodes, num_nodes, NULL);
	if (memnode_shift < 0) {
		printk(KERN_ERR "No NUMA hash function found. NUMA emulation "
		       "disabled.\n");

	/*
	 * We need to vacate all active ranges that may have been registered for
	 * the e820 memory map.
	 */
	remove_all_active_ranges();
	for_each_node_mask(i, node_possible_map)
		memblock_x86_register_active_regions(i, nodes[i].start >> PAGE_SHIFT,
						     nodes[i].end >> PAGE_SHIFT);
	init_memory_mapping_high();
	for_each_node_mask(i, node_possible_map)
		setup_node_bootmem(i, nodes[i].start, nodes[i].end);
	setup_physnodes(addr, max_addr);
	fake_physnodes(acpi, amd, num_nodes);

#endif /* CONFIG_NUMA_EMU */
static int dummy_numa_init(void)
{
	printk(KERN_INFO "%s\n",
	       numa_off ? "NUMA turned off" : "No NUMA configuration found");
	printk(KERN_INFO "Faking a node at %016lx-%016lx\n",
	       0LU, max_pfn << PAGE_SHIFT);

	node_set(0, cpu_nodes_parsed);
	node_set(0, mem_nodes_parsed);
	numa_add_memblk(0, 0, (u64)max_pfn << PAGE_SHIFT);
	numa_nodes[0].start = 0;
	numa_nodes[0].end = (u64)max_pfn << PAGE_SHIFT;
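
/*
 * initmem_init() below tries the NUMA detection methods in order:
 * ACPI/SRAT first, then the AMD northbridge scan, and finally the dummy
 * single-node setup above; the first initializer that succeeds wins.
 */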
static int dummy_scan_nodes(void)
{
void __init initmem_init(void)
{
	int (*numa_init[])(void) = { [2] = dummy_numa_init };
	int (*scan_nodes[])(void) = { [2] = dummy_scan_nodes };
#ifdef CONFIG_ACPI_NUMA
	numa_init[0] = x86_acpi_numa_init;
	scan_nodes[0] = acpi_scan_nodes;
#endif
#ifdef CONFIG_AMD_NUMA
	numa_init[1] = amd_numa_init;
	scan_nodes[1] = amd_scan_nodes;
#endif
	for (i = 0; i < ARRAY_SIZE(numa_init); i++) {
		for (j = 0; j < MAX_LOCAL_APIC; j++)
			set_apicid_to_node(j, NUMA_NO_NODE);

		nodes_clear(cpu_nodes_parsed);
		nodes_clear(mem_nodes_parsed);
		nodes_clear(node_possible_map);
		nodes_clear(node_online_map);
		num_node_memblks = 0;
		memset(node_memblk_range, 0, sizeof(node_memblk_range));
		memset(memblk_nodeid, 0, sizeof(memblk_nodeid));
		memset(numa_nodes, 0, sizeof(numa_nodes));
		remove_all_active_ranges();

		if (numa_init[i]() < 0)

		/* clean up the node list */
		for (j = 0; j < MAX_NUMNODES; j++)
			cutoff_node(j, 0, max_pfn << PAGE_SHIFT);
#ifdef CONFIG_NUMA_EMU
		setup_physnodes(0, max_pfn << PAGE_SHIFT);
		if (cmdline && !numa_emulation(0, max_pfn, i == 0, i == 1))
		setup_physnodes(0, max_pfn << PAGE_SHIFT);
		nodes_clear(node_possible_map);
		nodes_clear(node_online_map);
#endif
		/* Account for nodes with cpus and no memory */
		nodes_or(node_possible_map, mem_nodes_parsed, cpu_nodes_parsed);
		if (WARN_ON(nodes_empty(node_possible_map)))

		if (numa_register_memblks() < 0)

		if (scan_nodes[i]() < 0)

		for (j = 0; j < nr_cpu_ids; j++) {
			int nid = early_cpu_to_node(j);

			if (nid == NUMA_NO_NODE)
			if (!node_online(nid))
unsigned long __init numa_free_all_bootmem(void)
{
	unsigned long pages = 0;

	for_each_online_node(i)
		pages += free_all_bootmem_node(NODE_DATA(i));

	pages += free_all_memory_core_early(MAX_NUMNODES);
int __cpuinit numa_cpu_node(int cpu)
{
	int apicid = early_per_cpu(x86_cpu_to_apicid, cpu);

	if (apicid != BAD_APICID)
		return __apicid_to_node[apicid];
/*
 * UGLINESS AHEAD: Currently, CONFIG_NUMA_EMU is 64bit only and makes use
 * of 64bit specific data structures.  The distinction is artificial and
 * should be removed.  numa_{add|remove}_cpu() are implemented in numa.c
 * for both 32 and 64bit when CONFIG_NUMA_EMU is disabled but here when
 * CONFIG_NUMA_EMU is enabled.
 *
 * NUMA emulation is planned to be made generic and the following and other
 * related code should be moved to numa.c.
 */
#ifdef CONFIG_NUMA_EMU
# ifndef CONFIG_DEBUG_PER_CPU_MAPS
void __cpuinit numa_add_cpu(int cpu)
{
	nid = numa_cpu_node(cpu);
	if (nid == NUMA_NO_NODE)
		nid = early_cpu_to_node(cpu);
	BUG_ON(nid == NUMA_NO_NODE || !node_online(nid));
	/*
	 * Use the starting address of the emulated node to find which physical
	 * node it is allocated on.
	 */
	addr = node_start_pfn(nid) << PAGE_SHIFT;
	for (physnid = 0; physnid < MAX_NUMNODES; physnid++)
		if (addr >= physnodes[physnid].start &&
		    addr < physnodes[physnid].end)

	/*
	 * Map the cpu to each emulated node that is allocated on the physical
	 * node of the cpu's apic id.
	 */
	for_each_online_node(nid) {
		addr = node_start_pfn(nid) << PAGE_SHIFT;
		if (addr >= physnodes[physnid].start &&
		    addr < physnodes[physnid].end)
			cpumask_set_cpu(cpu, node_to_cpumask_map[nid]);
void __cpuinit numa_remove_cpu(int cpu)
{
	for_each_online_node(i)
		cpumask_clear_cpu(cpu, node_to_cpumask_map[i]);
# else /* !CONFIG_DEBUG_PER_CPU_MAPS */
static void __cpuinit numa_set_cpumask(int cpu, int enable)
{
	int node = early_cpu_to_node(cpu);
	struct cpumask *mask;

	if (node == NUMA_NO_NODE) {
		/* early_cpu_to_node() already emits a warning and trace */
		return;
	}
	for_each_online_node(i) {
		addr = node_start_pfn(i) << PAGE_SHIFT;
		if (addr < physnodes[node].start ||
		    addr >= physnodes[node].end)
		mask = debug_cpumask_set_cpu(cpu, enable);
		if (enable)
			cpumask_set_cpu(cpu, mask);
		else
			cpumask_clear_cpu(cpu, mask);
void __cpuinit numa_add_cpu(int cpu)
{
	numa_set_cpumask(cpu, 1);
}

void __cpuinit numa_remove_cpu(int cpu)
{
	numa_set_cpumask(cpu, 0);
}
# endif /* !CONFIG_DEBUG_PER_CPU_MAPS */
#endif /* CONFIG_NUMA_EMU */