/*
 * Generic VM initialization for x86-64 NUMA setups.
 * Copyright 2002,2003 Andi Kleen, SuSE Labs.
 */
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/init.h>
#include <linux/bootmem.h>
#include <linux/memblock.h>
#include <linux/mmzone.h>
#include <linux/ctype.h>
#include <linux/module.h>
#include <linux/nodemask.h>
#include <linux/sched.h>
#include <linux/acpi.h>

#include <asm/proto.h>
#include <asm/amd_nb.h>
struct numa_memblk {
        u64                     start;
        u64                     end;
        int                     nid;
};

struct numa_meminfo {
        int                     nr_blks;
        struct numa_memblk      blk[NR_NODE_MEMBLKS];
};

struct pglist_data *node_data[MAX_NUMNODES] __read_mostly;
EXPORT_SYMBOL(node_data);

nodemask_t numa_nodes_parsed __initdata;
nodemask_t mem_nodes_parsed __initdata;

struct memnode memnode;

static unsigned long __initdata nodemap_addr;
static unsigned long __initdata nodemap_size;

static struct numa_meminfo numa_meminfo __initdata;

/*
 * Given a shift value, try to populate memnodemap[].
 * Returns:
 * 1 if OK
 * 0 if memnodemap[] too small (or shift too small)
 * -1 if node overlap or lost ram (shift too big)
 */
static int __init populate_memnodemap(const struct numa_meminfo *mi, int shift)
{
        unsigned long addr, end;
        int i, res = -1;

        memset(memnodemap, 0xff, sizeof(s16)*memnodemapsize);
        for (i = 0; i < mi->nr_blks; i++) {
                addr = mi->blk[i].start;
                end = mi->blk[i].end;
                if (addr >= end)
                        continue;
                if ((end >> shift) >= memnodemapsize)
                        return 0;
                do {
                        if (memnodemap[addr >> shift] != NUMA_NO_NODE)
                                return -1;
                        memnodemap[addr >> shift] = mi->blk[i].nid;
                        addr += (1UL << shift);
                } while (addr < end);
                res = 1;
        }
        return res;
}

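/*
 * memnodemap[] is the physical-address-to-node lookup table behind
 * phys_to_nid(): entry (addr >> memnode_shift) holds the node id that owns
 * that chunk of the address space.  For example (illustrative), with
 * shift = 27 each entry covers 128MB, so a block spanning
 * 0x100000000-0x200000000 on node 1 fills entries 0x20-0x3f with 1.
 *
 * The helper below places that table: the small map embedded in struct
 * memnode is used when it is big enough, otherwise a cache-aligned buffer
 * is carved out of memblock.
 */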
static int __init allocate_cachealigned_memnodemap(void)
{
        unsigned long addr;

        memnodemap = memnode.embedded_map;
        if (memnodemapsize <= ARRAY_SIZE(memnode.embedded_map))
                return 0;

        addr = 0x8000;
        nodemap_size = roundup(sizeof(s16) * memnodemapsize, L1_CACHE_BYTES);
        nodemap_addr = memblock_find_in_range(addr, get_max_mapped(),
                                              nodemap_size, L1_CACHE_BYTES);
        if (nodemap_addr == MEMBLOCK_ERROR) {
                printk(KERN_ERR
                       "NUMA: Unable to allocate Memory to Node hash map\n");
                nodemap_addr = nodemap_size = 0;
                return -1;
        }
        memnodemap = phys_to_virt(nodemap_addr);
        memblock_x86_reserve_range(nodemap_addr, nodemap_addr + nodemap_size, "MEMNODEMAP");

        printk(KERN_DEBUG "NUMA: Allocated memnodemap from %lx - %lx\n",
               nodemap_addr, nodemap_addr + nodemap_size);
        return 0;
}

/*
 * The LSB of all start and end addresses in the node map is the value of the
 * maximum possible shift.
 */
static int __init extract_lsb_from_nodes(const struct numa_meminfo *mi)
{
        int i, nodes_used = 0;
        unsigned long start, end;
        unsigned long bitfield = 0, memtop = 0;

        for (i = 0; i < mi->nr_blks; i++) {
                start = mi->blk[i].start;
                end = mi->blk[i].end;
                if (start >= end)
                        continue;
                bitfield |= start;
                nodes_used++;
                if (end > memtop)
                        memtop = end;
        }
        if (nodes_used <= 1)
                i = BITS_PER_LONG - 1;
        else
                i = find_first_bit(&bitfield, sizeof(unsigned long)*8);
        memnodemapsize = (memtop >> i) + 1;
        return i;
}

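/*
 * compute_hash_shift() ties the pieces together: derive the largest usable
 * shift from the block boundaries, allocate memnodemap[] at the resulting
 * size, and then populate it.  Returns the shift on success or -1 if no
 * conflict-free mapping could be built.
 */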
static int __init compute_hash_shift(const struct numa_meminfo *mi)
{
        int shift;

        shift = extract_lsb_from_nodes(mi);
        if (allocate_cachealigned_memnodemap())
                return -1;
        printk(KERN_DEBUG "NUMA: Using %d for the hash shift.\n", shift);

        if (populate_memnodemap(mi, shift) != 1) {
                printk(KERN_INFO "Your memory is not aligned you need to "
                       "rebuild your kernel with a bigger NODEMAPSIZE "
                       "shift=%d\n", shift);
                return -1;
        }
        return shift;
}

int __meminit __early_pfn_to_nid(unsigned long pfn)
{
        return phys_to_nid(pfn << PAGE_SHIFT);
}

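/*
 * early_node_mem() grabs boot-time memory for per-node data: it first asks
 * memblock for a node-local range above the DMA/DMA32 zones, and only if
 * that fails does it widen the search to any mapped memory on any node.
 */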
static void * __init early_node_mem(int nodeid, unsigned long start,
                                    unsigned long end, unsigned long size,
                                    unsigned long align)
{
        unsigned long mem;

        /*
         * Put it as high as possible; something else will go with NODE_DATA.
         */
        if (start < (MAX_DMA_PFN<<PAGE_SHIFT))
                start = MAX_DMA_PFN<<PAGE_SHIFT;
        if (start < (MAX_DMA32_PFN<<PAGE_SHIFT) &&
            end > (MAX_DMA32_PFN<<PAGE_SHIFT))
                start = MAX_DMA32_PFN<<PAGE_SHIFT;
        mem = memblock_x86_find_in_range_node(nodeid, start, end, size, align);
        if (mem != MEMBLOCK_ERROR)
                return __va(mem);

        /* extend the search scope */
        end = max_pfn_mapped << PAGE_SHIFT;
        start = MAX_DMA_PFN << PAGE_SHIFT;
        mem = memblock_find_in_range(start, end, size, align);
        if (mem != MEMBLOCK_ERROR)
                return __va(mem);

        printk(KERN_ERR "Cannot find %lu bytes in node %d\n",
               size, nodeid);

        return NULL;
}

int __init numa_add_memblk(int nid, u64 start, u64 end)
{
        struct numa_meminfo *mi = &numa_meminfo;

        /* ignore zero length blks */
        if (start == end)
                return 0;

        /* whine about and ignore invalid blks */
        if (start > end || nid < 0 || nid >= MAX_NUMNODES) {
                pr_warning("NUMA: Warning: invalid memblk node %d (%Lx-%Lx)\n",
                           nid, start, end);
                return 0;
        }

        if (mi->nr_blks >= NR_NODE_MEMBLKS) {
                pr_err("NUMA: too many memblk ranges\n");
                return -EINVAL;
        }

        mi->blk[mi->nr_blks].start = start;
        mi->blk[mi->nr_blks].end = end;
        mi->blk[mi->nr_blks].nid = nid;
        mi->nr_blks++;
        return 0;
}

static void __init numa_remove_memblk_from(int idx, struct numa_meminfo *mi)
{
        mi->nr_blks--;
        memmove(&mi->blk[idx], &mi->blk[idx + 1],
                (mi->nr_blks - idx) * sizeof(mi->blk[0]));
}

/* Initialize bootmem allocator for a node */
void __init
setup_node_bootmem(int nodeid, unsigned long start, unsigned long end)
{
        unsigned long start_pfn, last_pfn, nodedata_phys;
        const int pgdat_size = roundup(sizeof(pg_data_t), PAGE_SIZE);
        int nid;

        if (!end)
                return;

        /*
         * Don't confuse VM with a node that doesn't have the
         * minimum amount of memory:
         */
        if (end && (end - start) < NODE_MIN_SIZE)
                return;

        start = roundup(start, ZONE_ALIGN);

        printk(KERN_INFO "Initmem setup node %d %016lx-%016lx\n", nodeid,
               start, end);

        start_pfn = start >> PAGE_SHIFT;
        last_pfn = end >> PAGE_SHIFT;

        node_data[nodeid] = early_node_mem(nodeid, start, end, pgdat_size,
                                           SMP_CACHE_BYTES);
        if (node_data[nodeid] == NULL)
                return;
        nodedata_phys = __pa(node_data[nodeid]);
        memblock_x86_reserve_range(nodedata_phys, nodedata_phys + pgdat_size, "NODE_DATA");
        printk(KERN_INFO "  NODE_DATA [%016lx - %016lx]\n", nodedata_phys,
               nodedata_phys + pgdat_size - 1);
        nid = phys_to_nid(nodedata_phys);
        if (nid != nodeid)
                printk(KERN_INFO "    NODE_DATA(%d) on node %d\n", nodeid, nid);

        memset(NODE_DATA(nodeid), 0, sizeof(pg_data_t));
        NODE_DATA(nodeid)->node_id = nodeid;
        NODE_DATA(nodeid)->node_start_pfn = start_pfn;
        NODE_DATA(nodeid)->node_spanned_pages = last_pfn - start_pfn;

        node_set_online(nodeid);
}

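/*
 * numa_cleanup_meminfo() sanitizes the parsed block list: clamp every block
 * to [0, max_pfn), drop empty blocks, reject overlaps between different
 * nodes, and merge same-node blocks whose combined span does not collide
 * with memory belonging to any other node.  Trailing slots are cleared.
 */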
static int __init numa_cleanup_meminfo(struct numa_meminfo *mi)
{
        const u64 low = 0;
        const u64 high = (u64)max_pfn << PAGE_SHIFT;
        int i, j, k;

        for (i = 0; i < mi->nr_blks; i++) {
                struct numa_memblk *bi = &mi->blk[i];

                /* make sure all blocks are inside the limits */
                bi->start = max(bi->start, low);
                bi->end = min(bi->end, high);

                /* and there's no empty block */
                if (bi->start == bi->end) {
                        numa_remove_memblk_from(i--, mi);
                        continue;
                }

                for (j = i + 1; j < mi->nr_blks; j++) {
                        struct numa_memblk *bj = &mi->blk[j];
                        unsigned long start, end;

                        /*
                         * See whether there are overlapping blocks.  Whine
                         * about but allow overlaps of the same nid.  They
                         * will be merged below.
                         */
                        if (bi->end > bj->start && bi->start < bj->end) {
                                if (bi->nid != bj->nid) {
                                        pr_err("NUMA: node %d (%Lx-%Lx) overlaps with node %d (%Lx-%Lx)\n",
                                               bi->nid, bi->start, bi->end,
                                               bj->nid, bj->start, bj->end);
                                        return -EINVAL;
                                }
                                pr_warning("NUMA: Warning: node %d (%Lx-%Lx) overlaps with itself (%Lx-%Lx)\n",
                                           bi->nid, bi->start, bi->end,
                                           bj->start, bj->end);
                        }

                        /*
                         * Join together blocks on the same node, holes
                         * between which don't overlap with memory on other
                         * nodes.
                         */
                        if (bi->nid != bj->nid)
                                continue;
                        start = max(min(bi->start, bj->start), low);
                        end = min(max(bi->end, bj->end), high);
                        for (k = 0; k < mi->nr_blks; k++) {
                                struct numa_memblk *bk = &mi->blk[k];

                                if (bi->nid == bk->nid)
                                        continue;
                                if (start < bk->end && end > bk->start)
                                        break;
                        }
                        if (k < mi->nr_blks)
                                continue;
                        printk(KERN_INFO "NUMA: Node %d [%Lx,%Lx) + [%Lx,%Lx) -> [%lx,%lx)\n",
                               bi->nid, bi->start, bi->end, bj->start, bj->end,
                               start, end);
                        bi->start = start;
                        bi->end = end;
                        numa_remove_memblk_from(j--, mi);
                }
        }

        for (i = mi->nr_blks; i < ARRAY_SIZE(mi->blk); i++) {
                mi->blk[i].start = mi->blk[i].end = 0;
                mi->blk[i].nid = NUMA_NO_NODE;
        }

        return 0;
}

/*
 * Sanity check to catch more bad NUMA configurations (they are amazingly
 * common).  Make sure the nodes cover all memory.
 */
static bool __init numa_meminfo_cover_memory(const struct numa_meminfo *mi)
{
        unsigned long numaram, e820ram;
        int i;

        numaram = 0;
        for (i = 0; i < mi->nr_blks; i++) {
                unsigned long s = mi->blk[i].start >> PAGE_SHIFT;
                unsigned long e = mi->blk[i].end >> PAGE_SHIFT;

                numaram += e - s;
                numaram -= __absent_pages_in_range(mi->blk[i].nid, s, e);
                if ((long)numaram < 0)
                        numaram = 0;
        }

        e820ram = max_pfn - (memblock_x86_hole_size(0,
                                        max_pfn << PAGE_SHIFT) >> PAGE_SHIFT);
        /* We seem to lose 3 pages somewhere. Allow 1M of slack. */
        if ((long)(e820ram - numaram) >= (1 << (20 - PAGE_SHIFT))) {
                printk(KERN_ERR "NUMA: nodes only cover %luMB of your %luMB e820 RAM. Not used.\n",
                       (numaram << PAGE_SHIFT) >> 20,
                       (e820ram << PAGE_SHIFT) >> 20);
                return false;
        }
        return true;
}

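/*
 * numa_register_memblks() turns the cleaned-up numa_meminfo into live VM
 * state: build node_possible_map, compute the memnodemap hash, register the
 * active ranges, verify the nodes really cover the e820 RAM, and finally run
 * setup_node_bootmem() for every possible node.
 */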
static int __init numa_register_memblks(struct numa_meminfo *mi)
{
        int i;

        /* Account for nodes with cpus and no memory */
        nodes_or(node_possible_map, mem_nodes_parsed, numa_nodes_parsed);
        if (WARN_ON(nodes_empty(node_possible_map)))
                return -EINVAL;

        memnode_shift = compute_hash_shift(mi);
        if (memnode_shift < 0) {
                printk(KERN_ERR "NUMA: No NUMA node hash function found. Contact maintainer\n");
                return -EINVAL;
        }

        for (i = 0; i < mi->nr_blks; i++)
                memblock_x86_register_active_regions(mi->blk[i].nid,
                                        mi->blk[i].start >> PAGE_SHIFT,
                                        mi->blk[i].end >> PAGE_SHIFT);

        /* for out of order entries */
        sort_node_map();

        if (!numa_meminfo_cover_memory(mi))
                return -EINVAL;

        init_memory_mapping_high();

        /*
         * Finally register nodes.  Do it twice in case setup_node_bootmem
         * missed one due to missing bootmem.
         */
        for (i = 0; i < 2; i++) {
                int nid;

                for_each_node_mask(nid, node_possible_map) {
                        u64 start = (u64)max_pfn << PAGE_SHIFT;
                        u64 end = 0;
                        int j;

                        if (node_online(nid))
                                continue;

                        for (j = 0; j < mi->nr_blks; j++) {
                                if (nid != mi->blk[j].nid)
                                        continue;
                                start = min(mi->blk[j].start, start);
                                end = max(mi->blk[j].end, end);
                        }

                        if (start < end)
                                setup_node_bootmem(nid, start, end);
                }
        }

        return 0;
}

#ifdef CONFIG_NUMA_EMU
static struct bootnode nodes[MAX_NUMNODES] __initdata;
static struct bootnode physnodes[MAX_NUMNODES] __cpuinitdata;
static char *cmdline __initdata;

void __init numa_emu_cmdline(char *str)
{
        cmdline = str;
}

int __init find_node_by_addr(unsigned long addr)
{
        const struct numa_meminfo *mi = &numa_meminfo;
        int i;

        for (i = 0; i < mi->nr_blks; i++) {
                /*
                 * Find the real node that this emulated node appears on.  For
                 * the sake of simplicity, we only use a real node's starting
                 * address to determine which emulated node it appears on.
                 */
                if (addr >= mi->blk[i].start && addr < mi->blk[i].end)
                        return mi->blk[i].nid;
        }
        return NUMA_NO_NODE;
}

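/*
 * setup_physnodes() collapses numa_meminfo into one start/end pair per
 * physical node (physnodes[]), clipped to [start, end).  The return value is
 * the number of physical nodes left after clipping; if none remain, a single
 * node spanning the whole range is faked.
 */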
static int __init setup_physnodes(unsigned long start, unsigned long end)
{
        const struct numa_meminfo *mi = &numa_meminfo;
        int ret = 0;
        int i;

        memset(physnodes, 0, sizeof(physnodes));

        for (i = 0; i < mi->nr_blks; i++) {
                int nid = mi->blk[i].nid;

                if (physnodes[nid].start == physnodes[nid].end) {
                        physnodes[nid].start = mi->blk[i].start;
                        physnodes[nid].end = mi->blk[i].end;
                } else {
                        physnodes[nid].start = min(physnodes[nid].start,
                                                   mi->blk[i].start);
                        physnodes[nid].end = max(physnodes[nid].end,
                                                 mi->blk[i].end);
                }
        }

        /*
         * Basic sanity checking on the physical node map: there may be errors
         * if the SRAT or AMD code incorrectly reported the topology or the
         * mem= kernel parameter is used.
         */
        for (i = 0; i < MAX_NUMNODES; i++) {
                if (physnodes[i].start == physnodes[i].end)
                        continue;
                if (physnodes[i].start > end) {
                        physnodes[i].end = physnodes[i].start;
                        continue;
                }
                if (physnodes[i].end < start) {
                        physnodes[i].start = physnodes[i].end;
                        continue;
                }
                if (physnodes[i].start < start)
                        physnodes[i].start = start;
                if (physnodes[i].end > end)
                        physnodes[i].end = end;
                ret++;
        }

        /*
         * If no physical topology was detected, a single node is faked to
         * cover the entire address space.
         */
        if (!ret) {
                physnodes[ret].start = start;
                physnodes[ret].end = end;
                ret = 1;
        }
        return ret;
}

static void __init fake_physnodes(int acpi, int amd, int nr_nodes)
{
        int i;

        BUG_ON(acpi && amd);
#ifdef CONFIG_ACPI_NUMA
        if (acpi)
                acpi_fake_nodes(nodes, nr_nodes);
#endif
#ifdef CONFIG_AMD_NUMA
        if (amd)
                amd_fake_nodes(nodes, nr_nodes);
#endif
        if (!acpi && !amd)
                for (i = 0; i < nr_cpu_ids; i++)
                        numa_set_node(i, 0);
}

/*
 * Sets up nid to range from addr to addr + size.  If the end
 * boundary is greater than max_addr, then max_addr is used instead.
 * The return value is 0 if there is additional memory left for
 * allocation past addr and -1 otherwise.  addr is adjusted to be at
 * the end of the node.
 */
static int __init setup_node_range(int nid, u64 *addr, u64 size, u64 max_addr)
{
        int ret = 0;

        nodes[nid].start = *addr;
        *addr += size;
        if (*addr >= max_addr) {
                *addr = max_addr;
                ret = -1;
        }
        nodes[nid].end = *addr;
        node_set(nid, node_possible_map);
        printk(KERN_INFO "Faking node %d at %016Lx-%016Lx (%LuMB)\n", nid,
               nodes[nid].start, nodes[nid].end,
               (nodes[nid].end - nodes[nid].start) >> 20);
        return ret;
}

/*
 * Sets up nr_nodes fake nodes interleaved over physical nodes ranging from
 * addr to max_addr.  The return value is the number of nodes allocated.
 */
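/*
 * For example (illustrative), "numa=fake=8" on a two-node 16GB machine ends
 * up with eight roughly 2GB emulated nodes, about four carved out of each
 * physical node, with the per-node size rounded to FAKE_NODE_MIN_SIZE
 * granularity and any remainder folded into a few "big" nodes.
 */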
static int __init split_nodes_interleave(u64 addr, u64 max_addr, int nr_nodes)
{
        nodemask_t physnode_mask = NODE_MASK_NONE;
        u64 size;
        int big;
        int ret = 0;
        int i;

        if (nr_nodes <= 0)
                return -1;
        if (nr_nodes > MAX_NUMNODES) {
                pr_info("numa=fake=%d too large, reducing to %d\n",
                        nr_nodes, MAX_NUMNODES);
                nr_nodes = MAX_NUMNODES;
        }

        size = (max_addr - addr - memblock_x86_hole_size(addr, max_addr)) / nr_nodes;
        /*
         * Calculate the number of big nodes that can be allocated as a result
         * of consolidating the remainder.
         */
        big = ((size & ~FAKE_NODE_MIN_HASH_MASK) * nr_nodes) /
                FAKE_NODE_MIN_SIZE;

        size &= FAKE_NODE_MIN_HASH_MASK;
        if (!size) {
                pr_err("Not enough memory for each node.  "
                        "NUMA emulation disabled.\n");
                return -1;
        }

        for (i = 0; i < MAX_NUMNODES; i++)
                if (physnodes[i].start != physnodes[i].end)
                        node_set(i, physnode_mask);

        /*
         * Continue to fill physical nodes with fake nodes until there is no
         * memory left on any of them.
         */
        while (nodes_weight(physnode_mask)) {
                for_each_node_mask(i, physnode_mask) {
                        u64 end = physnodes[i].start + size;
                        u64 dma32_end = PFN_PHYS(MAX_DMA32_PFN);

                        if (ret < big)
                                end += FAKE_NODE_MIN_SIZE;

                        /*
                         * Continue to add memory to this fake node if its
                         * non-reserved memory is less than the per-node size.
                         */
                        while (end - physnodes[i].start -
                                memblock_x86_hole_size(physnodes[i].start, end) < size) {
                                end += FAKE_NODE_MIN_SIZE;
                                if (end > physnodes[i].end) {
                                        end = physnodes[i].end;
                                        break;
                                }
                        }

                        /*
                         * If there won't be at least FAKE_NODE_MIN_SIZE of
                         * non-reserved memory in ZONE_DMA32 for the next node,
                         * this one must extend to the boundary.
                         */
                        if (end < dma32_end && dma32_end - end -
                            memblock_x86_hole_size(end, dma32_end) < FAKE_NODE_MIN_SIZE)
                                end = dma32_end;

                        /*
                         * If there won't be enough non-reserved memory for the
                         * next node, this one must extend to the end of the
                         * physical node.
                         */
                        if (physnodes[i].end - end -
                            memblock_x86_hole_size(end, physnodes[i].end) < size)
                                end = physnodes[i].end;

                        /*
                         * Avoid allocating more nodes than requested, which can
                         * happen as a result of rounding down each node's size
                         * to FAKE_NODE_MIN_SIZE.
                         */
                        if (nodes_weight(physnode_mask) + ret >= nr_nodes)
                                end = physnodes[i].end;

                        if (setup_node_range(ret++, &physnodes[i].start,
                                                end - physnodes[i].start,
                                                physnodes[i].end) < 0)
                                node_clear(i, physnode_mask);
                }
        }
        return ret;
}

/*
 * Returns the end address of a node so that there is at least `size' amount of
 * non-reserved memory or `max_addr' is reached.
 */
static u64 __init find_end_of_node(u64 start, u64 max_addr, u64 size)
{
        u64 end = start + size;

        while (end - start - memblock_x86_hole_size(start, end) < size) {
                end += FAKE_NODE_MIN_SIZE;
                if (end > max_addr) {
                        end = max_addr;
                        break;
                }
        }
        return end;
}

/*
 * Sets up fake nodes of `size' interleaved over physical nodes ranging from
 * `addr' to `max_addr'.  The return value is the number of nodes allocated.
 */
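/*
 * For example (illustrative), "numa=fake=512M" asks for 512MB emulated
 * nodes; if that is below the computed minimum (usable RAM / MAX_NUMNODES,
 * rounded to FAKE_NODE_MIN_SIZE), the size is bumped up and a message is
 * printed before the physical nodes are sliced.
 */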
static int __init split_nodes_size_interleave(u64 addr, u64 max_addr, u64 size)
{
        nodemask_t physnode_mask = NODE_MASK_NONE;
        u64 min_size;
        int ret = 0;
        int i;

        if (!size)
                return -1;
        /*
         * The limit on emulated nodes is MAX_NUMNODES, so the size per node is
         * increased accordingly if the requested size is too small.  This
         * creates a uniform distribution of node sizes across the entire
         * machine (but not necessarily over physical nodes).
         */
        min_size = (max_addr - addr - memblock_x86_hole_size(addr, max_addr)) /
                                                MAX_NUMNODES;
        min_size = max(min_size, FAKE_NODE_MIN_SIZE);
        if ((min_size & FAKE_NODE_MIN_HASH_MASK) < min_size)
                min_size = (min_size + FAKE_NODE_MIN_SIZE) &
                                                FAKE_NODE_MIN_HASH_MASK;
        if (size < min_size) {
                pr_err("Fake node size %LuMB too small, increasing to %LuMB\n",
                        size >> 20, min_size >> 20);
                size = min_size;
        }
        size &= FAKE_NODE_MIN_HASH_MASK;

        for (i = 0; i < MAX_NUMNODES; i++)
                if (physnodes[i].start != physnodes[i].end)
                        node_set(i, physnode_mask);
        /*
         * Fill physical nodes with fake nodes of size until there is no memory
         * left on any of them.
         */
        while (nodes_weight(physnode_mask)) {
                for_each_node_mask(i, physnode_mask) {
                        u64 dma32_end = MAX_DMA32_PFN << PAGE_SHIFT;
                        u64 end;

                        end = find_end_of_node(physnodes[i].start,
                                                physnodes[i].end, size);
                        /*
                         * If there won't be at least FAKE_NODE_MIN_SIZE of
                         * non-reserved memory in ZONE_DMA32 for the next node,
                         * this one must extend to the boundary.
                         */
                        if (end < dma32_end && dma32_end - end -
                            memblock_x86_hole_size(end, dma32_end) < FAKE_NODE_MIN_SIZE)
                                end = dma32_end;

                        /*
                         * If there won't be enough non-reserved memory for the
                         * next node, this one must extend to the end of the
                         * physical node.
                         */
                        if (physnodes[i].end - end -
                            memblock_x86_hole_size(end, physnodes[i].end) < size)
                                end = physnodes[i].end;

                        /*
                         * Setup the fake node that will be allocated as bootmem
                         * later.  If setup_node_range() returns non-zero, there
                         * is no more memory available on this physical node.
                         */
                        if (setup_node_range(ret++, &physnodes[i].start,
                                                end - physnodes[i].start,
                                                physnodes[i].end) < 0)
                                node_clear(i, physnode_mask);
                }
        }
        return ret;
}

/*
 * Sets up the system RAM area from start_pfn to last_pfn according to the
 * numa=fake command-line option.
 */
static int __init numa_emulation(unsigned long start_pfn,
                        unsigned long last_pfn, int acpi, int amd)
{
        static struct numa_meminfo ei __initdata;
        u64 addr = start_pfn << PAGE_SHIFT;
        u64 max_addr = last_pfn << PAGE_SHIFT;
        int num_nodes;
        int i;

        /*
         * If the numa=fake command-line contains a 'M' or 'G', it represents
         * the fixed node size.  Otherwise, if it is just a single number N,
         * split the system RAM into N fake nodes.
         */
        if (strchr(cmdline, 'M') || strchr(cmdline, 'G')) {
                u64 size;

                size = memparse(cmdline, &cmdline);
                num_nodes = split_nodes_size_interleave(addr, max_addr, size);
        } else {
                unsigned long n;

                n = simple_strtoul(cmdline, NULL, 0);
                num_nodes = split_nodes_interleave(addr, max_addr, n);
        }

        if (num_nodes < 0)
                return num_nodes;

        ei.nr_blks = num_nodes;
        for (i = 0; i < ei.nr_blks; i++) {
                ei.blk[i].start = nodes[i].start;
                ei.blk[i].end = nodes[i].end;
                ei.blk[i].nid = i;
        }

        memnode_shift = compute_hash_shift(&ei);
        if (memnode_shift < 0) {
                memnode_shift = 0;
                printk(KERN_ERR "No NUMA hash function found.  NUMA emulation "
                       "disabled.\n");
                return -1;
        }

        /*
         * We need to vacate all active ranges that may have been registered
         * for the e820 memory map.
         */
        remove_all_active_ranges();
        for_each_node_mask(i, node_possible_map)
                memblock_x86_register_active_regions(i, nodes[i].start >> PAGE_SHIFT,
                                                nodes[i].end >> PAGE_SHIFT);
        init_memory_mapping_high();
        for_each_node_mask(i, node_possible_map)
                setup_node_bootmem(i, nodes[i].start, nodes[i].end);
        setup_physnodes(addr, max_addr);
        fake_physnodes(acpi, amd, num_nodes);
        return 0;
}
#endif /* CONFIG_NUMA_EMU */

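/*
 * Fallback used when no firmware NUMA information is available (or NUMA is
 * turned off): pretend the whole of RAM is a single node 0.
 */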
static int dummy_numa_init(void)
{
        printk(KERN_INFO "%s\n",
               numa_off ? "NUMA turned off" : "No NUMA configuration found");
        printk(KERN_INFO "Faking a node at %016lx-%016lx\n",
               0LU, max_pfn << PAGE_SHIFT);

        node_set(0, numa_nodes_parsed);
        node_set(0, mem_nodes_parsed);
        numa_add_memblk(0, 0, (u64)max_pfn << PAGE_SHIFT);

        return 0;
}

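/*
 * initmem_init() tries each detection method in order - ACPI SRAT, then the
 * AMD northbridge scan, then the dummy single-node fallback - resetting all
 * NUMA state between attempts, and stops at the first one whose result
 * parses, cleans up and registers successfully.
 */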
void __init initmem_init(void)
{
        int (*numa_init[])(void) = { [2] = dummy_numa_init };
        int i, j;

        if (!numa_off) {
#ifdef CONFIG_ACPI_NUMA
                numa_init[0] = x86_acpi_numa_init;
#endif
#ifdef CONFIG_AMD_NUMA
                numa_init[1] = amd_numa_init;
#endif
        }

        for (i = 0; i < ARRAY_SIZE(numa_init); i++) {
                if (!numa_init[i])
                        continue;

                for (j = 0; j < MAX_LOCAL_APIC; j++)
                        set_apicid_to_node(j, NUMA_NO_NODE);

                nodes_clear(numa_nodes_parsed);
                nodes_clear(mem_nodes_parsed);
                nodes_clear(node_possible_map);
                nodes_clear(node_online_map);
                memset(&numa_meminfo, 0, sizeof(numa_meminfo));
                remove_all_active_ranges();

                if (numa_init[i]() < 0)
                        continue;

                if (numa_cleanup_meminfo(&numa_meminfo) < 0)
                        continue;
#ifdef CONFIG_NUMA_EMU
                setup_physnodes(0, max_pfn << PAGE_SHIFT);
                if (cmdline && !numa_emulation(0, max_pfn, i == 0, i == 1))
                        return;
                setup_physnodes(0, max_pfn << PAGE_SHIFT);
                nodes_clear(node_possible_map);
                nodes_clear(node_online_map);
#endif
                if (numa_register_memblks(&numa_meminfo) < 0)
                        continue;

                for (j = 0; j < nr_cpu_ids; j++) {
                        int nid = early_cpu_to_node(j);

                        if (nid == NUMA_NO_NODE)
                                continue;
                        if (!node_online(nid))
                                numa_clear_node(j);
                }
                numa_init_array();
                return;
        }
        BUG();
}

unsigned long __init numa_free_all_bootmem(void)
{
        unsigned long pages = 0;
        int i;

        for_each_online_node(i)
                pages += free_all_bootmem_node(NODE_DATA(i));

        pages += free_all_memory_core_early(MAX_NUMNODES);

        return pages;
}

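/*
 * Map a cpu to its node via the APIC id recorded at early boot; returns
 * NUMA_NO_NODE when the cpu's APIC id is not known yet.
 */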
int __cpuinit numa_cpu_node(int cpu)
{
        int apicid = early_per_cpu(x86_cpu_to_apicid, cpu);

        if (apicid != BAD_APICID)
                return __apicid_to_node[apicid];
        return NUMA_NO_NODE;
}

/*
 * UGLINESS AHEAD: Currently, CONFIG_NUMA_EMU is 64bit only and makes use
 * of 64bit specific data structures.  The distinction is artificial and
 * should be removed.  numa_{add|remove}_cpu() are implemented in numa.c
 * for both 32 and 64bit when CONFIG_NUMA_EMU is disabled but here when
 * CONFIG_NUMA_EMU is enabled.
 *
 * NUMA emulation is planned to be made generic and the following and other
 * related code should be moved to numa.c.
 */
#ifdef CONFIG_NUMA_EMU
# ifndef CONFIG_DEBUG_PER_CPU_MAPS
void __cpuinit numa_add_cpu(int cpu)
{
        unsigned long addr;
        int physnid, nid;

        nid = numa_cpu_node(cpu);
        if (nid == NUMA_NO_NODE)
                nid = early_cpu_to_node(cpu);
        BUG_ON(nid == NUMA_NO_NODE || !node_online(nid));

        /*
         * Use the starting address of the emulated node to find which physical
         * node it is allocated on.
         */
        addr = node_start_pfn(nid) << PAGE_SHIFT;
        for (physnid = 0; physnid < MAX_NUMNODES; physnid++)
                if (addr >= physnodes[physnid].start &&
                    addr < physnodes[physnid].end)
                        break;

        /*
         * Map the cpu to each emulated node that is allocated on the physical
         * node of the cpu's apic id.
         */
        for_each_online_node(nid) {
                addr = node_start_pfn(nid) << PAGE_SHIFT;
                if (addr >= physnodes[physnid].start &&
                    addr < physnodes[physnid].end)
                        cpumask_set_cpu(cpu, node_to_cpumask_map[nid]);
        }
}

void __cpuinit numa_remove_cpu(int cpu)
{
        int i;

        for_each_online_node(i)
                cpumask_clear_cpu(cpu, node_to_cpumask_map[i]);
}

# else  /* !CONFIG_DEBUG_PER_CPU_MAPS */
static void __cpuinit numa_set_cpumask(int cpu, int enable)
{
        int node = early_cpu_to_node(cpu);
        struct cpumask *mask;
        int i;

        if (node == NUMA_NO_NODE) {
                /* early_cpu_to_node() already emits a warning and trace */
                return;
        }
        for_each_online_node(i) {
                unsigned long addr;

                addr = node_start_pfn(i) << PAGE_SHIFT;
                if (addr < physnodes[node].start ||
                    addr >= physnodes[node].end)
                        continue;
                mask = debug_cpumask_set_cpu(cpu, enable);
                if (!mask)
                        return;

                if (enable)
                        cpumask_set_cpu(cpu, mask);
                else
                        cpumask_clear_cpu(cpu, mask);
        }
}

void __cpuinit numa_add_cpu(int cpu)
{
        numa_set_cpumask(cpu, 1);
}

void __cpuinit numa_remove_cpu(int cpu)
{
        numa_set_cpumask(cpu, 0);
}

# endif /* !CONFIG_DEBUG_PER_CPU_MAPS */
#endif  /* CONFIG_NUMA_EMU */