/*
 * pSeries NUMA support
 *
 * Copyright (C) 2002 Anton Blanchard <anton@au.ibm.com>, IBM
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */
#include <linux/threads.h>
#include <linux/bootmem.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/mmzone.h>
#include <linux/module.h>
#include <linux/nodemask.h>
#include <linux/cpu.h>
#include <linux/notifier.h>
#include <asm/sparsemem.h>
#include <asm/lmb.h>
#include <asm/system.h>
#include <asm/smp.h>
static int numa_enabled = 1;

static int numa_debug;
#define dbg(args...) if (numa_debug) { printk(KERN_INFO args); }
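/*
 * dbg() output is off by default; boot with "numa=debug" to enable it
 * (see early_numa() at the bottom of this file).
 */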
int numa_cpu_lookup_table[NR_CPUS];
cpumask_t numa_cpumask_lookup_table[MAX_NUMNODES];
struct pglist_data *node_data[MAX_NUMNODES];

EXPORT_SYMBOL(numa_cpu_lookup_table);
EXPORT_SYMBOL(numa_cpumask_lookup_table);
EXPORT_SYMBOL(node_data);
static bootmem_data_t __initdata plat_node_bdata[MAX_NUMNODES];
static int min_common_depth;
static int n_mem_addr_cells, n_mem_size_cells;
static void __cpuinit map_cpu_to_node(int cpu, int node)
{
	numa_cpu_lookup_table[cpu] = node;

	dbg("adding cpu %d to node %d\n", cpu, node);

	if (!(cpu_isset(cpu, numa_cpumask_lookup_table[node])))
		cpu_set(cpu, numa_cpumask_lookup_table[node]);
}
#ifdef CONFIG_HOTPLUG_CPU
static void unmap_cpu_from_node(unsigned long cpu)
{
	int node = numa_cpu_lookup_table[cpu];

	dbg("removing cpu %lu from node %d\n", cpu, node);

	if (cpu_isset(cpu, numa_cpumask_lookup_table[node])) {
		cpu_clear(cpu, numa_cpumask_lookup_table[node]);
	} else {
		printk(KERN_ERR "WARNING: cpu %lu not found in node %d\n",
		       cpu, node);
	}
}
#endif /* CONFIG_HOTPLUG_CPU */
static struct device_node * __cpuinit find_cpu_node(unsigned int cpu)
{
	unsigned int hw_cpuid = get_hard_smp_processor_id(cpu);
	struct device_node *cpu_node = NULL;
	const unsigned int *interrupt_server, *reg;
	int len;

	while ((cpu_node = of_find_node_by_type(cpu_node, "cpu")) != NULL) {
		/* Try interrupt server first */
		interrupt_server = get_property(cpu_node,
					"ibm,ppc-interrupt-server#s", &len);

		len = len / sizeof(u32);

		if (interrupt_server && (len > 0)) {
			while (len--) {
				if (interrupt_server[len] == hw_cpuid)
					return cpu_node;
			}
		} else {
			reg = get_property(cpu_node, "reg", &len);
			if (reg && (len > 0) && (reg[0] == hw_cpuid))
				return cpu_node;
		}
	}

	return NULL;
}
/* must hold reference to node during call */
static const int *of_get_associativity(struct device_node *dev)
{
	return get_property(dev, "ibm,associativity", NULL);
}
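/*
 * Illustrative (made-up) value: a node might carry
 * ibm,associativity = <4 0 0 0 1>, i.e. a leading count of 4 followed
 * by one domain id per level, highest level first. With
 * min_common_depth == 4, of_node_to_nid_single() below would pick the
 * final cell (1) as the Linux node id.
 */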
/* Returns nid in the range [0..MAX_NUMNODES-1], or -1 if no useful numa
 * information is found.
 */
static int of_node_to_nid_single(struct device_node *device)
{
	int nid = -1;
	const unsigned int *tmp;

	if (min_common_depth == -1)
		goto out;

	tmp = of_get_associativity(device);
	if (!tmp)
		goto out;

	/* the first cell of the property is the list length */
	if (tmp[0] >= min_common_depth)
		nid = tmp[min_common_depth];

	/* POWER4 LPAR uses 0xffff as invalid node */
	if (nid == 0xffff || nid >= MAX_NUMNODES)
		nid = -1;
out:
	return nid;
}
/* Walk the device tree upwards, looking for an associativity id */
int of_node_to_nid(struct device_node *device)
{
	struct device_node *tmp;
	int nid = -1;

	of_node_get(device);
	while (device) {
		nid = of_node_to_nid_single(device);
		if (nid != -1)
			break;

		tmp = device;
		device = of_get_parent(tmp);
		of_node_put(tmp);
	}
	of_node_put(device);

	return nid;
}
EXPORT_SYMBOL_GPL(of_node_to_nid);
/*
 * In theory, the "ibm,associativity" property may contain multiple
 * associativity lists because a resource may be multiply connected
 * into the machine.  This resource then has different associativity
 * characteristics relative to its multiple connections.  We ignore
 * this for now.  We also assume that all cpu and memory sets have
 * their distances represented at a common level.  This won't be
 * true for hierarchical NUMA.
 *
 * In any case the ibm,associativity-reference-points should give
 * the correct depth for a normal NUMA system.
 *
 * - Dave Hansen <haveblue@us.ibm.com>
 */
static int __init find_min_common_depth(void)
{
	int depth;
	const unsigned int *ref_points;
	struct device_node *rtas_root;
	unsigned int len;

	rtas_root = of_find_node_by_path("/rtas");
	if (!rtas_root)
		return -1;

	/*
	 * This property is 2 32-bit integers, each representing a level of
	 * depth in the associativity nodes.  The first is for an SMP
	 * configuration (should be all 0's) and the second is for a normal
	 * NUMA configuration.
	 */
	ref_points = get_property(rtas_root,
			"ibm,associativity-reference-points", &len);

	/* we read ref_points[1], so we need at least two cells */
	if (ref_points && (len >= 2 * sizeof(unsigned int))) {
		depth = ref_points[1];
	} else {
		dbg("NUMA: ibm,associativity-reference-points not found.\n");
		depth = -1;
	}
	of_node_put(rtas_root);

	return depth;
}
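/*
 * Illustrative (made-up) value: ibm,associativity-reference-points =
 * <0x2 0x4> would select depth 4 (the second cell) as min_common_depth
 * on a NUMA configuration.
 */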
static void __init get_n_mem_cells(int *n_addr_cells, int *n_size_cells)
{
	struct device_node *memory = NULL;

	memory = of_find_node_by_type(memory, "memory");
	if (!memory)
		panic("numa.c: No memory nodes found!");

	*n_addr_cells = of_n_addr_cells(memory);
	*n_size_cells = of_n_size_cells(memory);
	of_node_put(memory);
}
static unsigned long __devinit read_n_cells(int n, const unsigned int **buf)
{
	unsigned long result = 0;

	while (n--) {
		result = (result << 32) | **buf;
		(*buf)++;
	}
	return result;
}
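/*
 * Example: with n == 2 and *buf pointing at cells { 0x00000001, 0x00000000 },
 * read_n_cells() returns 0x100000000 and leaves *buf advanced past both cells.
 */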
/*
 * Figure out to which domain a cpu belongs and stick it there.
 * Return the id of the domain used.
 */
static int __cpuinit numa_setup_cpu(unsigned long lcpu)
{
	int nid = 0;
	struct device_node *cpu = find_cpu_node(lcpu);

	if (!cpu) {
		WARN_ON(1);
		goto out;
	}

	nid = of_node_to_nid_single(cpu);

	if (nid < 0 || !node_online(nid))
		nid = any_online_node(NODE_MASK_ALL);
out:
	map_cpu_to_node(lcpu, nid);

	of_node_put(cpu);

	return nid;
}
static int __cpuinit cpu_numa_callback(struct notifier_block *nfb,
				       unsigned long action,
				       void *hcpu)
{
	unsigned long lcpu = (unsigned long)hcpu;
	int ret = NOTIFY_DONE;

	switch (action) {
	case CPU_UP_PREPARE:
		numa_setup_cpu(lcpu);
		ret = NOTIFY_OK;
		break;
#ifdef CONFIG_HOTPLUG_CPU
	case CPU_DEAD:
	case CPU_UP_CANCELED:
		unmap_cpu_from_node(lcpu);
		ret = NOTIFY_OK;
		break;
#endif
	}
	return ret;
}
/*
 * Check and possibly modify a memory region to enforce the memory limit.
 *
 * Returns the size the region should have to enforce the memory limit.
 * This will either be the original value of size, a truncated value,
 * or zero. If the returned value of size is 0 the region should be
 * discarded as it lies wholly above the memory limit.
 */
static unsigned long __init numa_enforce_memory_limit(unsigned long start,
						      unsigned long size)
{
	/*
	 * We use lmb_end_of_DRAM() in here instead of memory_limit because
	 * we've already adjusted it for the limit and it takes care of
	 * having memory holes below the limit.
	 */
	if (!memory_limit)
		return size;

	if (start + size <= lmb_end_of_DRAM())
		return size;

	if (start >= lmb_end_of_DRAM())
		return 0;

	return lmb_end_of_DRAM() - start;
}
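/*
 * Worked example (made-up numbers): with lmb_end_of_DRAM() at 0x40000000,
 * a region (start 0x30000000, size 0x20000000) is trimmed to 0x10000000
 * bytes, while a region starting at 0x50000000 comes back as size 0 and
 * should be dropped by the caller.
 */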
/*
 * Extract NUMA information from the ibm,dynamic-reconfiguration-memory
 * node. This assumes n_mem_{addr,size}_cells have been set.
 */
static void __init parse_drconf_memory(struct device_node *memory)
{
	const unsigned int *lm, *dm, *aa;
	unsigned int ls, ld, la;
	unsigned int n, aam, aalen;
	unsigned long lmb_size, size;
	int nid, default_nid = 0;
	unsigned long start;	/* LMB addresses can exceed 32 bits */
	unsigned int ai, flags;

	lm = get_property(memory, "ibm,lmb-size", &ls);
	dm = get_property(memory, "ibm,dynamic-memory", &ld);
	aa = get_property(memory, "ibm,associativity-lookup-arrays", &la);
	if (!lm || !dm || !aa ||
	    ls < sizeof(unsigned int) || ld < sizeof(unsigned int) ||
	    la < 2 * sizeof(unsigned int))
		return;

	lmb_size = read_n_cells(n_mem_size_cells, &lm);
	n = *dm++;		/* number of LMBs */
	aam = *aa++;		/* number of associativity lists */
	aalen = *aa++;		/* length of each associativity list */
	if (ld < (n * (n_mem_addr_cells + 4) + 1) * sizeof(unsigned int) ||
	    la < (aam * aalen + 2) * sizeof(unsigned int))
		return;

	for (; n != 0; --n) {
		start = read_n_cells(n_mem_addr_cells, &dm);
		ai = dm[2];
		flags = dm[3];
		dm += 4;	/* skip DRC index, reserved, list index, flags */

		/* 0x80 == reserved, 0x8 = assigned to us */
		if ((flags & 0x80) || !(flags & 0x8))
			continue;

		nid = default_nid;
		/* flags & 0x40 means associativity index is invalid */
		if (min_common_depth > 0 && min_common_depth <= aalen &&
		    (flags & 0x40) == 0 && ai < aam) {
			/* this is like of_node_to_nid_single */
			nid = aa[ai * aalen + min_common_depth - 1];
			if (nid == 0xffff || nid >= MAX_NUMNODES)
				nid = default_nid;
		}
		node_set_online(nid);

		size = numa_enforce_memory_limit(start, lmb_size);
		if (!size)
			continue;

		add_active_range(nid, start >> PAGE_SHIFT,
				 (start >> PAGE_SHIFT) + (size >> PAGE_SHIFT));
	}
}
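/*
 * Layout note, inferred from the size check above: each ibm,dynamic-memory
 * entry is n_mem_addr_cells address cells followed by four 32-bit words.
 * The loop consumes word 2 as the associativity-lookup-array index and
 * word 3 as the flags word; the first two words (presumably the DRC index
 * and a reserved field) are skipped.
 */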
static int __init parse_numa_properties(void)
{
	struct device_node *cpu = NULL;
	struct device_node *memory = NULL;
	int default_nid = 0;
	unsigned long i;

	if (numa_enabled == 0) {
		printk(KERN_WARNING "NUMA disabled by user\n");
		return -1;
	}

	min_common_depth = find_min_common_depth();

	if (min_common_depth < 0)
		return min_common_depth;

	dbg("NUMA associativity depth for CPU/Memory: %d\n", min_common_depth);

	/*
	 * Even though we connect cpus to numa domains later in SMP
	 * init, we need to know the node ids now. This is because
	 * each node to be onlined must have NODE_DATA etc backing it.
	 */
	for_each_present_cpu(i) {
		int nid;

		cpu = find_cpu_node(i);
		BUG_ON(!cpu);
		nid = of_node_to_nid_single(cpu);
		of_node_put(cpu);

		/*
		 * Don't fall back to default_nid yet -- we will plug
		 * cpus into nodes once the memory scan has discovered
		 * the topology.
		 */
		if (nid < 0)
			continue;
		node_set_online(nid);
	}

	get_n_mem_cells(&n_mem_addr_cells, &n_mem_size_cells);
	memory = NULL;
	while ((memory = of_find_node_by_type(memory, "memory")) != NULL) {
		unsigned long start;
		unsigned long size;
		int nid;
		int ranges;
		const unsigned int *memcell_buf;
		unsigned int len;

		memcell_buf = get_property(memory,
			"linux,usable-memory", &len);
		if (!memcell_buf || len <= 0)
			memcell_buf = get_property(memory, "reg", &len);
		if (!memcell_buf || len <= 0)
			continue;

		/* ranges in cell */
		ranges = (len >> 2) / (n_mem_addr_cells + n_mem_size_cells);
new_range:
		/* these are order-sensitive, and modify the buffer pointer */
		start = read_n_cells(n_mem_addr_cells, &memcell_buf);
		size = read_n_cells(n_mem_size_cells, &memcell_buf);

		/*
		 * Assumption: either all memory nodes or none will
		 * have associativity properties.  If none, then
		 * everything goes to default_nid.
		 */
		nid = of_node_to_nid_single(memory);
		if (nid < 0)
			nid = default_nid;
		node_set_online(nid);

		if (!(size = numa_enforce_memory_limit(start, size))) {
			if (--ranges)
				goto new_range;
			else
				continue;
		}

		add_active_range(nid, start >> PAGE_SHIFT,
				 (start >> PAGE_SHIFT) + (size >> PAGE_SHIFT));

		if (--ranges)
			goto new_range;
	}

	/*
	 * Now do the same thing for each LMB listed in the ibm,dynamic-memory
	 * property in the ibm,dynamic-reconfiguration-memory node.
	 */
	memory = of_find_node_by_path("/ibm,dynamic-reconfiguration-memory");
	if (memory) {
		parse_drconf_memory(memory);
		of_node_put(memory);
	}

	return 0;
}
static void __init setup_nonnuma(void)
{
	unsigned long top_of_ram = lmb_end_of_DRAM();
	unsigned long total_ram = lmb_phys_mem_size();
	unsigned long start_pfn, end_pfn;
	unsigned int i;

	printk(KERN_DEBUG "Top of RAM: 0x%lx, Total RAM: 0x%lx\n",
	       top_of_ram, total_ram);
	printk(KERN_DEBUG "Memory hole size: %ldMB\n",
	       (top_of_ram - total_ram) >> 20);

	for (i = 0; i < lmb.memory.cnt; ++i) {
		start_pfn = lmb.memory.region[i].base >> PAGE_SHIFT;
		end_pfn = start_pfn + lmb_size_pages(&lmb.memory, i);
		add_active_range(0, start_pfn, end_pfn);
	}
	node_set_online(0);
}
void __init dump_numa_cpu_topology(void)
{
	unsigned int node;
	unsigned int cpu, count;

	if (min_common_depth == -1 || !numa_enabled)
		return;

	for_each_online_node(node) {
		printk(KERN_DEBUG "Node %d CPUs:", node);

		count = 0;
		/*
		 * If we used a CPU iterator here we would miss printing
		 * the holes in the cpumap.
		 */
		for (cpu = 0; cpu < NR_CPUS; cpu++) {
			if (cpu_isset(cpu, numa_cpumask_lookup_table[node])) {
				if (count == 0)
					printk(" %u", cpu);
				++count;
			} else {
				if (count > 1)
					printk("-%u", cpu - 1);
				count = 0;
			}
		}

		if (count > 1)
			printk("-%u", NR_CPUS - 1);
		printk("\n");
	}
}
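/*
 * Example output (hypothetical 8-way machine with all cpus on node 0):
 *
 *	Node 0 CPUs: 0-7
 */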
static void __init dump_numa_memory_topology(void)
{
	unsigned int node;
	unsigned int count;

	if (min_common_depth == -1 || !numa_enabled)
		return;

	for_each_online_node(node) {
		unsigned long i;

		printk(KERN_DEBUG "Node %d Memory:", node);

		count = 0;
		for (i = 0; i < lmb_end_of_DRAM();
		     i += (1 << SECTION_SIZE_BITS)) {
			if (early_pfn_to_nid(i >> PAGE_SHIFT) == node) {
				if (count == 0)
					printk(" 0x%lx", i);
				++count;
			} else {
				if (count > 0)
					printk("-0x%lx", i);
				count = 0;
			}
		}

		if (count > 0)
			printk("-0x%lx", i);
		printk("\n");
	}
}
/*
 * Allocate some memory, satisfying the lmb or bootmem allocator where
 * required. nid is the preferred node and end is the physical address of
 * the highest address in the node.
 *
 * Returns the physical address of the memory.
 */
static void __init *careful_allocation(int nid, unsigned long size,
				       unsigned long align,
				       unsigned long end_pfn)
{
	int new_nid;
	unsigned long ret = __lmb_alloc_base(size, align,
					     end_pfn << PAGE_SHIFT);

	/* retry over all memory */
	if (!ret)
		ret = __lmb_alloc_base(size, align, lmb_end_of_DRAM());

	if (!ret)
		panic("numa.c: cannot allocate %lu bytes on node %d",
		      size, nid);

	/*
	 * If the memory came from a previously allocated node, we must
	 * retry with the bootmem allocator.
	 */
	new_nid = early_pfn_to_nid(ret >> PAGE_SHIFT);
	if (new_nid < nid) {
		ret = (unsigned long)__alloc_bootmem_node(NODE_DATA(new_nid),
				size, align, 0);

		if (!ret)
			panic("numa.c: cannot allocate %lu bytes on node %d",
			      size, new_nid);

		ret = __pa(ret);

		dbg("alloc_bootmem %lx %lx\n", ret, size);
	}

	return (void *)ret;
}
static struct notifier_block __cpuinitdata ppc64_numa_nb = {
	.notifier_call = cpu_numa_callback,
	.priority = 1 /* Must run before sched domains notifier. */
};
void __init do_init_bootmem(void)
{
	int nid;
	unsigned int i;

	min_low_pfn = 0;
	max_low_pfn = lmb_end_of_DRAM() >> PAGE_SHIFT;
	max_pfn = max_low_pfn;

	if (parse_numa_properties())
		setup_nonnuma();
	else
		dump_numa_memory_topology();

	register_cpu_notifier(&ppc64_numa_nb);
	cpu_numa_callback(&ppc64_numa_nb, CPU_UP_PREPARE,
			  (void *)(unsigned long)boot_cpuid);

	for_each_online_node(nid) {
		unsigned long start_pfn, end_pfn;
		unsigned long bootmem_paddr;
		unsigned long bootmap_pages;

		get_pfn_range_for_nid(nid, &start_pfn, &end_pfn);

		/* Allocate the node structure node local if possible */
		NODE_DATA(nid) = careful_allocation(nid,
					sizeof(struct pglist_data),
					SMP_CACHE_BYTES, end_pfn);
		NODE_DATA(nid) = __va(NODE_DATA(nid));
		memset(NODE_DATA(nid), 0, sizeof(struct pglist_data));

		dbg("node %d\n", nid);
		dbg("NODE_DATA() = %p\n", NODE_DATA(nid));

		NODE_DATA(nid)->bdata = &plat_node_bdata[nid];
		NODE_DATA(nid)->node_start_pfn = start_pfn;
		NODE_DATA(nid)->node_spanned_pages = end_pfn - start_pfn;

		if (NODE_DATA(nid)->node_spanned_pages == 0)
			continue;

		dbg("start_paddr = %lx\n", start_pfn << PAGE_SHIFT);
		dbg("end_paddr = %lx\n", end_pfn << PAGE_SHIFT);

		bootmap_pages = bootmem_bootmap_pages(end_pfn - start_pfn);
		bootmem_paddr = (unsigned long)careful_allocation(nid,
					bootmap_pages << PAGE_SHIFT,
					PAGE_SIZE, end_pfn);
		memset(__va(bootmem_paddr), 0, bootmap_pages << PAGE_SHIFT);

		dbg("bootmap_paddr = %lx\n", bootmem_paddr);

		init_bootmem_node(NODE_DATA(nid), bootmem_paddr >> PAGE_SHIFT,
				  start_pfn, end_pfn);

		free_bootmem_with_active_regions(nid, end_pfn);

		/* Mark reserved regions on this node */
		for (i = 0; i < lmb.reserved.cnt; i++) {
			unsigned long physbase = lmb.reserved.region[i].base;
			unsigned long size = lmb.reserved.region[i].size;
			unsigned long start_paddr = start_pfn << PAGE_SHIFT;
			unsigned long end_paddr = end_pfn << PAGE_SHIFT;

			if (early_pfn_to_nid(physbase >> PAGE_SHIFT) != nid &&
			    early_pfn_to_nid((physbase+size-1) >> PAGE_SHIFT) != nid)
				continue;

			if (physbase < end_paddr &&
			    (physbase+size) > start_paddr) {
				/* overlaps */
				if (physbase < start_paddr) {
					size -= start_paddr - physbase;
					physbase = start_paddr;
				}

				if (size > end_paddr - physbase)
					size = end_paddr - physbase;

				dbg("reserve_bootmem %lx %lx\n", physbase,
				    size);
				reserve_bootmem_node(NODE_DATA(nid), physbase,
						     size);
			}
		}

		sparse_memory_present_with_active_regions(nid);
	}
}
void __init paging_init(void)
{
	unsigned long max_zone_pfns[MAX_NR_ZONES];

	memset(max_zone_pfns, 0, sizeof(max_zone_pfns));
	max_zone_pfns[ZONE_DMA] = lmb_end_of_DRAM() >> PAGE_SHIFT;
	free_area_init_nodes(max_zone_pfns);
}
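/*
 * Note: all of DRAM lands in a single zone (ZONE_DMA) here; this code
 * does not use the low-memory zone splits that some other architectures
 * need.
 */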
static int __init early_numa(char *p)
{
	if (!p)
		return 0;

	if (strstr(p, "off"))
		numa_enabled = 0;

	if (strstr(p, "debug"))
		numa_debug = 1;

	return 0;
}
early_param("numa", early_numa);
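/*
 * Usage: boot with "numa=off" to disable NUMA entirely, or "numa=debug"
 * to enable the dbg() messages in this file.
 */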
#ifdef CONFIG_MEMORY_HOTPLUG
/*
 * Find the node associated with a hot added memory section.  Section
 * corresponds to a SPARSEMEM section, not an LMB.  It is assumed that
 * sections are fully contained within a single LMB.
 */
int hot_add_scn_to_nid(unsigned long scn_addr)
{
	struct device_node *memory = NULL;
	nodemask_t nodes;
	int default_nid = any_online_node(NODE_MASK_ALL);
	int nid;

	if (!numa_enabled || (min_common_depth < 0))
		return default_nid;

	while ((memory = of_find_node_by_type(memory, "memory")) != NULL) {
		unsigned long start, size;
		int ranges;
		const unsigned int *memcell_buf;
		unsigned int len;

		memcell_buf = get_property(memory, "reg", &len);
		if (!memcell_buf || len <= 0)
			continue;

		/* ranges in cell */
		ranges = (len >> 2) / (n_mem_addr_cells + n_mem_size_cells);
ha_new_range:
		start = read_n_cells(n_mem_addr_cells, &memcell_buf);
		size = read_n_cells(n_mem_size_cells, &memcell_buf);
		nid = of_node_to_nid_single(memory);

		/* Domains not present at boot default to 0 */
		if (nid < 0 || !node_online(nid))
			nid = default_nid;

		if ((scn_addr >= start) && (scn_addr < (start + size))) {
			of_node_put(memory);
			goto got_nid;
		}

		if (--ranges)		/* process all ranges in cell */
			goto ha_new_range;
	}
	BUG();	/* section address should be found above */
	return 0;

	/* Temporary code to ensure that returned node is not empty */
got_nid:
	if (!node_online(nid)) {
		nodes = node_online_map;
		while (NODE_DATA(nid)->node_spanned_pages == 0) {
			node_clear(nid, nodes);
			nid = any_online_node(nodes);
		}
	}
	return nid;
}
#endif /* CONFIG_MEMORY_HOTPLUG */