/*
 * pSeries NUMA support
 *
 * Copyright (C) 2002 Anton Blanchard <anton@au.ibm.com>, IBM
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/threads.h>
#include <linux/bootmem.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/mmzone.h>
#include <linux/module.h>
#include <linux/nodemask.h>
#include <linux/cpu.h>
#include <linux/notifier.h>
#include <asm/lmb.h>
#include <asm/machdep.h>
#include <asm/abs_addr.h>

static int numa_enabled = 1;

static int numa_debug;
#define dbg(args...) if (numa_debug) { printk(KERN_INFO args); }

#ifdef DEBUG_NUMA
#define ARRAY_INITIALISER -1
#else
#define ARRAY_INITIALISER 0
#endif

int numa_cpu_lookup_table[NR_CPUS] = { [ 0 ... (NR_CPUS - 1)] =
	ARRAY_INITIALISER};
char *numa_memory_lookup_table;
cpumask_t numa_cpumask_lookup_table[MAX_NUMNODES];
int nr_cpus_in_node[MAX_NUMNODES] = { [0 ... (MAX_NUMNODES - 1)] = 0};

struct pglist_data *node_data[MAX_NUMNODES];
bootmem_data_t __initdata plat_node_bdata[MAX_NUMNODES];
static int min_common_depth;

/*
 * We need somewhere to store start/span for each node until we have
 * allocated the real node_data structures.
 */
static struct {
	unsigned long node_start_pfn;
	unsigned long node_end_pfn;
	unsigned long node_present_pages;
} init_node_data[MAX_NUMNODES] __initdata;

EXPORT_SYMBOL(node_data);
EXPORT_SYMBOL(numa_cpu_lookup_table);
EXPORT_SYMBOL(numa_memory_lookup_table);
EXPORT_SYMBOL(numa_cpumask_lookup_table);
EXPORT_SYMBOL(nr_cpus_in_node);

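/*
 * Record that a logical cpu belongs to a node: update the cpu lookup table
 * and the node's cpumask and cpu count.
 */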
static inline void map_cpu_to_node(int cpu, int node)
{
	numa_cpu_lookup_table[cpu] = node;
	if (!(cpu_isset(cpu, numa_cpumask_lookup_table[node]))) {
		cpu_set(cpu, numa_cpumask_lookup_table[node]);
		nr_cpus_in_node[node]++;
	}
}

#ifdef CONFIG_HOTPLUG_CPU
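/* Undo map_cpu_to_node() when a cpu is taken out of its node. */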
static void unmap_cpu_from_node(unsigned long cpu)
{
	int node = numa_cpu_lookup_table[cpu];

	dbg("removing cpu %lu from node %d\n", cpu, node);

	if (cpu_isset(cpu, numa_cpumask_lookup_table[node])) {
		cpu_clear(cpu, numa_cpumask_lookup_table[node]);
		nr_cpus_in_node[node]--;
	} else {
		printk(KERN_ERR "WARNING: cpu %lu not found in node %d\n",
		       cpu, node);
	}
}
#endif /* CONFIG_HOTPLUG_CPU */

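/*
 * Find the device tree "cpu" node backing a logical cpu, by matching either
 * its ibm,ppc-interrupt-server#s list or its reg property against the
 * hardware cpu id.
 */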
static struct device_node * __devinit find_cpu_node(unsigned int cpu)
{
	unsigned int hw_cpuid = get_hard_smp_processor_id(cpu);
	struct device_node *cpu_node = NULL;
	unsigned int *interrupt_server, *reg;
	int len;

	while ((cpu_node = of_find_node_by_type(cpu_node, "cpu")) != NULL) {
		/* Try interrupt server first */
		interrupt_server = (unsigned int *)get_property(cpu_node,
					"ibm,ppc-interrupt-server#s", &len);

		len = len / sizeof(u32);

		if (interrupt_server && (len > 0)) {
			while (len--) {
				if (interrupt_server[len] == hw_cpuid)
					return cpu_node;
			}
		} else {
			reg = (unsigned int *)get_property(cpu_node,
							   "reg", &len);
			if (reg && (len > 0) && (reg[0] == hw_cpuid))
				return cpu_node;
		}
	}

	return NULL;
}

/* must hold reference to node during call */
static int *of_get_associativity(struct device_node *dev)
{
	return (unsigned int *)get_property(dev, "ibm,associativity", NULL);
}

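/*
 * Map a device node to its NUMA domain using the associativity entry at
 * min_common_depth; falls back to domain 0 when no information is available.
 */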
static int of_node_numa_domain(struct device_node *device)
{
	int numa_domain;
	unsigned int *tmp;

	if (min_common_depth == -1)
		return 0;

	tmp = of_get_associativity(device);
	if (tmp && (tmp[0] >= min_common_depth)) {
		numa_domain = tmp[min_common_depth];
	} else {
		dbg("WARNING: no NUMA information for %s\n",
		    device->full_name);
		numa_domain = 0;
	}
	return numa_domain;
}

/*
 * In theory, the "ibm,associativity" property may contain multiple
 * associativity lists because a resource may be multiply connected
 * into the machine. This resource then has different associativity
 * characteristics relative to its multiple connections. We ignore
 * this for now. We also assume that all cpu and memory sets have
 * their distances represented at a common level. This won't be
 * true for hierarchical NUMA.
 *
 * In any case the ibm,associativity-reference-points should give
 * the correct depth for a normal NUMA system.
 *
 * - Dave Hansen <haveblue@us.ibm.com>
 */
static int __init find_min_common_depth(void)
{
	int depth;
	unsigned int *ref_points;
	struct device_node *rtas_root;
	int len;

	rtas_root = of_find_node_by_path("/rtas");

	if (!rtas_root)
		return -1;

	/*
	 * this property is 2 32-bit integers, each representing a level of
	 * depth in the associativity nodes. The first is for an SMP
	 * configuration (should be all 0's) and the second is for a normal
	 * NUMA configuration.
	 */
	ref_points = (unsigned int *)get_property(rtas_root,
			"ibm,associativity-reference-points", &len);

	if ((len >= 1) && ref_points) {
		depth = ref_points[1];
	} else {
		dbg("WARNING: could not find NUMA "
		    "associativity reference point\n");
		depth = -1;
	}

	of_node_put(rtas_root);
	return depth;
}

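/*
 * Helpers to find out how many address and size cells a "memory" node's
 * "reg" property uses, so read_n_cells() can decode it.
 */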
static int __init get_mem_addr_cells(void)
{
	struct device_node *memory = NULL;
	int rc;

	memory = of_find_node_by_type(memory, "memory");
	if (!memory)
		return 0; /* it won't matter */

	rc = prom_n_addr_cells(memory);
	return rc;
}

static int __init get_mem_size_cells(void)
{
	struct device_node *memory = NULL;
	int rc;

	memory = of_find_node_by_type(memory, "memory");
	if (!memory)
		return 0; /* it won't matter */

	rc = prom_n_size_cells(memory);
	return rc;
}

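/*
 * Assemble an n-cell (32 bits each) big-endian value and advance the buffer
 * pointer past the cells that were consumed.
 */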
static unsigned long read_n_cells(int n, unsigned int **buf)
{
	unsigned long result = 0;

	while (n--) {
		result = (result << 32) | **buf;
		(*buf)++;
	}
	return result;
}

/*
 * Figure out to which domain a cpu belongs and stick it there.
 * Return the id of the domain used.
 */
static int numa_setup_cpu(unsigned long lcpu)
{
	int numa_domain = 0;
	struct device_node *cpu = find_cpu_node(lcpu);

	if (!cpu) {
		WARN_ON(1);
		goto out;
	}

	numa_domain = of_node_numa_domain(cpu);

	if (numa_domain >= num_online_nodes()) {
		/*
		 * POWER4 LPAR uses 0xffff as invalid node,
		 * don't warn in this case.
		 */
		if (numa_domain != 0xffff)
			printk(KERN_ERR "WARNING: cpu %ld "
			       "maps to invalid NUMA node %d\n",
			       lcpu, numa_domain);
		numa_domain = 0;
	}
out:
	node_set_online(numa_domain);

	map_cpu_to_node(lcpu, numa_domain);

	of_node_put(cpu);

	return numa_domain;
}

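/*
 * CPU hotplug notifier: place a cpu on its home node as it is brought up
 * (node 0 when no NUMA information is available) and drop the mapping again
 * if the bring-up is cancelled.
 */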
static int cpu_numa_callback(struct notifier_block *nfb,
			     unsigned long action,
			     void *hcpu)
{
	unsigned long lcpu = (unsigned long)hcpu;
	int ret = NOTIFY_DONE;

	switch (action) {
	case CPU_UP_PREPARE:
		if (min_common_depth == -1 || !numa_enabled)
			map_cpu_to_node(lcpu, 0);
		else
			numa_setup_cpu(lcpu);
		ret = NOTIFY_OK;
		break;
#ifdef CONFIG_HOTPLUG_CPU
	case CPU_UP_CANCELED:
		unmap_cpu_from_node(lcpu);
		ret = NOTIFY_OK;
		break;
#endif
	}
	return ret;
}

/*
 * Check and possibly modify a memory region to enforce the memory limit.
 *
 * Returns the size the region should have to enforce the memory limit.
 * This will either be the original value of size, a truncated value,
 * or zero. If the returned value of size is 0 the region should be
 * discarded as it lies wholly above the memory limit.
 */
static unsigned long __init numa_enforce_memory_limit(unsigned long start,
						      unsigned long size)
{
	/*
	 * We use lmb_end_of_DRAM() in here instead of memory_limit because
	 * we've already adjusted it for the limit and it takes care of
	 * having memory holes below the limit.
	 */
	extern unsigned long memory_limit;

	if (!memory_limit)
		return size;

	if (start + size <= lmb_end_of_DRAM())
		return size;

	if (start >= lmb_end_of_DRAM())
		return 0;

	return lmb_end_of_DRAM() - start;
}

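/*
 * Walk the device tree, assign every memory range to its NUMA domain,
 * record each node's span in init_node_data and fill in the
 * numa_memory_lookup_table. Returns non-zero when no usable NUMA
 * information was found.
 */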
static int __init parse_numa_properties(void)
{
	struct device_node *cpu = NULL;
	struct device_node *memory = NULL;
	int addr_cells, size_cells;
	int max_domain = 0;
	long entries = lmb_end_of_DRAM() >> MEMORY_INCREMENT_SHIFT;
	unsigned long i;

	if (numa_enabled == 0) {
		printk(KERN_WARNING "NUMA disabled by user\n");
		return -1;
	}

	numa_memory_lookup_table =
		(char *)abs_to_virt(lmb_alloc(entries * sizeof(char), 1));
	memset(numa_memory_lookup_table, 0, entries * sizeof(char));

	for (i = 0; i < entries; i++)
		numa_memory_lookup_table[i] = ARRAY_INITIALISER;

	min_common_depth = find_min_common_depth();

	dbg("NUMA associativity depth for CPU/Memory: %d\n", min_common_depth);
	if (min_common_depth < 0)
		return min_common_depth;

	max_domain = numa_setup_cpu(boot_cpuid);

	/*
	 * Even though we connect cpus to numa domains later in SMP init,
	 * we need to know the maximum node id now. This is because each
	 * node id must have NODE_DATA etc backing it.
	 * As a result of hotplug we could still have cpus appear later on
	 * with larger node ids. In that case we force the cpu into node 0.
	 */
	for_each_cpu(i) {
		int numa_domain;

		cpu = find_cpu_node(i);

		if (cpu) {
			numa_domain = of_node_numa_domain(cpu);
			of_node_put(cpu);

			if (numa_domain < MAX_NUMNODES &&
			    max_domain < numa_domain)
				max_domain = numa_domain;
		}
	}

	addr_cells = get_mem_addr_cells();
	size_cells = get_mem_size_cells();
	memory = NULL;
	while ((memory = of_find_node_by_type(memory, "memory")) != NULL) {
		unsigned long start;
		unsigned long size;
		int numa_domain;
		int ranges;
		int len;
		unsigned int *memcell_buf;

		memcell_buf = (unsigned int *)get_property(memory, "reg", &len);
		if (!memcell_buf || len <= 0)
			continue;

		ranges = memory->n_addrs;
new_range:
		/* these are order-sensitive, and modify the buffer pointer */
		start = read_n_cells(addr_cells, &memcell_buf);
		size = read_n_cells(size_cells, &memcell_buf);

		start = _ALIGN_DOWN(start, MEMORY_INCREMENT);
		size = _ALIGN_UP(size, MEMORY_INCREMENT);

		numa_domain = of_node_numa_domain(memory);

		if (numa_domain >= MAX_NUMNODES) {
			if (numa_domain != 0xffff)
				printk(KERN_ERR "WARNING: memory at %lx maps "
				       "to invalid NUMA node %d\n", start,
				       numa_domain);
			numa_domain = 0;
		}

		if (max_domain < numa_domain)
			max_domain = numa_domain;

		if (! (size = numa_enforce_memory_limit(start, size))) {
			if (--ranges)
				goto new_range;
			else
				continue;
		}

		/*
		 * Initialize new node struct, or add to an existing one.
		 */
		if (init_node_data[numa_domain].node_end_pfn) {
			if ((start / PAGE_SIZE) <
			    init_node_data[numa_domain].node_start_pfn)
				init_node_data[numa_domain].node_start_pfn =
					start / PAGE_SIZE;
			if (((start / PAGE_SIZE) + (size / PAGE_SIZE)) >
			    init_node_data[numa_domain].node_end_pfn)
				init_node_data[numa_domain].node_end_pfn =
					(start / PAGE_SIZE) +
					(size / PAGE_SIZE);

			init_node_data[numa_domain].node_present_pages +=
				size / PAGE_SIZE;
		} else {
			node_set_online(numa_domain);

			init_node_data[numa_domain].node_start_pfn =
				start / PAGE_SIZE;
			init_node_data[numa_domain].node_end_pfn =
				init_node_data[numa_domain].node_start_pfn +
				size / PAGE_SIZE;
			init_node_data[numa_domain].node_present_pages =
				size / PAGE_SIZE;
		}

		for (i = start; i < (start + size); i += MEMORY_INCREMENT)
			numa_memory_lookup_table[i >> MEMORY_INCREMENT_SHIFT] =
				numa_domain;

		if (--ranges)
			goto new_range;
	}

	for (i = 0; i <= max_domain; i++)
		node_set_online(i);

	return 0;
}

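/*
 * Fall back to a single node covering all of memory when no usable NUMA
 * information was found in the device tree.
 */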
static void __init setup_nonnuma(void)
{
	unsigned long top_of_ram = lmb_end_of_DRAM();
	unsigned long total_ram = lmb_phys_mem_size();
	unsigned long i;

	printk(KERN_INFO "Top of RAM: 0x%lx, Total RAM: 0x%lx\n",
	       top_of_ram, total_ram);
	printk(KERN_INFO "Memory hole size: %ldMB\n",
	       (top_of_ram - total_ram) >> 20);

	if (!numa_memory_lookup_table) {
		long entries = top_of_ram >> MEMORY_INCREMENT_SHIFT;
		numa_memory_lookup_table =
			(char *)abs_to_virt(lmb_alloc(entries * sizeof(char), 1));
		memset(numa_memory_lookup_table, 0, entries * sizeof(char));
		for (i = 0; i < entries; i++)
			numa_memory_lookup_table[i] = ARRAY_INITIALISER;
	}

	map_cpu_to_node(boot_cpuid, 0);

	node_set_online(0);

	init_node_data[0].node_start_pfn = 0;
	init_node_data[0].node_end_pfn = lmb_end_of_DRAM() / PAGE_SIZE;
	init_node_data[0].node_present_pages = total_ram / PAGE_SIZE;

	for (i = 0; i < top_of_ram; i += MEMORY_INCREMENT)
		numa_memory_lookup_table[i >> MEMORY_INCREMENT_SHIFT] = 0;
}

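/* Print the physical memory ranges assigned to each online node. */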
static void __init dump_numa_topology(void)
{
	unsigned int node;
	unsigned int count;

	if (min_common_depth == -1 || !numa_enabled)
		return;

	for_each_online_node(node) {
		unsigned long i;

		printk(KERN_INFO "Node %d Memory:", node);

		count = 0;

		for (i = 0; i < lmb_end_of_DRAM(); i += MEMORY_INCREMENT) {
			if (numa_memory_lookup_table[i >> MEMORY_INCREMENT_SHIFT] == node) {
				if (count == 0)
					printk(" 0x%lx", i);
				++count;
			} else {
				if (count > 0)
					printk("-0x%lx", i);
				count = 0;
			}
		}

		if (count > 0)
			printk("-0x%lx", i);
		printk("\n");
	}
}

/*
 * Allocate some memory, satisfying the lmb or bootmem allocator where
 * required. nid is the preferred node and end is the physical address of
 * the highest address in the node.
 *
 * Returns the physical address of the memory.
 */
static unsigned long careful_allocation(int nid, unsigned long size,
					unsigned long align, unsigned long end)
{
	unsigned long ret = lmb_alloc_base(size, align, end);

	/* retry over all memory */
	if (!ret)
		ret = lmb_alloc_base(size, align, lmb_end_of_DRAM());

	if (!ret)
		panic("numa.c: cannot allocate %lu bytes on node %d",
		      size, nid);

	/*
	 * If the memory came from a previously allocated node, we must
	 * retry with the bootmem allocator.
	 */
	if (pa_to_nid(ret) < nid) {
		nid = pa_to_nid(ret);
		ret = (unsigned long)__alloc_bootmem_node(NODE_DATA(nid),
				size, align, 0);

		if (!ret)
			panic("numa.c: cannot allocate %lu bytes on node %d",
			      size, nid);

		ret = virt_to_abs(ret);

		dbg("alloc_bootmem %lx %lx\n", ret, size);
	}

	return ret;
}

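/*
 * Set up a bootmem allocator for every online node, hand each node its own
 * memory ranges and re-reserve the lmb reserved regions that fall inside it.
 */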
void __init do_init_bootmem(void)
{
	int nid;
	int addr_cells, size_cells;
	struct device_node *memory = NULL;
	static struct notifier_block ppc64_numa_nb = {
		.notifier_call = cpu_numa_callback,
		.priority = 1 /* Must run before sched domains notifier. */
	};

	min_low_pfn = 0;
	max_low_pfn = lmb_end_of_DRAM() >> PAGE_SHIFT;
	max_pfn = max_low_pfn;

	if (parse_numa_properties())
		setup_nonnuma();
	else
		dump_numa_topology();

	register_cpu_notifier(&ppc64_numa_nb);

	for_each_online_node(nid) {
		unsigned long start_paddr, end_paddr;
		int i;
		unsigned long bootmem_paddr;
		unsigned long bootmap_pages;

		start_paddr = init_node_data[nid].node_start_pfn * PAGE_SIZE;
		end_paddr = init_node_data[nid].node_end_pfn * PAGE_SIZE;

		/* Allocate the node structure node local if possible */
		NODE_DATA(nid) = (struct pglist_data *)careful_allocation(nid,
					sizeof(struct pglist_data),
					SMP_CACHE_BYTES, end_paddr);
		NODE_DATA(nid) = abs_to_virt(NODE_DATA(nid));
		memset(NODE_DATA(nid), 0, sizeof(struct pglist_data));

		dbg("node %d\n", nid);
		dbg("NODE_DATA() = %p\n", NODE_DATA(nid));

		NODE_DATA(nid)->bdata = &plat_node_bdata[nid];
		NODE_DATA(nid)->node_start_pfn =
			init_node_data[nid].node_start_pfn;
		NODE_DATA(nid)->node_spanned_pages =
			end_paddr - start_paddr;

		if (NODE_DATA(nid)->node_spanned_pages == 0)
			continue;

		dbg("start_paddr = %lx\n", start_paddr);
		dbg("end_paddr = %lx\n", end_paddr);

		bootmap_pages = bootmem_bootmap_pages((end_paddr - start_paddr) >> PAGE_SHIFT);

		bootmem_paddr = careful_allocation(nid,
				bootmap_pages << PAGE_SHIFT,
				PAGE_SIZE, end_paddr);
		memset(abs_to_virt(bootmem_paddr), 0,
		       bootmap_pages << PAGE_SHIFT);
		dbg("bootmap_paddr = %lx\n", bootmem_paddr);

		init_bootmem_node(NODE_DATA(nid), bootmem_paddr >> PAGE_SHIFT,
				  start_paddr >> PAGE_SHIFT,
				  end_paddr >> PAGE_SHIFT);

		/*
		 * We need to do another scan of all memory sections to
		 * associate memory with the correct node.
		 */
		addr_cells = get_mem_addr_cells();
		size_cells = get_mem_size_cells();
		memory = NULL;
		while ((memory = of_find_node_by_type(memory, "memory")) != NULL) {
			unsigned long mem_start, mem_size;
			int numa_domain, ranges;
			unsigned int *memcell_buf;
			int len;

			memcell_buf = (unsigned int *)get_property(memory, "reg", &len);
			if (!memcell_buf || len <= 0)
				continue;

			ranges = memory->n_addrs;	/* ranges in cell */
new_range:
			mem_start = read_n_cells(addr_cells, &memcell_buf);
			mem_size = read_n_cells(size_cells, &memcell_buf);
			numa_domain = numa_enabled ? of_node_numa_domain(memory) : 0;

			if (numa_domain != nid)
				continue;

			mem_size = numa_enforce_memory_limit(mem_start, mem_size);
			if (mem_size) {
				dbg("free_bootmem %lx %lx\n", mem_start, mem_size);
				free_bootmem_node(NODE_DATA(nid), mem_start, mem_size);
			}

			if (--ranges)		/* process all ranges in cell */
				goto new_range;
		}

		/*
		 * Mark reserved regions on this node
		 */
		for (i = 0; i < lmb.reserved.cnt; i++) {
			unsigned long physbase = lmb.reserved.region[i].physbase;
			unsigned long size = lmb.reserved.region[i].size;

			if (pa_to_nid(physbase) != nid &&
			    pa_to_nid(physbase+size-1) != nid)
				continue;

			if (physbase < end_paddr &&
			    (physbase+size) > start_paddr) {
				/* overlaps */
				if (physbase < start_paddr) {
					size -= start_paddr - physbase;
					physbase = start_paddr;
				}

				if (size > end_paddr - physbase)
					size = end_paddr - physbase;

				dbg("reserve_bootmem %lx %lx\n", physbase,
				    size);
				reserve_bootmem_node(NODE_DATA(nid), physbase,
						     size);
			}
		}
	}
}

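/*
 * Build the zone size and hole arrays for each online node and hand them to
 * free_area_init_node(); all of a node's memory goes into ZONE_DMA here.
 */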
void __init paging_init(void)
{
	unsigned long zones_size[MAX_NR_ZONES];
	unsigned long zholes_size[MAX_NR_ZONES];
	int nid;

	memset(zones_size, 0, sizeof(zones_size));
	memset(zholes_size, 0, sizeof(zholes_size));

	for_each_online_node(nid) {
		unsigned long start_pfn;
		unsigned long end_pfn;

		start_pfn = init_node_data[nid].node_start_pfn;
		end_pfn = init_node_data[nid].node_end_pfn;

		zones_size[ZONE_DMA] = end_pfn - start_pfn;
		zholes_size[ZONE_DMA] = zones_size[ZONE_DMA] -
			init_node_data[nid].node_present_pages;

		dbg("free_area_init node %d %lx %lx (hole: %lx)\n", nid,
		    zones_size[ZONE_DMA], start_pfn, zholes_size[ZONE_DMA]);

		free_area_init_node(nid, NODE_DATA(nid), zones_size,
				    start_pfn, zholes_size);
	}
}

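/* Handle the "numa=off" and "numa=debug" early command line options. */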
static int __init early_numa(char *p)
{
	if (!p)
		return 0;

	if (strstr(p, "off"))
		numa_enabled = 0;

	if (strstr(p, "debug"))
		numa_debug = 1;

	return 0;
}
early_param("numa", early_numa);