/*
 * Copyright (C) 2002 Anton Blanchard <anton@au.ibm.com>, IBM
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */
#include <linux/threads.h>
#include <linux/bootmem.h>
#include <linux/init.h>
#include <linux/mmzone.h>
#include <linux/module.h>
#include <linux/nodemask.h>
#include <linux/cpu.h>
#include <linux/notifier.h>
#include <linux/lmb.h>

#include <asm/sparsemem.h>
#include <asm/system.h>
static int numa_enabled = 1;

static char *cmdline __initdata;

static int numa_debug;
#define dbg(args...) if (numa_debug) { printk(KERN_INFO args); }
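/*
 * numa_cpu_lookup_table maps each logical cpu to its node;
 * numa_cpumask_lookup_table holds the set of cpus on each node;
 * node_data is the per-node pglist_data, set up in do_init_bootmem().
 */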
int numa_cpu_lookup_table[NR_CPUS];
cpumask_t numa_cpumask_lookup_table[MAX_NUMNODES];
struct pglist_data *node_data[MAX_NUMNODES];

EXPORT_SYMBOL(numa_cpu_lookup_table);
EXPORT_SYMBOL(numa_cpumask_lookup_table);
EXPORT_SYMBOL(node_data);
static int min_common_depth;
static int n_mem_addr_cells, n_mem_size_cells;
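
/*
 * Carve memory into fake NUMA nodes at the boundaries given by the
 * "numa=fake=" command line option (parsed into cmdline by early_numa()
 * below, e.g. "numa=fake=2G,4G"; each value is read with memparse()).
 * Returns 1 when a new fake node was created, 0 otherwise.
 */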
static int __cpuinit fake_numa_create_new_node(unsigned long end_pfn,
					       unsigned int *nid)
{
	unsigned long long mem;
	char *p = cmdline;
	static unsigned int fake_nid;
	static unsigned long long curr_boundary;

	/*
	 * Modify node id, iff we started creating NUMA nodes.
	 * We want to continue from where we left off the last time.
	 */
	if (fake_nid)
		*nid = fake_nid;
	/*
	 * In case there are no more arguments to parse, the
	 * node_id should be the same as the last fake node id
	 * (we've handled this above).
	 */
	if (!p)
		return 0;
	mem = memparse(p, &p);
	if (!mem)
		return 0;

	if (mem < curr_boundary)
		return 0;

	curr_boundary = mem;

	if ((end_pfn << PAGE_SHIFT) > mem) {
		/*
		 * Skip commas and spaces
		 */
		while (*p == ',' || *p == ' ' || *p == '\t')
			p++;

		cmdline = p;
		fake_nid++;
		*nid = fake_nid;
		dbg("created new fake_node with id %d\n", fake_nid);
		return 1;
	}
	return 0;
}

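/* Record the cpu-to-node mapping in both lookup tables. */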
static void __cpuinit map_cpu_to_node(int cpu, int node)
{
	numa_cpu_lookup_table[cpu] = node;

	dbg("adding cpu %d to node %d\n", cpu, node);

	if (!(cpu_isset(cpu, numa_cpumask_lookup_table[node])))
		cpu_set(cpu, numa_cpumask_lookup_table[node]);
}

#ifdef CONFIG_HOTPLUG_CPU
static void unmap_cpu_from_node(unsigned long cpu)
{
	int node = numa_cpu_lookup_table[cpu];

	dbg("removing cpu %lu from node %d\n", cpu, node);

	if (cpu_isset(cpu, numa_cpumask_lookup_table[node])) {
		cpu_clear(cpu, numa_cpumask_lookup_table[node]);
	} else {
		printk(KERN_ERR "WARNING: cpu %lu not found in node %d\n",
		       cpu, node);
	}
}
#endif /* CONFIG_HOTPLUG_CPU */

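/*
 * Find the device tree node for a logical cpu by matching its hardware
 * id against "ibm,ppc-interrupt-server#s" (or "reg" as a fallback).
 */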
static struct device_node * __cpuinit find_cpu_node(unsigned int cpu)
{
	unsigned int hw_cpuid = get_hard_smp_processor_id(cpu);
	struct device_node *cpu_node = NULL;
	const unsigned int *interrupt_server, *reg;
	int len;

	while ((cpu_node = of_find_node_by_type(cpu_node, "cpu")) != NULL) {
		/* Try interrupt server first */
		interrupt_server = of_get_property(cpu_node,
					"ibm,ppc-interrupt-server#s", &len);

		len = len / sizeof(u32);

		if (interrupt_server && (len > 0)) {
			while (len--) {
				if (interrupt_server[len] == hw_cpuid)
					return cpu_node;
			}
		} else {
			reg = of_get_property(cpu_node, "reg", &len);
			if (reg && (len > 0) && (reg[0] == hw_cpuid))
				return cpu_node;
		}
	}

	return NULL;
}

/* must hold reference to node during call */
static const int *of_get_associativity(struct device_node *dev)
{
	return of_get_property(dev, "ibm,associativity", NULL);
}

/* Returns nid in the range [0..MAX_NUMNODES-1], or -1 if no useful numa
 * information is found.
 */
static int of_node_to_nid_single(struct device_node *device)
{
	int nid = -1;
	const unsigned int *tmp;

	if (min_common_depth == -1)
		goto out;

	tmp = of_get_associativity(device);
	if (!tmp)
		goto out;

	if (tmp[0] >= min_common_depth)
		nid = tmp[min_common_depth];

	/* POWER4 LPAR uses 0xffff as invalid node */
	if (nid == 0xffff || nid >= MAX_NUMNODES)
		nid = -1;
out:
	return nid;
}

/* Walk the device tree upwards, looking for an associativity id */
int of_node_to_nid(struct device_node *device)
{
	struct device_node *tmp;
	int nid = -1;

	of_node_get(device);
	while (device) {
		nid = of_node_to_nid_single(device);
		if (nid != -1)
			break;

		tmp = device;
		device = of_get_parent(tmp);
		of_node_put(tmp);
	}
	of_node_put(device);

	return nid;
}
EXPORT_SYMBOL_GPL(of_node_to_nid);

/*
 * In theory, the "ibm,associativity" property may contain multiple
 * associativity lists because a resource may be multiply connected
 * into the machine. This resource then has different associativity
 * characteristics relative to its multiple connections. We ignore
 * this for now. We also assume that all cpu and memory sets have
 * their distances represented at a common level. This won't be
 * true for hierarchical NUMA.
 *
 * In any case the ibm,associativity-reference-points should give
 * the correct depth for a normal NUMA system.
 *
 * - Dave Hansen <haveblue@us.ibm.com>
 */
static int __init find_min_common_depth(void)
{
	int depth;
	const unsigned int *ref_points;
	struct device_node *rtas_root;
	unsigned int len;

	rtas_root = of_find_node_by_path("/rtas");

	if (!rtas_root)
		return -1;

	/*
	 * This property is 2 32-bit integers, each representing a level of
	 * depth in the associativity nodes. The first is for an SMP
	 * configuration (should be all 0's) and the second is for a normal
	 * NUMA configuration.
	 */
	ref_points = of_get_property(rtas_root,
			"ibm,associativity-reference-points", &len);

	if ((len >= 1) && ref_points) {
		depth = ref_points[1];
	} else {
		dbg("NUMA: ibm,associativity-reference-points not found.\n");
		depth = -1;
	}

	of_node_put(rtas_root);
	return depth;
}

static void __init get_n_mem_cells(int *n_addr_cells, int *n_size_cells)
{
	struct device_node *memory = NULL;

	memory = of_find_node_by_type(memory, "memory");
	if (!memory)
		panic("numa.c: No memory nodes found!");

	*n_addr_cells = of_n_addr_cells(memory);
	*n_size_cells = of_n_size_cells(memory);
	of_node_put(memory);
}

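/*
 * Read an unsigned long built from n consecutive 32-bit cells,
 * advancing the buffer pointer past the cells consumed.
 */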
static unsigned long __devinit read_n_cells(int n, const unsigned int **buf)
{
	unsigned long result = 0;

	while (n--) {
		result = (result << 32) | **buf;
		(*buf)++;
	}
	return result;
}

struct of_drconf_cell {
	u64	base_addr;
	u32	drc_index;
	u32	reserved;
	u32	aa_index;
	u32	flags;
};

#define DRCONF_MEM_ASSIGNED	0x00000008
#define DRCONF_MEM_AI_INVALID	0x00000040
#define DRCONF_MEM_RESERVED	0x00000080

/*
 * Read the next lmb list entry from the ibm,dynamic-memory property
 * and return the information in the provided of_drconf_cell structure.
 */
static void read_drconf_cell(struct of_drconf_cell *drmem, const u32 **cellp)
{
	const u32 *cp;

	drmem->base_addr = read_n_cells(n_mem_addr_cells, cellp);

	cp = *cellp;
	drmem->drc_index = cp[0];
	drmem->reserved = cp[1];
	drmem->aa_index = cp[2];
	drmem->flags = cp[3];

	*cellp = cp + 4;
}

/*
 * Retrieve and validate the ibm,dynamic-memory property of the device tree.
 *
 * The layout of the ibm,dynamic-memory property is a number N of lmb
 * list entries followed by N lmb list entries. Each lmb list entry
 * contains information as laid out in the of_drconf_cell struct above.
 */
static int of_get_drconf_memory(struct device_node *memory, const u32 **dm)
{
	const u32 *prop;
	u32 len, entries;

	prop = of_get_property(memory, "ibm,dynamic-memory", &len);
	if (!prop || len < sizeof(unsigned int))
		return 0;

	entries = *prop++;

	/* Now that we know the number of entries, revalidate the size
	 * of the property read in to ensure we have everything.
	 */
	if (len < (entries * (n_mem_addr_cells + 4) + 1) * sizeof(unsigned int))
		return 0;

	*dm = prop;
	return entries;
}

/*
 * Retrieve and validate the ibm,lmb-size property for drconf memory
 * from the device tree.
 */
static u64 of_get_lmb_size(struct device_node *memory)
{
	const u32 *prop;
	u32 len;

	prop = of_get_property(memory, "ibm,lmb-size", &len);
	if (!prop || len < sizeof(unsigned int))
		return 0;

	return read_n_cells(n_mem_size_cells, &prop);
}

struct assoc_arrays {
	u32	n_arrays;
	u32	array_sz;
	const u32 *arrays;
};

/*
 * Retrieve and validate the list of associativity arrays for drconf
 * memory from the ibm,associativity-lookup-arrays property of the
 * device tree.
 *
 * The layout of the ibm,associativity-lookup-arrays property is a number N
 * indicating the number of associativity arrays, followed by a number M
 * indicating the size of each associativity array, followed by a list
 * of N associativity arrays.
 */
static int of_get_assoc_arrays(struct device_node *memory,
			       struct assoc_arrays *aa)
{
	const u32 *prop;
	u32 len;

	prop = of_get_property(memory, "ibm,associativity-lookup-arrays", &len);
	if (!prop || len < 2 * sizeof(unsigned int))
		return -1;

	aa->n_arrays = *prop++;
	aa->array_sz = *prop++;

	/* Now that we know the number of arrays and size of each array,
	 * revalidate the size of the property read in.
	 */
	if (len < (aa->n_arrays * aa->array_sz + 2) * sizeof(unsigned int))
		return -1;

	aa->arrays = prop;
	return 0;
}

/*
 * This is like of_node_to_nid_single() for memory represented in the
 * ibm,dynamic-reconfiguration-memory node.
 */
static int of_drconf_to_nid_single(struct of_drconf_cell *drmem,
				   struct assoc_arrays *aa)
{
	int default_nid = 0;
	int nid = default_nid;
	int index;

	if (min_common_depth > 0 && min_common_depth <= aa->array_sz &&
	    !(drmem->flags & DRCONF_MEM_AI_INVALID) &&
	    drmem->aa_index < aa->n_arrays) {
		index = drmem->aa_index * aa->array_sz + min_common_depth - 1;
		nid = aa->arrays[index];

		if (nid == 0xffff || nid >= MAX_NUMNODES)
			nid = default_nid;
	}

	return nid;
}

/*
 * Figure out to which domain a cpu belongs and stick it there.
 * Return the id of the domain used.
 */
static int __cpuinit numa_setup_cpu(unsigned long lcpu)
{
	int nid = 0;
	struct device_node *cpu = find_cpu_node(lcpu);

	nid = of_node_to_nid_single(cpu);

	if (nid < 0 || !node_online(nid))
		nid = any_online_node(NODE_MASK_ALL);

	map_cpu_to_node(lcpu, nid);

	of_node_put(cpu);

	return nid;
}

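/*
 * CPU hotplug notifier: establish or tear down a cpu's node mapping
 * as it comes online or goes away.
 */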
static int __cpuinit cpu_numa_callback(struct notifier_block *nfb,
				       unsigned long action,
				       void *hcpu)
{
	unsigned long lcpu = (unsigned long)hcpu;
	int ret = NOTIFY_DONE;

	switch (action) {
	case CPU_UP_PREPARE:
	case CPU_UP_PREPARE_FROZEN:
		numa_setup_cpu(lcpu);
		ret = NOTIFY_OK;
		break;
#ifdef CONFIG_HOTPLUG_CPU
	case CPU_DEAD:
	case CPU_DEAD_FROZEN:
	case CPU_UP_CANCELED:
	case CPU_UP_CANCELED_FROZEN:
		unmap_cpu_from_node(lcpu);
		ret = NOTIFY_OK;
		break;
#endif
	}
	return ret;
}

/*
 * Check and possibly modify a memory region to enforce the memory limit.
 *
 * Returns the size the region should have to enforce the memory limit.
 * This will either be the original value of size, a truncated value,
 * or zero. If the returned value of size is 0 the region should be
 * discarded as it lies wholly above the memory limit.
 */
static unsigned long __init numa_enforce_memory_limit(unsigned long start,
						      unsigned long size)
{
	/*
	 * We use lmb_end_of_DRAM() in here instead of memory_limit because
	 * we've already adjusted it for the limit and it takes care of
	 * having memory holes below the limit.
	 */

	if (start + size <= lmb_end_of_DRAM())
		return size;

	if (start >= lmb_end_of_DRAM())
		return 0;

	return lmb_end_of_DRAM() - start;
}

/*
 * Extract NUMA information from the ibm,dynamic-reconfiguration-memory
 * node. This assumes n_mem_{addr,size}_cells have been set.
 */
static void __init parse_drconf_memory(struct device_node *memory)
{
	const u32 *dm;
	unsigned int n, rc;
	unsigned long lmb_size, size;
	int nid;
	struct assoc_arrays aa;

	n = of_get_drconf_memory(memory, &dm);
	if (!n)
		return;

	lmb_size = of_get_lmb_size(memory);
	if (!lmb_size)
		return;

	rc = of_get_assoc_arrays(memory, &aa);
	if (rc)
		return;

	for (; n != 0; --n) {
		struct of_drconf_cell drmem;

		read_drconf_cell(&drmem, &dm);

		/* skip this block if the reserved bit is set in flags (0x80)
		 * or if the block is not assigned to this partition (0x8)
		 */
		if ((drmem.flags & DRCONF_MEM_RESERVED)
		    || !(drmem.flags & DRCONF_MEM_ASSIGNED))
			continue;

		nid = of_drconf_to_nid_single(&drmem, &aa);

		fake_numa_create_new_node(
				((drmem.base_addr + lmb_size) >> PAGE_SHIFT),
				&nid);

		node_set_online(nid);

		size = numa_enforce_memory_limit(drmem.base_addr, lmb_size);
		if (!size)
			continue;

		add_active_range(nid, drmem.base_addr >> PAGE_SHIFT,
				 (drmem.base_addr >> PAGE_SHIFT)
				 + (size >> PAGE_SHIFT));
	}
}

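/*
 * Walk the device tree, bringing each node online and recording the
 * memory ranges that belong to it.
 */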
static int __init parse_numa_properties(void)
{
	struct device_node *cpu = NULL;
	struct device_node *memory = NULL;
	int default_nid = 0;
	unsigned long i;

	if (numa_enabled == 0) {
		printk(KERN_WARNING "NUMA disabled by user\n");
		return -1;
	}

	min_common_depth = find_min_common_depth();

	if (min_common_depth < 0)
		return min_common_depth;

	dbg("NUMA associativity depth for CPU/Memory: %d\n", min_common_depth);

	/*
	 * Even though we connect cpus to numa domains later in SMP
	 * init, we need to know the node ids now. This is because
	 * each node to be onlined must have NODE_DATA etc backing it.
	 */
	for_each_present_cpu(i) {
		int nid;

		cpu = find_cpu_node(i);
		nid = of_node_to_nid_single(cpu);
		of_node_put(cpu);

		/*
		 * Don't fall back to default_nid yet -- we will plug
		 * cpus into nodes once the memory scan has discovered
		 * restrictions.
		 */
		if (nid < 0)
			continue;
		node_set_online(nid);
	}

	get_n_mem_cells(&n_mem_addr_cells, &n_mem_size_cells);
	memory = NULL;
	while ((memory = of_find_node_by_type(memory, "memory")) != NULL) {
		unsigned long start;
		unsigned long size;
		int nid;
		int ranges;
		const unsigned int *memcell_buf;
		unsigned int len;

		memcell_buf = of_get_property(memory,
			"linux,usable-memory", &len);
		if (!memcell_buf || len <= 0)
			memcell_buf = of_get_property(memory, "reg", &len);
		if (!memcell_buf || len <= 0)
			continue;

		/* ranges in cell */
		ranges = (len >> 2) / (n_mem_addr_cells + n_mem_size_cells);
new_range:
		/* these are order-sensitive, and modify the buffer pointer */
		start = read_n_cells(n_mem_addr_cells, &memcell_buf);
		size = read_n_cells(n_mem_size_cells, &memcell_buf);

		/*
		 * Assumption: either all memory nodes or none will
		 * have associativity properties. If none, then
		 * everything goes to default_nid.
		 */
		nid = of_node_to_nid_single(memory);
		if (nid < 0)
			nid = default_nid;

		fake_numa_create_new_node(((start + size) >> PAGE_SHIFT), &nid);
		node_set_online(nid);

		if (!(size = numa_enforce_memory_limit(start, size))) {
			if (--ranges)
				goto new_range;
			else
				continue;
		}

		add_active_range(nid, start >> PAGE_SHIFT,
				 (start >> PAGE_SHIFT) + (size >> PAGE_SHIFT));

		if (--ranges)
			goto new_range;
	}

	/*
	 * Now do the same thing for each LMB listed in the ibm,dynamic-memory
	 * property in the ibm,dynamic-reconfiguration-memory node.
	 */
	memory = of_find_node_by_path("/ibm,dynamic-reconfiguration-memory");
	if (memory) {
		parse_drconf_memory(memory);
		of_node_put(memory);
	}

	return 0;
}

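/*
 * With no usable NUMA information, place all memory on node 0 (or on
 * fake nodes when "numa=fake=" is in effect).
 */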
static void __init setup_nonnuma(void)
{
	unsigned long top_of_ram = lmb_end_of_DRAM();
	unsigned long total_ram = lmb_phys_mem_size();
	unsigned long start_pfn, end_pfn;
	unsigned int i, nid = 0;

	printk(KERN_DEBUG "Top of RAM: 0x%lx, Total RAM: 0x%lx\n",
	       top_of_ram, total_ram);
	printk(KERN_DEBUG "Memory hole size: %ldMB\n",
	       (top_of_ram - total_ram) >> 20);

	for (i = 0; i < lmb.memory.cnt; ++i) {
		start_pfn = lmb.memory.region[i].base >> PAGE_SHIFT;
		end_pfn = start_pfn + lmb_size_pages(&lmb.memory, i);

		fake_numa_create_new_node(end_pfn, &nid);
		add_active_range(nid, start_pfn, end_pfn);
		node_set_online(nid);
	}
}

void __init dump_numa_cpu_topology(void)
{
	unsigned int node;
	unsigned int cpu, count;

	if (min_common_depth == -1 || !numa_enabled)
		return;

	for_each_online_node(node) {
		printk(KERN_DEBUG "Node %d CPUs:", node);

		count = 0;
		/*
		 * If we used a CPU iterator here we would miss printing
		 * the holes in the cpumap.
		 */
		for (cpu = 0; cpu < NR_CPUS; cpu++) {
			if (cpu_isset(cpu, numa_cpumask_lookup_table[node])) {
				if (count == 0)
					printk(" %u", cpu);
				++count;
			} else {
				if (count > 1)
					printk("-%u", cpu - 1);
				count = 0;
			}
		}

		if (count > 1)
			printk("-%u", NR_CPUS - 1);
		printk("\n");
	}
}

static void __init dump_numa_memory_topology(void)
{
	unsigned int node;
	unsigned int count;

	if (min_common_depth == -1 || !numa_enabled)
		return;

	for_each_online_node(node) {
		unsigned long i;

		printk(KERN_DEBUG "Node %d Memory:", node);

		count = 0;

		for (i = 0; i < lmb_end_of_DRAM();
		     i += (1 << SECTION_SIZE_BITS)) {
			if (early_pfn_to_nid(i >> PAGE_SHIFT) == node) {
				if (count == 0)
					printk(" 0x%lx", i);
				++count;
			} else {
				if (count > 0)
					printk("-0x%lx", i);
				count = 0;
			}
		}

		if (count > 0)
			printk("-0x%lx", i);
		printk("\n");
	}
}

/*
 * Allocate some memory, satisfying the lmb or bootmem allocator where
 * required. nid is the preferred node and end is the physical address of
 * the highest address in the node.
 *
 * Returns the physical address of the memory.
 */
static void __init *careful_allocation(int nid, unsigned long size,
				       unsigned long align,
				       unsigned long end_pfn)
{
	int new_nid;
	unsigned long ret = __lmb_alloc_base(size, align, end_pfn << PAGE_SHIFT);

	/* retry over all memory */
	if (!ret)
		ret = __lmb_alloc_base(size, align, lmb_end_of_DRAM());

	if (!ret)
		panic("numa.c: cannot allocate %lu bytes on node %d",
		      size, nid);

	/*
	 * If the memory came from a previously allocated node, we must
	 * retry with the bootmem allocator.
	 */
	new_nid = early_pfn_to_nid(ret >> PAGE_SHIFT);
	if (new_nid < nid) {
		ret = (unsigned long)__alloc_bootmem_node(NODE_DATA(new_nid),
				size, align, 0);

		if (!ret)
			panic("numa.c: cannot allocate %lu bytes on node %d",
			      size, new_nid);

		ret = __pa(ret);

		dbg("alloc_bootmem %lx %lx\n", ret, size);
	}

	return (void *)ret;
}

static struct notifier_block __cpuinitdata ppc64_numa_nb = {
	.notifier_call = cpu_numa_callback,
	.priority = 1 /* Must run before sched domains notifier. */
};

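/*
 * Discover the NUMA topology, then set up a pglist_data and bootmem
 * bitmap for each online node and reserve the regions lmb has recorded.
 */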
void __init do_init_bootmem(void)
{
	int nid;
	unsigned int i;

	min_low_pfn = 0;
	max_low_pfn = lmb_end_of_DRAM() >> PAGE_SHIFT;
	max_pfn = max_low_pfn;

	if (parse_numa_properties())
		setup_nonnuma();
	else
		dump_numa_memory_topology();

	register_cpu_notifier(&ppc64_numa_nb);
	cpu_numa_callback(&ppc64_numa_nb, CPU_UP_PREPARE,
			  (void *)(unsigned long)boot_cpuid);

	for_each_online_node(nid) {
		unsigned long start_pfn, end_pfn;
		unsigned long bootmem_paddr;
		unsigned long bootmap_pages;

		get_pfn_range_for_nid(nid, &start_pfn, &end_pfn);

		/* Allocate the node structure node local if possible */
		NODE_DATA(nid) = careful_allocation(nid,
					sizeof(struct pglist_data),
					SMP_CACHE_BYTES, end_pfn);
		NODE_DATA(nid) = __va(NODE_DATA(nid));
		memset(NODE_DATA(nid), 0, sizeof(struct pglist_data));

		dbg("node %d\n", nid);
		dbg("NODE_DATA() = %p\n", NODE_DATA(nid));

		NODE_DATA(nid)->bdata = &bootmem_node_data[nid];
		NODE_DATA(nid)->node_start_pfn = start_pfn;
		NODE_DATA(nid)->node_spanned_pages = end_pfn - start_pfn;

		if (NODE_DATA(nid)->node_spanned_pages == 0)
			continue;

		dbg("start_paddr = %lx\n", start_pfn << PAGE_SHIFT);
		dbg("end_paddr = %lx\n", end_pfn << PAGE_SHIFT);

		bootmap_pages = bootmem_bootmap_pages(end_pfn - start_pfn);
		bootmem_paddr = (unsigned long)careful_allocation(nid,
					bootmap_pages << PAGE_SHIFT,
					PAGE_SIZE, end_pfn);
		memset(__va(bootmem_paddr), 0, bootmap_pages << PAGE_SHIFT);

		dbg("bootmap_paddr = %lx\n", bootmem_paddr);

		init_bootmem_node(NODE_DATA(nid), bootmem_paddr >> PAGE_SHIFT,
				  start_pfn, end_pfn);

		free_bootmem_with_active_regions(nid, end_pfn);

		/* Mark reserved regions on this node */
		for (i = 0; i < lmb.reserved.cnt; i++) {
			unsigned long physbase = lmb.reserved.region[i].base;
			unsigned long size = lmb.reserved.region[i].size;
			unsigned long start_paddr = start_pfn << PAGE_SHIFT;
			unsigned long end_paddr = end_pfn << PAGE_SHIFT;

			if (early_pfn_to_nid(physbase >> PAGE_SHIFT) != nid &&
			    early_pfn_to_nid((physbase + size - 1) >> PAGE_SHIFT) != nid)
				continue;

			if (physbase < end_paddr &&
			    (physbase + size) > start_paddr) {
				/* overlaps */
				if (physbase < start_paddr) {
					size -= start_paddr - physbase;
					physbase = start_paddr;
				}

				if (size > end_paddr - physbase)
					size = end_paddr - physbase;

				dbg("reserve_bootmem %lx %lx\n", physbase,
				    size);
				reserve_bootmem_node(NODE_DATA(nid), physbase,
						     size, BOOTMEM_DEFAULT);
			}
		}

		sparse_memory_present_with_active_regions(nid);
	}
}

void __init paging_init(void)
{
	unsigned long max_zone_pfns[MAX_NR_ZONES];

	memset(max_zone_pfns, 0, sizeof(max_zone_pfns));
	max_zone_pfns[ZONE_DMA] = lmb_end_of_DRAM() >> PAGE_SHIFT;
	free_area_init_nodes(max_zone_pfns);
}

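/* Parse the "numa=" early parameter: "off", "debug" and "fake=<mem>[,...]". */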
static int __init early_numa(char *p)
{
	if (!p)
		return 0;

	if (strstr(p, "off"))
		numa_enabled = 0;

	if (strstr(p, "debug"))
		numa_debug = 1;

	p = strstr(p, "fake=");
	if (p)
		cmdline = p + strlen("fake=");

	return 0;
}
early_param("numa", early_numa);

#ifdef CONFIG_MEMORY_HOTPLUG
/*
 * Validate the node associated with the memory section we are
 * trying to add.
 */
int valid_hot_add_scn(int *nid, unsigned long start, u32 lmb_size,
		      unsigned long scn_addr)
{
	nodemask_t nodes;

	if (*nid < 0 || !node_online(*nid))
		*nid = any_online_node(NODE_MASK_ALL);

	if ((scn_addr >= start) && (scn_addr < (start + lmb_size))) {
		nodes_setall(nodes);
		while (NODE_DATA(*nid)->node_spanned_pages == 0) {
			node_clear(*nid, nodes);
			*nid = any_online_node(nodes);
		}

		return 1;
	}

	return 0;
}

/*
 * Find the node associated with a hot added memory section represented
 * by the ibm,dynamic-reconfiguration-memory node.
 */
static int hot_add_drconf_scn_to_nid(struct device_node *memory,
				     unsigned long scn_addr)
{
	const u32 *dm;
	unsigned int n, rc;
	unsigned long lmb_size;
	int default_nid = any_online_node(NODE_MASK_ALL);
	int nid;
	struct assoc_arrays aa;

	n = of_get_drconf_memory(memory, &dm);
	if (!n)
		return default_nid;

	lmb_size = of_get_lmb_size(memory);
	if (!lmb_size)
		return default_nid;

	rc = of_get_assoc_arrays(memory, &aa);
	if (rc)
		return default_nid;

	for (; n != 0; --n) {
		struct of_drconf_cell drmem;

		read_drconf_cell(&drmem, &dm);

		/* skip this block if it is reserved or not assigned to
		 * this partition
		 */
		if ((drmem.flags & DRCONF_MEM_RESERVED)
		    || !(drmem.flags & DRCONF_MEM_ASSIGNED))
			continue;

		nid = of_drconf_to_nid_single(&drmem, &aa);

		if (valid_hot_add_scn(&nid, drmem.base_addr, lmb_size,
				      scn_addr))
			return nid;
	}

	BUG();	/* section address should be found above */
	return 0;
}

/*
 * Find the node associated with a hot added memory section. Section
 * corresponds to a SPARSEMEM section, not an LMB. It is assumed that
 * sections are fully contained within a single LMB.
 */
int hot_add_scn_to_nid(unsigned long scn_addr)
{
	struct device_node *memory = NULL;
	int nid;

	if (!numa_enabled || (min_common_depth < 0))
		return any_online_node(NODE_MASK_ALL);

	memory = of_find_node_by_path("/ibm,dynamic-reconfiguration-memory");
	if (memory) {
		nid = hot_add_drconf_scn_to_nid(memory, scn_addr);
		of_node_put(memory);
		return nid;
	}

	while ((memory = of_find_node_by_type(memory, "memory")) != NULL) {
		unsigned long start, size;
		int ranges;
		const unsigned int *memcell_buf;
		unsigned int len;

		memcell_buf = of_get_property(memory, "reg", &len);
		if (!memcell_buf || len <= 0)
			continue;

		/* ranges in cell */
		ranges = (len >> 2) / (n_mem_addr_cells + n_mem_size_cells);
ha_new_range:
		start = read_n_cells(n_mem_addr_cells, &memcell_buf);
		size = read_n_cells(n_mem_size_cells, &memcell_buf);
		nid = of_node_to_nid_single(memory);

		if (valid_hot_add_scn(&nid, start, size, scn_addr)) {
			of_node_put(memory);
			return nid;
		}

		if (--ranges)	/* process all ranges in cell */
			goto ha_new_range;
	}
	BUG();	/* section address should be found above */
	return 0;
}
#endif /* CONFIG_MEMORY_HOTPLUG */