/*
 * pSeries NUMA support
 *
 * Copyright (C) 2002 Anton Blanchard <anton@au.ibm.com>, IBM
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */
#include <linux/threads.h>
#include <linux/bootmem.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/mmzone.h>
#include <linux/module.h>
#include <linux/nodemask.h>
#include <linux/cpu.h>
#include <linux/notifier.h>
#include <linux/memblock.h>
#include <linux/of.h>
#include <linux/pfn.h>
#include <asm/sparsemem.h>
#include <asm/prom.h>
#include <asm/system.h>
#include <asm/smp.h>

static int numa_enabled = 1;

static char *cmdline __initdata;

static int numa_debug;
#define dbg(args...) if (numa_debug) { printk(KERN_INFO args); }
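
/*
 * numa_debug is off by default; booting with "numa=debug" turns on the
 * dbg() output above (see early_numa() at the end of this file).
 */
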
int numa_cpu_lookup_table[NR_CPUS];
cpumask_var_t node_to_cpumask_map[MAX_NUMNODES];
struct pglist_data *node_data[MAX_NUMNODES];

EXPORT_SYMBOL(numa_cpu_lookup_table);
EXPORT_SYMBOL(node_to_cpumask_map);
EXPORT_SYMBOL(node_data);

static int min_common_depth;
static int n_mem_addr_cells, n_mem_size_cells;
static int form1_affinity;

#define MAX_DISTANCE_REF_POINTS 4
static int distance_ref_points_depth;
static const unsigned int *distance_ref_points;
static int distance_lookup_table[MAX_NUMNODES][MAX_DISTANCE_REF_POINTS];

/*
 * Allocate node_to_cpumask_map based on number of available nodes
 * Requires node_possible_map to be valid.
 *
 * Note: node_to_cpumask() is not valid until after this is done.
 */
static void __init setup_node_to_cpumask_map(void)
{
	unsigned int node, num = 0;

	/* setup nr_node_ids if not done yet */
	if (nr_node_ids == MAX_NUMNODES) {
		for_each_node_mask(node, node_possible_map)
			num = node;
		nr_node_ids = num + 1;
	}

	/* allocate the map */
	for (node = 0; node < nr_node_ids; node++)
		alloc_bootmem_cpumask_var(&node_to_cpumask_map[node]);

	/* cpumask_of_node() will now work */
	dbg("Node to cpumask map for %d nodes\n", nr_node_ids);
}

static int __cpuinit fake_numa_create_new_node(unsigned long end_pfn,
						unsigned int *nid)
{
	unsigned long long mem;
	char *p = cmdline;
	static unsigned int fake_nid;
	static unsigned long long curr_boundary;

	/*
	 * Modify node id, iff we started creating NUMA nodes
	 * We want to continue from where we left off the last time
	 */
	if (fake_nid)
		*nid = fake_nid;
	/*
	 * In case there are no more arguments to parse, the
	 * node_id should be the same as the last fake node id
	 * (we've handled this above).
	 */
	if (!p)
		return 0;

	mem = memparse(p, &p);
	if (!mem)
		return 0;

	if (mem < curr_boundary)
		return 0;

	curr_boundary = mem;

	if ((end_pfn << PAGE_SHIFT) > mem) {
		/*
		 * Skip commas and spaces
		 */
		while (*p == ',' || *p == ' ' || *p == '\t')
			p++;

		cmdline = p;
		fake_nid++;
		*nid = fake_nid;
		dbg("created new fake_node with id %d\n", fake_nid);
		return 1;
	}
	return 0;
}
/*
 * get_active_region_work_fn - A helper function for get_node_active_region
 *	Returns datax set to the start_pfn and end_pfn if they contain
 *	the initial value of datax->start_pfn between them
 * @start_pfn: start page(inclusive) of region to check
 * @end_pfn: end page(exclusive) of region to check
 * @datax: comes in with ->start_pfn set to value to search for and
 *	goes out with active range if it contains it
 * Returns 1 if search value is in range else 0
 */
static int __init get_active_region_work_fn(unsigned long start_pfn,
					unsigned long end_pfn, void *datax)
{
	struct node_active_region *data;

	data = (struct node_active_region *)datax;

	if (start_pfn <= data->start_pfn && end_pfn > data->start_pfn) {
		data->start_pfn = start_pfn;
		data->end_pfn = end_pfn;
		return 1;
	}
	return 0;
}

/*
 * get_node_active_region - Return active region containing start_pfn
 * Active range returned is empty if none found.
 * @start_pfn: The page to return the region for.
 * @node_ar: Returned set to the active region containing start_pfn
 */
static void __init get_node_active_region(unsigned long start_pfn,
					  struct node_active_region *node_ar)
{
	int nid = early_pfn_to_nid(start_pfn);

	node_ar->nid = nid;
	node_ar->start_pfn = start_pfn;
	node_ar->end_pfn = start_pfn;
	work_with_active_regions(nid, get_active_region_work_fn, node_ar);
}

static void __cpuinit map_cpu_to_node(int cpu, int node)
{
	numa_cpu_lookup_table[cpu] = node;

	dbg("adding cpu %d to node %d\n", cpu, node);

	if (!(cpumask_test_cpu(cpu, node_to_cpumask_map[node])))
		cpumask_set_cpu(cpu, node_to_cpumask_map[node]);
}

#ifdef CONFIG_HOTPLUG_CPU
static void unmap_cpu_from_node(unsigned long cpu)
{
	int node = numa_cpu_lookup_table[cpu];

	dbg("removing cpu %lu from node %d\n", cpu, node);

	if (cpumask_test_cpu(cpu, node_to_cpumask_map[node])) {
		cpumask_clear_cpu(cpu, node_to_cpumask_map[node]);
	} else {
		printk(KERN_ERR "WARNING: cpu %lu not found in node %d\n",
		       cpu, node);
	}
}
#endif /* CONFIG_HOTPLUG_CPU */

/* must hold reference to node during call */
static const int *of_get_associativity(struct device_node *dev)
{
	return of_get_property(dev, "ibm,associativity", NULL);
}

/*
 * Returns the property linux,drconf-usable-memory if
 * it exists (the property exists only in kexec/kdump kernels,
 * added by kexec-tools)
 */
static const u32 *of_get_usable_memory(struct device_node *memory)
{
	const u32 *prop;
	u32 len;

	prop = of_get_property(memory, "linux,drconf-usable-memory", &len);
	if (!prop || len < sizeof(unsigned int))
		return 0;
	return prop;
}

int __node_distance(int a, int b)
{
	int i;
	int distance = LOCAL_DISTANCE;

	if (!form1_affinity)
		return distance;

	for (i = 0; i < distance_ref_points_depth; i++) {
		if (distance_lookup_table[a][i] == distance_lookup_table[b][i])
			break;

		/* Double the distance for each NUMA level */
		distance *= 2;
	}

	return distance;
}
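
/*
 * Worked example: with distance_ref_points_depth == 2, two nodes whose
 * lookup table entries differ at both levels report LOCAL_DISTANCE * 4,
 * while nodes that already match at the first reference point break
 * out of the loop immediately and report LOCAL_DISTANCE.
 */
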
static void initialize_distance_lookup_table(int nid,
		const unsigned int *associativity)
{
	int i;

	if (!form1_affinity)
		return;

	for (i = 0; i < distance_ref_points_depth; i++) {
		distance_lookup_table[nid][i] =
			associativity[distance_ref_points[i]];
	}
}

/* Returns nid in the range [0..MAX_NUMNODES-1], or -1 if no useful numa
 * information is found.
 */
static int of_node_to_nid_single(struct device_node *device)
{
	int nid = -1;
	const unsigned int *tmp;

	if (min_common_depth == -1)
		goto out;

	tmp = of_get_associativity(device);
	if (!tmp)
		goto out;

	if (tmp[0] >= min_common_depth)
		nid = tmp[min_common_depth];

	/* POWER4 LPAR uses 0xffff as invalid node */
	if (nid == 0xffff || nid >= MAX_NUMNODES)
		nid = -1;

	if (nid > 0 && tmp[0] >= distance_ref_points_depth)
		initialize_distance_lookup_table(nid, tmp);

out:
	return nid;
}
/* Walk the device tree upwards, looking for an associativity id */
int of_node_to_nid(struct device_node *device)
{
	struct device_node *tmp;
	int nid = -1;

	of_node_get(device);
	while (device) {
		nid = of_node_to_nid_single(device);
		if (nid != -1)
			break;

		tmp = device;
		device = of_get_parent(tmp);
		of_node_put(tmp);
	}
	of_node_put(device);

	return nid;
}
EXPORT_SYMBOL_GPL(of_node_to_nid);

static int __init find_min_common_depth(void)
{
	int depth;
	struct device_node *rtas_root;
	struct device_node *chosen;
	const char *vec5;

	rtas_root = of_find_node_by_path("/rtas");

	if (!rtas_root)
		return -1;

	/*
	 * This property is a set of 32-bit integers, each representing
	 * an index into the ibm,associativity nodes.
	 *
	 * With form 0 affinity the first integer is for an SMP configuration
	 * (should be all 0's) and the second is for a normal NUMA
	 * configuration. We have only one level of NUMA.
	 *
	 * With form 1 affinity the first integer is the most significant
	 * NUMA boundary and the following are progressively less significant
	 * boundaries. There can be more than one level of NUMA.
	 */
	distance_ref_points = of_get_property(rtas_root,
					"ibm,associativity-reference-points",
					&distance_ref_points_depth);

	if (!distance_ref_points) {
		dbg("NUMA: ibm,associativity-reference-points not found.\n");
		goto err;
	}

	distance_ref_points_depth /= sizeof(int);

#define VEC5_AFFINITY_BYTE	5
#define VEC5_AFFINITY		0x80
	chosen = of_find_node_by_path("/chosen");
	if (chosen) {
		vec5 = of_get_property(chosen, "ibm,architecture-vec-5", NULL);
		if (vec5 && (vec5[VEC5_AFFINITY_BYTE] & VEC5_AFFINITY)) {
			dbg("Using form 1 affinity\n");
			form1_affinity = 1;
		}
	}

	if (form1_affinity) {
		depth = distance_ref_points[0];
	} else {
		if (distance_ref_points_depth < 2) {
			printk(KERN_WARNING "NUMA: "
				"short ibm,associativity-reference-points\n");
			goto err;
		}

		depth = distance_ref_points[1];
	}

	/*
	 * Warn and cap if the hardware supports more than
	 * MAX_DISTANCE_REF_POINTS domains.
	 */
	if (distance_ref_points_depth > MAX_DISTANCE_REF_POINTS) {
		printk(KERN_WARNING "NUMA: distance array capped at "
			"%d entries\n", MAX_DISTANCE_REF_POINTS);
		distance_ref_points_depth = MAX_DISTANCE_REF_POINTS;
	}

	of_node_put(rtas_root);
	return depth;

err:
	of_node_put(rtas_root);
	return -1;
}

static void __init get_n_mem_cells(int *n_addr_cells, int *n_size_cells)
{
	struct device_node *memory = NULL;

	memory = of_find_node_by_type(memory, "memory");
	if (!memory)
		panic("numa.c: No memory nodes found!");

	*n_addr_cells = of_n_addr_cells(memory);
	*n_size_cells = of_n_size_cells(memory);
	of_node_put(memory);
}

static unsigned long __devinit read_n_cells(int n, const unsigned int **buf)
{
	unsigned long result = 0;

	while (n--) {
		result = (result << 32) | **buf;
		(*buf)++;
	}
	return result;
}
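
/*
 * Device tree cells are 32 bits wide, so for n == 2 the loop above
 * folds two consecutive cells into a single 64-bit value and leaves
 * *buf pointing at the next unread cell.
 */
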
struct of_drconf_cell {
	u64	base_addr;
	u32	drc_index;
	u32	reserved;
	u32	aa_index;
	u32	flags;
};

#define DRCONF_MEM_ASSIGNED	0x00000008
#define DRCONF_MEM_AI_INVALID	0x00000040
#define DRCONF_MEM_RESERVED	0x00000080
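
/*
 * Each ibm,dynamic-memory entry is encoded as n_mem_addr_cells cells
 * of base address followed by four u32 cells (drc_index, reserved,
 * aa_index, flags); this is what read_drconf_cell() below unpacks and
 * what the size check in of_get_drconf_memory() assumes.
 */
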
/*
 * Read the next memblock list entry from the ibm,dynamic-memory property
 * and return the information in the provided of_drconf_cell structure.
 */
static void read_drconf_cell(struct of_drconf_cell *drmem, const u32 **cellp)
{
	const u32 *cp;

	drmem->base_addr = read_n_cells(n_mem_addr_cells, cellp);

	cp = *cellp;
	drmem->drc_index = cp[0];
	drmem->reserved = cp[1];
	drmem->aa_index = cp[2];
	drmem->flags = cp[3];

	*cellp = cp + 4;
}

/*
 * Retrieve and validate the ibm,dynamic-memory property of the device tree.
 *
 * The layout of the ibm,dynamic-memory property is a number N of memblock
 * list entries followed by N memblock list entries.  Each memblock list entry
 * contains information as laid out in the of_drconf_cell struct above.
 */
static int of_get_drconf_memory(struct device_node *memory, const u32 **dm)
{
	const u32 *prop;
	u32 len, entries;

	prop = of_get_property(memory, "ibm,dynamic-memory", &len);
	if (!prop || len < sizeof(unsigned int))
		return 0;

	entries = *prop++;

	/* Now that we know the number of entries, revalidate the size
	 * of the property read in to ensure we have everything
	 */
	if (len < (entries * (n_mem_addr_cells + 4) + 1) * sizeof(unsigned int))
		return 0;

	*dm = prop;
	return entries;
}

/*
 * Retrieve and validate the ibm,lmb-size property for drconf memory
 * from the device tree.
 */
static u64 of_get_lmb_size(struct device_node *memory)
{
	const u32 *prop;
	u32 len;

	prop = of_get_property(memory, "ibm,lmb-size", &len);
	if (!prop || len < sizeof(unsigned int))
		return 0;

	return read_n_cells(n_mem_size_cells, &prop);
}

struct assoc_arrays {
	u32	n_arrays;
	u32	array_sz;
	const u32 *arrays;
};

/*
 * Retrieve and validate the list of associativity arrays for drconf
 * memory from the ibm,associativity-lookup-arrays property of the
 * device tree.
 *
 * The layout of the ibm,associativity-lookup-arrays property is a number N
 * indicating the number of associativity arrays, followed by a number M
 * indicating the size of each associativity array, followed by a list
 * of N associativity arrays.
 */
static int of_get_assoc_arrays(struct device_node *memory,
			       struct assoc_arrays *aa)
{
	const u32 *prop;
	u32 len;

	prop = of_get_property(memory, "ibm,associativity-lookup-arrays", &len);
	if (!prop || len < 2 * sizeof(unsigned int))
		return -1;

	aa->n_arrays = *prop++;
	aa->array_sz = *prop++;

	/* Now that we know the number of arrays and size of each array,
	 * revalidate the size of the property read in.
	 */
	if (len < (aa->n_arrays * aa->array_sz + 2) * sizeof(unsigned int))
		return -1;

	aa->arrays = prop;
	return 0;
}
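
/*
 * For instance, N == 2 and M == 5 would be followed by two five-cell
 * associativity arrays laid out back to back.
 */
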
/*
 * This is like of_node_to_nid_single() for memory represented in the
 * ibm,dynamic-reconfiguration-memory node.
 */
static int of_drconf_to_nid_single(struct of_drconf_cell *drmem,
				   struct assoc_arrays *aa)
{
	int default_nid = 0;
	int nid = default_nid;
	int index;

	if (min_common_depth > 0 && min_common_depth <= aa->array_sz &&
	    !(drmem->flags & DRCONF_MEM_AI_INVALID) &&
	    drmem->aa_index < aa->n_arrays) {
		index = drmem->aa_index * aa->array_sz + min_common_depth - 1;
		nid = aa->arrays[index];

		if (nid == 0xffff || nid >= MAX_NUMNODES)
			nid = default_nid;
	}

	return nid;
}
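
/*
 * For example, with aa->array_sz == 5 and min_common_depth == 4, the
 * node id is taken from cell 3 (zero-based) of the associativity
 * array selected by drmem->aa_index.
 */
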
/*
 * Figure out to which domain a cpu belongs and stick it there.
 * Return the id of the domain used.
 */
static int __cpuinit numa_setup_cpu(unsigned long lcpu)
{
	int nid = 0;
	struct device_node *cpu = of_get_cpu_node(lcpu, NULL);

	if (!cpu) {
		WARN_ON(1);
		goto out;
	}

	nid = of_node_to_nid_single(cpu);

	if (nid < 0 || !node_online(nid))
		nid = first_online_node;
out:
	map_cpu_to_node(lcpu, nid);

	of_node_put(cpu);

	return nid;
}

static int __cpuinit cpu_numa_callback(struct notifier_block *nfb,
				       unsigned long action,
				       void *hcpu)
{
	unsigned long lcpu = (unsigned long)hcpu;
	int ret = NOTIFY_DONE;

	switch (action) {
	case CPU_UP_PREPARE:
	case CPU_UP_PREPARE_FROZEN:
		numa_setup_cpu(lcpu);
		ret = NOTIFY_OK;
		break;
#ifdef CONFIG_HOTPLUG_CPU
	case CPU_DEAD:
	case CPU_DEAD_FROZEN:
	case CPU_UP_CANCELED:
	case CPU_UP_CANCELED_FROZEN:
		unmap_cpu_from_node(lcpu);
		ret = NOTIFY_OK;
		break;
#endif
	}
	return ret;
}

/*
 * Check and possibly modify a memory region to enforce the memory limit.
 *
 * Returns the size the region should have to enforce the memory limit.
 * This will either be the original value of size, a truncated value,
 * or zero. If the returned value of size is 0 the region should be
 * discarded as it lies wholly above the memory limit.
 */
static unsigned long __init numa_enforce_memory_limit(unsigned long start,
						      unsigned long size)
{
	/*
	 * We use memblock_end_of_DRAM() in here instead of memory_limit because
	 * we've already adjusted it for the limit and it takes care of
	 * having memory holes below the limit.  Also, in the case of
	 * iommu_is_off, memory_limit is not set but is implicitly enforced.
	 */
	if (start + size <= memblock_end_of_DRAM())
		return size;

	if (start >= memblock_end_of_DRAM())
		return 0;

	return memblock_end_of_DRAM() - start;
}
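
/*
 * Example: if DRAM ends at 3G, a region starting at 2G with size 2G is
 * trimmed to 1G, while a region starting at or above 3G is discarded
 * (size 0).
 */
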
/*
 * Reads the counter for a given entry in
 * linux,drconf-usable-memory property
 */
static inline int __init read_usm_ranges(const u32 **usm)
{
	/*
	 * For each lmb in ibm,dynamic-memory a corresponding
	 * entry in linux,drconf-usable-memory property contains
	 * a counter followed by that many (base, size) duples.
	 * Read the counter from linux,drconf-usable-memory.
	 */
	return read_n_cells(n_mem_size_cells, usm);
}

/*
 * Extract NUMA information from the ibm,dynamic-reconfiguration-memory
 * node.  This assumes n_mem_{addr,size}_cells have been set.
 */
static void __init parse_drconf_memory(struct device_node *memory)
{
	const u32 *dm, *usm;
	unsigned int n, rc, ranges, is_kexec_kdump = 0;
	unsigned long lmb_size, base, size, sz;
	int nid;
	struct assoc_arrays aa;

	n = of_get_drconf_memory(memory, &dm);
	if (!n)
		return;

	lmb_size = of_get_lmb_size(memory);
	if (!lmb_size)
		return;

	rc = of_get_assoc_arrays(memory, &aa);
	if (rc)
		return;

	/* check if this is a kexec/kdump kernel */
	usm = of_get_usable_memory(memory);
	if (usm != NULL)
		is_kexec_kdump = 1;

	for (; n != 0; --n) {
		struct of_drconf_cell drmem;

		read_drconf_cell(&drmem, &dm);

		/* skip this block if the reserved bit is set in flags (0x80)
		   or if the block is not assigned to this partition (0x8) */
		if ((drmem.flags & DRCONF_MEM_RESERVED)
		    || !(drmem.flags & DRCONF_MEM_ASSIGNED))
			continue;

		/* Get the base address and size of the lmb */
		base = drmem.base_addr;
		size = lmb_size;
		ranges = 1;

		if (is_kexec_kdump) {
			ranges = read_usm_ranges(&usm);
			if (!ranges) /* there are no (base, size) duples */
				continue;
		}
		do {
			if (is_kexec_kdump) {
				base = read_n_cells(n_mem_addr_cells, &usm);
				size = read_n_cells(n_mem_size_cells, &usm);
			}
			nid = of_drconf_to_nid_single(&drmem, &aa);
			fake_numa_create_new_node(
				((base + size) >> PAGE_SHIFT),
				&nid);
			node_set_online(nid);
			sz = numa_enforce_memory_limit(base, size);
			if (sz)
				add_active_range(nid, base >> PAGE_SHIFT,
						 (base >> PAGE_SHIFT)
						 + (sz >> PAGE_SHIFT));
		} while (--ranges);
	}
}

static int __init parse_numa_properties(void)
{
	struct device_node *cpu = NULL;
	struct device_node *memory = NULL;
	int default_nid = 0;
	unsigned long i;

	if (numa_enabled == 0) {
		printk(KERN_WARNING "NUMA disabled by user\n");
		return -1;
	}

	min_common_depth = find_min_common_depth();

	if (min_common_depth < 0)
		return min_common_depth;

	dbg("NUMA associativity depth for CPU/Memory: %d\n", min_common_depth);

	/*
	 * Even though we connect cpus to numa domains later in SMP
	 * init, we need to know the node ids now. This is because
	 * each node to be onlined must have NODE_DATA etc backing it.
	 */
	for_each_present_cpu(i) {
		int nid;

		cpu = of_get_cpu_node(i, NULL);
		BUG_ON(!cpu);
		nid = of_node_to_nid_single(cpu);
		of_node_put(cpu);

		/*
		 * Don't fall back to default_nid yet -- we will plug
		 * cpus into nodes once the memory scan has discovered
		 * the topology.
		 */
		if (nid < 0)
			continue;
		node_set_online(nid);
	}

	get_n_mem_cells(&n_mem_addr_cells, &n_mem_size_cells);

	while ((memory = of_find_node_by_type(memory, "memory")) != NULL) {
		unsigned long start;
		unsigned long size;
		int nid;
		int ranges;
		const unsigned int *memcell_buf;
		unsigned int len;

		memcell_buf = of_get_property(memory,
			"linux,usable-memory", &len);
		if (!memcell_buf || len <= 0)
			memcell_buf = of_get_property(memory, "reg", &len);
		if (!memcell_buf || len <= 0)
			continue;

		/* ranges in cell */
		ranges = (len >> 2) / (n_mem_addr_cells + n_mem_size_cells);
new_range:
		/* these are order-sensitive, and modify the buffer pointer */
		start = read_n_cells(n_mem_addr_cells, &memcell_buf);
		size = read_n_cells(n_mem_size_cells, &memcell_buf);

		/*
		 * Assumption: either all memory nodes or none will
		 * have associativity properties.  If none, then
		 * everything goes to default_nid.
		 */
		nid = of_node_to_nid_single(memory);
		if (nid < 0)
			nid = default_nid;

		fake_numa_create_new_node(((start + size) >> PAGE_SHIFT), &nid);
		node_set_online(nid);

		if (!(size = numa_enforce_memory_limit(start, size))) {
			if (--ranges)
				goto new_range;
			else
				continue;
		}

		add_active_range(nid, start >> PAGE_SHIFT,
				(start >> PAGE_SHIFT) + (size >> PAGE_SHIFT));

		if (--ranges)
			goto new_range;
	}

	/*
	 * Now do the same thing for each MEMBLOCK listed in the ibm,dynamic-memory
	 * property in the ibm,dynamic-reconfiguration-memory node.
	 */
	memory = of_find_node_by_path("/ibm,dynamic-reconfiguration-memory");
	if (memory) {
		parse_drconf_memory(memory);
		of_node_put(memory);
	}

	return 0;
}

static void __init setup_nonnuma(void)
{
	unsigned long top_of_ram = memblock_end_of_DRAM();
	unsigned long total_ram = memblock_phys_mem_size();
	unsigned long start_pfn, end_pfn;
	unsigned int i, nid = 0;

	printk(KERN_DEBUG "Top of RAM: 0x%lx, Total RAM: 0x%lx\n",
	       top_of_ram, total_ram);
	printk(KERN_DEBUG "Memory hole size: %ldMB\n",
	       (top_of_ram - total_ram) >> 20);

	for (i = 0; i < memblock.memory.cnt; ++i) {
		start_pfn = memblock.memory.region[i].base >> PAGE_SHIFT;
		end_pfn = start_pfn + memblock_size_pages(&memblock.memory, i);

		fake_numa_create_new_node(end_pfn, &nid);
		add_active_range(nid, start_pfn, end_pfn);
		node_set_online(nid);
	}
}

void __init dump_numa_cpu_topology(void)
{
	unsigned int node;
	unsigned int cpu, count;

	if (min_common_depth == -1 || !numa_enabled)
		return;

	for_each_online_node(node) {
		printk(KERN_DEBUG "Node %d CPUs:", node);

		count = 0;
		/*
		 * If we used a CPU iterator here we would miss printing
		 * the holes in the cpumap.
		 */
		for (cpu = 0; cpu < nr_cpu_ids; cpu++) {
			if (cpumask_test_cpu(cpu,
					node_to_cpumask_map[node])) {
				if (count == 0)
					printk(" %u", cpu);
				++count;
			} else {
				if (count > 1)
					printk("-%u", cpu - 1);
				count = 0;
			}
		}

		if (count > 1)
			printk("-%u", nr_cpu_ids - 1);
		printk("\n");
	}
}

static void __init dump_numa_memory_topology(void)
{
	unsigned int node;
	unsigned int count;

	if (min_common_depth == -1 || !numa_enabled)
		return;

	for_each_online_node(node) {
		unsigned long i;

		printk(KERN_DEBUG "Node %d Memory:", node);

		count = 0;

		for (i = 0; i < memblock_end_of_DRAM();
		     i += (1 << SECTION_SIZE_BITS)) {
			if (early_pfn_to_nid(i >> PAGE_SHIFT) == node) {
				if (count == 0)
					printk(" 0x%lx", i);
				++count;
			} else {
				if (count > 0)
					printk("-0x%lx", i);
				count = 0;
			}
		}

		if (count > 0)
			printk("-0x%lx", i);
		printk("\n");
	}
}

/*
 * Allocate some memory, satisfying the memblock or bootmem allocator where
 * required. nid is the preferred node and end is the physical address of
 * the highest address in the node.
 *
 * Returns the virtual address of the memory.
 */
static void __init *careful_zallocation(int nid, unsigned long size,
					unsigned long align,
					unsigned long end_pfn)
{
	void *ret;
	int new_nid;
	unsigned long ret_paddr;

	ret_paddr = __memblock_alloc_base(size, align, end_pfn << PAGE_SHIFT);

	/* retry over all memory */
	if (!ret_paddr)
		ret_paddr = __memblock_alloc_base(size, align, memblock_end_of_DRAM());

	if (!ret_paddr)
		panic("numa.c: cannot allocate %lu bytes for node %d",
		      size, nid);

	ret = __va(ret_paddr);

	/*
	 * We initialize the nodes in numeric order: 0, 1, 2...
	 * and hand over control from the MEMBLOCK allocator to the
	 * bootmem allocator.  If this function is called for
	 * node 5, then we know that all nodes <5 are using the
	 * bootmem allocator instead of the MEMBLOCK allocator.
	 *
	 * So, check the nid from which this allocation came
	 * and double check to see if we need to use bootmem
	 * instead of the MEMBLOCK.  We don't free the MEMBLOCK memory
	 * since it would be useless.
	 */
	new_nid = early_pfn_to_nid(ret_paddr >> PAGE_SHIFT);
	if (new_nid < nid) {
		ret = __alloc_bootmem_node(NODE_DATA(new_nid),
				size, align, 0);

		dbg("alloc_bootmem %p %lx\n", ret, size);
	}

	memset(ret, 0, size);
	return ret;
}

static struct notifier_block __cpuinitdata ppc64_numa_nb = {
	.notifier_call = cpu_numa_callback,
	.priority = 1 /* Must run before sched domains notifier. */
};

static void mark_reserved_regions_for_nid(int nid)
{
	struct pglist_data *node = NODE_DATA(nid);
	int i;

	for (i = 0; i < memblock.reserved.cnt; i++) {
		unsigned long physbase = memblock.reserved.region[i].base;
		unsigned long size = memblock.reserved.region[i].size;
		unsigned long start_pfn = physbase >> PAGE_SHIFT;
		unsigned long end_pfn = PFN_UP(physbase + size);
		struct node_active_region node_ar;
		unsigned long node_end_pfn = node->node_start_pfn +
					     node->node_spanned_pages;

		/*
		 * Check to make sure that this memblock.reserved area is
		 * within the bounds of the node that we care about.
		 * Checking the nid of the start and end points is not
		 * sufficient because the reserved area could span the
		 * entire node.
		 */
		if (end_pfn <= node->node_start_pfn ||
		    start_pfn >= node_end_pfn)
			continue;

		get_node_active_region(start_pfn, &node_ar);
		while (start_pfn < end_pfn &&
			node_ar.start_pfn < node_ar.end_pfn) {
			unsigned long reserve_size = size;
			/*
			 * if reserved region extends past active region
			 * then trim size to active region
			 */
			if (end_pfn > node_ar.end_pfn)
				reserve_size = (node_ar.end_pfn << PAGE_SHIFT)
					- physbase;
			/*
			 * Only worry about *this* node, others may not
			 * yet have valid NODE_DATA().
			 */
			if (node_ar.nid == nid) {
				dbg("reserve_bootmem %lx %lx nid=%d\n",
					physbase, reserve_size, node_ar.nid);
				reserve_bootmem_node(NODE_DATA(node_ar.nid),
						physbase, reserve_size,
						BOOTMEM_DEFAULT);
			}
			/*
			 * if reserved region is contained in the active region
			 * then done.
			 */
			if (end_pfn <= node_ar.end_pfn)
				break;

			/*
			 * reserved region extends past the active region
			 *   get next active region that contains this
			 *   reserved region
			 */
			start_pfn = node_ar.end_pfn;
			physbase = start_pfn << PAGE_SHIFT;
			size = size - reserve_size;
			get_node_active_region(start_pfn, &node_ar);
		}
	}
}

void __init do_init_bootmem(void)
{
	int nid;

	min_low_pfn = 0;
	max_low_pfn = memblock_end_of_DRAM() >> PAGE_SHIFT;
	max_pfn = max_low_pfn;

	if (parse_numa_properties())
		setup_nonnuma();
	else
		dump_numa_memory_topology();

	for_each_online_node(nid) {
		unsigned long start_pfn, end_pfn;
		void *bootmem_vaddr;
		unsigned long bootmap_pages;

		get_pfn_range_for_nid(nid, &start_pfn, &end_pfn);

		/*
		 * Allocate the node structure node local if possible
		 *
		 * Be careful moving this around, as it relies on all
		 * previous nodes' bootmem to be initialized and have
		 * all reserved areas marked.
		 */
		NODE_DATA(nid) = careful_zallocation(nid,
					sizeof(struct pglist_data),
					SMP_CACHE_BYTES, end_pfn);

		dbg("node %d\n", nid);
		dbg("NODE_DATA() = %p\n", NODE_DATA(nid));

		NODE_DATA(nid)->bdata = &bootmem_node_data[nid];
		NODE_DATA(nid)->node_start_pfn = start_pfn;
		NODE_DATA(nid)->node_spanned_pages = end_pfn - start_pfn;

		if (NODE_DATA(nid)->node_spanned_pages == 0)
			continue;

		dbg("start_paddr = %lx\n", start_pfn << PAGE_SHIFT);
		dbg("end_paddr = %lx\n", end_pfn << PAGE_SHIFT);

		bootmap_pages = bootmem_bootmap_pages(end_pfn - start_pfn);
		bootmem_vaddr = careful_zallocation(nid,
					bootmap_pages << PAGE_SHIFT,
					PAGE_SIZE, end_pfn);

		dbg("bootmap_vaddr = %p\n", bootmem_vaddr);

		init_bootmem_node(NODE_DATA(nid),
				  __pa(bootmem_vaddr) >> PAGE_SHIFT,
				  start_pfn, end_pfn);

		free_bootmem_with_active_regions(nid, end_pfn);
		/*
		 * Be very careful about moving this around.  Future
		 * calls to careful_zallocation() depend on this getting
		 * done correctly.
		 */
		mark_reserved_regions_for_nid(nid);
		sparse_memory_present_with_active_regions(nid);
	}

	init_bootmem_done = 1;

	/*
	 * Now bootmem is initialised we can create the node to cpumask
	 * lookup tables and setup the cpu callback to populate them.
	 */
	setup_node_to_cpumask_map();

	register_cpu_notifier(&ppc64_numa_nb);
	cpu_numa_callback(&ppc64_numa_nb, CPU_UP_PREPARE,
			  (void *)(unsigned long)boot_cpuid);
}

void __init paging_init(void)
{
	unsigned long max_zone_pfns[MAX_NR_ZONES];
	memset(max_zone_pfns, 0, sizeof(max_zone_pfns));
	max_zone_pfns[ZONE_DMA] = memblock_end_of_DRAM() >> PAGE_SHIFT;
	free_area_init_nodes(max_zone_pfns);
}

static int __init early_numa(char *p)
{
	if (!p)
		return 0;

	if (strstr(p, "off"))
		numa_enabled = 0;

	if (strstr(p, "debug"))
		numa_debug = 1;

	p = strstr(p, "fake=");
	if (p)
		cmdline = p + strlen("fake=");

	return 0;
}
early_param("numa", early_numa);
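
/*
 * Because the options are matched with strstr(), they can be combined,
 * e.g. "numa=debug,fake=1G,2G" enables debug output and places fake
 * node boundaries at 1G and 2G.
 */
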
#ifdef CONFIG_MEMORY_HOTPLUG
/*
 * Find the node associated with a hot added memory section for
 * memory represented in the device tree by the property
 * ibm,dynamic-reconfiguration-memory/ibm,dynamic-memory.
 */
static int hot_add_drconf_scn_to_nid(struct device_node *memory,
				     unsigned long scn_addr)
{
	const u32 *dm;
	unsigned int drconf_cell_cnt, rc;
	unsigned long lmb_size;
	struct assoc_arrays aa;
	int nid = -1;

	drconf_cell_cnt = of_get_drconf_memory(memory, &dm);
	if (!drconf_cell_cnt)
		return -1;

	lmb_size = of_get_lmb_size(memory);
	if (!lmb_size)
		return -1;

	rc = of_get_assoc_arrays(memory, &aa);
	if (rc)
		return -1;

	for (; drconf_cell_cnt != 0; --drconf_cell_cnt) {
		struct of_drconf_cell drmem;

		read_drconf_cell(&drmem, &dm);

		/* skip this block if it is reserved or not assigned to
		 * this partition */
		if ((drmem.flags & DRCONF_MEM_RESERVED)
		    || !(drmem.flags & DRCONF_MEM_ASSIGNED))
			continue;

		if ((scn_addr < drmem.base_addr)
		    || (scn_addr >= (drmem.base_addr + lmb_size)))
			continue;

		nid = of_drconf_to_nid_single(&drmem, &aa);
		break;
	}

	return nid;
}

int hot_add_node_scn_to_nid(unsigned long scn_addr)
{
	struct device_node *memory = NULL;
	int nid = -1;

	while ((memory = of_find_node_by_type(memory, "memory")) != NULL) {
		unsigned long start, size;
		int ranges;
		const unsigned int *memcell_buf;
		unsigned int len;

		memcell_buf = of_get_property(memory, "reg", &len);
		if (!memcell_buf || len <= 0)
			continue;

		/* ranges in cell */
		ranges = (len >> 2) / (n_mem_addr_cells + n_mem_size_cells);

		while (ranges--) {
			start = read_n_cells(n_mem_addr_cells, &memcell_buf);
			size = read_n_cells(n_mem_size_cells, &memcell_buf);

			if ((scn_addr < start) || (scn_addr >= (start + size)))
				continue;

			nid = of_node_to_nid_single(memory);
			break;
		}

		of_node_put(memory);
		if (nid >= 0)
			break;
	}

	return nid;
}

/*
 * Find the node associated with a hot added memory section.  Section
 * corresponds to a SPARSEMEM section, not a MEMBLOCK.  It is assumed that
 * sections are fully contained within a single MEMBLOCK.
 */
int hot_add_scn_to_nid(unsigned long scn_addr)
{
	struct device_node *memory = NULL;
	int nid, found = 0;

	if (!numa_enabled || (min_common_depth < 0))
		return first_online_node;

	memory = of_find_node_by_path("/ibm,dynamic-reconfiguration-memory");
	if (memory) {
		nid = hot_add_drconf_scn_to_nid(memory, scn_addr);
		of_node_put(memory);
	} else {
		nid = hot_add_node_scn_to_nid(scn_addr);
	}

	if (nid < 0 || !node_online(nid))
		nid = first_online_node;

	if (NODE_DATA(nid)->node_spanned_pages)
		return nid;

	for_each_online_node(nid) {
		if (NODE_DATA(nid)->node_spanned_pages) {
			found = 1;
			break;
		}
	}

	BUG_ON(!found);
	return nid;
}

#endif /* CONFIG_MEMORY_HOTPLUG */