x86-64, NUMA: Unify use of memblk in all init methods
[linux-2.6/linux-acpi-2.6/ibm-acpi-2.6.git] arch/x86/mm/srat_64.c
blob 69f147116da7c53288c5c8a8a93a131a1af1ece8
/*
 * ACPI 3.0 based NUMA setup
 * Copyright 2004 Andi Kleen, SuSE Labs.
 *
 * Reads the ACPI SRAT table to figure out what memory belongs to which CPUs.
 *
 * Called from acpi_numa_init while reading the SRAT and SLIT tables.
 * Assumes all memory regions belonging to a single proximity domain
 * are in one chunk. Holes between them will be included in the node.
 */

#include <linux/kernel.h>
#include <linux/acpi.h>
#include <linux/mmzone.h>
#include <linux/bitmap.h>
#include <linux/module.h>
#include <linux/topology.h>
#include <linux/bootmem.h>
#include <linux/memblock.h>
#include <linux/mm.h>
#include <asm/proto.h>
#include <asm/numa.h>
#include <asm/e820.h>
#include <asm/apic.h>
#include <asm/uv/uv.h>

int acpi_numa __initdata;

static struct acpi_table_slit *acpi_slit;

static struct bootnode nodes_add[MAX_NUMNODES];

static __init int setup_node(int pxm)
{
        return acpi_map_pxm_to_node(pxm);
}

static __init void bad_srat(void)
{
        int i;
        printk(KERN_ERR "SRAT: SRAT not used.\n");
        acpi_numa = -1;
        for (i = 0; i < MAX_NUMNODES; i++) {
                numa_nodes[i].start = numa_nodes[i].end = 0;
                nodes_add[i].start = nodes_add[i].end = 0;
        }
        remove_all_active_ranges();
}

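/*
 * bad_srat() is called whenever a malformed SRAT entry is seen.  It
 * wipes any state accumulated so far and sets acpi_numa to -1, so that
 * srat_disabled() becomes true, every later SRAT/SLIT callback turns
 * into a no-op, and boot falls back to another NUMA init method.
 */
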
static __init inline int srat_disabled(void)
{
        return acpi_numa < 0;
}

/* Callback for SLIT parsing */
void __init acpi_numa_slit_init(struct acpi_table_slit *slit)
{
        unsigned length;
        unsigned long phys;

        length = slit->header.length;
        phys = memblock_find_in_range(0, max_pfn_mapped << PAGE_SHIFT, length,
                                      PAGE_SIZE);

        if (phys == MEMBLOCK_ERROR)
                panic("Cannot save SLIT!\n");

        acpi_slit = __va(phys);
        memcpy(acpi_slit, slit, length);
        memblock_x86_reserve_range(phys, phys + length, "ACPI SLIT");
}

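/*
 * The SLIT must be copied out: the firmware mapping it arrives through
 * is only transient during early boot, while the memblock-reserved
 * copy stays valid so __node_distance() can keep dereferencing
 * acpi_slit long after the ACPI tables have been unmapped.
 */
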
/* Callback for Proximity Domain -> x2APIC mapping */
void __init
acpi_numa_x2apic_affinity_init(struct acpi_srat_x2apic_cpu_affinity *pa)
{
        int pxm, node;
        int apic_id;

        if (srat_disabled())
                return;
        if (pa->header.length < sizeof(struct acpi_srat_x2apic_cpu_affinity)) {
                bad_srat();
                return;
        }
        if ((pa->flags & ACPI_SRAT_CPU_ENABLED) == 0)
                return;
        pxm = pa->proximity_domain;
        node = setup_node(pxm);
        if (node < 0) {
                printk(KERN_ERR "SRAT: Too many proximity domains %x\n", pxm);
                bad_srat();
                return;
        }

        apic_id = pa->apic_id;
        if (apic_id >= MAX_LOCAL_APIC) {
                printk(KERN_INFO "SRAT: PXM %u -> APIC 0x%04x -> Node %u skipped apicid that is too big\n",
                       pxm, apic_id, node);
                return;
        }
        set_apicid_to_node(apic_id, node);
        node_set(node, cpu_nodes_parsed);
        acpi_numa = 1;
        printk(KERN_INFO "SRAT: PXM %u -> APIC 0x%04x -> Node %u\n",
               pxm, apic_id, node);
}

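/*
 * Note the length check above uses '<' where the LAPIC handler below
 * uses '!=': the x2APIC affinity structure was added in ACPI 4.0 and
 * presumably may grow in later revisions, so oversized entries are
 * tolerated while truncated ones are rejected.
 */
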
/* Callback for Proximity Domain -> LAPIC mapping */
void __init
acpi_numa_processor_affinity_init(struct acpi_srat_cpu_affinity *pa)
{
        int pxm, node;
        int apic_id;

        if (srat_disabled())
                return;
        if (pa->header.length != sizeof(struct acpi_srat_cpu_affinity)) {
                bad_srat();
                return;
        }
        if ((pa->flags & ACPI_SRAT_CPU_ENABLED) == 0)
                return;
        pxm = pa->proximity_domain_lo;
        node = setup_node(pxm);
        if (node < 0) {
                printk(KERN_ERR "SRAT: Too many proximity domains %x\n", pxm);
                bad_srat();
                return;
        }

        if (get_uv_system_type() >= UV_X2APIC)
                apic_id = (pa->apic_id << 8) | pa->local_sapic_eid;
        else
                apic_id = pa->apic_id;

        if (apic_id >= MAX_LOCAL_APIC) {
                printk(KERN_INFO "SRAT: PXM %u -> APIC 0x%02x -> Node %u skipped apicid that is too big\n",
                       pxm, apic_id, node);
                return;
        }

        set_apicid_to_node(apic_id, node);
        node_set(node, cpu_nodes_parsed);
        acpi_numa = 1;
        printk(KERN_INFO "SRAT: PXM %u -> APIC 0x%02x -> Node %u\n",
               pxm, apic_id, node);
}

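/*
 * On SGI UV systems the usable APIC ID is wider than the 8-bit
 * pa->apic_id field, which is presumably why the handler above splices
 * local_sapic_eid in as the low byte to reconstruct the full ID.
 */
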
#ifdef CONFIG_MEMORY_HOTPLUG_SPARSE
static inline int save_add_info(void) { return 1; }
#else
static inline int save_add_info(void) { return 0; }
#endif

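/*
 * save_add_info() gates whether hot-pluggable SRAT ranges are tracked
 * at all: without CONFIG_MEMORY_HOTPLUG_SPARSE they are skipped
 * outright in acpi_numa_memory_affinity_init() below.
 */
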
/*
 * Update nodes_add[]
 * This code supports one contiguous hot add area per node
 */
static void __init
update_nodes_add(int node, unsigned long start, unsigned long end)
{
        unsigned long s_pfn = start >> PAGE_SHIFT;
        unsigned long e_pfn = end >> PAGE_SHIFT;
        int changed = 0;
        struct bootnode *nd = &nodes_add[node];

        /* I had some trouble with strange memory hotadd regions breaking
           the boot. Be very strict here and reject anything unexpected.
           If you want working memory hotadd write correct SRATs.

           The node size check is a basic sanity check to guard against
           mistakes */
        if ((signed long)(end - start) < NODE_MIN_SIZE) {
                printk(KERN_ERR "SRAT: Hotplug area too small\n");
                return;
        }

        /* This check might be a bit too strict, but I'm keeping it for now. */
        if (absent_pages_in_range(s_pfn, e_pfn) != e_pfn - s_pfn) {
                printk(KERN_ERR
                       "SRAT: Hotplug area %lu -> %lu has existing memory\n",
                       s_pfn, e_pfn);
                return;
        }

        /* Looks good */

        if (nd->start == nd->end) {
                nd->start = start;
                nd->end = end;
                changed = 1;
        } else {
                if (nd->start == end) {
                        nd->start = start;
                        changed = 1;
                }
                if (nd->end == start) {
                        nd->end = end;
                        changed = 1;
                }
                if (!changed)
                        printk(KERN_ERR "SRAT: Hotplug zone not contiguous. Partly ignored\n");
        }

        if (changed) {
                node_set(node, cpu_nodes_parsed);
                printk(KERN_INFO "SRAT: hot plug zone found %Lx - %Lx\n",
                       nd->start, nd->end);
        }
}

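/*
 * For example, if nodes_add[node] already covers [4G, 8G), a new
 * hot-add range [8G, 12G) satisfies nd->end == start and extends the
 * area upward to [4G, 12G), while [2G, 4G) satisfies nd->start == end
 * and extends it downward.  A disjoint range such as [16G, 20G)
 * matches neither endpoint and is reported and ignored, since only one
 * contiguous hot-add area is tracked per node.
 */
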
/* Callback for parsing of the Proximity Domain <-> Memory Area mappings */
void __init
acpi_numa_memory_affinity_init(struct acpi_srat_mem_affinity *ma)
{
        struct bootnode *nd;
        unsigned long start, end;
        int node, pxm;

        if (srat_disabled())
                return;
        if (ma->header.length != sizeof(struct acpi_srat_mem_affinity)) {
                bad_srat();
                return;
        }
        if ((ma->flags & ACPI_SRAT_MEM_ENABLED) == 0)
                return;

        if ((ma->flags & ACPI_SRAT_MEM_HOT_PLUGGABLE) && !save_add_info())
                return;
        start = ma->base_address;
        end = start + ma->length;
        pxm = ma->proximity_domain;
        node = setup_node(pxm);
        if (node < 0) {
                printk(KERN_ERR "SRAT: Too many proximity domains.\n");
                bad_srat();
                return;
        }

        if (numa_add_memblk(node, start, end) < 0) {
                bad_srat();
                return;
        }

        printk(KERN_INFO "SRAT: Node %u PXM %u %lx-%lx\n", node, pxm,
               start, end);

        if (!(ma->flags & ACPI_SRAT_MEM_HOT_PLUGGABLE)) {
                nd = &numa_nodes[node];
                if (!node_test_and_set(node, mem_nodes_parsed)) {
                        nd->start = start;
                        nd->end = end;
                } else {
                        if (start < nd->start)
                                nd->start = start;
                        if (nd->end < end)
                                nd->end = end;
                }
        } else
                update_nodes_add(node, start, end);
}

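/*
 * In short: every enabled, non-hotplug range is recorded twice, once
 * in the generic memblk array via numa_add_memblk() and once by
 * widening the node's span in numa_nodes[], while hot-pluggable ranges
 * only land in nodes_add[] for memory_add_physaddr_to_nid() below.
 */
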
/* Sanity check to catch more bad SRATs (they are amazingly common).
   Make sure the PXMs cover all memory. */
static int __init nodes_cover_memory(const struct bootnode *nodes)
{
        int i;
        unsigned long pxmram, e820ram;

        pxmram = 0;
        for_each_node_mask(i, mem_nodes_parsed) {
                unsigned long s = nodes[i].start >> PAGE_SHIFT;
                unsigned long e = nodes[i].end >> PAGE_SHIFT;
                pxmram += e - s;
                pxmram -= __absent_pages_in_range(i, s, e);
                if ((long)pxmram < 0)
                        pxmram = 0;
        }

        e820ram = max_pfn - (memblock_x86_hole_size(0,
                                max_pfn << PAGE_SHIFT) >> PAGE_SHIFT);
        /* We seem to lose 3 pages somewhere. Allow 1M of slack. */
        if ((long)(e820ram - pxmram) >= (1 << (20 - PAGE_SHIFT))) {
                printk(KERN_ERR
                       "SRAT: PXMs only cover %luMB of your %luMB e820 RAM. Not used.\n",
                       (pxmram << PAGE_SHIFT) >> 20,
                       (e820ram << PAGE_SHIFT) >> 20);
                return 0;
        }
        return 1;
}

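/*
 * The slack above is one megabyte expressed in pages: with the usual
 * PAGE_SHIFT of 12, 1 << (20 - 12) == 256 pages == 1MB.  The SRAT is
 * only rejected when the PXMs leave at least that much e820 RAM
 * uncovered.
 */
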
void __init acpi_numa_arch_fixup(void) {}

int __init x86_acpi_numa_init(void)
{
        int ret;

        ret = acpi_numa_init();
        if (ret < 0)
                return ret;
        return srat_disabled() ? -EINVAL : 0;
}

/* Use the information discovered above to actually set up the nodes. */
int __init acpi_scan_nodes(void)
{
        int i;

        if (acpi_numa <= 0)
                return -1;

        /* for out of order entries in SRAT */
        sort_node_map();
        if (!nodes_cover_memory(numa_nodes)) {
                bad_srat();
                return -1;
        }

        init_memory_mapping_high();

        /* Finally register nodes */
        for_each_node_mask(i, node_possible_map)
                setup_node_bootmem(i, numa_nodes[i].start, numa_nodes[i].end);
        /* Try again in case setup_node_bootmem missed one due
           to missing bootmem */
        for_each_node_mask(i, node_possible_map)
                if (!node_online(i))
                        setup_node_bootmem(i, numa_nodes[i].start,
                                           numa_nodes[i].end);

        for (i = 0; i < nr_cpu_ids; i++) {
                int node = early_cpu_to_node(i);

                if (node == NUMA_NO_NODE)
                        continue;
                if (!node_online(node))
                        numa_clear_node(i);
        }
        numa_init_array();
        return 0;
}

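/*
 * The second for_each_node_mask() pass above is deliberate: a node's
 * bootmem data may need to live on a node that had not been set up yet
 * during the first pass, so any node still offline is retried once all
 * nodes have had their memory registered.
 */
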
#ifdef CONFIG_NUMA_EMU
static int fake_node_to_pxm_map[MAX_NUMNODES] __initdata = {
        [0 ... MAX_NUMNODES-1] = PXM_INVAL
};
static s16 fake_apicid_to_node[MAX_LOCAL_APIC] __initdata = {
        [0 ... MAX_LOCAL_APIC-1] = NUMA_NO_NODE
};

static int __init find_node_by_addr(unsigned long addr)
{
        int ret = NUMA_NO_NODE;
        int i;

        for_each_node_mask(i, mem_nodes_parsed) {
                /*
                 * Find the real node that this emulated node appears on. For
                 * the sake of simplicity, we only use a real node's starting
                 * address to determine which emulated node it appears on.
                 */
                if (addr >= numa_nodes[i].start && addr < numa_nodes[i].end) {
                        ret = i;
                        break;
                }
        }
        return ret;
}

/*
 * In NUMA emulation, we need to setup proximity domain (_PXM) to node ID
 * mappings that respect the real ACPI topology but reflect our emulated
 * environment. For each emulated node, we find which real node it appears on
 * and create PXM to NID mappings for those fake nodes which mirror that
 * locality. SLIT will now represent the correct distances between emulated
 * nodes as a result of the real topology.
 */
void __init acpi_fake_nodes(const struct bootnode *fake_nodes, int num_nodes)
{
        int i, j;

        for (i = 0; i < num_nodes; i++) {
                int nid, pxm;

                nid = find_node_by_addr(fake_nodes[i].start);
                if (nid == NUMA_NO_NODE)
                        continue;
                pxm = node_to_pxm(nid);
                if (pxm == PXM_INVAL)
                        continue;
                fake_node_to_pxm_map[i] = pxm;
                /*
                 * For each apicid_to_node mapping that exists for this real
                 * node, it must now point to the fake node ID.
                 */
                for (j = 0; j < MAX_LOCAL_APIC; j++)
                        if (__apicid_to_node[j] == nid &&
                            fake_apicid_to_node[j] == NUMA_NO_NODE)
                                fake_apicid_to_node[j] = i;
        }

        /*
         * If there are apicid-to-node mappings for physical nodes that do
         * not have a corresponding emulated node, default them to a
         * guaranteed value.
         */
        for (i = 0; i < MAX_LOCAL_APIC; i++)
                if (__apicid_to_node[i] != NUMA_NO_NODE &&
                    fake_apicid_to_node[i] == NUMA_NO_NODE)
                        fake_apicid_to_node[i] = 0;

        for (i = 0; i < num_nodes; i++)
                __acpi_map_pxm_to_node(fake_node_to_pxm_map[i], i);
        memcpy(__apicid_to_node, fake_apicid_to_node, sizeof(__apicid_to_node));

        nodes_clear(mem_nodes_parsed);
        for (i = 0; i < num_nodes; i++)
                if (fake_nodes[i].start != fake_nodes[i].end)
                        node_set(i, mem_nodes_parsed);
}

static int null_slit_node_compare(int a, int b)
{
        return node_to_pxm(a) == node_to_pxm(b);
}
#else
static int null_slit_node_compare(int a, int b)
{
        return a == b;
}
#endif /* CONFIG_NUMA_EMU */

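/*
 * Without a SLIT, null_slit_node_compare() above decides locality;
 * under NUMA emulation several fake nodes can sit on one physical
 * node, so nodes sharing a PXM count as local to each other.  With a
 * SLIT, the table is a locality_count x locality_count byte matrix in
 * row-major PXM order.  For example (assuming node i maps to PXM i),
 * a two-domain matrix
 *
 *      { 10, 20,
 *        20, 10 }
 *
 * gives __node_distance(0, 1) == entry[0 * 2 + 1] == 20 (remote) and
 * __node_distance(1, 1) == entry[1 * 2 + 1] == 10 (LOCAL_DISTANCE).
 */
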
int __node_distance(int a, int b)
{
        int index;

        if (!acpi_slit)
                return null_slit_node_compare(a, b) ? LOCAL_DISTANCE :
                                                      REMOTE_DISTANCE;
        index = acpi_slit->locality_count * node_to_pxm(a);
        return acpi_slit->entry[index + node_to_pxm(b)];
}
EXPORT_SYMBOL(__node_distance);

#if defined(CONFIG_MEMORY_HOTPLUG_SPARSE) || defined(CONFIG_ACPI_HOTPLUG_MEMORY)
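/*
 * Map a hot-added physical address back to the node whose SRAT hot-add
 * window contains it; falls back to node 0 when the address lies in no
 * recorded nodes_add[] range.
 */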
int memory_add_physaddr_to_nid(u64 start)
{
        int i, ret = 0;

        for_each_node(i)
                if (nodes_add[i].start <= start && nodes_add[i].end > start)
                        ret = i;

        return ret;
}
EXPORT_SYMBOL_GPL(memory_add_physaddr_to_nid);
#endif