x86: 64-bit, make sparsemem vmemmap the only memory model
arch/x86/mm/srat_64.c

/*
 * ACPI 3.0 based NUMA setup
 * Copyright 2004 Andi Kleen, SuSE Labs.
 *
 * Reads the ACPI SRAT table to figure out what memory belongs to which CPUs.
 *
 * Called from acpi_numa_init while reading the SRAT and SLIT tables.
 * Assumes all memory regions belonging to a single proximity domain
 * are in one chunk. Holes between them will be included in the node.
 */

#include <linux/kernel.h>
#include <linux/acpi.h>
#include <linux/mmzone.h>
#include <linux/bitmap.h>
#include <linux/module.h>
#include <linux/topology.h>
#include <linux/bootmem.h>
#include <linux/mm.h>
#include <asm/proto.h>
#include <asm/numa.h>
#include <asm/e820.h>

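/*
 * Parsing state: acpi_numa becomes 1 once a CPU affinity entry has been
 * accepted and -1 when the table is rejected.  nodes[] collects the
 * memory range parsed for each node; nodes_add[] tracks the (at most
 * one per node) hot-pluggable range.
 */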
int acpi_numa __initdata;

static struct acpi_table_slit *acpi_slit;

static nodemask_t nodes_parsed __initdata;
static struct bootnode nodes[MAX_NUMNODES] __initdata;
static struct bootnode nodes_add[MAX_NUMNODES];
static int found_add_area __initdata;
int hotadd_percent __initdata = 0;

/* Too small nodes confuse the VM badly. Usually they result
   from BIOS bugs. */
#define NODE_MIN_SIZE (4*1024*1024)

static __init int setup_node(int pxm)
{
	return acpi_map_pxm_to_node(pxm);
}

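/*
 * Return the index of an already parsed node whose range overlaps or
 * exactly matches [start, end), or -1 if there is no conflict.  Empty
 * nodes are skipped.
 */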
static __init int conflicting_nodes(unsigned long start, unsigned long end)
{
	int i;
	for_each_node_mask(i, nodes_parsed) {
		struct bootnode *nd = &nodes[i];
		if (nd->start == nd->end)
			continue;
		if (nd->end > start && nd->start < end)
			return i;
		if (nd->end == end && nd->start == start)
			return i;
	}
	return -1;
}

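/*
 * Clamp node i's range to [start, end); a node lying entirely outside
 * the window collapses to an empty range.  This is skipped once a
 * hot-add area has been found, because hot-add ranges may legitimately
 * extend beyond the memory present at boot.
 */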
static __init void cutoff_node(int i, unsigned long start, unsigned long end)
{
	struct bootnode *nd = &nodes[i];

	if (found_add_area)
		return;

	if (nd->start < start) {
		nd->start = start;
		if (nd->end < nd->start)
			nd->start = nd->end;
	}
	if (nd->end > end) {
		nd->end = end;
		if (nd->start > nd->end)
			nd->start = nd->end;
	}
}

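/*
 * Reject the SRAT wholesale: reset every mapping derived from it so
 * NUMA setup can fall back to another discovery method.
 */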
static __init void bad_srat(void)
{
	int i;
	printk(KERN_ERR "SRAT: SRAT not used.\n");
	acpi_numa = -1;
	found_add_area = 0;
	for (i = 0; i < MAX_LOCAL_APIC; i++)
		apicid_to_node[i] = NUMA_NO_NODE;
	for (i = 0; i < MAX_NUMNODES; i++)
		nodes_add[i].start = nodes[i].end = 0;
	remove_all_active_ranges();
}

static __init inline int srat_disabled(void)
{
	return numa_off || acpi_numa < 0;
}

/*
 * A lot of BIOSes fill in 10 (= no distance) everywhere. This messes
 * up the NUMA heuristics, which want the local node to have a smaller
 * distance than the others.
 * Do some quick checks here and only use the SLIT if it passes.
 */
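/*
 * For example, a sane SLIT for two nodes is
 *	{ 10, 20,
 *	  20, 10 }
 * i.e. LOCAL_DISTANCE (10) on the diagonal and strictly larger values
 * everywhere else.
 */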
static __init int slit_valid(struct acpi_table_slit *slit)
{
	int i, j;
	int d = slit->locality_count;
	for (i = 0; i < d; i++) {
		for (j = 0; j < d; j++) {
			u8 val = slit->entry[d*i + j];
			if (i == j) {
				if (val != LOCAL_DISTANCE)
					return 0;
			} else if (val <= LOCAL_DISTANCE)
				return 0;
		}
	}
	return 1;
}

/* Callback for SLIT parsing */
void __init acpi_numa_slit_init(struct acpi_table_slit *slit)
{
	if (!slit_valid(slit)) {
		printk(KERN_INFO "ACPI: SLIT table looks invalid. Not used.\n");
		return;
	}
	acpi_slit = slit;
}

/* Callback for Proximity Domain -> LAPIC mapping */
void __init
acpi_numa_processor_affinity_init(struct acpi_srat_cpu_affinity *pa)
{
	int pxm, node;
	if (srat_disabled())
		return;
	if (pa->header.length != sizeof(struct acpi_srat_cpu_affinity)) {
		bad_srat();
		return;
	}
	if ((pa->flags & ACPI_SRAT_CPU_ENABLED) == 0)
		return;
	pxm = pa->proximity_domain_lo;
	node = setup_node(pxm);
	if (node < 0) {
		printk(KERN_ERR "SRAT: Too many proximity domains %x\n", pxm);
		bad_srat();
		return;
	}
	apicid_to_node[pa->apic_id] = node;
	acpi_numa = 1;
	printk(KERN_INFO "SRAT: PXM %u -> APIC %u -> Node %u\n",
	       pxm, pa->apic_id, node);
}

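/*
 * Hot-add helper stubs: update_end_of_memory() is a no-op here and
 * hotadd_enough_memory() accepts any size; save_add_info() says
 * whether hot-pluggable SRAT entries should be recorded at all.
 */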
int update_end_of_memory(unsigned long end) {return -1;}
static int hotadd_enough_memory(struct bootnode *nd) {return 1;}
#ifdef CONFIG_MEMORY_HOTPLUG_SPARSE
static inline int save_add_info(void) {return 1;}
#else
static inline int save_add_info(void) {return 0;}
#endif

/*
 * Update nodes_add and decide whether to include the hot-add area in
 * the zone.  Both SPARSE and RESERVE need the nodes_add information.
 * This code supports one contiguous hot add area per node.
 */
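/*
 * Merge rules: an empty nodes_add entry simply takes [start, end); a
 * region that abuts the existing entry extends it at either end; a
 * disjoint region cannot be represented and is (partly) ignored.
 */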
static int reserve_hotadd(int node, unsigned long start, unsigned long end)
{
	unsigned long s_pfn = start >> PAGE_SHIFT;
	unsigned long e_pfn = end >> PAGE_SHIFT;
	int ret = 0, changed = 0;
	struct bootnode *nd = &nodes_add[node];

	/* I had some trouble with strange memory hotadd regions breaking
	   the boot. Be very strict here and reject anything unexpected.
	   If you want working memory hotadd write correct SRATs.

	   The node size check is a basic sanity check to guard against
	   mistakes */
	if ((signed long)(end - start) < NODE_MIN_SIZE) {
		printk(KERN_ERR "SRAT: Hotplug area too small\n");
		return -1;
	}

	/* This check might be a bit too strict, but I'm keeping it for now. */
	if (absent_pages_in_range(s_pfn, e_pfn) != e_pfn - s_pfn) {
		printk(KERN_ERR
			"SRAT: Hotplug area %lu -> %lu has existing memory\n",
			s_pfn, e_pfn);
		return -1;
	}

	if (!hotadd_enough_memory(&nodes_add[node])) {
		printk(KERN_ERR "SRAT: Hotplug area too large\n");
		return -1;
	}

	/* Looks good */

	if (nd->start == nd->end) {
		nd->start = start;
		nd->end = end;
		changed = 1;
	} else {
		if (nd->start == end) {
			nd->start = start;
			changed = 1;
		}
		if (nd->end == start) {
			nd->end = end;
			changed = 1;
		}
		if (!changed)
			printk(KERN_ERR "SRAT: Hotplug zone not continuous. Partly ignored\n");
	}

	ret = update_end_of_memory(nd->end);

	if (changed)
		printk(KERN_INFO "SRAT: hot plug zone found %Lx - %Lx\n", nd->start, nd->end);
	return ret;
}

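/*
 * Overlap policy in the callback below: a PXM overlapping itself only
 * warns, overlap between two different PXMs invalidates the whole
 * SRAT, and a hot-pluggable range that reserve_hotadd() rejects is
 * rolled back to the previous bootnode state.
 */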
/* Callback for parsing of the Proximity Domain <-> Memory Area mappings */
void __init
acpi_numa_memory_affinity_init(struct acpi_srat_mem_affinity *ma)
{
	struct bootnode *nd, oldnode;
	unsigned long start, end;
	int node, pxm;
	int i;

	if (srat_disabled())
		return;
	if (ma->header.length != sizeof(struct acpi_srat_mem_affinity)) {
		bad_srat();
		return;
	}
	if ((ma->flags & ACPI_SRAT_MEM_ENABLED) == 0)
		return;

	if ((ma->flags & ACPI_SRAT_MEM_HOT_PLUGGABLE) && !save_add_info())
		return;
	start = ma->base_address;
	end = start + ma->length;
	pxm = ma->proximity_domain;
	node = setup_node(pxm);
	if (node < 0) {
		printk(KERN_ERR "SRAT: Too many proximity domains.\n");
		bad_srat();
		return;
	}
	i = conflicting_nodes(start, end);
	if (i == node) {
		printk(KERN_WARNING
		"SRAT: Warning: PXM %d (%lx-%lx) overlaps with itself (%Lx-%Lx)\n",
			pxm, start, end, nodes[i].start, nodes[i].end);
	} else if (i >= 0) {
		printk(KERN_ERR
		       "SRAT: PXM %d (%lx-%lx) overlaps with PXM %d (%Lx-%Lx)\n",
		       pxm, start, end, node_to_pxm(i),
		       nodes[i].start, nodes[i].end);
		bad_srat();
		return;
	}
	nd = &nodes[node];
	oldnode = *nd;
	if (!node_test_and_set(node, nodes_parsed)) {
		nd->start = start;
		nd->end = end;
	} else {
		if (start < nd->start)
			nd->start = start;
		if (nd->end < end)
			nd->end = end;
	}

	printk(KERN_INFO "SRAT: Node %u PXM %u %Lx-%Lx\n", node, pxm,
	       nd->start, nd->end);
	e820_register_active_regions(node, nd->start >> PAGE_SHIFT,
				     nd->end >> PAGE_SHIFT);
	push_node_boundaries(node, nd->start >> PAGE_SHIFT,
			     nd->end >> PAGE_SHIFT);

	if ((ma->flags & ACPI_SRAT_MEM_HOT_PLUGGABLE) &&
	    (reserve_hotadd(node, start, end) < 0)) {
		/* Ignore hotadd region. Undo damage */
		printk(KERN_NOTICE "SRAT: Hotplug region ignored\n");
		*nd = oldnode;
		if ((nd->start | nd->end) == 0)
			node_clear(node, nodes_parsed);
	}
}

/* Sanity check to catch more bad SRATs (they are amazingly common).
   Make sure the PXMs cover all memory. */
static int __init nodes_cover_memory(const struct bootnode *nodes)
{
	int i;
	unsigned long pxmram, e820ram;

	pxmram = 0;
	for_each_node_mask(i, nodes_parsed) {
		unsigned long s = nodes[i].start >> PAGE_SHIFT;
		unsigned long e = nodes[i].end >> PAGE_SHIFT;
		pxmram += e - s;
		pxmram -= absent_pages_in_range(s, e);
		if ((long)pxmram < 0)
			pxmram = 0;
	}

	e820ram = end_pfn - absent_pages_in_range(0, end_pfn);
	/* We seem to lose 3 pages somewhere. Allow a bit of slack. */
	if ((long)(e820ram - pxmram) >= 1*1024*1024) {
		printk(KERN_ERR
	"SRAT: PXMs only cover %luMB of your %luMB e820 RAM. Not used.\n",
			(pxmram << PAGE_SHIFT) >> 20,
			(e820ram << PAGE_SHIFT) >> 20);
		return 0;
	}
	return 1;
}

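/* Forget everything recorded for a node that turned out to be unusable. */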
static void unparse_node(int node)
{
	int i;
	node_clear(node, nodes_parsed);
	for (i = 0; i < MAX_LOCAL_APIC; i++) {
		if (apicid_to_node[i] == node)
			apicid_to_node[i] = NUMA_NO_NODE;
	}
}

void __init acpi_numa_arch_fixup(void) {}

/* Use the information discovered above to actually set up the nodes. */
int __init acpi_scan_nodes(unsigned long start, unsigned long end)
{
	int i;

	if (acpi_numa <= 0)
		return -1;

	/* First clean up the node list */
	for (i = 0; i < MAX_NUMNODES; i++) {
		cutoff_node(i, start, end);
		if ((nodes[i].end - nodes[i].start) < NODE_MIN_SIZE) {
			unparse_node(i);
			node_set_offline(i);
		}
	}

	if (!nodes_cover_memory(nodes)) {
		bad_srat();
		return -1;
	}
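	/*
	 * memnode_shift/memnodemap implement the physical address to
	 * node lookup (memnodemap[addr >> memnode_shift]), so failing
	 * to find a usable hash shift means the SRAT layout cannot be
	 * used.
	 */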
	memnode_shift = compute_hash_shift(nodes, MAX_NUMNODES);
	if (memnode_shift < 0) {
		printk(KERN_ERR
		     "SRAT: No NUMA node hash function found. Contact maintainer\n");
		bad_srat();
		return -1;
	}

	node_possible_map = nodes_parsed;

	/* Finally register nodes */
	for_each_node_mask(i, node_possible_map)
		setup_node_bootmem(i, nodes[i].start, nodes[i].end);
	/* Try again in case setup_node_bootmem missed one due
	   to missing bootmem */
	for_each_node_mask(i, node_possible_map)
		if (!node_online(i))
			setup_node_bootmem(i, nodes[i].start, nodes[i].end);

	for (i = 0; i < NR_CPUS; i++) {
		if (cpu_to_node(i) == NUMA_NO_NODE)
			continue;
		if (!node_isset(cpu_to_node(i), node_possible_map))
			numa_set_node(i, NUMA_NO_NODE);
	}
	numa_init_array();
	return 0;
}

#ifdef CONFIG_NUMA_EMU
static int __init find_node_by_addr(unsigned long addr)
{
	int ret = NUMA_NO_NODE;
	int i;

	for_each_node_mask(i, nodes_parsed) {
		/*
		 * Find the real node that this emulated node appears on. For
		 * the sake of simplicity, we only use a real node's starting
		 * address to determine which emulated node it appears on.
		 */
		if (addr >= nodes[i].start && addr < nodes[i].end) {
			ret = i;
			break;
		}
	}
	return ret;
}

/*
 * In NUMA emulation, we need to setup proximity domain (_PXM) to node ID
 * mappings that respect the real ACPI topology but reflect our emulated
 * environment. For each emulated node, we find which real node it appears on
 * and create PXM to NID mappings for those fake nodes which mirror that
 * locality. SLIT will now represent the correct distances between emulated
 * nodes as a result of the real topology.
 */
void __init acpi_fake_nodes(const struct bootnode *fake_nodes, int num_nodes)
{
	int i, j;
	int fake_node_to_pxm_map[MAX_NUMNODES] = {
		[0 ... MAX_NUMNODES-1] = PXM_INVAL
	};
	unsigned char fake_apicid_to_node[MAX_LOCAL_APIC] = {
		[0 ... MAX_LOCAL_APIC-1] = NUMA_NO_NODE
	};

	printk(KERN_INFO "Faking PXM affinity for fake nodes on real "
	       "topology.\n");
	for (i = 0; i < num_nodes; i++) {
		int nid, pxm;

		nid = find_node_by_addr(fake_nodes[i].start);
		if (nid == NUMA_NO_NODE)
			continue;
		pxm = node_to_pxm(nid);
		if (pxm == PXM_INVAL)
			continue;
		fake_node_to_pxm_map[i] = pxm;
		/*
		 * For each apicid_to_node mapping that exists for this real
		 * node, it must now point to the fake node ID.
		 */
		for (j = 0; j < MAX_LOCAL_APIC; j++)
			if (apicid_to_node[j] == nid)
				fake_apicid_to_node[j] = i;
	}
	for (i = 0; i < num_nodes; i++)
		__acpi_map_pxm_to_node(fake_node_to_pxm_map[i], i);
	memcpy(apicid_to_node, fake_apicid_to_node, sizeof(apicid_to_node));

	nodes_clear(nodes_parsed);
	for (i = 0; i < num_nodes; i++)
		if (fake_nodes[i].start != fake_nodes[i].end)
			node_set(i, nodes_parsed);
	WARN_ON(!nodes_cover_memory(fake_nodes));
}

static int null_slit_node_compare(int a, int b)
{
	return node_to_pxm(a) == node_to_pxm(b);
}
#else
static int null_slit_node_compare(int a, int b)
{
	return a == b;
}
#endif /* CONFIG_NUMA_EMU */

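/*
 * Reserve the node's recorded hot-add window out of its bootmem at
 * boot.  The "cost" printed below estimates the mem_map overhead for
 * the window: sizeof(struct page) per page it contains.
 */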
void __init srat_reserve_add_area(int nodeid)
{
	if (found_add_area && nodes_add[nodeid].end) {
		u64 total_mb;

		printk(KERN_INFO "SRAT: Reserving hot-add memory space "
				"for node %d at %Lx-%Lx\n",
			nodeid, nodes_add[nodeid].start, nodes_add[nodeid].end);
		total_mb = (nodes_add[nodeid].end - nodes_add[nodeid].start)
					>> PAGE_SHIFT;
		total_mb *= sizeof(struct page);
		total_mb >>= 20;
		printk(KERN_INFO "SRAT: This will cost you %Lu MB of "
				"pre-allocated memory.\n", (unsigned long long)total_mb);
		reserve_bootmem_node(NODE_DATA(nodeid), nodes_add[nodeid].start,
			       nodes_add[nodeid].end - nodes_add[nodeid].start);
	}
}

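/*
 * The SLIT is a row-major locality_count x locality_count byte matrix
 * indexed by PXM: the distance from a to b is
 * entry[locality_count * pxm(a) + pxm(b)].  Without a SLIT, fall back
 * to LOCAL_DISTANCE/REMOTE_DISTANCE based on node identity.
 */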
int __node_distance(int a, int b)
{
	int index;

	if (!acpi_slit)
		return null_slit_node_compare(a, b) ? LOCAL_DISTANCE :
						      REMOTE_DISTANCE;
	index = acpi_slit->locality_count * node_to_pxm(a);
	return acpi_slit->entry[index + node_to_pxm(b)];
}

EXPORT_SYMBOL(__node_distance);

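/*
 * Map a hot-added physical address to its node: the node whose
 * recorded hot-add range contains the address, defaulting to node 0.
 */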
int memory_add_physaddr_to_nid(u64 start)
{
	int i, ret = 0;

	for_each_node(i)
		if (nodes_add[i].start <= start && nodes_add[i].end > start)
			ret = i;

	return ret;
}
EXPORT_SYMBOL_GPL(memory_add_physaddr_to_nid);