[PATCH] x86_64: Fix the node cpumask of a cpu going down
arch/x86_64/mm/srat.c (linux-2.6/linux-acpi-2.6/ibm-acpi-2.6.git, blob cd25300726fc7ac184009b3cf926f758eeb4c1bb)

/*
 * ACPI 3.0 based NUMA setup
 * Copyright 2004 Andi Kleen, SuSE Labs.
 *
 * Reads the ACPI SRAT table to figure out what memory belongs to which CPUs.
 *
 * Called from acpi_numa_init while reading the SRAT and SLIT tables.
 * Assumes all memory regions belonging to a single proximity domain
 * are in one chunk. Holes between them will be included in the node.
 */

#include <linux/kernel.h>
#include <linux/acpi.h>
#include <linux/mmzone.h>
#include <linux/bitmap.h>
#include <linux/module.h>
#include <linux/topology.h>
#include <asm/proto.h>
#include <asm/numa.h>
#include <asm/e820.h>

static struct acpi_table_slit *acpi_slit;

static nodemask_t nodes_parsed __initdata;
static nodemask_t nodes_found __initdata;
static struct node nodes[MAX_NUMNODES] __initdata;
static u8 pxm2node[256] = { [0 ... 255] = 0xff };

/* Too small nodes confuse the VM badly. Usually they result
   from BIOS bugs. */
#define NODE_MIN_SIZE (4*1024*1024)

static int node_to_pxm(int n);

int pxm_to_node(int pxm)
{
        if ((unsigned)pxm >= 256)
                return -1;
        /* Extend 0xff to (int)-1 */
        return (signed char)pxm2node[pxm];
}

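/* Map a proximity domain to a logical node id, allocating a new id the
   first time a PXM is seen; fails once MAX_NUMNODES ids are in use. */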
static __init int setup_node(int pxm)
{
        unsigned node = pxm2node[pxm];
        if (node == 0xff) {
                if (nodes_weight(nodes_found) >= MAX_NUMNODES)
                        return -1;
                node = first_unset_node(nodes_found);
                node_set(node, nodes_found);
                pxm2node[pxm] = node;
        }
        return pxm2node[pxm];
}

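/* Return the first already-parsed node whose memory range overlaps
   [start, end), or -1 if there is no conflict. */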
static __init int conflicting_nodes(unsigned long start, unsigned long end)
{
        int i;
        for_each_node_mask(i, nodes_parsed) {
                struct node *nd = &nodes[i];
                if (nd->start == nd->end)
                        continue;
                if (nd->end > start && nd->start < end)
                        return i;
                if (nd->end == end && nd->start == start)
                        return i;
        }
        return -1;
}

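/* Clamp node i's range to [start, end); a range that ends up inverted
   collapses to an empty node. */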
static __init void cutoff_node(int i, unsigned long start, unsigned long end)
{
        struct node *nd = &nodes[i];
        if (nd->start < start) {
                nd->start = start;
                if (nd->end < nd->start)
                        nd->start = nd->end;
        }
        if (nd->end > end) {
                nd->end = end;
                if (nd->start > nd->end)
                        nd->start = nd->end;
        }
}

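/* Give up on the SRAT: mark it unusable and drop all APIC -> node
   mappings collected so far. */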
static __init void bad_srat(void)
{
        int i;
        printk(KERN_ERR "SRAT: SRAT not used.\n");
        acpi_numa = -1;
        for (i = 0; i < MAX_LOCAL_APIC; i++)
                apicid_to_node[i] = NUMA_NO_NODE;
}

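/* True if NUMA was disabled on the command line or the SRAT was rejected. */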
static __init inline int srat_disabled(void)
{
        return numa_off || acpi_numa < 0;
}

/*
 * A lot of BIOSes fill in 10 (= no distance) everywhere. This messes
 * up the NUMA heuristics, which want the local node to have a smaller
 * distance than the others.
 * Do some quick checks here and only use the SLIT if it passes.
 */
static __init int slit_valid(struct acpi_table_slit *slit)
{
        int i, j;
        int d = slit->localities;
        for (i = 0; i < d; i++) {
                for (j = 0; j < d; j++) {
                        u8 val = slit->entry[d*i + j];
                        if (i == j) {
                                if (val != 10)
                                        return 0;
                        } else if (val <= 10)
                                return 0;
                }
        }
        return 1;
}

/* Callback for SLIT parsing */
void __init acpi_numa_slit_init(struct acpi_table_slit *slit)
{
        if (!slit_valid(slit)) {
                printk(KERN_INFO "ACPI: SLIT table looks invalid. Not used.\n");
                return;
        }
        acpi_slit = slit;
}

/* Callback for Proximity Domain -> LAPIC mapping */
void __init
acpi_numa_processor_affinity_init(struct acpi_table_processor_affinity *pa)
{
        int pxm, node;
        if (srat_disabled())
                return;
        if (pa->header.length != sizeof(struct acpi_table_processor_affinity)) {
                bad_srat();
                return;
        }
        if (pa->flags.enabled == 0)
                return;
        pxm = pa->proximity_domain;
        node = setup_node(pxm);
        if (node < 0) {
                printk(KERN_ERR "SRAT: Too many proximity domains %x\n", pxm);
                bad_srat();
                return;
        }
        apicid_to_node[pa->apic_id] = node;
        acpi_numa = 1;
        printk(KERN_INFO "SRAT: PXM %u -> APIC %u -> Node %u\n",
               pxm, pa->apic_id, node);
}

/* Callback for parsing of the Proximity Domain <-> Memory Area mappings */
void __init
acpi_numa_memory_affinity_init(struct acpi_table_memory_affinity *ma)
{
        struct node *nd;
        unsigned long start, end;
        int node, pxm;
        int i;

        if (srat_disabled())
                return;
        if (ma->header.length != sizeof(struct acpi_table_memory_affinity)) {
                bad_srat();
                return;
        }
        if (ma->flags.enabled == 0)
                return;
        start = ma->base_addr_lo | ((u64)ma->base_addr_hi << 32);
        end = start + (ma->length_lo | ((u64)ma->length_hi << 32));
        pxm = ma->proximity_domain;
        node = setup_node(pxm);
        if (node < 0) {
                printk(KERN_ERR "SRAT: Too many proximity domains.\n");
                bad_srat();
                return;
        }
        /* It is fine to add this area to the nodes data; it will be used later. */
        if (ma->flags.hot_pluggable == 1)
                printk(KERN_INFO "SRAT: hot plug zone found %lx - %lx\n",
                       start, end);
        i = conflicting_nodes(start, end);
        if (i == node) {
                printk(KERN_WARNING
                       "SRAT: Warning: PXM %d (%lx-%lx) overlaps with itself (%Lx-%Lx)\n",
                       pxm, start, end, nodes[i].start, nodes[i].end);
        } else if (i >= 0) {
                printk(KERN_ERR
                       "SRAT: PXM %d (%lx-%lx) overlaps with PXM %d (%Lx-%Lx)\n",
                       pxm, start, end, node_to_pxm(i),
                       nodes[i].start, nodes[i].end);
                bad_srat();
                return;
        }
        nd = &nodes[node];
        if (!node_test_and_set(node, nodes_parsed)) {
                nd->start = start;
                nd->end = end;
        } else {
                if (start < nd->start)
                        nd->start = start;
                if (nd->end < end)
                        nd->end = end;
        }
        printk(KERN_INFO "SRAT: Node %u PXM %u %Lx-%Lx\n", node, pxm,
               nd->start, nd->end);
}

/* Sanity check to catch more bad SRATs (they are amazingly common).
   Make sure the PXMs cover all memory. */
static int nodes_cover_memory(void)
{
        int i;
        unsigned long pxmram, e820ram;

        pxmram = 0;
        for_each_node_mask(i, nodes_parsed) {
                unsigned long s = nodes[i].start >> PAGE_SHIFT;
                unsigned long e = nodes[i].end >> PAGE_SHIFT;
                pxmram += e - s;
                pxmram -= e820_hole_size(s, e);
        }

        e820ram = end_pfn - e820_hole_size(0, end_pfn);
        if (pxmram < e820ram) {
                printk(KERN_ERR
                       "SRAT: PXMs only cover %luMB of your %luMB e820 RAM. Not used.\n",
                       (pxmram << PAGE_SHIFT) >> 20,
                       (e820ram << PAGE_SHIFT) >> 20);
                return 0;
        }
        return 1;
}

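/* Drop a node again and forget any APIC -> node mappings that pointed at it. */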
static void unparse_node(int node)
{
        int i;
        node_clear(node, nodes_parsed);
        for (i = 0; i < MAX_LOCAL_APIC; i++) {
                if (apicid_to_node[i] == node)
                        apicid_to_node[i] = NUMA_NO_NODE;
        }
}

void __init acpi_numa_arch_fixup(void) {}

/* Use the information discovered above to actually set up the nodes. */
int __init acpi_scan_nodes(unsigned long start, unsigned long end)
{
        int i;

        /* First clean up the node list */
        for (i = 0; i < MAX_NUMNODES; i++) {
                cutoff_node(i, start, end);
                if ((nodes[i].end - nodes[i].start) < NODE_MIN_SIZE)
                        unparse_node(i);
        }

        if (acpi_numa <= 0)
                return -1;

        if (!nodes_cover_memory()) {
                bad_srat();
                return -1;
        }

        memnode_shift = compute_hash_shift(nodes, nodes_weight(nodes_parsed));
        if (memnode_shift < 0) {
                printk(KERN_ERR
                       "SRAT: No NUMA node hash function found. Contact maintainer\n");
                bad_srat();
                return -1;
        }

        /* Finally register nodes */
        for_each_node_mask(i, nodes_parsed)
                setup_node_bootmem(i, nodes[i].start, nodes[i].end);
        for (i = 0; i < NR_CPUS; i++) {
                if (cpu_to_node[i] == NUMA_NO_NODE)
                        continue;
                if (!node_isset(cpu_to_node[i], nodes_parsed))
                        numa_set_node(i, NUMA_NO_NODE);
        }
        numa_init_array();
        return 0;
}

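/* Reverse lookup of pxm2node[]: find the PXM that maps to node n.
   Falls back to 0 if no mapping is found. */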
static int node_to_pxm(int n)
{
        int i;
        if (pxm2node[n] == n)
                return n;
        for (i = 0; i < 256; i++)
                if (pxm2node[i] == n)
                        return i;
        return 0;
}

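/* NUMA distance between two nodes, taken from the SLIT when a valid one
   was accepted; otherwise use the conventional 10 (local) / 20 (remote). */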
int __node_distance(int a, int b)
{
        int index;

        if (!acpi_slit)
                return a == b ? 10 : 20;
        index = acpi_slit->localities * node_to_pxm(a);
        return acpi_slit->entry[index + node_to_pxm(b)];
}

EXPORT_SYMBOL(__node_distance);