/*
 * arch/x86_64/mm/numa.c  (from linux-2.6/kmemtrace.git)
 * Web-viewer extraction residue preserved for provenance:
 *   commit subject: "[JFFS2] Fix more breakage caused by janitorial meddling."
 *   blob b2fac14baac0fc151838b32ad54f6eaf741b8047
 */
1 /*
2 * Generic VM initialization for x86-64 NUMA setups.
3 * Copyright 2002,2003 Andi Kleen, SuSE Labs.
4 */
5 #include <linux/kernel.h>
6 #include <linux/mm.h>
7 #include <linux/string.h>
8 #include <linux/init.h>
9 #include <linux/bootmem.h>
10 #include <linux/mmzone.h>
11 #include <linux/ctype.h>
12 #include <linux/module.h>
13 #include <linux/nodemask.h>
15 #include <asm/e820.h>
16 #include <asm/proto.h>
17 #include <asm/dma.h>
18 #include <asm/numa.h>
19 #include <asm/acpi.h>
21 #ifndef Dprintk
22 #define Dprintk(x...)
23 #endif
25 struct pglist_data *node_data[MAX_NUMNODES] __read_mostly;
26 bootmem_data_t plat_node_bdata[MAX_NUMNODES];
28 struct memnode memnode;
30 unsigned char cpu_to_node[NR_CPUS] __read_mostly = {
31 [0 ... NR_CPUS-1] = NUMA_NO_NODE
33 unsigned char apicid_to_node[MAX_LOCAL_APIC] __cpuinitdata = {
34 [0 ... MAX_LOCAL_APIC-1] = NUMA_NO_NODE
36 cpumask_t node_to_cpumask[MAX_NUMNODES] __read_mostly;
38 int numa_off __initdata;
42 * Given a shift value, try to populate memnodemap[]
43 * Returns :
44 * 1 if OK
45 * 0 if memnodmap[] too small (of shift too small)
46 * -1 if node overlap or lost ram (shift too big)
48 static int __init
49 populate_memnodemap(const struct bootnode *nodes, int numnodes, int shift)
51 int i;
52 int res = -1;
53 unsigned long addr, end;
55 if (shift >= 64)
56 return -1;
57 memset(memnodemap, 0xff, sizeof(memnodemap));
58 for (i = 0; i < numnodes; i++) {
59 addr = nodes[i].start;
60 end = nodes[i].end;
61 if (addr >= end)
62 continue;
63 if ((end >> shift) >= NODEMAPSIZE)
64 return 0;
65 do {
66 if (memnodemap[addr >> shift] != 0xff)
67 return -1;
68 memnodemap[addr >> shift] = i;
69 addr += (1UL << shift);
70 } while (addr < end);
71 res = 1;
73 return res;
76 int __init compute_hash_shift(struct bootnode *nodes, int numnodes)
78 int shift = 20;
80 while (populate_memnodemap(nodes, numnodes, shift + 1) >= 0)
81 shift++;
83 printk(KERN_DEBUG "NUMA: Using %d for the hash shift.\n",
84 shift);
86 if (populate_memnodemap(nodes, numnodes, shift) != 1) {
87 printk(KERN_INFO
88 "Your memory is not aligned you need to rebuild your kernel "
89 "with a bigger NODEMAPSIZE shift=%d\n",
90 shift);
91 return -1;
93 return shift;
#ifdef CONFIG_SPARSEMEM
/* Map a page frame number to its node id via the memnodemap hash. */
int early_pfn_to_nid(unsigned long pfn)
{
	return phys_to_nid(pfn << PAGE_SHIFT);
}
#endif
103 static void * __init
104 early_node_mem(int nodeid, unsigned long start, unsigned long end,
105 unsigned long size)
107 unsigned long mem = find_e820_area(start, end, size);
108 void *ptr;
109 if (mem != -1L)
110 return __va(mem);
111 ptr = __alloc_bootmem_nopanic(size,
112 SMP_CACHE_BYTES, __pa(MAX_DMA_ADDRESS));
113 if (ptr == 0) {
114 printk(KERN_ERR "Cannot find %lu bytes in node %d\n",
115 size, nodeid);
116 return NULL;
118 return ptr;
121 /* Initialize bootmem allocator for a node */
122 void __init setup_node_bootmem(int nodeid, unsigned long start, unsigned long end)
124 unsigned long start_pfn, end_pfn, bootmap_pages, bootmap_size, bootmap_start;
125 unsigned long nodedata_phys;
126 void *bootmap;
127 const int pgdat_size = round_up(sizeof(pg_data_t), PAGE_SIZE);
129 start = round_up(start, ZONE_ALIGN);
131 printk(KERN_INFO "Bootmem setup node %d %016lx-%016lx\n", nodeid, start, end);
133 start_pfn = start >> PAGE_SHIFT;
134 end_pfn = end >> PAGE_SHIFT;
136 node_data[nodeid] = early_node_mem(nodeid, start, end, pgdat_size);
137 if (node_data[nodeid] == NULL)
138 return;
139 nodedata_phys = __pa(node_data[nodeid]);
141 memset(NODE_DATA(nodeid), 0, sizeof(pg_data_t));
142 NODE_DATA(nodeid)->bdata = &plat_node_bdata[nodeid];
143 NODE_DATA(nodeid)->node_start_pfn = start_pfn;
144 NODE_DATA(nodeid)->node_spanned_pages = end_pfn - start_pfn;
146 /* Find a place for the bootmem map */
147 bootmap_pages = bootmem_bootmap_pages(end_pfn - start_pfn);
148 bootmap_start = round_up(nodedata_phys + pgdat_size, PAGE_SIZE);
149 bootmap = early_node_mem(nodeid, bootmap_start, end,
150 bootmap_pages<<PAGE_SHIFT);
151 if (bootmap == NULL) {
152 if (nodedata_phys < start || nodedata_phys >= end)
153 free_bootmem((unsigned long)node_data[nodeid],pgdat_size);
154 node_data[nodeid] = NULL;
155 return;
157 bootmap_start = __pa(bootmap);
158 Dprintk("bootmap start %lu pages %lu\n", bootmap_start, bootmap_pages);
160 bootmap_size = init_bootmem_node(NODE_DATA(nodeid),
161 bootmap_start >> PAGE_SHIFT,
162 start_pfn, end_pfn);
164 e820_bootmem_free(NODE_DATA(nodeid), start, end);
166 reserve_bootmem_node(NODE_DATA(nodeid), nodedata_phys, pgdat_size);
167 reserve_bootmem_node(NODE_DATA(nodeid), bootmap_start, bootmap_pages<<PAGE_SHIFT);
168 #ifdef CONFIG_ACPI_NUMA
169 srat_reserve_add_area(nodeid);
170 #endif
171 node_set_online(nodeid);
174 /* Initialize final allocator for a zone */
175 void __init setup_node_zones(int nodeid)
177 unsigned long start_pfn, end_pfn, memmapsize, limit;
178 unsigned long zones[MAX_NR_ZONES];
179 unsigned long holes[MAX_NR_ZONES];
181 start_pfn = node_start_pfn(nodeid);
182 end_pfn = node_end_pfn(nodeid);
184 Dprintk(KERN_INFO "Setting up node %d %lx-%lx\n",
185 nodeid, start_pfn, end_pfn);
187 /* Try to allocate mem_map at end to not fill up precious <4GB
188 memory. */
189 memmapsize = sizeof(struct page) * (end_pfn-start_pfn);
190 limit = end_pfn << PAGE_SHIFT;
191 #ifdef CONFIG_FLAT_NODE_MEM_MAP
192 NODE_DATA(nodeid)->node_mem_map =
193 __alloc_bootmem_core(NODE_DATA(nodeid)->bdata,
194 memmapsize, SMP_CACHE_BYTES,
195 round_down(limit - memmapsize, PAGE_SIZE),
196 limit);
197 #endif
199 size_zones(zones, holes, start_pfn, end_pfn);
200 free_area_init_node(nodeid, NODE_DATA(nodeid), zones,
201 start_pfn, holes);
204 void __init numa_init_array(void)
206 int rr, i;
207 /* There are unfortunately some poorly designed mainboards around
208 that only connect memory to a single CPU. This breaks the 1:1 cpu->node
209 mapping. To avoid this fill in the mapping for all possible
210 CPUs, as the number of CPUs is not known yet.
211 We round robin the existing nodes. */
212 rr = first_node(node_online_map);
213 for (i = 0; i < NR_CPUS; i++) {
214 if (cpu_to_node[i] != NUMA_NO_NODE)
215 continue;
216 numa_set_node(i, rr);
217 rr = next_node(rr, node_online_map);
218 if (rr == MAX_NUMNODES)
219 rr = first_node(node_online_map);
#ifdef CONFIG_NUMA_EMU
/* Number of fake nodes requested via "numa=fake=N"; 0 means disabled. */
int numa_fake __initdata = 0;

/* Numa emulation: split all memory into numa_fake equal nodes.
 * Returns 0 on success, -1 if no hash shift could be found. */
static int numa_emulation(unsigned long start_pfn, unsigned long end_pfn)
{
	int i;
	struct bootnode nodes[MAX_NUMNODES];
	unsigned long sz = ((end_pfn - start_pfn)<<PAGE_SHIFT) / numa_fake;

	/* Kludge needed for the hash function: round the node size down
	   to a power of two. */
	if (hweight64(sz) > 1) {
		unsigned long x = 1;
		while ((x << 1) < sz)
			x <<= 1;
		if (x < sz/2)
			printk(KERN_ERR "Numa emulation unbalanced. Complain to maintainer\n");
		sz = x;
	}

	memset(&nodes,0,sizeof(nodes));
	for (i = 0; i < numa_fake; i++) {
		nodes[i].start = (start_pfn<<PAGE_SHIFT) + i*sz;
		/* The last node absorbs any remainder. */
		if (i == numa_fake-1)
			sz = (end_pfn<<PAGE_SHIFT) - nodes[i].start;
		nodes[i].end = nodes[i].start + sz;
		printk(KERN_INFO "Faking node %d at %016Lx-%016Lx (%LuMB)\n",
		       i,
		       nodes[i].start, nodes[i].end,
		       (nodes[i].end - nodes[i].start) >> 20);
		node_set_online(i);
	}
	memnode_shift = compute_hash_shift(nodes, numa_fake);
	if (memnode_shift < 0) {
		memnode_shift = 0;
		printk(KERN_ERR "No NUMA hash function found. Emulation disabled.\n");
		return -1;
	}
	for_each_online_node(i)
		setup_node_bootmem(i, nodes[i].start, nodes[i].end);
	numa_init_array();
	return 0;
}
#endif
269 void __init numa_initmem_init(unsigned long start_pfn, unsigned long end_pfn)
271 int i;
273 #ifdef CONFIG_NUMA_EMU
274 if (numa_fake && !numa_emulation(start_pfn, end_pfn))
275 return;
276 #endif
278 #ifdef CONFIG_ACPI_NUMA
279 if (!numa_off && !acpi_scan_nodes(start_pfn << PAGE_SHIFT,
280 end_pfn << PAGE_SHIFT))
281 return;
282 #endif
284 #ifdef CONFIG_K8_NUMA
285 if (!numa_off && !k8_scan_nodes(start_pfn<<PAGE_SHIFT, end_pfn<<PAGE_SHIFT))
286 return;
287 #endif
288 printk(KERN_INFO "%s\n",
289 numa_off ? "NUMA turned off" : "No NUMA configuration found");
291 printk(KERN_INFO "Faking a node at %016lx-%016lx\n",
292 start_pfn << PAGE_SHIFT,
293 end_pfn << PAGE_SHIFT);
294 /* setup dummy node covering all memory */
295 memnode_shift = 63;
296 memnodemap[0] = 0;
297 nodes_clear(node_online_map);
298 node_set_online(0);
299 for (i = 0; i < NR_CPUS; i++)
300 numa_set_node(i, 0);
301 node_to_cpumask[0] = cpumask_of_cpu(0);
302 setup_node_bootmem(0, start_pfn << PAGE_SHIFT, end_pfn << PAGE_SHIFT);
305 __cpuinit void numa_add_cpu(int cpu)
307 set_bit(cpu, &node_to_cpumask[cpu_to_node(cpu)]);
310 void __cpuinit numa_set_node(int cpu, int node)
312 cpu_pda(cpu)->nodenumber = node;
313 cpu_to_node[cpu] = node;
316 unsigned long __init numa_free_all_bootmem(void)
318 int i;
319 unsigned long pages = 0;
320 for_each_online_node(i) {
321 pages += free_all_bootmem_node(NODE_DATA(i));
323 return pages;
#ifdef CONFIG_SPARSEMEM
/* Tell sparsemem which PFN ranges each online node owns, then build
 * the sparse mem_map. */
static void __init arch_sparse_init(void)
{
	int i;

	for_each_online_node(i)
		memory_present(i, node_start_pfn(i), node_end_pfn(i));

	sparse_init();
}
#else
#define arch_sparse_init() do {} while (0)
#endif
340 void __init paging_init(void)
342 int i;
344 arch_sparse_init();
346 for_each_online_node(i) {
347 setup_node_zones(i);
351 /* [numa=off] */
352 __init int numa_setup(char *opt)
354 if (!strncmp(opt,"off",3))
355 numa_off = 1;
356 #ifdef CONFIG_NUMA_EMU
357 if(!strncmp(opt, "fake=", 5)) {
358 numa_fake = simple_strtoul(opt+5,NULL,0); ;
359 if (numa_fake >= MAX_NUMNODES)
360 numa_fake = MAX_NUMNODES;
362 #endif
363 #ifdef CONFIG_ACPI_NUMA
364 if (!strncmp(opt,"noacpi",6))
365 acpi_numa = -1;
366 if (!strncmp(opt,"hotadd=", 7))
367 hotadd_percent = simple_strtoul(opt+7, NULL, 10);
368 #endif
369 return 1;
373 * Setup early cpu_to_node.
375 * Populate cpu_to_node[] only if x86_cpu_to_apicid[],
376 * and apicid_to_node[] tables have valid entries for a CPU.
377 * This means we skip cpu_to_node[] initialisation for NUMA
378 * emulation and faking node case (when running a kernel compiled
379 * for NUMA on a non NUMA box), which is OK as cpu_to_node[]
380 * is already initialized in a round robin manner at numa_init_array,
381 * prior to this call, and this initialization is good enough
382 * for the fake NUMA cases.
384 void __init init_cpu_to_node(void)
386 int i;
387 for (i = 0; i < NR_CPUS; i++) {
388 u8 apicid = x86_cpu_to_apicid[i];
389 if (apicid == BAD_APICID)
390 continue;
391 if (apicid_to_node[apicid] == NUMA_NO_NODE)
392 continue;
393 numa_set_node(i,apicid_to_node[apicid]);
/* Exported for NUMA-aware modules. */
EXPORT_SYMBOL(cpu_to_node);
EXPORT_SYMBOL(node_to_cpumask);
EXPORT_SYMBOL(memnode);
EXPORT_SYMBOL(node_data);
#ifdef CONFIG_DISCONTIGMEM
/*
 * Functions to convert PFNs from/to per node page addresses.
 * These are out of line because they are quite big.
 * They could be all tuned by pre caching more state.
 * Should do that.
 */
int pfn_valid(unsigned long pfn)
{
	unsigned nid;
	if (pfn >= num_physpages)
		return 0;
	nid = pfn_to_nid(pfn);
	/* 0xff is the "unmapped" marker in memnodemap
	   (see populate_memnodemap). */
	if (nid == 0xff)
		return 0;
	return pfn >= node_start_pfn(nid) && (pfn) < node_end_pfn(nid);
}
EXPORT_SYMBOL(pfn_valid);
#endif