x86, NUMA: Move *_numa_init() invocations into initmem_init()
arch/x86/mm/numa_32.c
/*
 * Written by: Patricia Gaughen <gone@us.ibm.com>, IBM Corporation
 * August 2002: added remote node KVA remap - Martin J. Bligh
 *
 * Copyright (C) 2002, IBM Corp.
 *
 * All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 * NON INFRINGEMENT.  See the GNU General Public License for more
 * details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */
#include <linux/mm.h>
#include <linux/bootmem.h>
#include <linux/memblock.h>
#include <linux/mmzone.h>
#include <linux/highmem.h>
#include <linux/initrd.h>
#include <linux/nodemask.h>
#include <linux/module.h>
#include <linux/kexec.h>
#include <linux/pfn.h>
#include <linux/swap.h>
#include <linux/acpi.h>

#include <asm/e820.h>
#include <asm/setup.h>
#include <asm/mmzone.h>
#include <asm/bios_ebda.h>
#include <asm/proto.h>
struct pglist_data *node_data[MAX_NUMNODES] __read_mostly;
EXPORT_SYMBOL(node_data);
/*
 * numa interface - we expect the numa architecture specific code to have
 * populated the following initialisation.
 *
 * 1) node_online_map  - the map of all nodes configured (online) in the system
 * 2) node_start_pfn   - the starting page frame number for a node
 * 3) node_end_pfn     - the ending page frame number for a node
 */
unsigned long node_start_pfn[MAX_NUMNODES] __read_mostly;
unsigned long node_end_pfn[MAX_NUMNODES] __read_mostly;
#ifdef CONFIG_DISCONTIGMEM
/*
 * 4) physnode_map     - the mapping between a pfn and owning node
 *
 * physnode_map keeps track of the physical memory layout of a generic
 * numa node at 64MB granularity (each element of the array represents
 * 64MB of memory and is marked with the owning node id).  So, if the
 * first gig is on node 0 and the second gig is on node 1, physnode_map
 * will contain:
 *
 *     physnode_map[0-15] = 0;
 *     physnode_map[16-31] = 1;
 *     physnode_map[32- ] = -1;
 */
s8 physnode_map[MAX_ELEMENTS] __read_mostly = { [0 ... (MAX_ELEMENTS - 1)] = -1};
EXPORT_SYMBOL(physnode_map);
void memory_present(int nid, unsigned long start, unsigned long end)
{
        unsigned long pfn;

        printk(KERN_INFO "Node: %d, start_pfn: %lx, end_pfn: %lx\n",
               nid, start, end);
        printk(KERN_DEBUG "  Setting physnode_map array to node %d for pfns:\n", nid);
        printk(KERN_DEBUG "  ");
        for (pfn = start; pfn < end; pfn += PAGES_PER_ELEMENT) {
                physnode_map[pfn / PAGES_PER_ELEMENT] = nid;
                printk(KERN_CONT "%lx ", pfn);
        }
        printk(KERN_CONT "\n");
}
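
/*
 * Illustrative sketch (not part of the original file): the reverse
 * lookup that consumes physnode_map.  Each element covers 64MB, so
 * with 4KB pages PAGES_PER_ELEMENT is 16384 and the translation is a
 * single array index:
 *
 *      static inline int example_pfn_to_nid(unsigned long pfn)
 *      {
 *              return physnode_map[pfn / PAGES_PER_ELEMENT];
 *      }
 *
 * The in-tree equivalent is pfn_to_nid() in <asm/mmzone.h>; the helper
 * name above is hypothetical.
 */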
unsigned long node_memmap_size_bytes(int nid, unsigned long start_pfn,
                                     unsigned long end_pfn)
{
        unsigned long nr_pages = end_pfn - start_pfn;

        if (!nr_pages)
                return 0;

        return (nr_pages + 1) * sizeof(struct page);
}
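
/*
 * Worked example (illustrative): assuming sizeof(struct page) is 32
 * bytes on 32-bit, a node spanning 1GiB (262144 pfns at 4KB per page)
 * needs (262144 + 1) * 32 bytes, i.e. just over 8MiB, for its mem_map.
 * The +1 element is a little headroom carried by the function above.
 */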
#endif
extern unsigned long find_max_low_pfn(void);
extern unsigned long highend_pfn, highstart_pfn;

#define LARGE_PAGE_BYTES (PTRS_PER_PTE * PAGE_SIZE)

unsigned long node_remap_size[MAX_NUMNODES];
static void *node_remap_start_vaddr[MAX_NUMNODES];
void set_pmd_pfn(unsigned long vaddr, unsigned long pfn, pgprot_t flags);

static unsigned long kva_start_pfn;
static unsigned long kva_pages;
int __cpuinit numa_cpu_node(int cpu)
{
        return apic->x86_32_numa_cpu_node(cpu);
}
/*
 * FLAT - support for basic PC memory model with discontig enabled, essentially
 *        a single node with all available processors in it with a flat
 *        memory map.
 */
int __init get_memcfg_numa_flat(void)
{
        printk(KERN_DEBUG "NUMA - single node, flat memory mode\n");

        node_start_pfn[0] = 0;
        node_end_pfn[0] = max_pfn;
        memblock_x86_register_active_regions(0, 0, max_pfn);
        memory_present(0, 0, max_pfn);
        node_remap_size[0] = node_memmap_size_bytes(0, 0, max_pfn);

        /* Indicate there is one node available. */
        nodes_clear(node_online_map);
        node_set_online(0);
        return 1;
}
/*
 * Find the highest page frame number we have available for the node
 */
static void __init propagate_e820_map_node(int nid)
{
        if (node_end_pfn[nid] > max_pfn)
                node_end_pfn[nid] = max_pfn;
        /*
         * if a user has given mem=XXXX, then we need to make sure
         * that the node _starts_ before that, too, not just ends
         */
        if (node_start_pfn[nid] > max_pfn)
                node_start_pfn[nid] = max_pfn;
        BUG_ON(node_start_pfn[nid] > node_end_pfn[nid]);
}
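
/*
 * Example (illustrative): booting with mem=512M gives max_pfn = 0x20000,
 * so a SRAT node claiming pfns [0x20000, 0x40000) is clamped to the
 * empty range [0x20000, 0x20000) rather than tripping the BUG_ON()
 * above.
 */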
/*
 * Allocate memory for the pg_data_t for this node via a crude pre-bootmem
 * method.  For node zero take this from the bottom of memory, for
 * subsequent nodes place them at node_remap_start_vaddr which contains
 * node local data in physically node local memory.  See setup_memory()
 * for details.
 */
static void __init allocate_pgdat(int nid)
{
        char buf[16];

        if (node_has_online_mem(nid) && node_remap_start_vaddr[nid])
                NODE_DATA(nid) = (pg_data_t *)node_remap_start_vaddr[nid];
        else {
                unsigned long pgdat_phys;
                pgdat_phys = memblock_find_in_range(min_low_pfn<<PAGE_SHIFT,
                                                    max_pfn_mapped<<PAGE_SHIFT,
                                                    sizeof(pg_data_t),
                                                    PAGE_SIZE);
                NODE_DATA(nid) = (pg_data_t *)(pfn_to_kaddr(pgdat_phys>>PAGE_SHIFT));
                memset(buf, 0, sizeof(buf));
                sprintf(buf, "NODE_DATA %d", nid);
                memblock_x86_reserve_range(pgdat_phys, pgdat_phys + sizeof(pg_data_t), buf);
        }
        printk(KERN_DEBUG "allocate_pgdat: node %d NODE_DATA %08lx\n",
               nid, (unsigned long)NODE_DATA(nid));
}
/*
 * In the DISCONTIGMEM and SPARSEMEM memory models, a portion of the kernel
 * virtual address space (KVA) is reserved and portions of nodes are mapped
 * using it.  This is to allow node-local memory to be allocated for
 * structures that would normally require ZONE_NORMAL.  The memory is
 * allocated with alloc_remap() and callers should be prepared to allocate
 * from the bootmem allocator instead.
 */
static unsigned long node_remap_start_pfn[MAX_NUMNODES];
static void *node_remap_end_vaddr[MAX_NUMNODES];
static void *node_remap_alloc_vaddr[MAX_NUMNODES];
static unsigned long node_remap_offset[MAX_NUMNODES];
void *alloc_remap(int nid, unsigned long size)
{
        void *allocation = node_remap_alloc_vaddr[nid];

        size = ALIGN(size, L1_CACHE_BYTES);

        if (!allocation || (allocation + size) >= node_remap_end_vaddr[nid])
                return NULL;

        node_remap_alloc_vaddr[nid] += size;
        memset(allocation, 0, size);

        return allocation;
}
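
/*
 * Typical caller pattern (a sketch, not a quote of any one caller):
 * try the node-local remap arena first and fall back to bootmem when
 * the arena is absent or exhausted, e.g.:
 *
 *      map = alloc_remap(nid, size);
 *      if (!map)
 *              map = alloc_bootmem_node(NODE_DATA(nid), size);
 *
 * This is why the comment above says callers "should be prepared to
 * allocate from the bootmem allocator instead".
 */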
static void __init remap_numa_kva(void)
{
        void *vaddr;
        unsigned long pfn;
        int node;

        for_each_online_node(node) {
                printk(KERN_DEBUG "remap_numa_kva: node %d\n", node);
                for (pfn = 0; pfn < node_remap_size[node]; pfn += PTRS_PER_PTE) {
                        vaddr = node_remap_start_vaddr[node] + (pfn << PAGE_SHIFT);
                        printk(KERN_DEBUG "remap_numa_kva: %08lx to pfn %08lx\n",
                               (unsigned long)vaddr,
                               node_remap_start_pfn[node] + pfn);
                        set_pmd_pfn((ulong) vaddr,
                                    node_remap_start_pfn[node] + pfn,
                                    PAGE_KERNEL_LARGE);
                }
        }
}
#ifdef CONFIG_HIBERNATION
/**
 * resume_map_numa_kva - add KVA mapping to the temporary page tables created
 *                       during resume from hibernation
 * @pgd_base - temporary resume page directory
 */
void resume_map_numa_kva(pgd_t *pgd_base)
{
        int node;

        for_each_online_node(node) {
                unsigned long start_va, start_pfn, size, pfn;

                start_va = (unsigned long)node_remap_start_vaddr[node];
                start_pfn = node_remap_start_pfn[node];
                size = node_remap_size[node];

                printk(KERN_DEBUG "%s: node %d\n", __func__, node);

                for (pfn = 0; pfn < size; pfn += PTRS_PER_PTE) {
                        unsigned long vaddr = start_va + (pfn << PAGE_SHIFT);
                        pgd_t *pgd = pgd_base + pgd_index(vaddr);
                        pud_t *pud = pud_offset(pgd, vaddr);
                        pmd_t *pmd = pmd_offset(pud, vaddr);

                        set_pmd(pmd, pfn_pmd(start_pfn + pfn,
                                             PAGE_KERNEL_LARGE_EXEC));

                        printk(KERN_DEBUG "%s: %08lx -> pfn %08lx\n",
                               __func__, vaddr, start_pfn + pfn);
                }
        }
}
#endif
static __init unsigned long calculate_numa_remap_pages(void)
{
        int nid;
        unsigned long size, reserve_pages = 0;

        for_each_online_node(nid) {
                u64 node_kva_target;
                u64 node_kva_final;

                /*
                 * The acpi/srat node info can show hot-add memory zones
                 * where memory could be added but not currently present.
                 */
                printk(KERN_DEBUG "node %d pfn: [%lx - %lx]\n",
                       nid, node_start_pfn[nid], node_end_pfn[nid]);
                if (node_start_pfn[nid] > max_pfn)
                        continue;
                if (!node_end_pfn[nid])
                        continue;
                if (node_end_pfn[nid] > max_pfn)
                        node_end_pfn[nid] = max_pfn;

                /* ensure the remap includes space for the pgdat. */
                size = node_remap_size[nid] + sizeof(pg_data_t);

                /* convert size to large (pmd size) pages, rounding up */
                size = (size + LARGE_PAGE_BYTES - 1) / LARGE_PAGE_BYTES;
                /* now the roundup is correct, convert to PAGE_SIZE pages */
                size = size * PTRS_PER_PTE;
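
                /*
                 * Worked example (illustrative, non-PAE, so PTRS_PER_PTE
                 * is 1024 and LARGE_PAGE_BYTES is 4MB): a 1GiB node with
                 * an ~8MiB mem_map plus its pgdat rounds up to 3 large
                 * pages, which the line above then expresses as
                 * 3 * 1024 = 3072 PAGE_SIZE pages.
                 */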
                node_kva_target = round_down(node_end_pfn[nid] - size,
                                             PTRS_PER_PTE);
                node_kva_target <<= PAGE_SHIFT;
                do {
                        node_kva_final = memblock_find_in_range(node_kva_target,
                                        ((u64)node_end_pfn[nid])<<PAGE_SHIFT,
                                        ((u64)size)<<PAGE_SHIFT,
                                        LARGE_PAGE_BYTES);
                        node_kva_target -= LARGE_PAGE_BYTES;
                } while (node_kva_final == MEMBLOCK_ERROR &&
                         (node_kva_target>>PAGE_SHIFT) > (node_start_pfn[nid]));

                if (node_kva_final == MEMBLOCK_ERROR)
                        panic("Can not get kva ram\n");

                node_remap_size[nid] = size;
                node_remap_offset[nid] = reserve_pages;
                reserve_pages += size;
                printk(KERN_DEBUG "Reserving %ld pages of KVA for lmem_map of"
                       " node %d at %llx\n",
                       size, nid, node_kva_final>>PAGE_SHIFT);

                /*
                 * Prevent the KVA space from landing below max_low_pfn; we
                 * want it available later on systems with less memory.
                 * The layout will be: KVA address, then KVA RAM.
                 *
                 * We are supposed to record only ranges below max_low_pfn,
                 * but there can be holes in high memory, and the decision
                 * to treat a page as free checks only page_is_ram(pfn) &&
                 * !page_is_reserved_early(pfn).  So reserve the range with
                 * memblock_x86_reserve_range() here, and hope we don't run
                 * out of entries in that array.
                 */
                memblock_x86_reserve_range(node_kva_final,
                                           node_kva_final+(((u64)size)<<PAGE_SHIFT),
                                           "KVA RAM");

                node_remap_start_pfn[nid] = node_kva_final>>PAGE_SHIFT;
        }
        printk(KERN_INFO "Reserving total of %lx pages for numa KVA remap\n",
               reserve_pages);
        return reserve_pages;
}
static void init_remap_allocator(int nid)
{
        node_remap_start_vaddr[nid] = pfn_to_kaddr(
                        kva_start_pfn + node_remap_offset[nid]);
        node_remap_end_vaddr[nid] = node_remap_start_vaddr[nid] +
                        (node_remap_size[nid] * PAGE_SIZE);
        node_remap_alloc_vaddr[nid] = node_remap_start_vaddr[nid] +
                        ALIGN(sizeof(pg_data_t), PAGE_SIZE);

        printk(KERN_DEBUG "node %d will remap to vaddr %08lx - %08lx\n", nid,
               (ulong) node_remap_start_vaddr[nid],
               (ulong) node_remap_end_vaddr[nid]);
}
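
/*
 * Resulting layout of one node's remap window (diagram is illustrative,
 * not from the original file):
 *
 *      node_remap_start_vaddr[nid]               node_remap_end_vaddr[nid]
 *      |<- pg_data_t, page aligned ->|<----- alloc_remap() arena ----->|
 *                                    ^
 *                                    node_remap_alloc_vaddr[nid]
 */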
void __init initmem_init(void)
{
        int nid;
        long kva_target_pfn;

        /*
         * When mapping a NUMA machine we allocate the node_mem_map arrays
         * from node local memory.  They are then mapped directly into KVA
         * between zone normal and vmalloc space.  Calculate the size of
         * this space and use it to adjust the boundary between ZONE_NORMAL
         * and ZONE_HIGHMEM.
         */

        get_memcfg_numa();
        numa_init_array();

        kva_pages = roundup(calculate_numa_remap_pages(), PTRS_PER_PTE);

        kva_target_pfn = round_down(max_low_pfn - kva_pages, PTRS_PER_PTE);
        do {
                kva_start_pfn = memblock_find_in_range(kva_target_pfn<<PAGE_SHIFT,
                                        max_low_pfn<<PAGE_SHIFT,
                                        kva_pages<<PAGE_SHIFT,
                                        PTRS_PER_PTE<<PAGE_SHIFT) >> PAGE_SHIFT;
                kva_target_pfn -= PTRS_PER_PTE;
        } while (kva_start_pfn == MEMBLOCK_ERROR && kva_target_pfn > min_low_pfn);

        if (kva_start_pfn == MEMBLOCK_ERROR)
                panic("Can not get kva space\n");

        printk(KERN_INFO "kva_start_pfn ~ %lx max_low_pfn ~ %lx\n",
               kva_start_pfn, max_low_pfn);
        printk(KERN_INFO "max_pfn = %lx\n", max_pfn);

        /* avoid clash with initrd */
        memblock_x86_reserve_range(kva_start_pfn<<PAGE_SHIFT,
                                   (kva_start_pfn + kva_pages)<<PAGE_SHIFT,
                                   "KVA PG");
#ifdef CONFIG_HIGHMEM
        highstart_pfn = highend_pfn = max_pfn;
        if (max_pfn > max_low_pfn)
                highstart_pfn = max_low_pfn;
        printk(KERN_NOTICE "%ldMB HIGHMEM available.\n",
               pages_to_mb(highend_pfn - highstart_pfn));
        num_physpages = highend_pfn;
        high_memory = (void *) __va(highstart_pfn * PAGE_SIZE - 1) + 1;
#else
        num_physpages = max_low_pfn;
        high_memory = (void *) __va(max_low_pfn * PAGE_SIZE - 1) + 1;
#endif
        printk(KERN_NOTICE "%ldMB LOWMEM available.\n",
               pages_to_mb(max_low_pfn));
        printk(KERN_DEBUG "max_low_pfn = %lx, highstart_pfn = %lx\n",
               max_low_pfn, highstart_pfn);

        printk(KERN_DEBUG "Low memory ends at vaddr %08lx\n",
               (ulong) pfn_to_kaddr(max_low_pfn));
        for_each_online_node(nid) {
                init_remap_allocator(nid);

                allocate_pgdat(nid);
        }
        remap_numa_kva();

        printk(KERN_DEBUG "High memory starts at vaddr %08lx\n",
               (ulong) pfn_to_kaddr(highstart_pfn));
        for_each_online_node(nid)
                propagate_e820_map_node(nid);

        for_each_online_node(nid) {
                memset(NODE_DATA(nid), 0, sizeof(struct pglist_data));
                NODE_DATA(nid)->node_id = nid;
        }

        setup_bootmem_allocator();
}
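
/*
 * Rough boot-time flow of the function above (a summary, assuming
 * setup_arch() is the caller, per the commit this file view belongs
 * to): get_memcfg_numa() fills node_start_pfn/node_end_pfn,
 * calculate_numa_remap_pages() sizes the per-node remap windows, the
 * KVA is carved out of lowmem via memblock, remap_numa_kva() maps it
 * with large pages, and setup_bootmem_allocator() takes over from
 * there.
 */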
#ifdef CONFIG_MEMORY_HOTPLUG
static int paddr_to_nid(u64 addr)
{
        int nid;
        unsigned long pfn = PFN_DOWN(addr);

        for_each_node(nid)
                if (node_start_pfn[nid] <= pfn &&
                    pfn < node_end_pfn[nid])
                        return nid;

        return -1;
}
/*
 * This function is used to ask for the node id BEFORE memmap and
 * mem_section are initialized (pfn_to_nid() can't be used yet).
 * If _PXM is not defined in the ACPI DSDT, the node id must be found
 * via this function.
 */
int memory_add_physaddr_to_nid(u64 addr)
{
        int nid = paddr_to_nid(addr);
        return (nid >= 0) ? nid : 0;
}
EXPORT_SYMBOL_GPL(memory_add_physaddr_to_nid);
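
/*
 * Usage sketch (illustrative): the memory hotplug path calls this when
 * ACPI reports new memory without a _PXM proximity hint, e.g.:
 *
 *      nid = memory_add_physaddr_to_nid(start);
 *      add_memory(nid, start, size);
 *
 * Falling back to node 0 keeps hot-added memory usable even when the
 * address matches no configured node.
 */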
#endif