mm/numa.c (davej-history.git, import of 2.3.99pre3)
/*
 * Written by Kanoj Sarcar, SGI, Aug 1999
 */
#include <linux/config.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/init.h>
#include <linux/bootmem.h>
#include <linux/mmzone.h>
#include <linux/spinlock.h>

int numnodes = 1;	/* Initialized for UMA platforms */

#ifndef CONFIG_DISCONTIGMEM

static bootmem_data_t contig_bootmem_data;
pg_data_t contig_page_data = { bdata: &contig_bootmem_data };

/*
 * This is meant to be invoked by platforms whose physical memory starts
 * at a considerably higher value than 0. Examples are Super-H, ARM, m68k.
 * Should be invoked with parameters (0, 0, unsigned long *[], start_paddr).
 */
void __init free_area_init_node(int nid, pg_data_t *pgdat,
	unsigned long *zones_size, unsigned long zone_start_paddr)
{
	free_area_init_core(0, NODE_DATA(0), &mem_map, zones_size,
		zone_start_paddr);
}
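
/*
 * Illustrative sketch, not part of the original file: the calling
 * convention documented above, written out as a hypothetical platform
 * paging_init().  The single-zone split and the 0x10000000 base address
 * are example values only; MAX_NR_ZONES comes from <linux/mmzone.h>.
 */
#if 0
static void __init example_paging_init(unsigned long totalpages)
{
	unsigned long zones_size[MAX_NR_ZONES] = { 0, };

	zones_size[0] = totalpages;	/* put all memory in the first zone */
	/* nid and pgdat are ignored by the UMA variant above */
	free_area_init_node(0, 0, zones_size, 0x10000000UL);
}
#endif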

#endif /* !CONFIG_DISCONTIGMEM */

struct page * alloc_pages_node(int nid, int gfp_mask, unsigned long order)
{
	return __alloc_pages(NODE_DATA(nid)->node_zonelists + gfp_mask, order);
}
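
/*
 * Illustrative sketch, not part of the original file: a caller that wants
 * memory from one particular node can use alloc_pages_node() directly; the
 * gfp_mask doubles as an index into that node's zonelists, exactly as the
 * wrapper above shows.  GFP_KERNEL and __free_pages() are assumed to be
 * available from this tree's usual headers.
 */
#if 0
static void example_node_alloc(int nid)
{
	struct page *page = alloc_pages_node(nid, GFP_KERNEL, 0);	/* order 0 */

	if (page)
		__free_pages(page, 0);
}
#endif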

#ifdef CONFIG_DISCONTIGMEM

#define LONG_ALIGN(x) (((x)+(sizeof(long))-1)&~((sizeof(long))-1))
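
/*
 * Worked example (added for illustration): LONG_ALIGN() rounds up to the
 * next multiple of sizeof(long), so LONG_ALIGN(9) is 12 with 4-byte longs
 * and 16 with 8-byte longs.
 */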

static spinlock_t node_lock = SPIN_LOCK_UNLOCKED;

void show_free_areas_node(int nid)
{
	unsigned long flags;

	spin_lock_irqsave(&node_lock, flags);
	printk("Memory information for node %d:\n", nid);
	show_free_areas_core(nid);
	spin_unlock_irqrestore(&node_lock, flags);
}

/*
 * Nodes can be initialized in parallel, in no particular order.
 */
void __init free_area_init_node(int nid, pg_data_t *pgdat,
	unsigned long *zones_size, unsigned long zone_start_paddr)
{
	int i, size = 0;
	struct page *discard;

	if (mem_map == (mem_map_t *)NULL)
		mem_map = (mem_map_t *)PAGE_OFFSET;

	free_area_init_core(nid, pgdat, &discard, zones_size, zone_start_paddr);
	pgdat->node_id = nid;

	/*
	 * Get space for the valid bitmap.
	 */
	for (i = 0; i < MAX_NR_ZONES; i++)
		size += zones_size[i];
	size = LONG_ALIGN((size + 7) >> 3);
	pgdat->valid_addr_bitmap = (unsigned long *)alloc_bootmem_node(nid, size);
	memset(pgdat->valid_addr_bitmap, 0, size);
}
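
/*
 * Worked example (added for illustration) of the bitmap sizing above: a
 * node with 32768 pages needs one valid bit per page, i.e.
 * (32768 + 7) >> 3 = 4096 bytes for valid_addr_bitmap; 4096 is already
 * long-aligned, so LONG_ALIGN() leaves it unchanged.
 */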

/*
 * This can be refined. Currently, tries to do round robin, instead
 * should do concentric circle search, starting from current node.
 */
struct page * alloc_pages(int gfp_mask, unsigned long order)
{
	struct page *ret = 0;
	unsigned long flags;
	int startnode, tnode;
	static int nextnid = 0;

	if (order >= MAX_ORDER)
		return NULL;
	spin_lock_irqsave(&node_lock, flags);
	tnode = nextnid;
	nextnid++;
	if (nextnid == numnodes)
		nextnid = 0;
	spin_unlock_irqrestore(&node_lock, flags);
	startnode = tnode;
	while (tnode < numnodes) {
		if ((ret = alloc_pages_node(tnode++, gfp_mask, order)))
			return(ret);
	}
	tnode = 0;
	while (tnode != startnode) {
		if ((ret = alloc_pages_node(tnode++, gfp_mask, order)))
			return(ret);
	}
	return(0);
}
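
/*
 * Worked example (added for illustration) of the round robin above: with
 * numnodes == 4 and nextnid == 2 on entry, the nodes are tried in the
 * order 2, 3, 0, 1, and the next caller starts its search at node 3.
 */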

#endif /* CONFIG_DISCONTIGMEM */