[davej-history.git] / mm / numa.c
/*
 * Written by Kanoj Sarcar, SGI, Aug 1999
 */
#include <linux/config.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/init.h>
#include <linux/bootmem.h>
#include <linux/mmzone.h>
#include <linux/spinlock.h>

int numnodes = 1; /* Initialized for UMA platforms */

#ifndef CONFIG_DISCONTIGMEM
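/*
 * On UMA (non-discontig) platforms all physical memory is described by a
 * single, statically allocated node.
 */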
static bootmem_data_t contig_bootmem_data;
pg_data_t contig_page_data = { bdata: &contig_bootmem_data };
/*
 * This is meant to be invoked by platforms whose physical memory starts
 * at a considerably higher value than 0. Examples are Super-H, ARM, m68k.
 * Should be invoked with parameters (0, 0, unsigned long *[], start_paddr).
 */
void __init free_area_init_node(int nid, pg_data_t *pgdat,
        unsigned long *zones_size, unsigned long zone_start_paddr,
        unsigned long *zholes_size)
{
        free_area_init_core(0, NODE_DATA(0), &mem_map, zones_size,
                        zone_start_paddr, zholes_size);
}
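/*
 * Illustrative sketch (not part of the original file): how a platform whose
 * physical memory starts well above 0 might call the routine above from its
 * paging_init().  The example_paging_init name, the total_ram_pages variable,
 * the START_PADDR value and the single-zone split are assumptions made purely
 * for illustration.
 */
#if 0
static void __init example_paging_init(void)
{
        unsigned long zones_size[MAX_NR_ZONES] = { 0, };

        /* Put every page into ZONE_DMA for simplicity. */
        zones_size[ZONE_DMA] = total_ram_pages;
        free_area_init_node(0, NULL, zones_size, START_PADDR, NULL);
}
#endif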
#endif /* !CONFIG_DISCONTIGMEM */
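/*
 * Allocate 2^order contiguous pages from node nid, using the node's
 * zonelist selected by gfp_mask.
 */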
struct page * alloc_pages_node(int nid, int gfp_mask, unsigned long order)
{
        return __alloc_pages(NODE_DATA(nid)->node_zonelists + gfp_mask, order);
}
#ifdef CONFIG_DISCONTIGMEM
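/*
 * Rounds x up to the next multiple of sizeof(long), e.g. with 4-byte longs
 * LONG_ALIGN(13) == 16 and LONG_ALIGN(16) == 16.
 */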
#define LONG_ALIGN(x) (((x)+(sizeof(long))-1)&~((sizeof(long))-1))
static spinlock_t node_lock = SPIN_LOCK_UNLOCKED;
void show_free_areas_node(int nid)
{
        unsigned long flags;

        spin_lock_irqsave(&node_lock, flags);
        printk("Memory information for node %d:\n", nid);
        show_free_areas_core(nid);
        spin_unlock_irqrestore(&node_lock, flags);
}
/*
 * Nodes can be initialized in parallel, in no particular order.
 */
void __init free_area_init_node(int nid, pg_data_t *pgdat,
        unsigned long *zones_size, unsigned long zone_start_paddr,
        unsigned long *zholes_size)
{
        int i, size = 0;
        struct page *discard;

        if (mem_map == (mem_map_t *)NULL)
                mem_map = (mem_map_t *)PAGE_OFFSET;

        free_area_init_core(nid, pgdat, &discard, zones_size, zone_start_paddr,
                        zholes_size);
        pgdat->node_id = nid;
        /*
         * Get space for the valid bitmap.
         */
        for (i = 0; i < MAX_NR_ZONES; i++)
                size += zones_size[i];
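        /*
         * One bit per page in the node: round up to whole bytes, then to a
         * long-aligned length for the bootmem allocation below.
         */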
        size = LONG_ALIGN((size + 7) >> 3);
        pgdat->valid_addr_bitmap = (unsigned long *)alloc_bootmem_node(nid, size);
        memset(pgdat->valid_addr_bitmap, 0, size);
}
/*
 * This can be refined. Currently it does a round-robin search; instead it
 * should do a concentric-circle search, starting from the current node.
 */
struct page * alloc_pages(int gfp_mask, unsigned long order)
{
        struct page *ret = 0;
        int startnode, tnode;
#ifndef CONFIG_NUMA
        unsigned long flags;
        static int nextnid = 0;
#endif
        if (order >= MAX_ORDER)
                return NULL;
#ifdef CONFIG_NUMA
        tnode = numa_node_id();
#else
        spin_lock_irqsave(&node_lock, flags);
        tnode = nextnid;
        nextnid++;
        if (nextnid == numnodes)
                nextnid = 0;
        spin_unlock_irqrestore(&node_lock, flags);
#endif
        startnode = tnode;
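        /*
         * Scan upwards from the starting node, then wrap around to node 0
         * and continue up to (but not including) the starting node again.
         */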
        while (tnode < numnodes) {
                if ((ret = alloc_pages_node(tnode++, gfp_mask, order)))
                        return(ret);
        }
        tnode = 0;
        while (tnode != startnode) {
                if ((ret = alloc_pages_node(tnode++, gfp_mask, order)))
                        return(ret);
        }
        return(0);
}
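/*
 * Illustrative sketch (not part of the original file) of the concentric-circle
 * search suggested in the comment above alloc_pages(): try nodes in order of
 * increasing distance from the current node instead of plain round robin.
 * The node_distance_order() helper assumed here is hypothetical; no such
 * interface exists in this tree.
 */
#if 0
static struct page *alloc_pages_by_distance(int gfp_mask, unsigned long order)
{
        struct page *page;
        int i, nid;

        for (i = 0; i < numnodes; i++) {
                /* Hypothetical: the i'th closest node to the current one. */
                nid = node_distance_order(numa_node_id(), i);
                page = alloc_pages_node(nid, gfp_mask, order);
                if (page)
                        return page;
        }
        return NULL;
}
#endif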
#endif /* CONFIG_DISCONTIGMEM */