[davej-history.git] / mm / numa.c
/*
 *  Written by Kanoj Sarcar, SGI, Aug 1999
 */
#include <linux/config.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/init.h>
#include <linux/bootmem.h>
#include <linux/mmzone.h>
#include <linux/spinlock.h>

int numnodes = 1;	/* Initialized for UMA platforms */

static bootmem_data_t contig_bootmem_data;
pg_data_t contig_page_data = { bdata: &contig_bootmem_data };
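/*
 * Note: "bdata:" above is the old GNU designated-initializer syntax,
 * equivalent to the C99 spelling ".bdata =".  On UMA platforms this
 * single pg_data_t describes all of memory.
 */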
#ifndef CONFIG_DISCONTIGMEM

/*
 * This is meant to be invoked by platforms whose physical memory starts
 * at a considerably higher value than 0.  Examples are Super-H, ARM, m68k.
 * Should be invoked with parameters (0, 0, unsigned long *[], start_paddr).
 */
void __init free_area_init_node(int nid, pg_data_t *pgdat, struct page *pmap,
	unsigned long *zones_size, unsigned long zone_start_paddr,
	unsigned long *zholes_size)
{
	free_area_init_core(0, &contig_page_data, &mem_map, zones_size,
				zone_start_paddr, zholes_size, pmap);
}

#endif /* !CONFIG_DISCONTIGMEM */
struct page * alloc_pages_node(int nid, int gfp_mask, unsigned long order)
{
#ifdef CONFIG_NUMA
	return __alloc_pages(NODE_DATA(nid)->node_zonelists + gfp_mask, order);
#else
	return alloc_pages(gfp_mask, order);
#endif
}
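/*
 * Illustrative usage (not part of the original file):
 *
 *	struct page *page = alloc_pages_node(nid, GFP_KERNEL, 0);
 *
 * asks for a single page, preferring node "nid" on CONFIG_NUMA
 * kernels and falling back to the ordinary allocator otherwise.
 */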
#ifdef CONFIG_DISCONTIGMEM

#define LONG_ALIGN(x)	(((x)+(sizeof(long))-1)&~((sizeof(long))-1))
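/*
 * Worked example (assuming sizeof(long) == 4):
 *	LONG_ALIGN(5) == 8, LONG_ALIGN(8) == 8, LONG_ALIGN(9) == 12
 * i.e. the argument is rounded up to the next multiple of sizeof(long).
 */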
static spinlock_t node_lock = SPIN_LOCK_UNLOCKED;
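/*
 * node_lock guards the static round-robin "next" pointer in
 * alloc_pages() below and serializes show_free_areas_node() output.
 */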
void show_free_areas_node(pg_data_t *pgdat)
{
	unsigned long flags;

	spin_lock_irqsave(&node_lock, flags);
	show_free_areas_core(pgdat);
	spin_unlock_irqrestore(&node_lock, flags);
}
/*
 * Nodes can be initialized in parallel, in no particular order.
 */
void __init free_area_init_node(int nid, pg_data_t *pgdat, struct page *pmap,
	unsigned long *zones_size, unsigned long zone_start_paddr,
	unsigned long *zholes_size)
{
	int i, size = 0;
	struct page *discard;

	if (mem_map == (mem_map_t *)NULL)
		mem_map = (mem_map_t *)PAGE_OFFSET;

	free_area_init_core(nid, pgdat, &discard, zones_size, zone_start_paddr,
					zholes_size, pmap);
	pgdat->node_id = nid;

	/*
	 * Get space for the valid bitmap (one bit per page).
	 */
	for (i = 0; i < MAX_NR_ZONES; i++)
		size += zones_size[i];
	size = LONG_ALIGN((size + 7) >> 3);
	pgdat->valid_addr_bitmap = (unsigned long *)alloc_bootmem_node(pgdat, size);
	memset(pgdat->valid_addr_bitmap, 0, size);
}
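/*
 * Sizing example (illustrative, not from the original source): for a
 * node with 0x20000 total pages, the bitmap needs one bit per page,
 * so (0x20000 + 7) >> 3 = 0x4000 bytes; that is already a multiple
 * of sizeof(long), giving a 16KB valid_addr_bitmap.
 */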
static struct page * alloc_pages_pgdat(pg_data_t *pgdat, int gfp_mask,
	unsigned long order)
{
	return __alloc_pages(pgdat->node_zonelists + gfp_mask, order);
}
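/*
 * Note: in this era of the kernel, gfp_mask itself indexes the
 * per-node node_zonelists[] array, so every GFP combination has a
 * pre-built zone fallback list (no masking is applied here).
 */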
/*
 * This can be refined.  Currently it does round robin; instead it
 * should do a concentric circle search, starting from the current node.
 */
struct page * alloc_pages(int gfp_mask, unsigned long order)
{
	struct page *ret = 0;
	pg_data_t *start, *temp;
#ifndef CONFIG_NUMA
	unsigned long flags;
	static pg_data_t *next = 0;
#endif

	if (order >= MAX_ORDER)
		return NULL;
#ifdef CONFIG_NUMA
	temp = NODE_DATA(numa_node_id());
#else
	/* Pick the round-robin starting node under node_lock. */
	spin_lock_irqsave(&node_lock, flags);
	if (!next) next = pgdat_list;
	temp = next;
	next = next->node_next;
	spin_unlock_irqrestore(&node_lock, flags);
#endif
	start = temp;
	/* First pass: from the starting node to the end of the list. */
	while (temp) {
		if ((ret = alloc_pages_pgdat(temp, gfp_mask, order)))
			return(ret);
		temp = temp->node_next;
	}
	/* Second pass: wrap around from the list head back to the start. */
	temp = pgdat_list;
	while (temp != start) {
		if ((ret = alloc_pages_pgdat(temp, gfp_mask, order)))
			return(ret);
		temp = temp->node_next;
	}
	return(0);
}
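/*
 * The concentric circle search mentioned above is not implemented in
 * this kernel.  A hypothetical sketch, assuming a node_distance()
 * metric and a for_each_node() iterator (neither exists here), might
 * look like:
 *
 *	for (d = 0; d <= max_distance; d++)
 *		for_each_node(n)
 *			if (node_distance(numa_node_id(), n) == d &&
 *			    (ret = alloc_pages_pgdat(NODE_DATA(n),
 *						     gfp_mask, order)))
 *				return ret;
 *
 * i.e. try nodes in increasing distance from the current node rather
 * than in simple list order.
 */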
#endif /* CONFIG_DISCONTIGMEM */