/*
 * Written by Kanoj Sarcar, SGI, Aug 1999
 */
#include <linux/config.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/init.h>
#include <linux/bootmem.h>
#include <linux/mmzone.h>
#include <linux/spinlock.h>
12 int numnodes
= 1; /* Initialized for UMA platforms */
14 #ifndef CONFIG_DISCONTIGMEM
16 static bootmem_data_t contig_bootmem_data
;
17 pg_data_t contig_page_data
= { bdata
: &contig_bootmem_data
};
/*
 * This is meant to be invoked by platforms whose physical memory starts
 * at a considerably higher value than 0. Examples are Super-H, ARM, m68k.
 * Should be invoked with parameters (0, 0, unsigned long *[], start_paddr).
 */
24 void __init
free_area_init_node(int nid
, pg_data_t
*pgdat
,
25 unsigned long *zones_size
, unsigned long zone_start_paddr
)
27 free_area_init_core(0, NODE_DATA(0), &mem_map
, zones_size
,
31 #endif /* !CONFIG_DISCONTIGMEM */
33 struct page
* alloc_pages_node(int nid
, int gfp_mask
, unsigned long order
)
35 return __alloc_pages(NODE_DATA(nid
)->node_zonelists
+ gfp_mask
, order
);
38 #ifdef CONFIG_DISCONTIGMEM
40 #define LONG_ALIGN(x) (((x)+(sizeof(long))-1)&~((sizeof(long))-1))
42 static spinlock_t node_lock
= SPIN_LOCK_UNLOCKED
;
44 void show_free_areas_node(int nid
)
48 spin_lock_irqsave(&node_lock
, flags
);
49 printk("Memory information for node %d:\n", nid
);
50 show_free_areas_core(nid
);
51 spin_unlock_irqrestore(&node_lock
, flags
);
/*
 * Nodes can be initialized in parallel, in no particular order.
 */
57 void __init
free_area_init_node(int nid
, pg_data_t
*pgdat
,
58 unsigned long *zones_size
, unsigned long zone_start_paddr
)
63 if (mem_map
== (mem_map_t
*)NULL
)
64 mem_map
= (mem_map_t
*)PAGE_OFFSET
;
66 free_area_init_core(nid
, pgdat
, &discard
, zones_size
, zone_start_paddr
);
70 * Get space for the valid bitmap.
72 for (i
= 0; i
< MAX_NR_ZONES
; i
++)
73 size
+= zones_size
[i
];
74 size
= LONG_ALIGN((size
+ 7) >> 3);
75 pgdat
->valid_addr_bitmap
= (unsigned long *)alloc_bootmem_node(nid
, size
);
76 memset(pgdat
->valid_addr_bitmap
, 0, size
);
/*
 * This can be refined. Currently, it tries to do round robin; instead it
 * should do a concentric-circle search, starting from the current node.
 */
83 struct page
* alloc_pages(int gfp_mask
, unsigned long order
)
88 static int nextnid
= 0;
90 if (order
>= MAX_ORDER
)
92 spin_lock_irqsave(&node_lock
, flags
);
95 if (nextnid
== numnodes
)
97 spin_unlock_irqrestore(&node_lock
, flags
);
99 while (tnode
< numnodes
) {
100 if ((ret
= alloc_pages_node(tnode
++, gfp_mask
, order
)))
104 while (tnode
!= startnode
) {
105 if ((ret
= alloc_pages_node(tnode
++, gfp_mask
, order
)))
111 #endif /* CONFIG_DISCONTIGMEM */