/*
 *  linux/mm/page_alloc.c
 *
 *  Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
 *  Swap reorganised 29.12.95, Stephen Tweedie
 */
#include <linux/config.h>
#include <linux/mm.h>
#include <linux/kernel_stat.h>
#include <linux/swap.h>
#include <linux/swapctl.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/pagemap.h>

#include <asm/uaccess.h> /* for copy_to/from_user */
#include <asm/pgtable.h>
int nr_swap_pages = 0;
int nr_free_pages = 0;
/*
 * Free area management
 *
 * The free_area_list arrays point to the queue heads of the free areas
 * of different sizes
 */
#if CONFIG_AP1000
/* the AP+ needs to allocate 8MB contiguous, aligned chunks of ram
   for the ring buffers */
#define NR_MEM_LISTS 12
#else
#define NR_MEM_LISTS 10
#endif
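/*
 * The largest block the allocator can hand out is 2^(NR_MEM_LISTS-1)
 * pages: with 4kB pages that is 2MB for the usual 10 lists, and 8MB
 * for the 12 lists the AP+ ring buffers need.
 */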
/* The start of this MUST match the start of "struct page" */
struct free_area_struct {
	struct page *next;
	struct page *prev;
	unsigned int * map;
};

#define memory_head(x) ((struct page *)(x))

static struct free_area_struct free_area[NR_MEM_LISTS];
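/*
 * Each free_area[] entry is a doubly-linked ring of free blocks of
 * 2^order pages, plus a bitmap holding one bit per buddy pair that
 * records whether exactly one of the two buddies is free.
 */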
static inline void init_mem_queue(struct free_area_struct * head)
{
	head->next = memory_head(head);
	head->prev = memory_head(head);
}
static inline void add_mem_queue(struct free_area_struct * head, struct page * entry)
{
	struct page * next = head->next;

	entry->prev = memory_head(head);
	entry->next = next;
	next->prev = entry;
	head->next = entry;
}
static inline void remove_mem_queue(struct page * entry)
{
	struct page * next = entry->next;
	struct page * prev = entry->prev;
	next->prev = prev;
	prev->next = next;
}
/*
 * Free_page() adds the page to the free lists. This is optimized for
 * fast normal cases (no error jumps taken normally).
 *
 * The way to optimize jumps for gcc-2.2.2 is to:
 *  - select the "normal" case and put it inside the if () { XXX }
 *  - no else-statements if you can avoid them
 *
 * With the above two rules, you get a straight-line execution path
 * for the normal case, giving better asm-code.
 */

/*
 * Buddy system. Hairy. You really aren't expected to understand this
 *
 * Hint: -mask = 1+~mask
 */
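/*
 * Worked example of the hint: with mask = ~0UL << order, two's
 * complement gives -mask = 1 + ~mask = 1 << order, the size of the
 * block in pages.  So "nr_free_pages -= mask" below adds 1 << order
 * to the free page count, and "map_nr ^ -mask" flips the bit that
 * selects this block's buddy.
 */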
spinlock_t page_alloc_lock = SPIN_LOCK_UNLOCKED;
static inline void free_pages_ok(unsigned long map_nr, unsigned long order)
{
	struct free_area_struct *area = free_area + order;
	unsigned long index = map_nr >> (1 + order);
	unsigned long mask = (~0UL) << order;
	unsigned long flags;

	spin_lock_irqsave(&page_alloc_lock, flags);

#define list(x) (mem_map+(x))

	map_nr &= mask;
	nr_free_pages -= mask;
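	/*
	 * Merge the freed block with its buddy for as long as the buddy
	 * is free too: if the toggled bit was clear the buddy is still
	 * in use and we stop; otherwise the buddy (at map_nr ^ -mask) is
	 * unlinked and the combined block moves up one order, until the
	 * largest order is reached.
	 */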
	while (mask + (1 << (NR_MEM_LISTS-1))) {
		if (!test_and_change_bit(index, area->map))
			break;
		remove_mem_queue(list(map_nr ^ -mask));
		mask <<= 1;
		area++;
		index >>= 1;
		map_nr &= mask;
	}
	add_mem_queue(area, list(map_nr));

#undef list

	spin_unlock_irqrestore(&page_alloc_lock, flags);
}
int __free_page(struct page *page)
{
	if (!PageReserved(page) && put_page_testzero(page)) {
		if (PageSwapCache(page))
			PAGE_BUG(page);
		if (PageLocked(page))
			PAGE_BUG(page);

		page->flags &= ~(1 << PG_referenced);
		free_pages_ok(page - mem_map, 0);
		return 1;
	}
	return 0;
}
int free_pages(unsigned long addr, unsigned long order)
{
	unsigned long map_nr = MAP_NR(addr);

	if (map_nr < max_mapnr) {
		mem_map_t * map = mem_map + map_nr;
		if (!PageReserved(map) && put_page_testzero(map)) {
			if (PageSwapCache(map))
				PAGE_BUG(map);
			if (PageLocked(map))
				PAGE_BUG(map);
			map->flags &= ~(1 << PG_referenced);
			free_pages_ok(map_nr, order);
			return 1;
		}
	}
	return 0;
}
/*
 * Some ugly macros to speed up __get_free_pages()..
 */
#define MARK_USED(index, order, area) \
	change_bit((index) >> (1+(order)), (area)->map)
#define CAN_DMA(x) (PageDMA(x))
#define ADDRESS(x) (PAGE_OFFSET + ((x) << PAGE_SHIFT))
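/*
 * RMQUEUE scans the free lists from the requested order upwards, takes
 * the first block that satisfies the (optional) DMA constraint, toggles
 * its buddy bit, splits it down to the wanted size with EXPAND and then
 * returns its virtual address straight out of __get_free_pages().
 */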
#define RMQUEUE(order, gfp_mask) \
do { struct free_area_struct * area = free_area+order; \
     unsigned long new_order = order; \
	do { struct page *prev = memory_head(area), *ret = prev->next; \
		while (memory_head(area) != ret) { \
			if (!(gfp_mask & __GFP_DMA) || CAN_DMA(ret)) { \
				unsigned long map_nr; \
				(prev->next = ret->next)->prev = prev; \
				map_nr = ret - mem_map; \
				MARK_USED(map_nr, new_order, area); \
				nr_free_pages -= 1 << order; \
				EXPAND(ret, map_nr, order, new_order, area); \
				spin_unlock_irqrestore(&page_alloc_lock,flags);\
				return ADDRESS(map_nr); \
			} \
			prev = ret; \
			ret = ret->next; \
		} \
		new_order++; area++; \
	} while (new_order < NR_MEM_LISTS); \
} while (0)
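/*
 * EXPAND splits a block of 2^high pages down to 2^low pages: each pass
 * hands the lower half back to the next-lower free list (marking its
 * buddy bit) and keeps splitting the upper half; the first page of the
 * final 2^low block gets its reference count set to 1.
 */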
#define EXPAND(map,index,low,high,area) \
do { unsigned long size = 1 << high; \
	while (high > low) { \
		area--; high--; size >>= 1; \
		add_mem_queue(area, map); \
		MARK_USED(index, high, area); \
		index += size; \
		map += size; \
	} \
	set_page_count(map, 1); \
} while (0)
int low_on_memory = 0;
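/*
 * Hysteresis flag for the watermark check in __get_free_pages(): once
 * free memory falls to freepages.min, reclaim keeps being triggered
 * until the count recovers to freepages.high, rather than oscillating
 * around the low watermark.
 */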
unsigned long __get_free_pages(int gfp_mask, unsigned long order)
{
	unsigned long flags;

	if (order >= NR_MEM_LISTS)
		goto nopage;
#ifdef ATOMIC_MEMORY_DEBUGGING
	if ((gfp_mask & __GFP_WAIT) && in_interrupt()) {
		static int count = 0;
		if (++count < 5) {
			printk("gfp called nonatomically from interrupt %p\n",
				__builtin_return_address(0));
		}
		goto nopage;
	}
#endif
	/*
	 * If this is a recursive call, we'd better
	 * do our best to just allocate things without
	 * further thought.
	 */
	if (!(current->flags & PF_MEMALLOC)) {
		int freed;

		if (nr_free_pages > freepages.min) {
			if (!low_on_memory)
				goto ok_to_allocate;
			if (nr_free_pages >= freepages.high) {
				low_on_memory = 0;
				goto ok_to_allocate;
			}
		}

		low_on_memory = 1;
		current->flags |= PF_MEMALLOC;
		freed = try_to_free_pages(gfp_mask);
		current->flags &= ~PF_MEMALLOC;

		if (!freed && !(gfp_mask & (__GFP_MED | __GFP_HIGH)))
			goto nopage;
	}
ok_to_allocate:
	spin_lock_irqsave(&page_alloc_lock, flags);
	RMQUEUE(order, gfp_mask);
	spin_unlock_irqrestore(&page_alloc_lock, flags);
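	/*
	 * RMQUEUE() returns from __get_free_pages() directly on success,
	 * so everything past this point only runs when no suitable block
	 * was found.
	 */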
	/*
	 * If we can schedule, do so, and make sure to yield.
	 * We may be a real-time process, and if kswapd is
	 * waiting for us we need to allow it to run a bit.
	 */
	if (gfp_mask & __GFP_WAIT) {
		current->policy |= SCHED_YIELD;
		schedule();
	}

nopage:
	return 0;
}
/*
 * Show free area list (used inside shift_scroll-lock stuff)
 * We also calculate the percentage fragmentation. We do this by counting the
 * memory on each free list with the exception of the first item on the list.
 */
void show_free_areas(void)
{
	unsigned long order, flags;
	unsigned long total = 0;
	printk("Free pages:      %6dkB\n ( ",nr_free_pages<<(PAGE_SHIFT-10));
	printk("Free: %d (%d %d %d)\n",
		nr_free_pages,
		freepages.min,
		freepages.low,
		freepages.high);
	spin_lock_irqsave(&page_alloc_lock, flags);
	for (order=0 ; order < NR_MEM_LISTS; order++) {
		struct page * tmp;
		unsigned long nr = 0;
		for (tmp = free_area[order].next ; tmp != memory_head(free_area+order) ; tmp = tmp->next) {
			nr ++;
		}
		total += nr * ((PAGE_SIZE>>10) << order);
		printk("%lu*%lukB ", nr, (unsigned long)((PAGE_SIZE>>10) << order));
	}
	spin_unlock_irqrestore(&page_alloc_lock, flags);
	printk("= %lukB)\n", total);
#ifdef SWAP_CACHE_INFO
	show_swap_cache_info();
#endif
}
#define LONG_ALIGN(x) (((x)+(sizeof(long))-1)&~((sizeof(long))-1))
/*
 * set up the free-area data structures:
 *   - mark all pages reserved
 *   - mark all memory queues empty
 *   - clear the memory bitmaps
 */
unsigned long __init free_area_init(unsigned long start_mem, unsigned long end_mem)
{
	mem_map_t * p;
	unsigned long mask = PAGE_MASK;
	unsigned long i;
	/*
	 * Select nr of pages we try to keep free for important stuff
	 * with a minimum of 10 pages and a maximum of 256 pages, so
	 * that we don't waste too much memory on large systems.
	 * This is fairly arbitrary, but based on some behaviour
	 * analysis.
	 */
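	/*
	 * Example: with 4kB pages and 64MB of memory there are 16384
	 * pages, so i = 16384 >> 7 = 128, giving watermarks of
	 * min/low/high = 128/256/384 pages (512kB/1MB/1.5MB).
	 */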
	i = (end_mem - PAGE_OFFSET) >> (PAGE_SHIFT+7);
	if (i < 10)
		i = 10;
	if (i > 256)
		i = 256;
	freepages.min = i;
	freepages.low = i * 2;
	freepages.high = i * 3;
	mem_map = (mem_map_t *) LONG_ALIGN(start_mem);
	p = mem_map + MAP_NR(end_mem);
	start_mem = LONG_ALIGN((unsigned long) p);
	memset(mem_map, 0, start_mem - (unsigned long) mem_map);
	do {
		--p;
		set_page_count(p, 0);
		p->flags = (1 << PG_DMA) | (1 << PG_reserved);
		init_waitqueue_head(&p->wait);
	} while (p > mem_map);
	for (i = 0 ; i < NR_MEM_LISTS ; i++) {
		unsigned long bitmap_size;
		init_mem_queue(free_area+i);
		mask += mask;
		end_mem = (end_mem + ~mask) & mask;
		bitmap_size = (end_mem - PAGE_OFFSET) >> (PAGE_SHIFT + i);
		bitmap_size = (bitmap_size + 7) >> 3;
		bitmap_size = LONG_ALIGN(bitmap_size);
		free_area[i].map = (unsigned int *) start_mem;
		memset((void *) start_mem, 0, bitmap_size);
		start_mem += bitmap_size;
	}
	return start_mem;
}