/*
 *  linux/arch/arm/mm/small_page.c
 *
 *  Copyright (C) 1996  Russell King
 *
 *  Changelog:
 *   26/01/1996	RMK	Cleaned up various areas to make little more generic
 *   07/02/1999	RMK	Support added for 16K and 32K page sizes
 *			containing 8K blocks
 */

#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/ptrace.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/smp.h>

#include <asm/bitops.h>
#include <asm/pgtable.h>

#define PEDANTIC

/*
 * Requirement:
 *  We need to be able to allocate naturally aligned memory of finer
 *  granularity than the page size.  This is typically used for the
 *  second level page tables on 32-bit ARMs.
 *
 * Theory:
 *  We "misuse" the Linux memory management system.  We use alloc_page
 *  to allocate a page and then mark it as reserved.  The Linux memory
 *  management system will then ignore the "offset", "next_hash" and
 *  "pprev_hash" entries in the mem_map for this page.
 *
 *  We then use a bitstring in the "offset" field to mark which segments
 *  of the page are in use, and manipulate this as required during the
 *  allocation and freeing of these small pages.
 *
 *  We also maintain a queue of pages being used for this purpose using
 *  the "next_hash" and "pprev_hash" entries of mem_map.
 */
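
/*
 * Worked example (illustrative note, not from the original source):
 * with PAGE_SIZE == 4096 and 2K blocks, each page holds two blocks and
 * the bitstring needs two bits:
 *
 *	USED_MAP == 0x0   both blocks free (page can be released)
 *	USED_MAP == 0x1   block 0 in use
 *	USED_MAP == 0x3   all_used (page is removed from the queue)
 *
 * An allocation picks the first zero bit with ffz(); for
 * USED_MAP == 0x1, ffz() returns 1 and the caller receives the block at
 * page_address(page) + (1 << 11).
 */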

struct order {
	struct page *queue;
	unsigned int mask;		/* (1 << shift) - 1		*/
	unsigned int shift;		/* (1 << shift) = small page size */
	unsigned int block_mask;	/* nr_blocks - 1		*/
	unsigned int all_used;		/* (1 << nr_blocks) - 1		*/
};

static struct order orders[] = {
#if PAGE_SIZE == 4096
	{ NULL, 2047, 11, 1, 0x00000003 }
#elif PAGE_SIZE == 32768
	{ NULL, 2047, 11, 15, 0x0000ffff },
	{ NULL, 8191, 13, 3, 0x0000000f }
#else
#error unsupported page size
#endif
};
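
/*
 * Derivation of the table above (explanatory note, not in the original):
 * for 2K blocks, shift = 11, so mask = (1 << 11) - 1 = 2047; a 32K page
 * holds 16 such blocks, giving block_mask = 15 and all_used = 0xffff.
 * For 8K blocks, shift = 13 and mask = 8191; a 32K page holds 4 blocks,
 * so block_mask = 3 and all_used = 0xf.  A 4K page holds two 2K blocks,
 * hence block_mask = 1 and all_used = 0x3.
 */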

/*
 * The "offset" field referred to in the comment above is the member
 * called "index" in struct page here.
 */
#define USED_MAP(pg)			((pg)->index)
#define TEST_AND_CLEAR_USED(pg,off)	(test_and_clear_bit(off, &USED_MAP(pg)))
#define SET_USED(pg,off)		(set_bit(off, &USED_MAP(pg)))

static void add_page_to_queue(struct page *page, struct page **p)
{
#ifdef PEDANTIC
	if (page->pprev_hash)
		PAGE_BUG(page);
#endif
	page->next_hash = *p;
	if (*p)
		(*p)->pprev_hash = &page->next_hash;
	*p = page;
	page->pprev_hash = p;
}

static void remove_page_from_queue(struct page *page)
{
	if (page->pprev_hash) {
		if (page->next_hash)
			page->next_hash->pprev_hash = page->pprev_hash;
		*page->pprev_hash = page->next_hash;
		page->pprev_hash = NULL;
	}
}
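
/*
 * Note (added commentary): the two helpers above implement the classic
 * "pointer to the previous next-pointer" doubly linked list.  pprev_hash
 * points at whatever pointer references this page (either the order's
 * queue head or the previous page's next_hash), so unlinking needs no
 * special case for the list head, and pprev_hash != NULL doubles as the
 * "page is on a queue" test.
 */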

static unsigned long __get_small_page(int priority, struct order *order)
{
	unsigned long flags;
	struct page *page;
	int offset;

	save_flags(flags);
	if (!order->queue)
		goto need_new_page;

	cli();
	page = order->queue;
again:
#ifdef PEDANTIC
	if (USED_MAP(page) & ~order->all_used)
		PAGE_BUG(page);
#endif
	offset = ffz(USED_MAP(page));
	SET_USED(page, offset);
	if (USED_MAP(page) == order->all_used)
		remove_page_from_queue(page);
	restore_flags(flags);

	return (unsigned long) page_address(page) + (offset << order->shift);

need_new_page:
	page = alloc_page(priority);

	if (!order->queue) {
		if (!page)
			goto no_page;
		SetPageReserved(page);
		USED_MAP(page) = 0;
		cli();
		add_page_to_queue(page, &order->queue);
	} else {
		/*
		 * Someone refilled the queue while we slept in
		 * alloc_page: give the fresh page back and retry
		 * from the queue head.
		 */
		if (page)	/* alloc_page may have failed */
			__free_page(page);
		cli();
		page = order->queue;
	}
	goto again;

no_page:
	restore_flags(flags);
	return 0;
}
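
/*
 * Note (added commentary): the save_flags()/cli()/restore_flags()
 * sequences above are the old uniprocessor locking idiom; disabling
 * interrupts on the local CPU makes the bitmap update and queue
 * manipulation atomic with respect to __free_small_page().  On SMP
 * this would need a real spinlock; this code predates that.
 */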

static void __free_small_page(unsigned long spage, struct order *order)
{
	unsigned long flags;
	unsigned long nr;
	struct page *page;

	nr = MAP_NR(spage);
	if (nr < max_mapnr) {
		page = mem_map + nr;

		/*
		 * The container-page must be marked Reserved
		 */
		if (!PageReserved(page) || spage & order->mask)
			goto non_small;

#ifdef PEDANTIC
		if (USED_MAP(page) & ~order->all_used)
			PAGE_BUG(page);
#endif

		/*
		 * Convert the small-page address into a block number
		 * within its container page.
		 */
		spage = spage >> order->shift;
		spage &= order->block_mask;

		/*
		 * the following must be atomic wrt get_page
		 */
		save_flags_cli(flags);

		if (USED_MAP(page) == order->all_used)
			add_page_to_queue(page, &order->queue);

		if (!TEST_AND_CLEAR_USED(page, spage))
			goto already_free;

		if (USED_MAP(page) == 0)
			goto free_page;

		restore_flags(flags);
	}
	return;

free_page:
	/*
	 * unlink the page from the small page queue and free it
	 */
	remove_page_from_queue(page);
	restore_flags(flags);
	ClearPageReserved(page);
	__free_page(page);
	return;

non_small:
	printk("Trying to free non-small page from %p\n",
	       __builtin_return_address(0));
	return;
already_free:
	restore_flags(flags);	/* don't return with interrupts disabled */
	printk("Trying to free already-free small page from %p\n",
	       __builtin_return_address(0));
}

unsigned long get_page_2k(int priority)
{
	return __get_small_page(priority, orders+0);
}

void free_page_2k(unsigned long spage)
{
	__free_small_page(spage, orders+0);
}
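
#if 0
/*
 * Illustrative usage (a sketch, not part of the original file): how a
 * hypothetical caller might allocate and free a 2K region, e.g. for an
 * ARM second-level page table.  The function names and the GFP_KERNEL
 * priority are assumptions made for the example only.
 */
static pte_t *example_alloc_pte_table(void)
{
	unsigned long page;

	page = get_page_2k(GFP_KERNEL);	/* returns 0 on failure */
	if (!page)
		return NULL;

	memset((void *)page, 0, 2048);	/* start with an empty table */
	return (pte_t *)page;
}

static void example_free_pte_table(pte_t *table)
{
	if (table)
		free_page_2k((unsigned long)table);
}
#endif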

#if PAGE_SIZE > 8192
unsigned long get_page_8k(int priority)
{
	return __get_small_page(priority, orders+1);
}

void free_page_8k(unsigned long spage)
{
	__free_small_page(spage, orders+1);
}
#endif