linux-2.6.9-moxart.git: arch/arm26/machine/small_page.c
/*
 *  linux/arch/arm/mm/small_page.c
 *
 *  Copyright (C) 1996  Russell King
 *  Copyright (C) 2003  Ian Molton
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 *  Changelog:
 *   26/01/1996  RMK  Cleaned up various areas to make little more generic
 *   07/02/1999  RMK  Support added for 16K and 32K page sizes
 *                    containing 8K blocks
 */
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/ptrace.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/smp.h>

#include <asm/bitops.h>
#include <asm/pgtable.h>

#define PEDANTIC
/*
 * Requirement:
 *  We need to be able to allocate naturally aligned memory of finer
 *  granularity than the page size.  This is typically used for the
 *  second level page tables on 32-bit ARMs.
 *
 * Theory:
 *  We "misuse" the Linux memory management system.  We use alloc_page
 *  to allocate a page and then mark it as reserved.  The Linux memory
 *  management system will then ignore the "offset", "next_hash" and
 *  "pprev_hash" entries in the mem_map for this page.
 *
 *  We then use a bitstring in the "offset" field to mark which segments
 *  of the page are in use, and manipulate this as required during the
 *  allocation and freeing of these small pages.
 *
 *  We also maintain a queue of pages being used for this purpose using
 *  the "next_hash" and "pprev_hash" entries of mem_map;
 */

struct order {
        struct list_head queue;
        unsigned int mask;              /* (1 << shift) - 1          */
        unsigned int shift;             /* (1 << shift) size of page */
        unsigned int block_mask;        /* nr_blocks - 1             */
        unsigned int all_used;          /* (1 << nr_blocks) - 1      */
};

static struct order orders[] = {
#if PAGE_SIZE == 32768
        { LIST_HEAD_INIT(orders[0].queue), 2047, 11, 15, 0x0000ffff },
        { LIST_HEAD_INIT(orders[1].queue), 8191, 13,  3, 0x0000000f }
#else
#error unsupported page size (ARGH!)
#endif
};
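
/*
 * Worked reading of the table above (illustrative note, not from the
 * original file): with PAGE_SIZE == 32768, orders[0] describes 2K blocks
 * (mask 2047 == (1 << 11) - 1, shift 11, 32768 / 2048 == 16 blocks per
 * page, hence block_mask 15 and all_used 0x0000ffff), while orders[1]
 * describes 8K blocks (mask 8191 == (1 << 13) - 1, shift 13, 4 blocks
 * per page, hence block_mask 3 and all_used 0x0000000f).
 */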

#define USED_MAP(pg)                    ((pg)->index)
#define TEST_AND_CLEAR_USED(pg,off)     (test_and_clear_bit(off, &USED_MAP(pg)))
#define SET_USED(pg,off)                (set_bit(off, &USED_MAP(pg)))

static spinlock_t small_page_lock = SPIN_LOCK_UNLOCKED;

static unsigned long __get_small_page(int priority, struct order *order)
{
        unsigned long flags;
        struct page *page;
        int offset;

        do {
                spin_lock_irqsave(&small_page_lock, flags);

                if (list_empty(&order->queue))
                        goto need_new_page;

                /* take the first partially-used page on this order's queue */
                page = list_entry(order->queue.next, struct page, lru);
again:
#ifdef PEDANTIC
                if (USED_MAP(page) & ~order->all_used)
                        PAGE_BUG(page);
#endif
                /* find the first free block and mark it used */
                offset = ffz(USED_MAP(page));
                SET_USED(page, offset);
                if (USED_MAP(page) == order->all_used)
                        list_del_init(&page->lru);
                spin_unlock_irqrestore(&small_page_lock, flags);

                return (unsigned long) page_address(page) + (offset << order->shift);

need_new_page:
                spin_unlock_irqrestore(&small_page_lock, flags);
                page = alloc_page(priority);
                spin_lock_irqsave(&small_page_lock, flags);

                if (list_empty(&order->queue)) {
                        if (!page)
                                goto no_page;
                        SetPageReserved(page);
                        USED_MAP(page) = 0;
                        list_add(&page->lru, &order->queue);
                        goto again;
                }

                /* the queue was refilled while the lock was dropped: give
                 * back the page we allocated (if any) and retry */
                spin_unlock_irqrestore(&small_page_lock, flags);
                if (page)
                        __free_page(page);
        } while (1);

no_page:
        spin_unlock_irqrestore(&small_page_lock, flags);
        return 0;
}

static void __free_small_page(unsigned long spage, struct order *order)
{
        unsigned long flags;
        struct page *page;

        if (virt_addr_valid(spage)) {
                page = virt_to_page(spage);

                /*
                 * The container-page must be marked Reserved
                 */
                if (!PageReserved(page) || spage & order->mask)
                        goto non_small;

#ifdef PEDANTIC
                if (USED_MAP(page) & ~order->all_used)
                        PAGE_BUG(page);
#endif
                /* convert the address into a block index within the page */
                spage = spage >> order->shift;
                spage &= order->block_mask;

                /*
                 * the following must be atomic wrt get_page
                 */
                spin_lock_irqsave(&small_page_lock, flags);

                /* a fully used page was off the queue; it has a free block
                 * again, so put it back */
                if (USED_MAP(page) == order->all_used)
                        list_add(&page->lru, &order->queue);

                if (!TEST_AND_CLEAR_USED(page, spage))
                        goto already_free;

                if (USED_MAP(page) == 0)
                        goto free_page;

                spin_unlock_irqrestore(&small_page_lock, flags);
        }
        return;

free_page:
        /*
         * unlink the page from the small page queue and free it
         */
        list_del_init(&page->lru);
        spin_unlock_irqrestore(&small_page_lock, flags);
        ClearPageReserved(page);
        __free_page(page);
        return;

non_small:
        printk("Trying to free non-small page from %p\n", __builtin_return_address(0));
        return;
already_free:
        printk("Trying to free free small page from %p\n", __builtin_return_address(0));
}

unsigned long get_page_8k(int priority)
{
        return __get_small_page(priority, orders+1);
}

void free_page_8k(unsigned long spage)
{
        __free_small_page(spage, orders+1);
}
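
/*
 * Minimal usage sketch (hypothetical, kept under #if 0; the helper name
 * below is not part of this file): a caller that needs a naturally aligned
 * 8K region, e.g. the second level page table code, would obtain a block
 * and later return it like this.
 */
#if 0
static int example_use_8k_block(void)
{
        /* get_page_8k() returns the kernel virtual address of an 8K,
         * naturally aligned block, or 0 if no memory was available */
        unsigned long p = get_page_8k(GFP_KERNEL);

        if (!p)
                return -ENOMEM;

        memset((void *)p, 0, 8192);     /* use the block */

        free_page_8k(p);
        return 0;
}
#endif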