linux-2.6/linux-mips.git: mm/highmem.c
/*
 * High memory handling common code and variables.
 *
 * (C) 1999 Andrea Arcangeli, SuSE GmbH, andrea@suse.de
 *          Gerhard Wichert, Siemens AG, Gerhard.Wichert@pdb.siemens.de
 *
 * Redesigned the x86 32-bit VM architecture to deal with
 * 64-bit physical space. With current x86 CPUs this
 * means up to 64 Gigabytes physical RAM.
 *
 * Rewrote high memory support to move the page cache into
 * high memory. Implemented permanent (schedulable) kmaps
 * based on Linus' idea.
 *
 * Copyright (C) 1999 Ingo Molnar <mingo@redhat.com>
 */

#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/highmem.h>
#include <linux/swap.h>
#include <linux/slab.h>

unsigned long highmem_mapnr;
/*
 * Take one locked page, return another low-memory locked page.
 */
struct page * prepare_highmem_swapout(struct page * page)
{
        struct page *new_page;
        unsigned long regular_page;
        unsigned long vaddr;
        /*
         * A highmem page can't be swapped out directly, otherwise
         * the b_data buffer addresses would break the low-level
         * device drivers.
         */
        if (!PageHighMem(page))
                return page;

        /*
         * Here we break the page lock, and we split the
         * dirty page into two. We can unlock the old page,
         * and we'll now have two of them. Too bad, it would
         * have been nice to continue to potentially share
         * across a fork().
         */
        UnlockPage(page);
        regular_page = __get_free_page(GFP_ATOMIC);
        if (!regular_page)
                return NULL;

        vaddr = kmap(page);
        copy_page((void *)regular_page, (void *)vaddr);
        kunmap(page);

        /*
         * ok, we can just forget about our highmem page since
         * we stored its data into the new regular_page.
         */
        page_cache_release(page);
        new_page = mem_map + MAP_NR(regular_page);
        LockPage(new_page);
        return new_page;
}
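/*
 * Illustrative sketch of a caller (hypothetical, not lifted from any
 * particular swap path): a swap-out routine would bounce a highmem page
 * into low memory before handing it to a block driver that dereferences
 * b_data directly.  Only the contract of prepare_highmem_swapout() --
 * locked page in, locked lowmem page or NULL out -- is assumed here:
 *
 *	struct page *out = prepare_highmem_swapout(page);
 *	if (!out)
 *		return;		// lowmem exhausted, caller retries later
 *	// "out" is a locked lowmem copy (or "page" itself if it already
 *	// was lowmem); safe to build buffer_heads against it.
 */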
struct page * replace_with_highmem(struct page * page)
{
        struct page *highpage;
        unsigned long vaddr;

        if (PageHighMem(page) || !nr_free_highpages())
                return page;

        highpage = alloc_page(GFP_ATOMIC|__GFP_HIGHMEM);
        if (!highpage)
                return page;
        if (!PageHighMem(highpage)) {
                page_cache_release(highpage);
                return page;
        }

        vaddr = kmap(highpage);
        copy_page((void *)vaddr, (void *)page_address(page));
        kunmap(highpage);

        /* Preserve the caching of the swap_entry. */
        highpage->index = page->index;
        highpage->mapping = page->mapping;

        /*
         * We can just forget the old page since
         * we stored its data into the new highmem-page.
         */
        page_cache_release(page);

        return highpage;
}
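/*
 * Illustrative sketch (hypothetical caller): the inverse of the swapout
 * bounce above.  Once I/O into a lowmem page has completed, the data can
 * be migrated into highmem to relieve lowmem pressure.  On any failure
 * replace_with_highmem() simply returns the original page, so the call
 * can be made unconditionally:
 *
 *	page = replace_with_highmem(page);
 *	// keep using "page"; index and mapping are preserved either way
 */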
/*
 * A pkmap_count[] entry is not a pure "count":
 *  0 means that it is not mapped, and has not been mapped
 *    since a TLB flush - it is usable.
 *  1 means that there are no users, but it has been mapped
 *    since the last TLB flush - so we can't use it.
 *  n means that there are (n-1) current users of it.
 */
static int pkmap_count[LAST_PKMAP];
static unsigned int last_pkmap_nr;
static spinlock_t kmap_lock = SPIN_LOCK_UNLOCKED;

pte_t * pkmap_page_table;

static DECLARE_WAIT_QUEUE_HEAD(pkmap_map_wait);
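/*
 * Worked example of the states documented above, for a single slot i
 * (derived from kmap_high()/kunmap_high()/flush_all_zero_pkmaps() below):
 *
 *	kmap_high(), first user:	pkmap_count[i] 0 -> 2
 *	kmap_high(), second user:	pkmap_count[i] 2 -> 3
 *	kunmap_high(), per user:	pkmap_count[i] 3 -> 2 -> 1
 *	flush_all_zero_pkmaps():	pkmap_count[i] 1 -> 0
 *
 * The intermediate value 1 ("no users, but possibly still cached in a
 * TLB") is what forces a global TLB flush before the slot is reused.
 */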
static void flush_all_zero_pkmaps(void)
{
        int i;

        for (i = 0; i < LAST_PKMAP; i++) {
                struct page *page;
                pte_t pte;
                /*
                 * zero means we don't have anything to do,
                 * >1 means that it is still in use.  Only
                 * a count of 1 means that it is free but
                 * needs to be unmapped.
                 */
                if (pkmap_count[i] != 1)
                        continue;
                pkmap_count[i] = 0;
                pte = pkmap_page_table[i];
                if (pte_none(pte))
                        BUG();
                pte_clear(pkmap_page_table+i);
                page = pte_page(pte);
                page->virtual = 0;
        }
        flush_tlb_all();
}
static inline unsigned long map_new_virtual(struct page *page)
{
        unsigned long vaddr;
        int count;

start:
        count = LAST_PKMAP;
        /* Find an empty entry */
        for (;;) {
                last_pkmap_nr = (last_pkmap_nr + 1) & LAST_PKMAP_MASK;
                if (!last_pkmap_nr) {
                        flush_all_zero_pkmaps();
                        count = LAST_PKMAP;
                }
                if (!pkmap_count[last_pkmap_nr])
                        break;  /* Found a usable entry */
                if (--count)
                        continue;

                /*
                 * Sleep for somebody else to unmap their entries
                 */
                {
                        DECLARE_WAITQUEUE(wait, current);

                        current->state = TASK_UNINTERRUPTIBLE;
                        add_wait_queue(&pkmap_map_wait, &wait);
                        spin_unlock(&kmap_lock);
                        schedule();
                        remove_wait_queue(&pkmap_map_wait, &wait);
                        spin_lock(&kmap_lock);

                        /* Somebody else might have mapped it while we slept */
                        if (page->virtual)
                                return page->virtual;

                        /* Re-start */
                        goto start;
                }
        }
        vaddr = PKMAP_ADDR(last_pkmap_nr);
        pkmap_page_table[last_pkmap_nr] = mk_pte(page, kmap_prot);

        pkmap_count[last_pkmap_nr] = 1;
        page->virtual = vaddr;

        return vaddr;
}
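/*
 * Note on the address arithmetic above (the exact constants are
 * architecture-specific; the forms below follow the i386 <asm/highmem.h>
 * of this era and are assumptions, not guarantees): PKMAP_ADDR() and
 * PKMAP_NR() are linear mappings between a pkmap slot and a fixed
 * virtual window, roughly
 *
 *	PKMAP_ADDR(nr)  == PKMAP_BASE + (nr << PAGE_SHIFT)
 *	PKMAP_NR(vaddr) == (vaddr - PKMAP_BASE) >> PAGE_SHIFT
 *
 * so pkmap_page_table[nr] is the pte covering PKMAP_ADDR(nr), and
 * kunmap_high() can recover the slot from page->virtual alone.
 */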
unsigned long kmap_high(struct page *page)
{
        unsigned long vaddr;

        /*
         * For highmem pages, we can't trust "virtual" until
         * after we have the lock.
         *
         * We cannot call this from interrupts, as it may block.
         */
        spin_lock(&kmap_lock);
        vaddr = page->virtual;
        if (!vaddr)
                vaddr = map_new_virtual(page);
        pkmap_count[PKMAP_NR(vaddr)]++;
        if (pkmap_count[PKMAP_NR(vaddr)] < 2)
                BUG();
        spin_unlock(&kmap_lock);
        return vaddr;
}
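/*
 * Usage sketch: kmap_high()/kunmap_high() are the slow path behind the
 * arch-level kmap()/kunmap() helpers, which short-circuit for lowmem
 * pages.  A typical caller (hypothetical, mirroring the kmap() use in
 * prepare_highmem_swapout() above) looks like:
 *
 *	unsigned long vaddr = kmap(page);	// may sleep; not for IRQs
 *	memset((void *)vaddr, 0, PAGE_SIZE);
 *	kunmap(page);				// drops the pkmap refcount
 *
 * The mapping is refcounted, so repeated kmap()s of the same page are
 * cheap: only the first one per TLB-flush cycle allocates a pkmap slot.
 */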
void kunmap_high(struct page *page)
{
        unsigned long vaddr;
        unsigned long nr;

        spin_lock(&kmap_lock);
        vaddr = page->virtual;
        if (!vaddr)
                BUG();
        nr = PKMAP_NR(vaddr);

        /*
         * A count must never go down to zero
         * without a TLB flush!
         */
        switch (--pkmap_count[nr]) {
        case 0:
                BUG();
        case 1:
                wake_up(&pkmap_map_wait);
        }
        spin_unlock(&kmap_lock);
}
/*
 * Simple bounce buffer support for highmem pages.
 * This will be moved to the block layer in 2.5.
 */

extern kmem_cache_t *bh_cachep;

static inline void copy_from_high_bh (struct buffer_head *to,
                         struct buffer_head *from)
{
        struct page *p_from;
        unsigned long vfrom;
        unsigned long flags;

        p_from = from->b_page;

        /*
         * Since this can be executed from IRQ context, reentrance
         * on the same CPU must be avoided:
         */
        __save_flags(flags);
        __cli();
        vfrom = kmap_atomic(p_from, KM_BOUNCE_WRITE);
        memcpy(to->b_data, (char *)vfrom + bh_offset(from), to->b_size);
        kunmap_atomic(vfrom, KM_BOUNCE_WRITE);
        __restore_flags(flags);
}
static inline void copy_to_high_bh_irq (struct buffer_head *to,
                         struct buffer_head *from)
{
        struct page *p_to;
        unsigned long vto;
        unsigned long flags;

        p_to = to->b_page;
        __save_flags(flags);
        __cli();
        vto = kmap_atomic(p_to, KM_BOUNCE_READ);
        memcpy((char *)vto + bh_offset(to), from->b_data, to->b_size);
        kunmap_atomic(vto, KM_BOUNCE_READ);
        __restore_flags(flags);
}
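/*
 * Note on the two copy helpers above: kmap_atomic() uses a small set of
 * per-CPU fixmap slots selected by the KM_* index, so it never sleeps
 * and is usable from IRQ context, but re-entry on the same CPU for the
 * same slot must be prevented -- hence the __cli()/__restore_flags()
 * bracket around each copy.  A minimal sketch of the same pattern, with
 * dst/offset/len standing in for the buffer_head fields:
 *
 *	__save_flags(flags);
 *	__cli();
 *	vaddr = kmap_atomic(page, KM_BOUNCE_READ);
 *	memcpy(dst, (char *)vaddr + offset, len);
 *	kunmap_atomic(vaddr, KM_BOUNCE_READ);
 *	__restore_flags(flags);
 */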
static inline void bounce_end_io (struct buffer_head *bh, int uptodate)
{
        struct buffer_head *bh_orig = (struct buffer_head *)(bh->b_dev_id);

        bh_orig->b_end_io(bh_orig, uptodate);
        __free_page(bh->b_page);
        kmem_cache_free(bh_cachep, bh);
}

static void bounce_end_io_write (struct buffer_head *bh, int uptodate)
{
        bounce_end_io(bh, uptodate);
}

static void bounce_end_io_read (struct buffer_head *bh, int uptodate)
{
        struct buffer_head *bh_orig = (struct buffer_head *)(bh->b_dev_id);

        if (uptodate)
                copy_to_high_bh_irq(bh_orig, bh);
        bounce_end_io(bh, uptodate);
}
struct buffer_head * create_bounce(int rw, struct buffer_head * bh_orig)
{
        struct page *page;
        struct buffer_head *bh;

        if (!PageHighMem(bh_orig->b_page))
                return bh_orig;

repeat_bh:
        bh = kmem_cache_alloc(bh_cachep, SLAB_BUFFER);
        if (!bh) {
                wakeup_bdflush(1);
                current->policy |= SCHED_YIELD;
                schedule();
                goto repeat_bh;
        }
        /*
         * This is wasteful for 1k buffers, but this is a stopgap measure
         * and we are being inefficient anyway.  This approach simplifies
         * things immensely.  On boxes with more than 4GB RAM this should
         * not be an issue anyway.
         */
repeat_page:
        page = alloc_page(GFP_BUFFER);
        if (!page) {
                wakeup_bdflush(1);
                current->policy |= SCHED_YIELD;
                schedule();
                goto repeat_page;
        }
        set_bh_page(bh, page, 0);

        bh->b_next = NULL;
        bh->b_blocknr = bh_orig->b_blocknr;
        bh->b_size = bh_orig->b_size;
        bh->b_list = -1;
        bh->b_dev = bh_orig->b_dev;
        bh->b_count = bh_orig->b_count;
        bh->b_rdev = bh_orig->b_rdev;
        bh->b_state = bh_orig->b_state;
        bh->b_flushtime = 0;
        bh->b_next_free = NULL;
        bh->b_prev_free = NULL;
        /* bh->b_this_page */
        bh->b_reqnext = NULL;
        bh->b_pprev = NULL;
        /* bh->b_page */
        if (rw == WRITE) {
                bh->b_end_io = bounce_end_io_write;
                copy_from_high_bh(bh, bh_orig);
        } else
                bh->b_end_io = bounce_end_io_read;
        bh->b_dev_id = (void *)bh_orig;
        bh->b_rsector = bh_orig->b_rsector;
        memset(&bh->b_wait, -1, sizeof(bh->b_wait));
        bh->b_kiobuf = NULL;

        return bh;
}
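/*
 * Usage sketch (hypothetical caller; the real call site lives in the
 * block layer, not in this file): create_bounce() is meant to be run
 * just before a buffer_head is queued, so drivers only ever see b_data
 * pointers into lowmem:
 *
 *	bh = create_bounce(rw, bh);	// may block while allocating
 *	// submit "bh"; on completion bounce_end_io_read()/..._write()
 *	// copies data back for reads, frees the bounce page and calls
 *	// the original b_end_io().
 */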