/*
 * High memory handling common code and variables.
 *
 * (C) 1999 Andrea Arcangeli, SuSE GmbH, andrea@suse.de
 *          Gerhard Wichert, Siemens AG, Gerhard.Wichert@pdb.siemens.de
 *
 * Redesigned the x86 32-bit VM architecture to deal with
 * 64-bit physical space. With current x86 CPUs this
 * means up to 64 Gigabytes physical RAM.
 *
 * Rewrote high memory support to move the page cache into
 * high memory. Implemented permanent (schedulable) kmaps
 * based on Linus' idea.
 *
 * Copyright (C) 1999 Ingo Molnar <mingo@redhat.com>
 */

#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/highmem.h>
#include <linux/swap.h>
#include <linux/slab.h>

unsigned long highmem_mapnr;

/*
 * Take one locked page, return another low-memory locked page.
 */
struct page * prepare_highmem_swapout(struct page * page)
{
	struct page *new_page;
	unsigned long regular_page;
	unsigned long vaddr;
	/*
	 * If this is a highmem page, it can't be swapped out directly,
	 * otherwise the b_data buffer addresses will break
	 * the lowlevel device drivers.
	 */
	if (!PageHighMem(page))
		return page;

	/*
	 * Here we break the page lock, and we split the
	 * dirty page into two. We can unlock the old page,
	 * and we'll now have two of them. Too bad, it would
	 * have been nice to continue to potentially share
	 * across a fork().
	 */
	UnlockPage(page);
	regular_page = __get_free_page(GFP_ATOMIC);
	if (!regular_page)
		return NULL;

	vaddr = kmap(page);
	copy_page((void *)regular_page, (void *)vaddr);
	kunmap(page);

	/*
	 * OK, we can just forget about our highmem page since
	 * we stored its data into the new regular_page.
	 */
	page_cache_release(page);

	new_page = mem_map + MAP_NR(regular_page);
	LockPage(new_page);
	return new_page;
}

struct page * replace_with_highmem(struct page * page)
{
	struct page *highpage;
	unsigned long vaddr;

	if (PageHighMem(page) || !nr_free_highpages())
		return page;

	highpage = alloc_page(GFP_ATOMIC|__GFP_HIGHMEM);
	if (!highpage)
		return page;
	if (!PageHighMem(highpage)) {
		page_cache_release(highpage);
		return page;
	}

	vaddr = kmap(highpage);
	copy_page((void *)vaddr, (void *)page_address(page));
	kunmap(highpage);

	if (page->mapping)
		BUG();

	/*
	 * We can just forget the old page since
	 * we stored its data into the new highmem-page.
	 */
	page_cache_release(page);

	return highpage;
}

/*
 * Virtual_count is not a pure "count".
 *  0 means that it is not mapped, and has not been mapped
 *    since a TLB flush - it is usable.
 *  1 means that there are no users, but it has been mapped
 *    since the last TLB flush - so we can't use it.
 *  n means that there are (n-1) current users of it.
 */
static int pkmap_count[LAST_PKMAP];
static unsigned int last_pkmap_nr;
static spinlock_t kmap_lock = SPIN_LOCK_UNLOCKED;

pte_t * pkmap_page_table;

static DECLARE_WAIT_QUEUE_HEAD(pkmap_map_wait);

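/*
 * Lifecycle of a pkmap slot, as implied by the counting scheme above:
 * map_new_virtual() takes a free slot from 0 to 1 when it installs the
 * pte, kmap_high() bumps it to 2 for the first user and once more for
 * every additional user, and kunmap_high() decrements it.  A slot that
 * drops back to 1 is unused but still mapped, so it only becomes
 * reusable after flush_all_zero_pkmaps() resets it to 0 under a global
 * TLB flush.
 */
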
static void flush_all_zero_pkmaps(void)
{
	int i;

	flush_cache_all();

	for (i = 0; i < LAST_PKMAP; i++) {
		struct page *page;
		pte_t pte;
		/*
		 * zero means we don't have anything to do,
		 * >1 means that it is still in use. Only
		 * a count of 1 means that it is free but
		 * needs to be unmapped
		 */
		if (pkmap_count[i] != 1)
			continue;
		pkmap_count[i] = 0;
		pte = pkmap_page_table[i];
		if (pte_none(pte))
			BUG();
		pte_clear(pkmap_page_table+i);
		page = pte_page(pte);
		page->virtual = 0;
	}
	flush_tlb_all();
}

static inline unsigned long map_new_virtual(struct page *page)
{
	unsigned long vaddr;
	int count;

start:
	count = LAST_PKMAP;
	/* Find an empty entry */
	for (;;) {
		last_pkmap_nr = (last_pkmap_nr + 1) & LAST_PKMAP_MASK;
		if (!last_pkmap_nr) {
			flush_all_zero_pkmaps();
			count = LAST_PKMAP;
		}
		if (!pkmap_count[last_pkmap_nr])
			break;	/* Found a usable entry */
		if (--count)
			continue;

		/*
		 * Sleep for somebody else to unmap their entries
		 */
		{
			DECLARE_WAITQUEUE(wait, current);

			current->state = TASK_UNINTERRUPTIBLE;
			add_wait_queue(&pkmap_map_wait, &wait);
			spin_unlock(&kmap_lock);
			schedule();
			remove_wait_queue(&pkmap_map_wait, &wait);
			spin_lock(&kmap_lock);

			/* Somebody else might have mapped it while we slept */
			if (page->virtual)
				return page->virtual;

			/* Re-start */
			goto start;
		}
	}
	vaddr = PKMAP_ADDR(last_pkmap_nr);
	set_pte(pkmap_page_table + last_pkmap_nr, mk_pte(page, kmap_prot));

	pkmap_count[last_pkmap_nr] = 1;
	page->virtual = vaddr;

	return vaddr;
}

unsigned long kmap_high(struct page *page)
{
	unsigned long vaddr;

	/*
	 * For highmem pages, we can't trust "virtual" until
	 * after we have the lock.
	 *
	 * We cannot call this from interrupts, as it may block
	 */
	spin_lock(&kmap_lock);
	vaddr = page->virtual;
	if (!vaddr)
		vaddr = map_new_virtual(page);
	pkmap_count[PKMAP_NR(vaddr)]++;
	if (pkmap_count[PKMAP_NR(vaddr)] < 2)
		BUG();
	spin_unlock(&kmap_lock);
	return vaddr;
}

void kunmap_high(struct page *page)
{
	unsigned long vaddr;
	unsigned long nr;

	spin_lock(&kmap_lock);
	vaddr = page->virtual;
	if (!vaddr)
		BUG();
	nr = PKMAP_NR(vaddr);

	/*
	 * A count must never go down to zero
	 * without a TLB flush!
	 */
	switch (--pkmap_count[nr]) {
	case 0:
		BUG();
	case 1:
		wake_up(&pkmap_map_wait);
	}
	spin_unlock(&kmap_lock);
}

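/*
 * Illustrative sketch (not part of the original file): process-context
 * users normally go through the kmap()/kunmap() wrappers from
 * <linux/highmem.h>, which end up in kmap_high()/kunmap_high() above
 * for PageHighMem() pages.  example_zero_page() below is a hypothetical
 * caller, shown only to demonstrate the pairing.
 */
#if 0
static void example_zero_page(struct page *page)
{
	unsigned long vaddr;

	vaddr = kmap(page);		/* may sleep - never from IRQ context */
	memset((void *)vaddr, 0, PAGE_SIZE);
	kunmap(page);			/* drops the pkmap_count reference */
}
#endif
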
/*
 * Simple bounce buffer support for highmem pages.
 * This will be moved to the block layer in 2.5.
 */

static inline void copy_from_high_bh (struct buffer_head *to,
		 struct buffer_head *from)
{
	struct page *p_from;
	unsigned long vfrom;
	unsigned long flags;

	p_from = from->b_page;

	/*
	 * Since this can be executed from IRQ context, reentrance
	 * on the same CPU must be avoided:
	 */
	__save_flags(flags);
	__cli();
	vfrom = kmap_atomic(p_from, KM_BOUNCE_WRITE);
	memcpy(to->b_data, (char *)vfrom + bh_offset(from), to->b_size);
	kunmap_atomic(vfrom, KM_BOUNCE_WRITE);
	__restore_flags(flags);
}

static inline void copy_to_high_bh_irq (struct buffer_head *to,
		 struct buffer_head *from)
{
	struct page *p_to;
	unsigned long vto;
	unsigned long flags;

	p_to = to->b_page;
	__save_flags(flags);
	__cli();
	vto = kmap_atomic(p_to, KM_BOUNCE_READ);
	memcpy((char *)vto + bh_offset(to), from->b_data, to->b_size);
	kunmap_atomic(vto, KM_BOUNCE_READ);
	__restore_flags(flags);
}

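/*
 * Both helpers above use kmap_atomic() rather than kmap(): they can run
 * in IRQ context, where kmap() is not allowed because it may sleep in
 * map_new_virtual().  Disabling local interrupts around the atomic kmap
 * keeps the per-CPU KM_BOUNCE_* slot from being reentered on the same
 * CPU, as noted in copy_from_high_bh().
 */
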
static inline void bounce_end_io (struct buffer_head *bh, int uptodate)
{
	struct buffer_head *bh_orig = (struct buffer_head *)(bh->b_private);

	bh_orig->b_end_io(bh_orig, uptodate);
	__free_page(bh->b_page);
	kmem_cache_free(bh_cachep, bh);
}

static void bounce_end_io_write (struct buffer_head *bh, int uptodate)
{
	bounce_end_io(bh, uptodate);
}

static void bounce_end_io_read (struct buffer_head *bh, int uptodate)
{
	struct buffer_head *bh_orig = (struct buffer_head *)(bh->b_private);

	if (uptodate)
		copy_to_high_bh_irq(bh_orig, bh);
	bounce_end_io(bh, uptodate);
}

struct buffer_head * create_bounce(int rw, struct buffer_head * bh_orig)
{
	struct page *page;
	struct buffer_head *bh;

	if (!PageHighMem(bh_orig->b_page))
		return bh_orig;

repeat_bh:
	bh = kmem_cache_alloc(bh_cachep, SLAB_BUFFER);
	if (!bh) {
		wakeup_bdflush(1);
		current->policy |= SCHED_YIELD;
		schedule();
		goto repeat_bh;
	}
	/*
	 * This is wasteful for 1k buffers, but this is a stopgap measure
	 * and we are being ineffective anyway. This approach simplifies
	 * things immensely. On boxes with more than 4GB RAM this should
	 * not be an issue anyway.
	 */
repeat_page:
	page = alloc_page(GFP_BUFFER);
	if (!page) {
		wakeup_bdflush(1);
		current->policy |= SCHED_YIELD;
		schedule();
		goto repeat_page;
	}
	set_bh_page(bh, page, 0);

	bh->b_next = NULL;
	bh->b_blocknr = bh_orig->b_blocknr;
	bh->b_size = bh_orig->b_size;
	bh->b_list = -1;
	bh->b_dev = bh_orig->b_dev;
	bh->b_count = bh_orig->b_count;
	bh->b_rdev = bh_orig->b_rdev;
	bh->b_state = bh_orig->b_state;
	bh->b_flushtime = 0;
	bh->b_next_free = NULL;
	bh->b_prev_free = NULL;
	/* bh->b_this_page */
	bh->b_reqnext = NULL;
	bh->b_pprev = NULL;
	/* bh->b_page */
	if (rw == WRITE) {
		bh->b_end_io = bounce_end_io_write;
		copy_from_high_bh(bh, bh_orig);
	} else
		bh->b_end_io = bounce_end_io_read;
	bh->b_private = (void *)bh_orig;
	bh->b_rsector = bh_orig->b_rsector;
	memset(&bh->b_wait, -1, sizeof(bh->b_wait));

	return bh;
}

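/*
 * Illustrative sketch (not part of this file): the block layer is
 * expected to swap in the bounce buffer before a request reaches a
 * low-level driver, roughly as below.  example_submit_bh() and
 * submit_to_driver() are hypothetical names standing in for the real
 * submission path.
 */
#if 0
static void example_submit_bh(int rw, struct buffer_head *bh)
{
	/*
	 * For a highmem page this returns a fresh lowmem buffer_head
	 * whose completion handler copies the data back for reads and
	 * frees the bounce page; lowmem buffer heads pass through
	 * unchanged.
	 */
	bh = create_bounce(rw, bh);
	submit_to_driver(rw, bh);	/* hypothetical driver hand-off */
}
#endif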