/*
 * High memory handling common code and variables.
 *
 * (C) 1999 Andrea Arcangeli, SuSE GmbH, andrea@suse.de
 *          Gerhard Wichert, Siemens AG, Gerhard.Wichert@pdb.siemens.de
 *
 * Redesigned the x86 32-bit VM architecture to deal with
 * 64-bit physical space. With current x86 CPUs this
 * means up to 64 Gigabytes of physical RAM.
 *
 * Rewrote high memory support to move the page cache into
 * high memory. Implemented permanent (schedulable) kmaps
 * based on Linus' idea.
 *
 * Copyright (C) 1999 Ingo Molnar <mingo@redhat.com>
 */
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/highmem.h>
#include <linux/swap.h>
#include <linux/slab.h>
unsigned long highmem_mapnr;
/*
 * Take one locked page, return another low-memory locked page.
 */
struct page * prepare_highmem_swapout(struct page * page)
{
        struct page *new_page;
        unsigned long regular_page;
        unsigned long vaddr;

        /*
         * A highmem page can't be swapped out directly: the b_data
         * buffer addresses would break the low-level device drivers.
         */
        if (!PageHighMem(page))
                return page;

        /*
         * Here we break the page lock, and we split the
         * dirty page into two. We can unlock the old page,
         * and we'll now have two of them. Too bad, it would
         * have been nice to continue to potentially share
         * across a fork().
         */
        UnlockPage(page);
        regular_page = __get_free_page(GFP_ATOMIC);
        if (!regular_page)
                return NULL;

        vaddr = kmap(page);
        copy_page((void *)regular_page, (void *)vaddr);
        kunmap(page);

        /*
         * ok, we can just forget about our highmem page since
         * we stored its data into the new regular_page.
         */
        page_cache_release(page);
        new_page = mem_map + MAP_NR(regular_page);
        LockPage(new_page);
        return new_page;
}
struct page * replace_with_highmem(struct page * page)
{
        struct page *highpage;
        unsigned long vaddr;

        if (PageHighMem(page) || !nr_free_highpages())
                return page;

        highpage = alloc_page(GFP_ATOMIC|__GFP_HIGHMEM);
        if (!highpage)
                return page;
        if (!PageHighMem(highpage)) {
                page_cache_release(highpage);
                return page;
        }

        vaddr = kmap(highpage);
        copy_page((void *)vaddr, (void *)page_address(page));
        kunmap(highpage);

        /* Preserve the caching of the swap_entry. */
        highpage->index = page->index;
        highpage->mapping = page->mapping;

        /*
         * We can just forget the old page since
         * we stored its data into the new highmem-page.
         */
        page_cache_release(page);

        return highpage;
}
/*
 * Virtual_count is not a pure "count".
 *  0 means that it is not mapped, and has not been mapped
 *    since a TLB flush - it is usable.
 *  1 means that there are no users, but it has been mapped
 *    since the last TLB flush - so we can't use it.
 *  n means that there are (n-1) current users of it.
 */
static int pkmap_count[LAST_PKMAP];
static unsigned int last_pkmap_nr;
static spinlock_t kmap_lock = SPIN_LOCK_UNLOCKED;

pte_t * pkmap_page_table;

static DECLARE_WAIT_QUEUE_HEAD(pkmap_map_wait);
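
/*
 * Illustrative sketch (not part of the original file): how the
 * pkmap_count states described above are read. The helper below is
 * hypothetical and would have to be called with kmap_lock held.
 */
#if 0
static int example_pkmap_users(int i)
{
        if (pkmap_count[i] == 0)
                return 0;       /* unmapped and TLB-flushed: usable */
        if (pkmap_count[i] == 1)
                return 0;       /* no users, but possibly stale in a TLB:
                                 * unusable until flush_all_zero_pkmaps() */
        return pkmap_count[i] - 1;      /* (n-1) current users */
}
#endif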
static void flush_all_zero_pkmaps(void)
{
        int i;

        flush_cache_all();

        for (i = 0; i < LAST_PKMAP; i++) {
                struct page *page;
                pte_t pte;
                /*
                 * zero means we don't have anything to do,
                 * >1 means that it is still in use. Only
                 * a count of 1 means that it is free but
                 * needs to be unmapped.
                 */
                if (pkmap_count[i] != 1)
                        continue;
                pkmap_count[i] = 0;
                pte = pkmap_page_table[i];
                if (pte_none(pte))
                        BUG();
                pte_clear(pkmap_page_table+i);
                page = pte_page(pte);
                page->virtual = 0;
        }
        flush_tlb_all();
}
static inline unsigned long map_new_virtual(struct page *page)
{
        unsigned long vaddr;
        int count;

start:
        count = LAST_PKMAP;
        /* Find an empty entry */
        for (;;) {
                last_pkmap_nr = (last_pkmap_nr + 1) & LAST_PKMAP_MASK;
                if (!last_pkmap_nr) {
                        flush_all_zero_pkmaps();
                        count = LAST_PKMAP;
                }
                if (!pkmap_count[last_pkmap_nr])
                        break;  /* Found a usable entry */
                if (--count)
                        continue;

                /*
                 * Sleep for somebody else to unmap their entries
                 */
                {
                        DECLARE_WAITQUEUE(wait, current);

                        current->state = TASK_UNINTERRUPTIBLE;
                        add_wait_queue(&pkmap_map_wait, &wait);
                        spin_unlock(&kmap_lock);
                        schedule();
                        remove_wait_queue(&pkmap_map_wait, &wait);
                        spin_lock(&kmap_lock);

                        /* Somebody else might have mapped it while we slept */
                        if (page->virtual)
                                return page->virtual;

                        /* Re-start */
                        goto start;
                }
        }
        vaddr = PKMAP_ADDR(last_pkmap_nr);
        set_pte(pkmap_page_table + last_pkmap_nr, mk_pte(page, kmap_prot));

        pkmap_count[last_pkmap_nr] = 1;
        page->virtual = vaddr;

        return vaddr;
}
unsigned long kmap_high(struct page *page)
{
        unsigned long vaddr;

        /*
         * For highmem pages, we can't trust "virtual" until
         * after we have the lock.
         *
         * We cannot call this from interrupts, as it may block.
         */
        spin_lock(&kmap_lock);
        vaddr = page->virtual;
        if (!vaddr)
                vaddr = map_new_virtual(page);
        pkmap_count[PKMAP_NR(vaddr)]++;
        if (pkmap_count[PKMAP_NR(vaddr)] < 2)
                BUG();
        spin_unlock(&kmap_lock);
        return vaddr;
}
void kunmap_high(struct page *page)
{
        unsigned long vaddr;
        unsigned long nr;

        spin_lock(&kmap_lock);
        vaddr = page->virtual;
        if (!vaddr)
                BUG();
        nr = PKMAP_NR(vaddr);

        /*
         * A count must never go down to zero
         * without a TLB flush!
         */
        switch (--pkmap_count[nr]) {
        case 0:
                BUG();
        case 1:
                wake_up(&pkmap_map_wait);
        }
        spin_unlock(&kmap_lock);
}
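
/*
 * Illustrative sketch (not part of the original file): typical use of
 * the schedulable kmap interface from process context. kmap() resolves
 * lowmem pages via page_address() and only enters kmap_high() for
 * highmem pages; the helper below is hypothetical.
 */
#if 0
static void example_zero_highpage(struct page *page)
{
        unsigned long vaddr = kmap(page);       /* may sleep: never call
                                                 * from interrupt context */
        memset((void *)vaddr, 0, PAGE_SIZE);
        kunmap(page);   /* drops the pkmap_count reference taken above */
}
#endif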
/*
 * Simple bounce buffer support for highmem pages.
 * This will be moved to the block layer in 2.5.
 */
static inline void copy_from_high_bh (struct buffer_head *to,
                         struct buffer_head *from)
{
        struct page *p_from;
        unsigned long vfrom;
        unsigned long flags;

        p_from = from->b_page;

        /*
         * Since this can be executed from IRQ context, reentrance
         * on the same CPU must be avoided:
         */
        __save_flags(flags);
        __cli();
        vfrom = kmap_atomic(p_from, KM_BOUNCE_WRITE);
        memcpy(to->b_data, (char *)vfrom + bh_offset(from), to->b_size);
        kunmap_atomic(vfrom, KM_BOUNCE_WRITE);
        __restore_flags(flags);
}
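
/*
 * Illustrative sketch (not part of the original file): the general
 * kmap_atomic() pattern used above. Each KM_* slot is a per-CPU fixmap
 * entry, so the mapping must not be held across anything that can
 * sleep; the KM_USER0 slot is assumed to exist, as on i386.
 */
#if 0
static void example_copy_from_page(char *dst, struct page *page,
                                   unsigned long offset, unsigned long len)
{
        unsigned long vaddr = kmap_atomic(page, KM_USER0);

        memcpy(dst, (char *)vaddr + offset, len);
        kunmap_atomic(vaddr, KM_USER0);
}
#endif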
static inline void copy_to_high_bh_irq (struct buffer_head *to,
                         struct buffer_head *from)
{
        struct page *p_to;
        unsigned long vto;
        unsigned long flags;

        p_to = to->b_page;
        __save_flags(flags);
        __cli();
        vto = kmap_atomic(p_to, KM_BOUNCE_READ);
        memcpy((char *)vto + bh_offset(to), from->b_data, to->b_size);
        kunmap_atomic(vto, KM_BOUNCE_READ);
        __restore_flags(flags);
}
static inline void bounce_end_io (struct buffer_head *bh, int uptodate)
{
        struct buffer_head *bh_orig = (struct buffer_head *)(bh->b_private);

        bh_orig->b_end_io(bh_orig, uptodate);
        __free_page(bh->b_page);
        kmem_cache_free(bh_cachep, bh);
}
static void bounce_end_io_write (struct buffer_head *bh, int uptodate)
{
        bounce_end_io(bh, uptodate);
}
static void bounce_end_io_read (struct buffer_head *bh, int uptodate)
{
        struct buffer_head *bh_orig = (struct buffer_head *)(bh->b_private);

        if (uptodate)
                copy_to_high_bh_irq(bh_orig, bh);
        bounce_end_io(bh, uptodate);
}
struct buffer_head * create_bounce(int rw, struct buffer_head * bh_orig)
{
        struct page *page;
        struct buffer_head *bh;

        if (!PageHighMem(bh_orig->b_page))
                return bh_orig;

repeat_bh:
        bh = kmem_cache_alloc(bh_cachep, SLAB_BUFFER);
        if (!bh) {
                current->policy |= SCHED_YIELD;
                schedule();
                goto repeat_bh;
        }
        /*
         * This is wasteful for 1k buffers, but this is a stopgap measure
         * and we are being ineffective anyway. This approach simplifies
         * things immensely. On boxes with more than 4GB RAM this should
         * not be an issue anyway.
         */
repeat_page:
        page = alloc_page(GFP_BUFFER);
        if (!page) {
                current->policy |= SCHED_YIELD;
                schedule();
                goto repeat_page;
        }
        set_bh_page(bh, page, 0);

        bh->b_next = NULL;
        bh->b_blocknr = bh_orig->b_blocknr;
        bh->b_size = bh_orig->b_size;
        bh->b_list = -1;
        bh->b_dev = bh_orig->b_dev;
        bh->b_count = bh_orig->b_count;
        bh->b_rdev = bh_orig->b_rdev;
        bh->b_state = bh_orig->b_state;
        bh->b_flushtime = 0;
        bh->b_next_free = NULL;
        bh->b_prev_free = NULL;
        /* bh->b_this_page */
        bh->b_reqnext = NULL;
        bh->b_pprev = NULL;
        /* bh->b_page */
        if (rw == WRITE) {
                bh->b_end_io = bounce_end_io_write;
                copy_from_high_bh(bh, bh_orig);
        } else
                bh->b_end_io = bounce_end_io_read;
        bh->b_private = (void *)bh_orig;
        bh->b_rsector = bh_orig->b_rsector;
        memset(&bh->b_wait, -1, sizeof(bh->b_wait));

        return bh;
}
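
/*
 * Illustrative sketch (not part of the original file): how a caller in
 * the block layer would use create_bounce() before handing a request
 * to a driver that can only address lowmem. The submit helper below is
 * hypothetical.
 */
#if 0
static void example_submit_buffer(int rw, struct buffer_head *bh)
{
        /* Substitute a lowmem bounce bh if the data page is in highmem. */
        struct buffer_head *out = create_bounce(rw, bh);

        /*
         * Queue "out" for the device. On I/O completion its b_end_io
         * (bounce_end_io_read/write) copies read data back into the
         * original highmem page and frees the bounce page and bh.
         */
        generic_make_request(rw, out);
}
#endif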