/*
 * High memory handling common code and variables.
 *
 * (C) 1999 Andrea Arcangeli, SuSE GmbH, andrea@suse.de
 *          Gerhard Wichert, Siemens AG, Gerhard.Wichert@pdb.siemens.de
 *
 * Redesigned the x86 32-bit VM architecture to deal with
 * 64-bit physical space. With current x86 CPUs this
 * means up to 64 Gigabytes physical RAM.
 *
 * Rewrote high memory support to move the page cache into
 * high memory. Implemented permanent (schedulable) kmaps
 * based on Linus' idea.
 *
 * Copyright (C) 1999 Ingo Molnar <mingo@redhat.com>
 */
20 #include <linux/pagemap.h>
21 #include <linux/highmem.h>
22 #include <linux/swap.h>
23 #include <linux/slab.h>
/*
 * NOTE(review): semantics not visible in this extract — presumably the
 * page-frame number marking the start of highmem (set by arch setup
 * code elsewhere); confirm against the architecture init code.
 */
unsigned long highmem_mapnr;
/*
 * Take one locked page, return another low-memory locked page.
 */
/*
 * NOTE(review): this extract is incomplete — the function's braces, the
 * declaration of 'vaddr', the kmap()/kunmap() of the source page, the
 * failure check on __get_free_page() and both return statements were
 * lost in extraction.  Only the surviving statements appear below.
 */
struct page * prepare_highmem_swapout(struct page * page)
        struct page *new_page;
        unsigned long regular_page;
        /*
         * If this is a highmem page so it can't be swapped out directly
         * otherwise the b_data buffer addresses will break
         * the lowlevel device drivers.
         */
        if (!PageHighMem(page))
        /*
         * Here we break the page lock, and we split the
         * dirty page into two. We can unlock the old page,
         * and we'll now have two of them. Too bad, it would
         * have been nice to continue to potentially share
         * the same page otherwise.  (NOTE(review): the tail of this
         * original comment is not visible in the extract.)
         */
        regular_page = __get_free_page(GFP_ATOMIC);
        /* Copy the highmem page into the freshly allocated low page;
         * 'vaddr' is presumably the kmap()ed address of the high page —
         * its setup is not visible here. */
        copy_page((void *)regular_page, (void *)vaddr);
        /*
         * ok, we can just forget about our highmem page since
         * we stored its data into the new regular_page.
         */
        page_cache_release(page);
        new_page = mem_map + MAP_NR(regular_page);
/*
 * Try to replace a low-memory page with a fresh highmem page, copying
 * the data and the swap-cache identity (->index, ->mapping) over so
 * the low page can be freed for uses that require it.
 *
 * NOTE(review): extraction lost this function's braces, the 'vaddr'
 * declaration, the kunmap() of the new page, the failure check on
 * alloc_page() and the return statements; only the surviving
 * statements appear below.
 */
struct page * replace_with_highmem(struct page * page)
        struct page *highpage;
        /* Already high, or no highmem pages free: nothing to gain. */
        if (PageHighMem(page) || !nr_free_highpages())
        highpage = alloc_page(GFP_ATOMIC|__GFP_HIGHMEM);
        /* The allocator may still hand back a low page; give it back
         * rather than pointlessly copying low -> low. */
        if (!PageHighMem(highpage)) {
        page_cache_release(highpage);
        vaddr = kmap(highpage);
        copy_page((void *)vaddr, (void *)page_address(page));
        /* Preserve the caching of the swap_entry. */
        highpage->index = page->index;
        highpage->mapping = page->mapping;
        /*
         * We can just forget the old page since
         * we stored its data into the new highmem-page.
         */
        page_cache_release(page);
/*
 * Virtual_count is not a pure "count".
 *  0 means that it is not mapped, and has not been mapped
 *    since a TLB flush - it is usable.
 *  1 means that there are no users, but it has been mapped
 *    since the last TLB flush - so we can't use it.
 *  n means that there are (n-1) current users of it.
 */
static int pkmap_count[LAST_PKMAP];

/* Next pkmap slot to try; advanced round-robin in map_new_virtual(). */
static unsigned int last_pkmap_nr;

/* Serializes all access to pkmap_count[], last_pkmap_nr and the pkmap
 * page table (taken in kmap_high()/kunmap_high() below). */
static spinlock_t kmap_lock = SPIN_LOCK_UNLOCKED;

/* Page table backing the PKMAP virtual window; presumably initialized
 * by arch code — its setup is not visible in this file. */
pte_t * pkmap_page_table;

/* Threads sleeping in map_new_virtual() waiting for a slot to free. */
static DECLARE_WAIT_QUEUE_HEAD(pkmap_map_wait);
/*
 * Unmap every pkmap slot whose count is exactly 1 (mapped but unused
 * since the last TLB flush), making those slots reusable.
 *
 * NOTE(review): extraction lost the braces, the declarations of 'i',
 * 'pte' and 'page', the 'continue', any pte sanity check, the clearing
 * of page->virtual and the final TLB flush; only the surviving
 * statements appear below.  Presumably called with kmap_lock held —
 * the one visible caller (map_new_virtual) holds it.
 */
static void flush_all_zero_pkmaps(void)
        for (i = 0; i < LAST_PKMAP; i++) {
                /*
                 * zero means we don't have anything to do,
                 * >1 means that it is still in use. Only
                 * a count of 1 means that it is free but
                 * needs to be unmapped
                 */
                if (pkmap_count[i] != 1)
                pte = pkmap_page_table[i];
                pte_clear(pkmap_page_table+i);
                page = pte_page(pte);
/*
 * Find (or sleep waiting for) a free pkmap slot and map 'page' into
 * it; the chosen virtual address is published in page->virtual.
 * Called with kmap_lock held; drops and retakes it around the sleep,
 * so this must never run in interrupt context.
 *
 * NOTE(review): extraction lost the braces, the 'vaddr' declaration,
 * the enclosing scan loop, the schedule() call between the wait-queue
 * add/remove, the 'if (page->virtual)' recheck guarding the early
 * return, and the restart path after a fruitless sleep; only the
 * surviving statements appear below.
 */
static inline unsigned long map_new_virtual(struct page *page)
        /* Find an empty entry */
        last_pkmap_nr = (last_pkmap_nr + 1) & LAST_PKMAP_MASK;
        /* Wrapped around: reap stale (count==1) mappings so their
         * slots become usable again. */
        if (!last_pkmap_nr) {
        flush_all_zero_pkmaps();
        if (!pkmap_count[last_pkmap_nr])
        break;  /* Found a usable entry */
        /*
         * Sleep for somebody else to unmap their entries
         */
        DECLARE_WAITQUEUE(wait, current);

        current->state = TASK_UNINTERRUPTIBLE;
        add_wait_queue(&pkmap_map_wait, &wait);
        spin_unlock(&kmap_lock);
        remove_wait_queue(&pkmap_map_wait, &wait);
        spin_lock(&kmap_lock);

        /* Somebody else might have mapped it while we slept */
        return page->virtual;

        /* Install the new mapping and publish it on the page. */
        vaddr = PKMAP_ADDR(last_pkmap_nr);
        pkmap_page_table[last_pkmap_nr] = mk_pte(page, kmap_prot);
        /* 1 = mapped but no users yet (see pkmap_count comment). */
        pkmap_count[last_pkmap_nr] = 1;
        page->virtual = vaddr;
/*
 * Map a highmem page into the pkmap window and take a reference on
 * the mapping in pkmap_count.  May sleep via map_new_virtual(), so it
 * cannot be called from interrupt context.
 *
 * NOTE(review): extraction lost the braces, the 'vaddr' declaration,
 * the 'if (!vaddr)' guard before map_new_virtual(), the sanity action
 * (presumably BUG()) when the count check fails, and the final
 * 'return vaddr'; only the surviving statements appear below.
 */
unsigned long kmap_high(struct page *page)
        /*
         * For highmem pages, we can't trust "virtual" until
         * after we have the lock.
         *
         * We cannot call this from interrupts, as it may block
         */
        spin_lock(&kmap_lock);
        vaddr = page->virtual;
        vaddr = map_new_virtual(page);
        pkmap_count[PKMAP_NR(vaddr)]++;
        /* After the increment the count must be >= 2 (mapped + us). */
        if (pkmap_count[PKMAP_NR(vaddr)] < 2)
        spin_unlock(&kmap_lock);
/*
 * Drop one kmap_high() reference on 'page'.  When the count reaches 1
 * the slot is unused but still mapped (it is reclaimed lazily by
 * flush_all_zero_pkmaps()); waiters for a free slot are woken.
 *
 * NOTE(review): extraction lost the braces, the 'vaddr'/'nr'
 * declarations, the sanity check on a missing mapping, and the
 * switch's case labels / fall-through structure; only the surviving
 * statements appear below.
 */
void kunmap_high(struct page *page)
        spin_lock(&kmap_lock);
        vaddr = page->virtual;
        nr = PKMAP_NR(vaddr);

        /*
         * A count must never go down to zero
         * without a TLB flush!
         */
        switch (--pkmap_count[nr]) {
        /* NOTE(review): case labels lost in extraction. */
        wake_up(&pkmap_map_wait);
        spin_unlock(&kmap_lock);
/*
 * Simple bounce buffer support for highmem pages.
 * This will be moved to the block layer in 2.5.
 */

/* Slab cache for struct buffer_head, defined elsewhere (extern). */
extern kmem_cache_t *bh_cachep;
/*
 * Copy the data of a (possibly highmem) source buffer_head into a
 * low-memory bounce buffer_head through a KM_BOUNCE_WRITE atomic kmap.
 *
 * NOTE(review): extraction lost the braces, the 'p_from'/'vfrom'/
 * 'flags' declarations, and the __save_flags(flags)/__cli() pair that
 * the trailing __restore_flags(flags) implies; only the surviving
 * statements appear below.
 */
static inline void copy_from_high_bh (struct buffer_head *to,
                        struct buffer_head *from)
        p_from = from->b_page;

        /*
         * Since this can be executed from IRQ context, reentrance
         * on the same CPU must be avoided:
         */
        vfrom = kmap_atomic(p_from, KM_BOUNCE_WRITE);
        memcpy(to->b_data, (char *)vfrom + bh_offset(from), to->b_size);
        kunmap_atomic(vfrom, KM_BOUNCE_WRITE);
        __restore_flags(flags);
/*
 * Copy a bounce buffer_head's data back into the original (possibly
 * highmem) destination buffer_head through a KM_BOUNCE_READ atomic
 * kmap.  The _irq suffix and the __restore_flags(flags) imply it runs
 * with interrupts disabled from IRQ context.
 *
 * NOTE(review): extraction lost the braces, the 'p_to'/'vto'/'flags'
 * declarations, the assignment of p_to (presumably to->b_page) and
 * the __save_flags/__cli pair; only the surviving statements below.
 */
static inline void copy_to_high_bh_irq (struct buffer_head *to,
                        struct buffer_head *from)
        vto = kmap_atomic(p_to, KM_BOUNCE_READ);
        memcpy((char *)vto + bh_offset(to), from->b_data, to->b_size);
        kunmap_atomic(vto, KM_BOUNCE_READ);
        __restore_flags(flags);
282 static inline void bounce_end_io (struct buffer_head
*bh
, int uptodate
)
284 struct buffer_head
*bh_orig
= (struct buffer_head
*)(bh
->b_dev_id
);
286 bh_orig
->b_end_io(bh_orig
, uptodate
);
287 __free_page(bh
->b_page
);
288 kmem_cache_free(bh_cachep
, bh
);
/*
 * Completion routine installed on WRITE bounces.  The payload was
 * already copied out of highmem before submission (see create_bounce),
 * so completion reduces to the common bounce teardown.
 */
static void bounce_end_io_write (struct buffer_head *bh, int uptodate)
{
        bounce_end_io(bh, uptodate);
}
/*
 * Completion routine installed on READ bounces: copy the freshly read
 * data from the low bounce page back up to the original (highmem)
 * buffer_head, then do the common bounce teardown.
 *
 * NOTE(review): extraction lost the braces and, apparently, a guard
 * before the copy-back — presumably 'if (uptodate)' so a failed read
 * is not copied; confirm against the original source.
 */
static void bounce_end_io_read (struct buffer_head *bh, int uptodate)
        struct buffer_head *bh_orig = (struct buffer_head *)(bh->b_dev_id);

        copy_to_high_bh_irq(bh_orig, bh);
        bounce_end_io(bh, uptodate);
/*
 * Build a low-memory bounce buffer_head mirroring bh_orig when the
 * original's page lives in highmem, so lowlevel drivers only ever see
 * b_data addresses inside the direct-mapped region.  If no bounce is
 * needed the original is used as-is.
 *
 * NOTE(review): this definition is truncated — it continues past the
 * end of this extract — and the extraction also lost the braces, the
 * 'page' declaration, the early 'return bh_orig', the retry loops
 * around both allocations (the SCHED_YIELD lines imply a
 * schedule()-and-retry on failure), and the rw==WRITE/read branch
 * structure around the two b_end_io assignments.  Surviving
 * statements below.
 */
struct buffer_head * create_bounce(int rw, struct buffer_head * bh_orig)
        struct buffer_head *bh;

        /* Already a low-memory page: the driver can use it directly. */
        if (!PageHighMem(bh_orig->b_page))

        bh = kmem_cache_alloc(bh_cachep, SLAB_BUFFER);
        /* bh allocation failure path: yield and (presumably) retry. */
        current->policy |= SCHED_YIELD;

        /*
         * This is wasteful for 1k buffers, but this is a stopgap measure
         * and we are being ineffective anyway. This approach simplifies
         * things immensly. On boxes with more than 4GB RAM this should
         * not be an issue anyway.
         */
        page = alloc_page(GFP_BUFFER);
        /* page allocation failure path: yield and (presumably) retry. */
        current->policy |= SCHED_YIELD;

        set_bh_page(bh, page, 0);

        /* Mirror the identity of the original buffer_head. */
        bh->b_blocknr = bh_orig->b_blocknr;
        bh->b_size = bh_orig->b_size;
        bh->b_dev = bh_orig->b_dev;
        bh->b_count = bh_orig->b_count;
        bh->b_rdev = bh_orig->b_rdev;
        bh->b_state = bh_orig->b_state;
        /* List linkage starts detached. */
        bh->b_next_free = NULL;
        bh->b_prev_free = NULL;
        /* bh->b_this_page */
        bh->b_reqnext = NULL;

        /* WRITE path: copy data down from highmem now, complete via
         * the write teardown... */
        bh->b_end_io = bounce_end_io_write;
        copy_from_high_bh(bh, bh_orig);
        /* ...READ path: copy back at completion time instead. */
        bh->b_end_io = bounce_end_io_read;
        /* Stash the original so the completion routines can find it. */
        bh->b_dev_id = (void *)bh_orig;
        bh->b_rsector = bh_orig->b_rsector;
        /* Fill b_wait with -1 — presumably marks it unused for the
         * bounce bh; confirm against the original source. */
        memset(&bh->b_wait, -1, sizeof(bh->b_wait));