/*
 *  linux/mm/swap_state.c
 *
 *  Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
 *  Swap reorganised 29.12.95, Stephen Tweedie
 *
 *  Rewritten to use page cache, (C) 1998 Stephen Tweedie
 */

#include <linux/module.h>
#include <linux/mm.h>
#include <linux/kernel_stat.h>
#include <linux/swap.h>
#include <linux/init.h>
#include <linux/pagemap.h>
#include <linux/buffer_head.h>
#include <linux/backing-dev.h>

#include <asm/pgtable.h>

/*
 * swapper_space is a fiction, retained to simplify the path through
 * vmscan's shrink_list, to make sync_page look nicer, and to allow
 * future use of radix_tree tags in the swap cache.
 */
static struct address_space_operations swap_aops = {
	.writepage	= swap_writepage,
	.sync_page	= block_sync_page,
	.set_page_dirty	= __set_page_dirty_nobuffers,
};

static struct backing_dev_info swap_backing_dev_info = {
	.memory_backed	= 1,	/* Does not contribute to dirty memory */
	.unplug_io_fn	= swap_unplug_io_fn,
};

struct address_space swapper_space = {
	.page_tree	= RADIX_TREE_INIT(GFP_ATOMIC),
	.tree_lock	= SPIN_LOCK_UNLOCKED,
	.a_ops		= &swap_aops,
	.i_mmap_nonlinear = LIST_HEAD_INIT(swapper_space.i_mmap_nonlinear),
	.backing_dev_info = &swap_backing_dev_info,
};
EXPORT_SYMBOL(swapper_space);
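
/*
 * Illustrative note (not part of the original file): a page in the swap
 * cache is keyed by its swap entry value rather than by (mapping, index),
 * so a lookup for swp_entry_t entry is simply:
 *
 *	page = radix_tree_lookup(&swapper_space.page_tree, entry.val);
 *
 * The PG_swapcache flag plus page->private stand in for the page->mapping
 * and page->index fields used by the ordinary page cache.
 */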

#define INC_CACHE_INFO(x)	do { swap_cache_info.x++; } while (0)

static struct {
	unsigned long add_total;
	unsigned long del_total;
	unsigned long find_success;
	unsigned long find_total;
	unsigned long noent_race;
	unsigned long exist_race;
} swap_cache_info;

void show_swap_cache_info(void)
{
	printk("Swap cache: add %lu, delete %lu, find %lu/%lu, race %lu+%lu\n",
		swap_cache_info.add_total, swap_cache_info.del_total,
		swap_cache_info.find_success, swap_cache_info.find_total,
		swap_cache_info.noent_race, swap_cache_info.exist_race);
	printk("Free swap  = %lukB\n", nr_swap_pages << (PAGE_SHIFT - 10));
	printk("Total swap = %lukB\n", total_swap_pages << (PAGE_SHIFT - 10));
}
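
/*
 * For illustration only (hypothetical numbers): the first line printed
 * above might read
 *
 *	Swap cache: add 1024, delete 1000, find 512/768, race 1+2
 *
 * i.e. 512 of 768 lookups hit, with one noent_race and two exist_race
 * events counted.
 */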

/*
 * __add_to_swap_cache resembles add_to_page_cache on swapper_space,
 * but sets SwapCache flag and private instead of mapping and index.
 */
static int __add_to_swap_cache(struct page *page,
		swp_entry_t entry, int gfp_mask)
{
	int error;

	BUG_ON(PageSwapCache(page));
	BUG_ON(PagePrivate(page));
	error = radix_tree_preload(gfp_mask);
	if (!error) {
		spin_lock_irq(&swapper_space.tree_lock);
		error = radix_tree_insert(&swapper_space.page_tree,
						entry.val, page);
		if (!error) {
			page_cache_get(page);
			SetPageLocked(page);
			SetPageSwapCache(page);
			page->private = entry.val;
			total_swapcache_pages++;
			pagecache_acct(1);
		}
		spin_unlock_irq(&swapper_space.tree_lock);
		radix_tree_preload_end();
	}
	return error;
}
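
/*
 * Sketch of the pattern used above (for illustration only): preloading
 * guarantees that radix_tree_insert() under the spinlock cannot need a
 * fresh node allocation, so the gfp_mask is consumed outside the lock:
 *
 *	error = radix_tree_preload(gfp_mask);	// may sleep if gfp allows
 *	if (!error) {
 *		spin_lock_irq(&tree_lock);
 *		error = radix_tree_insert(...);	// uses preloaded nodes
 *		spin_unlock_irq(&tree_lock);
 *		radix_tree_preload_end();	// drop per-cpu preload
 *	}
 */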

static int add_to_swap_cache(struct page *page, swp_entry_t entry)
{
	int error;

	if (!swap_duplicate(entry)) {
		INC_CACHE_INFO(noent_race);
		return -ENOENT;
	}
	error = __add_to_swap_cache(page, entry, GFP_KERNEL);
	/*
	 * Anon pages are already on the LRU, we don't run lru_cache_add here.
	 */
	if (error) {
		swap_free(entry);
		if (error == -EEXIST)
			INC_CACHE_INFO(exist_race);
		return error;
	}
	INC_CACHE_INFO(add_total);
	return 0;
}
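
/*
 * Note on the swap reference counting above (illustrative summary):
 * swap_duplicate(entry) takes an extra reference on the swap slot, which
 * the swap cache owns while the page is cached; on failure the reference
 * is dropped again with swap_free(entry), leaving the slot count balanced:
 *
 *	swap_duplicate(entry);			// count: n -> n+1
 *	if (__add_to_swap_cache(...) != 0)
 *		swap_free(entry);		// count: n+1 -> n
 */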

/*
 * This must be called only on pages that have
 * been verified to be in the swap cache.
 */
void __delete_from_swap_cache(struct page *page)
{
	BUG_ON(!PageLocked(page));
	BUG_ON(!PageSwapCache(page));
	BUG_ON(PageWriteback(page));

	radix_tree_delete(&swapper_space.page_tree, page->private);
	page->private = 0;
	ClearPageSwapCache(page);
	total_swapcache_pages--;
	pagecache_acct(-1);
	INC_CACHE_INFO(del_total);
}

/**
 * add_to_swap - allocate swap space for a page
 * @page: page we want to move to swap
 *
 * Allocate swap space for the page and add the page to the
 * swap cache.  Caller needs to hold the page lock.
 */
int add_to_swap(struct page * page)
{
	swp_entry_t entry;
	int pf_flags;
	int err;

	if (!PageLocked(page))
		BUG();

	for (;;) {
		entry = get_swap_page();
		if (!entry.val)
			return 0;

		/* Radix-tree node allocations are performing
		 * GFP_ATOMIC allocations under PF_MEMALLOC.
		 * They can completely exhaust the page allocator.
		 *
		 * So PF_MEMALLOC is dropped here.  This causes the slab
		 * allocations to fail earlier, so radix-tree nodes will
		 * then be allocated from the mempool reserves.
		 *
		 * We're still using __GFP_HIGH for radix-tree node
		 * allocations, so some of the emergency pools are available,
		 * just not all of them.
		 */
		pf_flags = current->flags;
		current->flags &= ~PF_MEMALLOC;

		/*
		 * Add it to the swap cache and mark it dirty
		 */
		err = __add_to_swap_cache(page, entry, GFP_ATOMIC|__GFP_NOWARN);

		if (pf_flags & PF_MEMALLOC)
			current->flags |= PF_MEMALLOC;

		switch (err) {
		case 0:				/* Success */
			SetPageUptodate(page);
			SetPageDirty(page);
			INC_CACHE_INFO(add_total);
			return 1;
		case -EEXIST:
			/* Raced with "speculative" read_swap_cache_async */
			INC_CACHE_INFO(exist_race);
			swap_free(entry);
			continue;
		default:
			/* -ENOMEM radix-tree allocation failure */
			swap_free(entry);
			return 0;
		}
	}
}
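
/*
 * Illustrative caller sketch (hypothetical, not from this file): the
 * vmscan path typically does something like the following with the page
 * already locked, keeping the page when no swap slot is available:
 *
 *	if (PageAnon(page) && !PageSwapCache(page))
 *		if (!add_to_swap(page))
 *			goto keep_locked;	// swap full
 *	// on success the page is PageSwapCache, PageUptodate and
 *	// PageDirty, so normal writeback goes via swap_writepage()
 */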

/*
 * This must be called only on pages that have
 * been verified to be in the swap cache and locked.
 * It will never put the page into the free list,
 * the caller has a reference on the page.
 */
void delete_from_swap_cache(struct page *page)
{
	swp_entry_t entry;

	BUG_ON(!PageSwapCache(page));
	BUG_ON(!PageLocked(page));
	BUG_ON(PageWriteback(page));
	BUG_ON(PagePrivate(page));

	entry.val = page->private;

	spin_lock_irq(&swapper_space.tree_lock);
	__delete_from_swap_cache(page);
	spin_unlock_irq(&swapper_space.tree_lock);

	swap_free(entry);
	page_cache_release(page);
}

/*
 * Strange swizzling function only for use by shmem_writepage
 */
int move_to_swap_cache(struct page *page, swp_entry_t entry)
{
	int err = __add_to_swap_cache(page, entry, GFP_ATOMIC);
	if (!err) {
		remove_from_page_cache(page);
		page_cache_release(page);	/* pagecache ref */
		if (!swap_duplicate(entry))
			BUG();
		SetPageDirty(page);
		INC_CACHE_INFO(add_total);
	} else if (err == -EEXIST)
		INC_CACHE_INFO(exist_race);
	return err;
}

/*
 * Strange swizzling function for shmem_getpage (and shmem_unuse)
 */
int move_from_swap_cache(struct page *page, unsigned long index,
		struct address_space *mapping)
{
	int err = add_to_page_cache(page, mapping, index, GFP_ATOMIC);
	if (!err) {
		delete_from_swap_cache(page);
		/* shift page from clean_pages to dirty_pages list */
		ClearPageDirty(page);
		set_page_dirty(page);
	}
	return err;
}

/*
 * If we are the only user, then try to free up the swap cache.
 *
 * It's ok to check for PageSwapCache without the page lock
 * here because we are going to recheck again inside
 * exclusive_swap_page() _with_ the lock.
 * 					- Marcelo
 */
static inline void free_swap_cache(struct page *page)
{
	if (PageSwapCache(page) && !TestSetPageLocked(page)) {
		remove_exclusive_swap_page(page);
		unlock_page(page);
	}
}

/*
 * Perform a free_page(), also freeing any swap cache associated with
 * this page if it is the last user of the page. Can not do a lock_page,
 * as we are holding the page_table_lock spinlock.
 */
void free_page_and_swap_cache(struct page *page)
{
	free_swap_cache(page);
	page_cache_release(page);
}

/*
 * Passed an array of pages, drop them all from swapcache and then release
 * them.  They are removed from the LRU and freed if this is their last use.
 */
void free_pages_and_swap_cache(struct page **pages, int nr)
{
	int chunk = 16;
	struct page **pagep = pages;

	lru_add_drain();
	while (nr) {
		int todo = min(chunk, nr);
		int i;

		for (i = 0; i < todo; i++)
			free_swap_cache(pagep[i]);
		release_pages(pagep, todo, 0);
		pagep += todo;
		nr -= todo;
	}
}

/*
 * Lookup a swap entry in the swap cache. A found page will be returned
 * unlocked and with its refcount incremented - we rely on the kernel
 * lock getting page table operations atomic even if we drop the page
 * lock before returning.
 */
struct page * lookup_swap_cache(swp_entry_t entry)
{
	struct page *page;

	spin_lock_irq(&swapper_space.tree_lock);
	page = radix_tree_lookup(&swapper_space.page_tree, entry.val);
	if (page) {
		page_cache_get(page);
		INC_CACHE_INFO(find_success);
	}
	spin_unlock_irq(&swapper_space.tree_lock);
	INC_CACHE_INFO(find_total);
	return page;
}

/*
 * Locate a page of swap in physical memory, reserving swap cache space
 * and reading the disk if it is not already cached.
 * A failure return means that either the page allocation failed or that
 * the swap entry is no longer in use.
 */
struct page *read_swap_cache_async(swp_entry_t entry,
			struct vm_area_struct *vma, unsigned long addr)
{
	struct page *found_page, *new_page = NULL;
	int err;

	do {
		/*
		 * First check the swap cache.  Since this is normally
		 * called after lookup_swap_cache() failed, re-calling
		 * that would confuse statistics.
		 */
		spin_lock_irq(&swapper_space.tree_lock);
		found_page = radix_tree_lookup(&swapper_space.page_tree,
						entry.val);
		if (found_page)
			page_cache_get(found_page);
		spin_unlock_irq(&swapper_space.tree_lock);
		if (found_page)
			break;

		/*
		 * Get a new page to read into from swap.
		 */
		if (!new_page) {
			new_page = alloc_page_vma(GFP_HIGHUSER, vma, addr);
			if (!new_page)
				break;		/* Out of memory */
		}

		/*
		 * Associate the page with swap entry in the swap cache.
		 * May fail (-ENOENT) if swap entry has been freed since
		 * our caller observed it.  May fail (-EEXIST) if there
		 * is already a page associated with this entry in the
		 * swap cache: added by a racing read_swap_cache_async,
		 * or by try_to_swap_out (or shmem_writepage) re-using
		 * the just freed swap entry for an existing page.
		 * May fail (-ENOMEM) if radix-tree node allocation failed.
		 */
		err = add_to_swap_cache(new_page, entry);
		if (!err) {
			/*
			 * Initiate read into locked page and return.
			 */
			lru_cache_add_active(new_page);
			swap_readpage(NULL, new_page);
			return new_page;
		}
	} while (err != -ENOENT && err != -ENOMEM);

	if (new_page)
		page_cache_release(new_page);
	return found_page;
}