/*
 *  linux/mm/swap_state.c
 *
 *  Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
 *  Swap reorganised 29.12.95, Stephen Tweedie
 *
 *  Rewritten to use page cache, (C) 1998 Stephen Tweedie
 */
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/kernel_stat.h>
#include <linux/swap.h>
#include <linux/init.h>
#include <linux/pagemap.h>
#include <linux/buffer_head.h>
#include <linux/backing-dev.h>
#include <linux/pagevec.h>
#include <linux/migrate.h>

#include <asm/pgtable.h>
/*
 * swapper_space is a fiction, retained to simplify the path through
 * vmscan's shrink_list, to make sync_page look nicer, and to allow
 * future use of radix_tree tags in the swap cache.
 */
static const struct address_space_operations swap_aops = {
	.writepage	= swap_writepage,
	.sync_page	= block_sync_page,
	.set_page_dirty	= __set_page_dirty_nobuffers,
	.migratepage	= migrate_page,
};
static struct backing_dev_info swap_backing_dev_info = {
	.capabilities	= BDI_CAP_NO_ACCT_DIRTY | BDI_CAP_NO_WRITEBACK,
	.unplug_io_fn	= swap_unplug_io_fn,
};
struct address_space swapper_space = {
	.page_tree	= RADIX_TREE_INIT(GFP_ATOMIC|__GFP_NOWARN),
	.tree_lock	= __RW_LOCK_UNLOCKED(swapper_space.tree_lock),
	.a_ops		= &swap_aops,
	.i_mmap_nonlinear = LIST_HEAD_INIT(swapper_space.i_mmap_nonlinear),
	.backing_dev_info = &swap_backing_dev_info,
};
EXPORT_SYMBOL(swapper_space);
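
/*
 * Usage note (descriptive, mirroring the lookups later in this file):
 * the swap cache is indexed by swp_entry_t.val rather than by a file
 * offset, so a cached swap page is found with
 *
 *	page = find_get_page(&swapper_space, entry.val);
 *
 * which is exactly what lookup_swap_cache() and read_swap_cache_async()
 * do below.
 */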
#define INC_CACHE_INFO(x)	do { swap_cache_info.x++; } while (0)

static struct {
	unsigned long add_total;
	unsigned long del_total;
	unsigned long find_success;
	unsigned long find_total;
	unsigned long noent_race;
	unsigned long exist_race;
} swap_cache_info;
void show_swap_cache_info(void)
{
	printk("Swap cache: add %lu, delete %lu, find %lu/%lu, race %lu+%lu\n",
		swap_cache_info.add_total, swap_cache_info.del_total,
		swap_cache_info.find_success, swap_cache_info.find_total,
		swap_cache_info.noent_race, swap_cache_info.exist_race);
	printk("Free swap  = %lukB\n", nr_swap_pages << (PAGE_SHIFT - 10));
	printk("Total swap = %lukB\n", total_swap_pages << (PAGE_SHIFT - 10));
}
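
/*
 * Unit conversion in the two printks above: a count of pages becomes
 * kilobytes by multiplying by PAGE_SIZE (2^PAGE_SHIFT bytes) and
 * dividing by 1024 (2^10), i.e. a single left shift by PAGE_SHIFT - 10.
 * With 4K pages (PAGE_SHIFT == 12) that is "pages << 2", so 100 free
 * pages print as 400kB.
 */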
/*
 * __add_to_swap_cache resembles add_to_page_cache on swapper_space,
 * but sets SwapCache flag and private instead of mapping and index.
 */
static int __add_to_swap_cache(struct page *page, swp_entry_t entry,
			       gfp_t gfp_mask)
{
	int error;

	BUG_ON(PageSwapCache(page));
	BUG_ON(PagePrivate(page));
	error = radix_tree_preload(gfp_mask);
	if (!error) {
		write_lock_irq(&swapper_space.tree_lock);
		error = radix_tree_insert(&swapper_space.page_tree,
						entry.val, page);
		if (!error) {
			page_cache_get(page);
			SetPageLocked(page);
			SetPageSwapCache(page);
			set_page_private(page, entry.val);
			total_swapcache_pages++;
			__inc_zone_page_state(page, NR_FILE_PAGES);
		}
		write_unlock_irq(&swapper_space.tree_lock);
		radix_tree_preload_end();
	}
	return error;
}
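
/*
 * Locking pattern above (explanatory note): radix_tree_preload() runs
 * before tree_lock is taken, so any node allocation sleeps (or fails)
 * outside the lock; the radix_tree_insert() under write_lock_irq() can
 * then consume the preloaded nodes without allocating.  On success
 * radix_tree_preload() returns with preemption disabled, which
 * radix_tree_preload_end() undoes.
 */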
static int add_to_swap_cache(struct page *page, swp_entry_t entry)
{
	int error;

	if (!swap_duplicate(entry)) {
		INC_CACHE_INFO(noent_race);
		return -ENOENT;
	}
	error = __add_to_swap_cache(page, entry, GFP_KERNEL);
	/*
	 * Anon pages are already on the LRU, we don't run lru_cache_add here.
	 */
	if (error) {
		swap_free(entry);
		if (error == -EEXIST)
			INC_CACHE_INFO(exist_race);
		return error;
	}
	INC_CACHE_INFO(add_total);
	return 0;
}
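
/*
 * Reference counting note for the function above: swap_duplicate()
 * takes an extra reference on the swap entry (and fails if the entry
 * is no longer in use, hence the noent_race counter); on any cache
 * insertion failure that reference is dropped again with swap_free(),
 * leaving the entry's count as the caller saw it.
 */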
/*
 * This must be called only on pages that have
 * been verified to be in the swap cache.
 */
void __delete_from_swap_cache(struct page *page)
{
	BUG_ON(!PageLocked(page));
	BUG_ON(!PageSwapCache(page));
	BUG_ON(PageWriteback(page));
	BUG_ON(PagePrivate(page));

	radix_tree_delete(&swapper_space.page_tree, page_private(page));
	set_page_private(page, 0);
	ClearPageSwapCache(page);
	total_swapcache_pages--;
	__dec_zone_page_state(page, NR_FILE_PAGES);
	INC_CACHE_INFO(del_total);
}
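
/*
 * Descriptive note: the double-underscore prefix marks the "locked"
 * variant; the caller must already hold swapper_space.tree_lock for
 * writing, as delete_from_swap_cache() below demonstrates.
 */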
/**
 * add_to_swap - allocate swap space for a page
 * @page: page we want to move to swap
 *
 * Allocate swap space for the page and add the page to the
 * swap cache.  Caller needs to hold the page lock.
 */
int add_to_swap(struct page * page, gfp_t gfp_mask)
{
	swp_entry_t entry;
	int err;

	BUG_ON(!PageLocked(page));

	for (;;) {
		entry = get_swap_page();
		if (!entry.val)
			return 0;

		/*
		 * Radix-tree node allocations from PF_MEMALLOC contexts could
		 * completely exhaust the page allocator.  __GFP_NOMEMALLOC
		 * stops emergency reserves from being allocated.
		 *
		 * TODO: this could cause a theoretical memory reclaim
		 * deadlock in the swap out path.
		 */
		/*
		 * Add it to the swap cache and mark it dirty
		 */
		err = __add_to_swap_cache(page, entry,
				gfp_mask|__GFP_NOMEMALLOC|__GFP_NOWARN);

		switch (err) {
		case 0:				/* Success */
			SetPageUptodate(page);
			SetPageDirty(page);
			INC_CACHE_INFO(add_total);
			return 1;
		case -EEXIST:
			/* Raced with "speculative" read_swap_cache_async */
			INC_CACHE_INFO(exist_race);
			swap_free(entry);
			continue;
		default:
			/* -ENOMEM radix-tree allocation failure */
			swap_free(entry);
			return 0;
		}
	}
}
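
/*
 * Example caller (illustrative sketch of the reclaim path, not code
 * from this file): when vmscan's shrink_list meets a dirty anonymous
 * page it does roughly
 *
 *	if (PageAnon(page) && !PageSwapCache(page))
 *		if (!add_to_swap(page, GFP_ATOMIC))
 *			goto activate_locked;
 *
 * i.e. a zero return sends the page back to the active list instead
 * of being written out.
 */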
/*
 * This must be called only on pages that have
 * been verified to be in the swap cache and locked.
 * It will never put the page into the free list,
 * the caller has a reference on the page.
 */
void delete_from_swap_cache(struct page *page)
{
	swp_entry_t entry;

	entry.val = page_private(page);

	write_lock_irq(&swapper_space.tree_lock);
	__delete_from_swap_cache(page);
	write_unlock_irq(&swapper_space.tree_lock);

	swap_free(entry);
	page_cache_release(page);
}
/*
 * Strange swizzling function only for use by shmem_writepage
 */
int move_to_swap_cache(struct page *page, swp_entry_t entry)
{
	int err = __add_to_swap_cache(page, entry, GFP_ATOMIC);
	if (!err) {
		remove_from_page_cache(page);
		page_cache_release(page);	/* pagecache ref */
		if (!swap_duplicate(entry))
			BUG();
		SetPageDirty(page);
		INC_CACHE_INFO(add_total);
	} else if (err == -EEXIST)
		INC_CACHE_INFO(exist_race);
	return err;
}
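
/*
 * Reference juggling above (descriptive note): __add_to_swap_cache()
 * takes its own reference via page_cache_get(), so dropping the old
 * pagecache reference with page_cache_release() after
 * remove_from_page_cache() leaves the page's overall count balanced
 * while its home moves from the file mapping to the swap cache.
 */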
/*
 * Strange swizzling function for shmem_getpage (and shmem_unuse)
 */
int move_from_swap_cache(struct page *page, unsigned long index,
		struct address_space *mapping)
{
	int err = add_to_page_cache(page, mapping, index, GFP_ATOMIC);
	if (!err) {
		delete_from_swap_cache(page);
		/* shift page from clean_pages to dirty_pages list */
		ClearPageDirty(page);
		set_page_dirty(page);
	}
	return err;
}
/*
 * If we are the only user, then try to free up the swap cache.
 *
 * It's ok to check for PageSwapCache without the page lock
 * here because we are going to recheck again inside
 * exclusive_swap_page() _with_ the lock.
 * 					- Marcelo
 */
static inline void free_swap_cache(struct page *page)
{
	if (PageSwapCache(page) && !TestSetPageLocked(page)) {
		remove_exclusive_swap_page(page);
		unlock_page(page);
	}
}
/*
 * Perform a free_page(), also freeing any swap cache associated with
 * this page if it is the last user of the page.
 */
void free_page_and_swap_cache(struct page *page)
{
	free_swap_cache(page);
	page_cache_release(page);
}
/*
 * Passed an array of pages, drop them all from swapcache and then release
 * them.  They are removed from the LRU and freed if this is their last use.
 */
void free_pages_and_swap_cache(struct page **pages, int nr)
{
	struct page **pagep = pages;

	lru_add_drain();
	while (nr) {
		int todo = min(nr, PAGEVEC_SIZE);
		int i;

		for (i = 0; i < todo; i++)
			free_swap_cache(pagep[i]);
		release_pages(pagep, todo, 0);
		pagep += todo;
		nr -= todo;
	}
}
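
/*
 * Batching note (descriptive): the loop above works in PAGEVEC_SIZE
 * chunks so release_pages() only ever handles a bounded batch, which
 * keeps its irq-disabled lru_lock sections short.  The earlier
 * lru_add_drain() flushes the per-CPU LRU-add pagevecs so the pages
 * can really leave the LRU here.
 */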
/*
 * Lookup a swap entry in the swap cache. A found page will be returned
 * unlocked and with its refcount incremented - we rely on the kernel
 * lock getting page table operations atomic even if we drop the page
 * lock before returning.
 */
struct page * lookup_swap_cache(swp_entry_t entry)
{
	struct page *page;

	page = find_get_page(&swapper_space, entry.val);

	if (page)
		INC_CACHE_INFO(find_success);

	INC_CACHE_INFO(find_total);
	return page;
}
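
/*
 * Statistics note: the find_success and find_total counters updated
 * above are the "find %lu/%lu" pair printed by show_swap_cache_info(),
 * i.e. the swap cache hit rate.
 */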
/*
 * Locate a page of swap in physical memory, reserving swap cache space
 * and reading the disk if it is not already cached.
 * A failure return means that either the page allocation failed or that
 * the swap entry is no longer in use.
 */
struct page *read_swap_cache_async(swp_entry_t entry,
			struct vm_area_struct *vma, unsigned long addr)
{
	struct page *found_page, *new_page = NULL;
	int err;

	do {
		/*
		 * First check the swap cache.  Since this is normally
		 * called after lookup_swap_cache() failed, re-calling
		 * that would confuse statistics.
		 */
		found_page = find_get_page(&swapper_space, entry.val);
		if (found_page)
			break;

		/*
		 * Get a new page to read into from swap.
		 */
		if (!new_page) {
			new_page = alloc_page_vma(GFP_HIGHUSER, vma, addr);
			if (!new_page)
				break;		/* Out of memory */
		}

		/*
		 * Associate the page with swap entry in the swap cache.
		 * May fail (-ENOENT) if swap entry has been freed since
		 * our caller observed it.  May fail (-EEXIST) if there
		 * is already a page associated with this entry in the
		 * swap cache: added by a racing read_swap_cache_async,
		 * or by try_to_swap_out (or shmem_writepage) re-using
		 * the just freed swap entry for an existing page.
		 * May fail (-ENOMEM) if radix-tree node allocation failed.
		 */
		err = add_to_swap_cache(new_page, entry);
		if (!err) {
			/*
			 * Initiate read into locked page and return.
			 */
			lru_cache_add_active(new_page);
			swap_readpage(NULL, new_page);
			return new_page;
		}
	} while (err != -ENOENT && err != -ENOMEM);

	if (new_page)
		page_cache_release(new_page);
	return found_page;
}
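
/*
 * Example caller (illustrative sketch of the fault path, not code from
 * this file): do_swap_page() in mm/memory.c first tries
 * lookup_swap_cache(entry) and, on a miss, falls back to something like
 *
 *	swapin_readahead(entry, address, vma);
 *	page = read_swap_cache_async(entry, vma, address);
 *	if (!page)
 *		... entry freed by a concurrent swapoff/unuse, or the
 *		    allocation failed: recheck the pte and retry ...
 *
 * Either way a page returned here already carries a reference for the
 * caller.
 */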