/*
 *  linux/mm/swap_state.c
 *
 *  Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
 *  Swap reorganised 29.12.95, Stephen Tweedie
 *
 *  Rewritten to use page cache, (C) 1998 Stephen Tweedie
 */
#include <linux/mm.h>
#include <linux/gfp.h>
#include <linux/kernel_stat.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/init.h>
#include <linux/pagemap.h>
#include <linux/backing-dev.h>
#include <linux/blkdev.h>
#include <linux/pagevec.h>
#include <linux/migrate.h>
#include <linux/page_cgroup.h>

#include <asm/pgtable.h>

/*
 * swapper_space is a fiction, retained to simplify the path through
 * vmscan's shrink_page_list.
 */
static const struct address_space_operations swap_aops = {
	.writepage	= swap_writepage,
	.set_page_dirty	= swap_set_page_dirty,
	.migratepage	= migrate_page,
};

static struct backing_dev_info swap_backing_dev_info = {
	.name		= "swap",
	.capabilities	= BDI_CAP_NO_ACCT_AND_WRITEBACK | BDI_CAP_SWAP_BACKED,
};

struct address_space swapper_space = {
	.page_tree	= RADIX_TREE_INIT(GFP_ATOMIC|__GFP_NOWARN),
	.tree_lock	= __SPIN_LOCK_UNLOCKED(swapper_space.tree_lock),
	.a_ops		= &swap_aops,
	.i_mmap_nonlinear = LIST_HEAD_INIT(swapper_space.i_mmap_nonlinear),
	.backing_dev_info = &swap_backing_dev_info,
};
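
/*
 * Every page in the swap cache hangs off this single swapper_space and is
 * indexed in its radix tree by the page's swp_entry_t value rather than by
 * a file offset.
 */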

#define INC_CACHE_INFO(x)	do { swap_cache_info.x++; } while (0)

static struct {
	unsigned long add_total;
	unsigned long del_total;
	unsigned long find_success;
	unsigned long find_total;
} swap_cache_info;

void show_swap_cache_info(void)
{
	printk("%lu pages in swap cache\n", total_swapcache_pages);
	printk("Swap cache stats: add %lu, delete %lu, find %lu/%lu\n",
		swap_cache_info.add_total, swap_cache_info.del_total,
		swap_cache_info.find_success, swap_cache_info.find_total);
	printk("Free swap = %ldkB\n", nr_swap_pages << (PAGE_SHIFT - 10));
	printk("Total swap = %lukB\n", total_swap_pages << (PAGE_SHIFT - 10));
}

/*
 * __add_to_swap_cache resembles add_to_page_cache_locked on swapper_space,
 * but sets SwapCache flag and private instead of mapping and index.
 */
static int __add_to_swap_cache(struct page *page, swp_entry_t entry)
{
	int error;

	VM_BUG_ON(!PageLocked(page));
	VM_BUG_ON(PageSwapCache(page));
	VM_BUG_ON(!PageSwapBacked(page));

	page_cache_get(page);
	SetPageSwapCache(page);
	set_page_private(page, entry.val);
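
	/*
	 * Insert the page into swapper_space's radix tree under tree_lock;
	 * the lock is taken with interrupts disabled, matching the other
	 * swap-cache updates in this file.
	 */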
	spin_lock_irq(&swapper_space.tree_lock);
	error = radix_tree_insert(&swapper_space.page_tree, entry.val, page);
	if (likely(!error)) {
		total_swapcache_pages++;
		__inc_zone_page_state(page, NR_FILE_PAGES);
		INC_CACHE_INFO(add_total);
	}
	spin_unlock_irq(&swapper_space.tree_lock);

	if (unlikely(error)) {
		/*
		 * Only the context which has set the SWAP_HAS_CACHE flag
		 * would call add_to_swap_cache().
		 * So add_to_swap_cache() doesn't return -EEXIST.
		 */
		VM_BUG_ON(error == -EEXIST);
		set_page_private(page, 0UL);
		ClearPageSwapCache(page);
		page_cache_release(page);
	}

	return error;
}
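
/*
 * add_to_swap_cache() preloads radix-tree nodes with the caller's gfp_mask
 * so that __add_to_swap_cache() can insert without allocating while holding
 * the irq-disabled tree_lock.
 */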
int add_to_swap_cache(struct page *page, swp_entry_t entry, gfp_t gfp_mask)
{
	int error;

	error = radix_tree_preload(gfp_mask);
	if (!error) {
		error = __add_to_swap_cache(page, entry);
		radix_tree_preload_end();
	}
	return error;
}

/*
 * This must be called only on pages that have
 * been verified to be in the swap cache.
 */
void __delete_from_swap_cache(struct page *page)
{
	VM_BUG_ON(!PageLocked(page));
	VM_BUG_ON(!PageSwapCache(page));
	VM_BUG_ON(PageWriteback(page));

	radix_tree_delete(&swapper_space.page_tree, page_private(page));
	set_page_private(page, 0);
	ClearPageSwapCache(page);
	total_swapcache_pages--;
	__dec_zone_page_state(page, NR_FILE_PAGES);
	INC_CACHE_INFO(del_total);
}

/**
 * add_to_swap - allocate swap space for a page
 * @page: page we want to move to swap
 *
 * Allocate swap space for the page and add the page to the
 * swap cache.  Caller needs to hold the page lock.
 */
int add_to_swap(struct page *page)
{
	swp_entry_t entry;
	int err;

	VM_BUG_ON(!PageLocked(page));
	VM_BUG_ON(!PageUptodate(page));

	entry = get_swap_page();
	if (!entry.val)
		return 0;

	if (unlikely(PageTransHuge(page)))
		if (unlikely(split_huge_page(page))) {
			swapcache_free(entry, NULL);
			return 0;
		}

	/*
	 * Radix-tree node allocations from PF_MEMALLOC contexts could
	 * completely exhaust the page allocator.  __GFP_NOMEMALLOC
	 * stops emergency reserves from being allocated.
	 *
	 * TODO: this could cause a theoretical memory reclaim
	 * deadlock in the swap out path.
	 */
	/*
	 * Add it to the swap cache and mark it dirty.
	 */
	err = add_to_swap_cache(page, entry,
			__GFP_HIGH|__GFP_NOMEMALLOC|__GFP_NOWARN);

	if (!err) {	/* Success */
		SetPageDirty(page);
		return 1;
	} else {	/* -ENOMEM radix-tree allocation failure */
		/*
		 * add_to_swap_cache() doesn't return -EEXIST, so we can safely
		 * clear SWAP_HAS_CACHE flag.
		 */
		swapcache_free(entry, NULL);
		return 0;
	}
}
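
/*
 * Illustrative only: a typical caller is vmscan's shrink_page_list(), which
 * (roughly) does the following for a locked anonymous page before trying to
 * unmap and write it out.  This is a sketch, not a verbatim copy of vmscan:
 *
 *	if (PageAnon(page) && !PageSwapCache(page)) {
 *		if (!add_to_swap(page))
 *			goto activate_locked;
 *	}
 */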

/*
 * This must be called only on pages that have
 * been verified to be in the swap cache and locked.
 * It will never put the page into the free list,
 * the caller has a reference on the page.
 */
void delete_from_swap_cache(struct page *page)
{
	swp_entry_t entry;

	entry.val = page_private(page);

	spin_lock_irq(&swapper_space.tree_lock);
	__delete_from_swap_cache(page);
	spin_unlock_irq(&swapper_space.tree_lock);

	swapcache_free(entry, page);
	page_cache_release(page);
}

/*
 * If we are the only user, then try to free up the swap cache.
 *
 * It's ok to check for PageSwapCache without the page lock
 * here because we are going to recheck again inside
 * try_to_free_swap() _with_ the lock.
 */
static inline void free_swap_cache(struct page *page)
{
	if (PageSwapCache(page) && !page_mapped(page) && trylock_page(page)) {
		try_to_free_swap(page);
		unlock_page(page);
	}
}

/*
 * Perform a free_page(), also freeing any swap cache associated with
 * this page if it is the last user of the page.
 */
void free_page_and_swap_cache(struct page *page)
{
	free_swap_cache(page);
	page_cache_release(page);
}

/*
 * Passed an array of pages, drop them all from swapcache and then release
 * them.  They are removed from the LRU and freed if this is their last use.
 */
void free_pages_and_swap_cache(struct page **pages, int nr)
{
	struct page **pagep = pages;
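
	/*
	 * Drain the local LRU pagevecs, then work through the array in
	 * PAGEVEC_SIZE batches so that each release_pages() call sees a
	 * bounded chunk at a time.
	 */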
	lru_add_drain();
	while (nr) {
		int todo = min(nr, PAGEVEC_SIZE);
		int i;

		for (i = 0; i < todo; i++)
			free_swap_cache(pagep[i]);
		release_pages(pagep, todo, 0);
		pagep += todo;
		nr -= todo;
	}
}

/*
 * Lookup a swap entry in the swap cache. A found page will be returned
 * unlocked and with its refcount incremented - we rely on the kernel
 * lock getting page table operations atomic even if we drop the page
 * lock before returning.
 */
struct page *lookup_swap_cache(swp_entry_t entry)
{
	struct page *page;

	page = find_get_page(&swapper_space, entry.val);

	if (page)
		INC_CACHE_INFO(find_success);

	INC_CACHE_INFO(find_total);
	return page;
}

/*
 * Locate a page of swap in physical memory, reserving swap cache space
 * and reading the disk if it is not already cached.
 * A failure return means that either the page allocation failed or that
 * the swap entry is no longer in use.
 */
struct page *read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask,
			struct vm_area_struct *vma, unsigned long addr)
{
	struct page *found_page, *new_page = NULL;
	int err;
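
	/*
	 * The loop below retries whenever it loses a race with another task
	 * bringing the same entry into the swap cache; it only gives up on
	 * memory-allocation failure or when the swap entry is no longer in
	 * use.
	 */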
	do {
		/*
		 * First check the swap cache.  Since this is normally
		 * called after lookup_swap_cache() failed, re-calling
		 * that would confuse statistics.
		 */
		found_page = find_get_page(&swapper_space, entry.val);
		if (found_page)
			break;

		/*
		 * Get a new page to read into from swap.
		 */
		if (!new_page) {
			new_page = alloc_page_vma(gfp_mask, vma, addr);
			if (!new_page)
				break;		/* Out of memory */
		}

		/*
		 * call radix_tree_preload() while we can wait.
		 */
		err = radix_tree_preload(gfp_mask & GFP_KERNEL);
		if (err)
			break;

		/*
		 * Swap entry may have been freed since our caller observed it.
		 */
		err = swapcache_prepare(entry);
		if (err == -EEXIST) {	/* seems racy */
			radix_tree_preload_end();
			continue;
		}
		if (err) {		/* swp entry is obsolete? */
			radix_tree_preload_end();
			break;
		}

		/* May fail (-ENOMEM) if radix-tree node allocation failed. */
		__set_page_locked(new_page);
		SetPageSwapBacked(new_page);
		err = __add_to_swap_cache(new_page, entry);
		if (likely(!err)) {
			radix_tree_preload_end();
			/*
			 * Initiate read into locked page and return.
			 */
			lru_cache_add_anon(new_page);
			swap_readpage(new_page);
			return new_page;
		}
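
		/*
		 * __add_to_swap_cache() failed: undo the page state set up
		 * above and release the swap-cache reference on the entry.
		 */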
		radix_tree_preload_end();
		ClearPageSwapBacked(new_page);
		__clear_page_locked(new_page);
		/*
		 * add_to_swap_cache() doesn't return -EEXIST, so we can safely
		 * clear SWAP_HAS_CACHE flag.
		 */
		swapcache_free(entry, NULL);
	} while (err != -ENOMEM);

	if (new_page)
		page_cache_release(new_page);
	return found_page;
}

/**
 * swapin_readahead - swap in pages in hope we need them soon
 * @entry: swap entry of this memory
 * @gfp_mask: memory allocation flags
 * @vma: user vma this address belongs to
 * @addr: target address for mempolicy
 *
 * Returns the struct page for entry and addr, after queueing swapin.
 *
 * Primitive swap readahead code. We simply read an aligned block of
 * (1 << page_cluster) entries in the swap area. This method is chosen
 * because it doesn't cost us any seek time.  We also make sure to queue
 * the 'original' request together with the readahead ones...
 *
 * This has been extended to use the NUMA policies from the mm triggering
 * the readahead.
 *
 * Caller must hold down_read on the vma->vm_mm if vma is not NULL.
 */
struct page *swapin_readahead(swp_entry_t entry, gfp_t gfp_mask,
			struct vm_area_struct *vma, unsigned long addr)
{
	struct page *page;
	unsigned long offset = swp_offset(entry);
	unsigned long start_offset, end_offset;
	unsigned long mask = (1UL << page_cluster) - 1;
	struct blk_plug plug;

	/* Read a page_cluster sized and aligned cluster around offset. */
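	/*
	 * For example, with page_cluster == 3 (mask == 7) and offset == 21,
	 * start_offset becomes 16 and end_offset 23, so entries 16..23 are
	 * read, including the faulting entry itself.
	 */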
	start_offset = offset & ~mask;
	end_offset = offset | mask;
	if (!start_offset)	/* First page is swap header. */
		start_offset++;
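
	/*
	 * Plug the block layer so the readahead bios are batched and
	 * submitted together when the plug is finished below.
	 */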
	blk_start_plug(&plug);
	for (offset = start_offset; offset <= end_offset; offset++) {
		/* Ok, do the async read-ahead now */
		page = read_swap_cache_async(swp_entry(swp_type(entry), offset),
						gfp_mask, vma, addr);
		if (!page)
			continue;
		page_cache_release(page);
	}
	blk_finish_plug(&plug);

	lru_add_drain();	/* Push any new pages onto the LRU now */
	return read_swap_cache_async(entry, gfp_mask, vma, addr);
}