/*
 *  linux/mm/swap_state.c
 *
 *  Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
 *  Swap reorganised 29.12.95, Stephen Tweedie
 *
 *  Rewritten to use page cache, (C) 1998 Stephen Tweedie
 */
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/kernel_stat.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/init.h>
#include <linux/pagemap.h>
#include <linux/buffer_head.h>
#include <linux/backing-dev.h>
#include <linux/pagevec.h>
#include <linux/migrate.h>

#include <asm/pgtable.h>
/*
 * swapper_space is a fiction, retained to simplify the path through
 * vmscan's shrink_page_list, to make sync_page look nicer, and to allow
 * future use of radix_tree tags in the swap cache.
 */
static const struct address_space_operations swap_aops = {
        .writepage      = swap_writepage,
        .sync_page      = block_sync_page,
        .set_page_dirty = __set_page_dirty_nobuffers,
        .migratepage    = migrate_page,
};
static struct backing_dev_info swap_backing_dev_info = {
        .capabilities   = BDI_CAP_NO_ACCT_AND_WRITEBACK,
        .unplug_io_fn   = swap_unplug_io_fn,
};
struct address_space swapper_space = {
        .page_tree      = RADIX_TREE_INIT(GFP_ATOMIC|__GFP_NOWARN),
        .tree_lock      = __SPIN_LOCK_UNLOCKED(swapper_space.tree_lock),
        .a_ops          = &swap_aops,
        .i_mmap_nonlinear = LIST_HEAD_INIT(swapper_space.i_mmap_nonlinear),
        .backing_dev_info = &swap_backing_dev_info,
};
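
/*
 * Added summary (not part of the original file): a page in the swap cache is
 * keyed by its swap entry rather than by a file offset.  page_private(page)
 * holds entry.val, PG_swapcache is set, and the page sits in
 * swapper_space.page_tree at index entry.val, so
 * find_get_page(&swapper_space, entry.val) finds it again.
 */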
#define INC_CACHE_INFO(x)       do { swap_cache_info.x++; } while (0)

static struct {
        unsigned long add_total;
        unsigned long del_total;
        unsigned long find_success;
        unsigned long find_total;
} swap_cache_info;
void show_swap_cache_info(void)
{
        printk("Swap cache: add %lu, delete %lu, find %lu/%lu\n",
                swap_cache_info.add_total, swap_cache_info.del_total,
                swap_cache_info.find_success, swap_cache_info.find_total);
        printk("Free swap  = %lukB\n", nr_swap_pages << (PAGE_SHIFT - 10));
        printk("Total swap = %lukB\n", total_swap_pages << (PAGE_SHIFT - 10));
}
/*
 * add_to_swap_cache resembles add_to_page_cache_locked on swapper_space,
 * but sets SwapCache flag and private instead of mapping and index.
 */
int add_to_swap_cache(struct page *page, swp_entry_t entry, gfp_t gfp_mask)
{
        int error;

        BUG_ON(!PageLocked(page));
        BUG_ON(PageSwapCache(page));
        BUG_ON(PagePrivate(page));
        error = radix_tree_preload(gfp_mask);
        if (!error) {
                page_cache_get(page);
                SetPageSwapCache(page);
                set_page_private(page, entry.val);

                spin_lock_irq(&swapper_space.tree_lock);
                error = radix_tree_insert(&swapper_space.page_tree,
                                                entry.val, page);
                if (likely(!error)) {
                        total_swapcache_pages++;
                        __inc_zone_page_state(page, NR_FILE_PAGES);
                        INC_CACHE_INFO(add_total);
                }
                spin_unlock_irq(&swapper_space.tree_lock);
                radix_tree_preload_end();

                if (unlikely(error)) {
                        set_page_private(page, 0UL);
                        ClearPageSwapCache(page);
                        page_cache_release(page);
                }
        }
        return error;
}
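
/*
 * Illustrative caller contract (summary added here, not in the original):
 * the page must be locked and not yet in the swap cache, and the caller
 * must already hold a reference on the swap entry (via get_swap_page() or
 * swap_duplicate()).  On success the swap cache takes its own reference
 * through page_cache_get(); on failure the page is left untouched and the
 * error (-EEXIST or -ENOMEM) is returned, which add_to_swap() below and
 * read_swap_cache_async() rely on to decide whether to retry.
 */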
/*
 * This must be called only on pages that have
 * been verified to be in the swap cache.
 */
void __delete_from_swap_cache(struct page *page)
{
        BUG_ON(!PageLocked(page));
        BUG_ON(!PageSwapCache(page));
        BUG_ON(PageWriteback(page));
        BUG_ON(PagePrivate(page));

        radix_tree_delete(&swapper_space.page_tree, page_private(page));
        set_page_private(page, 0);
        ClearPageSwapCache(page);
        total_swapcache_pages--;
        __dec_zone_page_state(page, NR_FILE_PAGES);
        INC_CACHE_INFO(del_total);
}
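
/*
 * Added note: as the double-underscore prefix suggests, callers of
 * __delete_from_swap_cache() must already hold swapper_space.tree_lock;
 * delete_from_swap_cache() below is the self-locking wrapper that also
 * drops the swap entry and the swap cache's page reference.
 */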
/**
 * add_to_swap - allocate swap space for a page
 * @page: page we want to move to swap
 * @gfp_mask: memory allocation flags
 *
 * Allocate swap space for the page and add the page to the
 * swap cache.  Caller needs to hold the page lock.
 */
int add_to_swap(struct page * page, gfp_t gfp_mask)
{
        swp_entry_t entry;
        int err;

        BUG_ON(!PageLocked(page));
        BUG_ON(!PageUptodate(page));

        for (;;) {
                entry = get_swap_page();
                if (!entry.val)
                        return 0;

                /*
                 * Radix-tree node allocations from PF_MEMALLOC contexts could
                 * completely exhaust the page allocator.  __GFP_NOMEMALLOC
                 * stops emergency reserves from being allocated.
                 *
                 * TODO: this could cause a theoretical memory reclaim
                 * deadlock in the swap out path.
                 */
                /*
                 * Add it to the swap cache and mark it dirty
                 */
                err = add_to_swap_cache(page, entry,
                                gfp_mask|__GFP_NOMEMALLOC|__GFP_NOWARN);

                switch (err) {
                case 0:                         /* Success */
                        SetPageDirty(page);
                        return 1;
                case -EEXIST:
                        /* Raced with "speculative" read_swap_cache_async */
                        swap_free(entry);
                        continue;
                default:
                        /* -ENOMEM radix-tree allocation failure */
                        swap_free(entry);
                        return 0;
                }
        }
}
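
/*
 * Illustrative use (sketch added here, not part of the original file):
 * vmscan's shrink_page_list() typically calls this for a dirty anonymous
 * page before it can be written out, roughly:
 *
 *      if (PageAnon(page) && !PageSwapCache(page)) {
 *              if (!add_to_swap(page, GFP_ATOMIC))
 *                      goto activate_locked;
 *      }
 *
 * A return of 1 means the page now has swap space and sits dirty in the
 * swap cache; 0 means no swap could be allocated.
 */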
/*
 * This must be called only on pages that have
 * been verified to be in the swap cache and locked.
 * It will never put the page into the free list,
 * the caller has a reference on the page.
 */
void delete_from_swap_cache(struct page *page)
{
        swp_entry_t entry;

        entry.val = page_private(page);

        spin_lock_irq(&swapper_space.tree_lock);
        __delete_from_swap_cache(page);
        spin_unlock_irq(&swapper_space.tree_lock);

        swap_free(entry);
        page_cache_release(page);
}
/*
 * If we are the only user, then try to free up the swap cache.
 *
 * It's ok to check for PageSwapCache without the page lock
 * here because we are going to recheck again inside
 * exclusive_swap_page() _with_ the lock.
 */
static inline void free_swap_cache(struct page *page)
{
        if (PageSwapCache(page) && !TestSetPageLocked(page)) {
                remove_exclusive_swap_page(page);
                unlock_page(page);
        }
}
/*
 * Perform a free_page(), also freeing any swap cache associated with
 * this page if it is the last user of the page.
 */
void free_page_and_swap_cache(struct page *page)
{
        free_swap_cache(page);
        page_cache_release(page);
}
/*
 * Passed an array of pages, drop them all from swapcache and then release
 * them.  They are removed from the LRU and freed if this is their last use.
 */
void free_pages_and_swap_cache(struct page **pages, int nr)
{
        struct page **pagep = pages;

        lru_add_drain();
        while (nr) {
                int todo = min(nr, PAGEVEC_SIZE);
                int i;

                for (i = 0; i < todo; i++)
                        free_swap_cache(pagep[i]);
                release_pages(pagep, todo, 0);
                pagep += todo;
                nr -= todo;
        }
}
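
/*
 * Added note (not from the original file): this is the batched helper used
 * by the mmu_gather/TLB teardown path, e.g. tlb_flush_mmu() handing over the
 * array of just-unmapped pages; processing them in PAGEVEC_SIZE chunks keeps
 * each release_pages() call bounded.
 */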
/*
 * Lookup a swap entry in the swap cache. A found page will be returned
 * unlocked and with its refcount incremented - we rely on the kernel
 * lock getting page table operations atomic even if we drop the page
 * lock before returning.
 */
struct page * lookup_swap_cache(swp_entry_t entry)
{
        struct page *page;

        page = find_get_page(&swapper_space, entry.val);

        if (page)
                INC_CACHE_INFO(find_success);

        INC_CACHE_INFO(find_total);
        return page;
}
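
/*
 * Illustrative call sequence (sketch added here, not in the original):
 * the page fault path in do_swap_page() first tries the cache and only
 * starts I/O on a miss, roughly:
 *
 *      page = lookup_swap_cache(entry);
 *      if (!page)
 *              page = swapin_readahead(entry, GFP_HIGHUSER_MOVABLE,
 *                                      vma, address);
 *
 * which is also why read_swap_cache_async() below does not bump the
 * find_success/find_total counters again.
 */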
/*
 * Locate a page of swap in physical memory, reserving swap cache space
 * and reading the disk if it is not already cached.
 * A failure return means that either the page allocation failed or that
 * the swap entry is no longer in use.
 */
struct page *read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask,
                        struct vm_area_struct *vma, unsigned long addr)
{
        struct page *found_page, *new_page = NULL;
        int err;

        do {
                /*
                 * First check the swap cache.  Since this is normally
                 * called after lookup_swap_cache() failed, re-calling
                 * that would confuse statistics.
                 */
                found_page = find_get_page(&swapper_space, entry.val);
                if (found_page)
                        break;

                /*
                 * Get a new page to read into from swap.
                 */
                if (!new_page) {
                        new_page = alloc_page_vma(gfp_mask, vma, addr);
                        if (!new_page)
                                break;          /* Out of memory */
                }

                /*
                 * Swap entry may have been freed since our caller observed it.
                 */
                if (!swap_duplicate(entry))
                        break;

                /*
                 * Associate the page with swap entry in the swap cache.
                 * May fail (-EEXIST) if there is already a page associated
                 * with this entry in the swap cache: added by a racing
                 * read_swap_cache_async, or add_to_swap or shmem_writepage
                 * re-using the just freed swap entry for an existing page.
                 * May fail (-ENOMEM) if radix-tree node allocation failed.
                 */
                SetPageLocked(new_page);
                err = add_to_swap_cache(new_page, entry, gfp_mask & GFP_KERNEL);
                if (!err) {
                        /*
                         * Initiate read into locked page and return.
                         */
                        lru_cache_add_active(new_page);
                        swap_readpage(NULL, new_page);
                        return new_page;
                }
                ClearPageLocked(new_page);
                swap_free(entry);
        } while (err != -ENOMEM);

        if (new_page)
                page_cache_release(new_page);
        return found_page;
}
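
/*
 * Added note (not in the original file): whichever path is taken, a non-NULL
 * page returned here carries a reference from find_get_page() or
 * alloc_page_vma() that the caller must drop with page_cache_release() once
 * it is done, as swapin_readahead() below does for its read-ahead pages.
 */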
/**
 * swapin_readahead - swap in pages in hope we need them soon
 * @entry: swap entry of this memory
 * @gfp_mask: memory allocation flags
 * @vma: user vma this address belongs to
 * @addr: target address for mempolicy
 *
 * Returns the struct page for entry and addr, after queueing swapin.
 *
 * Primitive swap readahead code. We simply read an aligned block of
 * (1 << page_cluster) entries in the swap area. This method is chosen
 * because it doesn't cost us any seek time.  We also make sure to queue
 * the 'original' request together with the readahead ones...
 *
 * This has been extended to use the NUMA policies from the mm triggering
 * the readahead.
 *
 * Caller must hold down_read on the vma->vm_mm if vma is not NULL.
 */
struct page *swapin_readahead(swp_entry_t entry, gfp_t gfp_mask,
                        struct vm_area_struct *vma, unsigned long addr)
{
        int nr_pages;
        struct page *page;
        unsigned long offset;
        unsigned long end_offset;

        /*
         * Get starting offset for readaround, and number of pages to read.
         * Adjust starting address by readbehind (for NUMA interleave case)?
         * No, it's very unlikely that swap layout would follow vma layout,
         * more likely that neighbouring swap pages came from the same node:
         * so use the same "addr" to choose the same node for each swap read.
         */
        nr_pages = valid_swaphandles(entry, &offset);
        for (end_offset = offset + nr_pages; offset < end_offset; offset++) {
                /* Ok, do the async read-ahead now */
                page = read_swap_cache_async(swp_entry(swp_type(entry), offset),
                                                gfp_mask, vma, addr);
                if (!page)
                        break;
                page_cache_release(page);
        }
        lru_add_drain();        /* Push any new pages onto the LRU now */
        return read_swap_cache_async(entry, gfp_mask, vma, addr);
}
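
/*
 * Worked example (added illustration, with assumed values): with page_cluster
 * at its common default of 3, valid_swaphandles() rounds the faulting offset
 * down to an 8-entry boundary and counts in-use entries from there.  A fault
 * on swap offset 21 thus starts read-ahead at offset 16 and issues
 * read_swap_cache_async() for offsets 16, 17, ... up to 23, stopping early at
 * the first free or bad slot, before re-issuing the request for the original
 * entry and returning that page.
 */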