/*
 *  linux/mm/swap_state.c
 *
 *  Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
 *  Swap reorganised 29.12.95, Stephen Tweedie
 *
 *  Rewritten to use page cache, (C) 1998 Stephen Tweedie
 */
#include <linux/kernel_stat.h>
#include <linux/swap.h>
#include <linux/swapctl.h>
#include <linux/init.h>
#include <linux/pagemap.h>
#include <linux/smp_lock.h>

#include <asm/pgtable.h>
20 static struct address_space_operations swap_aops
= {
21 sync_page
: block_sync_page
24 struct address_space swapper_space
= {
26 &swapper_space
.pages
, /* .next */
27 &swapper_space
.pages
/* .prev */
#ifdef SWAP_CACHE_INFO
/* Debug-only counters, compiled in when SWAP_CACHE_INFO is defined. */
unsigned long swap_cache_add_total;
unsigned long swap_cache_del_total;
unsigned long swap_cache_find_total;
unsigned long swap_cache_find_success;

/*
 * Dump the swap-cache hit/miss statistics to the kernel log.
 */
void show_swap_cache_info(void)
{
	printk("Swap cache: add %ld, delete %ld, find %ld/%ld\n",
		swap_cache_add_total, swap_cache_del_total,
		swap_cache_find_success, swap_cache_find_total);
}
#endif
48 void add_to_swap_cache(struct page
*page
, swp_entry_t entry
)
52 #ifdef SWAP_CACHE_INFO
53 swap_cache_add_total
++;
55 if (!PageLocked(page
))
57 if (PageTestandSetSwapCache(page
))
61 flags
= page
->flags
& ~((1 << PG_error
) | (1 << PG_dirty
) | (1 << PG_referenced
));
62 page
->flags
= flags
| (1 << PG_uptodate
);
63 add_to_page_cache_locked(page
, &swapper_space
, entry
.val
);
66 static inline void remove_from_swap_cache(struct page
*page
)
68 struct address_space
*mapping
= page
->mapping
;
70 if (mapping
!= &swapper_space
)
72 if (!PageSwapCache(page
) || !PageLocked(page
))
75 PageClearSwapCache(page
);
76 remove_inode_page(page
);
80 * This must be called only on pages that have
81 * been verified to be in the swap cache.
83 void __delete_from_swap_cache(struct page
*page
)
87 entry
.val
= page
->index
;
89 #ifdef SWAP_CACHE_INFO
90 swap_cache_del_total
++;
92 remove_from_swap_cache(page
);
/*
 * This will never put the page into the free list, the caller has
 * a reference on the page.
 *
 * The page must already be locked.  Any buffers are flushed first; if
 * that fully released them the page is also taken off the LRU before
 * the swap-cache state is removed.
 */
void delete_from_swap_cache_nolock(struct page *page)
{
	if (!PageLocked(page))
		BUG();

	if (block_flushpage(page, 0))
		lru_cache_del(page);

	__delete_from_swap_cache(page);
	page_cache_release(page);
}
/*
 * This must be called only on pages that have
 * been verified to be in the swap cache and locked.
 *
 * Locking wrapper around delete_from_swap_cache_nolock().
 */
void delete_from_swap_cache(struct page *page)
{
	lock_page(page);
	delete_from_swap_cache_nolock(page);
	UnlockPage(page);
}
/*
 * Perform a free_page(), also freeing any swap cache associated with
 * this page if it is the last user of the page. Can not do a lock_page,
 * as we are holding the page_table_lock spinlock.
 */
void free_page_and_swap_cache(struct page *page)
{
	/*
	 * If we are the only user, then try to free up the swap cache.
	 * TryLockPage (not lock_page) because we must not sleep here.
	 */
	if (PageSwapCache(page) && !TryLockPage(page)) {
		if (!is_page_shared(page)) {
			delete_from_swap_cache_nolock(page);
		}
		UnlockPage(page);
	}
	page_cache_release(page);
}
144 * Lookup a swap entry in the swap cache. A found page will be returned
145 * unlocked and with its refcount incremented - we rely on the kernel
146 * lock getting page table operations atomic even if we drop the page
147 * lock before returning.
150 struct page
* lookup_swap_cache(swp_entry_t entry
)
154 #ifdef SWAP_CACHE_INFO
155 swap_cache_find_total
++;
159 * Right now the pagecache is 32-bit only. But it's a 32 bit index. =)
162 found
= find_lock_page(&swapper_space
, entry
.val
);
166 * Though the "found" page was in the swap cache an instant
167 * earlier, it might have been removed by shrink_mmap etc.
168 * Re search ... Since find_lock_page grabs a reference on
169 * the page, it can not be reused for anything else, namely
170 * it can not be associated with another swaphandle, so it
171 * is enough to check whether the page is still in the scache.
173 if (!PageSwapCache(found
)) {
175 page_cache_release(found
);
178 if (found
->mapping
!= &swapper_space
)
180 #ifdef SWAP_CACHE_INFO
181 swap_cache_find_success
++;
188 printk (KERN_ERR
"VM: Found a non-swapper swap page!\n");
190 page_cache_release(found
);
195 * Locate a page of swap in physical memory, reserving swap cache space
196 * and reading the disk if it is not already cached. If wait==0, we are
197 * only doing readahead, so don't worry if the page is already locked.
199 * A failure return means that either the page allocation failed or that
200 * the swap entry is no longer in use.
203 struct page
* read_swap_cache_async(swp_entry_t entry
, int wait
)
205 struct page
*found_page
= 0, *new_page
;
206 unsigned long new_page_addr
;
209 * Make sure the swap entry is still in use.
211 if (!swap_duplicate(entry
)) /* Account for the swap cache */
214 * Look for the page in the swap cache.
216 found_page
= lookup_swap_cache(entry
);
220 new_page_addr
= __get_free_page(GFP_USER
);
222 goto out_free_swap
; /* Out of memory */
223 new_page
= mem_map
+ MAP_NR(new_page_addr
);
226 * Check the swap cache again, in case we stalled above.
228 found_page
= lookup_swap_cache(entry
);
232 * Add it to the swap cache and read its contents.
235 add_to_swap_cache(new_page
, entry
);
236 rw_swap_page(READ
, new_page
, wait
);
240 page_cache_release(new_page
);