#ifndef _LINUX_SWAP_H
#define _LINUX_SWAP_H

#include <linux/spinlock.h>
#include <linux/linkage.h>
#include <linux/mmzone.h>
#include <linux/list.h>
#include <linux/memcontrol.h>
#include <linux/sched.h>
#include <linux/node.h>
#include <linux/fs.h>
#include <linux/atomic.h>
#include <linux/page-flags.h>
#include <asm/page.h>

struct notifier_block;

struct bio;

#define SWAP_FLAG_PREFER	0x8000	/* set if swap priority specified */
#define SWAP_FLAG_PRIO_MASK	0x7fff
#define SWAP_FLAG_PRIO_SHIFT	0
#define SWAP_FLAG_DISCARD	0x10000 /* enable discard for swap */
#define SWAP_FLAG_DISCARD_ONCE	0x20000 /* discard swap area at swapon-time */
#define SWAP_FLAG_DISCARD_PAGES 0x40000 /* discard page-clusters after use */

#define SWAP_FLAGS_VALID	(SWAP_FLAG_PRIO_MASK | SWAP_FLAG_PREFER | \
				 SWAP_FLAG_DISCARD | SWAP_FLAG_DISCARD_ONCE | \
				 SWAP_FLAG_DISCARD_PAGES)
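/*
 * Usage sketch (hypothetical userspace caller; the device path is made up,
 * and sys_swapon rejects any flag outside SWAP_FLAGS_VALID):
 *
 *	swapon("/dev/sdb2", SWAP_FLAG_PREFER |
 *			    (5 << SWAP_FLAG_PRIO_SHIFT) |
 *			    SWAP_FLAG_DISCARD);
 *
 * activates the device at priority 5 with discard enabled.
 */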
static inline int current_is_kswapd(void)
{
	return current->flags & PF_KSWAPD;
}
/*
 * MAX_SWAPFILES defines the maximum number of swaptypes: things which can
 * be swapped to.  The swap type and the offset into that swap type are
 * encoded into pte's and into pgoff_t's in the swapcache.  Using five bits
 * for the type means that the maximum number of swapcache pages is 27 bits
 * on 32-bit-pgoff_t architectures.  And that assumes that the architecture
 * packs the type/offset into the pte as 5/27 as well.
 */
#define MAX_SWAPFILES_SHIFT	5
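/*
 * Worked example (illustrative): 5 type bits leave 27 offset bits in a
 * 32-bit pgoff_t, so with 4KiB pages a single swap area on such an
 * architecture tops out at 2^27 * 4KiB = 512GiB.
 */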
/*
 * Use some of the swap file numbers for other purposes. This
 * is a convenient way to hook into the VM to trigger special
 * actions on faults.
 */
/*
 * NUMA node memory migration support
 */
#ifdef CONFIG_MIGRATION
#define SWP_MIGRATION_NUM 2
#define SWP_MIGRATION_READ	(MAX_SWAPFILES + SWP_HWPOISON_NUM)
#define SWP_MIGRATION_WRITE	(MAX_SWAPFILES + SWP_HWPOISON_NUM + 1)
#else
#define SWP_MIGRATION_NUM 0
#endif
/*
 * Handling of hardware poisoned pages with memory corruption.
 */
#ifdef CONFIG_MEMORY_FAILURE
#define SWP_HWPOISON_NUM 1
#define SWP_HWPOISON		MAX_SWAPFILES
#else
#define SWP_HWPOISON_NUM 0
#endif
#define MAX_SWAPFILES \
	((1 << MAX_SWAPFILES_SHIFT) - SWP_MIGRATION_NUM - SWP_HWPOISON_NUM)
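/*
 * E.g. with CONFIG_MIGRATION and CONFIG_MEMORY_FAILURE both enabled,
 * MAX_SWAPFILES is (1 << 5) - 2 - 1 = 29: the top three swap types are
 * reserved for the special entries defined above.
 */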
/*
 * Magic header for a swap area. The first part of the union is
 * what the swap magic looks like for the old (limited to 128MB)
 * swap area format, the second part of the union adds - in the
 * old reserved area - some extra information. Note that the first
 * kilobyte is reserved for boot loader or disk label stuff...
 *
 * Having the magic at the end of the PAGE_SIZE makes detecting swap
 * areas somewhat tricky on machines that support multiple page sizes.
 * For 2.5 we'll probably want to move the magic to just beyond the
 * bootbits...
 */
union swap_header {
	struct {
		char reserved[PAGE_SIZE - 10];
		char magic[10];			/* SWAP-SPACE or SWAPSPACE2 */
	} magic;
	struct {
		char		bootbits[1024];	/* Space for disklabel etc. */
		__u32		version;
		__u32		last_page;
		__u32		nr_badpages;
		unsigned char	sws_uuid[16];
		unsigned char	sws_volume[16];
		__u32		padding[117];
		__u32		badpages[1];
	} info;
};
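/*
 * Detection sketch (assumes the first page of the area has been read
 * into "buf"; the real check lives in mm/swapfile.c):
 *
 *	union swap_header *hdr = buf;
 *	bool new_style = !memcmp(hdr->magic.magic, "SWAPSPACE2", 10);
 *
 * Only when the magic matches is hdr->info valid.
 */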
/* A swap entry has to fit into an "unsigned long", as the entry is hidden
 * in the "index" field of the swapper address space.
 */
typedef struct {
	unsigned long val;
} swp_entry_t;
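/*
 * Packing sketch: swp_entry(), swp_type() and swp_offset() from
 * <linux/swapops.h> build and split these opaque values, roughly:
 *
 *	swp_entry_t entry = swp_entry(type, offset);
 *
 * after which swp_type(entry) == type and swp_offset(entry) == offset.
 */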
/*
 * current->reclaim_state points to one of these when a task is running
 * memory reclaim
 */
struct reclaim_state {
	unsigned long reclaimed_slab;
};
#ifdef __KERNEL__

struct address_space;
struct sysinfo;
struct writeback_control;
struct zone;
/*
 * A swap extent maps a range of a swapfile's PAGE_SIZE pages onto a range of
 * disk blocks.  A list of swap extents maps the entire swapfile.  (Where the
 * term `swapfile' refers to either a blockdevice or an IS_REG file.  Apart
 * from setup, they're handled identically.)
 *
 * We always assume that blocks are of size PAGE_SIZE.
 */
struct swap_extent {
	struct list_head list;
	pgoff_t start_page;
	pgoff_t nr_pages;
	sector_t start_block;
};
/*
 * Max bad pages in the new format.
 */
#define __swapoffset(x) ((unsigned long)&((union swap_header *)0)->x)
#define MAX_SWAP_BADPAGES \
	((__swapoffset(magic.magic) - __swapoffset(info.badpages)) / sizeof(int))
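/*
 * Worked example (assumes PAGE_SIZE == 4096 and a 4-byte int):
 * __swapoffset(magic.magic) == 4086 and __swapoffset(info.badpages) == 1536,
 * so MAX_SWAP_BADPAGES == (4086 - 1536) / 4 == 637.
 */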
enum {
	SWP_USED	= (1 << 0),	/* is slot in swap_info[] used? */
	SWP_WRITEOK	= (1 << 1),	/* ok to write to this swap?	*/
	SWP_DISCARDABLE = (1 << 2),	/* blkdev supports discard */
	SWP_DISCARDING	= (1 << 3),	/* now discarding a free cluster */
	SWP_SOLIDSTATE	= (1 << 4),	/* blkdev seeks are cheap */
	SWP_CONTINUED	= (1 << 5),	/* swap_map has count continuation */
	SWP_BLKDEV	= (1 << 6),	/* it's a block device */
	SWP_FILE	= (1 << 7),	/* set after swap_activate success */
	SWP_AREA_DISCARD = (1 << 8),	/* single-time swap area discards */
	SWP_PAGE_DISCARD = (1 << 9),	/* freed swap page-cluster discards */
					/* add others here before... */
	SWP_SCANNING	= (1 << 10),	/* refcount in scan_swap_map */
};

#define SWAP_CLUSTER_MAX 32UL
#define COMPACT_CLUSTER_MAX SWAP_CLUSTER_MAX
/*
 * Ratio between the present memory in the zone and the "gap" that we're
 * allowing kswapd to shrink in addition to the per-zone high wmark, even
 * for zones that already have the high wmark satisfied, in order to
 * provide better per-zone lru behavior.  We are OK spending no more than
 * 1% of the memory on this zone balancing "gap".
 */
#define KSWAPD_ZONE_BALANCE_GAP_RATIO 100
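/*
 * E.g. kswapd may keep reclaiming in a 1GiB zone until it sits roughly
 * 1GiB / 100 = ~10MiB above its high wmark, before considering the zone
 * balanced.
 */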
#define SWAP_MAP_MAX	0x3e	/* Max duplication count, in first swap_map */
#define SWAP_MAP_BAD	0x3f	/* Note pageblock is bad, in first swap_map */
#define SWAP_HAS_CACHE	0x40	/* Flag page is cached, in first swap_map */
#define SWAP_CONT_MAX	0x7f	/* Max count, in each swap_map continuation */
#define COUNT_CONTINUED	0x80	/* See swap_map continuation for full count */
#define SWAP_MAP_SHMEM	0xbf	/* Owned by shmem/tmpfs, in first swap_map */
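/*
 * Count example (illustrative): the first 62 (SWAP_MAP_MAX) references to
 * a slot are counted directly in swap_map[]; the next reference sets
 * COUNT_CONTINUED and spills into a continuation page, each byte of which
 * holds up to a further 127 (SWAP_CONT_MAX) counts.
 */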
/*
 * The in-memory structure used to track swap areas.
 */
struct swap_info_struct {
	unsigned long	flags;		/* SWP_USED etc: see above */
	signed short	prio;		/* swap priority of this type */
	signed char	type;		/* strange name for an index */
	signed char	next;		/* next type on the swap list */
	unsigned int	max;		/* extent of the swap_map */
	unsigned char *swap_map;	/* vmalloc'ed array of usage counts */
	unsigned int lowest_bit;	/* index of first free in swap_map */
	unsigned int highest_bit;	/* index of last free in swap_map */
	unsigned int pages;		/* total of usable pages of swap */
	unsigned int inuse_pages;	/* number of those currently in use */
	unsigned int cluster_next;	/* likely index for next allocation */
	unsigned int cluster_nr;	/* countdown to next cluster search */
	unsigned int lowest_alloc;	/* while preparing discard cluster */
	unsigned int highest_alloc;	/* while preparing discard cluster */
	struct swap_extent *curr_swap_extent;
	struct swap_extent first_swap_extent;
	struct block_device *bdev;	/* swap device or bdev of swap file */
	struct file *swap_file;		/* seldom referenced */
	unsigned int old_block_size;	/* seldom referenced */
#ifdef CONFIG_FRONTSWAP
	unsigned long *frontswap_map;	/* frontswap in-use, one bit per page */
	atomic_t frontswap_pages;	/* frontswap pages in-use counter */
#endif
	spinlock_t lock;		/*
					 * protect map scan related fields like
					 * swap_map, lowest_bit, highest_bit,
					 * inuse_pages, cluster_next,
					 * cluster_nr, lowest_alloc and
					 * highest_alloc. other fields are only
					 * changed at swapon/swapoff, so are
					 * protected by swap_lock. changing
					 * flags needs to hold this lock and
					 * swap_lock. If both locks are needed,
					 * hold swap_lock first.
					 */
};
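/*
 * Lock-ordering sketch (per the comment above; "si" is a hypothetical
 * swap_info_struct pointer and swap_lock the global in mm/swapfile.c);
 * flag changes need both locks, taken in this order:
 *
 *	spin_lock(&swap_lock);
 *	spin_lock(&si->lock);
 *	si->flags |= SWP_WRITEOK;
 *	spin_unlock(&si->lock);
 *	spin_unlock(&swap_lock);
 */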
struct swap_list_t {
	int head;	/* head of priority-ordered swapfile list */
	int next;	/* swapfile to be used next */
};
/* linux/mm/page_alloc.c */
extern unsigned long totalram_pages;
extern unsigned long totalreserve_pages;
extern unsigned long dirty_balance_reserve;
extern unsigned long nr_free_buffer_pages(void);
extern unsigned long nr_free_pagecache_pages(void);

/* Definition of global_page_state not available yet */
#define nr_free_pages() global_page_state(NR_FREE_PAGES)
/* linux/mm/swap.c */
extern void __lru_cache_add(struct page *);
extern void lru_cache_add(struct page *);
extern void lru_add_page_tail(struct page *page, struct page *page_tail,
			 struct lruvec *lruvec, struct list_head *head);
extern void activate_page(struct page *);
extern void mark_page_accessed(struct page *);
extern void lru_add_drain(void);
extern void lru_add_drain_cpu(int cpu);
extern int lru_add_drain_all(void);
extern void rotate_reclaimable_page(struct page *page);
extern void deactivate_page(struct page *page);
extern void swap_setup(void);

extern void add_page_to_unevictable_list(struct page *page);
/**
 * lru_cache_add: add a page to the page lists
 * @page: the page to add
 */
static inline void lru_cache_add_anon(struct page *page)
{
	ClearPageActive(page);
	__lru_cache_add(page);
}

static inline void lru_cache_add_file(struct page *page)
{
	ClearPageActive(page);
	__lru_cache_add(page);
}
/* linux/mm/vmscan.c */
extern unsigned long try_to_free_pages(struct zonelist *zonelist, int order,
					gfp_t gfp_mask, nodemask_t *mask);
extern int __isolate_lru_page(struct page *page, isolate_mode_t mode);
extern unsigned long try_to_free_mem_cgroup_pages(struct mem_cgroup *mem,
						  gfp_t gfp_mask, bool noswap);
extern unsigned long mem_cgroup_shrink_node_zone(struct mem_cgroup *mem,
						gfp_t gfp_mask, bool noswap,
						struct zone *zone,
						unsigned long *nr_scanned);
extern unsigned long shrink_all_memory(unsigned long nr_pages);
extern int vm_swappiness;
extern int remove_mapping(struct address_space *mapping, struct page *page);
extern unsigned long vm_total_pages;
#ifdef CONFIG_NUMA
extern int zone_reclaim_mode;
extern int sysctl_min_unmapped_ratio;
extern int sysctl_min_slab_ratio;
extern int zone_reclaim(struct zone *, gfp_t, unsigned int);
#else
#define zone_reclaim_mode 0
static inline int zone_reclaim(struct zone *z, gfp_t mask, unsigned int order)
{
	return 0;
}
#endif
extern int page_evictable(struct page *page);
extern void check_move_unevictable_pages(struct page **, int nr_pages);

extern unsigned long scan_unevictable_pages;
extern int scan_unevictable_handler(struct ctl_table *, int,
					void __user *, size_t *, loff_t *);
#ifdef CONFIG_NUMA
extern int scan_unevictable_register_node(struct node *node);
extern void scan_unevictable_unregister_node(struct node *node);
#else
static inline int scan_unevictable_register_node(struct node *node)
{
	return 0;
}
static inline void scan_unevictable_unregister_node(struct node *node)
{
}
#endif
extern int kswapd_run(int nid);
extern void kswapd_stop(int nid);
#ifdef CONFIG_MEMCG
extern int mem_cgroup_swappiness(struct mem_cgroup *mem);
#else
static inline int mem_cgroup_swappiness(struct mem_cgroup *mem)
{
	return vm_swappiness;
}
#endif
#ifdef CONFIG_MEMCG_SWAP
extern void mem_cgroup_uncharge_swap(swp_entry_t ent);
#else
static inline void mem_cgroup_uncharge_swap(swp_entry_t ent)
{
}
#endif
#ifdef CONFIG_SWAP
/* linux/mm/page_io.c */
extern int swap_readpage(struct page *);
extern int swap_writepage(struct page *page, struct writeback_control *wbc);
extern void end_swap_bio_write(struct bio *bio, int err);
extern int __swap_writepage(struct page *page, struct writeback_control *wbc,
	void (*end_write_func)(struct bio *, int));
extern int swap_set_page_dirty(struct page *page);
extern void end_swap_bio_read(struct bio *bio, int err);

int add_swap_extent(struct swap_info_struct *sis, unsigned long start_page,
		unsigned long nr_pages, sector_t start_block);
int generic_swapfile_activate(struct swap_info_struct *, struct file *,
		sector_t *);
/* linux/mm/swap_state.c */
extern struct address_space swapper_spaces[];
#define swap_address_space(entry) (&swapper_spaces[swp_type(entry)])
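/*
 * Lookup sketch: a swap-cache page for "entry" lives in the per-type
 * address_space at index swp_offset(entry), along the lines of:
 *
 *	struct address_space *as = swap_address_space(entry);
 *	struct page *page = find_get_page(as, swp_offset(entry));
 */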
extern unsigned long total_swapcache_pages(void);
extern void show_swap_cache_info(void);
extern int add_to_swap(struct page *, struct list_head *list);
extern int add_to_swap_cache(struct page *, swp_entry_t, gfp_t);
extern int __add_to_swap_cache(struct page *page, swp_entry_t entry);
extern void __delete_from_swap_cache(struct page *);
extern void delete_from_swap_cache(struct page *);
extern void free_page_and_swap_cache(struct page *);
extern void free_pages_and_swap_cache(struct page **, int);
extern struct page *lookup_swap_cache(swp_entry_t);
extern struct page *read_swap_cache_async(swp_entry_t, gfp_t,
			struct vm_area_struct *vma, unsigned long addr);
extern struct page *swapin_readahead(swp_entry_t, gfp_t,
			struct vm_area_struct *vma, unsigned long addr);
/* linux/mm/swapfile.c */
extern atomic_long_t nr_swap_pages;
extern long total_swap_pages;

/* Swap 50% full? Release swapcache more aggressively.. */
static inline bool vm_swap_full(void)
{
	return atomic_long_read(&nr_swap_pages) * 2 < total_swap_pages;
}
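/*
 * E.g. with total_swap_pages == 1000 and 400 free slots remaining,
 * 400 * 2 < 1000, so vm_swap_full() is true: swap is over half full.
 */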
static inline long get_nr_swap_pages(void)
{
	return atomic_long_read(&nr_swap_pages);
}
extern void si_swapinfo(struct sysinfo *);
extern swp_entry_t get_swap_page(void);
extern swp_entry_t get_swap_page_of_type(int);
extern int add_swap_count_continuation(swp_entry_t, gfp_t);
extern void swap_shmem_alloc(swp_entry_t);
extern int swap_duplicate(swp_entry_t);
extern int swapcache_prepare(swp_entry_t);
extern void swap_free(swp_entry_t);
extern void swapcache_free(swp_entry_t, struct page *page);
extern int free_swap_and_cache(swp_entry_t);
extern int swap_type_of(dev_t, sector_t, struct block_device **);
extern unsigned int count_swap_pages(int, int);
extern sector_t map_swap_page(struct page *, struct block_device **);
extern sector_t swapdev_block(int, pgoff_t);
extern int page_swapcount(struct page *);
extern struct swap_info_struct *page_swap_info(struct page *);
extern int reuse_swap_page(struct page *);
extern int try_to_free_swap(struct page *);
struct backing_dev_info;
#ifdef CONFIG_MEMCG
extern void
mem_cgroup_uncharge_swapcache(struct page *page, swp_entry_t ent, bool swapout);
#else
static inline void
mem_cgroup_uncharge_swapcache(struct page *page, swp_entry_t ent, bool swapout)
{
}
#endif
#else /* CONFIG_SWAP */

#define get_nr_swap_pages()			0L
#define total_swap_pages			0L
#define total_swapcache_pages()			0UL
#define vm_swap_full()				0

#define si_swapinfo(val) \
	do { (val)->freeswap = (val)->totalswap = 0; } while (0)
/* only sparc cannot include linux/pagemap.h in this file
 * so leave page_cache_release and release_pages undeclared... */
#define free_page_and_swap_cache(page) \
	page_cache_release(page)
#define free_pages_and_swap_cache(pages, nr) \
	release_pages((pages), (nr), 0);

static inline void show_swap_cache_info(void)
{
}

#define free_swap_and_cache(swp)	is_migration_entry(swp)
#define swapcache_prepare(swp)		is_migration_entry(swp)
static inline int add_swap_count_continuation(swp_entry_t swp, gfp_t gfp_mask)
{
	return 0;
}

static inline void swap_shmem_alloc(swp_entry_t swp)
{
}

static inline int swap_duplicate(swp_entry_t swp)
{
	return 0;
}

static inline void swap_free(swp_entry_t swp)
{
}

static inline void swapcache_free(swp_entry_t swp, struct page *page)
{
}

static inline struct page *swapin_readahead(swp_entry_t swp, gfp_t gfp_mask,
			struct vm_area_struct *vma, unsigned long addr)
{
	return NULL;
}

static inline int swap_writepage(struct page *p, struct writeback_control *wbc)
{
	return 0;
}

static inline struct page *lookup_swap_cache(swp_entry_t swp)
{
	return NULL;
}

static inline int add_to_swap(struct page *page, struct list_head *list)
{
	return 0;
}

static inline int add_to_swap_cache(struct page *page, swp_entry_t entry,
							gfp_t gfp_mask)
{
	return -1;
}

static inline void __delete_from_swap_cache(struct page *page)
{
}

static inline void delete_from_swap_cache(struct page *page)
{
}

static inline int page_swapcount(struct page *page)
{
	return 0;
}

#define reuse_swap_page(page)	(page_mapcount(page) == 1)

static inline int try_to_free_swap(struct page *page)
{
	return 0;
}

static inline swp_entry_t get_swap_page(void)
{
	swp_entry_t entry;
	entry.val = 0;
	return entry;
}
static inline void
mem_cgroup_uncharge_swapcache(struct page *page, swp_entry_t ent, bool swapout)
{
}
#endif /* CONFIG_SWAP */
#endif /* __KERNEL__ */
#endif /* _LINUX_SWAP_H */