#include <linux/spinlock.h>
#include <linux/linkage.h>
#include <linux/mmzone.h>
#include <linux/list.h>
#include <linux/memcontrol.h>
#include <linux/sched.h>
#include <linux/node.h>
#include <linux/atomic.h>
struct notifier_block;
#define SWAP_FLAG_PREFER	0x8000	/* set if swap priority specified */
#define SWAP_FLAG_PRIO_MASK	0x7fff
#define SWAP_FLAG_PRIO_SHIFT	0
#define SWAP_FLAG_DISCARD	0x10000 /* discard swap cluster after use */

#define SWAP_FLAGS_VALID	(SWAP_FLAG_PRIO_MASK | SWAP_FLAG_PREFER | \
				 SWAP_FLAG_DISCARD)
static inline int current_is_kswapd(void)
{
	return current->flags & PF_KSWAPD;
}
/*
 * MAX_SWAPFILES defines the maximum number of swap types: things which can
 * be swapped to. The swap type and the offset into that swap type are
 * encoded into pte's and into pgoff_t's in the swapcache. Using five bits
 * for the type means that the maximum number of swapcache pages is 27 bits
 * on 32-bit-pgoff_t architectures. And that assumes that the architecture
 * packs the type/offset into the pte as 5/27 as well.
 */
#define MAX_SWAPFILES_SHIFT	5
/*
 * Use some of the swap files numbers for other purposes. This
 * is a convenient way to hook into the VM to trigger special
 * actions on certain page faults.
 */

/*
 * NUMA node memory migration support
 */
#ifdef CONFIG_MIGRATION
#define SWP_MIGRATION_NUM	2
#define SWP_MIGRATION_READ	(MAX_SWAPFILES + SWP_HWPOISON_NUM)
#define SWP_MIGRATION_WRITE	(MAX_SWAPFILES + SWP_HWPOISON_NUM + 1)
#else
#define SWP_MIGRATION_NUM	0
#endif
/*
 * Handling of hardware poisoned pages with memory corruption.
 */
#ifdef CONFIG_MEMORY_FAILURE
#define SWP_HWPOISON_NUM	1
#define SWP_HWPOISON		MAX_SWAPFILES
#else
#define SWP_HWPOISON_NUM	0
#endif
#define MAX_SWAPFILES \
	((1 << MAX_SWAPFILES_SHIFT) - SWP_MIGRATION_NUM - SWP_HWPOISON_NUM)
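
/*
 * Illustrative sketch (not defined in this header): with the 5/27 split
 * described above, a swap entry carries a swap type index plus a page
 * offset, packed and unpacked by the helpers in <linux/swapops.h>:
 *
 *	swp_entry_t entry = swp_entry(type, offset);
 *	unsigned int t = swp_type(entry);
 *	pgoff_t off = swp_offset(entry);
 *
 * With 27 offset bits and 4 KiB pages, that bounds a swap area at
 * 2^27 pages, i.e. 512 GiB, on 32-bit-pgoff_t architectures.
 */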
/*
 * Magic header for a swap area. The first part of the union is
 * what the swap magic looks like for the old (limited to 128MB)
 * swap area format, the second part of the union adds - in the
 * old reserved area - some extra information. Note that the first
 * kilobyte is reserved for boot loader or disk label stuff...
 *
 * Having the magic at the end of the PAGE_SIZE makes detecting swap
 * areas somewhat tricky on machines that support multiple page sizes.
 * For 2.5 we'll probably want to move the magic to just beyond the
 * bootbits...
 */
union swap_header {
	struct {
		char reserved[PAGE_SIZE - 10];
		char magic[10];			/* SWAP-SPACE or SWAPSPACE2 */
	} magic;
	struct {
		char		bootbits[1024];	/* Space for disklabel etc. */
		__u32		version;
		__u32		last_page;
		__u32		nr_badpages;
		unsigned char	sws_uuid[16];
		unsigned char	sws_volume[16];
		__u32		padding[117];
		__u32		badpages[1];
	} info;
};
/*
 * A swap entry has to fit into an "unsigned long", as the entry is
 * hidden in the "index" field of the swapper address space.
 */
typedef struct {
	unsigned long val;
} swp_entry_t;
/*
 * current->reclaim_state points to one of these when a task is running
 * memory reclaim.
 */
struct reclaim_state {
	unsigned long reclaimed_slab;
};
struct address_space;
struct writeback_control;
/*
 * A swap extent maps a range of a swapfile's PAGE_SIZE pages onto a range of
 * disk blocks. A list of swap extents maps the entire swapfile. (Where the
 * term `swapfile' refers to either a blockdevice or an IS_REG file; apart
 * from setup, they're handled identically.)
 *
 * We always assume that blocks are of size PAGE_SIZE.
 */
struct swap_extent {
	struct list_head list;
	pgoff_t start_page;
	pgoff_t nr_pages;
	sector_t start_block;
};
/*
 * Max bad pages in the new format..
 */
#define __swapoffset(x) ((unsigned long)&((union swap_header *)0)->x)
#define MAX_SWAP_BADPAGES \
	((__swapoffset(magic.magic) - __swapoffset(info.badpages)) / sizeof(int))
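
/*
 * Illustrative note (an assumption about intent, not text from this
 * header): __swapoffset() is the classic offsetof() idiom, so the two
 * expressions below compute the same value:
 *
 *	__swapoffset(info.badpages)
 *	offsetof(union swap_header, info.badpages)
 *
 * MAX_SWAP_BADPAGES is therefore the number of 32-bit bad-page slots that
 * fit between the start of info.badpages and the 10-byte magic string at
 * the end of the page.
 */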
enum {
	SWP_USED	= (1 << 0),	/* is slot in swap_info[] used? */
	SWP_WRITEOK	= (1 << 1),	/* ok to write to this swap? */
	SWP_DISCARDABLE = (1 << 2),	/* swapon+blkdev support discard */
	SWP_DISCARDING	= (1 << 3),	/* now discarding a free cluster */
	SWP_SOLIDSTATE	= (1 << 4),	/* blkdev seeks are cheap */
	SWP_CONTINUED	= (1 << 5),	/* swap_map has count continuation */
	SWP_BLKDEV	= (1 << 6),	/* it's a block device */
	SWP_FILE	= (1 << 7),	/* set after swap_activate success */
					/* add others here before... */
	SWP_SCANNING	= (1 << 8),	/* refcount in scan_swap_map */
};
#define SWAP_CLUSTER_MAX 32UL
#define COMPACT_CLUSTER_MAX SWAP_CLUSTER_MAX
/*
 * Ratio between the present memory in the zone and the "gap" that
 * we allow kswapd to shrink in addition to the per-zone high
 * wmark, even for zones that already have the high wmark satisfied,
 * in order to provide better per-zone lru behavior. We are willing to
 * spend no more than 1% of the zone's memory on this balancing "gap".
 */
#define KSWAPD_ZONE_BALANCE_GAP_RATIO 100
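
/*
 * Sketch of how the ratio is typically consumed by the reclaim code (an
 * assumption about the caller, not something defined here): the gap is
 * derived from the zone size and rounded up, roughly
 *
 *	balance_gap = (zone->managed_pages + KSWAPD_ZONE_BALANCE_GAP_RATIO - 1)
 *			/ KSWAPD_ZONE_BALANCE_GAP_RATIO;
 *
 * i.e. about 1% of the zone.
 */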
#define SWAP_MAP_MAX	0x3e	/* Max duplication count, in first swap_map */
#define SWAP_MAP_BAD	0x3f	/* Note pageblock is bad, in first swap_map */
#define SWAP_HAS_CACHE	0x40	/* Flag page is cached, in first swap_map */
#define SWAP_CONT_MAX	0x7f	/* Max count, in each swap_map continuation */
#define COUNT_CONTINUED	0x80	/* See swap_map continuation for full count */
#define SWAP_MAP_SHMEM	0xbf	/* Owned by shmem/tmpfs, in first swap_map */
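
/*
 * Worked example (illustrative, derived only from the values above): a
 * first-level swap_map[] byte of 0x43 means "duplication count 3, page
 * also present in the swap cache" (SWAP_HAS_CACHE | 3).  A byte with
 * COUNT_CONTINUED (0x80) set says the real count overflows into a
 * continuation page, and 0xbf (SWAP_MAP_SHMEM) marks an entry owned by
 * shmem/tmpfs.
 */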
/*
 * The in-memory structure used to track swap areas.
 */
struct swap_info_struct {
	unsigned long	flags;		/* SWP_USED etc: see above */
	signed short	prio;		/* swap priority of this type */
	signed char	type;		/* strange name for an index */
	signed char	next;		/* next type on the swap list */
	unsigned int	max;		/* extent of the swap_map */
	unsigned char *swap_map;	/* vmalloc'ed array of usage counts */
	unsigned int lowest_bit;	/* index of first free in swap_map */
	unsigned int highest_bit;	/* index of last free in swap_map */
	unsigned int pages;		/* total of usable pages of swap */
	unsigned int inuse_pages;	/* number of those currently in use */
	unsigned int cluster_next;	/* likely index for next allocation */
	unsigned int cluster_nr;	/* countdown to next cluster search */
	unsigned int lowest_alloc;	/* while preparing discard cluster */
	unsigned int highest_alloc;	/* while preparing discard cluster */
	struct swap_extent *curr_swap_extent;
	struct swap_extent first_swap_extent;
	struct block_device *bdev;	/* swap device or bdev of swap file */
	struct file *swap_file;		/* seldom referenced */
	unsigned int old_block_size;	/* seldom referenced */
#ifdef CONFIG_FRONTSWAP
	unsigned long *frontswap_map;	/* frontswap in-use, one bit per page */
	atomic_t frontswap_pages;	/* frontswap pages in-use counter */
#endif
	spinlock_t lock;		/*
					 * protect map scan related fields like
					 * swap_map, lowest_bit, highest_bit,
					 * inuse_pages, cluster_next,
					 * cluster_nr, lowest_alloc and
					 * highest_alloc. Other fields are only
					 * changed at swapon/swapoff, so are
					 * protected by swap_lock. Changing
					 * flags needs to hold both this lock
					 * and swap_lock; when both are needed,
					 * take swap_lock first.
					 */
};

struct swap_list_t {
	int head;	/* head of priority-ordered swapfile list */
	int next;	/* swapfile to be used next */
};
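
/*
 * Illustrative sketch (an assumption about the implementation in
 * mm/swapfile.c, not an interface declared here): swap areas are walked
 * in priority order by following the ->next indices, starting from
 * swap_list.head:
 *
 *	for (type = swap_list.head; type >= 0; type = swap_info[type]->next)
 *		...;
 */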
/* linux/mm/page_alloc.c */
extern unsigned long totalram_pages;
extern unsigned long totalreserve_pages;
extern unsigned long dirty_balance_reserve;
extern unsigned long nr_free_buffer_pages(void);
extern unsigned long nr_free_pagecache_pages(void);

/* Definition of global_page_state not available yet */
#define nr_free_pages() global_page_state(NR_FREE_PAGES)
/* linux/mm/swap.c */
extern void __lru_cache_add(struct page *, enum lru_list lru);
extern void lru_cache_add_lru(struct page *, enum lru_list lru);
extern void lru_add_page_tail(struct page *page, struct page *page_tail,
			 struct lruvec *lruvec, struct list_head *head);
extern void activate_page(struct page *);
extern void mark_page_accessed(struct page *);
extern void lru_add_drain(void);
extern void lru_add_drain_cpu(int cpu);
extern int lru_add_drain_all(void);
extern void rotate_reclaimable_page(struct page *page);
extern void deactivate_page(struct page *page);
extern void swap_setup(void);

extern void add_page_to_unevictable_list(struct page *page);
/**
 * lru_cache_add: add a page to the page lists
 * @page: the page to add
 */
static inline void lru_cache_add_anon(struct page *page)
{
	__lru_cache_add(page, LRU_INACTIVE_ANON);
}

static inline void lru_cache_add_file(struct page *page)
{
	__lru_cache_add(page, LRU_INACTIVE_FILE);
}
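
/*
 * Usage note (illustrative, not a rule stated in this header): a caller
 * that has just instantiated an anonymous page typically puts it on the
 * inactive anon LRU with
 *
 *	lru_cache_add_anon(page);
 *
 * while file-backed pages go through lru_cache_add_file() instead.
 */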
/* linux/mm/vmscan.c */
extern unsigned long try_to_free_pages(struct zonelist *zonelist, int order,
					gfp_t gfp_mask, nodemask_t *mask);
extern int __isolate_lru_page(struct page *page, isolate_mode_t mode);
extern unsigned long try_to_free_mem_cgroup_pages(struct mem_cgroup *mem,
						  gfp_t gfp_mask, bool noswap);
extern unsigned long mem_cgroup_shrink_node_zone(struct mem_cgroup *mem,
						gfp_t gfp_mask, bool noswap,
						struct zone *zone,
						unsigned long *nr_scanned);
extern unsigned long shrink_all_memory(unsigned long nr_pages);
extern int vm_swappiness;
extern int remove_mapping(struct address_space *mapping, struct page *page);
extern unsigned long vm_total_pages;
#ifdef CONFIG_NUMA
extern int zone_reclaim_mode;
extern int sysctl_min_unmapped_ratio;
extern int sysctl_min_slab_ratio;
extern int zone_reclaim(struct zone *, gfp_t, unsigned int);
#else
#define zone_reclaim_mode 0
static inline int zone_reclaim(struct zone *z, gfp_t mask, unsigned int order)
{
	return 0;
}
#endif
extern int page_evictable(struct page *page);
extern void check_move_unevictable_pages(struct page **, int nr_pages);

extern unsigned long scan_unevictable_pages;
extern int scan_unevictable_handler(struct ctl_table *, int,
					void __user *, size_t *, loff_t *);
#ifdef CONFIG_NUMA
extern int scan_unevictable_register_node(struct node *node);
extern void scan_unevictable_unregister_node(struct node *node);
#else
static inline int scan_unevictable_register_node(struct node *node)
{
	return 0;
}
static inline void scan_unevictable_unregister_node(struct node *node)
{
}
#endif
extern int kswapd_run(int nid);
extern void kswapd_stop(int nid);
#ifdef CONFIG_MEMCG
extern int mem_cgroup_swappiness(struct mem_cgroup *mem);
#else
static inline int mem_cgroup_swappiness(struct mem_cgroup *mem)
{
	return vm_swappiness;
}
#endif
#ifdef CONFIG_MEMCG_SWAP
extern void mem_cgroup_uncharge_swap(swp_entry_t ent);
#else
static inline void mem_cgroup_uncharge_swap(swp_entry_t ent)
{
}
#endif
#ifdef CONFIG_SWAP
/* linux/mm/page_io.c */
extern int swap_readpage(struct page *);
extern int swap_writepage(struct page *page, struct writeback_control *wbc);
extern void end_swap_bio_write(struct bio *bio, int err);
extern int __swap_writepage(struct page *page, struct writeback_control *wbc,
	void (*end_write_func)(struct bio *, int));
extern int swap_set_page_dirty(struct page *page);
extern void end_swap_bio_read(struct bio *bio, int err);
int add_swap_extent(struct swap_info_struct *sis, unsigned long start_page,
		unsigned long nr_pages, sector_t start_block);
int generic_swapfile_activate(struct swap_info_struct *, struct file *,
		sector_t *);
/* linux/mm/swap_state.c */
extern struct address_space swapper_spaces[];
#define swap_address_space(entry) (&swapper_spaces[swp_type(entry)])
extern unsigned long total_swapcache_pages(void);
extern void show_swap_cache_info(void);
extern int add_to_swap(struct page *, struct list_head *list);
extern int add_to_swap_cache(struct page *, swp_entry_t, gfp_t);
extern int __add_to_swap_cache(struct page *page, swp_entry_t entry);
extern void __delete_from_swap_cache(struct page *);
extern void delete_from_swap_cache(struct page *);
extern void free_page_and_swap_cache(struct page *);
extern void free_pages_and_swap_cache(struct page **, int);
extern struct page *lookup_swap_cache(swp_entry_t);
extern struct page *read_swap_cache_async(swp_entry_t, gfp_t,
			struct vm_area_struct *vma, unsigned long addr);
extern struct page *swapin_readahead(swp_entry_t, gfp_t,
			struct vm_area_struct *vma, unsigned long addr);
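
/*
 * Illustrative sketch (an assumption loosely modelled on the fault path
 * in mm/memory.c, not something defined in this header): a swapped-out
 * pte is usually resolved by probing the swap cache first and falling
 * back to readahead:
 *
 *	page = lookup_swap_cache(entry);
 *	if (!page)
 *		page = swapin_readahead(entry, GFP_HIGHUSER_MOVABLE,
 *					vma, address);
 */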
/* linux/mm/swapfile.c */
extern atomic_long_t nr_swap_pages;
extern long total_swap_pages;

/* Swap 50% full? Release swapcache more aggressively.. */
static inline bool vm_swap_full(void)
{
	return atomic_long_read(&nr_swap_pages) * 2 < total_swap_pages;
}

static inline long get_nr_swap_pages(void)
{
	return atomic_long_read(&nr_swap_pages);
}
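
/*
 * Worked example (illustrative): with total_swap_pages == 1000 and 400
 * free swap pages left in nr_swap_pages, 400 * 2 = 800 < 1000, so
 * vm_swap_full() returns true: more than half of swap is in use, and
 * swap cache pages are reclaimed more aggressively.
 */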
extern void si_swapinfo(struct sysinfo *);
extern swp_entry_t get_swap_page(void);
extern swp_entry_t get_swap_page_of_type(int);
extern int add_swap_count_continuation(swp_entry_t, gfp_t);
extern void swap_shmem_alloc(swp_entry_t);
extern int swap_duplicate(swp_entry_t);
extern int swapcache_prepare(swp_entry_t);
extern void swap_free(swp_entry_t);
extern void swapcache_free(swp_entry_t, struct page *page);
extern int free_swap_and_cache(swp_entry_t);
extern int swap_type_of(dev_t, sector_t, struct block_device **);
extern unsigned int count_swap_pages(int, int);
extern sector_t map_swap_page(struct page *, struct block_device **);
extern sector_t swapdev_block(int, pgoff_t);
extern int page_swapcount(struct page *);
extern struct swap_info_struct *page_swap_info(struct page *);
extern int reuse_swap_page(struct page *);
extern int try_to_free_swap(struct page *);
struct backing_dev_info;
#ifdef CONFIG_MEMCG
extern void
mem_cgroup_uncharge_swapcache(struct page *page, swp_entry_t ent, bool swapout);
#else
static inline void
mem_cgroup_uncharge_swapcache(struct page *page, swp_entry_t ent, bool swapout)
{
}
#endif
#else /* CONFIG_SWAP */

#define get_nr_swap_pages()			0L
#define total_swap_pages			0L
#define total_swapcache_pages()			0UL
#define vm_swap_full()				0

#define si_swapinfo(val) \
	do { (val)->freeswap = (val)->totalswap = 0; } while (0)
/* only sparc cannot include linux/pagemap.h in this file
 * so leave page_cache_release and release_pages undeclared... */
#define free_page_and_swap_cache(page) \
	page_cache_release(page)
#define free_pages_and_swap_cache(pages, nr) \
	release_pages((pages), (nr), 0);
static inline void show_swap_cache_info(void)
{
}

#define free_swap_and_cache(swp)	is_migration_entry(swp)
#define swapcache_prepare(swp)		is_migration_entry(swp)

static inline int add_swap_count_continuation(swp_entry_t swp, gfp_t gfp_mask)
{
	return 0;
}

static inline void swap_shmem_alloc(swp_entry_t swp)
{
}

static inline int swap_duplicate(swp_entry_t swp)
{
	return 0;
}

static inline void swap_free(swp_entry_t swp)
{
}

static inline void swapcache_free(swp_entry_t swp, struct page *page)
{
}

static inline struct page *swapin_readahead(swp_entry_t swp, gfp_t gfp_mask,
			struct vm_area_struct *vma, unsigned long addr)
{
	return NULL;
}
static inline int swap_writepage(struct page *p, struct writeback_control *wbc)
{
	return 0;
}

static inline struct page *lookup_swap_cache(swp_entry_t swp)
{
	return NULL;
}

static inline int add_to_swap(struct page *page, struct list_head *list)
{
	return 0;
}

static inline int add_to_swap_cache(struct page *page, swp_entry_t entry,
							gfp_t gfp_mask)
{
	return -1;
}

static inline void __delete_from_swap_cache(struct page *page)
{
}

static inline void delete_from_swap_cache(struct page *page)
{
}

static inline int page_swapcount(struct page *page)
{
	return 0;
}

#define reuse_swap_page(page)	(page_mapcount(page) == 1)

static inline int try_to_free_swap(struct page *page)
{
	return 0;
}

static inline swp_entry_t get_swap_page(void)
{
	swp_entry_t entry;
	entry.val = 0;
	return entry;
}

static inline void
mem_cgroup_uncharge_swapcache(struct page *page, swp_entry_t ent)
{
}
#endif /* CONFIG_SWAP */
#endif /* __KERNEL__ */
#endif /* _LINUX_SWAP_H */