/*
 *  linux/mm/vmscan.c
 *
 *  Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
 *
 *  Swap reorganised 29.12.95, Stephen Tweedie.
 *  kswapd added: 7.1.96  sct
 *  Removed kswapd_ctl limits, and swap out as many pages as needed
 *  to bring the system back to freepages.high: 2.4.97, Rik van Riel.
 *  Zone aware kswapd started 02/00, Kanoj Sarcar (kanoj@sgi.com).
 *  Multiqueue VM started 5.8.00, Rik van Riel.
 */

#include <linux/mm.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/kernel_stat.h>
#include <linux/swap.h>
#include <linux/pagemap.h>
#include <linux/init.h>
#include <linux/highmem.h>
#include <linux/file.h>
#include <linux/writeback.h>
#include <linux/suspend.h>
#include <linux/blkdev.h>
#include <linux/buffer_head.h>	/* for try_to_release_page(),
					buffer_heads_over_limit */
#include <linux/mm_inline.h>
#include <linux/pagevec.h>
#include <linux/backing-dev.h>
#include <linux/rmap-locking.h>
#include <linux/topology.h>
#include <linux/cpu.h>
#include <linux/notifier.h>

#include <asm/pgalloc.h>
#include <asm/tlbflush.h>
#include <asm/div64.h>

#include <linux/swapops.h>

/*
 * From 0 .. 100.  Higher means more swappy.
 */
int vm_swappiness = 60;
static long total_memory;

#define lru_to_page(_head) (list_entry((_head)->prev, struct page, lru))
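
/*
 * lru_to_page() picks the page at the tail (->prev) of an LRU list, i.e.
 * the entry that has been queued the longest and is therefore the next
 * candidate for scanning.
 */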

#ifdef ARCH_HAS_PREFETCH
#define prefetch_prev_lru_page(_page, _base, _field)			\
	do {								\
		if ((_page)->lru.prev != _base) {			\
			struct page *prev;				\
			prev = lru_to_page(&(_page->lru));		\
			prefetch(&prev->_field);			\
		}							\
	} while (0)
#else
#define prefetch_prev_lru_page(_page, _base, _field) do { } while (0)
#endif

#ifdef ARCH_HAS_PREFETCHW
#define prefetchw_prev_lru_page(_page, _base, _field)			\
	do {								\
		if ((_page)->lru.prev != _base) {			\
			struct page *prev;				\
			prev = lru_to_page(&(_page->lru));		\
			prefetchw(&prev->_field);			\
		}							\
	} while (0)
#else
#define prefetchw_prev_lru_page(_page, _base, _field) do { } while (0)
#endif

/*
 * The list of shrinker callbacks used by the VM to apply pressure to
 * ageable caches.
 */
struct shrinker {
	shrinker_t		shrinker;
	struct list_head	list;
	int			seeks;	/* seeks to recreate an obj */
	long			nr;	/* objs pending delete */
};

static LIST_HEAD(shrinker_list);
static DECLARE_MUTEX(shrinker_sem);

/*
 * Add a shrinker callback to be called from the vm
 */
struct shrinker *set_shrinker(int seeks, shrinker_t theshrinker)
{
	struct shrinker *shrinker;

	shrinker = kmalloc(sizeof(*shrinker), GFP_KERNEL);
	if (shrinker) {
		shrinker->shrinker = theshrinker;
		shrinker->seeks = seeks;
		shrinker->nr = 0;
		down(&shrinker_sem);
		list_add(&shrinker->list, &shrinker_list);
		up(&shrinker_sem);
	}
	return shrinker;
}
EXPORT_SYMBOL(set_shrinker);

void remove_shrinker(struct shrinker *shrinker)
{
	down(&shrinker_sem);
	list_del(&shrinker->list);
	up(&shrinker_sem);
	kfree(shrinker);
}
EXPORT_SYMBOL(remove_shrinker);
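
/*
 * Illustrative usage sketch (not part of the original file): a cache that
 * can shed objects registers a shrinker_t callback and unregisters it on
 * teardown.  "my_cache_shrink" is a hypothetical callback; DEFAULT_SEEKS
 * comes from <linux/mm.h>.
 *
 *	static struct shrinker *my_shrinker;
 *
 *	my_shrinker = set_shrinker(DEFAULT_SEEKS, my_cache_shrink);
 *	...
 *	remove_shrinker(my_shrinker);
 *
 * The callback is invoked as my_cache_shrink(nr_to_scan, gfp_mask); when
 * nr_to_scan is 0 it should only report how many objects it could free,
 * which is how shrink_slab() below sizes its work.
 */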

#define SHRINK_BATCH 128
/*
 * Call the shrink functions to age shrinkable caches
 *
 * Here we assume it costs one seek to replace a lru page and that it also
 * takes a seek to recreate a cache object.  With this in mind we age equal
 * percentages of the lru and ageable caches.  This should balance the seeks
 * generated by these structures.
 *
 * If the vm encountered mapped pages on the LRU it increases the pressure on
 * slab to avoid swapping.
 *
 * We do weird things to avoid (scanned*seeks*entries) overflowing 32 bits.
 */
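
/*
 * Worked example (illustrative numbers): with scanned == 1024 LRU pages, a
 * shrinker whose ->seeks == 2 and whose callback reports 10000 objects, and
 * nr_used_zone_pages() == 100000:
 *
 *	delta = (4 * 1024) / 2          ==  2048
 *	delta = 2048 * 10000 / 100001   ~=   204
 *
 * so about 2% of that cache is queued for aging - twice the ~1% of LRU
 * pages just scanned, the factor being 4 / ->seeks.  Once ->nr exceeds
 * SHRINK_BATCH the callback is invoked in chunks of at most SHRINK_BATCH.
 */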
static int shrink_slab(unsigned long scanned, unsigned int gfp_mask)
{
	struct shrinker *shrinker;
	long pages;

	if (down_trylock(&shrinker_sem))
		return 0;

	pages = nr_used_zone_pages();
	list_for_each_entry(shrinker, &shrinker_list, list) {
		unsigned long long delta;

		delta = (4 * scanned) / shrinker->seeks;
		delta *= (*shrinker->shrinker)(0, gfp_mask);
		do_div(delta, pages + 1);
		shrinker->nr += delta;
		if (shrinker->nr > SHRINK_BATCH) {
			long nr_to_scan = shrinker->nr;

			shrinker->nr = 0;
			mod_page_state(slabs_scanned, nr_to_scan);
			while (nr_to_scan) {
				long this_scan = nr_to_scan;

				if (this_scan > SHRINK_BATCH)
					this_scan = SHRINK_BATCH;
				(*shrinker->shrinker)(this_scan, gfp_mask);
				nr_to_scan -= this_scan;
			}
		}
	}
	up(&shrinker_sem);
	return 0;
}

/* Must be called with page's pte_chain_lock held. */
static inline int page_mapping_inuse(struct page *page)
{
	struct address_space *mapping = page->mapping;

	/* Page is in somebody's page tables. */
	if (page_mapped(page))
		return 1;

	/* XXX: does this happen ? */
	if (!mapping)
		return 0;

	/* Be more reluctant to reclaim swapcache than pagecache */
	if (PageSwapCache(page))
		return 1;

	/* File is mmap'd by somebody. */
	if (!list_empty(&mapping->i_mmap))
		return 1;
	if (!list_empty(&mapping->i_mmap_shared))
		return 1;

	return 0;
}

static inline int is_page_cache_freeable(struct page *page)
{
	return page_count(page) - !!PagePrivate(page) == 2;
}
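
/*
 * Reference-count sketch for is_page_cache_freeable() above: a freeable
 * page is held by the page cache plus the caller who isolated it from the
 * LRU (hence the "== 2"); buffer heads at page->private add one more
 * reference, which the !!PagePrivate(page) term discounts.
 */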

static int may_write_to_queue(struct backing_dev_info *bdi)
{
	if (current_is_kswapd())
		return 1;
	if (current_is_pdflush())	/* This is unlikely, but why not... */
		return 1;
	if (!bdi_write_congested(bdi))
		return 1;
	if (bdi == current->backing_dev_info)
		return 1;
	return 0;
}

/*
 * We detected a synchronous write error writing a page out.  Probably
 * -ENOSPC.  We need to propagate that into the address_space for a subsequent
 * fsync(), msync() or close().
 *
 * The tricky part is that after writepage we cannot touch the mapping: nothing
 * prevents it from being freed up.  But we have a ref on the page and once
 * that page is locked, the mapping is pinned.
 *
 * We're allowed to run sleeping lock_page() here because we know the caller has
 * __GFP_FS.
 */
static void handle_write_error(struct address_space *mapping,
				struct page *page, int error)
{
	lock_page(page);
	if (page->mapping == mapping) {
		if (error == -ENOSPC)
			set_bit(AS_ENOSPC, &mapping->flags);
		else
			set_bit(AS_EIO, &mapping->flags);
	}
	unlock_page(page);
}

/*
 * shrink_list returns the number of reclaimed pages
 */
static int
shrink_list(struct list_head *page_list, unsigned int gfp_mask, int *nr_scanned)
{
	struct address_space *mapping;
	LIST_HEAD(ret_pages);
	struct pagevec freed_pvec;
	int pgactivate = 0;
	int ret = 0;

	pagevec_init(&freed_pvec, 1);
	while (!list_empty(page_list)) {
		struct page *page;
		int may_enter_fs;
		int referenced;

		page = lru_to_page(page_list);
		list_del(&page->lru);

		if (TestSetPageLocked(page))
			goto keep;

		/* Double the slab pressure for mapped and swapcache pages */
		if (page_mapped(page) || PageSwapCache(page))
			(*nr_scanned)++;

		BUG_ON(PageActive(page));

		if (PageWriteback(page))
			goto keep_locked;

		pte_chain_lock(page);
		referenced = page_referenced(page);
		if (referenced && page_mapping_inuse(page)) {
			/* In active use or really unfreeable.  Activate it. */
			pte_chain_unlock(page);
			goto activate_locked;
		}

		mapping = page->mapping;

#ifdef CONFIG_SWAP
		/*
		 * Anonymous process memory without backing store. Try to
		 * allocate it some swap space here.
		 *
		 * XXX: implement swap clustering ?
		 */
		if (page_mapped(page) && !mapping && !PagePrivate(page)) {
			pte_chain_unlock(page);
			if (!add_to_swap(page))
				goto activate_locked;
			pte_chain_lock(page);
			mapping = page->mapping;
		}
#endif /* CONFIG_SWAP */

		may_enter_fs = (gfp_mask & __GFP_FS) ||
			(PageSwapCache(page) && (gfp_mask & __GFP_IO));

		/*
		 * The page is mapped into the page tables of one or more
		 * processes. Try to unmap it here.
		 */
		if (page_mapped(page) && mapping) {
			switch (try_to_unmap(page)) {
			case SWAP_FAIL:
				pte_chain_unlock(page);
				goto activate_locked;
			case SWAP_AGAIN:
				pte_chain_unlock(page);
				goto keep_locked;
			case SWAP_SUCCESS:
				; /* try to free the page below */
			}
		}
		pte_chain_unlock(page);

		/*
		 * If the page is dirty, only perform writeback if that write
		 * will be non-blocking.  To prevent this allocation from being
		 * stalled by pagecache activity.  But note that there may be
		 * stalls if we need to run get_block().  We could test
		 * PagePrivate for that.
		 *
		 * If this process is currently in generic_file_write() against
		 * this page's queue, we can perform writeback even if that
		 * will block.
		 *
		 * If the page is swapcache, write it back even if that would
		 * block, for some throttling.  This happens by accident, because
		 * swap_backing_dev_info is bust: it doesn't reflect the
		 * congestion state of the swapdevs.  Easy to fix, if needed.
		 * See swapfile.c:page_queue_congested().
		 */
		if (PageDirty(page)) {
			int res;

			if (!is_page_cache_freeable(page))
				goto keep_locked;
			if (!mapping)
				goto keep_locked;
			if (mapping->a_ops->writepage == NULL)
				goto activate_locked;
			if (!may_enter_fs)
				goto keep_locked;
			if (!may_write_to_queue(mapping->backing_dev_info))
				goto keep_locked;
			spin_lock(&mapping->page_lock);
			if (test_clear_page_dirty(page)) {
				struct writeback_control wbc = {
					.sync_mode = WB_SYNC_NONE,
					.nr_to_write = SWAP_CLUSTER_MAX,
				};

				list_move(&page->list, &mapping->locked_pages);
				spin_unlock(&mapping->page_lock);

				SetPageReclaim(page);
				res = mapping->a_ops->writepage(page, &wbc);

				if (res < 0)
					handle_write_error(mapping, page, res);
				if (res == WRITEPAGE_ACTIVATE) {
					ClearPageReclaim(page);
					goto activate_locked;
				}
				if (!PageWriteback(page)) {
					/* synchronous write or broken a_ops? */
					ClearPageReclaim(page);
				}
				goto keep;
			}
			spin_unlock(&mapping->page_lock);
		}

		/*
		 * If the page has buffers, try to free the buffer mappings
		 * associated with this page. If we succeed we try to free
		 * the page as well.
		 *
		 * We do this even if the page is PageDirty().
		 * try_to_release_page() does not perform I/O, but it is
		 * possible for a page to have PageDirty set, but it is actually
		 * clean (all its buffers are clean).  This happens if the
		 * buffers were written out directly, with submit_bh(). ext3
		 * will do this, as well as the blockdev mapping.
		 * try_to_release_page() will discover that cleanness and will
		 * drop the buffers and mark the page clean - it can be freed.
		 *
		 * Rarely, pages can have buffers and no ->mapping.  These are
		 * the pages which were not successfully invalidated in
		 * truncate_complete_page().  We try to drop those buffers here
		 * and if that worked, and the page is no longer mapped into
		 * process address space (page_count == 1) it can be freed.
		 * Otherwise, leave the page on the LRU so it is swappable.
		 */
		if (PagePrivate(page)) {
			if (!try_to_release_page(page, gfp_mask))
				goto activate_locked;
			if (!mapping && page_count(page) == 1)
				goto free_it;
		}

		if (!mapping)
			goto keep_locked;	/* truncate got there first */

		spin_lock(&mapping->page_lock);

		/*
		 * The non-racy check for busy page.  It is critical to check
		 * PageDirty _after_ making sure that the page is freeable and
		 * not in use by anybody.	(pagecache + us == 2)
		 */
		if (page_count(page) != 2 || PageDirty(page)) {
			spin_unlock(&mapping->page_lock);
			goto keep_locked;
		}

#ifdef CONFIG_SWAP
		if (PageSwapCache(page)) {
			swp_entry_t swap = { .val = page->index };
			__delete_from_swap_cache(page);
			spin_unlock(&mapping->page_lock);
			swap_free(swap);
			__put_page(page);	/* The pagecache ref */
			goto free_it;
		}
#endif /* CONFIG_SWAP */

		__remove_from_page_cache(page);
		spin_unlock(&mapping->page_lock);
		__put_page(page);

free_it:
		unlock_page(page);
		ret++;
		if (!pagevec_add(&freed_pvec, page))
			__pagevec_release_nonlru(&freed_pvec);
		continue;

activate_locked:
		SetPageActive(page);
		pgactivate++;
keep_locked:
		unlock_page(page);
keep:
		list_add(&page->lru, &ret_pages);
		BUG_ON(PageLRU(page));
	}
	list_splice(&ret_pages, page_list);
	if (pagevec_count(&freed_pvec))
		__pagevec_release_nonlru(&freed_pvec);
	mod_page_state(pgactivate, pgactivate);
	return ret;
}

/*
 * zone->lru_lock is heavily contended.  We relieve it by quickly privatising
 * a batch of pages and working on them outside the lock.  Any pages which were
 * not freed will be added back to the LRU.
 *
 * shrink_cache() is passed the number of pages to scan and returns the number
 * of pages which were reclaimed.
 *
 * For pagecache intensive workloads, the first loop here is the hottest spot
 * in the kernel (apart from the copy_*_user functions).
 */
static int
shrink_cache(struct zone *zone, unsigned int gfp_mask,
		int max_scan, int *total_scanned)
{
	LIST_HEAD(page_list);
	struct pagevec pvec;
	int ret = 0;

	pagevec_init(&pvec, 1);

	spin_lock_irq(&zone->lru_lock);
	while (max_scan > 0) {
		struct page *page;
		int nr_taken = 0;
		int nr_scan = 0;
		int nr_freed;

		while (nr_scan++ < SWAP_CLUSTER_MAX &&
				!list_empty(&zone->inactive_list)) {
			page = lru_to_page(&zone->inactive_list);

			prefetchw_prev_lru_page(page,
						&zone->inactive_list, flags);

			if (!TestClearPageLRU(page))
				BUG();
			list_del(&page->lru);
			if (page_count(page) == 0) {
				/* It is currently in pagevec_release() */
				SetPageLRU(page);
				list_add(&page->lru, &zone->inactive_list);
				continue;
			}
			list_add(&page->lru, &page_list);
			page_cache_get(page);
			nr_taken++;
		}
		zone->nr_inactive -= nr_taken;
		zone->pages_scanned += nr_taken;
		spin_unlock_irq(&zone->lru_lock);

		if (nr_taken == 0)
			goto done;

		max_scan -= nr_scan;
		if (current_is_kswapd())
			mod_page_state_zone(zone, pgscan_kswapd, nr_scan);
		else
			mod_page_state_zone(zone, pgscan_direct, nr_scan);
		nr_freed = shrink_list(&page_list, gfp_mask, total_scanned);
		*total_scanned += nr_taken;
		if (current_is_kswapd())
			mod_page_state(kswapd_steal, nr_freed);
		mod_page_state_zone(zone, pgsteal, nr_freed);

		ret += nr_freed;
		if (nr_freed <= 0 && list_empty(&page_list))
			goto done;

		spin_lock_irq(&zone->lru_lock);
		/*
		 * Put back any unfreeable pages.
		 */
		while (!list_empty(&page_list)) {
			page = lru_to_page(&page_list);
			if (TestSetPageLRU(page))
				BUG();
			list_del(&page->lru);
			if (PageActive(page))
				add_page_to_active_list(zone, page);
			else
				add_page_to_inactive_list(zone, page);
			if (!pagevec_add(&pvec, page)) {
				spin_unlock_irq(&zone->lru_lock);
				__pagevec_release(&pvec);
				spin_lock_irq(&zone->lru_lock);
			}
		}
	}
	spin_unlock_irq(&zone->lru_lock);
done:
	pagevec_release(&pvec);
	return ret;
}

/*
 * This moves pages from the active list to the inactive list.
 *
 * We move them the other way if the page is referenced by one or more
 * processes, from rmap.
 *
 * If the pages are mostly unmapped, the processing is fast and it is
 * appropriate to hold zone->lru_lock across the whole operation.  But if
 * the pages are mapped, the processing is slow (page_referenced()) so we
 * should drop zone->lru_lock around each page.  It's impossible to balance
 * this, so instead we remove the pages from the LRU while processing them.
 * It is safe to rely on PG_active against the non-LRU pages in here because
 * nobody will play with that bit on a non-LRU page.
 *
 * The downside is that we have to touch page->count against each page.
 * But we had to alter page->flags anyway.
 */
static void
refill_inactive_zone(struct zone *zone, const int nr_pages_in,
			struct page_state *ps)
{
	int pgmoved = 0;
	int pgdeactivate = 0;
	int nr_pages = nr_pages_in;
	LIST_HEAD(l_hold);	/* The pages which were snipped off */
	LIST_HEAD(l_inactive);	/* Pages to go onto the inactive_list */
	LIST_HEAD(l_active);	/* Pages to go onto the active_list */
	struct page *page;
	struct pagevec pvec;
	int reclaim_mapped = 0;
	long mapped_ratio;
	long distress;
	long swap_tendency;

	spin_lock_irq(&zone->lru_lock);
	while (nr_pages && !list_empty(&zone->active_list)) {
		page = lru_to_page(&zone->active_list);
		prefetchw_prev_lru_page(page, &zone->active_list, flags);
		if (!TestClearPageLRU(page))
			BUG();
		list_del(&page->lru);
		if (page_count(page) == 0) {
			/* It is currently in pagevec_release() */
			SetPageLRU(page);
			list_add(&page->lru, &zone->active_list);
		} else {
			page_cache_get(page);
			list_add(&page->lru, &l_hold);
			pgmoved++;
		}
		nr_pages--;
	}
	zone->nr_active -= pgmoved;
	spin_unlock_irq(&zone->lru_lock);

	/*
	 * `distress' is a measure of how much trouble we're having reclaiming
	 * pages.  0 -> no problems.  100 -> great trouble.
	 */
	distress = 100 >> zone->prev_priority;

	/*
	 * The point of this algorithm is to decide when to start reclaiming
	 * mapped memory instead of just pagecache.  Work out how much memory
	 * is mapped.
	 */
	mapped_ratio = (ps->nr_mapped * 100) / total_memory;

	/*
	 * Now decide how much we really want to unmap some pages.  The mapped
	 * ratio is downgraded - just because there's a lot of mapped memory
	 * doesn't necessarily mean that page reclaim isn't succeeding.
	 *
	 * The distress ratio is important - we don't want to start going oom.
	 *
	 * A 100% value of vm_swappiness overrides this algorithm altogether.
	 */
	swap_tendency = mapped_ratio / 2 + distress + vm_swappiness;
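
	/*
	 * Worked example (illustrative numbers): with prev_priority == 6 the
	 * distress term is 100 >> 6 == 1; if 40% of memory is mapped then
	 * mapped_ratio / 2 == 20; with the default vm_swappiness of 60 that
	 * gives swap_tendency == 20 + 1 + 60 == 81, below the threshold of
	 * 100, so mapped pages stay on the active list.  Under severe
	 * pressure (prev_priority == 0) distress alone reaches 100 and
	 * mapped memory is reclaimed regardless of swappiness.
	 */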

	/*
	 * Now use this metric to decide whether to start moving mapped memory
	 * onto the inactive list.
	 */
	if (swap_tendency >= 100)
		reclaim_mapped = 1;

	while (!list_empty(&l_hold)) {
		page = lru_to_page(&l_hold);
		list_del(&page->lru);
		if (page_mapped(page)) {
			if (!reclaim_mapped) {
				list_add(&page->lru, &l_active);
				continue;
			}
			pte_chain_lock(page);
			if (page_referenced(page)) {
				pte_chain_unlock(page);
				list_add(&page->lru, &l_active);
				continue;
			}
			pte_chain_unlock(page);
		}
		/*
		 * FIXME: need to consider page_count(page) here if/when we
		 * reap orphaned pages via the LRU (Daniel's locking stuff)
		 */
		if (total_swap_pages == 0 && !page->mapping &&
						!PagePrivate(page)) {
			list_add(&page->lru, &l_active);
			continue;
		}
		list_add(&page->lru, &l_inactive);
	}

	pagevec_init(&pvec, 1);
	pgmoved = 0;
	spin_lock_irq(&zone->lru_lock);
	while (!list_empty(&l_inactive)) {
		page = lru_to_page(&l_inactive);
		prefetchw_prev_lru_page(page, &l_inactive, flags);
		if (TestSetPageLRU(page))
			BUG();
		if (!TestClearPageActive(page))
			BUG();
		list_move(&page->lru, &zone->inactive_list);
		pgmoved++;
		if (!pagevec_add(&pvec, page)) {
			zone->nr_inactive += pgmoved;
			spin_unlock_irq(&zone->lru_lock);
			pgdeactivate += pgmoved;
			pgmoved = 0;
			if (buffer_heads_over_limit)
				pagevec_strip(&pvec);
			__pagevec_release(&pvec);
			spin_lock_irq(&zone->lru_lock);
		}
	}
	zone->nr_inactive += pgmoved;
	pgdeactivate += pgmoved;
	if (buffer_heads_over_limit) {
		spin_unlock_irq(&zone->lru_lock);
		pagevec_strip(&pvec);
		spin_lock_irq(&zone->lru_lock);
	}

	pgmoved = 0;
	while (!list_empty(&l_active)) {
		page = lru_to_page(&l_active);
		prefetchw_prev_lru_page(page, &l_active, flags);
		if (TestSetPageLRU(page))
			BUG();
		BUG_ON(!PageActive(page));
		list_move(&page->lru, &zone->active_list);
		pgmoved++;
		if (!pagevec_add(&pvec, page)) {
			zone->nr_active += pgmoved;
			pgmoved = 0;
			spin_unlock_irq(&zone->lru_lock);
			__pagevec_release(&pvec);
			spin_lock_irq(&zone->lru_lock);
		}
	}
	zone->nr_active += pgmoved;
	spin_unlock_irq(&zone->lru_lock);
	pagevec_release(&pvec);

	mod_page_state_zone(zone, pgrefill, nr_pages_in - nr_pages);
	mod_page_state(pgdeactivate, pgdeactivate);
}

/*
 * Scan `nr_pages' from this zone.  Returns the number of reclaimed pages.
 * This is a basic per-zone page freer.  Used by both kswapd and direct reclaim.
 */
static int
shrink_zone(struct zone *zone, int max_scan, unsigned int gfp_mask,
		int *total_scanned, struct page_state *ps)
{
	unsigned long ratio;
	int count;

	/*
	 * Try to keep the active list 2/3 of the size of the cache.  And
	 * make sure that refill_inactive is given a decent number of pages.
	 *
	 * The "ratio+1" here is important.  With pagecache-intensive workloads
	 * the inactive list is huge, and `ratio' evaluates to zero all the
	 * time.  Which pins the active list memory.  So we add one to `ratio'
	 * just to make sure that the kernel will slowly sift through the
	 * active list.
	 */
	ratio = (unsigned long)SWAP_CLUSTER_MAX * zone->nr_active /
				((zone->nr_inactive | 1) * 2);
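
	/*
	 * Worked example (illustrative numbers): with SWAP_CLUSTER_MAX == 32,
	 * nr_active == 30000 and nr_inactive == 60000 (the active list already
	 * at 1/3 of the cache), ratio == 32 * 30000 / 120000 == 8, so
	 * nr_scan_active grows by 9 per call and refill_inactive_zone() fires
	 * roughly once every four calls, whenever it reaches SWAP_CLUSTER_MAX.
	 */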
	atomic_add(ratio+1, &zone->nr_scan_active);
	count = atomic_read(&zone->nr_scan_active);
	if (count >= SWAP_CLUSTER_MAX) {
		atomic_set(&zone->nr_scan_active, 0);
		refill_inactive_zone(zone, count, ps);
	}

	atomic_add(max_scan, &zone->nr_scan_inactive);
	count = atomic_read(&zone->nr_scan_inactive);
	if (count >= SWAP_CLUSTER_MAX) {
		atomic_set(&zone->nr_scan_inactive, 0);
		return shrink_cache(zone, gfp_mask, count, total_scanned);
	}
	return 0;
}

/*
 * This is the direct reclaim path, for page-allocating processes.  We only
 * try to reclaim pages from zones which will satisfy the caller's allocation
 * request.
 *
 * We reclaim from a zone even if that zone is over pages_high.  Because:
 * a) The caller may be trying to free *extra* pages to satisfy a higher-order
 *    allocation or
 * b) The zones may be over pages_high but they must go *over* pages_high to
 *    satisfy the `incremental min' zone defense algorithm.
 *
 * Returns the number of reclaimed pages.
 *
 * If a zone is deemed to be full of pinned pages then just give it a light
 * scan then give up on it.
 */
static int
shrink_caches(struct zone **zones, int priority, int *total_scanned,
		int gfp_mask, struct page_state *ps)
{
	int ret = 0;
	int i;

	for (i = 0; zones[i] != NULL; i++) {
		struct zone *zone = zones[i];
		int max_scan;

		if (zone->free_pages < zone->pages_high)
			zone->temp_priority = priority;

		if (zone->all_unreclaimable && priority != DEF_PRIORITY)
			continue;	/* Let kswapd poll it */

		max_scan = zone->nr_inactive >> priority;
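
		/*
		 * Illustrative scaling: with DEF_PRIORITY == 12 and an
		 * inactive list of one million pages, the first pass asks for
		 * 1000000 >> 12 ~= 244 pages; the request doubles each time
		 * the priority drops, up to the whole list at priority 0.
		 */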
		ret += shrink_zone(zone, max_scan, gfp_mask, total_scanned, ps);
	}
	return ret;
}

/*
 * This is the main entry point to direct page reclaim.
 *
 * If a full scan of the inactive list fails to free enough memory then we
 * are "out of memory" and something needs to be killed.
 *
 * If the caller is !__GFP_FS then the probability of a failure is reasonably
 * high - the zone may be full of dirty or under-writeback pages, which this
 * caller can't do much about.  So for !__GFP_FS callers, we just perform a
 * small LRU walk and if that didn't work out, fail the allocation back to the
 * caller.  GFP_NOFS allocators need to know how to deal with it.  Kicking
 * bdflush, waiting and retrying will work.
 *
 * This is a fairly lame algorithm - it can result in excessive CPU burning and
 * excessive rotation of the inactive list, which is _supposed_ to be an LRU,
 * yes?
 */
int try_to_free_pages(struct zone **zones,
		unsigned int gfp_mask, unsigned int order)
{
	int priority;
	int ret = 0;
	int nr_reclaimed = 0;
	struct reclaim_state *reclaim_state = current->reclaim_state;
	int i;

	inc_page_state(allocstall);

	for (i = 0; zones[i] != 0; i++)
		zones[i]->temp_priority = DEF_PRIORITY;

	for (priority = DEF_PRIORITY; priority >= 0; priority--) {
		int total_scanned = 0;
		struct page_state ps;

		get_page_state(&ps);
		nr_reclaimed += shrink_caches(zones, priority, &total_scanned,
						gfp_mask, &ps);
		shrink_slab(total_scanned, gfp_mask);
		if (reclaim_state) {
			nr_reclaimed += reclaim_state->reclaimed_slab;
			reclaim_state->reclaimed_slab = 0;
		}
		if (nr_reclaimed >= SWAP_CLUSTER_MAX) {
			ret = 1;
			goto out;
		}
		if (!(gfp_mask & __GFP_FS))
			break;		/* Let the caller handle it */
		/*
		 * Try to write back as many pages as we just scanned.  Not
		 * sure if that makes sense, but it's an attempt to avoid
		 * creating IO storms unnecessarily
		 */
		wakeup_bdflush(total_scanned);

		/* Take a nap, wait for some writeback to complete */
		if (total_scanned && priority < DEF_PRIORITY - 2)
			blk_congestion_wait(WRITE, HZ/10);
	}
	if ((gfp_mask & __GFP_FS) && !(gfp_mask & __GFP_NORETRY))
		out_of_memory();
out:
	for (i = 0; zones[i] != 0; i++)
		zones[i]->prev_priority = zones[i]->temp_priority;
	return ret;
}

/*
 * For kswapd, balance_pgdat() will work across all this node's zones until
 * they are all at pages_high.
 *
 * If `nr_pages' is non-zero then it is the number of pages which are to be
 * reclaimed, regardless of the zone occupancies.  This is a software suspend
 * special.
 *
 * Returns the number of pages which were actually freed.
 *
 * There is special handling here for zones which are full of pinned pages.
 * This can happen if the pages are all mlocked, or if they are all used by
 * device drivers (say, ZONE_DMA).  Or if they are all in use by hugetlb.
 * What we do is to detect the case where all pages in the zone have been
 * scanned twice and there has been zero successful reclaim.  Mark the zone as
 * dead and from now on, only perform a short scan.  Basically we're polling
 * the zone for when the problem goes away.
 *
 * kswapd scans the zones in the highmem->normal->dma direction.  It skips
 * zones which have free_pages > pages_high, but once a zone is found to have
 * free_pages <= pages_high, we scan that zone and the lower zones regardless
 * of the number of free pages in the lower zones.  This interoperates with
 * the page allocator fallback scheme to ensure that aging of pages is balanced
 * across the zones.
 */
static int balance_pgdat(pg_data_t *pgdat, int nr_pages, struct page_state *ps)
{
	int to_free = nr_pages;
	int priority;
	int i;
	struct reclaim_state *reclaim_state = current->reclaim_state;

	inc_page_state(pageoutrun);

	for (i = 0; i < pgdat->nr_zones; i++) {
		struct zone *zone = pgdat->node_zones + i;

		zone->temp_priority = DEF_PRIORITY;
	}

	for (priority = DEF_PRIORITY; priority; priority--) {
		int all_zones_ok = 1;
		int pages_scanned = 0;
		int end_zone = 0;	/* Inclusive.  0 = ZONE_DMA */

		if (nr_pages == 0) {
			/*
			 * Scan in the highmem->dma direction for the highest
			 * zone which needs scanning
			 */
			for (i = pgdat->nr_zones - 1; i >= 0; i--) {
				struct zone *zone = pgdat->node_zones + i;

				if (zone->all_unreclaimable &&
						priority != DEF_PRIORITY)
					continue;

				if (zone->free_pages <= zone->pages_high) {
					end_zone = i;
					goto scan;
				}
			}
			goto out;
		} else {
			end_zone = pgdat->nr_zones - 1;
		}
scan:
		/*
		 * Now scan the zone in the dma->highmem direction, stopping
		 * at the last zone which needs scanning.
		 *
		 * We do this because the page allocator works in the opposite
		 * direction.  This prevents the page allocator from allocating
		 * pages behind kswapd's direction of progress, which would
		 * cause too much scanning of the lower zones.
		 */
		for (i = 0; i <= end_zone; i++) {
			struct zone *zone = pgdat->node_zones + i;
			int total_scanned = 0;
			int max_scan;
			int reclaimed;

			if (zone->all_unreclaimable && priority != DEF_PRIORITY)
				continue;

			if (nr_pages == 0) {	/* Not software suspend */
				if (zone->free_pages <= zone->pages_high)
					all_zones_ok = 0;
			}
			zone->temp_priority = priority;
			max_scan = zone->nr_inactive >> priority;
			reclaimed = shrink_zone(zone, max_scan, GFP_KERNEL,
					&total_scanned, ps);
			total_scanned += pages_scanned;
			reclaim_state->reclaimed_slab = 0;
			shrink_slab(total_scanned, GFP_KERNEL);
			reclaimed += reclaim_state->reclaimed_slab;
			to_free -= reclaimed;
			if (zone->all_unreclaimable)
				continue;
			if (zone->pages_scanned > zone->present_pages * 2)
				zone->all_unreclaimable = 1;
		}
		if (nr_pages && to_free > 0)
			continue;	/* swsusp: need to do more work */
		if (all_zones_ok)
			break;		/* kswapd: all done */
		/*
		 * OK, kswapd is getting into trouble.  Take a nap, then take
		 * another pass across the zones.
		 */
		if (pages_scanned && priority < DEF_PRIORITY - 2)
			blk_congestion_wait(WRITE, HZ/10);
	}
out:
	for (i = 0; i < pgdat->nr_zones; i++) {
		struct zone *zone = pgdat->node_zones + i;

		zone->prev_priority = zone->temp_priority;
	}
	return nr_pages - to_free;
}

/*
 * The background pageout daemon, started as a kernel thread
 * from the init process.
 *
 * This basically trickles out pages so that we have _some_
 * free memory available even if there is no other activity
 * that frees anything up. This is needed for things like routing
 * etc, where we otherwise might have all activity going on in
 * asynchronous contexts that cannot page things out.
 *
 * If there are applications that are active memory-allocators
 * (most normal use), this basically shouldn't matter.
 */
int kswapd(void *p)
{
	pg_data_t *pgdat = (pg_data_t *)p;
	struct task_struct *tsk = current;
	DEFINE_WAIT(wait);
	struct reclaim_state reclaim_state = {
		.reclaimed_slab = 0,
	};
	cpumask_t cpumask;

	daemonize("kswapd%d", pgdat->node_id);
	cpumask = node_to_cpumask(pgdat->node_id);
	if (!cpus_empty(cpumask))
		set_cpus_allowed(tsk, cpumask);
	current->reclaim_state = &reclaim_state;

	/*
	 * Tell the memory management that we're a "memory allocator",
	 * and that if we need more memory we should get access to it
	 * regardless (see "__alloc_pages()"). "kswapd" should
	 * never get caught in the normal page freeing logic.
	 *
	 * (Kswapd normally doesn't need memory anyway, but sometimes
	 * you need a small amount of memory in order to be able to
	 * page out something else, and this flag essentially protects
	 * us from recursively trying to free more memory as we're
	 * trying to free the first piece of memory in the first place).
	 */
	tsk->flags |= PF_MEMALLOC|PF_KSWAPD;

	for ( ; ; ) {
		struct page_state ps;

		if (current->flags & PF_FREEZE)
			refrigerator(PF_IOTHREAD);
		prepare_to_wait(&pgdat->kswapd_wait, &wait, TASK_INTERRUPTIBLE);
		schedule();
		finish_wait(&pgdat->kswapd_wait, &wait);
		get_page_state(&ps);
		balance_pgdat(pgdat, 0, &ps);
	}
}

/*
 * A zone is low on free memory, so wake its kswapd task to service it.
 */
void wakeup_kswapd(struct zone *zone)
{
	if (zone->free_pages > zone->pages_low)
		return;
	if (!waitqueue_active(&zone->zone_pgdat->kswapd_wait))
		return;
	wake_up_interruptible(&zone->zone_pgdat->kswapd_wait);
}

/*
 * Try to free `nr_pages' of memory, system-wide.  Returns the number of freed
 * pages.
 */
int shrink_all_memory(int nr_pages)
{
	pg_data_t *pgdat;
	int nr_to_free = nr_pages;
	int ret = 0;
	struct reclaim_state reclaim_state = {
		.reclaimed_slab = 0,
	};

	current->reclaim_state = &reclaim_state;
	for_each_pgdat(pgdat) {
		int freed;
		struct page_state ps;

		get_page_state(&ps);
		freed = balance_pgdat(pgdat, nr_to_free, &ps);
		ret += freed;
		nr_to_free -= freed;
		if (nr_to_free <= 0)
			break;
	}
	current->reclaim_state = NULL;
	return ret;
}

#ifdef CONFIG_HOTPLUG_CPU
/* It's optimal to keep kswapds on the same CPUs as their memory, but
   not required for correctness.  So if the last cpu in a node goes
   away, we get changed to run anywhere: as the first one comes back,
   restore their cpu bindings. */
static int __devinit cpu_callback(struct notifier_block *nfb,
				  unsigned long action,
				  void *hcpu)
{
	pg_data_t *pgdat;
	cpumask_t mask;

	if (action == CPU_ONLINE) {
		for_each_pgdat(pgdat) {
			mask = node_to_cpumask(pgdat->node_id);
			if (any_online_cpu(mask) != NR_CPUS)
				/* One of our CPUs online: restore mask */
				set_cpus_allowed(pgdat->kswapd, mask);
		}
	}
	return NOTIFY_OK;
}
#endif /* CONFIG_HOTPLUG_CPU */

static int __init kswapd_init(void)
{
	pg_data_t *pgdat;

	for_each_pgdat(pgdat)
		pgdat->kswapd
			= find_task_by_pid(kernel_thread(kswapd, pgdat, CLONE_KERNEL));
	total_memory = nr_free_pagecache_pages();
	hotcpu_notifier(cpu_callback, 0);
	return 0;
}

module_init(kswapd_init)