/*
 *  Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
 *
 *  Swap reorganised 29.12.95, Stephen Tweedie.
 *  kswapd added: 7.1.96  sct
 *  Removed kswapd_ctl limits, and swap out as many pages as needed
 *  to bring the system back to freepages.high: 2.4.97, Rik van Riel.
 *  Zone aware kswapd started 02/00, Kanoj Sarcar (kanoj@sgi.com).
 *  Multiqueue VM started 5.8.00, Rik van Riel.
 */

#include <linux/module.h>
#include <linux/slab.h>
#include <linux/kernel_stat.h>
#include <linux/swap.h>
#include <linux/pagemap.h>
#include <linux/init.h>
#include <linux/highmem.h>
#include <linux/file.h>
#include <linux/writeback.h>
#include <linux/blkdev.h>
#include <linux/buffer_head.h>	/* for try_to_release_page(),
					buffer_heads_over_limit */
#include <linux/mm_inline.h>
#include <linux/pagevec.h>
#include <linux/backing-dev.h>
#include <linux/rmap.h>
#include <linux/topology.h>
#include <linux/cpu.h>
#include <linux/cpuset.h>
#include <linux/notifier.h>
#include <linux/rwsem.h>
#include <linux/delay.h>

#include <asm/tlbflush.h>
#include <asm/div64.h>

#include <linux/swapops.h>

struct scan_control {
	/* Incremented by the number of inactive pages that were scanned */
	unsigned long nr_scanned;

	unsigned long nr_mapped;	/* From page_state */

	/* This context's GFP mask */
	gfp_t gfp_mask;

	int may_writepage;

	/* Can pages be swapped as part of reclaim? */
	int may_swap;

	/* This context's SWAP_CLUSTER_MAX. If freeing memory for
	 * suspend, we effectively ignore SWAP_CLUSTER_MAX.
	 * In this context, it doesn't matter that we scan the
	 * whole list at once. */
	int swap_cluster_max;
};

/*
 * The list of shrinker callbacks used to apply pressure to
 * ageable caches.
 */
struct shrinker {
	shrinker_t		shrinker;
	struct list_head	list;
	int			seeks;	/* seeks to recreate an obj */
	long			nr;	/* objs pending delete */
};

#define lru_to_page(_head) (list_entry((_head)->prev, struct page, lru))

#ifdef ARCH_HAS_PREFETCH
#define prefetch_prev_lru_page(_page, _base, _field)			\
	do {								\
		if ((_page)->lru.prev != _base) {			\
			struct page *prev;				\
			prev = lru_to_page(&(_page->lru));		\
			prefetch(&prev->_field);			\
		}							\
	} while (0)
#else
#define prefetch_prev_lru_page(_page, _base, _field) do { } while (0)
#endif

#ifdef ARCH_HAS_PREFETCHW
#define prefetchw_prev_lru_page(_page, _base, _field)			\
	do {								\
		if ((_page)->lru.prev != _base) {			\
			struct page *prev;				\
			prev = lru_to_page(&(_page->lru));		\
			prefetchw(&prev->_field);			\
		}							\
	} while (0)
#else
#define prefetchw_prev_lru_page(_page, _base, _field) do { } while (0)
#endif

/*
 * From 0 .. 100.  Higher means more swappy.
 */
int vm_swappiness = 60;
static long total_memory;

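/*
 * Illustrative note (not part of the original source): vm_swappiness is the
 * 0..100 tunable that feeds the swap_tendency calculation in
 * shrink_active_list() below.  On typical configurations it is exposed
 * through the vm sysctl interface, e.g.
 *
 *	echo 80 > /proc/sys/vm/swappiness
 *
 * would make the kernel more willing to reclaim mapped pages.  The sysctl
 * path is an assumption here; the knob is registered outside this file.
 */
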
static LIST_HEAD(shrinker_list);
static DECLARE_RWSEM(shrinker_rwsem);

/*
 * Add a shrinker callback to be called from the vm
 */
struct shrinker *set_shrinker(int seeks, shrinker_t theshrinker)
{
	struct shrinker *shrinker;

	shrinker = kmalloc(sizeof(*shrinker), GFP_KERNEL);
	if (shrinker) {
		shrinker->shrinker = theshrinker;
		shrinker->seeks = seeks;
		shrinker->nr = 0;
		down_write(&shrinker_rwsem);
		list_add_tail(&shrinker->list, &shrinker_list);
		up_write(&shrinker_rwsem);
	}
	return shrinker;
}
EXPORT_SYMBOL(set_shrinker);

/*
 * Remove one
 */
void remove_shrinker(struct shrinker *shrinker)
{
	down_write(&shrinker_rwsem);
	list_del(&shrinker->list);
	up_write(&shrinker_rwsem);
	kfree(shrinker);
}
EXPORT_SYMBOL(remove_shrinker);

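/*
 * Illustrative usage sketch, not part of the original file: how a cache
 * might register with the shrinker interface above.  The callback follows
 * the convention used by shrink_slab() below - when called with
 * nr_to_scan == 0 it only reports how many objects are freeable, otherwise
 * it frees up to nr_to_scan objects and returns the remaining count (or -1
 * if it cannot shrink right now).  my_cache_count(), my_cache_prune() and
 * the choice of DEFAULT_SEEKS are hypothetical.
 *
 *	static int my_cache_shrink(int nr_to_scan, gfp_t gfp_mask)
 *	{
 *		if (nr_to_scan)
 *			my_cache_prune(nr_to_scan);	// hypothetical helper
 *		return my_cache_count();		// hypothetical helper
 *	}
 *
 *	static struct shrinker *my_shrinker;
 *
 *	my_shrinker = set_shrinker(DEFAULT_SEEKS, my_cache_shrink);
 *	...
 *	remove_shrinker(my_shrinker);
 */
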
#define SHRINK_BATCH 128
/*
 * Call the shrink functions to age shrinkable caches
 *
 * Here we assume it costs one seek to replace a lru page and that it also
 * takes a seek to recreate a cache object. With this in mind we age equal
 * percentages of the lru and ageable caches. This should balance the seeks
 * generated by these structures.
 *
 * If the vm encountered mapped pages on the LRU it increases the pressure
 * on slab to avoid swapping.
 *
 * We do weird things to avoid (scanned*seeks*entries) overflowing 32 bits.
 *
 * `lru_pages' represents the number of on-LRU pages in all the zones which
 * are eligible for the caller's allocation attempt. It is used for balancing
 * slab reclaim versus page reclaim.
 *
 * Returns the number of slab objects which we shrunk.
 */
unsigned long shrink_slab(unsigned long scanned, gfp_t gfp_mask,
			unsigned long lru_pages)
{
	struct shrinker *shrinker;
	unsigned long ret = 0;

	if (scanned == 0)
		scanned = SWAP_CLUSTER_MAX;

	if (!down_read_trylock(&shrinker_rwsem))
		return 1;	/* Assume we'll be able to shrink next time */

	list_for_each_entry(shrinker, &shrinker_list, list) {
		unsigned long long delta;
		unsigned long total_scan;
		unsigned long max_pass = (*shrinker->shrinker)(0, gfp_mask);

		delta = (4 * scanned) / shrinker->seeks;
		delta *= max_pass;
		do_div(delta, lru_pages + 1);
		shrinker->nr += delta;
		if (shrinker->nr < 0) {
			printk(KERN_ERR "%s: nr=%ld\n",
					__FUNCTION__, shrinker->nr);
			shrinker->nr = max_pass;
		}

		/*
		 * Avoid risking looping forever due to too large nr value:
		 * never try to free more than twice the estimate number of
		 * freeable entries.
		 */
		if (shrinker->nr > max_pass * 2)
			shrinker->nr = max_pass * 2;

		total_scan = shrinker->nr;
		shrinker->nr = 0;

		while (total_scan >= SHRINK_BATCH) {
			long this_scan = SHRINK_BATCH;
			int shrink_ret;
			int nr_before;

			nr_before = (*shrinker->shrinker)(0, gfp_mask);
			shrink_ret = (*shrinker->shrinker)(this_scan, gfp_mask);
			if (shrink_ret == -1)
				break;
			if (shrink_ret < nr_before)
				ret += nr_before - shrink_ret;
			mod_page_state(slabs_scanned, this_scan);
			total_scan -= this_scan;

			cond_resched();
		}

		shrinker->nr += total_scan;
	}
	up_read(&shrinker_rwsem);
	return ret;
}

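/*
 * Worked example of the aging arithmetic above (illustrative numbers, not
 * from the source): with scanned = 1024 LRU pages, lru_pages = 100000, and
 * a cache whose callback reports max_pass = 50000 objects with seeks = 2,
 *
 *	delta = (4 * 1024 / 2) * 50000 / (100000 + 1)  ~= 1024
 *
 * so roughly 1024 objects are added to shrinker->nr, and once that balance
 * reaches SHRINK_BATCH the cache is scanned in 128-object chunks.
 */
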
/* Called without lock on whether page is mapped, so answer is unstable */
static inline int page_mapping_inuse(struct page *page)
{
	struct address_space *mapping;

	/* Page is in somebody's page tables. */
	if (page_mapped(page))
		return 1;

	/* Be more reluctant to reclaim swapcache than pagecache */
	if (PageSwapCache(page))
		return 1;

	mapping = page_mapping(page);
	if (!mapping)
		return 0;

	/* File is mmap'd by somebody? */
	return mapping_mapped(mapping);
}

static inline int is_page_cache_freeable(struct page *page)
{
	return page_count(page) - !!PagePrivate(page) == 2;
}

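/*
 * Added note (not in the original): the "2" matches the "pagecache + us"
 * accounting used in remove_mapping() below - one reference held by the
 * page cache and one taken by the reclaim path itself.  A PagePrivate page
 * carries an extra reference for its buffers, which the !!PagePrivate()
 * term subtracts.
 */
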
static int may_write_to_queue(struct backing_dev_info *bdi)
{
	if (current->flags & PF_SWAPWRITE)
		return 1;
	if (!bdi_write_congested(bdi))
		return 1;
	if (bdi == current->backing_dev_info)
		return 1;
	return 0;
}

/*
 * We detected a synchronous write error writing a page out.  Probably
 * -ENOSPC.  We need to propagate that into the address_space for a subsequent
 * fsync(), msync() or close().
 *
 * The tricky part is that after writepage we cannot touch the mapping: nothing
 * prevents it from being freed up.  But we have a ref on the page and once
 * that page is locked, the mapping is pinned.
 *
 * We're allowed to run sleeping lock_page() here because we know the caller
 * has __GFP_FS.
 */
static void handle_write_error(struct address_space *mapping,
				struct page *page, int error)
{
	lock_page(page);
	if (page_mapping(page) == mapping) {
		if (error == -ENOSPC)
			set_bit(AS_ENOSPC, &mapping->flags);
		else
			set_bit(AS_EIO, &mapping->flags);
	}
	unlock_page(page);
}

/* possible outcome of pageout() */
typedef enum {
	/* failed to write page out, page is locked */
	PAGE_KEEP,
	/* move page to the active list, page is locked */
	PAGE_ACTIVATE,
	/* page has been sent to disk, page is unlocked */
	PAGE_SUCCESS,
	/* page is clean and locked */
	PAGE_CLEAN,
} pageout_t;

/*
 * pageout is called by shrink_page_list() for each dirty page.
 * Calls ->writepage().
 */
static pageout_t pageout(struct page *page, struct address_space *mapping)
{
	/*
	 * If the page is dirty, only perform writeback if that write
	 * will be non-blocking.  To prevent this allocation from being
	 * stalled by pagecache activity.  But note that there may be
	 * stalls if we need to run get_block().  We could test
	 * PagePrivate for that.
	 *
	 * If this process is currently in generic_file_write() against
	 * this page's queue, we can perform writeback even if that
	 * will block.
	 *
	 * If the page is swapcache, write it back even if that would
	 * block, for some throttling.  This happens by accident, because
	 * swap_backing_dev_info is bust: it doesn't reflect the
	 * congestion state of the swapdevs.  Easy to fix, if needed.
	 * See swapfile.c:page_queue_congested().
	 */
	if (!is_page_cache_freeable(page))
		return PAGE_KEEP;
	if (!mapping) {
		/*
		 * Some data journaling orphaned pages can have
		 * page->mapping == NULL while being dirty with clean buffers.
		 */
		if (PagePrivate(page)) {
			if (try_to_free_buffers(page)) {
				ClearPageDirty(page);
				printk("%s: orphaned page\n", __FUNCTION__);
				return PAGE_CLEAN;
			}
		}
		return PAGE_KEEP;
	}
	if (mapping->a_ops->writepage == NULL)
		return PAGE_ACTIVATE;
	if (!may_write_to_queue(mapping->backing_dev_info))
		return PAGE_KEEP;

	if (clear_page_dirty_for_io(page)) {
		int res;
		struct writeback_control wbc = {
			.sync_mode = WB_SYNC_NONE,
			.nr_to_write = SWAP_CLUSTER_MAX,
			.nonblocking = 1,
			.for_reclaim = 1,
		};

		SetPageReclaim(page);
		res = mapping->a_ops->writepage(page, &wbc);
		if (res < 0)
			handle_write_error(mapping, page, res);
		if (res == AOP_WRITEPAGE_ACTIVATE) {
			ClearPageReclaim(page);
			return PAGE_ACTIVATE;
		}
		if (!PageWriteback(page)) {
			/* synchronous write or broken a_ops? */
			ClearPageReclaim(page);
		}
		return PAGE_SUCCESS;
	}

	return PAGE_CLEAN;
}

int remove_mapping(struct address_space *mapping, struct page *page)
{
	if (!mapping)
		return 0;		/* truncate got there first */

	write_lock_irq(&mapping->tree_lock);

	/*
	 * The non-racy check for busy page.  It is critical to check
	 * PageDirty _after_ making sure that the page is freeable and
	 * not in use by anybody.  (pagecache + us == 2)
	 */
	if (unlikely(page_count(page) != 2))
		goto cannot_free;
	smp_rmb();
	if (unlikely(PageDirty(page)))
		goto cannot_free;

	if (PageSwapCache(page)) {
		swp_entry_t swap = { .val = page_private(page) };
		__delete_from_swap_cache(page);
		write_unlock_irq(&mapping->tree_lock);
		swap_free(swap);
		__put_page(page);	/* The pagecache ref */
		return 1;
	}

	__remove_from_page_cache(page);
	write_unlock_irq(&mapping->tree_lock);
	__put_page(page);
	return 1;

cannot_free:
	write_unlock_irq(&mapping->tree_lock);
	return 0;
}

/*
 * shrink_page_list() returns the number of reclaimed pages
 */
static unsigned long shrink_page_list(struct list_head *page_list,
					struct scan_control *sc)
{
	LIST_HEAD(ret_pages);
	struct pagevec freed_pvec;
	int pgactivate = 0;
	unsigned long nr_reclaimed = 0;

	cond_resched();

	pagevec_init(&freed_pvec, 1);
	while (!list_empty(page_list)) {
		struct address_space *mapping;
		struct page *page;
		int may_enter_fs;
		int referenced;

		cond_resched();

		page = lru_to_page(page_list);
		list_del(&page->lru);

		if (TestSetPageLocked(page))
			goto keep;

		BUG_ON(PageActive(page));

		sc->nr_scanned++;

		if (!sc->may_swap && page_mapped(page))
			goto keep_locked;

		/* Double the slab pressure for mapped and swapcache pages */
		if (page_mapped(page) || PageSwapCache(page))
			sc->nr_scanned++;

		if (PageWriteback(page))
			goto keep_locked;

		referenced = page_referenced(page, 1);
		/* In active use or really unfreeable?  Activate it. */
		if (referenced && page_mapping_inuse(page))
			goto activate_locked;

#ifdef CONFIG_SWAP
		/*
		 * Anonymous process memory has backing store?
		 * Try to allocate it some swap space here.
		 */
		if (PageAnon(page) && !PageSwapCache(page))
			if (!add_to_swap(page, GFP_ATOMIC))
				goto activate_locked;
#endif /* CONFIG_SWAP */

		mapping = page_mapping(page);
		may_enter_fs = (sc->gfp_mask & __GFP_FS) ||
			(PageSwapCache(page) && (sc->gfp_mask & __GFP_IO));

		/*
		 * The page is mapped into the page tables of one or more
		 * processes. Try to unmap it here.
		 */
		if (page_mapped(page) && mapping) {
			switch (try_to_unmap(page, 0)) {
			case SWAP_FAIL:
				goto activate_locked;
			case SWAP_AGAIN:
				goto keep_locked;
			case SWAP_SUCCESS:
				; /* try to free the page below */
			}
		}

		if (PageDirty(page)) {
			if (referenced)
				goto keep_locked;
			if (!may_enter_fs)
				goto keep_locked;
			if (!sc->may_writepage)
				goto keep_locked;

			/* Page is dirty, try to write it out here */
			switch(pageout(page, mapping)) {
			case PAGE_KEEP:
				goto keep_locked;
			case PAGE_ACTIVATE:
				goto activate_locked;
			case PAGE_SUCCESS:
				if (PageWriteback(page) || PageDirty(page))
					goto keep;
				/*
				 * A synchronous write - probably a ramdisk.  Go
				 * ahead and try to reclaim the page.
				 */
				if (TestSetPageLocked(page))
					goto keep;
				if (PageDirty(page) || PageWriteback(page))
					goto keep_locked;
				mapping = page_mapping(page);
			case PAGE_CLEAN:
				; /* try to free the page below */
			}
		}

		/*
		 * If the page has buffers, try to free the buffer mappings
		 * associated with this page. If we succeed we try to free
		 * the page as well.
		 *
		 * We do this even if the page is PageDirty().
		 * try_to_release_page() does not perform I/O, but it is
		 * possible for a page to have PageDirty set, but it is actually
		 * clean (all its buffers are clean).  This happens if the
		 * buffers were written out directly, with submit_bh(). ext3
		 * will do this, as well as the blockdev mapping.
		 * try_to_release_page() will discover that cleanness and will
		 * drop the buffers and mark the page clean - it can be freed.
		 *
		 * Rarely, pages can have buffers and no ->mapping.  These are
		 * the pages which were not successfully invalidated in
		 * truncate_complete_page().  We try to drop those buffers here
		 * and if that worked, and the page is no longer mapped into
		 * process address space (page_count == 1) it can be freed.
		 * Otherwise, leave the page on the LRU so it is swappable.
		 */
		if (PagePrivate(page)) {
			if (!try_to_release_page(page, sc->gfp_mask))
				goto activate_locked;
			if (!mapping && page_count(page) == 1)
				goto free_it;
		}

		if (!remove_mapping(mapping, page))
			goto keep_locked;

free_it:
		unlock_page(page);
		nr_reclaimed++;
		if (!pagevec_add(&freed_pvec, page))
			__pagevec_release_nonlru(&freed_pvec);
		continue;

activate_locked:
		SetPageActive(page);
		pgactivate++;
keep_locked:
		unlock_page(page);
keep:
		list_add(&page->lru, &ret_pages);
		BUG_ON(PageLRU(page));
	}
	list_splice(&ret_pages, page_list);
	if (pagevec_count(&freed_pvec))
		__pagevec_release_nonlru(&freed_pvec);
	mod_page_state(pgactivate, pgactivate);
	return nr_reclaimed;
}

/*
 * zone->lru_lock is heavily contended.  Some of the functions that
 * shrink the lists perform better by taking out a batch of pages
 * and working on them outside the LRU lock.
 *
 * For pagecache intensive workloads, this function is the hottest
 * spot in the kernel (apart from copy_*_user functions).
 *
 * Appropriate locks must be held before calling this function.
 *
 * @nr_to_scan:	The number of pages to look through on the list.
 * @src:	The LRU list to pull pages off.
 * @dst:	The temp list to put pages on to.
 * @scanned:	The number of pages that were scanned.
 *
 * returns how many pages were moved onto *@dst.
 */
static unsigned long isolate_lru_pages(unsigned long nr_to_scan,
		struct list_head *src, struct list_head *dst,
		unsigned long *scanned)
{
	unsigned long nr_taken = 0;
	struct page *page;
	unsigned long scan;

	for (scan = 0; scan < nr_to_scan && !list_empty(src); scan++) {
		struct list_head *target;
		page = lru_to_page(src);
		prefetchw_prev_lru_page(page, src, flags);

		BUG_ON(!PageLRU(page));

		list_del(&page->lru);
		target = src;
		if (likely(get_page_unless_zero(page))) {
			/*
			 * Be careful not to clear PageLRU until after we're
			 * sure the page is not being freed elsewhere -- the
			 * page release code relies on it.
			 */
			ClearPageLRU(page);
			target = dst;
			nr_taken++;
		} /* else it is being freed elsewhere */

		list_add(&page->lru, target);
	}

	*scanned = scan;
	return nr_taken;
}

/*
 * shrink_inactive_list() is a helper for shrink_zone().  It returns the number
 * of reclaimed pages
 */
static unsigned long shrink_inactive_list(unsigned long max_scan,
				struct zone *zone, struct scan_control *sc)
{
	LIST_HEAD(page_list);
	struct pagevec pvec;
	unsigned long nr_scanned = 0;
	unsigned long nr_reclaimed = 0;

	pagevec_init(&pvec, 1);

	lru_add_drain();
	spin_lock_irq(&zone->lru_lock);
	do {
		struct page *page;
		unsigned long nr_taken;
		unsigned long nr_scan;
		unsigned long nr_freed;

		nr_taken = isolate_lru_pages(sc->swap_cluster_max,
					     &zone->inactive_list,
					     &page_list, &nr_scan);
		zone->nr_inactive -= nr_taken;
		zone->pages_scanned += nr_scan;
		spin_unlock_irq(&zone->lru_lock);

		nr_scanned += nr_scan;
		nr_freed = shrink_page_list(&page_list, sc);
		nr_reclaimed += nr_freed;

		local_irq_disable();
		if (current_is_kswapd()) {
			__mod_page_state_zone(zone, pgscan_kswapd, nr_scan);
			__mod_page_state(kswapd_steal, nr_freed);
		} else
			__mod_page_state_zone(zone, pgscan_direct, nr_scan);
		__mod_page_state_zone(zone, pgsteal, nr_freed);

		if (nr_taken == 0)
			goto done;

		spin_lock(&zone->lru_lock);
		/*
		 * Put back any unfreeable pages.
		 */
		while (!list_empty(&page_list)) {
			page = lru_to_page(&page_list);
			BUG_ON(PageLRU(page));
			SetPageLRU(page);
			list_del(&page->lru);
			if (PageActive(page))
				add_page_to_active_list(zone, page);
			else
				add_page_to_inactive_list(zone, page);
			if (!pagevec_add(&pvec, page)) {
				spin_unlock_irq(&zone->lru_lock);
				__pagevec_release(&pvec);
				spin_lock_irq(&zone->lru_lock);
			}
		}
	} while (nr_scanned < max_scan);
	spin_unlock(&zone->lru_lock);
done:
	local_irq_enable();
	pagevec_release(&pvec);
	return nr_reclaimed;
}

/*
 * This moves pages from the active list to the inactive list.
 *
 * We move them the other way if the page is referenced by one or more
 * processes, from rmap.
 *
 * If the pages are mostly unmapped, the processing is fast and it is
 * appropriate to hold zone->lru_lock across the whole operation.  But if
 * the pages are mapped, the processing is slow (page_referenced()) so we
 * should drop zone->lru_lock around each page.  It's impossible to balance
 * this, so instead we remove the pages from the LRU while processing them.
 * It is safe to rely on PG_active against the non-LRU pages in here because
 * nobody will play with that bit on a non-LRU page.
 *
 * The downside is that we have to touch page->_count against each page.
 * But we had to alter page->flags anyway.
 */
static void shrink_active_list(unsigned long nr_pages, struct zone *zone,
				struct scan_control *sc)
{
	unsigned long pgmoved;
	int pgdeactivate = 0;
	unsigned long pgscanned;
	LIST_HEAD(l_hold);	/* The pages which were snipped off */
	LIST_HEAD(l_inactive);	/* Pages to go onto the inactive_list */
	LIST_HEAD(l_active);	/* Pages to go onto the active_list */
	struct page *page;
	struct pagevec pvec;
	int reclaim_mapped = 0;

	if (sc->may_swap) {
		long mapped_ratio;
		long distress;
		long swap_tendency;

		/*
		 * `distress' is a measure of how much trouble we're having
		 * reclaiming pages.  0 -> no problems.  100 -> great trouble.
		 */
		distress = 100 >> zone->prev_priority;

		/*
		 * The point of this algorithm is to decide when to start
		 * reclaiming mapped memory instead of just pagecache.  Work out
		 * how much memory is mapped.
		 */
		mapped_ratio = (sc->nr_mapped * 100) / total_memory;

		/*
		 * Now decide how much we really want to unmap some pages.  The
		 * mapped ratio is downgraded - just because there's a lot of
		 * mapped memory doesn't necessarily mean that page reclaim
		 * isn't succeeding.
		 *
		 * The distress ratio is important - we don't want to start
		 * going oom.
		 *
		 * A 100% value of vm_swappiness overrides this algorithm
		 * altogether.
		 */
		swap_tendency = mapped_ratio / 2 + distress + vm_swappiness;

		/*
		 * Now use this metric to decide whether to start moving mapped
		 * memory onto the inactive list.
		 */
		if (swap_tendency >= 100)
			reclaim_mapped = 1;
	}

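	/*
	 * Worked example (illustrative numbers, not from the source): with
	 * zone->prev_priority == 6 the distress term is 100 >> 6 = 1; if
	 * half of total_memory is mapped, mapped_ratio = 50; with the
	 * default vm_swappiness of 60,
	 *
	 *	swap_tendency = 50/2 + 1 + 60 = 86 < 100
	 *
	 * so mapped pages are left alone.  Under heavier pressure
	 * (prev_priority == 0, distress == 100) the sum exceeds 100 and
	 * reclaim_mapped is set.
	 */
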
	lru_add_drain();
	spin_lock_irq(&zone->lru_lock);
	pgmoved = isolate_lru_pages(nr_pages, &zone->active_list,
				    &l_hold, &pgscanned);
	zone->pages_scanned += pgscanned;
	zone->nr_active -= pgmoved;
	spin_unlock_irq(&zone->lru_lock);

	while (!list_empty(&l_hold)) {
		cond_resched();
		page = lru_to_page(&l_hold);
		list_del(&page->lru);
		if (page_mapped(page)) {
			if (!reclaim_mapped ||
			    (total_swap_pages == 0 && PageAnon(page)) ||
			    page_referenced(page, 0)) {
				list_add(&page->lru, &l_active);
				continue;
			}
		}
		list_add(&page->lru, &l_inactive);
	}

	pagevec_init(&pvec, 1);
	pgmoved = 0;
	spin_lock_irq(&zone->lru_lock);
	while (!list_empty(&l_inactive)) {
		page = lru_to_page(&l_inactive);
		prefetchw_prev_lru_page(page, &l_inactive, flags);
		BUG_ON(PageLRU(page));
		SetPageLRU(page);
		BUG_ON(!PageActive(page));
		ClearPageActive(page);

		list_move(&page->lru, &zone->inactive_list);
		pgmoved++;
		if (!pagevec_add(&pvec, page)) {
			zone->nr_inactive += pgmoved;
			spin_unlock_irq(&zone->lru_lock);
			pgdeactivate += pgmoved;
			pgmoved = 0;
			if (buffer_heads_over_limit)
				pagevec_strip(&pvec);
			__pagevec_release(&pvec);
			spin_lock_irq(&zone->lru_lock);
		}
	}
	zone->nr_inactive += pgmoved;
	pgdeactivate += pgmoved;
	if (buffer_heads_over_limit) {
		spin_unlock_irq(&zone->lru_lock);
		pagevec_strip(&pvec);
		spin_lock_irq(&zone->lru_lock);
	}

	pgmoved = 0;
	while (!list_empty(&l_active)) {
		page = lru_to_page(&l_active);
		prefetchw_prev_lru_page(page, &l_active, flags);
		BUG_ON(PageLRU(page));
		SetPageLRU(page);
		BUG_ON(!PageActive(page));
		list_move(&page->lru, &zone->active_list);
		pgmoved++;
		if (!pagevec_add(&pvec, page)) {
			zone->nr_active += pgmoved;
			pgmoved = 0;
			spin_unlock_irq(&zone->lru_lock);
			__pagevec_release(&pvec);
			spin_lock_irq(&zone->lru_lock);
		}
	}
	zone->nr_active += pgmoved;
	spin_unlock(&zone->lru_lock);

	__mod_page_state_zone(zone, pgrefill, pgscanned);
	__mod_page_state(pgdeactivate, pgdeactivate);
	local_irq_enable();

	pagevec_release(&pvec);
}

/*
 * This is a basic per-zone page freer.  Used by both kswapd and direct reclaim.
 */
static unsigned long shrink_zone(int priority, struct zone *zone,
				struct scan_control *sc)
{
	unsigned long nr_active;
	unsigned long nr_inactive;
	unsigned long nr_to_scan;
	unsigned long nr_reclaimed = 0;

	atomic_inc(&zone->reclaim_in_progress);

	/*
	 * Add one to `nr_to_scan' just to make sure that the kernel will
	 * slowly sift through the active list.
	 */
	zone->nr_scan_active += (zone->nr_active >> priority) + 1;
	nr_active = zone->nr_scan_active;
	if (nr_active >= sc->swap_cluster_max)
		zone->nr_scan_active = 0;
	else
		nr_active = 0;

	zone->nr_scan_inactive += (zone->nr_inactive >> priority) + 1;
	nr_inactive = zone->nr_scan_inactive;
	if (nr_inactive >= sc->swap_cluster_max)
		zone->nr_scan_inactive = 0;
	else
		nr_inactive = 0;

	while (nr_active || nr_inactive) {
		if (nr_active) {
			nr_to_scan = min(nr_active,
					(unsigned long)sc->swap_cluster_max);
			nr_active -= nr_to_scan;
			shrink_active_list(nr_to_scan, zone, sc);
		}

		if (nr_inactive) {
			nr_to_scan = min(nr_inactive,
					(unsigned long)sc->swap_cluster_max);
			nr_inactive -= nr_to_scan;
			nr_reclaimed += shrink_inactive_list(nr_to_scan, zone,
								sc);
		}
	}

	throttle_vm_writeout();

	atomic_dec(&zone->reclaim_in_progress);
	return nr_reclaimed;
}

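/*
 * Worked example of the scan batching above (illustrative, not from the
 * source): with priority == DEF_PRIORITY (12) and zone->nr_active of
 * 1,000,000 pages, each call adds (1000000 >> 12) + 1 = 245 pages to
 * nr_scan_active.  Nothing is scanned until the accumulated count reaches
 * sc->swap_cluster_max (normally SWAP_CLUSTER_MAX, i.e. 32), after which
 * the list is shrunk in swap_cluster_max sized chunks.
 */
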
/*
 * This is the direct reclaim path, for page-allocating processes.  We only
 * try to reclaim pages from zones which will satisfy the caller's allocation
 * request.
 *
 * We reclaim from a zone even if that zone is over pages_high.  Because:
 * a) The caller may be trying to free *extra* pages to satisfy a higher-order
 *    allocation or
 * b) The zones may be over pages_high but they must go *over* pages_high to
 *    satisfy the `incremental min' zone defense algorithm.
 *
 * Returns the number of reclaimed pages.
 *
 * If a zone is deemed to be full of pinned pages then just give it a light
 * scan then give up on it.
 */
static unsigned long shrink_zones(int priority, struct zone **zones,
					struct scan_control *sc)
{
	unsigned long nr_reclaimed = 0;
	int i;

	for (i = 0; zones[i] != NULL; i++) {
		struct zone *zone = zones[i];

		if (!populated_zone(zone))
			continue;

		if (!cpuset_zone_allowed(zone, __GFP_HARDWALL))
			continue;

		zone->temp_priority = priority;
		if (zone->prev_priority > priority)
			zone->prev_priority = priority;

		if (zone->all_unreclaimable && priority != DEF_PRIORITY)
			continue;	/* Let kswapd poll it */

		nr_reclaimed += shrink_zone(priority, zone, sc);
	}
	return nr_reclaimed;
}

/*
 * This is the main entry point to direct page reclaim.
 *
 * If a full scan of the inactive list fails to free enough memory then we
 * are "out of memory" and something needs to be killed.
 *
 * If the caller is !__GFP_FS then the probability of a failure is reasonably
 * high - the zone may be full of dirty or under-writeback pages, which this
 * caller can't do much about.  We kick pdflush and take explicit naps in the
 * hope that some of these pages can be written.  But if the allocating task
 * holds filesystem locks which prevent writeout this might not work, and the
 * allocation attempt will fail.
 */
unsigned long try_to_free_pages(struct zone **zones, gfp_t gfp_mask)
{
	int priority;
	int ret = 0;
	unsigned long total_scanned = 0;
	unsigned long nr_reclaimed = 0;
	struct reclaim_state *reclaim_state = current->reclaim_state;
	unsigned long lru_pages = 0;
	int i;
	struct scan_control sc = {
		.gfp_mask = gfp_mask,
		.may_writepage = !laptop_mode,
		.swap_cluster_max = SWAP_CLUSTER_MAX,
		.may_swap = 1,
	};

	inc_page_state(allocstall);

	for (i = 0; zones[i] != NULL; i++) {
		struct zone *zone = zones[i];

		if (!cpuset_zone_allowed(zone, __GFP_HARDWALL))
			continue;

		zone->temp_priority = DEF_PRIORITY;
		lru_pages += zone->nr_active + zone->nr_inactive;
	}

	for (priority = DEF_PRIORITY; priority >= 0; priority--) {
		sc.nr_mapped = read_page_state(nr_mapped);
		sc.nr_scanned = 0;
		if (!priority)
			disable_swap_token();
		nr_reclaimed += shrink_zones(priority, zones, &sc);
		shrink_slab(sc.nr_scanned, gfp_mask, lru_pages);
		if (reclaim_state) {
			nr_reclaimed += reclaim_state->reclaimed_slab;
			reclaim_state->reclaimed_slab = 0;
		}
		total_scanned += sc.nr_scanned;
		if (nr_reclaimed >= sc.swap_cluster_max) {
			ret = 1;
			goto out;
		}

		/*
		 * Try to write back as many pages as we just scanned.  This
		 * tends to cause slow streaming writers to write data to the
		 * disk smoothly, at the dirtying rate, which is nice.  But
		 * that's undesirable in laptop mode, where we *want* lumpy
		 * writeout.  So in laptop mode, write out the whole world.
		 */
		if (total_scanned > sc.swap_cluster_max +
					sc.swap_cluster_max / 2) {
			wakeup_pdflush(laptop_mode ? 0 : total_scanned);
			sc.may_writepage = 1;
		}

		/* Take a nap, wait for some writeback to complete */
		if (sc.nr_scanned && priority < DEF_PRIORITY - 2)
			blk_congestion_wait(WRITE, HZ/10);
	}
out:
	for (i = 0; zones[i] != 0; i++) {
		struct zone *zone = zones[i];

		if (!cpuset_zone_allowed(zone, __GFP_HARDWALL))
			continue;

		zone->prev_priority = zone->temp_priority;
	}
	return ret;
}

/*
 * For kswapd, balance_pgdat() will work across all this node's zones until
 * they are all at pages_high.
 *
 * If `nr_pages' is non-zero then it is the number of pages which are to be
 * reclaimed, regardless of the zone occupancies.  This is a software suspend
 * special.
 *
 * Returns the number of pages which were actually freed.
 *
 * There is special handling here for zones which are full of pinned pages.
 * This can happen if the pages are all mlocked, or if they are all used by
 * device drivers (say, ZONE_DMA).  Or if they are all in use by hugetlb.
 * What we do is to detect the case where all pages in the zone have been
 * scanned twice and there has been zero successful reclaim.  Mark the zone as
 * dead and from now on, only perform a short scan.  Basically we're polling
 * the zone for when the problem goes away.
 *
 * kswapd scans the zones in the highmem->normal->dma direction.  It skips
 * zones which have free_pages > pages_high, but once a zone is found to have
 * free_pages <= pages_high, we scan that zone and the lower zones regardless
 * of the number of free pages in the lower zones.  This interoperates with
 * the page allocator fallback scheme to ensure that aging of pages is balanced
 * across the zones.
 */
static unsigned long balance_pgdat(pg_data_t *pgdat, unsigned long nr_pages,
				int order)
{
	unsigned long to_free = nr_pages;
	int all_zones_ok;
	int priority;
	int i;
	unsigned long total_scanned;
	unsigned long nr_reclaimed;
	struct reclaim_state *reclaim_state = current->reclaim_state;
	struct scan_control sc = {
		.gfp_mask = GFP_KERNEL,
		.may_swap = 1,
		.swap_cluster_max = nr_pages ? nr_pages : SWAP_CLUSTER_MAX,
	};

loop_again:
	total_scanned = 0;
	nr_reclaimed = 0;
	sc.may_writepage = !laptop_mode;
	sc.nr_mapped = read_page_state(nr_mapped);

	inc_page_state(pageoutrun);

	for (i = 0; i < pgdat->nr_zones; i++) {
		struct zone *zone = pgdat->node_zones + i;

		zone->temp_priority = DEF_PRIORITY;
	}

	for (priority = DEF_PRIORITY; priority >= 0; priority--) {
		int end_zone = 0;	/* Inclusive.  0 = ZONE_DMA */
		unsigned long lru_pages = 0;

		/* The swap token gets in the way of swapout... */
		if (!priority)
			disable_swap_token();

		all_zones_ok = 1;

		if (nr_pages == 0) {
			/*
			 * Scan in the highmem->dma direction for the highest
			 * zone which needs scanning
			 */
			for (i = pgdat->nr_zones - 1; i >= 0; i--) {
				struct zone *zone = pgdat->node_zones + i;

				if (!populated_zone(zone))
					continue;

				if (zone->all_unreclaimable &&
						priority != DEF_PRIORITY)
					continue;

				if (!zone_watermark_ok(zone, order,
						zone->pages_high, 0, 0)) {
					end_zone = i;
					goto scan;
				}
			}
			goto out;
		} else {
			end_zone = pgdat->nr_zones - 1;
		}
scan:
		for (i = 0; i <= end_zone; i++) {
			struct zone *zone = pgdat->node_zones + i;

			lru_pages += zone->nr_active + zone->nr_inactive;
		}

		/*
		 * Now scan the zone in the dma->highmem direction, stopping
		 * at the last zone which needs scanning.
		 *
		 * We do this because the page allocator works in the opposite
		 * direction.  This prevents the page allocator from allocating
		 * pages behind kswapd's direction of progress, which would
		 * cause too much scanning of the lower zones.
		 */
		for (i = 0; i <= end_zone; i++) {
			struct zone *zone = pgdat->node_zones + i;
			int nr_slab;

			if (!populated_zone(zone))
				continue;

			if (zone->all_unreclaimable && priority != DEF_PRIORITY)
				continue;

			if (nr_pages == 0) {	/* Not software suspend */
				if (!zone_watermark_ok(zone, order,
						zone->pages_high, end_zone, 0))
					all_zones_ok = 0;
			}
			zone->temp_priority = priority;
			if (zone->prev_priority > priority)
				zone->prev_priority = priority;
			sc.nr_scanned = 0;
			nr_reclaimed += shrink_zone(priority, zone, &sc);
			reclaim_state->reclaimed_slab = 0;
			nr_slab = shrink_slab(sc.nr_scanned, GFP_KERNEL,
						lru_pages);
			nr_reclaimed += reclaim_state->reclaimed_slab;
			total_scanned += sc.nr_scanned;
			if (zone->all_unreclaimable)
				continue;
			if (nr_slab == 0 && zone->pages_scanned >=
				    (zone->nr_active + zone->nr_inactive) * 4)
				zone->all_unreclaimable = 1;
			/*
			 * If we've done a decent amount of scanning and
			 * the reclaim ratio is low, start doing writepage
			 * even in laptop mode
			 */
			if (total_scanned > SWAP_CLUSTER_MAX * 2 &&
			    total_scanned > nr_reclaimed + nr_reclaimed / 2)
				sc.may_writepage = 1;
		}
		if (nr_pages && to_free > nr_reclaimed)
			continue;	/* swsusp: need to do more work */
		if (all_zones_ok)
			break;		/* kswapd: all done */
		/*
		 * OK, kswapd is getting into trouble.  Take a nap, then take
		 * another pass across the zones.
		 */
		if (total_scanned && priority < DEF_PRIORITY - 2)
			blk_congestion_wait(WRITE, HZ/10);

		/*
		 * We do this so kswapd doesn't build up large priorities for
		 * example when it is freeing in parallel with allocators. It
		 * matches the direct reclaim path behaviour in terms of impact
		 * on zone->*_priority.
		 */
		if ((nr_reclaimed >= SWAP_CLUSTER_MAX) && !nr_pages)
			break;
	}
out:
	for (i = 0; i < pgdat->nr_zones; i++) {
		struct zone *zone = pgdat->node_zones + i;

		zone->prev_priority = zone->temp_priority;
	}
	if (!all_zones_ok) {
		cond_resched();
		goto loop_again;
	}

	return nr_reclaimed;
}

/*
 * The background pageout daemon, started as a kernel thread
 * from the init process.
 *
 * This basically trickles out pages so that we have _some_
 * free memory available even if there is no other activity
 * that frees anything up. This is needed for things like routing
 * etc, where we otherwise might have all activity going on in
 * asynchronous contexts that cannot page things out.
 *
 * If there are applications that are active memory-allocators
 * (most normal use), this basically shouldn't matter.
 */
static int kswapd(void *p)
{
	unsigned long order;
	pg_data_t *pgdat = (pg_data_t*)p;
	struct task_struct *tsk = current;
	DEFINE_WAIT(wait);
	struct reclaim_state reclaim_state = {
		.reclaimed_slab = 0,
	};
	cpumask_t cpumask;

	daemonize("kswapd%d", pgdat->node_id);
	cpumask = node_to_cpumask(pgdat->node_id);
	if (!cpus_empty(cpumask))
		set_cpus_allowed(tsk, cpumask);
	current->reclaim_state = &reclaim_state;

	/*
	 * Tell the memory management that we're a "memory allocator",
	 * and that if we need more memory we should get access to it
	 * regardless (see "__alloc_pages()"). "kswapd" should
	 * never get caught in the normal page freeing logic.
	 *
	 * (Kswapd normally doesn't need memory anyway, but sometimes
	 * you need a small amount of memory in order to be able to
	 * page out something else, and this flag essentially protects
	 * us from recursively trying to free more memory as we're
	 * trying to free the first piece of memory in the first place).
	 */
	tsk->flags |= PF_MEMALLOC | PF_SWAPWRITE | PF_KSWAPD;

	order = 0;
	for ( ; ; ) {
		unsigned long new_order;

		try_to_freeze();

		prepare_to_wait(&pgdat->kswapd_wait, &wait, TASK_INTERRUPTIBLE);
		new_order = pgdat->kswapd_max_order;
		pgdat->kswapd_max_order = 0;
		if (order < new_order) {
			/*
			 * Don't sleep if someone wants a larger 'order'
			 * allocation
			 */
			order = new_order;
		} else {
			schedule();
			order = pgdat->kswapd_max_order;
		}
		finish_wait(&pgdat->kswapd_wait, &wait);

		balance_pgdat(pgdat, 0, order);
	}
	return 0;
}

/*
 * A zone is low on free memory, so wake its kswapd task to service it.
 */
void wakeup_kswapd(struct zone *zone, int order)
{
	pg_data_t *pgdat;

	if (!populated_zone(zone))
		return;

	pgdat = zone->zone_pgdat;
	if (zone_watermark_ok(zone, order, zone->pages_low, 0, 0))
		return;
	if (pgdat->kswapd_max_order < order)
		pgdat->kswapd_max_order = order;
	if (!cpuset_zone_allowed(zone, __GFP_HARDWALL))
		return;
	if (!waitqueue_active(&pgdat->kswapd_wait))
		return;
	wake_up_interruptible(&pgdat->kswapd_wait);
}

/*
 * Try to free `nr_pages' of memory, system-wide.  Returns the number of freed
 * pages.
 */
unsigned long shrink_all_memory(unsigned long nr_pages)
{
	pg_data_t *pgdat;
	unsigned long nr_to_free = nr_pages;
	unsigned long ret = 0;
	unsigned retry = 2;
	struct reclaim_state reclaim_state = {
		.reclaimed_slab = 0,
	};

	current->reclaim_state = &reclaim_state;
repeat:
	for_each_online_pgdat(pgdat) {
		unsigned long freed;

		freed = balance_pgdat(pgdat, nr_to_free, 0);
		ret += freed;
		nr_to_free -= freed;
		if ((long)nr_to_free <= 0)
			break;
	}
	if (retry-- && ret < nr_pages) {
		blk_congestion_wait(WRITE, HZ/5);
		goto repeat;
	}
	current->reclaim_state = NULL;
	return ret;
}

#ifdef CONFIG_HOTPLUG_CPU
/* It's optimal to keep kswapds on the same CPUs as their memory, but
   not required for correctness.  So if the last cpu in a node goes
   away, we get changed to run anywhere: as the first one comes back,
   restore their cpu bindings. */
static int __devinit cpu_callback(struct notifier_block *nfb,
				  unsigned long action, void *hcpu)
{
	pg_data_t *pgdat;
	cpumask_t mask;

	if (action == CPU_ONLINE) {
		for_each_online_pgdat(pgdat) {
			mask = node_to_cpumask(pgdat->node_id);
			if (any_online_cpu(mask) != NR_CPUS)
				/* One of our CPUs online: restore mask */
				set_cpus_allowed(pgdat->kswapd, mask);
		}
	}
	return NOTIFY_OK;
}
#endif /* CONFIG_HOTPLUG_CPU */

static int __init kswapd_init(void)
{
	pg_data_t *pgdat;

	swap_setup();
	for_each_online_pgdat(pgdat) {
		pid_t pid;

		pid = kernel_thread(kswapd, pgdat, CLONE_KERNEL);
		BUG_ON(pid < 0);
		read_lock(&tasklist_lock);
		pgdat->kswapd = find_task_by_pid(pid);
		read_unlock(&tasklist_lock);
	}
	total_memory = nr_free_pagecache_pages();
	hotcpu_notifier(cpu_callback, 0);
	return 0;
}

module_init(kswapd_init)

/*
 * If non-zero call zone_reclaim when the number of free pages falls below
 * the watermarks.
 *
 * In the future we may add flags to the mode. However, the page allocator
 * should only have to check that zone_reclaim_mode != 0 before calling
 * zone_reclaim().
 */
int zone_reclaim_mode __read_mostly;

#define RECLAIM_OFF 0
#define RECLAIM_ZONE (1<<0)	/* Run shrink_cache on the zone */
#define RECLAIM_WRITE (1<<1)	/* Writeout pages during reclaim */
#define RECLAIM_SWAP (1<<2)	/* Swap pages out during reclaim */
#define RECLAIM_SLAB (1<<3)	/* Do a global slab shrink if the zone is out of memory */

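/*
 * Illustrative note (not from the original source): zone_reclaim_mode is a
 * bitmask built from the RECLAIM_* flags above, so a value of 1 enables
 * plain zone reclaim (RECLAIM_ZONE) while 5 additionally allows swapping
 * (RECLAIM_ZONE | RECLAIM_SWAP).  On NUMA systems it is typically set
 * through the vm sysctl interface, e.g.
 *
 *	echo 1 > /proc/sys/vm/zone_reclaim_mode
 *
 * The sysctl path is an assumption; the knob is registered outside this
 * file.
 */
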
/*
 * Minimum time between zone reclaim scans
 */
int zone_reclaim_interval __read_mostly = 30*HZ;

/*
 * Priority for ZONE_RECLAIM. This determines the fraction of pages
 * of a node considered for each zone_reclaim. 4 scans 1/16th of
 * the pages in a zone.
 */
#define ZONE_RECLAIM_PRIORITY 4

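/*
 * Added note (not in the original): this priority feeds straight into
 * shrink_zone(), whose per-pass scan batch is nr_active >> priority, so a
 * value of 4 considers roughly 1/(2^4) = 1/16th of the zone's pages; the
 * do/while loop in __zone_reclaim() then lowers the priority (scanning
 * larger fractions) until enough pages have been freed.
 */
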
/*
 * Try to free up some pages from this zone through reclaim.
 */
static int __zone_reclaim(struct zone *zone, gfp_t gfp_mask, unsigned int order)
{
	/* Minimum pages needed in order to stay on node */
	const unsigned long nr_pages = 1 << order;
	struct task_struct *p = current;
	struct reclaim_state reclaim_state;
	int priority;
	unsigned long nr_reclaimed = 0;
	struct scan_control sc = {
		.may_writepage = !!(zone_reclaim_mode & RECLAIM_WRITE),
		.may_swap = !!(zone_reclaim_mode & RECLAIM_SWAP),
		.nr_mapped = read_page_state(nr_mapped),
		.swap_cluster_max = max_t(unsigned long, nr_pages,
					SWAP_CLUSTER_MAX),
		.gfp_mask = gfp_mask,
	};

	disable_swap_token();
	cond_resched();
	/*
	 * We need to be able to allocate from the reserves for RECLAIM_SWAP
	 * and we also need to be able to write out pages for RECLAIM_WRITE
	 * and RECLAIM_SWAP.
	 */
	p->flags |= PF_MEMALLOC | PF_SWAPWRITE;
	reclaim_state.reclaimed_slab = 0;
	p->reclaim_state = &reclaim_state;

	/*
	 * Free memory by calling shrink zone with increasing priorities
	 * until we have enough memory freed.
	 */
	priority = ZONE_RECLAIM_PRIORITY;
	do {
		nr_reclaimed += shrink_zone(priority, zone, &sc);
		priority--;
	} while (priority >= 0 && nr_reclaimed < nr_pages);

	if (nr_reclaimed < nr_pages && (zone_reclaim_mode & RECLAIM_SLAB)) {
		/*
		 * shrink_slab() does not currently allow us to determine how
		 * many pages were freed in this zone. So we just shake the slab
		 * a bit and then go off node for this particular allocation
		 * despite possibly having freed enough memory to allocate in
		 * this zone.  If we freed local memory then the next
		 * allocations will be local again.
		 *
		 * shrink_slab will free memory on all zones and may take
		 * a long time.
		 */
		shrink_slab(sc.nr_scanned, gfp_mask, order);
	}

	p->reclaim_state = NULL;
	current->flags &= ~(PF_MEMALLOC | PF_SWAPWRITE);

	if (nr_reclaimed == 0) {
		/*
		 * We were unable to reclaim enough pages to stay on node.  We
		 * now allow off node accesses for a certain time period before
		 * trying again to reclaim pages from the local zone.
		 */
		zone->last_unsuccessful_zone_reclaim = jiffies;
	}

	return nr_reclaimed >= nr_pages;
}

int zone_reclaim(struct zone *zone, gfp_t gfp_mask, unsigned int order)
{
	cpumask_t mask;
	int node_id;

	/*
	 * Do not reclaim if there was a recent unsuccessful attempt at zone
	 * reclaim.  In that case we let allocations go off node for the
	 * zone_reclaim_interval. Otherwise we would scan for each off-node
	 * page allocation.
	 */
	if (time_before(jiffies,
		zone->last_unsuccessful_zone_reclaim + zone_reclaim_interval))
			return 0;

	/*
	 * Avoid concurrent zone reclaims, do not reclaim in a zone that does
	 * not have reclaimable pages and if we should not delay the allocation
	 * then do not scan.
	 */
	if (!(gfp_mask & __GFP_WAIT) ||
		zone->all_unreclaimable ||
		atomic_read(&zone->reclaim_in_progress) > 0 ||
		(current->flags & PF_MEMALLOC))
			return 0;

	/*
	 * Only run zone reclaim on the local zone or on zones that do not
	 * have associated processors. This will favor the local processor
	 * over remote processors and spread off node memory allocations
	 * as wide as possible.
	 */
	node_id = zone->zone_pgdat->node_id;
	mask = node_to_cpumask(node_id);
	if (!cpus_empty(mask) && node_id != numa_node_id())
		return 0;
	return __zone_reclaim(zone, gfp_mask, order);
}