/*
 *  Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
 *
 *  Swap reorganised 29.12.95, Stephen Tweedie.
 *  kswapd added: 7.1.96  sct
 *  Removed kswapd_ctl limits, and swap out as many pages as needed
 *  to bring the system back to freepages.high: 2.4.97, Rik van Riel.
 *  Zone aware kswapd started 02/00, Kanoj Sarcar (kanoj@sgi.com).
 *  Multiqueue VM started 5.8.00, Rik van Riel.
 */
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/kernel_stat.h>
#include <linux/swap.h>
#include <linux/pagemap.h>
#include <linux/init.h>
#include <linux/highmem.h>
#include <linux/file.h>
#include <linux/writeback.h>
#include <linux/suspend.h>
#include <linux/blkdev.h>
#include <linux/buffer_head.h>	/* for try_to_release_page(),
					buffer_heads_over_limit */
#include <linux/mm_inline.h>
#include <linux/pagevec.h>
#include <linux/backing-dev.h>
#include <linux/rmap.h>
#include <linux/topology.h>
#include <linux/cpu.h>
#include <linux/notifier.h>
#include <linux/rwsem.h>

#include <asm/tlbflush.h>
#include <asm/div64.h>

#include <linux/swapops.h>
/* possible outcome of pageout() */
typedef enum {
	/* failed to write page out, page is locked */
	PAGE_KEEP,
	/* move page to the active list, page is locked */
	PAGE_ACTIVATE,
	/* page has been sent to the disk successfully, page is unlocked */
	PAGE_SUCCESS,
	/* page is clean and locked */
	PAGE_CLEAN,
} pageout_t;
struct scan_control {
	/* Ask refill_inactive_zone, or shrink_cache to scan this many pages */
	unsigned long nr_to_scan;

	/* Incremented by the number of inactive pages that were scanned */
	unsigned long nr_scanned;

	/* Incremented by the number of pages reclaimed */
	unsigned long nr_reclaimed;

	unsigned long nr_mapped;	/* From page_state */

	/* How many pages shrink_cache() should reclaim */
	int nr_to_reclaim;

	/* Ask shrink_caches, or shrink_zone to scan at this priority */
	unsigned int priority;

	/* This context's GFP mask */
	unsigned int gfp_mask;

	int may_writepage;
};
/*
 * The list of shrinker callbacks used to apply pressure to
 * ageable caches.
 */
struct shrinker {
	shrinker_t		shrinker;
	struct list_head	list;
	int			seeks;	/* seeks to recreate an obj */
	long			nr;	/* objs pending delete */
};
#define lru_to_page(_head) (list_entry((_head)->prev, struct page, lru))
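/*
 * lru_to_page() picks the entry at the tail of the list (list->prev); new
 * pages are added at the head of the LRU lists, so the reclaim loops below
 * always look at the oldest pages first.
 */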
#ifdef ARCH_HAS_PREFETCH
#define prefetch_prev_lru_page(_page, _base, _field)			\
	do {								\
		if ((_page)->lru.prev != _base) {			\
			struct page *prev;				\
			prev = lru_to_page(&(_page->lru));		\
			prefetch(&prev->_field);			\
		}							\
	} while (0)
#else
#define prefetch_prev_lru_page(_page, _base, _field) do { } while (0)
#endif

#ifdef ARCH_HAS_PREFETCHW
#define prefetchw_prev_lru_page(_page, _base, _field)			\
	do {								\
		if ((_page)->lru.prev != _base) {			\
			struct page *prev;				\
			prev = lru_to_page(&(_page->lru));		\
			prefetchw(&prev->_field);			\
		}							\
	} while (0)
#else
#define prefetchw_prev_lru_page(_page, _base, _field) do { } while (0)
#endif
/*
 * From 0 .. 100.  Higher means more swappy.
 */
int vm_swappiness = 60;
static long total_memory;

static LIST_HEAD(shrinker_list);
static DECLARE_RWSEM(shrinker_rwsem);
/*
 * Add a shrinker callback to be called from the vm
 */
struct shrinker *set_shrinker(int seeks, shrinker_t theshrinker)
{
	struct shrinker *shrinker;

	shrinker = kmalloc(sizeof(*shrinker), GFP_KERNEL);
	if (shrinker) {
		shrinker->shrinker = theshrinker;
		shrinker->seeks = seeks;
		shrinker->nr = 0;
		down_write(&shrinker_rwsem);
		list_add(&shrinker->list, &shrinker_list);
		up_write(&shrinker_rwsem);
	}
	return shrinker;
}
EXPORT_SYMBOL(set_shrinker);
void remove_shrinker(struct shrinker *shrinker)
{
	down_write(&shrinker_rwsem);
	list_del(&shrinker->list);
	up_write(&shrinker_rwsem);
	kfree(shrinker);
}
EXPORT_SYMBOL(remove_shrinker);
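/*
 * Illustrative usage sketch (not part of the original file): a subsystem
 * with its own ageable cache registers a shrinker at init time and drops it
 * on exit.  The my_cache_* names are hypothetical; the callback contract is
 * the one used by shrink_slab() below: called with nr_to_scan == 0 it only
 * reports the object count, otherwise it frees up to nr_to_scan objects and
 * returns how many remain (or -1 to abort this pass).
 *
 *	static int my_cache_shrink(int nr_to_scan, unsigned int gfp_mask)
 *	{
 *		if (nr_to_scan)
 *			my_cache_prune(nr_to_scan, gfp_mask);
 *		return my_cache_count();
 *	}
 *
 *	static struct shrinker *my_shrinker;
 *
 *	my_shrinker = set_shrinker(DEFAULT_SEEKS, my_cache_shrink);
 *	...
 *	remove_shrinker(my_shrinker);
 */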
#define SHRINK_BATCH 128
/*
 * Call the shrink functions to age shrinkable caches
 *
 * Here we assume it costs one seek to replace a lru page and that it also
 * takes a seek to recreate a cache object.  With this in mind we age equal
 * percentages of the lru and ageable caches.  This should balance the seeks
 * generated by these structures.
 *
 * If the vm encountered mapped pages on the LRU it increases the pressure on
 * slab to avoid swapping.
 *
 * We do weird things to avoid (scanned*seeks*entries) overflowing 32 bits.
 *
 * `lru_pages' represents the number of on-LRU pages in all the zones which
 * are eligible for the caller's allocation attempt.  It is used for balancing
 * slab reclaim versus page reclaim.
 */
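/*
 * Worked example (illustrative numbers, not from the original source): with
 * scanned = 128 LRU pages, lru_pages = 10000 and a cache reporting 1000
 * objects at seeks = 2, delta = (4 * 128 / 2) * 1000 / 10001, i.e. roughly
 * 25 objects - the cache is asked to scan (4 / seeks) times the fraction of
 * itself that the LRU scan covered.  Because 25 is below SHRINK_BATCH the
 * work is only accumulated in shrinker->nr; the callback runs once enough
 * has built up.
 */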
static int shrink_slab(unsigned long scanned, unsigned int gfp_mask,
			unsigned long lru_pages)
{
	struct shrinker *shrinker;

	if (scanned == 0)
		scanned = SWAP_CLUSTER_MAX;

	if (!down_read_trylock(&shrinker_rwsem))
		return 0;

	list_for_each_entry(shrinker, &shrinker_list, list) {
		unsigned long long delta;
		unsigned long total_scan;

		delta = (4 * scanned) / shrinker->seeks;
		delta *= (*shrinker->shrinker)(0, gfp_mask);
		do_div(delta, lru_pages + 1);
		shrinker->nr += delta;
		if (shrinker->nr < 0)
			shrinker->nr = LONG_MAX;	/* It wrapped! */

		total_scan = shrinker->nr;
		shrinker->nr = 0;

		while (total_scan >= SHRINK_BATCH) {
			long this_scan = SHRINK_BATCH;
			int shrink_ret;

			shrink_ret = (*shrinker->shrinker)(this_scan, gfp_mask);
			if (shrink_ret == -1)
				break;
			mod_page_state(slabs_scanned, this_scan);
			total_scan -= this_scan;
		}

		shrinker->nr += total_scan;
	}
	up_read(&shrinker_rwsem);
	return 0;
}
/* Called without lock on whether page is mapped, so answer is unstable */
static inline int page_mapping_inuse(struct page *page)
{
	struct address_space *mapping;

	/* Page is in somebody's page tables. */
	if (page_mapped(page))
		return 1;

	/* Be more reluctant to reclaim swapcache than pagecache */
	if (PageSwapCache(page))
		return 1;

	mapping = page_mapping(page);
	if (!mapping)
		return 0;

	/* File is mmap'd by somebody? */
	return mapping_mapped(mapping);
}
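/*
 * A pagecache page is freeable when only the pagecache itself and the
 * present caller hold references to it (page_count == 2).  A page with
 * buffers (PagePrivate) carries one extra reference for those buffers,
 * which is discounted below; any higher count means somebody else is
 * still using the page.
 */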
static inline int is_page_cache_freeable(struct page *page)
{
	return page_count(page) - !!PagePrivate(page) == 2;
}
static int may_write_to_queue(struct backing_dev_info *bdi)
{
	if (current_is_kswapd())
		return 1;
	if (current_is_pdflush())	/* This is unlikely, but why not... */
		return 1;
	if (!bdi_write_congested(bdi))
		return 1;
	if (bdi == current->backing_dev_info)
		return 1;
	return 0;
}
/*
 * We detected a synchronous write error writing a page out.  Probably
 * -ENOSPC.  We need to propagate that into the address_space for a subsequent
 * fsync(), msync() or close().
 *
 * The tricky part is that after writepage we cannot touch the mapping: nothing
 * prevents it from being freed up.  But we have a ref on the page and once
 * that page is locked, the mapping is pinned.
 *
 * We're allowed to run sleeping lock_page() here because we know the caller has
 * __GFP_FS.
 */
static void handle_write_error(struct address_space *mapping,
				struct page *page, int error)
{
	lock_page(page);
	if (page_mapping(page) == mapping) {
		if (error == -ENOSPC)
			set_bit(AS_ENOSPC, &mapping->flags);
		else
			set_bit(AS_EIO, &mapping->flags);
	}
	unlock_page(page);
}
/*
 * pageout is called by shrink_list() for each dirty page. Calls ->writepage().
 */
static pageout_t pageout(struct page *page, struct address_space *mapping)
{
	/*
	 * If the page is dirty, only perform writeback if that write
	 * will be non-blocking.  To prevent this allocation from being
	 * stalled by pagecache activity.  But note that there may be
	 * stalls if we need to run get_block().  We could test
	 * PagePrivate for that.
	 *
	 * If this process is currently in generic_file_write() against
	 * this page's queue, we can perform writeback even if that
	 * would block.
	 *
	 * If the page is swapcache, write it back even if that would
	 * block, for some throttling.  This happens by accident, because
	 * swap_backing_dev_info is bust: it doesn't reflect the
	 * congestion state of the swapdevs.  Easy to fix, if needed.
	 * See swapfile.c:page_queue_congested().
	 */
	if (!is_page_cache_freeable(page))
		return PAGE_KEEP;
	if (!mapping)
		return PAGE_KEEP;
	if (mapping->a_ops->writepage == NULL)
		return PAGE_ACTIVATE;
	if (!may_write_to_queue(mapping->backing_dev_info))
		return PAGE_KEEP;

	if (clear_page_dirty_for_io(page)) {
		int res;
		struct writeback_control wbc = {
			.sync_mode = WB_SYNC_NONE,
			.nr_to_write = SWAP_CLUSTER_MAX,
		};

		SetPageReclaim(page);
		res = mapping->a_ops->writepage(page, &wbc);
		if (res < 0)
			handle_write_error(mapping, page, res);
		if (res == WRITEPAGE_ACTIVATE) {
			ClearPageReclaim(page);
			return PAGE_ACTIVATE;
		}
		if (!PageWriteback(page)) {
			/* synchronous write or broken a_ops? */
			ClearPageReclaim(page);
		}
		return PAGE_SUCCESS;
	}

	return PAGE_CLEAN;
}
/*
 * shrink_list adds the number of reclaimed pages to sc->nr_reclaimed
 */
static int shrink_list(struct list_head *page_list, struct scan_control *sc)
{
	LIST_HEAD(ret_pages);
	struct pagevec freed_pvec;
	int pgactivate = 0;
	int reclaimed = 0;

	pagevec_init(&freed_pvec, 1);
	while (!list_empty(page_list)) {
		struct address_space *mapping;
		struct page *page;
		int may_enter_fs;
		int referenced;

		page = lru_to_page(page_list);
		list_del(&page->lru);

		if (TestSetPageLocked(page))
			goto keep;

		BUG_ON(PageActive(page));

		if (PageWriteback(page))
			goto keep_locked;

		sc->nr_scanned++;
		/* Double the slab pressure for mapped and swapcache pages */
		if (page_mapped(page) || PageSwapCache(page))
			sc->nr_scanned++;

		referenced = page_referenced(page, 1);
		/* In active use or really unfreeable? Activate it. */
		if (referenced && page_mapping_inuse(page))
			goto activate_locked;

#ifdef CONFIG_SWAP
		/*
		 * Anonymous process memory has backing store?
		 * Try to allocate it some swap space here.
		 */
		if (PageAnon(page) && !PageSwapCache(page)) {
			if (!add_to_swap(page))
				goto activate_locked;
		}
#endif /* CONFIG_SWAP */

		mapping = page_mapping(page);
		may_enter_fs = (sc->gfp_mask & __GFP_FS) ||
			(PageSwapCache(page) && (sc->gfp_mask & __GFP_IO));

		/*
		 * The page is mapped into the page tables of one or more
		 * processes. Try to unmap it here.
		 */
		if (page_mapped(page) && mapping) {
			switch (try_to_unmap(page)) {
			case SWAP_FAIL:
				goto activate_locked;
			case SWAP_AGAIN:
				goto keep_locked;
			case SWAP_SUCCESS:
				; /* try to free the page below */
			}
		}

		if (PageDirty(page)) {
			if (referenced)
				goto keep_locked;
			if (!may_enter_fs)
				goto keep_locked;
			if (laptop_mode && !sc->may_writepage)
				goto keep_locked;

			/* Page is dirty, try to write it out here */
			switch(pageout(page, mapping)) {
			case PAGE_KEEP:
				goto keep_locked;
			case PAGE_ACTIVATE:
				goto activate_locked;
			case PAGE_SUCCESS:
				if (PageWriteback(page) || PageDirty(page))
					goto keep;
				/*
				 * A synchronous write - probably a ramdisk. Go
				 * ahead and try to reclaim the page.
				 */
				if (TestSetPageLocked(page))
					goto keep;
				if (PageDirty(page) || PageWriteback(page))
					goto keep_locked;
				mapping = page_mapping(page);
			case PAGE_CLEAN:
				; /* try to free the page below */
			}
		}

		/*
		 * If the page has buffers, try to free the buffer mappings
		 * associated with this page. If we succeed we try to free
		 * the page as well.
		 *
		 * We do this even if the page is PageDirty().
		 * try_to_release_page() does not perform I/O, but it is
		 * possible for a page to have PageDirty set, but it is actually
		 * clean (all its buffers are clean).  This happens if the
		 * buffers were written out directly, with submit_bh(). ext3
		 * will do this, as well as the blockdev mapping.
		 * try_to_release_page() will discover that cleanness and will
		 * drop the buffers and mark the page clean - it can be freed.
		 *
		 * Rarely, pages can have buffers and no ->mapping.  These are
		 * the pages which were not successfully invalidated in
		 * truncate_complete_page().  We try to drop those buffers here
		 * and if that worked, and the page is no longer mapped into
		 * process address space (page_count == 1) it can be freed.
		 * Otherwise, leave the page on the LRU so it is swappable.
		 */
		if (PagePrivate(page)) {
			if (!try_to_release_page(page, sc->gfp_mask))
				goto activate_locked;
			if (!mapping && page_count(page) == 1)
				goto free_it;
		}

		if (!mapping)
			goto keep_locked;	/* truncate got there first */

		spin_lock_irq(&mapping->tree_lock);

		/*
		 * The non-racy check for busy page.  It is critical to check
		 * PageDirty _after_ making sure that the page is freeable and
		 * not in use by anybody.	(pagecache + us == 2)
		 */
		if (page_count(page) != 2 || PageDirty(page)) {
			spin_unlock_irq(&mapping->tree_lock);
			goto keep_locked;
		}

#ifdef CONFIG_SWAP
		if (PageSwapCache(page)) {
			swp_entry_t swap = { .val = page->private };
			__delete_from_swap_cache(page);
			spin_unlock_irq(&mapping->tree_lock);
			swap_free(swap);
			__put_page(page);	/* The pagecache ref */
			goto free_it;
		}
#endif /* CONFIG_SWAP */

		__remove_from_page_cache(page);
		spin_unlock_irq(&mapping->tree_lock);
		__put_page(page);

free_it:
		unlock_page(page);
		reclaimed++;
		if (!pagevec_add(&freed_pvec, page))
			__pagevec_release_nonlru(&freed_pvec);
		continue;

activate_locked:
		SetPageActive(page);
		pgactivate++;
keep_locked:
		unlock_page(page);
keep:
		list_add(&page->lru, &ret_pages);
		BUG_ON(PageLRU(page));
	}
	list_splice(&ret_pages, page_list);
	if (pagevec_count(&freed_pvec))
		__pagevec_release_nonlru(&freed_pvec);
	mod_page_state(pgactivate, pgactivate);
	sc->nr_reclaimed += reclaimed;
	return reclaimed;
}
/*
 * zone->lru_lock is heavily contended.  We relieve it by quickly privatising
 * a batch of pages and working on them outside the lock.  Any pages which were
 * not freed will be added back to the LRU.
 *
 * shrink_cache() adds the number of pages reclaimed to sc->nr_reclaimed
 *
 * For pagecache intensive workloads, the first loop here is the hottest spot
 * in the kernel (apart from the copy_*_user functions).
 */
static void shrink_cache(struct zone *zone, struct scan_control *sc)
{
	LIST_HEAD(page_list);
	struct pagevec pvec;
	int max_scan = sc->nr_to_scan;

	pagevec_init(&pvec, 1);

	spin_lock_irq(&zone->lru_lock);
	while (max_scan > 0) {
		struct page *page;
		int nr_taken = 0;
		int nr_scan = 0;
		int nr_freed;

		while (nr_scan++ < SWAP_CLUSTER_MAX &&
				!list_empty(&zone->inactive_list)) {
			page = lru_to_page(&zone->inactive_list);

			prefetchw_prev_lru_page(page,
						&zone->inactive_list, flags);

			if (!TestClearPageLRU(page))
				BUG();
			list_del(&page->lru);
			if (get_page_testone(page)) {
				/*
				 * It is being freed elsewhere
				 */
				__put_page(page);
				SetPageLRU(page);
				list_add(&page->lru, &zone->inactive_list);
				continue;
			}
			list_add(&page->lru, &page_list);
			nr_taken++;
		}
		zone->nr_inactive -= nr_taken;
		zone->pages_scanned += nr_taken;
		spin_unlock_irq(&zone->lru_lock);

		if (nr_taken == 0)
			goto done;

		max_scan -= nr_scan;
		if (current_is_kswapd())
			mod_page_state_zone(zone, pgscan_kswapd, nr_scan);
		else
			mod_page_state_zone(zone, pgscan_direct, nr_scan);
		nr_freed = shrink_list(&page_list, sc);
		if (current_is_kswapd())
			mod_page_state(kswapd_steal, nr_freed);
		mod_page_state_zone(zone, pgsteal, nr_freed);
		sc->nr_to_reclaim -= nr_freed;

		spin_lock_irq(&zone->lru_lock);
		/*
		 * Put back any unfreeable pages.
		 */
		while (!list_empty(&page_list)) {
			page = lru_to_page(&page_list);
			if (TestSetPageLRU(page))
				BUG();
			list_del(&page->lru);
			if (PageActive(page))
				add_page_to_active_list(zone, page);
			else
				add_page_to_inactive_list(zone, page);
			if (!pagevec_add(&pvec, page)) {
				spin_unlock_irq(&zone->lru_lock);
				__pagevec_release(&pvec);
				spin_lock_irq(&zone->lru_lock);
			}
		}
	}
	spin_unlock_irq(&zone->lru_lock);
done:
	pagevec_release(&pvec);
}
/*
 * This moves pages from the active list to the inactive list.
 *
 * We move them the other way if the page is referenced by one or more
 * processes, from rmap.
 *
 * If the pages are mostly unmapped, the processing is fast and it is
 * appropriate to hold zone->lru_lock across the whole operation.  But if
 * the pages are mapped, the processing is slow (page_referenced()) so we
 * should drop zone->lru_lock around each page.  It's impossible to balance
 * this, so instead we remove the pages from the LRU while processing them.
 * It is safe to rely on PG_active against the non-LRU pages in here because
 * nobody will play with that bit on a non-LRU page.
 *
 * The downside is that we have to touch page->_count against each page.
 * But we had to alter page->flags anyway.
 */
static void
refill_inactive_zone(struct zone *zone, struct scan_control *sc)
{
	int pgmoved = 0;
	int pgdeactivate = 0;
	int pgscanned = 0;
	int nr_pages = sc->nr_to_scan;
	LIST_HEAD(l_hold);	/* The pages which were snipped off */
	LIST_HEAD(l_inactive);	/* Pages to go onto the inactive_list */
	LIST_HEAD(l_active);	/* Pages to go onto the active_list */
	struct page *page;
	struct pagevec pvec;
	int reclaim_mapped = 0;
	long mapped_ratio;
	long distress;
	long swap_tendency;

	spin_lock_irq(&zone->lru_lock);
	while (pgscanned < nr_pages && !list_empty(&zone->active_list)) {
		page = lru_to_page(&zone->active_list);
		prefetchw_prev_lru_page(page, &zone->active_list, flags);
		if (!TestClearPageLRU(page))
			BUG();
		list_del(&page->lru);
		if (get_page_testone(page)) {
			/*
			 * It was already free! release_pages() or put_page()
			 * are about to remove it from the LRU and free it. So
			 * put the refcount back and put the page back on the
			 * LRU.
			 */
			__put_page(page);
			SetPageLRU(page);
			list_add(&page->lru, &zone->active_list);
		} else {
			list_add(&page->lru, &l_hold);
			pgmoved++;
		}
		pgscanned++;
	}
	zone->nr_active -= pgmoved;
	spin_unlock_irq(&zone->lru_lock);

	/*
	 * `distress' is a measure of how much trouble we're having reclaiming
	 * pages.  0 -> no problems.  100 -> great trouble.
	 */
	distress = 100 >> zone->prev_priority;

	/*
	 * The point of this algorithm is to decide when to start reclaiming
	 * mapped memory instead of just pagecache.  Work out how much memory
	 * is mapped.
	 */
	mapped_ratio = (sc->nr_mapped * 100) / total_memory;

	/*
	 * Now decide how much we really want to unmap some pages.  The mapped
	 * ratio is downgraded - just because there's a lot of mapped memory
	 * doesn't necessarily mean that page reclaim isn't succeeding.
	 *
	 * The distress ratio is important - we don't want to start going oom.
	 *
	 * A 100% value of vm_swappiness overrides this algorithm altogether.
	 */
	swap_tendency = mapped_ratio / 2 + distress + vm_swappiness;

	/*
	 * Now use this metric to decide whether to start moving mapped memory
	 * onto the inactive list.
	 */
	if (swap_tendency >= 100)
		reclaim_mapped = 1;
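	/*
	 * Worked example (illustrative numbers, not from the original source):
	 * after a moderately stressed pass with prev_priority == 4, distress
	 * is 100 >> 4 = 6.  If 30% of memory is mapped, mapped_ratio is 30,
	 * so with the default vm_swappiness of 60 the sum is 15 + 6 + 60 = 81,
	 * which stays below 100 and mapped pages are left alone.  Raising
	 * vm_swappiness or dropping to a lower prev_priority pushes the sum
	 * past 100 and reclaim_mapped kicks in.
	 */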
	while (!list_empty(&l_hold)) {
		page = lru_to_page(&l_hold);
		list_del(&page->lru);
		if (page_mapped(page)) {
			if (!reclaim_mapped ||
			    (total_swap_pages == 0 && PageAnon(page)) ||
			    page_referenced(page, 0)) {
				list_add(&page->lru, &l_active);
				continue;
			}
		}
		list_add(&page->lru, &l_inactive);
	}

	pagevec_init(&pvec, 1);
	pgmoved = 0;
	spin_lock_irq(&zone->lru_lock);
	while (!list_empty(&l_inactive)) {
		page = lru_to_page(&l_inactive);
		prefetchw_prev_lru_page(page, &l_inactive, flags);
		if (TestSetPageLRU(page))
			BUG();
		if (!TestClearPageActive(page))
			BUG();
		list_move(&page->lru, &zone->inactive_list);
		pgmoved++;
		if (!pagevec_add(&pvec, page)) {
			zone->nr_inactive += pgmoved;
			spin_unlock_irq(&zone->lru_lock);
			pgdeactivate += pgmoved;
			pgmoved = 0;
			if (buffer_heads_over_limit)
				pagevec_strip(&pvec);
			__pagevec_release(&pvec);
			spin_lock_irq(&zone->lru_lock);
		}
	}
	zone->nr_inactive += pgmoved;
	pgdeactivate += pgmoved;
	if (buffer_heads_over_limit) {
		spin_unlock_irq(&zone->lru_lock);
		pagevec_strip(&pvec);
		spin_lock_irq(&zone->lru_lock);
	}

	pgmoved = 0;
	while (!list_empty(&l_active)) {
		page = lru_to_page(&l_active);
		prefetchw_prev_lru_page(page, &l_active, flags);
		if (TestSetPageLRU(page))
			BUG();
		BUG_ON(!PageActive(page));
		list_move(&page->lru, &zone->active_list);
		pgmoved++;
		if (!pagevec_add(&pvec, page)) {
			zone->nr_active += pgmoved;
			pgmoved = 0;
			spin_unlock_irq(&zone->lru_lock);
			__pagevec_release(&pvec);
			spin_lock_irq(&zone->lru_lock);
		}
	}
	zone->nr_active += pgmoved;
	spin_unlock_irq(&zone->lru_lock);
	pagevec_release(&pvec);

	mod_page_state_zone(zone, pgrefill, pgscanned);
	mod_page_state(pgdeactivate, pgdeactivate);
}
/*
 * This is a basic per-zone page freer.  Used by both kswapd and direct reclaim.
 */
static void
shrink_zone(struct zone *zone, struct scan_control *sc)
{
	unsigned long nr_active;
	unsigned long nr_inactive;

	/*
	 * Add one to `nr_to_scan' just to make sure that the kernel will
	 * slowly sift through the active list.
	 */
	zone->nr_scan_active += (zone->nr_active >> sc->priority) + 1;
	nr_active = zone->nr_scan_active;
	if (nr_active >= SWAP_CLUSTER_MAX)
		zone->nr_scan_active = 0;
	else
		nr_active = 0;

	zone->nr_scan_inactive += (zone->nr_inactive >> sc->priority) + 1;
	nr_inactive = zone->nr_scan_inactive;
	if (nr_inactive >= SWAP_CLUSTER_MAX)
		zone->nr_scan_inactive = 0;
	else
		nr_inactive = 0;
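	/*
	 * Worked example (illustrative numbers): with 100000 inactive pages
	 * and the default scan priority (DEF_PRIORITY, 12 in this era), each
	 * call adds (100000 >> 12) + 1 = 25 pages to nr_scan_inactive; a
	 * batch is only scanned once the accumulated count reaches
	 * SWAP_CLUSTER_MAX.  At priority 0 the whole list becomes eligible
	 * in a single pass.
	 */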
	sc->nr_to_reclaim = SWAP_CLUSTER_MAX;

	while (nr_active || nr_inactive) {
		if (nr_active) {
			sc->nr_to_scan = min(nr_active,
					(unsigned long)SWAP_CLUSTER_MAX);
			nr_active -= sc->nr_to_scan;
			refill_inactive_zone(zone, sc);
		}

		if (nr_inactive) {
			sc->nr_to_scan = min(nr_inactive,
					(unsigned long)SWAP_CLUSTER_MAX);
			nr_inactive -= sc->nr_to_scan;
			shrink_cache(zone, sc);
			if (sc->nr_to_reclaim <= 0)
				break;
		}
	}
}
/*
 * This is the direct reclaim path, for page-allocating processes.  We only
 * try to reclaim pages from zones which will satisfy the caller's allocation
 * request.
 *
 * We reclaim from a zone even if that zone is over pages_high.  Because:
 * a) The caller may be trying to free *extra* pages to satisfy a higher-order
 *    allocation or
 * b) The zones may be over pages_high but they must go *over* pages_high to
 *    satisfy the `incremental min' zone defense algorithm.
 *
 * Returns the number of reclaimed pages.
 *
 * If a zone is deemed to be full of pinned pages then just give it a light
 * scan then give up on it.
 */
static void
shrink_caches(struct zone **zones, struct scan_control *sc)
{
	int i;

	for (i = 0; zones[i] != NULL; i++) {
		struct zone *zone = zones[i];

		if (zone->present_pages == 0)
			continue;

		zone->temp_priority = sc->priority;
		if (zone->prev_priority > sc->priority)
			zone->prev_priority = sc->priority;

		if (zone->all_unreclaimable && sc->priority != DEF_PRIORITY)
			continue;	/* Let kswapd poll it */

		shrink_zone(zone, sc);
	}
}
/*
 * This is the main entry point to direct page reclaim.
 *
 * If a full scan of the inactive list fails to free enough memory then we
 * are "out of memory" and something needs to be killed.
 *
 * If the caller is !__GFP_FS then the probability of a failure is reasonably
 * high - the zone may be full of dirty or under-writeback pages, which this
 * caller can't do much about.  We kick pdflush and take explicit naps in the
 * hope that some of these pages can be written.  But if the allocating task
 * holds filesystem locks which prevent writeout this might not work, and the
 * allocation attempt will fail.
 */
int try_to_free_pages(struct zone **zones,
		unsigned int gfp_mask, unsigned int order)
{
	int priority;
	int ret = 0;
	int total_scanned = 0, total_reclaimed = 0;
	struct reclaim_state *reclaim_state = current->reclaim_state;
	struct scan_control sc;
	unsigned long lru_pages = 0;
	int i;

	sc.gfp_mask = gfp_mask;
	sc.may_writepage = 0;

	inc_page_state(allocstall);

	for (i = 0; zones[i] != NULL; i++) {
		struct zone *zone = zones[i];

		zone->temp_priority = DEF_PRIORITY;
		lru_pages += zone->nr_active + zone->nr_inactive;
	}

	for (priority = DEF_PRIORITY; priority >= 0; priority--) {
		sc.nr_mapped = read_page_state(nr_mapped);
		sc.nr_scanned = 0;
		sc.nr_reclaimed = 0;
		sc.priority = priority;
		shrink_caches(zones, &sc);
		shrink_slab(sc.nr_scanned, gfp_mask, lru_pages);
		if (reclaim_state) {
			sc.nr_reclaimed += reclaim_state->reclaimed_slab;
			reclaim_state->reclaimed_slab = 0;
		}
		if (sc.nr_reclaimed >= SWAP_CLUSTER_MAX) {
			ret = 1;
			goto out;
		}
		total_scanned += sc.nr_scanned;
		total_reclaimed += sc.nr_reclaimed;

		/*
		 * Try to write back as many pages as we just scanned.  This
		 * tends to cause slow streaming writers to write data to the
		 * disk smoothly, at the dirtying rate, which is nice.  But
		 * that's undesirable in laptop mode, where we *want* lumpy
		 * writeout.  So in laptop mode, write out the whole world.
		 */
		if (total_scanned > SWAP_CLUSTER_MAX + SWAP_CLUSTER_MAX/2) {
			wakeup_bdflush(laptop_mode ? 0 : total_scanned);
			sc.may_writepage = 1;
		}

		/* Take a nap, wait for some writeback to complete */
		if (sc.nr_scanned && priority < DEF_PRIORITY - 2)
			blk_congestion_wait(WRITE, HZ/10);
	}
	if ((gfp_mask & __GFP_FS) && !(gfp_mask & __GFP_NORETRY))
		out_of_memory(gfp_mask);
out:
	for (i = 0; zones[i] != 0; i++)
		zones[i]->prev_priority = zones[i]->temp_priority;
	return ret;
}
/*
 * For kswapd, balance_pgdat() will work across all this node's zones until
 * they are all at pages_high.
 *
 * If `nr_pages' is non-zero then it is the number of pages which are to be
 * reclaimed, regardless of the zone occupancies.  This is a software suspend
 * special.
 *
 * Returns the number of pages which were actually freed.
 *
 * There is special handling here for zones which are full of pinned pages.
 * This can happen if the pages are all mlocked, or if they are all used by
 * device drivers (say, ZONE_DMA).  Or if they are all in use by hugetlb.
 * What we do is to detect the case where all pages in the zone have been
 * scanned twice and there has been zero successful reclaim.  Mark the zone as
 * dead and from now on, only perform a short scan.  Basically we're polling
 * the zone for when the problem goes away.
 *
 * kswapd scans the zones in the highmem->normal->dma direction.  It skips
 * zones which have free_pages > pages_high, but once a zone is found to have
 * free_pages <= pages_high, we scan that zone and the lower zones regardless
 * of the number of free pages in the lower zones.  This interoperates with
 * the page allocator fallback scheme to ensure that aging of pages is balanced
 * across the zones.
 */
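/*
 * Illustrative example (not from the original source): on a node with
 * ZONE_DMA, ZONE_NORMAL and ZONE_HIGHMEM, if HIGHMEM still has
 * free_pages > pages_high but NORMAL has dropped below it, end_zone is set
 * to NORMAL and the subsequent dma->highmem pass reclaims from DMA and
 * NORMAL only, leaving HIGHMEM untouched on this iteration.
 */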
static int balance_pgdat(pg_data_t *pgdat, int nr_pages)
{
	int to_free = nr_pages;
	int all_zones_ok;
	int priority;
	int i;
	int total_scanned, total_reclaimed;
	struct reclaim_state *reclaim_state = current->reclaim_state;
	struct scan_control sc;

loop_again:
	total_scanned = 0;
	total_reclaimed = 0;
	sc.gfp_mask = GFP_KERNEL;
	sc.may_writepage = 0;
	sc.nr_mapped = read_page_state(nr_mapped);

	inc_page_state(pageoutrun);

	for (i = 0; i < pgdat->nr_zones; i++) {
		struct zone *zone = pgdat->node_zones + i;

		zone->temp_priority = DEF_PRIORITY;
	}

	for (priority = DEF_PRIORITY; priority >= 0; priority--) {
		int end_zone = 0;	/* Inclusive.  0 = ZONE_DMA */
		unsigned long lru_pages = 0;

		all_zones_ok = 1;

		if (nr_pages == 0) {
			/*
			 * Scan in the highmem->dma direction for the highest
			 * zone which needs scanning
			 */
			for (i = pgdat->nr_zones - 1; i >= 0; i--) {
				struct zone *zone = pgdat->node_zones + i;

				if (zone->present_pages == 0)
					continue;

				if (zone->all_unreclaimable &&
						priority != DEF_PRIORITY)
					continue;

				if (zone->free_pages <= zone->pages_high) {
					end_zone = i;
					goto scan;
				}
			}
			goto out;
		} else {
			end_zone = pgdat->nr_zones - 1;
		}
scan:
		for (i = 0; i <= end_zone; i++) {
			struct zone *zone = pgdat->node_zones + i;

			lru_pages += zone->nr_active + zone->nr_inactive;
		}

		/*
		 * Now scan the zone in the dma->highmem direction, stopping
		 * at the last zone which needs scanning.
		 *
		 * We do this because the page allocator works in the opposite
		 * direction.  This prevents the page allocator from allocating
		 * pages behind kswapd's direction of progress, which would
		 * cause too much scanning of the lower zones.
		 */
		for (i = 0; i <= end_zone; i++) {
			struct zone *zone = pgdat->node_zones + i;

			if (zone->present_pages == 0)
				continue;

			if (zone->all_unreclaimable && priority != DEF_PRIORITY)
				continue;

			if (nr_pages == 0) {	/* Not software suspend */
				if (zone->free_pages <= zone->pages_high)
					all_zones_ok = 0;
			}
			zone->temp_priority = priority;
			if (zone->prev_priority > priority)
				zone->prev_priority = priority;
			sc.nr_scanned = 0;
			sc.nr_reclaimed = 0;
			sc.priority = priority;
			shrink_zone(zone, &sc);
			reclaim_state->reclaimed_slab = 0;
			shrink_slab(sc.nr_scanned, GFP_KERNEL, lru_pages);
			sc.nr_reclaimed += reclaim_state->reclaimed_slab;
			total_reclaimed += sc.nr_reclaimed;
			total_scanned += sc.nr_scanned;
			if (zone->all_unreclaimable)
				continue;
			if (zone->pages_scanned >= (zone->nr_active +
							zone->nr_inactive) * 4)
				zone->all_unreclaimable = 1;
			/*
			 * If we've done a decent amount of scanning and
			 * the reclaim ratio is low, start doing writepage
			 * even in laptop mode
			 */
			if (total_scanned > SWAP_CLUSTER_MAX * 2 &&
			    total_scanned > total_reclaimed+total_reclaimed/2)
				sc.may_writepage = 1;
		}
		if (nr_pages && to_free > total_reclaimed)
			continue;	/* swsusp: need to do more work */
		if (all_zones_ok)
			break;		/* kswapd: all done */
		/*
		 * OK, kswapd is getting into trouble.  Take a nap, then take
		 * another pass across the zones.
		 */
		if (total_scanned && priority < DEF_PRIORITY - 2)
			blk_congestion_wait(WRITE, HZ/10);

		/*
		 * We do this so kswapd doesn't build up large priorities for
		 * example when it is freeing in parallel with allocators. It
		 * matches the direct reclaim path behaviour in terms of impact
		 * on zone->*_priority.
		 */
		if (total_reclaimed >= SWAP_CLUSTER_MAX)
			break;
	}
out:
	for (i = 0; i < pgdat->nr_zones; i++) {
		struct zone *zone = pgdat->node_zones + i;

		zone->prev_priority = zone->temp_priority;
	}
	if (!all_zones_ok) {
		cond_resched();
		goto loop_again;
	}

	return total_reclaimed;
}
/*
 * The background pageout daemon, started as a kernel thread
 * from the init process.
 *
 * This basically trickles out pages so that we have _some_
 * free memory available even if there is no other activity
 * that frees anything up. This is needed for things like routing
 * etc, where we otherwise might have all activity going on in
 * asynchronous contexts that cannot page things out.
 *
 * If there are applications that are active memory-allocators
 * (most normal use), this basically shouldn't matter.
 */
static int kswapd(void *p)
{
	pg_data_t *pgdat = (pg_data_t*)p;
	struct task_struct *tsk = current;
	DEFINE_WAIT(wait);
	struct reclaim_state reclaim_state = {
		.reclaimed_slab = 0,
	};
	cpumask_t cpumask;

	daemonize("kswapd%d", pgdat->node_id);
	cpumask = node_to_cpumask(pgdat->node_id);
	if (!cpus_empty(cpumask))
		set_cpus_allowed(tsk, cpumask);
	current->reclaim_state = &reclaim_state;

	/*
	 * Tell the memory management that we're a "memory allocator",
	 * and that if we need more memory we should get access to it
	 * regardless (see "__alloc_pages()"). "kswapd" should
	 * never get caught in the normal page freeing logic.
	 *
	 * (Kswapd normally doesn't need memory anyway, but sometimes
	 * you need a small amount of memory in order to be able to
	 * page out something else, and this flag essentially protects
	 * us from recursively trying to free more memory as we're
	 * trying to free the first piece of memory in the first place).
	 */
	tsk->flags |= PF_MEMALLOC|PF_KSWAPD;

	for ( ; ; ) {
		if (current->flags & PF_FREEZE)
			refrigerator(PF_FREEZE);
		prepare_to_wait(&pgdat->kswapd_wait, &wait, TASK_INTERRUPTIBLE);
		schedule();
		finish_wait(&pgdat->kswapd_wait, &wait);

		balance_pgdat(pgdat, 0);
	}
	return 0;
}
/*
 * A zone is low on free memory, so wake its kswapd task to service it.
 */
void wakeup_kswapd(struct zone *zone)
{
	if (zone->present_pages == 0)
		return;
	if (zone->free_pages > zone->pages_low)
		return;
	if (!waitqueue_active(&zone->zone_pgdat->kswapd_wait))
		return;
	wake_up_interruptible(&zone->zone_pgdat->kswapd_wait);
}
/*
 * Try to free `nr_pages' of memory, system-wide.  Returns the number of freed
 * pages.
 */
int shrink_all_memory(int nr_pages)
{
	pg_data_t *pgdat;
	int nr_to_free = nr_pages;
	int ret = 0;
	struct reclaim_state reclaim_state = {
		.reclaimed_slab = 0,
	};

	current->reclaim_state = &reclaim_state;
	for_each_pgdat(pgdat) {
		int freed;

		freed = balance_pgdat(pgdat, nr_to_free);
		ret += freed;
		nr_to_free -= freed;
		if (nr_to_free <= 0)
			break;
	}
	current->reclaim_state = NULL;
	return ret;
}
#ifdef CONFIG_HOTPLUG_CPU
/* It's optimal to keep kswapds on the same CPUs as their memory, but
   not required for correctness.  So if the last cpu in a node goes
   away, we get changed to run anywhere: as the first one comes back,
   restore their cpu bindings. */
static int __devinit cpu_callback(struct notifier_block *nfb,
				  unsigned long action,
				  void *hcpu)
{
	pg_data_t *pgdat;
	cpumask_t mask;

	if (action == CPU_ONLINE) {
		for_each_pgdat(pgdat) {
			mask = node_to_cpumask(pgdat->node_id);
			if (any_online_cpu(mask) != NR_CPUS)
				/* One of our CPUs online: restore mask */
				set_cpus_allowed(pgdat->kswapd, mask);
		}
	}
	return NOTIFY_OK;
}
#endif /* CONFIG_HOTPLUG_CPU */
static int __init kswapd_init(void)
{
	pg_data_t *pgdat;

	for_each_pgdat(pgdat)
		pgdat->kswapd
			= find_task_by_pid(kernel_thread(kswapd, pgdat, CLONE_KERNEL));
	total_memory = nr_free_pagecache_pages();
	hotcpu_notifier(cpu_callback, 0);
	return 0;
}

module_init(kswapd_init)