/*
 * Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
 *
 * Swap reorganised 29.12.95, Stephen Tweedie.
 * kswapd added: 7.1.96  sct
 * Removed kswapd_ctl limits, and swap out as many pages as needed
 * to bring the system back to freepages.high: 2.4.97, Rik van Riel.
 * Zone aware kswapd started 02/00, Kanoj Sarcar (kanoj@sgi.com).
 * Multiqueue VM started 5.8.00, Rik van Riel.
 */
#include <linux/slab.h>
#include <linux/kernel_stat.h>
#include <linux/swap.h>
#include <linux/pagemap.h>
#include <linux/init.h>
#include <linux/highmem.h>
#include <linux/file.h>
#include <linux/writeback.h>
#include <linux/suspend.h>
#include <linux/blkdev.h>
#include <linux/buffer_head.h>	/* for try_to_release_page(),
					buffer_heads_over_limit */
#include <linux/mm_inline.h>
#include <linux/pagevec.h>
#include <linux/backing-dev.h>
#include <linux/rmap-locking.h>
#include <linux/topology.h>

#include <asm/pgalloc.h>
#include <asm/tlbflush.h>
#include <asm/div64.h>

#include <linux/swapops.h>
/*
 * The "priority" of VM scanning is how much of the queues we will scan in one
 * go. A value of 12 for DEF_PRIORITY implies that we will scan 1/4096th of the
 * queues ("queue_length >> 12") during an aging round.
 */
#define DEF_PRIORITY 12
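/*
 * Illustrative numbers, not from the original source: with DEF_PRIORITY == 12,
 * a zone holding 1,048,576 inactive pages is scanned 1048576 >> 12 == 256
 * pages at a time.  Each failed pass lowers `priority' by one, doubling the
 * scanned fraction, until priority 0 covers the whole queue.
 */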
/*
 * From 0 .. 100.  Higher means more swappy.
 */
int vm_swappiness = 60;
static long total_memory;
#ifdef ARCH_HAS_PREFETCH
#define prefetch_prev_lru_page(_page, _base, _field)			\
	do {								\
		if ((_page)->lru.prev != _base) {			\
			struct page *prev = list_entry(_page->lru.prev,	\
						struct page, lru);	\
			prefetch(&prev->_field);			\
		}							\
	} while (0)
#else
#define prefetch_prev_lru_page(_page, _base, _field) do { } while (0)
#endif
#ifdef ARCH_HAS_PREFETCHW
#define prefetchw_prev_lru_page(_page, _base, _field)			\
	do {								\
		if ((_page)->lru.prev != _base) {			\
			struct page *prev = list_entry(_page->lru.prev,	\
						struct page, lru);	\
			prefetchw(&prev->_field);			\
		}							\
	} while (0)
#else
#define prefetchw_prev_lru_page(_page, _base, _field) do { } while (0)
#endif
/*
 * The list of shrinker callbacks used to apply pressure to
 * ageable caches.
 */
struct shrinker {
	shrinker_t		shrinker;
	struct list_head	list;
	int			seeks;	/* seeks to recreate an obj */
	long			nr;	/* objs pending delete */
};

static LIST_HEAD(shrinker_list);
static DECLARE_MUTEX(shrinker_sem);
/*
 * Add a shrinker callback to be called from the vm
 */
struct shrinker *set_shrinker(int seeks, shrinker_t theshrinker)
{
	struct shrinker *shrinker;

	shrinker = kmalloc(sizeof(*shrinker), GFP_KERNEL);
	if (shrinker) {
		shrinker->shrinker = theshrinker;
		shrinker->seeks = seeks;
		shrinker->nr = 0;
		down(&shrinker_sem);
		list_add(&shrinker->list, &shrinker_list);
		up(&shrinker_sem);
	}
	return shrinker;
}
/*
 * Remove a shrinker callback.
 */
void remove_shrinker(struct shrinker *shrinker)
{
	down(&shrinker_sem);
	list_del(&shrinker->list);
	up(&shrinker_sem);
	kfree(shrinker);
}
#define SHRINK_BATCH 128
/*
 * Call the shrink functions to age shrinkable caches
 *
 * Here we assume it costs one seek to replace a lru page and that it also
 * takes a seek to recreate a cache object.  With this in mind we age equal
 * percentages of the lru and ageable caches.  This should balance the seeks
 * generated by these structures.
 *
 * If the vm encountered mapped pages on the LRU it increases the pressure on
 * slab to avoid swapping.
 *
 * We do weird things to avoid (scanned*seeks*entries) overflowing 32 bits.
 */
static int shrink_slab(long scanned, unsigned int gfp_mask)
{
	struct shrinker *shrinker;
	long pages;

	if (down_trylock(&shrinker_sem))
		return 0;

	pages = nr_used_zone_pages();
	list_for_each_entry(shrinker, &shrinker_list, list) {
		unsigned long long delta;

		delta = scanned * shrinker->seeks;
		delta *= (*shrinker->shrinker)(0, gfp_mask);
		do_div(delta, pages + 1);
		shrinker->nr += delta;
		if (shrinker->nr > SHRINK_BATCH) {
			long nr_to_scan = shrinker->nr;

			shrinker->nr = 0;
			while (nr_to_scan) {
				long this_scan = nr_to_scan;

				if (this_scan > SHRINK_BATCH)
					this_scan = SHRINK_BATCH;
				(*shrinker->shrinker)(this_scan, gfp_mask);
				nr_to_scan -= this_scan;
			}
		}
	}
	up(&shrinker_sem);
	return 0;
}
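/*
 * Worked example with made-up figures (not from the original source): if a
 * pass scanned 1000 LRU pages, a cache reports 20000 freeable objects from
 * the (*shrinker->shrinker)(0, gfp_mask) query, seeks is 2 and there are one
 * million used pages, then delta = 1000 * 2 * 20000 / (1000000 + 1), roughly
 * 40 objects queued against that cache; its callback only runs once
 * shrinker->nr accumulates past SHRINK_BATCH.
 */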
/* Must be called with page's pte_chain_lock held. */
static inline int page_mapping_inuse(struct page *page)
{
	struct address_space *mapping = page->mapping;

	/* Page is in somebody's page tables. */
	if (page_mapped(page))
		return 1;

	/* XXX: does this happen ? */
	if (!mapping)
		return 0;

	/* Be more reluctant to reclaim swapcache than pagecache */
	if (PageSwapCache(page))
		return 1;

	/* File is mmap'd by somebody. */
	if (!list_empty(&mapping->i_mmap))
		return 1;
	if (!list_empty(&mapping->i_mmap_shared))
		return 1;

	return 0;
}
static inline int is_page_cache_freeable(struct page *page)
{
	return page_count(page) - !!PagePrivate(page) == 2;
}
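/*
 * A reading of the check above (assumption about which references are
 * expected, not spelled out in the original): the page cache holds one
 * reference and the caller who isolated the page from the LRU holds another,
 * so a freeable page has page_count() == 2; a PagePrivate page carries one
 * extra reference for its buffers, which the !!PagePrivate(page) term
 * subtracts before the comparison.
 */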
static int may_write_to_queue(struct backing_dev_info *bdi)
{
	if (current_is_kswapd())
		return 1;
	if (current_is_pdflush())	/* This is unlikely, but why not... */
		return 1;
	if (!bdi_write_congested(bdi))
		return 1;
	if (bdi == current->backing_dev_info)
		return 1;
	return 0;
}
/*
 * shrink_list returns the number of reclaimed pages
 */
static int
shrink_list(struct list_head *page_list, unsigned int gfp_mask,
		int *max_scan, int *nr_mapped)
{
	struct address_space *mapping;
	LIST_HEAD(ret_pages);
	struct pagevec freed_pvec;
	int pgactivate = 0;
	int ret = 0;

	pagevec_init(&freed_pvec, 1);
	while (!list_empty(page_list)) {
		struct page *page;
		int may_enter_fs;

		page = list_entry(page_list->prev, struct page, lru);
		list_del(&page->lru);

		if (TestSetPageLocked(page))
			goto keep;
		/* Double the slab pressure for mapped and swapcache pages */
		if (page_mapped(page) || PageSwapCache(page))
			(*nr_mapped)++;

		BUG_ON(PageActive(page));
		may_enter_fs = (gfp_mask & __GFP_FS) ||
			(PageSwapCache(page) && (gfp_mask & __GFP_IO));

		if (PageWriteback(page))
			goto keep_locked;

		pte_chain_lock(page);
		if (page_referenced(page) && page_mapping_inuse(page)) {
			/* In active use or really unfreeable.  Activate it. */
			pte_chain_unlock(page);
			goto activate_locked;
		}
		mapping = page->mapping;

#ifdef CONFIG_SWAP
		/*
		 * Anonymous process memory without backing store. Try to
		 * allocate it some swap space here.
		 *
		 * XXX: implement swap clustering ?
		 */
		if (page_mapped(page) && !mapping && !PagePrivate(page)) {
			pte_chain_unlock(page);
			if (!add_to_swap(page))
				goto activate_locked;
			pte_chain_lock(page);
			mapping = page->mapping;
		}
#endif /* CONFIG_SWAP */
		/*
		 * The page is mapped into the page tables of one or more
		 * processes. Try to unmap it here.
		 */
		if (page_mapped(page) && mapping) {
			switch (try_to_unmap(page)) {
			case SWAP_FAIL:
				pte_chain_unlock(page);
				goto activate_locked;
			case SWAP_AGAIN:
				pte_chain_unlock(page);
				goto keep_locked;
			case SWAP_SUCCESS:
				; /* try to free the page below */
			}
		}
		pte_chain_unlock(page);
		/*
		 * If the page is dirty, only perform writeback if that write
		 * will be non-blocking, to prevent this allocation from being
		 * stalled by pagecache activity.  But note that there may be
		 * stalls if we need to run get_block().  We could test
		 * PagePrivate for that.
		 *
		 * If this process is currently in generic_file_write() against
		 * this page's queue, we can perform writeback even if that
		 * will block.
		 *
		 * If the page is swapcache, write it back even if that would
		 * block, for some throttling.  This happens by accident, because
		 * swap_backing_dev_info is bust: it doesn't reflect the
		 * congestion state of the swapdevs.  Easy to fix, if needed.
		 * See swapfile.c:page_queue_congested().
		 */
		if (PageDirty(page)) {
			if (!is_page_cache_freeable(page))
				goto keep_locked;
			if (!mapping)
				goto keep_locked;
			if (mapping->a_ops->writepage == NULL)
				goto activate_locked;
			if (!may_enter_fs)
				goto keep_locked;
			if (!may_write_to_queue(mapping->backing_dev_info))
				goto keep_locked;
			spin_lock(&mapping->page_lock);
			if (test_clear_page_dirty(page)) {
				int res;
				struct writeback_control wbc = {
					.sync_mode = WB_SYNC_NONE,
					.nr_to_write = SWAP_CLUSTER_MAX,
				};

				list_move(&page->list, &mapping->locked_pages);
				spin_unlock(&mapping->page_lock);

				SetPageReclaim(page);
				res = mapping->a_ops->writepage(page, &wbc);

				if (res == WRITEPAGE_ACTIVATE) {
					ClearPageReclaim(page);
					goto activate_locked;
				}
				if (!PageWriteback(page)) {
					/* synchronous write or broken a_ops? */
					ClearPageReclaim(page);
				}
				goto keep;
			}
			spin_unlock(&mapping->page_lock);
		}
		/*
		 * If the page has buffers, try to free the buffer mappings
		 * associated with this page. If we succeed we try to free
		 * the page as well.
		 *
		 * We do this even if the page is PageDirty().
		 * try_to_release_page() does not perform I/O, but it is
		 * possible for a page to have PageDirty set, but it is actually
		 * clean (all its buffers are clean).  This happens if the
		 * buffers were written out directly, with submit_bh(). ext3
		 * will do this, as well as the blockdev mapping.
		 * try_to_release_page() will discover that cleanness and will
		 * drop the buffers and mark the page clean - it can be freed.
		 *
		 * Rarely, pages can have buffers and no ->mapping.  These are
		 * the pages which were not successfully invalidated in
		 * truncate_complete_page().  We try to drop those buffers here
		 * and if that worked, and the page is no longer mapped into
		 * process address space (page_count == 0) it can be freed.
		 * Otherwise, leave the page on the LRU so it is swappable.
		 */
		if (PagePrivate(page)) {
			if (!try_to_release_page(page, gfp_mask))
				goto activate_locked;
			if (!mapping && page_count(page) == 1)
				goto free_it;
		}

		if (!mapping)
			goto keep_locked;	/* truncate got there first */
		spin_lock(&mapping->page_lock);

		/*
		 * The non-racy check for busy page.  It is critical to check
		 * PageDirty _after_ making sure that the page is freeable and
		 * not in use by anybody. (pagecache + us == 2)
		 */
		if (page_count(page) != 2 || PageDirty(page)) {
			spin_unlock(&mapping->page_lock);
			goto keep_locked;
		}

#ifdef CONFIG_SWAP
		if (PageSwapCache(page)) {
			swp_entry_t swap = { .val = page->index };
			__delete_from_swap_cache(page);
			spin_unlock(&mapping->page_lock);
			swap_free(swap);
			__put_page(page);	/* The pagecache ref */
			goto free_it;
		}
#endif /* CONFIG_SWAP */

		__remove_from_page_cache(page);
		spin_unlock(&mapping->page_lock);
		__put_page(page);
free_it:
		unlock_page(page);
		ret++;
		if (!pagevec_add(&freed_pvec, page))
			__pagevec_release_nonlru(&freed_pvec);
		continue;

activate_locked:
		SetPageActive(page);
		pgactivate++;
keep_locked:
		unlock_page(page);
keep:
		list_add(&page->lru, &ret_pages);
		BUG_ON(PageLRU(page));
	}
	list_splice(&ret_pages, page_list);
	if (pagevec_count(&freed_pvec))
		__pagevec_release_nonlru(&freed_pvec);
	mod_page_state(pgsteal, ret);
	if (current_is_kswapd())
		mod_page_state(kswapd_steal, ret);
	mod_page_state(pgactivate, pgactivate);
	return ret;
}
/*
 * zone->lru_lock is heavily contended.  We relieve it by quickly privatising
 * a batch of pages and working on them outside the lock.  Any pages which were
 * not freed will be added back to the LRU.
 *
 * shrink_cache() is passed the number of pages to try to free, and returns
 * the number of pages which were reclaimed.
 *
 * For pagecache intensive workloads, the first loop here is the hottest spot
 * in the kernel (apart from the copy_*_user functions).
 */
static int
shrink_cache(const int nr_pages, struct zone *zone,
		unsigned int gfp_mask, int max_scan, int *nr_mapped)
{
	LIST_HEAD(page_list);
	struct pagevec pvec;
	int nr_to_process;
	int ret = 0;

	/*
	 * Try to ensure that we free `nr_pages' pages in one pass of the loop.
	 */
	nr_to_process = nr_pages;
	if (nr_to_process < SWAP_CLUSTER_MAX)
		nr_to_process = SWAP_CLUSTER_MAX;

	pagevec_init(&pvec, 1);

	spin_lock_irq(&zone->lru_lock);
	while (max_scan > 0 && ret < nr_pages) {
		struct page *page;
		int nr_taken = 0;
		int nr_scan = 0;
		int nr_freed;

		while (nr_scan++ < nr_to_process &&
				!list_empty(&zone->inactive_list)) {
			page = list_entry(zone->inactive_list.prev,
						struct page, lru);

			prefetchw_prev_lru_page(page,
						&zone->inactive_list, flags);

			if (!TestClearPageLRU(page))
				BUG();
			list_del(&page->lru);
			if (page_count(page) == 0) {
				/* It is currently in pagevec_release() */
				SetPageLRU(page);
				list_add(&page->lru, &zone->inactive_list);
				continue;
			}
			list_add(&page->lru, &page_list);
			page_cache_get(page);
			nr_taken++;
		}
		zone->nr_inactive -= nr_taken;
		zone->pages_scanned += nr_taken;
		spin_unlock_irq(&zone->lru_lock);
		if (nr_taken == 0)
			goto done;

		max_scan -= nr_scan;
		mod_page_state(pgscan, nr_scan);
		nr_freed = shrink_list(&page_list, gfp_mask,
					&max_scan, nr_mapped);
		ret += nr_freed;
		if (nr_freed <= 0 && list_empty(&page_list))
			goto done;

		spin_lock_irq(&zone->lru_lock);
		/*
		 * Put back any unfreeable pages.
		 */
		while (!list_empty(&page_list)) {
			page = list_entry(page_list.prev, struct page, lru);
			if (TestSetPageLRU(page))
				BUG();
			list_del(&page->lru);
			if (PageActive(page))
				add_page_to_active_list(zone, page);
			else
				add_page_to_inactive_list(zone, page);
			if (!pagevec_add(&pvec, page)) {
				spin_unlock_irq(&zone->lru_lock);
				__pagevec_release(&pvec);
				spin_lock_irq(&zone->lru_lock);
			}
		}
	}
	spin_unlock_irq(&zone->lru_lock);
done:
	pagevec_release(&pvec);
	return ret;
}
/*
 * This moves pages from the active list to the inactive list.
 *
 * We move them the other way if the page is referenced by one or more
 * processes, from rmap.
 *
 * If the pages are mostly unmapped, the processing is fast and it is
 * appropriate to hold zone->lru_lock across the whole operation.  But if
 * the pages are mapped, the processing is slow (page_referenced()) so we
 * should drop zone->lru_lock around each page.  It's impossible to balance
 * this, so instead we remove the pages from the LRU while processing them.
 * It is safe to rely on PG_active against the non-LRU pages in here because
 * nobody will play with that bit on a non-LRU page.
 *
 * The downside is that we have to touch page->count against each page.
 * But we had to alter page->flags anyway.
 */
static void
refill_inactive_zone(struct zone *zone, const int nr_pages_in,
			struct page_state *ps, int priority)
{
	int pgmoved = 0;
	int pgdeactivate = 0;
	int nr_pages = nr_pages_in;
	LIST_HEAD(l_hold);	/* The pages which were snipped off */
	LIST_HEAD(l_inactive);	/* Pages to go onto the inactive_list */
	LIST_HEAD(l_active);	/* Pages to go onto the active_list */
	struct page *page;
	struct pagevec pvec;
	int reclaim_mapped = 0;
	long mapped_ratio;
	long distress;
	long swap_tendency;

	spin_lock_irq(&zone->lru_lock);
	while (nr_pages && !list_empty(&zone->active_list)) {
		page = list_entry(zone->active_list.prev, struct page, lru);
		prefetchw_prev_lru_page(page, &zone->active_list, flags);
		if (!TestClearPageLRU(page))
			BUG();
		list_del(&page->lru);
		if (page_count(page) == 0) {
			/* It is currently in pagevec_release() */
			SetPageLRU(page);
			list_add(&page->lru, &zone->active_list);
		} else {
			page_cache_get(page);
			list_add(&page->lru, &l_hold);
			pgmoved++;
		}
		nr_pages--;
	}
	zone->nr_active -= pgmoved;
	spin_unlock_irq(&zone->lru_lock);
	/*
	 * `distress' is a measure of how much trouble we're having reclaiming
	 * pages.  0 -> no problems.  100 -> great trouble.
	 */
	distress = 100 >> priority;

	/*
	 * The point of this algorithm is to decide when to start reclaiming
	 * mapped memory instead of just pagecache.  Work out how much memory
	 * is mapped.
	 */
	mapped_ratio = (ps->nr_mapped * 100) / total_memory;

	/*
	 * Now decide how much we really want to unmap some pages.  The mapped
	 * ratio is downgraded - just because there's a lot of mapped memory
	 * doesn't necessarily mean that page reclaim isn't succeeding.
	 *
	 * The distress ratio is important - we don't want to start going oom.
	 *
	 * A 100% value of vm_swappiness overrides this algorithm altogether.
	 */
	swap_tendency = mapped_ratio / 2 + distress + vm_swappiness;

	/*
	 * Now use this metric to decide whether to start moving mapped memory
	 * onto the inactive list.
	 */
	if (swap_tendency >= 100)
		reclaim_mapped = 1;
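/*
 * Worked example with made-up figures (not from the original source): at
 * priority 6, distress = 100 >> 6 = 1; if 40% of total_memory is mapped,
 * mapped_ratio = 40 and swap_tendency = 40/2 + 1 + 60 = 81, so mapped pages
 * are still skipped.  Only when distress climbs (priority 0 gives 100) or
 * vm_swappiness is raised does swap_tendency reach 100 and set
 * reclaim_mapped.
 */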
	while (!list_empty(&l_hold)) {
		page = list_entry(l_hold.prev, struct page, lru);
		list_del(&page->lru);
		if (page_mapped(page)) {
			pte_chain_lock(page);
			if (page_mapped(page) && page_referenced(page)) {
				pte_chain_unlock(page);
				list_add(&page->lru, &l_active);
				continue;
			}
			pte_chain_unlock(page);
			if (!reclaim_mapped) {
				list_add(&page->lru, &l_active);
				continue;
			}
		}
		/*
		 * FIXME: need to consider page_count(page) here if/when we
		 * reap orphaned pages via the LRU (Daniel's locking stuff)
		 */
		if (total_swap_pages == 0 && !page->mapping &&
				!PagePrivate(page)) {
			list_add(&page->lru, &l_active);
			continue;
		}
		list_add(&page->lru, &l_inactive);
	}
	pagevec_init(&pvec, 1);
	pgmoved = 0;
	spin_lock_irq(&zone->lru_lock);
	while (!list_empty(&l_inactive)) {
		page = list_entry(l_inactive.prev, struct page, lru);
		prefetchw_prev_lru_page(page, &l_inactive, flags);
		if (TestSetPageLRU(page))
			BUG();
		if (!TestClearPageActive(page))
			BUG();
		list_move(&page->lru, &zone->inactive_list);
		pgmoved++;
		if (!pagevec_add(&pvec, page)) {
			zone->nr_inactive += pgmoved;
			spin_unlock_irq(&zone->lru_lock);
			pgdeactivate += pgmoved;
			pgmoved = 0;
			if (buffer_heads_over_limit)
				pagevec_strip(&pvec);
			__pagevec_release(&pvec);
			spin_lock_irq(&zone->lru_lock);
		}
	}
	zone->nr_inactive += pgmoved;
	pgdeactivate += pgmoved;
	if (buffer_heads_over_limit) {
		spin_unlock_irq(&zone->lru_lock);
		pagevec_strip(&pvec);
		spin_lock_irq(&zone->lru_lock);
	}
	pgmoved = 0;
	while (!list_empty(&l_active)) {
		page = list_entry(l_active.prev, struct page, lru);
		prefetchw_prev_lru_page(page, &l_active, flags);
		if (TestSetPageLRU(page))
			BUG();
		BUG_ON(!PageActive(page));
		list_move(&page->lru, &zone->active_list);
		pgmoved++;
		if (!pagevec_add(&pvec, page)) {
			zone->nr_active += pgmoved;
			pgmoved = 0;
			spin_unlock_irq(&zone->lru_lock);
			__pagevec_release(&pvec);
			spin_lock_irq(&zone->lru_lock);
		}
	}
	zone->nr_active += pgmoved;
	spin_unlock_irq(&zone->lru_lock);
	pagevec_release(&pvec);

	mod_page_state(pgrefill, nr_pages_in - nr_pages);
	mod_page_state(pgdeactivate, pgdeactivate);
}
/*
 * Try to reclaim `nr_pages' from this zone.  Returns the number of reclaimed
 * pages.  This is a basic per-zone page freer.  Used by both kswapd and
 * direct reclaim.
 */
static int
shrink_zone(struct zone *zone, int max_scan, unsigned int gfp_mask,
	const int nr_pages, int *nr_mapped, struct page_state *ps, int priority)
{
	unsigned long ratio;

	/*
	 * Try to keep the active list 2/3 of the size of the cache.  And
	 * make sure that refill_inactive is given a decent number of pages.
	 *
	 * The "ratio+1" here is important.  With pagecache-intensive workloads
	 * the inactive list is huge, and `ratio' evaluates to zero all the
	 * time.  Which pins the active list memory.  So we add one to `ratio'
	 * just to make sure that the kernel will slowly sift through the
	 * active list.
	 */
	ratio = (unsigned long)nr_pages * zone->nr_active /
				((zone->nr_inactive | 1) * 2);
	atomic_add(ratio+1, &zone->refill_counter);
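	/*
	 * Illustrative figures (not from the original source): asked for
	 * nr_pages == 32 with nr_active = 3000 and nr_inactive = 600000,
	 * ratio = 32*3000 / (600001*2) == 0, so only the "+1" trickles into
	 * refill_counter.  With nr_active = 30000 and nr_inactive = 3000,
	 * ratio = 32*30000 / (3001*2) == 159, and refill_inactive_zone()
	 * gets a real batch once the counter passes SWAP_CLUSTER_MAX.
	 */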
	if (atomic_read(&zone->refill_counter) > SWAP_CLUSTER_MAX) {
		int count;

		/*
		 * Don't try to bring down too many pages in one attempt.
		 * If this fails, the caller will increase `priority' and
		 * we'll try again, with an increased chance of reclaiming
		 * mapped pages.
		 */
		count = atomic_read(&zone->refill_counter);
		if (count > SWAP_CLUSTER_MAX * 4)
			count = SWAP_CLUSTER_MAX * 4;
		atomic_sub(count, &zone->refill_counter);
		refill_inactive_zone(zone, count, ps, priority);
	}
	return shrink_cache(nr_pages, zone, gfp_mask,
				max_scan, nr_mapped);
}
/*
 * This is the direct reclaim path, for page-allocating processes.  We only
 * try to reclaim pages from zones which will satisfy the caller's allocation
 * request.
 *
 * We reclaim from a zone even if that zone is over pages_high.  Because:
 * a) The caller may be trying to free *extra* pages to satisfy a higher-order
 *    allocation or
 * b) The zones may be over pages_high but they must go *over* pages_high to
 *    satisfy the `incremental min' zone defense algorithm.
 *
 * Returns the number of reclaimed pages.
 *
 * If a zone is deemed to be full of pinned pages then just give it a light
 * scan then give up on it.
 */
static int
shrink_caches(struct zone *classzone, int priority, int *total_scanned,
		int gfp_mask, int nr_pages, struct page_state *ps)
{
	struct zone *first_classzone;
	struct zone *zone;
	int ret = 0;

	first_classzone = classzone->zone_pgdat->node_zones;
	for (zone = classzone; zone >= first_classzone; zone--) {
		int to_reclaim = max(nr_pages, SWAP_CLUSTER_MAX);
		int nr_mapped = 0;
		int max_scan;

		if (zone->all_unreclaimable && priority != DEF_PRIORITY)
			continue;	/* Let kswapd poll it */

		/*
		 * If we cannot reclaim `nr_pages' pages by scanning twice
		 * that many pages then fall back to the next zone.
		 */
		max_scan = zone->nr_inactive >> priority;
		if (max_scan < to_reclaim * 2)
			max_scan = to_reclaim * 2;
		ret += shrink_zone(zone, max_scan, gfp_mask,
				to_reclaim, &nr_mapped, ps, priority);
		*total_scanned += max_scan + nr_mapped;
	}
	return ret;
}
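/*
 * Illustrative figures (not from the original source): with priority 12 and
 * nr_inactive = 100000, nr_inactive >> priority is only 24, well under
 * to_reclaim * 2 == 64 (taking to_reclaim == SWAP_CLUSTER_MAX == 32), so
 * max_scan is raised to 64; by priority 9 the shift yields 195 and the
 * zone's own size starts driving how much gets scanned.
 */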
/*
 * This is the main entry point to direct page reclaim.
 *
 * If a full scan of the inactive list fails to free enough memory then we
 * are "out of memory" and something needs to be killed.
 *
 * If the caller is !__GFP_FS then the probability of a failure is reasonably
 * high - the zone may be full of dirty or under-writeback pages, which this
 * caller can't do much about.  So for !__GFP_FS callers, we just perform a
 * small LRU walk and if that didn't work out, fail the allocation back to the
 * caller.  GFP_NOFS allocators need to know how to deal with it.  Kicking
 * bdflush, waiting and retrying will work.
 *
 * This is a fairly lame algorithm - it can result in excessive CPU burning and
 * excessive rotation of the inactive list, which is _supposed_ to be an LRU.
 */
int try_to_free_pages(struct zone *cz,
		unsigned int gfp_mask, unsigned int order)
{
	int priority;
	int ret = 0;
	const int nr_pages = SWAP_CLUSTER_MAX;
	int nr_reclaimed = 0;
	struct reclaim_state *reclaim_state = current->reclaim_state;

	inc_page_state(allocstall);

	for (priority = DEF_PRIORITY; priority >= 0; priority--) {
		int total_scanned = 0;
		struct page_state ps;

		get_page_state(&ps);
		nr_reclaimed += shrink_caches(cz, priority, &total_scanned,
						gfp_mask, nr_pages, &ps);
		if (nr_reclaimed >= nr_pages) {
			ret = 1;
			goto out;
		}
		if (!(gfp_mask & __GFP_FS))
			break;		/* Let the caller handle it */
		/*
		 * Try to write back as many pages as we just scanned.  Not
		 * sure if that makes sense, but it's an attempt to avoid
		 * creating IO storms unnecessarily
		 */
		wakeup_bdflush(total_scanned);

		/* Take a nap, wait for some writeback to complete */
		blk_congestion_wait(WRITE, HZ/10);
		if (cz - cz->zone_pgdat->node_zones < ZONE_HIGHMEM) {
			shrink_slab(total_scanned, gfp_mask);
			if (reclaim_state) {
				nr_reclaimed += reclaim_state->reclaimed_slab;
				reclaim_state->reclaimed_slab = 0;
			}
		}
	}
	if ((gfp_mask & __GFP_FS) && !(gfp_mask & __GFP_NORETRY))
		out_of_memory();
out:
	return ret;
}
/*
 * For kswapd, balance_pgdat() will work across all this node's zones until
 * they are all at pages_high.
 *
 * If `nr_pages' is non-zero then it is the number of pages which are to be
 * reclaimed, regardless of the zone occupancies.  This is a software suspend
 * special case.
 *
 * Returns the number of pages which were actually freed.
 *
 * There is special handling here for zones which are full of pinned pages.
 * This can happen if the pages are all mlocked, or if they are all used by
 * device drivers (say, ZONE_DMA).  Or if they are all in use by hugetlb.
 * What we do is to detect the case where all pages in the zone have been
 * scanned twice and there has been zero successful reclaim.  Mark the zone as
 * dead and from now on, only perform a short scan.  Basically we're polling
 * the zone for when the problem goes away.
 */
static int balance_pgdat(pg_data_t *pgdat, int nr_pages, struct page_state *ps)
{
	int to_free = nr_pages;
	int priority;
	int i;
	struct reclaim_state *reclaim_state = current->reclaim_state;

	inc_page_state(pageoutrun);

	for (priority = DEF_PRIORITY; priority; priority--) {
		int all_zones_ok = 1;

		for (i = 0; i < pgdat->nr_zones; i++) {
			struct zone *zone = pgdat->node_zones + i;
			int nr_mapped = 0;
			int max_scan;
			int to_reclaim;

			if (zone->all_unreclaimable && priority != DEF_PRIORITY)
				continue;

			if (nr_pages && to_free > 0) {	/* Software suspend */
				to_reclaim = min(to_free, SWAP_CLUSTER_MAX*8);
			} else {			/* Zone balancing */
				to_reclaim = zone->pages_high-zone->free_pages;
				if (to_reclaim <= 0)
					continue;
			}
			all_zones_ok = 0;
			max_scan = zone->nr_inactive >> priority;
			if (max_scan < to_reclaim * 2)
				max_scan = to_reclaim * 2;
			if (max_scan < SWAP_CLUSTER_MAX)
				max_scan = SWAP_CLUSTER_MAX;
			to_free -= shrink_zone(zone, max_scan, GFP_KERNEL,
					to_reclaim, &nr_mapped, ps, priority);
			if (i < ZONE_HIGHMEM) {
				reclaim_state->reclaimed_slab = 0;
				shrink_slab(max_scan + nr_mapped, GFP_KERNEL);
				to_free += reclaim_state->reclaimed_slab;
			}
			if (zone->all_unreclaimable)
				continue;
			if (zone->pages_scanned > zone->present_pages * 2)
				zone->all_unreclaimable = 1;
		}
		if (all_zones_ok)
			break;
		blk_congestion_wait(WRITE, HZ/10);
	}
	return nr_pages - to_free;
}
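/*
 * Illustrative figures (not from the original source): for a zone with
 * present_pages = 4096, once pages_scanned grows past 4096 * 2 = 8192 with
 * no successful reclaim, the zone is marked all_unreclaimable above and
 * later passes skip it apart from the light poll at DEF_PRIORITY.
 */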
/*
 * The background pageout daemon, started as a kernel thread
 * from the init process.
 *
 * This basically trickles out pages so that we have _some_
 * free memory available even if there is no other activity
 * that frees anything up. This is needed for things like routing
 * etc, where we otherwise might have all activity going on in
 * asynchronous contexts that cannot page things out.
 *
 * If there are applications that are active memory-allocators
 * (most normal use), this basically shouldn't matter.
 */
static int kswapd(void *p)
{
	pg_data_t *pgdat = (pg_data_t *)p;
	struct task_struct *tsk = current;
	DEFINE_WAIT(wait);
	struct reclaim_state reclaim_state = {
		.reclaimed_slab = 0,
	};
	unsigned long cpumask;

	daemonize("kswapd%d", pgdat->node_id);
	cpumask = node_to_cpumask(pgdat->node_id);
	set_cpus_allowed(tsk, cpumask);
	current->reclaim_state = &reclaim_state;

	/*
	 * Tell the memory management that we're a "memory allocator",
	 * and that if we need more memory we should get access to it
	 * regardless (see "__alloc_pages()"). "kswapd" should
	 * never get caught in the normal page freeing logic.
	 *
	 * (Kswapd normally doesn't need memory anyway, but sometimes
	 * you need a small amount of memory in order to be able to
	 * page out something else, and this flag essentially protects
	 * us from recursively trying to free more memory as we're
	 * trying to free the first piece of memory in the first place).
	 */
	tsk->flags |= PF_MEMALLOC|PF_KSWAPD;

	for ( ; ; ) {
		struct page_state ps;

		if (current->flags & PF_FREEZE)
			refrigerator(PF_IOTHREAD);
		prepare_to_wait(&pgdat->kswapd_wait, &wait, TASK_INTERRUPTIBLE);
		schedule();
		finish_wait(&pgdat->kswapd_wait, &wait);
		get_page_state(&ps);
		balance_pgdat(pgdat, 0, &ps);
	}
}
/*
 * A zone is low on free memory, so wake its kswapd task to service it.
 */
void wakeup_kswapd(struct zone *zone)
{
	if (zone->free_pages > zone->pages_low)
		return;
	if (!waitqueue_active(&zone->zone_pgdat->kswapd_wait))
		return;
	wake_up_interruptible(&zone->zone_pgdat->kswapd_wait);
}
#ifdef CONFIG_SOFTWARE_SUSPEND
/*
 * Try to free `nr_pages' of memory, system-wide.  Returns the number of freed
 * pages.
 */
int shrink_all_memory(int nr_pages)
{
	pg_data_t *pgdat;
	int nr_to_free = nr_pages;
	int ret = 0;
	struct reclaim_state reclaim_state = {
		.reclaimed_slab = 0,
	};

	current->reclaim_state = &reclaim_state;
	for_each_pgdat(pgdat) {
		int freed;
		struct page_state ps;

		get_page_state(&ps);
		freed = balance_pgdat(pgdat, nr_to_free, &ps);
		ret += freed;
		nr_to_free -= freed;
		if (nr_to_free <= 0)
			break;
	}
	current->reclaim_state = NULL;
	return ret;
}
#endif
static int __init kswapd_init(void)
{
	pg_data_t *pgdat;

	for_each_pgdat(pgdat)
		kernel_thread(kswapd, pgdat, CLONE_KERNEL);
	total_memory = nr_free_pagecache_pages();
	return 0;
}

module_init(kswapd_init)