/*
 *  linux/mm/vmscan.c
 *
 *  Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
 *
 *  Swap reorganised 29.12.95, Stephen Tweedie.
 *  kswapd added: 7.1.96  sct
 *  Removed kswapd_ctl limits, and swap out as many pages as needed
 *  to bring the system back to freepages.high: 2.4.97, Rik van Riel.
 *  Zone aware kswapd started 02/00, Kanoj Sarcar (kanoj@sgi.com).
 *  Multiqueue VM started 5.8.00, Rik van Riel.
 */

#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/kernel_stat.h>
#include <linux/swap.h>
#include <linux/pagemap.h>
#include <linux/init.h>
#include <linux/highmem.h>
#include <linux/file.h>
#include <linux/writeback.h>
#include <linux/suspend.h>
#include <linux/blkdev.h>
#include <linux/buffer_head.h>	/* for try_to_release_page(),
					buffer_heads_over_limit */
#include <linux/mm_inline.h>
#include <linux/pagevec.h>
#include <linux/backing-dev.h>
#include <linux/rmap-locking.h>
#include <linux/topology.h>

#include <asm/pgalloc.h>
#include <asm/tlbflush.h>
#include <asm/div64.h>

#include <linux/swapops.h>

/*
 * The "priority" of VM scanning is how much of the queues we will scan in one
 * go. A value of 12 for DEF_PRIORITY implies that we will scan 1/4096th of the
 * queues ("queue_length >> 12") during an aging round.
 */
#define DEF_PRIORITY 12
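
/*
 * Illustrative arithmetic (example numbers, not from this file): at
 * DEF_PRIORITY == 12, an LRU list of 1,000,000 pages is scanned in batches
 * of roughly 1000000 >> 12 == 244 pages per aging round.
 */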

/*
 * From 0 .. 100.  Higher means more swappy.
 */
int vm_swappiness = 60;
static long total_memory;

#ifdef ARCH_HAS_PREFETCH
#define prefetch_prev_lru_page(_page, _base, _field)			\
	do {								\
		if ((_page)->lru.prev != _base) {			\
			struct page *prev;				\
									\
			prev = list_entry(_page->lru.prev,		\
					struct page, lru);		\
			prefetch(&prev->_field);			\
		}							\
	} while (0)
#else
#define prefetch_prev_lru_page(_page, _base, _field) do { } while (0)
#endif

#ifdef ARCH_HAS_PREFETCHW
#define prefetchw_prev_lru_page(_page, _base, _field)			\
	do {								\
		if ((_page)->lru.prev != _base) {			\
			struct page *prev;				\
									\
			prev = list_entry(_page->lru.prev,		\
					struct page, lru);		\
			prefetchw(&prev->_field);			\
		}							\
	} while (0)
#else
#define prefetchw_prev_lru_page(_page, _base, _field) do { } while (0)
#endif

/*
 * The list of shrinker callbacks used by the VM to apply pressure to
 * ageable caches.
 */
struct shrinker {
	shrinker_t		shrinker;
	struct list_head	list;
	int			seeks;	/* seeks to recreate an obj */
	long			nr;	/* objs pending delete */
};

static LIST_HEAD(shrinker_list);
static DECLARE_MUTEX(shrinker_sem);

/*
 * Add a shrinker callback to be called from the vm
 */
struct shrinker *set_shrinker(int seeks, shrinker_t theshrinker)
{
	struct shrinker *shrinker;

	shrinker = kmalloc(sizeof(*shrinker), GFP_KERNEL);
	if (shrinker) {
		shrinker->shrinker = theshrinker;
		shrinker->seeks = seeks;
		shrinker->nr = 0;
		down(&shrinker_sem);
		list_add(&shrinker->list, &shrinker_list);
		up(&shrinker_sem);
	}
	return shrinker;
}

/*
 * Remove one
 */
void remove_shrinker(struct shrinker *shrinker)
{
	down(&shrinker_sem);
	list_del(&shrinker->list);
	up(&shrinker_sem);
	kfree(shrinker);
}
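
/*
 * Illustrative usage sketch (the cache, callback and helper names below are
 * hypothetical, not part of this file): a subsystem that wants reclaim
 * pressure applied to its cache registers a shrinker_t callback.  The
 * callback is queried with nr_to_scan == 0 for its current object count
 * (see shrink_slab() below) and called with a positive count to do the
 * actual pruning.
 *
 *	static int my_cache_shrink(int nr_to_scan, unsigned int gfp_mask)
 *	{
 *		if (nr_to_scan)
 *			prune_my_cache(nr_to_scan);	// hypothetical helper
 *		return my_cache_object_count();		// hypothetical helper
 *	}
 *
 *	my_shrinker = set_shrinker(DEFAULT_SEEKS, my_cache_shrink);
 *	...
 *	remove_shrinker(my_shrinker);
 */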

#define SHRINK_BATCH 128
/*
 * Call the shrink functions to age shrinkable caches
 *
 * Here we assume it costs one seek to replace a lru page and that it also
 * takes a seek to recreate a cache object.  With this in mind we age equal
 * percentages of the lru and ageable caches.  This should balance the seeks
 * generated by these structures.
 *
 * If the VM encountered mapped pages on the LRU it increases the pressure on
 * slab to avoid swapping.
 *
 * We do weird things to avoid (scanned*seeks*entries) overflowing 32 bits.
 */
static int shrink_slab(long scanned, unsigned int gfp_mask)
{
	struct shrinker *shrinker;
	long pages;

	if (down_trylock(&shrinker_sem))
		return 0;

	pages = nr_used_zone_pages();
	list_for_each_entry(shrinker, &shrinker_list, list) {
		unsigned long long delta;

		delta = scanned * shrinker->seeks;
		delta *= (*shrinker->shrinker)(0, gfp_mask);
		do_div(delta, pages + 1);
		shrinker->nr += delta;
		if (shrinker->nr > SHRINK_BATCH) {
			long nr_to_scan = shrinker->nr;

			shrinker->nr = 0;
			while (nr_to_scan) {
				long this_scan = nr_to_scan;

				if (this_scan > 128)
					this_scan = 128;
				(*shrinker->shrinker)(this_scan, gfp_mask);
				nr_to_scan -= this_scan;
				cond_resched();
			}
		}
	}
	up(&shrinker_sem);
	return 0;
}
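
/*
 * Worked example of the proportional aging above (illustrative numbers, not
 * from this file): with scanned == 1024 LRU pages, seeks == 2, a cache
 * reporting 50000 objects and 250000 used zone pages,
 * delta = 1024 * 2 * 50000 / 250001 ~= 409, so about 409 objects are queued
 * on shrinker->nr; once that exceeds SHRINK_BATCH the cache is asked to
 * prune in chunks of at most 128 objects.
 */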

/* Must be called with page's pte_chain_lock held. */
static inline int page_mapping_inuse(struct page *page)
{
	struct address_space *mapping = page->mapping;

	/* Page is in somebody's page tables. */
	if (page_mapped(page))
		return 1;

	/* XXX: does this happen ? */
	if (!mapping)
		return 0;

	/* Be more reluctant to reclaim swapcache than pagecache */
	if (PageSwapCache(page))
		return 1;

	/* File is mmap'd by somebody. */
	if (!list_empty(&mapping->i_mmap))
		return 1;
	if (!list_empty(&mapping->i_mmap_shared))
		return 1;

	return 0;
}
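
/*
 * A freeable pagecache page carries exactly two references at this point:
 * one from the pagecache itself and one from the caller which isolated it
 * from the LRU.  Buffer heads attached at page->private pin one more
 * reference, which the !!PagePrivate() term cancels out.
 */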
static inline int is_page_cache_freeable(struct page *page)
{
	return page_count(page) - !!PagePrivate(page) == 2;
}

static int may_write_to_queue(struct backing_dev_info *bdi)
{
	if (current_is_kswapd())
		return 1;
	if (current_is_pdflush())	/* This is unlikely, but why not... */
		return 1;
	if (!bdi_write_congested(bdi))
		return 1;
	if (bdi == current->backing_dev_info)
		return 1;
	return 0;
}

/*
 * We detected a synchronous write error writing a page out.  Probably
 * -ENOSPC.  We need to propagate that into the address_space for a subsequent
 * fsync(), msync() or close().
 *
 * The tricky part is that after writepage we cannot touch the mapping: nothing
 * prevents it from being freed up.  But we have a ref on the page and once
 * that page is locked, the mapping is pinned.
 *
 * We're allowed to run sleeping lock_page() here because we know the caller has
 * __GFP_FS.
 */
static void handle_write_error(struct address_space *mapping,
				struct page *page, int error)
{
	lock_page(page);
	if (page->mapping == mapping) {
		if (error == -ENOSPC)
			set_bit(AS_ENOSPC, &mapping->flags);
		else
			set_bit(AS_EIO, &mapping->flags);
	}
	unlock_page(page);
}

/*
 * shrink_list returns the number of reclaimed pages
 */
static int
shrink_list(struct list_head *page_list, unsigned int gfp_mask,
		int *max_scan, int *nr_mapped)
{
	struct address_space *mapping;
	LIST_HEAD(ret_pages);
	struct pagevec freed_pvec;
	int pgactivate = 0;
	int ret = 0;

	cond_resched();

	pagevec_init(&freed_pvec, 1);
	while (!list_empty(page_list)) {
		struct page *page;
		int may_enter_fs;
		int referenced;

		page = list_entry(page_list->prev, struct page, lru);
		list_del(&page->lru);

		if (TestSetPageLocked(page))
			goto keep;

		/* Double the slab pressure for mapped and swapcache pages */
		if (page_mapped(page) || PageSwapCache(page))
			(*nr_mapped)++;

		BUG_ON(PageActive(page));
		may_enter_fs = (gfp_mask & __GFP_FS) ||
			(PageSwapCache(page) && (gfp_mask & __GFP_IO));

		if (PageWriteback(page))
			goto keep_locked;

		pte_chain_lock(page);
		referenced = page_referenced(page);
		if (referenced && page_mapping_inuse(page)) {
			/* In active use or really unfreeable.  Activate it. */
			pte_chain_unlock(page);
			goto activate_locked;
		}

		mapping = page->mapping;

#ifdef CONFIG_SWAP
		/*
		 * Anonymous process memory without backing store. Try to
		 * allocate it some swap space here.
		 *
		 * XXX: implement swap clustering ?
		 */
		if (page_mapped(page) && !mapping && !PagePrivate(page)) {
			pte_chain_unlock(page);
			if (!add_to_swap(page))
				goto activate_locked;
			pte_chain_lock(page);
			mapping = page->mapping;
		}
#endif /* CONFIG_SWAP */

		/*
		 * The page is mapped into the page tables of one or more
		 * processes. Try to unmap it here.
		 */
		if (page_mapped(page) && mapping) {
			switch (try_to_unmap(page)) {
			case SWAP_FAIL:
				pte_chain_unlock(page);
				goto activate_locked;
			case SWAP_AGAIN:
				pte_chain_unlock(page);
				goto keep_locked;
			case SWAP_SUCCESS:
				; /* try to free the page below */
			}
		}
		pte_chain_unlock(page);

		/*
		 * If the page is dirty, only perform writeback if that write
		 * will be non-blocking, to prevent this allocation from being
		 * stalled by pagecache activity.  But note that there may be
		 * stalls if we need to run get_block().  We could test
		 * PagePrivate for that.
		 *
		 * If this process is currently in generic_file_write() against
		 * this page's queue, we can perform writeback even if that
		 * will block.
		 *
		 * If the page is swapcache, write it back even if that would
		 * block, for some throttling.  This happens by accident, because
		 * swap_backing_dev_info is bust: it doesn't reflect the
		 * congestion state of the swapdevs.  Easy to fix, if needed.
		 * See swapfile.c:page_queue_congested().
		 */
		if (PageDirty(page)) {
			if (referenced)
				goto keep_locked;
			if (!is_page_cache_freeable(page))
				goto keep_locked;
			if (!mapping)
				goto keep_locked;
			if (mapping->a_ops->writepage == NULL)
				goto activate_locked;
			if (!may_enter_fs)
				goto keep_locked;
			if (!may_write_to_queue(mapping->backing_dev_info))
				goto keep_locked;
			spin_lock(&mapping->page_lock);
			if (test_clear_page_dirty(page)) {
				int res;
				struct writeback_control wbc = {
					.sync_mode = WB_SYNC_NONE,
					.nr_to_write = SWAP_CLUSTER_MAX,
					.nonblocking = 1,
					.for_reclaim = 1,
				};

				list_move(&page->list, &mapping->locked_pages);
				spin_unlock(&mapping->page_lock);

				SetPageReclaim(page);
				res = mapping->a_ops->writepage(page, &wbc);
				if (res < 0)
					handle_write_error(mapping, page, res);
				if (res == WRITEPAGE_ACTIVATE) {
					ClearPageReclaim(page);
					goto activate_locked;
				}
				if (!PageWriteback(page)) {
					/* synchronous write or broken a_ops? */
					ClearPageReclaim(page);
				}
				goto keep;
			}
			spin_unlock(&mapping->page_lock);
		}

		/*
		 * If the page has buffers, try to free the buffer mappings
		 * associated with this page. If we succeed we try to free
		 * the page as well.
		 *
		 * We do this even if the page is PageDirty().
		 * try_to_release_page() does not perform I/O, but it is
		 * possible for a page to have PageDirty set, but it is actually
		 * clean (all its buffers are clean).  This happens if the
		 * buffers were written out directly, with submit_bh(). ext3
		 * will do this, as well as the blockdev mapping.
		 * try_to_release_page() will discover that cleanness and will
		 * drop the buffers and mark the page clean - it can be freed.
		 *
		 * Rarely, pages can have buffers and no ->mapping.  These are
		 * the pages which were not successfully invalidated in
		 * truncate_complete_page().  We try to drop those buffers here
		 * and if that worked, and the page is no longer mapped into
		 * process address space (page_count == 1) it can be freed.
		 * Otherwise, leave the page on the LRU so it is swappable.
		 */
		if (PagePrivate(page)) {
			if (!try_to_release_page(page, gfp_mask))
				goto activate_locked;
			if (!mapping && page_count(page) == 1)
				goto free_it;
		}

		if (!mapping)
			goto keep_locked;	/* truncate got there first */

		spin_lock(&mapping->page_lock);

		/*
		 * The non-racy check for busy page.  It is critical to check
		 * PageDirty _after_ making sure that the page is freeable and
		 * not in use by anybody.  (pagecache + us == 2)
		 */
		if (page_count(page) != 2 || PageDirty(page)) {
			spin_unlock(&mapping->page_lock);
			goto keep_locked;
		}

#ifdef CONFIG_SWAP
		if (PageSwapCache(page)) {
			swp_entry_t swap = { .val = page->index };
			__delete_from_swap_cache(page);
			spin_unlock(&mapping->page_lock);
			swap_free(swap);
			__put_page(page);	/* The pagecache ref */
			goto free_it;
		}
#endif /* CONFIG_SWAP */

		__remove_from_page_cache(page);
		spin_unlock(&mapping->page_lock);
		__put_page(page);

free_it:
		unlock_page(page);
		ret++;
		if (!pagevec_add(&freed_pvec, page))
			__pagevec_release_nonlru(&freed_pvec);
		continue;

activate_locked:
		SetPageActive(page);
		pgactivate++;
keep_locked:
		unlock_page(page);
keep:
		list_add(&page->lru, &ret_pages);
		BUG_ON(PageLRU(page));
	}
	list_splice(&ret_pages, page_list);
	if (pagevec_count(&freed_pvec))
		__pagevec_release_nonlru(&freed_pvec);
	mod_page_state(pgsteal, ret);
	if (current_is_kswapd())
		mod_page_state(kswapd_steal, ret);
	mod_page_state(pgactivate, pgactivate);
	return ret;
}

/*
 * zone->lru_lock is heavily contended.  We relieve it by quickly privatising
 * a batch of pages and working on them outside the lock.  Any pages which were
 * not freed will be added back to the LRU.
 *
 * shrink_cache() is passed the number of pages to try to free, and returns
 * the number of pages which were reclaimed.
 *
 * For pagecache intensive workloads, the first loop here is the hottest spot
 * in the kernel (apart from the copy_*_user functions).
 */
static int
shrink_cache(const int nr_pages, struct zone *zone,
		unsigned int gfp_mask, int max_scan, int *nr_mapped)
{
	LIST_HEAD(page_list);
	struct pagevec pvec;
	int nr_to_process;
	int ret = 0;

	/*
	 * Try to ensure that we free `nr_pages' pages in one pass of the loop.
	 */
	nr_to_process = nr_pages;
	if (nr_to_process < SWAP_CLUSTER_MAX)
		nr_to_process = SWAP_CLUSTER_MAX;

	pagevec_init(&pvec, 1);

	lru_add_drain();
	spin_lock_irq(&zone->lru_lock);
	while (max_scan > 0 && ret < nr_pages) {
		struct page *page;
		int nr_taken = 0;
		int nr_scan = 0;
		int nr_freed;

		while (nr_scan++ < nr_to_process &&
				!list_empty(&zone->inactive_list)) {
			page = list_entry(zone->inactive_list.prev,
						struct page, lru);

			prefetchw_prev_lru_page(page,
						&zone->inactive_list, flags);

			if (!TestClearPageLRU(page))
				BUG();
			list_del(&page->lru);
			if (page_count(page) == 0) {
				/* It is currently in pagevec_release() */
				SetPageLRU(page);
				list_add(&page->lru, &zone->inactive_list);
				continue;
			}
			list_add(&page->lru, &page_list);
			page_cache_get(page);
			nr_taken++;
		}
		zone->nr_inactive -= nr_taken;
		zone->pages_scanned += nr_taken;
		spin_unlock_irq(&zone->lru_lock);

		if (nr_taken == 0)
			goto done;

		max_scan -= nr_scan;
		mod_page_state(pgscan, nr_scan);
		nr_freed = shrink_list(&page_list, gfp_mask,
					&max_scan, nr_mapped);
		ret += nr_freed;
		if (nr_freed <= 0 && list_empty(&page_list))
			goto done;

		spin_lock_irq(&zone->lru_lock);
		/*
		 * Put back any unfreeable pages.
		 */
		while (!list_empty(&page_list)) {
			page = list_entry(page_list.prev, struct page, lru);
			if (TestSetPageLRU(page))
				BUG();
			list_del(&page->lru);
			if (PageActive(page))
				add_page_to_active_list(zone, page);
			else
				add_page_to_inactive_list(zone, page);
			if (!pagevec_add(&pvec, page)) {
				spin_unlock_irq(&zone->lru_lock);
				__pagevec_release(&pvec);
				spin_lock_irq(&zone->lru_lock);
			}
		}
	}
	spin_unlock_irq(&zone->lru_lock);
done:
	pagevec_release(&pvec);
	return ret;
}

/*
 * This moves pages from the active list to the inactive list.
 *
 * We move them the other way if the page is referenced by one or more
 * processes, from rmap.
 *
 * If the pages are mostly unmapped, the processing is fast and it is
 * appropriate to hold zone->lru_lock across the whole operation.  But if
 * the pages are mapped, the processing is slow (page_referenced()) so we
 * should drop zone->lru_lock around each page.  It's impossible to balance
 * this, so instead we remove the pages from the LRU while processing them.
 * It is safe to rely on PG_active against the non-LRU pages in here because
 * nobody will play with that bit on a non-LRU page.
 *
 * The downside is that we have to touch page->count against each page.
 * But we had to alter page->flags anyway.
 */
static void
refill_inactive_zone(struct zone *zone, const int nr_pages_in,
			struct page_state *ps, int priority)
{
	int pgmoved;
	int pgdeactivate = 0;
	int nr_pages = nr_pages_in;
	LIST_HEAD(l_hold);	/* The pages which were snipped off */
	LIST_HEAD(l_inactive);	/* Pages to go onto the inactive_list */
	LIST_HEAD(l_active);	/* Pages to go onto the active_list */
	struct page *page;
	struct pagevec pvec;
	int reclaim_mapped = 0;
	long mapped_ratio;
	long distress;
	long swap_tendency;

	lru_add_drain();
	pgmoved = 0;
	spin_lock_irq(&zone->lru_lock);
	while (nr_pages && !list_empty(&zone->active_list)) {
		page = list_entry(zone->active_list.prev, struct page, lru);
		prefetchw_prev_lru_page(page, &zone->active_list, flags);
		if (!TestClearPageLRU(page))
			BUG();
		list_del(&page->lru);
		if (page_count(page) == 0) {
			/* It is currently in pagevec_release() */
			SetPageLRU(page);
			list_add(&page->lru, &zone->active_list);
		} else {
			page_cache_get(page);
			list_add(&page->lru, &l_hold);
			pgmoved++;
		}
		nr_pages--;
	}
	zone->nr_active -= pgmoved;
	spin_unlock_irq(&zone->lru_lock);

	/*
	 * `distress' is a measure of how much trouble we're having reclaiming
	 * pages.  0 -> no problems.  100 -> great trouble.
	 */
	distress = 100 >> zone->prev_priority;

	/*
	 * The point of this algorithm is to decide when to start reclaiming
	 * mapped memory instead of just pagecache.  Work out how much memory
	 * is mapped.
	 */
	mapped_ratio = (ps->nr_mapped * 100) / total_memory;

	/*
	 * Now decide how much we really want to unmap some pages.  The mapped
	 * ratio is downgraded - just because there's a lot of mapped memory
	 * doesn't necessarily mean that page reclaim isn't succeeding.
	 *
	 * The distress ratio is important - we don't want to start going oom.
	 *
	 * A 100% value of vm_swappiness overrides this algorithm altogether.
	 */
	swap_tendency = mapped_ratio / 2 + distress + vm_swappiness;

	/*
	 * Now use this metric to decide whether to start moving mapped memory
	 * onto the inactive list.
	 */
	if (swap_tendency >= 100)
		reclaim_mapped = 1;
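
	/*
	 * Illustrative arithmetic (example numbers, not from this file):
	 * with prev_priority == DEF_PRIORITY the zone is healthy, so
	 * distress = 100 >> 12 = 0; if 60% of memory is mapped and
	 * vm_swappiness is the default 60, swap_tendency = 60/2 + 0 + 60 = 90,
	 * which is below 100, so mapped pages are left alone.  Under pressure
	 * (prev_priority == 2), distress = 100 >> 2 = 25 and
	 * swap_tendency = 30 + 25 + 60 = 115, so reclaim_mapped is set.
	 */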

	while (!list_empty(&l_hold)) {
		page = list_entry(l_hold.prev, struct page, lru);
		list_del(&page->lru);
		if (page_mapped(page)) {
			pte_chain_lock(page);
			if (page_mapped(page) && page_referenced(page)) {
				pte_chain_unlock(page);
				list_add(&page->lru, &l_active);
				continue;
			}
			pte_chain_unlock(page);
			if (!reclaim_mapped) {
				list_add(&page->lru, &l_active);
				continue;
			}
		}
		/*
		 * FIXME: need to consider page_count(page) here if/when we
		 * reap orphaned pages via the LRU (Daniel's locking stuff)
		 */
		if (total_swap_pages == 0 && !page->mapping &&
						!PagePrivate(page)) {
			list_add(&page->lru, &l_active);
			continue;
		}
		list_add(&page->lru, &l_inactive);
	}

	pagevec_init(&pvec, 1);
	pgmoved = 0;
	spin_lock_irq(&zone->lru_lock);
	while (!list_empty(&l_inactive)) {
		page = list_entry(l_inactive.prev, struct page, lru);
		prefetchw_prev_lru_page(page, &l_inactive, flags);
		if (TestSetPageLRU(page))
			BUG();
		if (!TestClearPageActive(page))
			BUG();
		list_move(&page->lru, &zone->inactive_list);
		pgmoved++;
		if (!pagevec_add(&pvec, page)) {
			zone->nr_inactive += pgmoved;
			spin_unlock_irq(&zone->lru_lock);
			pgdeactivate += pgmoved;
			pgmoved = 0;
			if (buffer_heads_over_limit)
				pagevec_strip(&pvec);
			__pagevec_release(&pvec);
			spin_lock_irq(&zone->lru_lock);
		}
	}
	zone->nr_inactive += pgmoved;
	pgdeactivate += pgmoved;
	if (buffer_heads_over_limit) {
		spin_unlock_irq(&zone->lru_lock);
		pagevec_strip(&pvec);
		spin_lock_irq(&zone->lru_lock);
	}

	pgmoved = 0;
	while (!list_empty(&l_active)) {
		page = list_entry(l_active.prev, struct page, lru);
		prefetchw_prev_lru_page(page, &l_active, flags);
		if (TestSetPageLRU(page))
			BUG();
		BUG_ON(!PageActive(page));
		list_move(&page->lru, &zone->active_list);
		pgmoved++;
		if (!pagevec_add(&pvec, page)) {
			zone->nr_active += pgmoved;
			pgmoved = 0;
			spin_unlock_irq(&zone->lru_lock);
			__pagevec_release(&pvec);
			spin_lock_irq(&zone->lru_lock);
		}
	}
	zone->nr_active += pgmoved;
	spin_unlock_irq(&zone->lru_lock);
	pagevec_release(&pvec);

	mod_page_state(pgrefill, nr_pages_in - nr_pages);
	mod_page_state(pgdeactivate, pgdeactivate);
}

/*
 * Try to reclaim `nr_pages' from this zone.  Returns the number of reclaimed
 * pages.  This is a basic per-zone page freer.  Used by both kswapd and
 * direct reclaim.
 */
static int
shrink_zone(struct zone *zone, int max_scan, unsigned int gfp_mask,
	const int nr_pages, int *nr_mapped, struct page_state *ps, int priority)
{
	unsigned long ratio;

	/*
	 * Try to keep the active list 2/3 of the size of the cache.  And
	 * make sure that refill_inactive is given a decent number of pages.
	 *
	 * The "ratio+1" here is important.  With pagecache-intensive workloads
	 * the inactive list is huge, and `ratio' evaluates to zero all the
	 * time.  Which pins the active list memory.  So we add one to `ratio'
	 * just to make sure that the kernel will slowly sift through the
	 * active list.
	 */
	ratio = (unsigned long)nr_pages * zone->nr_active /
				((zone->nr_inactive | 1) * 2);
	atomic_add(ratio+1, &zone->refill_counter);
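	/*
	 * Illustrative arithmetic (example numbers, not from this file):
	 * asking for nr_pages == 32 from a zone with 10000 active and 30000
	 * inactive pages gives ratio = 32 * 10000 / (30001 * 2) = 5, so 6 is
	 * added to refill_counter; once the counter exceeds SWAP_CLUSTER_MAX
	 * the active list is trimmed via refill_inactive_zone() below.
	 */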
	if (atomic_read(&zone->refill_counter) > SWAP_CLUSTER_MAX) {
		int count;

		/*
		 * Don't try to bring down too many pages in one attempt.
		 * If this fails, the caller will increase `priority' and
		 * we'll try again, with an increased chance of reclaiming
		 * mapped memory.
		 */
		count = atomic_read(&zone->refill_counter);
		if (count > SWAP_CLUSTER_MAX * 4)
			count = SWAP_CLUSTER_MAX * 4;
		atomic_sub(count, &zone->refill_counter);
		refill_inactive_zone(zone, count, ps, priority);
	}
	return shrink_cache(nr_pages, zone, gfp_mask,
				max_scan, nr_mapped);
}

/*
 * This is the direct reclaim path, for page-allocating processes.  We only
 * try to reclaim pages from zones which will satisfy the caller's allocation
 * request.
 *
 * We reclaim from a zone even if that zone is over pages_high.  Because:
 * a) The caller may be trying to free *extra* pages to satisfy a higher-order
 *    allocation or
 * b) The zones may be over pages_high but they must go *over* pages_high to
 *    satisfy the `incremental min' zone defense algorithm.
 *
 * Returns the number of reclaimed pages.
 *
 * If a zone is deemed to be full of pinned pages then just give it a light
 * scan then give up on it.
 */
static int
shrink_caches(struct zone *classzone, int priority, int *total_scanned,
		int gfp_mask, int nr_pages, struct page_state *ps)
{
	struct zone *first_classzone;
	struct zone *zone;
	int ret = 0;

	first_classzone = classzone->zone_pgdat->node_zones;
	for (zone = classzone; zone >= first_classzone; zone--) {
		int to_reclaim = max(nr_pages, SWAP_CLUSTER_MAX);
		int nr_mapped = 0;
		int max_scan;

		if (zone->free_pages < zone->pages_high)
			zone->temp_priority = priority;

		if (zone->all_unreclaimable && priority != DEF_PRIORITY)
			continue;	/* Let kswapd poll it */

		/*
		 * If we cannot reclaim `nr_pages' pages by scanning twice
		 * that many pages then fall back to the next zone.
		 */
		max_scan = zone->nr_inactive >> priority;
		if (max_scan < to_reclaim * 2)
			max_scan = to_reclaim * 2;
		ret += shrink_zone(zone, max_scan, gfp_mask,
				to_reclaim, &nr_mapped, ps, priority);
		*total_scanned += max_scan + nr_mapped;
		if (ret >= nr_pages)
			break;
	}
	return ret;
}

/*
 * This is the main entry point to direct page reclaim.
 *
 * If a full scan of the inactive list fails to free enough memory then we
 * are "out of memory" and something needs to be killed.
 *
 * If the caller is !__GFP_FS then the probability of a failure is reasonably
 * high - the zone may be full of dirty or under-writeback pages, which this
 * caller can't do much about.  So for !__GFP_FS callers, we just perform a
 * small LRU walk and if that didn't work out, fail the allocation back to the
 * caller.  GFP_NOFS allocators need to know how to deal with it.  Kicking
 * bdflush, waiting and retrying will work.
 *
 * This is a fairly lame algorithm - it can result in excessive CPU burning and
 * excessive rotation of the inactive list, which is _supposed_ to be an LRU,
 * yes?
 */
int try_to_free_pages(struct zone *cz,
		unsigned int gfp_mask, unsigned int order)
{
	int priority;
	int ret = 0;
	const int nr_pages = SWAP_CLUSTER_MAX;
	int nr_reclaimed = 0;
	struct zone *zone;
	struct reclaim_state *reclaim_state = current->reclaim_state;

	inc_page_state(allocstall);

	for (zone = cz; zone >= cz->zone_pgdat->node_zones; --zone)
		zone->temp_priority = DEF_PRIORITY;

	for (priority = DEF_PRIORITY; priority >= 0; priority--) {
		int total_scanned = 0;
		struct page_state ps;

		get_page_state(&ps);
		nr_reclaimed += shrink_caches(cz, priority, &total_scanned,
						gfp_mask, nr_pages, &ps);
		if (nr_reclaimed >= nr_pages) {
			ret = 1;
			goto out;
		}
		if (!(gfp_mask & __GFP_FS))
			break;		/* Let the caller handle it */
		/*
		 * Try to write back as many pages as we just scanned.  Not
		 * sure if that makes sense, but it's an attempt to avoid
		 * creating IO storms unnecessarily
		 */
		wakeup_bdflush(total_scanned);

		/* Take a nap, wait for some writeback to complete */
		blk_congestion_wait(WRITE, HZ/10);
		if (cz - cz->zone_pgdat->node_zones < ZONE_HIGHMEM) {
			shrink_slab(total_scanned, gfp_mask);
			if (reclaim_state) {
				nr_reclaimed += reclaim_state->reclaimed_slab;
				reclaim_state->reclaimed_slab = 0;
			}
		}
	}
	if ((gfp_mask & __GFP_FS) && !(gfp_mask & __GFP_NORETRY))
		out_of_memory();
out:
	for (zone = cz; zone >= cz->zone_pgdat->node_zones; --zone)
		zone->prev_priority = zone->temp_priority;
	return ret;
}

/*
 * For kswapd, balance_pgdat() will work across all this node's zones until
 * they are all at pages_high.
 *
 * If `nr_pages' is non-zero then it is the number of pages which are to be
 * reclaimed, regardless of the zone occupancies.  This is a software suspend
 * special.
 *
 * Returns the number of pages which were actually freed.
 *
 * There is special handling here for zones which are full of pinned pages.
 * This can happen if the pages are all mlocked, or if they are all used by
 * device drivers (say, ZONE_DMA).  Or if they are all in use by hugetlb.
 * What we do is to detect the case where all pages in the zone have been
 * scanned twice and there has been zero successful reclaim.  Mark the zone as
 * dead and from now on, only perform a short scan.  Basically we're polling
 * the zone for when the problem goes away.
 */
static int balance_pgdat(pg_data_t *pgdat, int nr_pages, struct page_state *ps)
{
	int to_free = nr_pages;
	int priority;
	int i;
	struct reclaim_state *reclaim_state = current->reclaim_state;

	inc_page_state(pageoutrun);

	for (i = 0; i < pgdat->nr_zones; i++) {
		struct zone *zone = pgdat->node_zones + i;

		zone->temp_priority = DEF_PRIORITY;
	}

	for (priority = DEF_PRIORITY; priority; priority--) {
		int all_zones_ok = 1;

		for (i = 0; i < pgdat->nr_zones; i++) {
			struct zone *zone = pgdat->node_zones + i;
			int nr_mapped = 0;
			int max_scan;
			int to_reclaim;

			if (zone->all_unreclaimable && priority != DEF_PRIORITY)
				continue;

			if (nr_pages && to_free > 0) {	/* Software suspend */
				to_reclaim = min(to_free, SWAP_CLUSTER_MAX*8);
			} else {			/* Zone balancing */
				to_reclaim = zone->pages_high-zone->free_pages;
				if (to_reclaim <= 0)
					continue;
			}
			zone->temp_priority = priority;
			all_zones_ok = 0;
			max_scan = zone->nr_inactive >> priority;
			if (max_scan < to_reclaim * 2)
				max_scan = to_reclaim * 2;
			if (max_scan < SWAP_CLUSTER_MAX)
				max_scan = SWAP_CLUSTER_MAX;
			to_free -= shrink_zone(zone, max_scan, GFP_KERNEL,
					to_reclaim, &nr_mapped, ps, priority);
			if (i < ZONE_HIGHMEM) {
				reclaim_state->reclaimed_slab = 0;
				shrink_slab(max_scan + nr_mapped, GFP_KERNEL);
				to_free -= reclaim_state->reclaimed_slab;
			}
			if (zone->all_unreclaimable)
				continue;
			if (zone->pages_scanned > zone->present_pages * 2)
				zone->all_unreclaimable = 1;
		}
		if (all_zones_ok)
			break;
		if (to_free > 0)
			blk_congestion_wait(WRITE, HZ/10);
	}

	for (i = 0; i < pgdat->nr_zones; i++) {
		struct zone *zone = pgdat->node_zones + i;

		zone->prev_priority = zone->temp_priority;
	}
	return nr_pages - to_free;
}

/*
 * The background pageout daemon, started as a kernel thread
 * from the init process.
 *
 * This basically trickles out pages so that we have _some_
 * free memory available even if there is no other activity
 * that frees anything up. This is needed for things like routing
 * etc, where we otherwise might have all activity going on in
 * asynchronous contexts that cannot page things out.
 *
 * If there are applications that are active memory-allocators
 * (most normal use), this basically shouldn't matter.
 */
int kswapd(void *p)
{
	pg_data_t *pgdat = (pg_data_t*)p;
	struct task_struct *tsk = current;
	DEFINE_WAIT(wait);
	struct reclaim_state reclaim_state = {
		.reclaimed_slab = 0,
	};
	cpumask_t cpumask;

	daemonize("kswapd%d", pgdat->node_id);
	cpumask = node_to_cpumask(pgdat->node_id);
	if (!cpus_empty(cpumask))
		set_cpus_allowed(tsk, cpumask);
	current->reclaim_state = &reclaim_state;

	/*
	 * Tell the memory management that we're a "memory allocator",
	 * and that if we need more memory we should get access to it
	 * regardless (see "__alloc_pages()"). "kswapd" should
	 * never get caught in the normal page freeing logic.
	 *
	 * (Kswapd normally doesn't need memory anyway, but sometimes
	 * you need a small amount of memory in order to be able to
	 * page out something else, and this flag essentially protects
	 * us from recursively trying to free more memory as we're
	 * trying to free the first piece of memory in the first place).
	 */
	tsk->flags |= PF_MEMALLOC|PF_KSWAPD;

	for ( ; ; ) {
		struct page_state ps;

		if (current->flags & PF_FREEZE)
			refrigerator(PF_IOTHREAD);
		prepare_to_wait(&pgdat->kswapd_wait, &wait, TASK_INTERRUPTIBLE);
		schedule();
		finish_wait(&pgdat->kswapd_wait, &wait);
		get_page_state(&ps);
		balance_pgdat(pgdat, 0, &ps);
	}
}

/*
 * A zone is low on free memory, so wake its kswapd task to service it.
 */
void wakeup_kswapd(struct zone *zone)
{
	if (zone->free_pages > zone->pages_low)
		return;
	if (!waitqueue_active(&zone->zone_pgdat->kswapd_wait))
		return;
	wake_up_interruptible(&zone->zone_pgdat->kswapd_wait);
}

#ifdef CONFIG_PM
/*
 * Try to free `nr_pages' of memory, system-wide.  Returns the number of freed
 * pages.
 */
int shrink_all_memory(int nr_pages)
{
	pg_data_t *pgdat;
	int nr_to_free = nr_pages;
	int ret = 0;
	struct reclaim_state reclaim_state = {
		.reclaimed_slab = 0,
	};

	current->reclaim_state = &reclaim_state;
	for_each_pgdat(pgdat) {
		int freed;
		struct page_state ps;

		get_page_state(&ps);
		freed = balance_pgdat(pgdat, nr_to_free, &ps);
		ret += freed;
		nr_to_free -= freed;
		if (nr_to_free <= 0)
			break;
	}
	current->reclaim_state = NULL;
	return ret;
}
#endif

static int __init kswapd_init(void)
{
	pg_data_t *pgdat;
	swap_setup();
	for_each_pgdat(pgdat)
		kernel_thread(kswapd, pgdat, CLONE_KERNEL);
	total_memory = nr_free_pagecache_pages();
	return 0;
}

module_init(kswapd_init)