/*
 * mm/rmap.c - physical to virtual reverse mappings
 *
 * Copyright 2001, Rik van Riel <riel@conectiva.com.br>
 * Released under the General Public License (GPL).
 *
 * Simple, low overhead reverse mapping scheme.
 * Please try to keep this thing as modular as possible.
 *
 * Provides methods for unmapping each kind of mapped page:
 * the anon methods track anonymous pages, and
 * the file methods track pages belonging to an inode.
 *
 * Original design by Rik van Riel <riel@conectiva.com.br> 2001
 * File methods by Dave McCracken <dmccr@us.ibm.com> 2003, 2004
 * Anonymous methods by Andrea Arcangeli <andrea@suse.de> 2004
 * Contributions by Hugh Dickins <hugh@veritas.com> 2003, 2004
 */

/*
 * Lock ordering in mm:
 *
 * inode->i_mutex       (while writing or truncating, not reading or faulting)
 *   inode->i_alloc_sem (vmtruncate_range)
 *   mm->mmap_sem
 *     page->flags PG_locked (lock_page)
 *       mapping->i_mmap_lock
 *         anon_vma->lock
 *           mm->page_table_lock or pte_lock
 *             zone->lru_lock (in mark_page_accessed, isolate_lru_page)
 *             swap_lock (in swap_duplicate, swap_info_get)
 *               mmlist_lock (in mmput, drain_mmlist and others)
 *               mapping->private_lock (in __set_page_dirty_buffers)
 *               inode_lock (in set_page_dirty's __mark_inode_dirty)
 *                 sb_lock (within inode_lock in fs/fs-writeback.c)
 *                 mapping->tree_lock (widely used, in set_page_dirty,
 *                           in arch-dependent flush_dcache_mmap_lock,
 *                           within inode_lock in __sync_single_inode)
 */

#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/rmap.h>
#include <linux/rcupdate.h>
#include <linux/module.h>
#include <linux/kallsyms.h>

#include <asm/tlbflush.h>

struct kmem_cache *anon_vma_cachep;

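/*
 * Sanity check, compiled in only with CONFIG_DEBUG_VM: confirm that
 * find_vma really is linked into the list of vmas hanging off its anon_vma.
 */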
static inline void validate_anon_vma(struct vm_area_struct *find_vma)
{
#ifdef CONFIG_DEBUG_VM
        struct anon_vma *anon_vma = find_vma->anon_vma;
        struct vm_area_struct *vma;
        unsigned int mapcount = 0;
        int found = 0;

        list_for_each_entry(vma, &anon_vma->head, anon_vma_node) {
                mapcount++;
                BUG_ON(mapcount > 100000);
                if (vma == find_vma)
                        found = 1;
        }
        BUG_ON(!found);
#endif
}

/* This must be called under the mmap_sem. */
int anon_vma_prepare(struct vm_area_struct *vma)
{
        struct anon_vma *anon_vma = vma->anon_vma;

        might_sleep();
        if (unlikely(!anon_vma)) {
                struct mm_struct *mm = vma->vm_mm;
                struct anon_vma *allocated, *locked;

                anon_vma = find_mergeable_anon_vma(vma);
                if (anon_vma) {
                        allocated = NULL;
                        locked = anon_vma;
                        spin_lock(&locked->lock);
                } else {
                        anon_vma = anon_vma_alloc();
                        if (unlikely(!anon_vma))
                                return -ENOMEM;
                        allocated = anon_vma;
                        locked = NULL;
                }

                /* page_table_lock to protect against threads */
                spin_lock(&mm->page_table_lock);
                if (likely(!vma->anon_vma)) {
                        vma->anon_vma = anon_vma;
                        list_add_tail(&vma->anon_vma_node, &anon_vma->head);
                        allocated = NULL;
                }
                spin_unlock(&mm->page_table_lock);

                if (locked)
                        spin_unlock(&locked->lock);
                if (unlikely(allocated))
                        anon_vma_free(allocated);
        }
        return 0;
}

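/*
 * Used when merging two adjacent vmas that share the same anon_vma:
 * next's entry in the anon_vma list is dropped, since vma's entry
 * already keeps the anon_vma linked.
 */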
void __anon_vma_merge(struct vm_area_struct *vma, struct vm_area_struct *next)
{
        BUG_ON(vma->anon_vma != next->anon_vma);
        list_del(&next->anon_vma_node);
}

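/*
 * Add vma to the tail of its anon_vma's list.  __anon_vma_link is the
 * variant used when the anon_vma lock is already held by the caller
 * (it does not take the lock itself); anon_vma_link below takes
 * anon_vma->lock around the list manipulation.
 */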
void __anon_vma_link(struct vm_area_struct *vma)
{
        struct anon_vma *anon_vma = vma->anon_vma;

        if (anon_vma) {
                list_add_tail(&vma->anon_vma_node, &anon_vma->head);
                validate_anon_vma(vma);
        }
}

void anon_vma_link(struct vm_area_struct *vma)
{
        struct anon_vma *anon_vma = vma->anon_vma;

        if (anon_vma) {
                spin_lock(&anon_vma->lock);
                list_add_tail(&vma->anon_vma_node, &anon_vma->head);
                validate_anon_vma(vma);
                spin_unlock(&anon_vma->lock);
        }
}

void anon_vma_unlink(struct vm_area_struct *vma)
{
        struct anon_vma *anon_vma = vma->anon_vma;
        int empty;

        if (!anon_vma)
                return;

        spin_lock(&anon_vma->lock);
        validate_anon_vma(vma);
        list_del(&vma->anon_vma_node);

        /* We must garbage collect the anon_vma if it's empty */
        empty = list_empty(&anon_vma->head);
        spin_unlock(&anon_vma->lock);

        if (empty)
                anon_vma_free(anon_vma);
}

static void anon_vma_ctor(void *data, struct kmem_cache *cachep,
                          unsigned long flags)
{
        struct anon_vma *anon_vma = data;

        spin_lock_init(&anon_vma->lock);
        INIT_LIST_HEAD(&anon_vma->head);
}

void __init anon_vma_init(void)
{
        anon_vma_cachep = kmem_cache_create("anon_vma", sizeof(struct anon_vma),
                        0, SLAB_DESTROY_BY_RCU|SLAB_PANIC, anon_vma_ctor, NULL);
}

/*
 * Getting a lock on a stable anon_vma from a page off the LRU is
 * tricky: page_lock_anon_vma relies on RCU to guard against the races.
 */
static struct anon_vma *page_lock_anon_vma(struct page *page)
{
        struct anon_vma *anon_vma;
        unsigned long anon_mapping;

        rcu_read_lock();
        anon_mapping = (unsigned long) page->mapping;
        if (!(anon_mapping & PAGE_MAPPING_ANON))
                goto out;
        if (!page_mapped(page))
                goto out;

        anon_vma = (struct anon_vma *) (anon_mapping - PAGE_MAPPING_ANON);
        spin_lock(&anon_vma->lock);
        return anon_vma;
out:
        rcu_read_unlock();
        return NULL;
}

static void page_unlock_anon_vma(struct anon_vma *anon_vma)
{
        spin_unlock(&anon_vma->lock);
        rcu_read_unlock();
}

/*
 * At what user virtual address is page expected in vma?
 */
static inline unsigned long
vma_address(struct page *page, struct vm_area_struct *vma)
{
        pgoff_t pgoff = page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT);
        unsigned long address;

        address = vma->vm_start + ((pgoff - vma->vm_pgoff) << PAGE_SHIFT);
        if (unlikely(address < vma->vm_start || address >= vma->vm_end)) {
                /* page should be within any vma from prio_tree_next */
                BUG_ON(!PageAnon(page));
                return -EFAULT;
        }
        return address;
}

/*
 * At what user virtual address is page expected in vma?  Also checks that
 * the page actually matches the vma: currently only used on anon pages,
 * by unuse_vma.
 */
unsigned long page_address_in_vma(struct page *page, struct vm_area_struct *vma)
{
        if (PageAnon(page)) {
                if ((void *)vma->anon_vma !=
                    (void *)page->mapping - PAGE_MAPPING_ANON)
                        return -EFAULT;
        } else if (page->mapping && !(vma->vm_flags & VM_NONLINEAR)) {
                if (!vma->vm_file ||
                    vma->vm_file->f_mapping != page->mapping)
                        return -EFAULT;
        } else
                return -EFAULT;
        return vma_address(page, vma);
}

/*
 * Check that @page is mapped at @address into @mm.
 *
 * On success returns with pte mapped and locked.
 */
pte_t *page_check_address(struct page *page, struct mm_struct *mm,
                          unsigned long address, spinlock_t **ptlp)
{
        pgd_t *pgd;
        pud_t *pud;
        pmd_t *pmd;
        pte_t *pte;
        spinlock_t *ptl;

        pgd = pgd_offset(mm, address);
        if (!pgd_present(*pgd))
                return NULL;

        pud = pud_offset(pgd, address);
        if (!pud_present(*pud))
                return NULL;

        pmd = pmd_offset(pud, address);
        if (!pmd_present(*pmd))
                return NULL;

        pte = pte_offset_map(pmd, address);
        /* Make a quick check before getting the lock */
        if (!pte_present(*pte)) {
                pte_unmap(pte);
                return NULL;
        }

        ptl = pte_lockptr(mm, pmd);
        spin_lock(ptl);
        if (pte_present(*pte) && page_to_pfn(page) == pte_pfn(*pte)) {
                *ptlp = ptl;
                return pte;
        }
        pte_unmap_unlock(pte, ptl);
        return NULL;
}

/*
 * Subfunctions of page_referenced: page_referenced_one called
 * repeatedly from either page_referenced_anon or page_referenced_file.
 */
static int page_referenced_one(struct page *page,
        struct vm_area_struct *vma, unsigned int *mapcount)
{
        struct mm_struct *mm = vma->vm_mm;
        unsigned long address;
        pte_t *pte;
        spinlock_t *ptl;
        int referenced = 0;

        address = vma_address(page, vma);
        if (address == -EFAULT)
                goto out;

        pte = page_check_address(page, mm, address, &ptl);
        if (!pte)
                goto out;

        if (ptep_clear_flush_young(vma, address, pte))
                referenced++;

        /* Pretend the page is referenced if the task has the
           swap token and is in the middle of a page fault. */
        if (mm != current->mm && has_swap_token(mm) &&
                        rwsem_is_locked(&mm->mmap_sem))
                referenced++;

        (*mapcount)--;
        pte_unmap_unlock(pte, ptl);
out:
        return referenced;
}

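/*
 * page_referenced_anon - referenced check for anonymous pages
 *
 * Walk every vma on the page's anon_vma list, letting page_referenced_one
 * test and clear the young/referenced state of each mapping, and stop early
 * once all mapcount mappings have been accounted for.
 */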
static int page_referenced_anon(struct page *page)
{
        unsigned int mapcount;
        struct anon_vma *anon_vma;
        struct vm_area_struct *vma;
        int referenced = 0;

        anon_vma = page_lock_anon_vma(page);
        if (!anon_vma)
                return referenced;

        mapcount = page_mapcount(page);
        list_for_each_entry(vma, &anon_vma->head, anon_vma_node) {
                referenced += page_referenced_one(page, vma, &mapcount);
                if (!mapcount)
                        break;
        }

        page_unlock_anon_vma(anon_vma);
        return referenced;
}

/**
 * page_referenced_file - referenced check for object-based rmap
 * @page: the page we're checking references on.
 *
 * For an object-based mapped page, find all the places it is mapped and
 * check/clear the referenced flag.  This is done by following the page->mapping
 * pointer, then walking the chain of vmas it holds.  It returns the number
 * of references it found.
 *
 * This function is only called from page_referenced for object-based pages.
 */
static int page_referenced_file(struct page *page)
{
        unsigned int mapcount;
        struct address_space *mapping = page->mapping;
        pgoff_t pgoff = page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT);
        struct vm_area_struct *vma;
        struct prio_tree_iter iter;
        int referenced = 0;

        /*
         * The caller's checks on page->mapping and !PageAnon have made
         * sure that this is a file page: the check for page->mapping
         * excludes the case just before it gets set on an anon page.
         */
        BUG_ON(PageAnon(page));

        /*
         * The page lock not only makes sure that page->mapping cannot
         * suddenly be NULLified by truncation, it makes sure that the
         * structure at mapping cannot be freed and reused yet,
         * so we can safely take mapping->i_mmap_lock.
         */
        BUG_ON(!PageLocked(page));

        spin_lock(&mapping->i_mmap_lock);

        /*
         * i_mmap_lock does not stabilize mapcount at all, but mapcount
         * is more likely to be accurate if we note it after spinning.
         */
        mapcount = page_mapcount(page);

        vma_prio_tree_foreach(vma, &iter, &mapping->i_mmap, pgoff, pgoff) {
                if ((vma->vm_flags & (VM_LOCKED|VM_MAYSHARE))
                                  == (VM_LOCKED|VM_MAYSHARE)) {
                        referenced++;
                        break;
                }
                referenced += page_referenced_one(page, vma, &mapcount);
                if (!mapcount)
                        break;
        }

        spin_unlock(&mapping->i_mmap_lock);
        return referenced;
}

/**
 * page_referenced - test if the page was referenced
 * @page: the page to test
 * @is_locked: caller holds lock on the page
 *
 * Quick test_and_clear_referenced for all mappings to a page,
 * returns the number of ptes which referenced the page.
 */
int page_referenced(struct page *page, int is_locked)
{
        int referenced = 0;

        if (page_test_and_clear_young(page))
                referenced++;

        if (TestClearPageReferenced(page))
                referenced++;

        if (page_mapped(page) && page->mapping) {
                if (PageAnon(page))
                        referenced += page_referenced_anon(page);
                else if (is_locked)
                        referenced += page_referenced_file(page);
                else if (TestSetPageLocked(page))
                        referenced++;
                else {
                        if (page->mapping)
                                referenced += page_referenced_file(page);
                        unlock_page(page);
                }
        }
        return referenced;
}

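/*
 * page_mkclean_one: if this vma's pte for the page is dirty or writable,
 * write-protect and clean it so that the next write faults again.
 * Returns 1 if the pte was modified, 0 otherwise.
 */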
static int page_mkclean_one(struct page *page, struct vm_area_struct *vma)
{
        struct mm_struct *mm = vma->vm_mm;
        unsigned long address;
        pte_t *pte;
        spinlock_t *ptl;
        int ret = 0;

        address = vma_address(page, vma);
        if (address == -EFAULT)
                goto out;

        pte = page_check_address(page, mm, address, &ptl);
        if (!pte)
                goto out;

        if (pte_dirty(*pte) || pte_write(*pte)) {
                pte_t entry;

                flush_cache_page(vma, address, pte_pfn(*pte));
                entry = ptep_clear_flush(vma, address, pte);
                entry = pte_wrprotect(entry);
                entry = pte_mkclean(entry);
                set_pte_at(mm, address, pte, entry);
                lazy_mmu_prot_update(entry);
                ret = 1;
        }

        pte_unmap_unlock(pte, ptl);
out:
        return ret;
}

static int page_mkclean_file(struct address_space *mapping, struct page *page)
{
        pgoff_t pgoff = page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT);
        struct vm_area_struct *vma;
        struct prio_tree_iter iter;
        int ret = 0;

        BUG_ON(PageAnon(page));

        spin_lock(&mapping->i_mmap_lock);
        vma_prio_tree_foreach(vma, &iter, &mapping->i_mmap, pgoff, pgoff) {
                if (vma->vm_flags & VM_SHARED)
                        ret += page_mkclean_one(page, vma);
        }
        spin_unlock(&mapping->i_mmap_lock);
        return ret;
}

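/*
 * page_mkclean: clean and write-protect all shared mappings of a locked
 * page, so that a later write triggers a fresh fault.  Returns nonzero
 * if any pte needed cleaning or the page itself tested dirty.
 */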
int page_mkclean(struct page *page)
{
        int ret = 0;

        BUG_ON(!PageLocked(page));

        if (page_mapped(page)) {
                struct address_space *mapping = page_mapping(page);
                if (mapping)
                        ret = page_mkclean_file(mapping, page);
                if (page_test_dirty(page)) {
                        page_clear_dirty(page);
                        ret = 1;
                }
        }

        return ret;
}
EXPORT_SYMBOL_GPL(page_mkclean);

/**
 * __page_set_anon_rmap - setup new anonymous rmap
 * @page:       the page to add the mapping to
 * @vma:        the vm area in which the mapping is added
 * @address:    the user virtual address mapped
 */
static void __page_set_anon_rmap(struct page *page,
        struct vm_area_struct *vma, unsigned long address)
{
        struct anon_vma *anon_vma = vma->anon_vma;

        BUG_ON(!anon_vma);
        anon_vma = (void *) anon_vma + PAGE_MAPPING_ANON;
        page->mapping = (struct address_space *) anon_vma;

        page->index = linear_page_index(vma, address);

        /*
         * nr_mapped state can be updated without turning off
         * interrupts because it is not modified via interrupt.
         */
        __inc_zone_page_state(page, NR_ANON_PAGES);
}

/**
 * __page_check_anon_rmap - sanity check anonymous rmap addition
 * @page:       the page to add the mapping to
 * @vma:        the vm area in which the mapping is added
 * @address:    the user virtual address mapped
 */
static void __page_check_anon_rmap(struct page *page,
        struct vm_area_struct *vma, unsigned long address)
{
#ifdef CONFIG_DEBUG_VM
        /*
         * The page's anon-rmap details (mapping and index) are guaranteed to
         * be set up correctly at this point.
         *
         * We have exclusion against page_add_anon_rmap because the caller
         * always holds the page locked, except if called from page_dup_rmap,
         * in which case the page is already known to be setup.
         *
         * We have exclusion against page_add_new_anon_rmap because those pages
         * are initially only visible via the pagetables, and the pte is locked
         * over the call to page_add_new_anon_rmap.
         */
        struct anon_vma *anon_vma = vma->anon_vma;
        anon_vma = (void *) anon_vma + PAGE_MAPPING_ANON;
        BUG_ON(page->mapping != (struct address_space *)anon_vma);
        BUG_ON(page->index != linear_page_index(vma, address));
#endif
}

/**
 * page_add_anon_rmap - add pte mapping to an anonymous page
 * @page:       the page to add the mapping to
 * @vma:        the vm area in which the mapping is added
 * @address:    the user virtual address mapped
 *
 * The caller needs to hold the pte lock and the page must be locked.
 */
void page_add_anon_rmap(struct page *page,
        struct vm_area_struct *vma, unsigned long address)
{
        VM_BUG_ON(!PageLocked(page));
        VM_BUG_ON(address < vma->vm_start || address >= vma->vm_end);
        if (atomic_inc_and_test(&page->_mapcount))
                __page_set_anon_rmap(page, vma, address);
        else
                __page_check_anon_rmap(page, vma, address);
}

/*
 * page_add_new_anon_rmap - add pte mapping to a new anonymous page
 * @page:       the page to add the mapping to
 * @vma:        the vm area in which the mapping is added
 * @address:    the user virtual address mapped
 *
 * Same as page_add_anon_rmap but must only be called on *new* pages.
 * This means the inc-and-test can be bypassed.
 * Page does not have to be locked.
 */
void page_add_new_anon_rmap(struct page *page,
        struct vm_area_struct *vma, unsigned long address)
{
        BUG_ON(address < vma->vm_start || address >= vma->vm_end);
        atomic_set(&page->_mapcount, 0); /* elevate count by 1 (starts at -1) */
        __page_set_anon_rmap(page, vma, address);
}

/**
 * page_add_file_rmap - add pte mapping to a file page
 * @page: the page to add the mapping to
 *
 * The caller needs to hold the pte lock.
 */
void page_add_file_rmap(struct page *page)
{
        if (atomic_inc_and_test(&page->_mapcount))
                __inc_zone_page_state(page, NR_FILE_MAPPED);
}

#ifdef CONFIG_DEBUG_VM
/**
 * page_dup_rmap - duplicate pte mapping to a page
 * @page:       the page to add the mapping to
 *
 * For copy_page_range only: minimal extract from page_add_file_rmap /
 * page_add_anon_rmap, avoiding unnecessary tests (already checked) so it's
 * quicker.
 *
 * The caller needs to hold the pte lock.
 */
void page_dup_rmap(struct page *page, struct vm_area_struct *vma, unsigned long address)
{
        BUG_ON(page_mapcount(page) == 0);
        if (PageAnon(page))
                __page_check_anon_rmap(page, vma, address);
        atomic_inc(&page->_mapcount);
}
#endif

/**
 * page_remove_rmap - take down pte mapping from a page
 * @page: page to remove mapping from
 *
 * The caller needs to hold the pte lock.
 */
void page_remove_rmap(struct page *page, struct vm_area_struct *vma)
{
        if (atomic_add_negative(-1, &page->_mapcount)) {
                if (unlikely(page_mapcount(page) < 0)) {
                        printk (KERN_EMERG "Eeek! page_mapcount(page) went negative! (%d)\n", page_mapcount(page));
                        printk (KERN_EMERG "  page pfn = %lx\n", page_to_pfn(page));
                        printk (KERN_EMERG "  page->flags = %lx\n", page->flags);
                        printk (KERN_EMERG "  page->count = %x\n", page_count(page));
                        printk (KERN_EMERG "  page->mapping = %p\n", page->mapping);
                        print_symbol (KERN_EMERG "  vma->vm_ops = %s\n", (unsigned long)vma->vm_ops);
                        if (vma->vm_ops)
                                print_symbol (KERN_EMERG "  vma->vm_ops->nopage = %s\n", (unsigned long)vma->vm_ops->nopage);
                        if (vma->vm_file && vma->vm_file->f_op)
                                print_symbol (KERN_EMERG "  vma->vm_file->f_op->mmap = %s\n", (unsigned long)vma->vm_file->f_op->mmap);
                        BUG();
                }

                /*
                 * It would be tidy to reset the PageAnon mapping here,
                 * but that might overwrite a racing page_add_anon_rmap
                 * which increments mapcount after us but sets mapping
                 * before us: so leave the reset to free_hot_cold_page,
                 * and remember that it's only reliable while mapped.
                 * Leaving it set also helps swapoff to reinstate ptes
                 * faster for those pages still in swapcache.
                 */
                if (page_test_dirty(page)) {
                        page_clear_dirty(page);
                        set_page_dirty(page);
                }
                __dec_zone_page_state(page,
                                PageAnon(page) ? NR_ANON_PAGES : NR_FILE_MAPPED);
        }
}

/*
 * Subfunctions of try_to_unmap: try_to_unmap_one called
 * repeatedly from either try_to_unmap_anon or try_to_unmap_file.
 */
static int try_to_unmap_one(struct page *page, struct vm_area_struct *vma,
                                int migration)
{
        struct mm_struct *mm = vma->vm_mm;
        unsigned long address;
        pte_t *pte;
        pte_t pteval;
        spinlock_t *ptl;
        int ret = SWAP_AGAIN;

        address = vma_address(page, vma);
        if (address == -EFAULT)
                goto out;

        pte = page_check_address(page, mm, address, &ptl);
        if (!pte)
                goto out;

        /*
         * If the page is mlock()d, we cannot swap it out.
         * If it's recently referenced (perhaps page_referenced
         * skipped over this mm) then we should reactivate it.
         */
        if (!migration && ((vma->vm_flags & VM_LOCKED) ||
                        (ptep_clear_flush_young(vma, address, pte)))) {
                ret = SWAP_FAIL;
                goto out_unmap;
        }

        /* Nuke the page table entry. */
        flush_cache_page(vma, address, page_to_pfn(page));
        pteval = ptep_clear_flush(vma, address, pte);

        /* Move the dirty bit to the physical page now the pte is gone. */
        if (pte_dirty(pteval))
                set_page_dirty(page);

        /* Update high watermark before we lower rss */
        update_hiwater_rss(mm);

        if (PageAnon(page)) {
                swp_entry_t entry = { .val = page_private(page) };

                if (PageSwapCache(page)) {
                        /*
                         * Store the swap location in the pte.
                         * See handle_pte_fault() ...
                         */
                        swap_duplicate(entry);
                        if (list_empty(&mm->mmlist)) {
                                spin_lock(&mmlist_lock);
                                if (list_empty(&mm->mmlist))
                                        list_add(&mm->mmlist, &init_mm.mmlist);
                                spin_unlock(&mmlist_lock);
                        }
                        dec_mm_counter(mm, anon_rss);
#ifdef CONFIG_MIGRATION
                } else {
                        /*
                         * Store the pfn of the page in a special migration
                         * pte. do_swap_page() will wait until the migration
                         * pte is removed and then restart fault handling.
                         */
                        BUG_ON(!migration);
                        entry = make_migration_entry(page, pte_write(pteval));
#endif
                }
                set_pte_at(mm, address, pte, swp_entry_to_pte(entry));
                BUG_ON(pte_file(*pte));
        } else
#ifdef CONFIG_MIGRATION
        if (migration) {
                /* Establish migration entry for a file page */
                swp_entry_t entry;
                entry = make_migration_entry(page, pte_write(pteval));
                set_pte_at(mm, address, pte, swp_entry_to_pte(entry));
        } else
#endif
                dec_mm_counter(mm, file_rss);

        page_remove_rmap(page, vma);
        page_cache_release(page);

out_unmap:
        pte_unmap_unlock(pte, ptl);
out:
        return ret;
}

/*
 * objrmap doesn't work for nonlinear VMAs because the assumption that
 * offset-into-file correlates with offset-into-virtual-addresses does not hold.
 * Consequently, given a particular page and its ->index, we cannot locate the
 * ptes which are mapping that page without an exhaustive linear search.
 *
 * So what this code does is a mini "virtual scan" of each nonlinear VMA which
 * maps the file to which the target page belongs.  The ->vm_private_data field
 * holds the current cursor into that scan.  Successive searches will circulate
 * around the vma's virtual address space.
 *
 * So as more replacement pressure is applied to the pages in a nonlinear VMA,
 * more scanning pressure is placed against them as well.  Eventually pages
 * will become fully unmapped and are eligible for eviction.
 *
 * For very sparsely populated VMAs this is a little inefficient - chances are
 * there won't be many ptes located within the scan cluster.  In this case
 * maybe we could scan further - to the end of the pte page, perhaps.
 */
#define CLUSTER_SIZE    min(32*PAGE_SIZE, PMD_SIZE)
#define CLUSTER_MASK    (~(CLUSTER_SIZE - 1))

static void try_to_unmap_cluster(unsigned long cursor,
        unsigned int *mapcount, struct vm_area_struct *vma)
{
        struct mm_struct *mm = vma->vm_mm;
        pgd_t *pgd;
        pud_t *pud;
        pmd_t *pmd;
        pte_t *pte;
        pte_t pteval;
        spinlock_t *ptl;
        struct page *page;
        unsigned long address;
        unsigned long end;

        address = (vma->vm_start + cursor) & CLUSTER_MASK;
        end = address + CLUSTER_SIZE;
        if (address < vma->vm_start)
                address = vma->vm_start;
        if (end > vma->vm_end)
                end = vma->vm_end;

        pgd = pgd_offset(mm, address);
        if (!pgd_present(*pgd))
                return;

        pud = pud_offset(pgd, address);
        if (!pud_present(*pud))
                return;

        pmd = pmd_offset(pud, address);
        if (!pmd_present(*pmd))
                return;

        pte = pte_offset_map_lock(mm, pmd, address, &ptl);

        /* Update high watermark before we lower rss */
        update_hiwater_rss(mm);

        for (; address < end; pte++, address += PAGE_SIZE) {
                if (!pte_present(*pte))
                        continue;
                page = vm_normal_page(vma, address, *pte);
                BUG_ON(!page || PageAnon(page));

                if (ptep_clear_flush_young(vma, address, pte))
                        continue;

                /* Nuke the page table entry. */
                flush_cache_page(vma, address, pte_pfn(*pte));
                pteval = ptep_clear_flush(vma, address, pte);

                /* If nonlinear, store the file page offset in the pte. */
                if (page->index != linear_page_index(vma, address))
                        set_pte_at(mm, address, pte, pgoff_to_pte(page->index));

                /* Move the dirty bit to the physical page now the pte is gone. */
                if (pte_dirty(pteval))
                        set_page_dirty(page);

                page_remove_rmap(page, vma);
                page_cache_release(page);
                dec_mm_counter(mm, file_rss);
                (*mapcount)--;
        }
        pte_unmap_unlock(pte - 1, ptl);
}

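/*
 * try_to_unmap_anon - unmap an anonymous page via its anon_vma
 *
 * Walk every vma on the page's anon_vma list, calling try_to_unmap_one on
 * each, and stop as soon as one fails (SWAP_FAIL) or the page is no longer
 * mapped anywhere.
 */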
static int try_to_unmap_anon(struct page *page, int migration)
{
        struct anon_vma *anon_vma;
        struct vm_area_struct *vma;
        int ret = SWAP_AGAIN;

        anon_vma = page_lock_anon_vma(page);
        if (!anon_vma)
                return ret;

        list_for_each_entry(vma, &anon_vma->head, anon_vma_node) {
                ret = try_to_unmap_one(page, vma, migration);
                if (ret == SWAP_FAIL || !page_mapped(page))
                        break;
        }

        page_unlock_anon_vma(anon_vma);
        return ret;
}

/**
 * try_to_unmap_file - unmap file page using the object-based rmap method
 * @page: the page to unmap
 *
 * Find all the mappings of a page using the mapping pointer and the vma chains
 * contained in the address_space struct it points to.
 *
 * This function is only called from try_to_unmap for object-based pages.
 */
static int try_to_unmap_file(struct page *page, int migration)
{
        struct address_space *mapping = page->mapping;
        pgoff_t pgoff = page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT);
        struct vm_area_struct *vma;
        struct prio_tree_iter iter;
        int ret = SWAP_AGAIN;
        unsigned long cursor;
        unsigned long max_nl_cursor = 0;
        unsigned long max_nl_size = 0;
        unsigned int mapcount;

        spin_lock(&mapping->i_mmap_lock);
        vma_prio_tree_foreach(vma, &iter, &mapping->i_mmap, pgoff, pgoff) {
                ret = try_to_unmap_one(page, vma, migration);
                if (ret == SWAP_FAIL || !page_mapped(page))
                        goto out;
        }

        if (list_empty(&mapping->i_mmap_nonlinear))
                goto out;

        list_for_each_entry(vma, &mapping->i_mmap_nonlinear,
                                                shared.vm_set.list) {
                if ((vma->vm_flags & VM_LOCKED) && !migration)
                        continue;
                cursor = (unsigned long) vma->vm_private_data;
                if (cursor > max_nl_cursor)
                        max_nl_cursor = cursor;
                cursor = vma->vm_end - vma->vm_start;
                if (cursor > max_nl_size)
                        max_nl_size = cursor;
        }

        if (max_nl_size == 0) { /* any nonlinears locked or reserved */
                ret = SWAP_FAIL;
                goto out;
        }

        /*
         * We don't try to search for this page in the nonlinear vmas,
         * and page_referenced wouldn't have found it anyway.  Instead
         * just walk the nonlinear vmas trying to age and unmap some.
         * The mapcount of the page we came in with is irrelevant,
         * but even so use it as a guide to how hard we should try?
         */
        mapcount = page_mapcount(page);
        if (!mapcount)
                goto out;
        cond_resched_lock(&mapping->i_mmap_lock);

        max_nl_size = (max_nl_size + CLUSTER_SIZE - 1) & CLUSTER_MASK;
        if (max_nl_cursor == 0)
                max_nl_cursor = CLUSTER_SIZE;

        do {
                list_for_each_entry(vma, &mapping->i_mmap_nonlinear,
                                                shared.vm_set.list) {
                        if ((vma->vm_flags & VM_LOCKED) && !migration)
                                continue;
                        cursor = (unsigned long) vma->vm_private_data;
                        while ( cursor < max_nl_cursor &&
                                cursor < vma->vm_end - vma->vm_start) {
                                try_to_unmap_cluster(cursor, &mapcount, vma);
                                cursor += CLUSTER_SIZE;
                                vma->vm_private_data = (void *) cursor;
                                if ((int)mapcount <= 0)
                                        goto out;
                        }
                        vma->vm_private_data = (void *) max_nl_cursor;
                }
                cond_resched_lock(&mapping->i_mmap_lock);
                max_nl_cursor += CLUSTER_SIZE;
        } while (max_nl_cursor <= max_nl_size);

        /*
         * Don't loop forever (perhaps all the remaining pages are
         * in locked vmas).  Reset cursor on all unreserved nonlinear
         * vmas, now forgetting on which ones it had fallen behind.
         */
        list_for_each_entry(vma, &mapping->i_mmap_nonlinear, shared.vm_set.list)
                vma->vm_private_data = NULL;
out:
        spin_unlock(&mapping->i_mmap_lock);
        return ret;
}

/**
 * try_to_unmap - try to remove all page table mappings to a page
 * @page: the page to get unmapped
 *
 * Tries to remove all the page table entries which are mapping this
 * page, used in the pageout path.  Caller must hold the page lock.
 * Return values are:
 *
 * SWAP_SUCCESS - we succeeded in removing all mappings
 * SWAP_AGAIN   - we missed a mapping, try again later
 * SWAP_FAIL    - the page is unswappable
 */
int try_to_unmap(struct page *page, int migration)
{
        int ret;

        BUG_ON(!PageLocked(page));

        if (PageAnon(page))
                ret = try_to_unmap_anon(page, migration);
        else
                ret = try_to_unmap_file(page, migration);

        if (!page_mapped(page))
                ret = SWAP_SUCCESS;
        return ret;
}