/*
 * mm/rmap.c - physical to virtual reverse mappings
 *
 * Copyright 2001, Rik van Riel <riel@conectiva.com.br>
 * Released under the General Public License (GPL).
 *
 * Simple, low overhead reverse mapping scheme.
 * Please try to keep this thing as modular as possible.
 *
 * Provides methods for unmapping each kind of mapped page:
 * the anon methods track anonymous pages, and
 * the file methods track pages belonging to an inode.
 *
 * Original design by Rik van Riel <riel@conectiva.com.br> 2001
 * File methods by Dave McCracken <dmccr@us.ibm.com> 2003, 2004
 * Anonymous methods by Andrea Arcangeli <andrea@suse.de> 2004
 * Contributions by Hugh Dickins <hugh@veritas.com> 2003, 2004
 */
/*
 * Lock ordering in mm (each lock nests inside the locks listed above it):
 *
 * inode->i_mutex	(while writing or truncating, not reading or faulting)
 *   inode->i_alloc_sem (vmtruncate_range)
 *   mm->mmap_sem
 *     page->flags PG_locked (lock_page)
 *       mapping->i_mmap_lock
 *         anon_vma->lock
 *           mm->page_table_lock or pte_lock
 *             zone->lru_lock (in mark_page_accessed, isolate_lru_page)
 *             swap_lock (in swap_duplicate, swap_info_get)
 *               mmlist_lock (in mmput, drain_mmlist and others)
 *               mapping->private_lock (in __set_page_dirty_buffers)
 *               inode_lock (in set_page_dirty's __mark_inode_dirty)
 *                 sb_lock (within inode_lock in fs/fs-writeback.c)
 *                 mapping->tree_lock (widely used, in set_page_dirty,
 *                           in arch-dependent flush_dcache_mmap_lock,
 *                           within inode_lock in __sync_single_inode)
 */
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/rmap.h>
#include <linux/rcupdate.h>
#include <linux/module.h>
#include <linux/kallsyms.h>

#include <asm/tlbflush.h>
struct kmem_cache *anon_vma_cachep;
static inline void validate_anon_vma(struct vm_area_struct *find_vma)
{
#ifdef CONFIG_DEBUG_VM
	struct anon_vma *anon_vma = find_vma->anon_vma;
	struct vm_area_struct *vma;
	unsigned int mapcount = 0;
	int found = 0;

	list_for_each_entry(vma, &anon_vma->head, anon_vma_node) {
		mapcount++;
		BUG_ON(mapcount > 100000);
		if (vma == find_vma)
			found = 1;
	}
	BUG_ON(!found);
#endif
}
/* This must be called under the mmap_sem. */
int anon_vma_prepare(struct vm_area_struct *vma)
{
	struct anon_vma *anon_vma = vma->anon_vma;

	if (unlikely(!anon_vma)) {
		struct mm_struct *mm = vma->vm_mm;
		struct anon_vma *allocated, *locked;

		anon_vma = find_mergeable_anon_vma(vma);
		if (anon_vma) {
			allocated = NULL;
			locked = anon_vma;
			spin_lock(&locked->lock);
		} else {
			anon_vma = anon_vma_alloc();
			if (unlikely(!anon_vma))
				return -ENOMEM;
			allocated = anon_vma;
			locked = NULL;
		}

		/* page_table_lock to protect against threads */
		spin_lock(&mm->page_table_lock);
		if (likely(!vma->anon_vma)) {
			vma->anon_vma = anon_vma;
			list_add_tail(&vma->anon_vma_node, &anon_vma->head);
			allocated = NULL;
		}
		spin_unlock(&mm->page_table_lock);

		if (locked)
			spin_unlock(&locked->lock);
		if (unlikely(allocated))
			anon_vma_free(allocated);
	}
	return 0;
}
void __anon_vma_merge(struct vm_area_struct *vma, struct vm_area_struct *next)
{
	BUG_ON(vma->anon_vma != next->anon_vma);
	list_del(&next->anon_vma_node);
}
void __anon_vma_link(struct vm_area_struct *vma)
{
	struct anon_vma *anon_vma = vma->anon_vma;

	if (anon_vma) {
		list_add_tail(&vma->anon_vma_node, &anon_vma->head);
		validate_anon_vma(vma);
	}
}
void anon_vma_link(struct vm_area_struct *vma)
{
	struct anon_vma *anon_vma = vma->anon_vma;

	if (anon_vma) {
		spin_lock(&anon_vma->lock);
		list_add_tail(&vma->anon_vma_node, &anon_vma->head);
		validate_anon_vma(vma);
		spin_unlock(&anon_vma->lock);
	}
}
void anon_vma_unlink(struct vm_area_struct *vma)
{
	struct anon_vma *anon_vma = vma->anon_vma;
	int empty;

	if (!anon_vma)
		return;

	spin_lock(&anon_vma->lock);
	validate_anon_vma(vma);
	list_del(&vma->anon_vma_node);

	/* We must garbage collect the anon_vma if it's empty */
	empty = list_empty(&anon_vma->head);
	spin_unlock(&anon_vma->lock);

	if (empty)
		anon_vma_free(anon_vma);
}
static void anon_vma_ctor(void *data, struct kmem_cache *cachep,
			  unsigned long flags)
{
	if ((flags & (SLAB_CTOR_VERIFY|SLAB_CTOR_CONSTRUCTOR)) ==
						SLAB_CTOR_CONSTRUCTOR) {
		struct anon_vma *anon_vma = data;

		spin_lock_init(&anon_vma->lock);
		INIT_LIST_HEAD(&anon_vma->head);
	}
}
void __init anon_vma_init(void)
{
	anon_vma_cachep = kmem_cache_create("anon_vma", sizeof(struct anon_vma),
			0, SLAB_DESTROY_BY_RCU|SLAB_PANIC, anon_vma_ctor, NULL);
}
/*
 * Getting a lock on a stable anon_vma from a page off the LRU is
 * tricky: page_lock_anon_vma relies on RCU to guard against the races.
 */
static struct anon_vma *page_lock_anon_vma(struct page *page)
{
	struct anon_vma *anon_vma;
	unsigned long anon_mapping;

	rcu_read_lock();
	anon_mapping = (unsigned long) page->mapping;
	if (!(anon_mapping & PAGE_MAPPING_ANON))
		goto out;
	if (!page_mapped(page))
		goto out;

	anon_vma = (struct anon_vma *) (anon_mapping - PAGE_MAPPING_ANON);
	spin_lock(&anon_vma->lock);
	return anon_vma;
out:
	rcu_read_unlock();
	return NULL;
}

static void page_unlock_anon_vma(struct anon_vma *anon_vma)
{
	spin_unlock(&anon_vma->lock);
	rcu_read_unlock();
}
/*
 * At what user virtual address is page expected in vma?
 */
static inline unsigned long
vma_address(struct page *page, struct vm_area_struct *vma)
{
	pgoff_t pgoff = page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT);
	unsigned long address;

	address = vma->vm_start + ((pgoff - vma->vm_pgoff) << PAGE_SHIFT);
	if (unlikely(address < vma->vm_start || address >= vma->vm_end)) {
		/* page should be within any vma from prio_tree_next */
		BUG_ON(!PageAnon(page));
		return -EFAULT;
	}
	return address;
}
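
/*
 * Worked example (illustrative only, values assumed): with 4K pages, a vma
 * with vm_start = 0x08048000 and vm_pgoff = 0, and a page whose index is
 * 0x10, the computation above yields
 *
 *	address = 0x08048000 + ((0x10 - 0) << 12) = 0x08058000
 *
 * i.e. the 16th page of the mapping. An index outside the range
 * [vm_pgoff, vm_pgoff + (vm_end - vm_start)/PAGE_SIZE) lands outside the
 * vma and -EFAULT is returned instead.
 */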
/*
 * At what user virtual address is page expected in vma? checking that the
 * page matches the vma: currently only used on anon pages, by unuse_vma.
 */
unsigned long page_address_in_vma(struct page *page, struct vm_area_struct *vma)
{
	if (PageAnon(page)) {
		if ((void *)vma->anon_vma !=
		    (void *)page->mapping - PAGE_MAPPING_ANON)
			return -EFAULT;
	} else if (page->mapping && !(vma->vm_flags & VM_NONLINEAR)) {
		if (!vma->vm_file ||
		    vma->vm_file->f_mapping != page->mapping)
			return -EFAULT;
	} else
		return -EFAULT;
	return vma_address(page, vma);
}
/*
 * Check that @page is mapped at @address into @mm.
 *
 * On success returns with pte mapped and locked.
 */
pte_t *page_check_address(struct page *page, struct mm_struct *mm,
			  unsigned long address, spinlock_t **ptlp)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;
	spinlock_t *ptl;

	pgd = pgd_offset(mm, address);
	if (!pgd_present(*pgd))
		return NULL;

	pud = pud_offset(pgd, address);
	if (!pud_present(*pud))
		return NULL;

	pmd = pmd_offset(pud, address);
	if (!pmd_present(*pmd))
		return NULL;

	pte = pte_offset_map(pmd, address);
	/* Make a quick check before getting the lock */
	if (!pte_present(*pte)) {
		pte_unmap(pte);
		return NULL;
	}

	ptl = pte_lockptr(mm, pmd);
	spin_lock(ptl);
	if (pte_present(*pte) && page_to_pfn(page) == pte_pfn(*pte)) {
		*ptlp = ptl;
		return pte;
	}
	pte_unmap_unlock(pte, ptl);
	return NULL;
}
/*
 * Subfunctions of page_referenced: page_referenced_one called
 * repeatedly from either page_referenced_anon or page_referenced_file.
 */
static int page_referenced_one(struct page *page,
	struct vm_area_struct *vma, unsigned int *mapcount)
{
	struct mm_struct *mm = vma->vm_mm;
	unsigned long address;
	pte_t *pte;
	spinlock_t *ptl;
	int referenced = 0;

	address = vma_address(page, vma);
	if (address == -EFAULT)
		goto out;

	pte = page_check_address(page, mm, address, &ptl);
	if (!pte)
		goto out;

	if (ptep_clear_flush_young(vma, address, pte))
		referenced++;

	/* Pretend the page is referenced if the task has the
	   swap token and is in the middle of a page fault. */
	if (mm != current->mm && has_swap_token(mm) &&
			rwsem_is_locked(&mm->mmap_sem))
		referenced++;

	(*mapcount)--;
	pte_unmap_unlock(pte, ptl);
out:
	return referenced;
}
static int page_referenced_anon(struct page *page)
{
	unsigned int mapcount;
	struct anon_vma *anon_vma;
	struct vm_area_struct *vma;
	int referenced = 0;

	anon_vma = page_lock_anon_vma(page);
	if (!anon_vma)
		return referenced;

	mapcount = page_mapcount(page);
	list_for_each_entry(vma, &anon_vma->head, anon_vma_node) {
		referenced += page_referenced_one(page, vma, &mapcount);
		if (!mapcount)
			break;
	}

	page_unlock_anon_vma(anon_vma);
	return referenced;
}
/**
 * page_referenced_file - referenced check for object-based rmap
 * @page: the page we're checking references on.
 *
 * For an object-based mapped page, find all the places it is mapped and
 * check/clear the referenced flag.  This is done by following the page->mapping
 * pointer, then walking the chain of vmas it holds.  It returns the number
 * of references it found.
 *
 * This function is only called from page_referenced for object-based pages.
 */
static int page_referenced_file(struct page *page)
{
	unsigned int mapcount;
	struct address_space *mapping = page->mapping;
	pgoff_t pgoff = page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT);
	struct vm_area_struct *vma;
	struct prio_tree_iter iter;
	int referenced = 0;

	/*
	 * The caller's checks on page->mapping and !PageAnon have made
	 * sure that this is a file page: the check for page->mapping
	 * excludes the case just before it gets set on an anon page.
	 */
	BUG_ON(PageAnon(page));

	/*
	 * The page lock not only makes sure that page->mapping cannot
	 * suddenly be NULLified by truncation, it makes sure that the
	 * structure at mapping cannot be freed and reused yet,
	 * so we can safely take mapping->i_mmap_lock.
	 */
	BUG_ON(!PageLocked(page));

	spin_lock(&mapping->i_mmap_lock);

	/*
	 * i_mmap_lock does not stabilize mapcount at all, but mapcount
	 * is more likely to be accurate if we note it after spinning.
	 */
	mapcount = page_mapcount(page);

	vma_prio_tree_foreach(vma, &iter, &mapping->i_mmap, pgoff, pgoff) {
		if ((vma->vm_flags & (VM_LOCKED|VM_MAYSHARE))
				  == (VM_LOCKED|VM_MAYSHARE)) {
			referenced++;
			break;
		}
		referenced += page_referenced_one(page, vma, &mapcount);
		if (!mapcount)
			break;
	}

	spin_unlock(&mapping->i_mmap_lock);
	return referenced;
}
/**
 * page_referenced - test if the page was referenced
 * @page: the page to test
 * @is_locked: caller holds lock on the page
 *
 * Quick test_and_clear_referenced for all mappings to a page,
 * returns the number of ptes which referenced the page.
 */
int page_referenced(struct page *page, int is_locked)
{
	int referenced = 0;

	if (page_test_and_clear_young(page))
		referenced++;

	if (TestClearPageReferenced(page))
		referenced++;

	if (page_mapped(page) && page->mapping) {
		if (PageAnon(page))
			referenced += page_referenced_anon(page);
		else if (is_locked)
			referenced += page_referenced_file(page);
		else if (TestSetPageLocked(page))
			referenced++;
		else {
			if (page->mapping)
				referenced += page_referenced_file(page);
			unlock_page(page);
		}
	}
	return referenced;
}
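
/*
 * Usage note (assumed, based on the pageout path rather than on this file):
 * the page reclaim code in mm/vmscan.c is the expected caller, using the
 * returned count to decide whether a page on the inactive list has been
 * touched recently enough to deserve another trip around the LRU instead of
 * being unmapped and written out.
 */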
static int page_mkclean_one(struct page *page, struct vm_area_struct *vma)
{
	struct mm_struct *mm = vma->vm_mm;
	unsigned long address;
	pte_t *pte;
	spinlock_t *ptl;
	int ret = 0;

	address = vma_address(page, vma);
	if (address == -EFAULT)
		goto out;

	pte = page_check_address(page, mm, address, &ptl);
	if (!pte)
		goto out;

	if (pte_dirty(*pte) || pte_write(*pte)) {
		pte_t entry;

		flush_cache_page(vma, address, pte_pfn(*pte));
		entry = ptep_clear_flush(vma, address, pte);
		entry = pte_wrprotect(entry);
		entry = pte_mkclean(entry);
		set_pte_at(mm, address, pte, entry);
		lazy_mmu_prot_update(entry);
		ret = 1;
	}

	pte_unmap_unlock(pte, ptl);
out:
	return ret;
}
static int page_mkclean_file(struct address_space *mapping, struct page *page)
{
	pgoff_t pgoff = page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT);
	struct vm_area_struct *vma;
	struct prio_tree_iter iter;
	int ret = 0;

	BUG_ON(PageAnon(page));

	spin_lock(&mapping->i_mmap_lock);
	vma_prio_tree_foreach(vma, &iter, &mapping->i_mmap, pgoff, pgoff) {
		if (vma->vm_flags & VM_SHARED)
			ret += page_mkclean_one(page, vma);
	}
	spin_unlock(&mapping->i_mmap_lock);
	return ret;
}
int page_mkclean(struct page *page)
{
	int ret = 0;

	BUG_ON(!PageLocked(page));

	if (page_mapped(page)) {
		struct address_space *mapping = page_mapping(page);
		if (mapping)
			ret = page_mkclean_file(mapping, page);
	}
	if (page_test_and_clear_dirty(page))
		ret = 1;

	return ret;
}
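
/*
 * Intent (explanatory note, wording ours): page_mkclean() write-protects and
 * cleans every pte mapping a shared file page, so that the next store to the
 * page takes a write fault and re-dirties it. The writeback path is the
 * expected user: calling this before starting I/O means dirtying that happens
 * while the page is under write-out cannot be lost.
 */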
/**
 * __page_set_anon_rmap - setup new anonymous rmap
 * @page:	the page to add the mapping to
 * @vma:	the vm area in which the mapping is added
 * @address:	the user virtual address mapped
 */
static void __page_set_anon_rmap(struct page *page,
	struct vm_area_struct *vma, unsigned long address)
{
	struct anon_vma *anon_vma = vma->anon_vma;

	BUG_ON(!anon_vma);
	anon_vma = (void *) anon_vma + PAGE_MAPPING_ANON;
	page->mapping = (struct address_space *) anon_vma;

	page->index = linear_page_index(vma, address);

	/*
	 * nr_mapped state can be updated without turning off
	 * interrupts because it is not modified via interrupt.
	 */
	__inc_zone_page_state(page, NR_ANON_PAGES);
}
/**
 * page_add_anon_rmap - add pte mapping to an anonymous page
 * @page:	the page to add the mapping to
 * @vma:	the vm area in which the mapping is added
 * @address:	the user virtual address mapped
 *
 * The caller needs to hold the pte lock.
 */
void page_add_anon_rmap(struct page *page,
	struct vm_area_struct *vma, unsigned long address)
{
	if (atomic_inc_and_test(&page->_mapcount))
		__page_set_anon_rmap(page, vma, address);
	/* else checking page index and mapping is racy */
}
/**
 * page_add_new_anon_rmap - add pte mapping to a new anonymous page
 * @page:	the page to add the mapping to
 * @vma:	the vm area in which the mapping is added
 * @address:	the user virtual address mapped
 *
 * Same as page_add_anon_rmap but must only be called on *new* pages.
 * This means the inc-and-test can be bypassed.
 */
void page_add_new_anon_rmap(struct page *page,
	struct vm_area_struct *vma, unsigned long address)
{
	atomic_set(&page->_mapcount, 0); /* elevate count by 1 (starts at -1) */
	__page_set_anon_rmap(page, vma, address);
}
/**
 * page_add_file_rmap - add pte mapping to a file page
 * @page: the page to add the mapping to
 *
 * The caller needs to hold the pte lock.
 */
void page_add_file_rmap(struct page *page)
{
	if (atomic_inc_and_test(&page->_mapcount))
		__inc_zone_page_state(page, NR_FILE_MAPPED);
}
/**
 * page_remove_rmap - take down pte mapping from a page
 * @page: page to remove mapping from
 *
 * The caller needs to hold the pte lock.
 */
void page_remove_rmap(struct page *page, struct vm_area_struct *vma)
{
	if (atomic_add_negative(-1, &page->_mapcount)) {
		if (unlikely(page_mapcount(page) < 0)) {
			printk (KERN_EMERG "Eeek! page_mapcount(page) went negative! (%d)\n", page_mapcount(page));
			printk (KERN_EMERG "  page pfn = %lx\n", page_to_pfn(page));
			printk (KERN_EMERG "  page->flags = %lx\n", page->flags);
			printk (KERN_EMERG "  page->count = %x\n", page_count(page));
			printk (KERN_EMERG "  page->mapping = %p\n", page->mapping);
			print_symbol (KERN_EMERG "  vma->vm_ops = %s\n", (unsigned long)vma->vm_ops);
			if (vma->vm_ops)
				print_symbol (KERN_EMERG "  vma->vm_ops->nopage = %s\n", (unsigned long)vma->vm_ops->nopage);
			if (vma->vm_file && vma->vm_file->f_op)
				print_symbol (KERN_EMERG "  vma->vm_file->f_op->mmap = %s\n", (unsigned long)vma->vm_file->f_op->mmap);
			BUG();
		}

		/*
		 * It would be tidy to reset the PageAnon mapping here,
		 * but that might overwrite a racing page_add_anon_rmap
		 * which increments mapcount after us but sets mapping
		 * before us: so leave the reset to free_hot_cold_page,
		 * and remember that it's only reliable while mapped.
		 * Leaving it set also helps swapoff to reinstate ptes
		 * faster for those pages still in swapcache.
		 */
		if (page_test_and_clear_dirty(page))
			set_page_dirty(page);
		__dec_zone_page_state(page,
				PageAnon(page) ? NR_ANON_PAGES : NR_FILE_MAPPED);
	}
}
/*
 * Subfunctions of try_to_unmap: try_to_unmap_one called
 * repeatedly from either try_to_unmap_anon or try_to_unmap_file.
 */
static int try_to_unmap_one(struct page *page, struct vm_area_struct *vma,
				int migration)
{
	struct mm_struct *mm = vma->vm_mm;
	unsigned long address;
	pte_t *pte;
	pte_t pteval;
	spinlock_t *ptl;
	int ret = SWAP_AGAIN;

	address = vma_address(page, vma);
	if (address == -EFAULT)
		goto out;

	pte = page_check_address(page, mm, address, &ptl);
	if (!pte)
		goto out;

	/*
	 * If the page is mlock()d, we cannot swap it out.
	 * If it's recently referenced (perhaps page_referenced
	 * skipped over this mm) then we should reactivate it.
	 */
	if (!migration && ((vma->vm_flags & VM_LOCKED) ||
			(ptep_clear_flush_young(vma, address, pte)))) {
		ret = SWAP_FAIL;
		goto out_unmap;
	}

	/* Nuke the page table entry. */
	flush_cache_page(vma, address, page_to_pfn(page));
	pteval = ptep_clear_flush(vma, address, pte);

	/* Move the dirty bit to the physical page now the pte is gone. */
	if (pte_dirty(pteval))
		set_page_dirty(page);

	/* Update high watermark before we lower rss */
	update_hiwater_rss(mm);

	if (PageAnon(page)) {
		swp_entry_t entry = { .val = page_private(page) };

		if (PageSwapCache(page)) {
			/*
			 * Store the swap location in the pte.
			 * See handle_pte_fault() ...
			 */
			swap_duplicate(entry);
			if (list_empty(&mm->mmlist)) {
				spin_lock(&mmlist_lock);
				if (list_empty(&mm->mmlist))
					list_add(&mm->mmlist, &init_mm.mmlist);
				spin_unlock(&mmlist_lock);
			}
			dec_mm_counter(mm, anon_rss);
#ifdef CONFIG_MIGRATION
		} else {
			/*
			 * Store the pfn of the page in a special migration
			 * pte. do_swap_page() will wait until the migration
			 * pte is removed and then restart fault handling.
			 */
			BUG_ON(!migration);
			entry = make_migration_entry(page, pte_write(pteval));
#endif
		}
		set_pte_at(mm, address, pte, swp_entry_to_pte(entry));
		BUG_ON(pte_file(*pte));
	} else
#ifdef CONFIG_MIGRATION
	if (migration) {
		/* Establish migration entry for a file page */
		swp_entry_t entry;
		entry = make_migration_entry(page, pte_write(pteval));
		set_pte_at(mm, address, pte, swp_entry_to_pte(entry));
	} else
#endif
		dec_mm_counter(mm, file_rss);

	page_remove_rmap(page, vma);
	page_cache_release(page);

out_unmap:
	pte_unmap_unlock(pte, ptl);
out:
	return ret;
}
/*
 * objrmap doesn't work for nonlinear VMAs because the assumption that
 * offset-into-file correlates with offset-into-virtual-addresses does not hold.
 * Consequently, given a particular page and its ->index, we cannot locate the
 * ptes which are mapping that page without an exhaustive linear search.
 *
 * So what this code does is a mini "virtual scan" of each nonlinear VMA which
 * maps the file to which the target page belongs.  The ->vm_private_data field
 * holds the current cursor into that scan.  Successive searches will circulate
 * around the vma's virtual address space.
 *
 * So as more replacement pressure is applied to the pages in a nonlinear VMA,
 * more scanning pressure is placed against them as well.  Eventually pages
 * will become fully unmapped and are eligible for eviction.
 *
 * For very sparsely populated VMAs this is a little inefficient - chances are
 * there won't be many ptes located within the scan cluster.  In this case
 * maybe we could scan further - to the end of the pte page, perhaps.
 */
#define CLUSTER_SIZE	min(32*PAGE_SIZE, PMD_SIZE)
#define CLUSTER_MASK	(~(CLUSTER_SIZE - 1))
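
/*
 * For example (illustrative, architecture-dependent): with 4K pages and a
 * page table layout where PMD_SIZE is at least 128K, CLUSTER_SIZE works out
 * to 32*4K = 128K, i.e. each try_to_unmap_cluster() pass below examines a
 * naturally aligned window of 32 ptes within one nonlinear vma.
 */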
static void try_to_unmap_cluster(unsigned long cursor,
	unsigned int *mapcount, struct vm_area_struct *vma)
{
	struct mm_struct *mm = vma->vm_mm;
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;
	pte_t pteval;
	spinlock_t *ptl;
	struct page *page;
	unsigned long address;
	unsigned long end;

	address = (vma->vm_start + cursor) & CLUSTER_MASK;
	end = address + CLUSTER_SIZE;
	if (address < vma->vm_start)
		address = vma->vm_start;
	if (end > vma->vm_end)
		end = vma->vm_end;

	pgd = pgd_offset(mm, address);
	if (!pgd_present(*pgd))
		return;

	pud = pud_offset(pgd, address);
	if (!pud_present(*pud))
		return;

	pmd = pmd_offset(pud, address);
	if (!pmd_present(*pmd))
		return;

	pte = pte_offset_map_lock(mm, pmd, address, &ptl);

	/* Update high watermark before we lower rss */
	update_hiwater_rss(mm);

	for (; address < end; pte++, address += PAGE_SIZE) {
		if (!pte_present(*pte))
			continue;
		page = vm_normal_page(vma, address, *pte);
		BUG_ON(!page || PageAnon(page));

		if (ptep_clear_flush_young(vma, address, pte))
			continue;

		/* Nuke the page table entry. */
		flush_cache_page(vma, address, pte_pfn(*pte));
		pteval = ptep_clear_flush(vma, address, pte);

		/* If nonlinear, store the file page offset in the pte. */
		if (page->index != linear_page_index(vma, address))
			set_pte_at(mm, address, pte, pgoff_to_pte(page->index));

		/* Move the dirty bit to the physical page now the pte is gone. */
		if (pte_dirty(pteval))
			set_page_dirty(page);

		page_remove_rmap(page, vma);
		page_cache_release(page);
		dec_mm_counter(mm, file_rss);
		(*mapcount)--;
	}
	pte_unmap_unlock(pte - 1, ptl);
}
static int try_to_unmap_anon(struct page *page, int migration)
{
	struct anon_vma *anon_vma;
	struct vm_area_struct *vma;
	int ret = SWAP_AGAIN;

	anon_vma = page_lock_anon_vma(page);
	if (!anon_vma)
		return ret;

	list_for_each_entry(vma, &anon_vma->head, anon_vma_node) {
		ret = try_to_unmap_one(page, vma, migration);
		if (ret == SWAP_FAIL || !page_mapped(page))
			break;
	}

	page_unlock_anon_vma(anon_vma);
	return ret;
}
/**
 * try_to_unmap_file - unmap file page using the object-based rmap method
 * @page: the page to unmap
 *
 * Find all the mappings of a page using the mapping pointer and the vma chains
 * contained in the address_space struct it points to.
 *
 * This function is only called from try_to_unmap for object-based pages.
 */
static int try_to_unmap_file(struct page *page, int migration)
{
	struct address_space *mapping = page->mapping;
	pgoff_t pgoff = page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT);
	struct vm_area_struct *vma;
	struct prio_tree_iter iter;
	int ret = SWAP_AGAIN;
	unsigned long cursor;
	unsigned long max_nl_cursor = 0;
	unsigned long max_nl_size = 0;
	unsigned int mapcount;

	spin_lock(&mapping->i_mmap_lock);
	vma_prio_tree_foreach(vma, &iter, &mapping->i_mmap, pgoff, pgoff) {
		ret = try_to_unmap_one(page, vma, migration);
		if (ret == SWAP_FAIL || !page_mapped(page))
			goto out;
	}

	if (list_empty(&mapping->i_mmap_nonlinear))
		goto out;

	list_for_each_entry(vma, &mapping->i_mmap_nonlinear,
						shared.vm_set.list) {
		if ((vma->vm_flags & VM_LOCKED) && !migration)
			continue;
		cursor = (unsigned long) vma->vm_private_data;
		if (cursor > max_nl_cursor)
			max_nl_cursor = cursor;
		cursor = vma->vm_end - vma->vm_start;
		if (cursor > max_nl_size)
			max_nl_size = cursor;
	}

	if (max_nl_size == 0) {	/* any nonlinears locked or reserved */
		ret = SWAP_FAIL;
		goto out;
	}

	/*
	 * We don't try to search for this page in the nonlinear vmas,
	 * and page_referenced wouldn't have found it anyway.  Instead
	 * just walk the nonlinear vmas trying to age and unmap some.
	 * The mapcount of the page we came in with is irrelevant,
	 * but even so use it as a guide to how hard we should try?
	 */
	mapcount = page_mapcount(page);
	if (!mapcount)
		goto out;
	cond_resched_lock(&mapping->i_mmap_lock);

	max_nl_size = (max_nl_size + CLUSTER_SIZE - 1) & CLUSTER_MASK;
	if (max_nl_cursor == 0)
		max_nl_cursor = CLUSTER_SIZE;

	do {
		list_for_each_entry(vma, &mapping->i_mmap_nonlinear,
						shared.vm_set.list) {
			if ((vma->vm_flags & VM_LOCKED) && !migration)
				continue;
			cursor = (unsigned long) vma->vm_private_data;
			while ( cursor < max_nl_cursor &&
				cursor < vma->vm_end - vma->vm_start) {
				try_to_unmap_cluster(cursor, &mapcount, vma);
				cursor += CLUSTER_SIZE;
				vma->vm_private_data = (void *) cursor;
				if ((int)mapcount <= 0)
					goto out;
			}
			vma->vm_private_data = (void *) max_nl_cursor;
		}
		cond_resched_lock(&mapping->i_mmap_lock);
		max_nl_cursor += CLUSTER_SIZE;
	} while (max_nl_cursor <= max_nl_size);

	/*
	 * Don't loop forever (perhaps all the remaining pages are
	 * in locked vmas).  Reset cursor on all unreserved nonlinear
	 * vmas, now forgetting on which ones it had fallen behind.
	 */
	list_for_each_entry(vma, &mapping->i_mmap_nonlinear, shared.vm_set.list)
		vma->vm_private_data = NULL;
out:
	spin_unlock(&mapping->i_mmap_lock);
	return ret;
}
/**
 * try_to_unmap - try to remove all page table mappings to a page
 * @page: the page to get unmapped
 *
 * Tries to remove all the page table entries which are mapping this
 * page, used in the pageout path.  Caller must hold the page lock.
 * Return values are:
 *
 * SWAP_SUCCESS	- we succeeded in removing all mappings
 * SWAP_AGAIN	- we missed a mapping, try again later
 * SWAP_FAIL	- the page is unswappable
 */
int try_to_unmap(struct page *page, int migration)
{
	int ret;

	BUG_ON(!PageLocked(page));

	if (PageAnon(page))
		ret = try_to_unmap_anon(page, migration);
	else
		ret = try_to_unmap_file(page, migration);

	if (!page_mapped(page))
		ret = SWAP_SUCCESS;
	return ret;
}