/*
 * mm/rmap.c - physical to virtual reverse mappings
 *
 * Copyright 2001, Rik van Riel <riel@conectiva.com.br>
 * Released under the General Public License (GPL).
 *
 * Simple, low overhead reverse mapping scheme.
 * Please try to keep this thing as modular as possible.
 *
 * Provides methods for unmapping each kind of mapped page:
 * the anon methods track anonymous pages, and
 * the file methods track pages belonging to an inode.
 *
 * Original design by Rik van Riel <riel@conectiva.com.br> 2001
 * File methods by Dave McCracken <dmccr@us.ibm.com> 2003, 2004
 * Anonymous methods by Andrea Arcangeli <andrea@suse.de> 2004
 * Contributions by Hugh Dickins <hugh@veritas.com> 2003, 2004
 */
/*
 * Lock ordering in mm:
 *
 * inode->i_mutex       (while writing or truncating, not reading or faulting)
 *   inode->i_alloc_sem (vmtruncate_range)
 *   mm->mmap_sem
 *     page->flags PG_locked (lock_page)
 *       mapping->i_mmap_lock
 *         anon_vma->lock
 *           mm->page_table_lock or pte_lock
 *             zone->lru_lock (in mark_page_accessed, isolate_lru_page)
 *             swap_lock (in swap_duplicate, swap_info_get)
 *               mmlist_lock (in mmput, drain_mmlist and others)
 *               mapping->private_lock (in __set_page_dirty_buffers)
 *               inode_lock (in set_page_dirty's __mark_inode_dirty)
 *                 sb_lock (within inode_lock in fs/fs-writeback.c)
 *                 mapping->tree_lock (widely used, in set_page_dirty,
 *                           in arch-dependent flush_dcache_mmap_lock,
 *                           within inode_lock in __sync_single_inode)
 */
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/rmap.h>
#include <linux/rcupdate.h>
#include <linux/module.h>
#include <linux/kallsyms.h>

#include <asm/tlbflush.h>
struct kmem_cache *anon_vma_cachep;
/* This must be called under the mmap_sem. */
int anon_vma_prepare(struct vm_area_struct *vma)
{
        struct anon_vma *anon_vma = vma->anon_vma;

        if (unlikely(!anon_vma)) {
                struct mm_struct *mm = vma->vm_mm;
                struct anon_vma *allocated, *locked;

                anon_vma = find_mergeable_anon_vma(vma);
                if (anon_vma) {
                        allocated = NULL;
                        locked = anon_vma;
                        spin_lock(&locked->lock);
                } else {
                        anon_vma = anon_vma_alloc();
                        if (unlikely(!anon_vma))
                                return -ENOMEM;
                        allocated = anon_vma;
                        locked = NULL;
                }

                /* page_table_lock to protect against threads */
                spin_lock(&mm->page_table_lock);
                if (likely(!vma->anon_vma)) {
                        vma->anon_vma = anon_vma;
                        list_add_tail(&vma->anon_vma_node, &anon_vma->head);
                        allocated = NULL;
                }
                spin_unlock(&mm->page_table_lock);

                if (locked)
                        spin_unlock(&locked->lock);
                if (unlikely(allocated))
                        anon_vma_free(allocated);
        }
        return 0;
}
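/*
 * Illustrative sketch (not part of the original file): a fault path that
 * is about to install a new anonymous pte is expected to call
 * anon_vma_prepare() first, while holding mmap_sem, roughly:
 *
 *      if (unlikely(anon_vma_prepare(vma)))
 *              return VM_FAULT_OOM;
 *      page = alloc_zeroed_user_highpage(vma, address);
 *      ...
 *      page_add_new_anon_rmap(page, vma, address);
 *
 * The real caller of this pattern lives in mm/memory.c; the lines above
 * are only a paraphrase of it.
 */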
void __anon_vma_merge(struct vm_area_struct *vma, struct vm_area_struct *next)
{
        BUG_ON(vma->anon_vma != next->anon_vma);
        list_del(&next->anon_vma_node);
}
void __anon_vma_link(struct vm_area_struct *vma)
{
        struct anon_vma *anon_vma = vma->anon_vma;

        if (anon_vma)
                list_add_tail(&vma->anon_vma_node, &anon_vma->head);
}
void anon_vma_link(struct vm_area_struct *vma)
{
        struct anon_vma *anon_vma = vma->anon_vma;

        if (anon_vma) {
                spin_lock(&anon_vma->lock);
                list_add_tail(&vma->anon_vma_node, &anon_vma->head);
                spin_unlock(&anon_vma->lock);
        }
}
void anon_vma_unlink(struct vm_area_struct *vma)
{
        struct anon_vma *anon_vma = vma->anon_vma;
        int empty;

        if (!anon_vma)
                return;

        spin_lock(&anon_vma->lock);
        list_del(&vma->anon_vma_node);

        /* We must garbage collect the anon_vma if it's empty */
        empty = list_empty(&anon_vma->head);
        spin_unlock(&anon_vma->lock);

        if (empty)
                anon_vma_free(anon_vma);
}
static void anon_vma_ctor(void *data, struct kmem_cache *cachep,
                          unsigned long flags)
{
        if ((flags & (SLAB_CTOR_VERIFY|SLAB_CTOR_CONSTRUCTOR)) ==
                                                SLAB_CTOR_CONSTRUCTOR) {
                struct anon_vma *anon_vma = data;

                spin_lock_init(&anon_vma->lock);
                INIT_LIST_HEAD(&anon_vma->head);
        }
}
void __init anon_vma_init(void)
{
        anon_vma_cachep = kmem_cache_create("anon_vma", sizeof(struct anon_vma),
                        0, SLAB_DESTROY_BY_RCU|SLAB_PANIC, anon_vma_ctor, NULL);
}
/*
 * Getting a lock on a stable anon_vma from a page off the LRU is
 * tricky: page_lock_anon_vma relies on RCU to guard against the races.
 */
static struct anon_vma *page_lock_anon_vma(struct page *page)
{
        struct anon_vma *anon_vma = NULL;
        unsigned long anon_mapping;

        rcu_read_lock();
        anon_mapping = (unsigned long) page->mapping;
        if (!(anon_mapping & PAGE_MAPPING_ANON))
                goto out;
        if (!page_mapped(page))
                goto out;

        anon_vma = (struct anon_vma *) (anon_mapping - PAGE_MAPPING_ANON);
        spin_lock(&anon_vma->lock);
out:
        rcu_read_unlock();
        return anon_vma;
}
/*
 * At what user virtual address is page expected in vma?
 */
static inline unsigned long
vma_address(struct page *page, struct vm_area_struct *vma)
{
        pgoff_t pgoff = page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT);
        unsigned long address;

        address = vma->vm_start + ((pgoff - vma->vm_pgoff) << PAGE_SHIFT);
        if (unlikely(address < vma->vm_start || address >= vma->vm_end)) {
                /* page should be within any vma from prio_tree_next */
                BUG_ON(!PageAnon(page));
                return -EFAULT;
        }
        return address;
}
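/*
 * Worked example (illustrative only, assuming 4K pages so that
 * PAGE_CACHE_SHIFT == PAGE_SHIFT): a page with index 3, mapped by a vma
 * with vm_start == 0x08048000 and vm_pgoff == 1, is expected at
 *
 *      address = 0x08048000 + ((3 - 1) << 12) = 0x0804a000
 *
 * If the page's index falls outside the range the vma maps, the computed
 * address lands outside [vm_start, vm_end) and -EFAULT is returned.
 */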
/*
 * At what user virtual address is page expected in vma? checking that the
 * page matches the vma: currently only used on anon pages, by unuse_vma;
 */
unsigned long page_address_in_vma(struct page *page, struct vm_area_struct *vma)
{
        if (PageAnon(page)) {
                if ((void *)vma->anon_vma !=
                    (void *)page->mapping - PAGE_MAPPING_ANON)
                        return -EFAULT;
        } else if (page->mapping && !(vma->vm_flags & VM_NONLINEAR)) {
                if (!vma->vm_file ||
                    vma->vm_file->f_mapping != page->mapping)
                        return -EFAULT;
        } else
                return -EFAULT;
        return vma_address(page, vma);
}
/*
 * Check that @page is mapped at @address into @mm.
 *
 * On success returns with pte mapped and locked.
 */
pte_t *page_check_address(struct page *page, struct mm_struct *mm,
                          unsigned long address, spinlock_t **ptlp)
{
        pgd_t *pgd;
        pud_t *pud;
        pmd_t *pmd;
        pte_t *pte;
        spinlock_t *ptl;

        pgd = pgd_offset(mm, address);
        if (!pgd_present(*pgd))
                return NULL;

        pud = pud_offset(pgd, address);
        if (!pud_present(*pud))
                return NULL;

        pmd = pmd_offset(pud, address);
        if (!pmd_present(*pmd))
                return NULL;

        pte = pte_offset_map(pmd, address);
        /* Make a quick check before getting the lock */
        if (!pte_present(*pte)) {
                pte_unmap(pte);
                return NULL;
        }

        ptl = pte_lockptr(mm, pmd);
        spin_lock(ptl);
        if (pte_present(*pte) && page_to_pfn(page) == pte_pfn(*pte)) {
                *ptlp = ptl;
                return pte;
        }
        pte_unmap_unlock(pte, ptl);
        return NULL;
}
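/*
 * Typical use (illustrative sketch, not a specific caller): on success
 * the returned pte is still mapped and *ptlp points to the held pte lock,
 * so the caller must drop both when it is done:
 *
 *      pte = page_check_address(page, mm, address, &ptl);
 *      if (!pte)
 *              return 0;
 *      ... examine or modify *pte under the lock ...
 *      pte_unmap_unlock(pte, ptl);
 *
 * page_referenced_one() and page_mkclean_one() below follow this pattern.
 */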
/*
 * Subfunctions of page_referenced: page_referenced_one called
 * repeatedly from either page_referenced_anon or page_referenced_file.
 */
static int page_referenced_one(struct page *page,
        struct vm_area_struct *vma, unsigned int *mapcount)
{
        struct mm_struct *mm = vma->vm_mm;
        unsigned long address;
        pte_t *pte;
        spinlock_t *ptl;
        int referenced = 0;

        address = vma_address(page, vma);
        if (address == -EFAULT)
                goto out;

        pte = page_check_address(page, mm, address, &ptl);
        if (!pte)
                goto out;

        if (ptep_clear_flush_young(vma, address, pte))
                referenced++;

        /* Pretend the page is referenced if the task has the
           swap token and is in the middle of a page fault. */
        if (mm != current->mm && has_swap_token(mm) &&
                        rwsem_is_locked(&mm->mmap_sem))
                referenced++;

        (*mapcount)--;
        pte_unmap_unlock(pte, ptl);
out:
        return referenced;
}
static int page_referenced_anon(struct page *page)
{
        unsigned int mapcount;
        struct anon_vma *anon_vma;
        struct vm_area_struct *vma;
        int referenced = 0;

        anon_vma = page_lock_anon_vma(page);
        if (!anon_vma)
                return referenced;

        mapcount = page_mapcount(page);
        list_for_each_entry(vma, &anon_vma->head, anon_vma_node) {
                referenced += page_referenced_one(page, vma, &mapcount);
                if (!mapcount)
                        break;
        }
        spin_unlock(&anon_vma->lock);
        return referenced;
}
/**
 * page_referenced_file - referenced check for object-based rmap
 * @page: the page we're checking references on.
 *
 * For an object-based mapped page, find all the places it is mapped and
 * check/clear the referenced flag. This is done by following the page->mapping
 * pointer, then walking the chain of vmas it holds. It returns the number
 * of references it found.
 *
 * This function is only called from page_referenced for object-based pages.
 */
static int page_referenced_file(struct page *page)
{
        unsigned int mapcount;
        struct address_space *mapping = page->mapping;
        pgoff_t pgoff = page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT);
        struct vm_area_struct *vma;
        struct prio_tree_iter iter;
        int referenced = 0;

        /*
         * The caller's checks on page->mapping and !PageAnon have made
         * sure that this is a file page: the check for page->mapping
         * excludes the case just before it gets set on an anon page.
         */
        BUG_ON(PageAnon(page));

        /*
         * The page lock not only makes sure that page->mapping cannot
         * suddenly be NULLified by truncation, it makes sure that the
         * structure at mapping cannot be freed and reused yet,
         * so we can safely take mapping->i_mmap_lock.
         */
        BUG_ON(!PageLocked(page));

        spin_lock(&mapping->i_mmap_lock);

        /*
         * i_mmap_lock does not stabilize mapcount at all, but mapcount
         * is more likely to be accurate if we note it after spinning.
         */
        mapcount = page_mapcount(page);

        vma_prio_tree_foreach(vma, &iter, &mapping->i_mmap, pgoff, pgoff) {
                if ((vma->vm_flags & (VM_LOCKED|VM_MAYSHARE))
                                  == (VM_LOCKED|VM_MAYSHARE)) {
                        referenced++;
                        break;
                }
                referenced += page_referenced_one(page, vma, &mapcount);
                if (!mapcount)
                        break;
        }

        spin_unlock(&mapping->i_mmap_lock);
        return referenced;
}
/**
 * page_referenced - test if the page was referenced
 * @page: the page to test
 * @is_locked: caller holds lock on the page
 *
 * Quick test_and_clear_referenced for all mappings to a page,
 * returns the number of ptes which referenced the page.
 */
int page_referenced(struct page *page, int is_locked)
{
        int referenced = 0;

        if (page_test_and_clear_young(page))
                referenced++;

        if (TestClearPageReferenced(page))
                referenced++;

        if (page_mapped(page) && page->mapping) {
                if (PageAnon(page))
                        referenced += page_referenced_anon(page);
                else if (is_locked)
                        referenced += page_referenced_file(page);
                else if (TestSetPageLocked(page))
                        referenced++;
                else {
                        if (page->mapping)
                                referenced += page_referenced_file(page);
                        unlock_page(page);
                }
        }
        return referenced;
}
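/*
 * Illustrative sketch of the intended use (the real caller is the page
 * reclaim code in mm/vmscan.c, not this file): a non-zero return value
 * is treated as "recently used", roughly:
 *
 *      if (page_referenced(page, 1))
 *              keep the page on (or rotate it back to) the active list;
 *      else
 *              consider the page for eviction;
 */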
static int page_mkclean_one(struct page *page, struct vm_area_struct *vma)
{
        struct mm_struct *mm = vma->vm_mm;
        unsigned long address;
        pte_t *pte;
        spinlock_t *ptl;
        int ret = 0;

        address = vma_address(page, vma);
        if (address == -EFAULT)
                goto out;

        pte = page_check_address(page, mm, address, &ptl);
        if (!pte)
                goto out;

        if (pte_dirty(*pte) || pte_write(*pte)) {
                pte_t entry;

                flush_cache_page(vma, address, pte_pfn(*pte));
                entry = ptep_clear_flush(vma, address, pte);
                entry = pte_wrprotect(entry);
                entry = pte_mkclean(entry);
                set_pte_at(mm, address, pte, entry);
                lazy_mmu_prot_update(entry);
                ret = 1;
        }

        pte_unmap_unlock(pte, ptl);
out:
        return ret;
}
static int page_mkclean_file(struct address_space *mapping, struct page *page)
{
        pgoff_t pgoff = page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT);
        struct vm_area_struct *vma;
        struct prio_tree_iter iter;
        int ret = 0;

        BUG_ON(PageAnon(page));

        spin_lock(&mapping->i_mmap_lock);
        vma_prio_tree_foreach(vma, &iter, &mapping->i_mmap, pgoff, pgoff) {
                if (vma->vm_flags & VM_SHARED)
                        ret += page_mkclean_one(page, vma);
        }
        spin_unlock(&mapping->i_mmap_lock);
        return ret;
}
int page_mkclean(struct page *page)
{
        int ret = 0;

        BUG_ON(!PageLocked(page));

        if (page_mapped(page)) {
                struct address_space *mapping = page_mapping(page);
                if (mapping)
                        ret = page_mkclean_file(mapping, page);
                if (page_test_and_clear_dirty(page))
                        ret = 1;
        }

        return ret;
}
/**
 * page_set_anon_rmap - setup new anonymous rmap
 * @page: the page to add the mapping to
 * @vma: the vm area in which the mapping is added
 * @address: the user virtual address mapped
 */
static void __page_set_anon_rmap(struct page *page,
        struct vm_area_struct *vma, unsigned long address)
{
        struct anon_vma *anon_vma = vma->anon_vma;

        BUG_ON(!anon_vma);
        anon_vma = (void *) anon_vma + PAGE_MAPPING_ANON;
        page->mapping = (struct address_space *) anon_vma;

        page->index = linear_page_index(vma, address);

        /*
         * nr_mapped state can be updated without turning off
         * interrupts because it is not modified via interrupt.
         */
        __inc_zone_page_state(page, NR_ANON_PAGES);
}
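/*
 * Illustrative note (not from the original source): PAGE_MAPPING_ANON is
 * the low bit of page->mapping, so after __page_set_anon_rmap() the field
 * can be decoded again as
 *
 *      anon_vma = (struct anon_vma *)((unsigned long)page->mapping
 *                                              - PAGE_MAPPING_ANON);
 *
 * which is what page_lock_anon_vma() above does; a struct address_space
 * pointer never has that bit set because of its alignment.
 */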
/**
 * page_add_anon_rmap - add pte mapping to an anonymous page
 * @page: the page to add the mapping to
 * @vma: the vm area in which the mapping is added
 * @address: the user virtual address mapped
 *
 * The caller needs to hold the pte lock.
 */
void page_add_anon_rmap(struct page *page,
        struct vm_area_struct *vma, unsigned long address)
{
        if (atomic_inc_and_test(&page->_mapcount))
                __page_set_anon_rmap(page, vma, address);
        /* else checking page index and mapping is racy */
}
/*
 * page_add_new_anon_rmap - add pte mapping to a new anonymous page
 * @page: the page to add the mapping to
 * @vma: the vm area in which the mapping is added
 * @address: the user virtual address mapped
 *
 * Same as page_add_anon_rmap but must only be called on *new* pages.
 * This means the inc-and-test can be bypassed.
 */
void page_add_new_anon_rmap(struct page *page,
        struct vm_area_struct *vma, unsigned long address)
{
        atomic_set(&page->_mapcount, 0); /* elevate count by 1 (starts at -1) */
        __page_set_anon_rmap(page, vma, address);
}
/**
 * page_add_file_rmap - add pte mapping to a file page
 * @page: the page to add the mapping to
 *
 * The caller needs to hold the pte lock.
 */
void page_add_file_rmap(struct page *page)
{
        if (atomic_inc_and_test(&page->_mapcount))
                __inc_zone_page_state(page, NR_FILE_MAPPED);
}
/**
 * page_remove_rmap - take down pte mapping from a page
 * @page: page to remove mapping from
 *
 * The caller needs to hold the pte lock.
 */
void page_remove_rmap(struct page *page, struct vm_area_struct *vma)
{
        if (atomic_add_negative(-1, &page->_mapcount)) {
                if (unlikely(page_mapcount(page) < 0)) {
                        printk (KERN_EMERG "Eeek! page_mapcount(page) went negative! (%d)\n", page_mapcount(page));
                        printk (KERN_EMERG "  page pfn = %lx\n", page_to_pfn(page));
                        printk (KERN_EMERG "  page->flags = %lx\n", page->flags);
                        printk (KERN_EMERG "  page->count = %x\n", page_count(page));
                        printk (KERN_EMERG "  page->mapping = %p\n", page->mapping);
                        print_symbol (KERN_EMERG "  vma->vm_ops = %s\n", (unsigned long)vma->vm_ops);
                        if (vma->vm_ops)
                                print_symbol (KERN_EMERG "  vma->vm_ops->nopage = %s\n", (unsigned long)vma->vm_ops->nopage);
                        if (vma->vm_file && vma->vm_file->f_op)
                                print_symbol (KERN_EMERG "  vma->vm_file->f_op->mmap = %s\n", (unsigned long)vma->vm_file->f_op->mmap);
                        BUG();
                }

                /*
                 * It would be tidy to reset the PageAnon mapping here,
                 * but that might overwrite a racing page_add_anon_rmap
                 * which increments mapcount after us but sets mapping
                 * before us: so leave the reset to free_hot_cold_page,
                 * and remember that it's only reliable while mapped.
                 * Leaving it set also helps swapoff to reinstate ptes
                 * faster for those pages still in swapcache.
                 */
                if (page_test_and_clear_dirty(page))
                        set_page_dirty(page);
                __dec_zone_page_state(page,
                                PageAnon(page) ? NR_ANON_PAGES : NR_FILE_MAPPED);
        }
}
/*
 * Subfunctions of try_to_unmap: try_to_unmap_one called
 * repeatedly from either try_to_unmap_anon or try_to_unmap_file.
 */
static int try_to_unmap_one(struct page *page, struct vm_area_struct *vma,
                                int migration)
{
        struct mm_struct *mm = vma->vm_mm;
        unsigned long address;
        pte_t *pte;
        pte_t pteval;
        spinlock_t *ptl;
        int ret = SWAP_AGAIN;

        address = vma_address(page, vma);
        if (address == -EFAULT)
                goto out;

        pte = page_check_address(page, mm, address, &ptl);
        if (!pte)
                goto out;

        /*
         * If the page is mlock()d, we cannot swap it out.
         * If it's recently referenced (perhaps page_referenced
         * skipped over this mm) then we should reactivate it.
         */
        if (!migration && ((vma->vm_flags & VM_LOCKED) ||
                        (ptep_clear_flush_young(vma, address, pte)))) {
                ret = SWAP_FAIL;
                goto out_unmap;
        }

        /* Nuke the page table entry. */
        flush_cache_page(vma, address, page_to_pfn(page));
        pteval = ptep_clear_flush(vma, address, pte);

        /* Move the dirty bit to the physical page now the pte is gone. */
        if (pte_dirty(pteval))
                set_page_dirty(page);

        /* Update high watermark before we lower rss */
        update_hiwater_rss(mm);

        if (PageAnon(page)) {
                swp_entry_t entry = { .val = page_private(page) };

                if (PageSwapCache(page)) {
                        /*
                         * Store the swap location in the pte.
                         * See handle_pte_fault() ...
                         */
                        swap_duplicate(entry);
                        if (list_empty(&mm->mmlist)) {
                                spin_lock(&mmlist_lock);
                                if (list_empty(&mm->mmlist))
                                        list_add(&mm->mmlist, &init_mm.mmlist);
                                spin_unlock(&mmlist_lock);
                        }
                        dec_mm_counter(mm, anon_rss);
#ifdef CONFIG_MIGRATION
                } else {
                        /*
                         * Store the pfn of the page in a special migration
                         * pte. do_swap_page() will wait until the migration
                         * pte is removed and then restart fault handling.
                         */
                        BUG_ON(!migration);
                        entry = make_migration_entry(page, pte_write(pteval));
#endif
                }
                set_pte_at(mm, address, pte, swp_entry_to_pte(entry));
                BUG_ON(pte_file(*pte));
        } else
#ifdef CONFIG_MIGRATION
        if (migration) {
                /* Establish migration entry for a file page */
                swp_entry_t entry;
                entry = make_migration_entry(page, pte_write(pteval));
                set_pte_at(mm, address, pte, swp_entry_to_pte(entry));
        } else
#endif
                dec_mm_counter(mm, file_rss);

        page_remove_rmap(page, vma);
        page_cache_release(page);

out_unmap:
        pte_unmap_unlock(pte, ptl);
out:
        return ret;
}
/*
 * objrmap doesn't work for nonlinear VMAs because the assumption that
 * offset-into-file correlates with offset-into-virtual-addresses does not hold.
 * Consequently, given a particular page and its ->index, we cannot locate the
 * ptes which are mapping that page without an exhaustive linear search.
 *
 * So what this code does is a mini "virtual scan" of each nonlinear VMA which
 * maps the file to which the target page belongs. The ->vm_private_data field
 * holds the current cursor into that scan. Successive searches will circulate
 * around the vma's virtual address space.
 *
 * So as more replacement pressure is applied to the pages in a nonlinear VMA,
 * more scanning pressure is placed against them as well. Eventually pages
 * will become fully unmapped and are eligible for eviction.
 *
 * For very sparsely populated VMAs this is a little inefficient - chances are
 * there won't be many ptes located within the scan cluster. In this case
 * maybe we could scan further - to the end of the pte page, perhaps.
 */
#define CLUSTER_SIZE    min(32*PAGE_SIZE, PMD_SIZE)
#define CLUSTER_MASK    (~(CLUSTER_SIZE - 1))
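/*
 * Worked example (illustrative only, assuming 4K pages, so CLUSTER_SIZE
 * is 32 * 4K = 128K = 0x20000): for a vma starting at 0x40000000 and a
 * cursor of 0x31000, try_to_unmap_cluster() below scans the window
 *
 *      address = (0x40000000 + 0x31000) & ~0x1ffff = 0x40020000
 *      end     = address + 0x20000                 = 0x40040000
 *
 * i.e. one aligned cluster of the vma per call; try_to_unmap_file()
 * advances the cursor by CLUSTER_SIZE between calls.
 */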
static void try_to_unmap_cluster(unsigned long cursor,
        unsigned int *mapcount, struct vm_area_struct *vma)
{
        struct mm_struct *mm = vma->vm_mm;
        pgd_t *pgd;
        pud_t *pud;
        pmd_t *pmd;
        pte_t *pte;
        pte_t pteval;
        spinlock_t *ptl;
        struct page *page;
        unsigned long address;
        unsigned long end;

        address = (vma->vm_start + cursor) & CLUSTER_MASK;
        end = address + CLUSTER_SIZE;
        if (address < vma->vm_start)
                address = vma->vm_start;
        if (end > vma->vm_end)
                end = vma->vm_end;

        pgd = pgd_offset(mm, address);
        if (!pgd_present(*pgd))
                return;

        pud = pud_offset(pgd, address);
        if (!pud_present(*pud))
                return;

        pmd = pmd_offset(pud, address);
        if (!pmd_present(*pmd))
                return;

        pte = pte_offset_map_lock(mm, pmd, address, &ptl);

        /* Update high watermark before we lower rss */
        update_hiwater_rss(mm);

        for (; address < end; pte++, address += PAGE_SIZE) {
                if (!pte_present(*pte))
                        continue;
                page = vm_normal_page(vma, address, *pte);
                BUG_ON(!page || PageAnon(page));

                if (ptep_clear_flush_young(vma, address, pte))
                        continue;

                /* Nuke the page table entry. */
                flush_cache_page(vma, address, pte_pfn(*pte));
                pteval = ptep_clear_flush(vma, address, pte);

                /* If nonlinear, store the file page offset in the pte. */
                if (page->index != linear_page_index(vma, address))
                        set_pte_at(mm, address, pte, pgoff_to_pte(page->index));

                /* Move the dirty bit to the physical page now the pte is gone. */
                if (pte_dirty(pteval))
                        set_page_dirty(page);

                page_remove_rmap(page, vma);
                page_cache_release(page);
                dec_mm_counter(mm, file_rss);
                (*mapcount)--;
        }
        pte_unmap_unlock(pte - 1, ptl);
}
static int try_to_unmap_anon(struct page *page, int migration)
{
        struct anon_vma *anon_vma;
        struct vm_area_struct *vma;
        int ret = SWAP_AGAIN;

        anon_vma = page_lock_anon_vma(page);
        if (!anon_vma)
                return ret;

        list_for_each_entry(vma, &anon_vma->head, anon_vma_node) {
                ret = try_to_unmap_one(page, vma, migration);
                if (ret == SWAP_FAIL || !page_mapped(page))
                        break;
        }
        spin_unlock(&anon_vma->lock);
        return ret;
}
/**
 * try_to_unmap_file - unmap file page using the object-based rmap method
 * @page: the page to unmap
 *
 * Find all the mappings of a page using the mapping pointer and the vma chains
 * contained in the address_space struct it points to.
 *
 * This function is only called from try_to_unmap for object-based pages.
 */
static int try_to_unmap_file(struct page *page, int migration)
{
        struct address_space *mapping = page->mapping;
        pgoff_t pgoff = page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT);
        struct vm_area_struct *vma;
        struct prio_tree_iter iter;
        int ret = SWAP_AGAIN;
        unsigned long cursor;
        unsigned long max_nl_cursor = 0;
        unsigned long max_nl_size = 0;
        unsigned int mapcount;

        spin_lock(&mapping->i_mmap_lock);
        vma_prio_tree_foreach(vma, &iter, &mapping->i_mmap, pgoff, pgoff) {
                ret = try_to_unmap_one(page, vma, migration);
                if (ret == SWAP_FAIL || !page_mapped(page))
                        goto out;
        }

        if (list_empty(&mapping->i_mmap_nonlinear))
                goto out;

        list_for_each_entry(vma, &mapping->i_mmap_nonlinear,
                                                shared.vm_set.list) {
                if ((vma->vm_flags & VM_LOCKED) && !migration)
                        continue;
                cursor = (unsigned long) vma->vm_private_data;
                if (cursor > max_nl_cursor)
                        max_nl_cursor = cursor;
                cursor = vma->vm_end - vma->vm_start;
                if (cursor > max_nl_size)
                        max_nl_size = cursor;
        }

        if (max_nl_size == 0) { /* any nonlinears locked or reserved */
                ret = SWAP_FAIL;
                goto out;
        }

        /*
         * We don't try to search for this page in the nonlinear vmas,
         * and page_referenced wouldn't have found it anyway. Instead
         * just walk the nonlinear vmas trying to age and unmap some.
         * The mapcount of the page we came in with is irrelevant,
         * but even so use it as a guide to how hard we should try?
         */
        mapcount = page_mapcount(page);
        if (!mapcount)
                goto out;
        cond_resched_lock(&mapping->i_mmap_lock);

        max_nl_size = (max_nl_size + CLUSTER_SIZE - 1) & CLUSTER_MASK;
        if (max_nl_cursor == 0)
                max_nl_cursor = CLUSTER_SIZE;

        do {
                list_for_each_entry(vma, &mapping->i_mmap_nonlinear,
                                                shared.vm_set.list) {
                        if ((vma->vm_flags & VM_LOCKED) && !migration)
                                continue;
                        cursor = (unsigned long) vma->vm_private_data;
                        while ( cursor < max_nl_cursor &&
                                cursor < vma->vm_end - vma->vm_start) {
                                try_to_unmap_cluster(cursor, &mapcount, vma);
                                cursor += CLUSTER_SIZE;
                                vma->vm_private_data = (void *) cursor;
                                if ((int)mapcount <= 0)
                                        goto out;
                        }
                        vma->vm_private_data = (void *) max_nl_cursor;
                }
                cond_resched_lock(&mapping->i_mmap_lock);
                max_nl_cursor += CLUSTER_SIZE;
        } while (max_nl_cursor <= max_nl_size);

        /*
         * Don't loop forever (perhaps all the remaining pages are
         * in locked vmas). Reset cursor on all unreserved nonlinear
         * vmas, now forgetting on which ones it had fallen behind.
         */
        list_for_each_entry(vma, &mapping->i_mmap_nonlinear, shared.vm_set.list)
                vma->vm_private_data = NULL;
out:
        spin_unlock(&mapping->i_mmap_lock);
        return ret;
}
/**
 * try_to_unmap - try to remove all page table mappings to a page
 * @page: the page to get unmapped
 *
 * Tries to remove all the page table entries which are mapping this
 * page, used in the pageout path. Caller must hold the page lock.
 * Return values are:
 *
 * SWAP_SUCCESS - we succeeded in removing all mappings
 * SWAP_AGAIN   - we missed a mapping, try again later
 * SWAP_FAIL    - the page is unswappable
 */
int try_to_unmap(struct page *page, int migration)
{
        int ret;

        BUG_ON(!PageLocked(page));

        if (PageAnon(page))
                ret = try_to_unmap_anon(page, migration);
        else
                ret = try_to_unmap_file(page, migration);

        if (!page_mapped(page))
                ret = SWAP_SUCCESS;
        return ret;
}