/*
 * mm/rmap.c - physical to virtual reverse mappings
 *
 * Copyright 2001, Rik van Riel <riel@conectiva.com.br>
 * Released under the General Public License (GPL).
 *
 * Simple, low overhead reverse mapping scheme.
 * Please try to keep this thing as modular as possible.
 *
 * Provides methods for unmapping each kind of mapped page:
 * the anon methods track anonymous pages, and
 * the file methods track pages belonging to an inode.
 *
 * Original design by Rik van Riel <riel@conectiva.com.br> 2001
 * File methods by Dave McCracken <dmccr@us.ibm.com> 2003, 2004
 * Anonymous methods by Andrea Arcangeli <andrea@suse.de> 2004
 * Contributions by Hugh Dickins <hugh@veritas.com> 2003, 2004
 */
/*
 * Lock ordering in mm:
 *
 * inode->i_mutex	(while writing or truncating, not reading or faulting)
 *
 * When a page fault occurs while writing from user space to a file, down_read
 * of mmap_sem nests within i_mutex; in sys_msync, i_mutex nests within
 * down_read of mmap_sem; i_mutex and down_write of mmap_sem are never
 * taken together; in truncation, i_mutex is taken outermost.
 *
 * mm->mmap_sem
 *   page->flags PG_locked (lock_page)
 *     mapping->i_mmap_lock
 *       anon_vma->lock
 *         mm->page_table_lock or pte_lock
 *           zone->lru_lock (in mark_page_accessed, isolate_lru_page)
 *           swap_lock (in swap_duplicate, swap_info_get)
 *             mmlist_lock (in mmput, drain_mmlist and others)
 *             mapping->private_lock (in __set_page_dirty_buffers)
 *             inode_lock (in set_page_dirty's __mark_inode_dirty)
 *               sb_lock (within inode_lock in fs/fs-writeback.c)
 *               mapping->tree_lock (widely used, in set_page_dirty,
 *                         in arch-dependent flush_dcache_mmap_lock,
 *                         within inode_lock in __sync_single_inode)
 */
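/*
 * Illustration of the ordering above as this file follows it: the pageout
 * code holds the page lock, page_referenced_file() and try_to_unmap_file()
 * below then take mapping->i_mmap_lock, and page_check_address() finally
 * takes the pte lock - always outermost to innermost, never the reverse.
 */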
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/rmap.h>
#include <linux/rcupdate.h>
#include <linux/module.h>

#include <asm/tlbflush.h>
//#define RMAP_DEBUG /* can be enabled only for debugging */

kmem_cache_t *anon_vma_cachep;
static inline void validate_anon_vma(struct vm_area_struct *find_vma)
{
#ifdef RMAP_DEBUG
	struct anon_vma *anon_vma = find_vma->anon_vma;
	struct vm_area_struct *vma;
	unsigned int mapcount = 0;
	int found = 0;

	list_for_each_entry(vma, &anon_vma->head, anon_vma_node) {
		mapcount++;
		BUG_ON(mapcount > 100000);
		if (vma == find_vma)
			found = 1;
	}
	BUG_ON(!found);
#endif
}
/* This must be called under the mmap_sem. */
int anon_vma_prepare(struct vm_area_struct *vma)
{
	struct anon_vma *anon_vma = vma->anon_vma;

	might_sleep();
	if (unlikely(!anon_vma)) {
		struct mm_struct *mm = vma->vm_mm;
		struct anon_vma *allocated, *locked;

		anon_vma = find_mergeable_anon_vma(vma);
		if (anon_vma) {
			allocated = NULL;
			locked = anon_vma;
			spin_lock(&locked->lock);
		} else {
			anon_vma = anon_vma_alloc();
			if (unlikely(!anon_vma))
				return -ENOMEM;
			allocated = anon_vma;
			locked = NULL;
		}

		/* page_table_lock to protect against threads */
		spin_lock(&mm->page_table_lock);
		if (likely(!vma->anon_vma)) {
			vma->anon_vma = anon_vma;
			list_add(&vma->anon_vma_node, &anon_vma->head);
			allocated = NULL;
		}
		spin_unlock(&mm->page_table_lock);

		if (locked)
			spin_unlock(&locked->lock);
		if (unlikely(allocated))
			anon_vma_free(allocated);
	}
	return 0;
}
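/*
 * Usage sketch (not part of this file; variable names are assumed, not
 * taken from any particular caller): a fault handler calls
 * anon_vma_prepare() with mmap_sem held for read before installing a
 * brand new anonymous page, so that vma->anon_vma is ready for
 * page_add_new_anon_rmap() below.
 */
#if 0
	if (unlikely(anon_vma_prepare(vma)))
		return VM_FAULT_OOM;	/* could not allocate an anon_vma */
	page = alloc_zeroed_user_highpage(vma, address);
	...
	page_add_new_anon_rmap(page, vma, address);
#endif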
void __anon_vma_merge(struct vm_area_struct *vma, struct vm_area_struct *next)
{
	BUG_ON(vma->anon_vma != next->anon_vma);
	list_del(&next->anon_vma_node);
}
void __anon_vma_link(struct vm_area_struct *vma)
{
	struct anon_vma *anon_vma = vma->anon_vma;

	if (anon_vma) {
		list_add(&vma->anon_vma_node, &anon_vma->head);
		validate_anon_vma(vma);
	}
}
void anon_vma_link(struct vm_area_struct *vma)
{
	struct anon_vma *anon_vma = vma->anon_vma;

	if (anon_vma) {
		spin_lock(&anon_vma->lock);
		list_add(&vma->anon_vma_node, &anon_vma->head);
		validate_anon_vma(vma);
		spin_unlock(&anon_vma->lock);
	}
}
void anon_vma_unlink(struct vm_area_struct *vma)
{
	struct anon_vma *anon_vma = vma->anon_vma;
	int empty;

	if (!anon_vma)
		return;

	spin_lock(&anon_vma->lock);
	validate_anon_vma(vma);
	list_del(&vma->anon_vma_node);

	/* We must garbage collect the anon_vma if it's empty */
	empty = list_empty(&anon_vma->head);
	spin_unlock(&anon_vma->lock);

	if (empty)
		anon_vma_free(anon_vma);
}
static void anon_vma_ctor(void *data, kmem_cache_t *cachep, unsigned long flags)
{
	if ((flags & (SLAB_CTOR_VERIFY|SLAB_CTOR_CONSTRUCTOR)) ==
						SLAB_CTOR_CONSTRUCTOR) {
		struct anon_vma *anon_vma = data;

		spin_lock_init(&anon_vma->lock);
		INIT_LIST_HEAD(&anon_vma->head);
	}
}
void __init anon_vma_init(void)
{
	anon_vma_cachep = kmem_cache_create("anon_vma", sizeof(struct anon_vma),
			0, SLAB_DESTROY_BY_RCU|SLAB_PANIC, anon_vma_ctor, NULL);
}
/*
 * Getting a lock on a stable anon_vma from a page off the LRU is
 * tricky: page_lock_anon_vma relies on RCU to guard against the races.
 */
static struct anon_vma *page_lock_anon_vma(struct page *page)
{
	struct anon_vma *anon_vma = NULL;
	unsigned long anon_mapping;

	rcu_read_lock();
	anon_mapping = (unsigned long) page->mapping;
	if (!(anon_mapping & PAGE_MAPPING_ANON))
		goto out;
	if (!page_mapped(page))
		goto out;

	anon_vma = (struct anon_vma *) (anon_mapping - PAGE_MAPPING_ANON);
	spin_lock(&anon_vma->lock);
out:
	rcu_read_unlock();
	return anon_vma;
}
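/*
 * Note on the RCU trick above: anon_vma_cachep is created with
 * SLAB_DESTROY_BY_RCU (see anon_vma_init), so even if the page is unmapped
 * and its anon_vma freed between reading page->mapping and taking the lock,
 * the memory still holds a validly initialized anon_vma until a grace
 * period passes; the page_mapped() check guards against acting on a stale
 * mapping.
 */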
#ifdef CONFIG_MIGRATION
/*
 * Remove an anonymous page from swap, replacing the swap ptes with real
 * ptes pointing to valid pages, and then releasing the page from the
 * swap cache.
 *
 * Must hold the page lock on page and the mmap_sem of one vma that
 * contains the page.
 */
void remove_from_swap(struct page *page)
{
	struct anon_vma *anon_vma;
	struct vm_area_struct *vma;
	unsigned long mapping;

	if (!PageSwapCache(page))
		return;

	mapping = (unsigned long)page->mapping;

	if (!mapping || (mapping & PAGE_MAPPING_ANON) == 0)
		return;

	/*
	 * We hold the mmap_sem lock, so there is no need to call
	 * page_lock_anon_vma.
	 */
	anon_vma = (struct anon_vma *) (mapping - PAGE_MAPPING_ANON);
	spin_lock(&anon_vma->lock);

	list_for_each_entry(vma, &anon_vma->head, anon_vma_node)
		remove_vma_swap(vma, page);

	spin_unlock(&anon_vma->lock);
	delete_from_swap_cache(page);
}
EXPORT_SYMBOL(remove_from_swap);
#endif
/*
 * At what user virtual address is page expected in vma?
 */
static inline unsigned long
vma_address(struct page *page, struct vm_area_struct *vma)
{
	pgoff_t pgoff = page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT);
	unsigned long address;

	address = vma->vm_start + ((pgoff - vma->vm_pgoff) << PAGE_SHIFT);
	if (unlikely(address < vma->vm_start || address >= vma->vm_end)) {
		/* page should be within any vma from prio_tree_next */
		BUG_ON(!PageAnon(page));
		return -EFAULT;
	}
	return address;
}
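/*
 * Worked example (illustrative numbers only): with 4K pages, a page with
 * page->index == 10 in a vma with vm_pgoff == 8 and vm_start == 0x40000000
 * yields address = 0x40000000 + ((10 - 8) << 12) = 0x40002000.  A page
 * whose computed address falls outside [vm_start, vm_end) returns -EFAULT.
 */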
/*
 * At what user virtual address is page expected in vma? Also checks that
 * the page matches the vma: currently only used on anon pages, by unuse_vma.
 */
unsigned long page_address_in_vma(struct page *page, struct vm_area_struct *vma)
{
	if (PageAnon(page)) {
		if ((void *)vma->anon_vma !=
		    (void *)page->mapping - PAGE_MAPPING_ANON)
			return -EFAULT;
	} else if (page->mapping && !(vma->vm_flags & VM_NONLINEAR)) {
		if (!vma->vm_file ||
		    vma->vm_file->f_mapping != page->mapping)
			return -EFAULT;
	} else
		return -EFAULT;
	return vma_address(page, vma);
}
/*
 * Check that @page is mapped at @address into @mm.
 *
 * On success returns with the pte mapped and locked.
 */
pte_t *page_check_address(struct page *page, struct mm_struct *mm,
			  unsigned long address, spinlock_t **ptlp)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;
	spinlock_t *ptl;

	pgd = pgd_offset(mm, address);
	if (!pgd_present(*pgd))
		return NULL;

	pud = pud_offset(pgd, address);
	if (!pud_present(*pud))
		return NULL;

	pmd = pmd_offset(pud, address);
	if (!pmd_present(*pmd))
		return NULL;

	pte = pte_offset_map(pmd, address);
	/* Make a quick check before getting the lock */
	if (!pte_present(*pte)) {
		pte_unmap(pte);
		return NULL;
	}

	ptl = pte_lockptr(mm, pmd);
	spin_lock(ptl);
	if (pte_present(*pte) && page_to_pfn(page) == pte_pfn(*pte)) {
		*ptlp = ptl;
		return pte;
	}
	pte_unmap_unlock(pte, ptl);
	return NULL;
}
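/*
 * Caller pattern (a sketch; it mirrors page_referenced_one() and
 * try_to_unmap_one() below): a successful lookup must be paired with
 * pte_unmap_unlock().
 */
#if 0
	spinlock_t *ptl;
	pte_t *pte = page_check_address(page, mm, address, &ptl);
	if (pte) {
		/* examine or modify *pte here, under the pte lock */
		pte_unmap_unlock(pte, ptl);
	}
#endif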
/*
 * Subfunctions of page_referenced: page_referenced_one called
 * repeatedly from either page_referenced_anon or page_referenced_file.
 */
static int page_referenced_one(struct page *page,
	struct vm_area_struct *vma, unsigned int *mapcount)
{
	struct mm_struct *mm = vma->vm_mm;
	unsigned long address;
	pte_t *pte;
	spinlock_t *ptl;
	int referenced = 0;

	address = vma_address(page, vma);
	if (address == -EFAULT)
		goto out;

	pte = page_check_address(page, mm, address, &ptl);
	if (!pte)
		goto out;

	if (ptep_clear_flush_young(vma, address, pte))
		referenced++;

	/* Pretend the page is referenced if the task has the
	   swap token and is in the middle of a page fault. */
	if (mm != current->mm && has_swap_token(mm) &&
			rwsem_is_locked(&mm->mmap_sem))
		referenced++;

	(*mapcount)--;
	pte_unmap_unlock(pte, ptl);
out:
	return referenced;
}
static int page_referenced_anon(struct page *page)
{
	unsigned int mapcount;
	struct anon_vma *anon_vma;
	struct vm_area_struct *vma;
	int referenced = 0;

	anon_vma = page_lock_anon_vma(page);
	if (!anon_vma)
		return referenced;

	mapcount = page_mapcount(page);
	list_for_each_entry(vma, &anon_vma->head, anon_vma_node) {
		referenced += page_referenced_one(page, vma, &mapcount);
		if (!mapcount)
			break;
	}
	spin_unlock(&anon_vma->lock);
	return referenced;
}
/**
 * page_referenced_file - referenced check for object-based rmap
 * @page: the page we're checking references on.
 *
 * For an object-based mapped page, find all the places it is mapped and
 * check/clear the referenced flag. This is done by following the page->mapping
 * pointer, then walking the chain of vmas it holds. It returns the number
 * of references it found.
 *
 * This function is only called from page_referenced for object-based pages.
 */
static int page_referenced_file(struct page *page)
{
	unsigned int mapcount;
	struct address_space *mapping = page->mapping;
	pgoff_t pgoff = page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT);
	struct vm_area_struct *vma;
	struct prio_tree_iter iter;
	int referenced = 0;

	/*
	 * The caller's checks on page->mapping and !PageAnon have made
	 * sure that this is a file page: the check for page->mapping
	 * excludes the case just before it gets set on an anon page.
	 */
	BUG_ON(PageAnon(page));

	/*
	 * The page lock not only makes sure that page->mapping cannot
	 * suddenly be NULLified by truncation, it makes sure that the
	 * structure at mapping cannot be freed and reused yet,
	 * so we can safely take mapping->i_mmap_lock.
	 */
	BUG_ON(!PageLocked(page));

	spin_lock(&mapping->i_mmap_lock);

	/*
	 * i_mmap_lock does not stabilize mapcount at all, but mapcount
	 * is more likely to be accurate if we note it after spinning.
	 */
	mapcount = page_mapcount(page);

	vma_prio_tree_foreach(vma, &iter, &mapping->i_mmap, pgoff, pgoff) {
		if ((vma->vm_flags & (VM_LOCKED|VM_MAYSHARE))
				  == (VM_LOCKED|VM_MAYSHARE)) {
			referenced++;
			break;
		}
		referenced += page_referenced_one(page, vma, &mapcount);
		if (!mapcount)
			break;
	}

	spin_unlock(&mapping->i_mmap_lock);
	return referenced;
}
/**
 * page_referenced - test if the page was referenced
 * @page: the page to test
 * @is_locked: caller holds lock on the page
 *
 * Quick test_and_clear_referenced for all mappings to a page,
 * returns the number of ptes which referenced the page.
 */
int page_referenced(struct page *page, int is_locked)
{
	int referenced = 0;

	if (page_test_and_clear_young(page))
		referenced++;

	if (TestClearPageReferenced(page))
		referenced++;

	if (page_mapped(page) && page->mapping) {
		if (PageAnon(page))
			referenced += page_referenced_anon(page);
		else if (is_locked)
			referenced += page_referenced_file(page);
		else if (TestSetPageLocked(page))
			referenced++;
		else {
			if (page->mapping)
				referenced += page_referenced_file(page);
			unlock_page(page);
		}
	}
	return referenced;
}
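/*
 * Caller sketch (mirrors the pageout path; illustrative only): a page with
 * recent references is kept rather than unmapped and written out.
 */
#if 0
	if (page_referenced(page, 1) && page_mapping_inuse(page))
		goto activate_locked;
#endif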
/**
 * __page_set_anon_rmap - set up a new anonymous rmap
 * @page:	the page to add the mapping to
 * @vma:	the vm area in which the mapping is added
 * @address:	the user virtual address mapped
 */
static void __page_set_anon_rmap(struct page *page,
	struct vm_area_struct *vma, unsigned long address)
{
	struct anon_vma *anon_vma = vma->anon_vma;

	BUG_ON(!anon_vma);
	anon_vma = (void *) anon_vma + PAGE_MAPPING_ANON;
	page->mapping = (struct address_space *) anon_vma;

	page->index = linear_page_index(vma, address);

	/*
	 * nr_mapped state can be updated without turning off
	 * interrupts because it is not modified via interrupt.
	 */
	__inc_page_state(nr_mapped);
}
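/*
 * Illustration (not from this file): after this, page->mapping carries the
 * anon_vma pointer with the PAGE_MAPPING_ANON low bit set - exactly what
 * PageAnon() tests and what page_lock_anon_vma() above subtracts off - and
 * page->index holds linear_page_index(vma, address), roughly
 * vma->vm_pgoff + ((address - vma->vm_start) >> PAGE_SHIFT), which is what
 * vma_address() uses to recompute the virtual address later.
 */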
/**
 * page_add_anon_rmap - add pte mapping to an anonymous page
 * @page:	the page to add the mapping to
 * @vma:	the vm area in which the mapping is added
 * @address:	the user virtual address mapped
 *
 * The caller needs to hold the pte lock.
 */
void page_add_anon_rmap(struct page *page,
	struct vm_area_struct *vma, unsigned long address)
{
	if (atomic_inc_and_test(&page->_mapcount))
		__page_set_anon_rmap(page, vma, address);
	/* else checking page index and mapping is racy */
}
/**
 * page_add_new_anon_rmap - add pte mapping to a new anonymous page
 * @page:	the page to add the mapping to
 * @vma:	the vm area in which the mapping is added
 * @address:	the user virtual address mapped
 *
 * Same as page_add_anon_rmap but must only be called on *new* pages.
 * This means the inc-and-test can be bypassed.
 */
void page_add_new_anon_rmap(struct page *page,
	struct vm_area_struct *vma, unsigned long address)
{
	atomic_set(&page->_mapcount, 0); /* elevate count by 1 (starts at -1) */
	__page_set_anon_rmap(page, vma, address);
}
/**
 * page_add_file_rmap - add pte mapping to a file page
 * @page: the page to add the mapping to
 *
 * The caller needs to hold the pte lock.
 */
void page_add_file_rmap(struct page *page)
{
	if (atomic_inc_and_test(&page->_mapcount))
		__inc_page_state(nr_mapped);
}
/**
 * page_remove_rmap - take down pte mapping from a page
 * @page: page to remove mapping from
 *
 * The caller needs to hold the pte lock.
 */
void page_remove_rmap(struct page *page)
{
	if (atomic_add_negative(-1, &page->_mapcount)) {
		if (page_mapcount(page) < 0) {
			printk (KERN_EMERG "Eeek! page_mapcount(page) went negative! (%d)\n", page_mapcount(page));
			printk (KERN_EMERG "  page->flags = %lx\n", page->flags);
			printk (KERN_EMERG "  page->count = %x\n", page_count(page));
			printk (KERN_EMERG "  page->mapping = %p\n", page->mapping);
		}

		BUG_ON(page_mapcount(page) < 0);
		/*
		 * It would be tidy to reset the PageAnon mapping here,
		 * but that might overwrite a racing page_add_anon_rmap
		 * which increments mapcount after us but sets mapping
		 * before us: so leave the reset to free_hot_cold_page,
		 * and remember that it's only reliable while mapped.
		 * Leaving it set also helps swapoff to reinstate ptes
		 * faster for those pages still in swapcache.
		 */
		if (page_test_and_clear_dirty(page))
			set_page_dirty(page);
		__dec_page_state(nr_mapped);
	}
}
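/*
 * Worked example of the _mapcount convention used above: _mapcount starts
 * at -1 (no mappings).  The first page_add_*_rmap() call's
 * atomic_inc_and_test() takes it to 0 and returns true, so per-page state
 * (anon rmap, nr_mapped) is set up exactly once; the final
 * page_remove_rmap()'s atomic_add_negative(-1, ...) takes it back to -1
 * and returns true, tearing that state down exactly once.
 */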
/*
 * Subfunctions of try_to_unmap: try_to_unmap_one called
 * repeatedly from either try_to_unmap_anon or try_to_unmap_file.
 */
static int try_to_unmap_one(struct page *page, struct vm_area_struct *vma,
				int ignore_refs)
{
	struct mm_struct *mm = vma->vm_mm;
	unsigned long address;
	pte_t *pte;
	pte_t pteval;
	spinlock_t *ptl;
	int ret = SWAP_AGAIN;

	address = vma_address(page, vma);
	if (address == -EFAULT)
		goto out;

	pte = page_check_address(page, mm, address, &ptl);
	if (!pte)
		goto out;

	/*
	 * If the page is mlock()d, we cannot swap it out.
	 * If it's recently referenced (perhaps page_referenced
	 * skipped over this mm) then we should reactivate it.
	 */
	if ((vma->vm_flags & VM_LOCKED) ||
			(ptep_clear_flush_young(vma, address, pte)
				&& !ignore_refs)) {
		ret = SWAP_FAIL;
		goto out_unmap;
	}

	/* Nuke the page table entry. */
	flush_cache_page(vma, address, page_to_pfn(page));
	pteval = ptep_clear_flush(vma, address, pte);

	/* Move the dirty bit to the physical page now the pte is gone. */
	if (pte_dirty(pteval))
		set_page_dirty(page);

	/* Update high watermark before we lower rss */
	update_hiwater_rss(mm);

	if (PageAnon(page)) {
		swp_entry_t entry = { .val = page_private(page) };
		/*
		 * Store the swap location in the pte.
		 * See handle_pte_fault() ...
		 */
		BUG_ON(!PageSwapCache(page));
		swap_duplicate(entry);
		if (list_empty(&mm->mmlist)) {
			spin_lock(&mmlist_lock);
			if (list_empty(&mm->mmlist))
				list_add(&mm->mmlist, &init_mm.mmlist);
			spin_unlock(&mmlist_lock);
		}
		set_pte_at(mm, address, pte, swp_entry_to_pte(entry));
		BUG_ON(pte_file(*pte));
		dec_mm_counter(mm, anon_rss);
	} else
		dec_mm_counter(mm, file_rss);

	page_remove_rmap(page);
	page_cache_release(page);

out_unmap:
	pte_unmap_unlock(pte, ptl);
out:
	return ret;
}
/*
 * objrmap doesn't work for nonlinear VMAs because the assumption that
 * offset-into-file correlates with offset-into-virtual-addresses does not hold.
 * Consequently, given a particular page and its ->index, we cannot locate the
 * ptes which are mapping that page without an exhaustive linear search.
 *
 * So what this code does is a mini "virtual scan" of each nonlinear VMA which
 * maps the file to which the target page belongs. The ->vm_private_data field
 * holds the current cursor into that scan. Successive searches will circulate
 * around the vma's virtual address space.
 *
 * So as more replacement pressure is applied to the pages in a nonlinear VMA,
 * more scanning pressure is placed against them as well. Eventually pages
 * will become fully unmapped and become eligible for eviction.
 *
 * For very sparsely populated VMAs this is a little inefficient - chances are
 * there won't be many ptes located within the scan cluster. In this case
 * maybe we could scan further - to the end of the pte page, perhaps.
 */
#define CLUSTER_SIZE	min(32*PAGE_SIZE, PMD_SIZE)
#define CLUSTER_MASK	(~(CLUSTER_SIZE - 1))
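/*
 * Example values (illustrative, architecture dependent): with 4K pages and
 * a 2M PMD, CLUSTER_SIZE = min(32 * 4K, 2M) = 128K, so each call to
 * try_to_unmap_cluster() below scans 32 ptes of the nonlinear vma, and
 * CLUSTER_MASK rounds the scan cursor down to a 128K boundary.
 */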
static void try_to_unmap_cluster(unsigned long cursor,
	unsigned int *mapcount, struct vm_area_struct *vma)
{
	struct mm_struct *mm = vma->vm_mm;
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;
	pte_t pteval;
	spinlock_t *ptl;
	struct page *page;
	unsigned long address;
	unsigned long end;

	address = (vma->vm_start + cursor) & CLUSTER_MASK;
	end = address + CLUSTER_SIZE;
	if (address < vma->vm_start)
		address = vma->vm_start;
	if (end > vma->vm_end)
		end = vma->vm_end;

	pgd = pgd_offset(mm, address);
	if (!pgd_present(*pgd))
		return;

	pud = pud_offset(pgd, address);
	if (!pud_present(*pud))
		return;

	pmd = pmd_offset(pud, address);
	if (!pmd_present(*pmd))
		return;

	pte = pte_offset_map_lock(mm, pmd, address, &ptl);

	/* Update high watermark before we lower rss */
	update_hiwater_rss(mm);

	for (; address < end; pte++, address += PAGE_SIZE) {
		if (!pte_present(*pte))
			continue;
		page = vm_normal_page(vma, address, *pte);
		BUG_ON(!page || PageAnon(page));

		if (ptep_clear_flush_young(vma, address, pte))
			continue;

		/* Nuke the page table entry. */
		flush_cache_page(vma, address, pte_pfn(*pte));
		pteval = ptep_clear_flush(vma, address, pte);

		/* If nonlinear, store the file page offset in the pte. */
		if (page->index != linear_page_index(vma, address))
			set_pte_at(mm, address, pte, pgoff_to_pte(page->index));

		/* Move the dirty bit to the physical page now the pte is gone. */
		if (pte_dirty(pteval))
			set_page_dirty(page);

		page_remove_rmap(page);
		page_cache_release(page);
		dec_mm_counter(mm, file_rss);
		(*mapcount)--;
	}
	pte_unmap_unlock(pte - 1, ptl);
}
static int try_to_unmap_anon(struct page *page, int ignore_refs)
{
	struct anon_vma *anon_vma;
	struct vm_area_struct *vma;
	int ret = SWAP_AGAIN;

	anon_vma = page_lock_anon_vma(page);
	if (!anon_vma)
		return ret;

	list_for_each_entry(vma, &anon_vma->head, anon_vma_node) {
		ret = try_to_unmap_one(page, vma, ignore_refs);
		if (ret == SWAP_FAIL || !page_mapped(page))
			break;
	}
	spin_unlock(&anon_vma->lock);
	return ret;
}
/**
 * try_to_unmap_file - unmap file page using the object-based rmap method
 * @page: the page to unmap
 *
 * Find all the mappings of a page using the mapping pointer and the vma chains
 * contained in the address_space struct it points to.
 *
 * This function is only called from try_to_unmap for object-based pages.
 */
static int try_to_unmap_file(struct page *page, int ignore_refs)
{
	struct address_space *mapping = page->mapping;
	pgoff_t pgoff = page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT);
	struct vm_area_struct *vma;
	struct prio_tree_iter iter;
	int ret = SWAP_AGAIN;
	unsigned long cursor;
	unsigned long max_nl_cursor = 0;
	unsigned long max_nl_size = 0;
	unsigned int mapcount;

	spin_lock(&mapping->i_mmap_lock);
	vma_prio_tree_foreach(vma, &iter, &mapping->i_mmap, pgoff, pgoff) {
		ret = try_to_unmap_one(page, vma, ignore_refs);
		if (ret == SWAP_FAIL || !page_mapped(page))
			goto out;
	}

	if (list_empty(&mapping->i_mmap_nonlinear))
		goto out;

	list_for_each_entry(vma, &mapping->i_mmap_nonlinear,
						shared.vm_set.list) {
		if (vma->vm_flags & VM_LOCKED)
			continue;
		cursor = (unsigned long) vma->vm_private_data;
		if (cursor > max_nl_cursor)
			max_nl_cursor = cursor;
		cursor = vma->vm_end - vma->vm_start;
		if (cursor > max_nl_size)
			max_nl_size = cursor;
	}

	if (max_nl_size == 0) {	/* any nonlinears locked or reserved */
		ret = SWAP_FAIL;
		goto out;
	}

	/*
	 * We don't try to search for this page in the nonlinear vmas,
	 * and page_referenced wouldn't have found it anyway.  Instead
	 * just walk the nonlinear vmas trying to age and unmap some.
	 * The mapcount of the page we came in with is irrelevant,
	 * but even so use it as a guide to how hard we should try?
	 */
	mapcount = page_mapcount(page);
	if (!mapcount)
		goto out;
	cond_resched_lock(&mapping->i_mmap_lock);

	max_nl_size = (max_nl_size + CLUSTER_SIZE - 1) & CLUSTER_MASK;
	if (max_nl_cursor == 0)
		max_nl_cursor = CLUSTER_SIZE;

	do {
		list_for_each_entry(vma, &mapping->i_mmap_nonlinear,
						shared.vm_set.list) {
			if (vma->vm_flags & VM_LOCKED)
				continue;
			cursor = (unsigned long) vma->vm_private_data;
			while ( cursor < max_nl_cursor &&
				cursor < vma->vm_end - vma->vm_start) {
				try_to_unmap_cluster(cursor, &mapcount, vma);
				cursor += CLUSTER_SIZE;
				vma->vm_private_data = (void *) cursor;
				if ((int)mapcount <= 0)
					goto out;
			}
			vma->vm_private_data = (void *) max_nl_cursor;
		}
		cond_resched_lock(&mapping->i_mmap_lock);
		max_nl_cursor += CLUSTER_SIZE;
	} while (max_nl_cursor <= max_nl_size);

	/*
	 * Don't loop forever (perhaps all the remaining pages are
	 * in locked vmas).  Reset cursor on all unreserved nonlinear
	 * vmas, now forgetting on which ones it had fallen behind.
	 */
	list_for_each_entry(vma, &mapping->i_mmap_nonlinear, shared.vm_set.list)
		vma->vm_private_data = NULL;
out:
	spin_unlock(&mapping->i_mmap_lock);
	return ret;
}
/**
 * try_to_unmap - try to remove all page table mappings to a page
 * @page: the page to get unmapped
 *
 * Tries to remove all the page table entries which are mapping this
 * page, used in the pageout path.  Caller must hold the page lock.
 * Return values are:
 *
 * SWAP_SUCCESS	- we succeeded in removing all mappings
 * SWAP_AGAIN	- we missed a mapping, try again later
 * SWAP_FAIL	- the page is unswappable
 */
int try_to_unmap(struct page *page, int ignore_refs)
{
	int ret;

	BUG_ON(!PageLocked(page));

	if (PageAnon(page))
		ret = try_to_unmap_anon(page, ignore_refs);
	else
		ret = try_to_unmap_file(page, ignore_refs);

	if (!page_mapped(page))
		ret = SWAP_SUCCESS;
	return ret;
}
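/*
 * Caller sketch (how the pageout path reacts to the return codes above;
 * illustrative only):
 */
#if 0
	switch (try_to_unmap(page, 0)) {
	case SWAP_FAIL:
		goto activate_locked;	/* unswappable: keep the page active */
	case SWAP_AGAIN:
		goto keep_locked;	/* missed a mapping: retry later */
	case SWAP_SUCCESS:
		break;			/* fully unmapped: safe to reclaim */
	}
#endif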