/*
 * mm/rmap.c - physical to virtual reverse mappings
 *
 * Copyright 2001, Rik van Riel <riel@conectiva.com.br>
 * Released under the General Public License (GPL).
 *
 * Simple, low overhead reverse mapping scheme.
 * Please try to keep this thing as modular as possible.
 *
 * Provides methods for unmapping each kind of mapped page:
 * the anon methods track anonymous pages, and
 * the file methods track pages belonging to an inode.
 *
 * Original design by Rik van Riel <riel@conectiva.com.br> 2001
 * File methods by Dave McCracken <dmccr@us.ibm.com> 2003, 2004
 * Anonymous methods by Andrea Arcangeli <andrea@suse.de> 2004
 * Contributions by Hugh Dickins <hugh@veritas.com> 2003, 2004
 */
/*
 * Lock ordering in mm:
 *
 * inode->i_sem	(while writing or truncating, not reading or faulting)
 *
 * When a page fault occurs in writing from user to file, down_read
 * of mmap_sem nests within i_sem; in sys_msync, i_sem nests within
 * down_read of mmap_sem; i_sem and down_write of mmap_sem are never
 * taken together; in truncation, i_sem is taken outermost.
 *
 * mm->mmap_sem
 *   page->flags PG_locked (lock_page)
 *     mapping->i_mmap_lock
 *       anon_vma->lock
 *         mm->page_table_lock
 *           zone->lru_lock (in mark_page_accessed)
 *           swap_list_lock (in swap_free etc's swap_info_get)
 *             swap_device_lock (in swap_duplicate, swap_info_get)
 *             mapping->private_lock (in __set_page_dirty_buffers)
 *             inode_lock (in set_page_dirty's __mark_inode_dirty)
 *               sb_lock (within inode_lock in fs/fs-writeback.c)
 *               mapping->tree_lock (widely used, in set_page_dirty,
 *                         in arch-dependent flush_dcache_mmap_lock,
 *                         within inode_lock in __sync_single_inode)
 */
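/*
 * Illustrative sketch of the two i_sem/mmap_sem nestings described above
 * (an assumption added for clarity, not part of the original text):
 *
 *	write(2) faulting on its user buffer:
 *		down(&inode->i_sem);
 *		down_read(&mm->mmap_sem);	<- mmap_sem nests inside i_sem
 *		...
 *		up_read(&mm->mmap_sem);
 *		up(&inode->i_sem);
 *
 *	sys_msync:
 *		down_read(&mm->mmap_sem);
 *		down(&inode->i_sem);		<- i_sem nests inside mmap_sem
 *		...
 *		up(&inode->i_sem);
 *		up_read(&mm->mmap_sem);
 */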
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/rmap.h>
#include <linux/rcupdate.h>

#include <asm/tlbflush.h>
//#define RMAP_DEBUG /* can be enabled only for debugging */

kmem_cache_t *anon_vma_cachep;
static inline void validate_anon_vma(struct vm_area_struct *find_vma)
{
#ifdef RMAP_DEBUG
	struct anon_vma *anon_vma = find_vma->anon_vma;
	struct vm_area_struct *vma;
	unsigned int mapcount = 0;

	list_for_each_entry(vma, &anon_vma->head, anon_vma_node) {
		mapcount++;
		BUG_ON(mapcount > 100000);
	}
#endif
}
/* This must be called under the mmap_sem. */
int anon_vma_prepare(struct vm_area_struct *vma)
{
	struct anon_vma *anon_vma = vma->anon_vma;

	if (unlikely(!anon_vma)) {
		struct mm_struct *mm = vma->vm_mm;
		struct anon_vma *allocated, *locked;

		anon_vma = find_mergeable_anon_vma(vma);
		if (anon_vma) {
			allocated = NULL;
			locked = anon_vma;
			spin_lock(&locked->lock);
		} else {
			anon_vma = anon_vma_alloc();
			if (unlikely(!anon_vma))
				return -ENOMEM;
			allocated = anon_vma;
			locked = NULL;
		}

		/* page_table_lock to protect against threads */
		spin_lock(&mm->page_table_lock);
		if (likely(!vma->anon_vma)) {
			vma->anon_vma = anon_vma;
			list_add(&vma->anon_vma_node, &anon_vma->head);
			allocated = NULL;
		}
		spin_unlock(&mm->page_table_lock);

		if (locked)
			spin_unlock(&locked->lock);
		if (unlikely(allocated))
			anon_vma_free(allocated);
	}
	return 0;
}
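/*
 * Usage sketch (illustrative assumption, not from this file): the anonymous
 * fault path is expected to call anon_vma_prepare() with mmap_sem held for
 * read, before it installs a new anonymous pte, roughly:
 *
 *	if (unlikely(anon_vma_prepare(vma)))
 *		return VM_FAULT_OOM;
 *	...
 *	page_add_anon_rmap(page, vma, address);
 */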
void __anon_vma_merge(struct vm_area_struct *vma, struct vm_area_struct *next)
{
	if (!vma->anon_vma) {
		BUG_ON(!next->anon_vma);
		vma->anon_vma = next->anon_vma;
		list_add(&vma->anon_vma_node, &next->anon_vma_node);
	} else {
		/* if they're both non-null they must be the same */
		BUG_ON(vma->anon_vma != next->anon_vma);
	}
	list_del(&next->anon_vma_node);
}
void __anon_vma_link(struct vm_area_struct *vma)
{
	struct anon_vma *anon_vma = vma->anon_vma;

	if (anon_vma) {
		list_add(&vma->anon_vma_node, &anon_vma->head);
		validate_anon_vma(vma);
	}
}
void anon_vma_link(struct vm_area_struct *vma)
{
	struct anon_vma *anon_vma = vma->anon_vma;

	if (anon_vma) {
		spin_lock(&anon_vma->lock);
		list_add(&vma->anon_vma_node, &anon_vma->head);
		validate_anon_vma(vma);
		spin_unlock(&anon_vma->lock);
	}
}
void anon_vma_unlink(struct vm_area_struct *vma)
{
	struct anon_vma *anon_vma = vma->anon_vma;
	int empty;

	if (!anon_vma)
		return;

	spin_lock(&anon_vma->lock);
	validate_anon_vma(vma);
	list_del(&vma->anon_vma_node);

	/* We must garbage collect the anon_vma if it's empty */
	empty = list_empty(&anon_vma->head);
	spin_unlock(&anon_vma->lock);

	if (empty)
		anon_vma_free(anon_vma);
}
static void anon_vma_ctor(void *data, kmem_cache_t *cachep, unsigned long flags)
{
	if ((flags & (SLAB_CTOR_VERIFY|SLAB_CTOR_CONSTRUCTOR)) ==
						SLAB_CTOR_CONSTRUCTOR) {
		struct anon_vma *anon_vma = data;

		spin_lock_init(&anon_vma->lock);
		INIT_LIST_HEAD(&anon_vma->head);
	}
}
void __init anon_vma_init(void)
{
	anon_vma_cachep = kmem_cache_create("anon_vma", sizeof(struct anon_vma),
			0, SLAB_DESTROY_BY_RCU|SLAB_PANIC, anon_vma_ctor, NULL);
}
/*
 * Getting a lock on a stable anon_vma from a page off the LRU is
 * tricky: page_lock_anon_vma relies on RCU to guard against the races.
 */
static struct anon_vma *page_lock_anon_vma(struct page *page)
{
	struct anon_vma *anon_vma = NULL;
	unsigned long anon_mapping;

	rcu_read_lock();
	anon_mapping = (unsigned long) page->mapping;
	if (!(anon_mapping & PAGE_MAPPING_ANON))
		goto out;
	if (!page_mapped(page))
		goto out;

	anon_vma = (struct anon_vma *) (anon_mapping - PAGE_MAPPING_ANON);
	spin_lock(&anon_vma->lock);
out:
	rcu_read_unlock();
	return anon_vma;
}
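/*
 * Illustrative note (an assumption spelled out here, not original text): the
 * PAGE_MAPPING_ANON bit in page->mapping is what page_add_anon_rmap encodes
 * and page_lock_anon_vma decodes, i.e. roughly
 *
 *	page->mapping = (struct address_space *)((void *)anon_vma + PAGE_MAPPING_ANON);
 *	anon_vma = (struct anon_vma *)((unsigned long)page->mapping - PAGE_MAPPING_ANON);
 *
 * which is safe because anon_vma structures are never odd-aligned.
 */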
/*
 * At what user virtual address is page expected in vma?
 */
static inline unsigned long
vma_address(struct page *page, struct vm_area_struct *vma)
{
	pgoff_t pgoff = page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT);
	unsigned long address;

	address = vma->vm_start + ((pgoff - vma->vm_pgoff) << PAGE_SHIFT);
	if (unlikely(address < vma->vm_start || address >= vma->vm_end)) {
		/* page should be within any vma from prio_tree_next */
		BUG_ON(!PageAnon(page));
		return -EFAULT;
	}
	return address;
}
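/*
 * Worked example of the calculation above (illustrative, assuming 4K pages so
 * PAGE_CACHE_SHIFT == PAGE_SHIFT): a vma with vm_start == 0x40000000 and
 * vm_pgoff == 0x10 maps file offset 0x10000 at 0x40000000, so a page with
 * index 0x13 is expected at
 *
 *	0x40000000 + ((0x13 - 0x10) << PAGE_SHIFT) == 0x40003000
 *
 * which lies inside the vma and is what vma_address() returns.
 */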
/*
 * At what user virtual address is page expected in vma? checking that the
 * page matches the vma: currently only used by unuse_process, on anon pages.
 */
unsigned long page_address_in_vma(struct page *page, struct vm_area_struct *vma)
{
	if (PageAnon(page)) {
		if ((void *)vma->anon_vma !=
		    (void *)page->mapping - PAGE_MAPPING_ANON)
			return -EFAULT;
	} else if (page->mapping && !(vma->vm_flags & VM_NONLINEAR)) {
		if (vma->vm_file->f_mapping != page->mapping)
			return -EFAULT;
	} else
		return -EFAULT;
	return vma_address(page, vma);
}
/*
 * Subfunctions of page_referenced: page_referenced_one called
 * repeatedly from either page_referenced_anon or page_referenced_file.
 */
static int page_referenced_one(struct page *page,
	struct vm_area_struct *vma, unsigned int *mapcount)
{
	struct mm_struct *mm = vma->vm_mm;
	unsigned long address;
	pgd_t *pgd;
	pmd_t *pmd;
	pte_t *pte;
	int referenced = 0;

	address = vma_address(page, vma);
	if (address == -EFAULT)
		goto out;

	spin_lock(&mm->page_table_lock);

	pgd = pgd_offset(mm, address);
	if (!pgd_present(*pgd))
		goto out_unlock;

	pmd = pmd_offset(pgd, address);
	if (!pmd_present(*pmd))
		goto out_unlock;

	pte = pte_offset_map(pmd, address);
	if (!pte_present(*pte))
		goto out_unmap;

	if (page_to_pfn(page) != pte_pfn(*pte))
		goto out_unmap;

	if (ptep_clear_flush_young(vma, address, pte))
		referenced++;

	if (mm != current->mm && has_swap_token(mm))
		referenced++;

	(*mapcount)--;

out_unmap:
	pte_unmap(pte);
out_unlock:
	spin_unlock(&mm->page_table_lock);
out:
	return referenced;
}
static int page_referenced_anon(struct page *page)
{
	unsigned int mapcount;
	struct anon_vma *anon_vma;
	struct vm_area_struct *vma;
	int referenced = 0;

	anon_vma = page_lock_anon_vma(page);
	if (!anon_vma)
		return referenced;

	mapcount = page_mapcount(page);
	list_for_each_entry(vma, &anon_vma->head, anon_vma_node) {
		referenced += page_referenced_one(page, vma, &mapcount);
		if (!mapcount)
			break;
	}
	spin_unlock(&anon_vma->lock);
	return referenced;
}
/**
 * page_referenced_file - referenced check for object-based rmap
 * @page: the page we're checking references on.
 *
 * For an object-based mapped page, find all the places it is mapped and
 * check/clear the referenced flag.  This is done by following the page->mapping
 * pointer, then walking the chain of vmas it holds.  It returns the number
 * of references it found.
 *
 * This function is only called from page_referenced for object-based pages.
 */
static int page_referenced_file(struct page *page)
{
	unsigned int mapcount;
	struct address_space *mapping = page->mapping;
	pgoff_t pgoff = page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT);
	struct vm_area_struct *vma;
	struct prio_tree_iter iter;
	int referenced = 0;

	/*
	 * The caller's checks on page->mapping and !PageAnon have made
	 * sure that this is a file page: the check for page->mapping
	 * excludes the case just before it gets set on an anon page.
	 */
	BUG_ON(PageAnon(page));

	/*
	 * The page lock not only makes sure that page->mapping cannot
	 * suddenly be NULLified by truncation, it makes sure that the
	 * structure at mapping cannot be freed and reused yet,
	 * so we can safely take mapping->i_mmap_lock.
	 */
	BUG_ON(!PageLocked(page));

	spin_lock(&mapping->i_mmap_lock);

	/*
	 * i_mmap_lock does not stabilize mapcount at all, but mapcount
	 * is more likely to be accurate if we note it after spinning.
	 */
	mapcount = page_mapcount(page);

	vma_prio_tree_foreach(vma, &iter, &mapping->i_mmap, pgoff, pgoff) {
		if ((vma->vm_flags & (VM_LOCKED|VM_MAYSHARE))
				  == (VM_LOCKED|VM_MAYSHARE)) {
			referenced++;
			break;
		}
		referenced += page_referenced_one(page, vma, &mapcount);
		if (!mapcount)
			break;
	}

	spin_unlock(&mapping->i_mmap_lock);
	return referenced;
}
/**
 * page_referenced - test if the page was referenced
 * @page: the page to test
 * @is_locked: caller holds lock on the page
 *
 * Quick test_and_clear_referenced for all mappings to a page,
 * returns the number of ptes which referenced the page.
 */
int page_referenced(struct page *page, int is_locked)
{
	int referenced = 0;

	if (page_test_and_clear_young(page))
		referenced++;

	if (TestClearPageReferenced(page))
		referenced++;

	if (page_mapped(page) && page->mapping) {
		if (PageAnon(page))
			referenced += page_referenced_anon(page);
		else if (is_locked)
			referenced += page_referenced_file(page);
		else if (TestSetPageLocked(page))
			referenced++;
		else {
			if (page->mapping)
				referenced += page_referenced_file(page);
			unlock_page(page);
		}
	}
	return referenced;
}
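/*
 * Usage sketch (illustrative assumption, not from this file): with the page
 * already locked, page reclaim is expected to treat any nonzero count as
 * "recently used", along the lines of
 *
 *	if (page_referenced(page, 1))
 *		goto activate_locked;
 */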
/**
 * page_add_anon_rmap - add pte mapping to an anonymous page
 * @page: the page to add the mapping to
 * @vma: the vm area in which the mapping is added
 * @address: the user virtual address mapped
 *
 * The caller needs to hold the mm->page_table_lock.
 */
void page_add_anon_rmap(struct page *page,
	struct vm_area_struct *vma, unsigned long address)
{
	struct anon_vma *anon_vma = vma->anon_vma;
	pgoff_t index;

	BUG_ON(PageReserved(page));

	anon_vma = (void *) anon_vma + PAGE_MAPPING_ANON;
	index = (address - vma->vm_start) >> PAGE_SHIFT;
	index += vma->vm_pgoff;
	index >>= PAGE_CACHE_SHIFT - PAGE_SHIFT;

	if (atomic_inc_and_test(&page->_mapcount)) {
		page->index = index;
		page->mapping = (struct address_space *) anon_vma;
		inc_page_state(nr_mapped);
	}
	/* else checking page index and mapping is racy */
}
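/*
 * Worked example of the index calculation above (illustrative, assuming 4K
 * pages so PAGE_CACHE_SHIFT == PAGE_SHIFT): for vm_start == 0x40000000,
 * vm_pgoff == 0 and address == 0x40002000,
 *
 *	index = (0x40002000 - 0x40000000) >> PAGE_SHIFT == 2
 *
 * so the page is recorded as the third page of its anonymous region.
 */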
/**
 * page_add_file_rmap - add pte mapping to a file page
 * @page: the page to add the mapping to
 *
 * The caller needs to hold the mm->page_table_lock.
 */
void page_add_file_rmap(struct page *page)
{
	BUG_ON(PageAnon(page));
	if (!pfn_valid(page_to_pfn(page)) || PageReserved(page))
		return;

	if (atomic_inc_and_test(&page->_mapcount))
		inc_page_state(nr_mapped);
}
/**
 * page_remove_rmap - take down pte mapping from a page
 * @page: page to remove mapping from
 *
 * Caller needs to hold the mm->page_table_lock.
 */
void page_remove_rmap(struct page *page)
{
	BUG_ON(PageReserved(page));

	if (atomic_add_negative(-1, &page->_mapcount)) {
		BUG_ON(page_mapcount(page) < 0);
		/*
		 * It would be tidy to reset the PageAnon mapping here,
		 * but that might overwrite a racing page_add_anon_rmap
		 * which increments mapcount after us but sets mapping
		 * before us: so leave the reset to free_hot_cold_page,
		 * and remember that it's only reliable while mapped.
		 * Leaving it set also helps swapoff to reinstate ptes
		 * faster for those pages still in swapcache.
		 */
		if (page_test_and_clear_dirty(page))
			set_page_dirty(page);
		dec_page_state(nr_mapped);
	}
}
/*
 * Subfunctions of try_to_unmap: try_to_unmap_one called
 * repeatedly from either try_to_unmap_anon or try_to_unmap_file.
 */
static int try_to_unmap_one(struct page *page, struct vm_area_struct *vma)
{
	struct mm_struct *mm = vma->vm_mm;
	unsigned long address;
	pgd_t *pgd;
	pmd_t *pmd;
	pte_t *pte;
	pte_t pteval;
	int ret = SWAP_AGAIN;

	address = vma_address(page, vma);
	if (address == -EFAULT)
		goto out;

	/*
	 * We need the page_table_lock to protect us from page faults,
	 * munmap, fork, etc...
	 */
	spin_lock(&mm->page_table_lock);

	pgd = pgd_offset(mm, address);
	if (!pgd_present(*pgd))
		goto out_unlock;

	pmd = pmd_offset(pgd, address);
	if (!pmd_present(*pmd))
		goto out_unlock;

	pte = pte_offset_map(pmd, address);
	if (!pte_present(*pte))
		goto out_unmap;

	if (page_to_pfn(page) != pte_pfn(*pte))
		goto out_unmap;

	/*
	 * If the page is mlock()d, we cannot swap it out.
	 * If it's recently referenced (perhaps page_referenced
	 * skipped over this mm) then we should reactivate it.
	 */
	if ((vma->vm_flags & (VM_LOCKED|VM_RESERVED)) ||
			ptep_clear_flush_young(vma, address, pte)) {
		ret = SWAP_FAIL;
		goto out_unmap;
	}

	/*
	 * Don't pull an anonymous page out from under get_user_pages.
	 * GUP carefully breaks COW and raises page count (while holding
	 * page_table_lock, as we have here) to make sure that the page
	 * cannot be freed.  If we unmap that page here, a user write
	 * access to the virtual address will bring back the page, but
	 * its raised count will (ironically) be taken to mean it's not
	 * an exclusive swap page, do_wp_page will replace it by a copy
	 * page, and the user never gets to see the data GUP was holding
	 * the original page for.
	 *
	 * This test is also useful for when swapoff (unuse_process) has
	 * to drop page lock: its reference to the page stops existing
	 * ptes from being unmapped, so swapoff can make progress.
	 */
	if (PageSwapCache(page) &&
	    page_count(page) != page_mapcount(page) + 2) {
		ret = SWAP_FAIL;
		goto out_unmap;
	}

	/* Nuke the page table entry. */
	flush_cache_page(vma, address);
	pteval = ptep_clear_flush(vma, address, pte);

	/* Move the dirty bit to the physical page now the pte is gone. */
	if (pte_dirty(pteval))
		set_page_dirty(page);

	if (PageAnon(page)) {
		swp_entry_t entry = { .val = page->private };
		/*
		 * Store the swap location in the pte.
		 * See handle_pte_fault() ...
		 */
		BUG_ON(!PageSwapCache(page));
		swap_duplicate(entry);
		set_pte(pte, swp_entry_to_pte(entry));
		BUG_ON(pte_file(*pte));
	}

	mm->rss--;
	page_remove_rmap(page);
	page_cache_release(page);

out_unmap:
	pte_unmap(pte);
out_unlock:
	spin_unlock(&mm->page_table_lock);
out:
	return ret;
}
/*
 * objrmap doesn't work for nonlinear VMAs because the assumption that
 * offset-into-file correlates with offset-into-virtual-addresses does not hold.
 * Consequently, given a particular page and its ->index, we cannot locate the
 * ptes which are mapping that page without an exhaustive linear search.
 *
 * So what this code does is a mini "virtual scan" of each nonlinear VMA which
 * maps the file to which the target page belongs.  The ->vm_private_data field
 * holds the current cursor into that scan.  Successive searches will circulate
 * around the vma's virtual address space.
 *
 * So as more replacement pressure is applied to the pages in a nonlinear VMA,
 * more scanning pressure is placed against them as well.  Eventually pages
 * will become fully unmapped and are eligible for eviction.
 *
 * For very sparsely populated VMAs this is a little inefficient - chances are
 * there won't be many ptes located within the scan cluster.  In this case
 * maybe we could scan further - to the end of the pte page, perhaps.
 */
#define CLUSTER_SIZE	min(32*PAGE_SIZE, PMD_SIZE)
#define CLUSTER_MASK	(~(CLUSTER_SIZE - 1))
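/*
 * Illustrative arithmetic for the cluster window (an assumption, using 4K
 * pages and a 2M PMD_SIZE): CLUSTER_SIZE == min(32 * 4K, 2M) == 128K and
 * CLUSTER_MASK == ~0x1ffff, so a cursor of 0x21000 into a vma starting at
 * 0x40000000 gives
 *
 *	address = (0x40000000 + 0x21000) & CLUSTER_MASK == 0x40020000
 *	end     = address + CLUSTER_SIZE == 0x40040000
 *
 * i.e. each call to try_to_unmap_cluster scans one aligned 128K window.
 */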
static void try_to_unmap_cluster(unsigned long cursor,
	unsigned int *mapcount, struct vm_area_struct *vma)
{
	struct mm_struct *mm = vma->vm_mm;
	pgd_t *pgd;
	pmd_t *pmd;
	pte_t *pte;
	pte_t pteval;
	struct page *page;
	unsigned long address;
	unsigned long end;
	unsigned long pfn;

	/*
	 * We need the page_table_lock to protect us from page faults,
	 * munmap, fork, etc...
	 */
	spin_lock(&mm->page_table_lock);

	address = (vma->vm_start + cursor) & CLUSTER_MASK;
	end = address + CLUSTER_SIZE;
	if (address < vma->vm_start)
		address = vma->vm_start;
	if (end > vma->vm_end)
		end = vma->vm_end;

	pgd = pgd_offset(mm, address);
	if (!pgd_present(*pgd))
		goto out_unlock;

	pmd = pmd_offset(pgd, address);
	if (!pmd_present(*pmd))
		goto out_unlock;

	for (pte = pte_offset_map(pmd, address);
			address < end; pte++, address += PAGE_SIZE) {

		if (!pte_present(*pte))
			continue;

		pfn = pte_pfn(*pte);
		if (!pfn_valid(pfn))
			continue;

		page = pfn_to_page(pfn);
		BUG_ON(PageAnon(page));
		if (PageReserved(page))
			continue;

		if (ptep_clear_flush_young(vma, address, pte))
			continue;

		/* Nuke the page table entry. */
		flush_cache_page(vma, address);
		pteval = ptep_clear_flush(vma, address, pte);

		/* If nonlinear, store the file page offset in the pte. */
		if (page->index != linear_page_index(vma, address))
			set_pte(pte, pgoff_to_pte(page->index));

		/* Move the dirty bit to the physical page now the pte is gone. */
		if (pte_dirty(pteval))
			set_page_dirty(page);

		page_remove_rmap(page);
		page_cache_release(page);
		mm->rss--;
		(*mapcount)--;
	}

	pte_unmap(pte);

out_unlock:
	spin_unlock(&mm->page_table_lock);
}
static int try_to_unmap_anon(struct page *page)
{
	struct anon_vma *anon_vma;
	struct vm_area_struct *vma;
	int ret = SWAP_AGAIN;

	anon_vma = page_lock_anon_vma(page);
	if (!anon_vma)
		return ret;

	list_for_each_entry(vma, &anon_vma->head, anon_vma_node) {
		ret = try_to_unmap_one(page, vma);
		if (ret == SWAP_FAIL || !page_mapped(page))
			break;
	}
	spin_unlock(&anon_vma->lock);
	return ret;
}
/**
 * try_to_unmap_file - unmap file page using the object-based rmap method
 * @page: the page to unmap
 *
 * Find all the mappings of a page using the mapping pointer and the vma chains
 * contained in the address_space struct it points to.
 *
 * This function is only called from try_to_unmap for object-based pages.
 */
static int try_to_unmap_file(struct page *page)
{
	struct address_space *mapping = page->mapping;
	pgoff_t pgoff = page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT);
	struct vm_area_struct *vma;
	struct prio_tree_iter iter;
	int ret = SWAP_AGAIN;
	unsigned long cursor;
	unsigned long max_nl_cursor = 0;
	unsigned long max_nl_size = 0;
	unsigned int mapcount;

	spin_lock(&mapping->i_mmap_lock);
	vma_prio_tree_foreach(vma, &iter, &mapping->i_mmap, pgoff, pgoff) {
		ret = try_to_unmap_one(page, vma);
		if (ret == SWAP_FAIL || !page_mapped(page))
			goto out;
	}

	if (list_empty(&mapping->i_mmap_nonlinear))
		goto out;

	list_for_each_entry(vma, &mapping->i_mmap_nonlinear,
						shared.vm_set.list) {
		if (vma->vm_flags & (VM_LOCKED|VM_RESERVED))
			continue;
		cursor = (unsigned long) vma->vm_private_data;
		if (cursor > max_nl_cursor)
			max_nl_cursor = cursor;
		cursor = vma->vm_end - vma->vm_start;
		if (cursor > max_nl_size)
			max_nl_size = cursor;
	}

	if (max_nl_size == 0) {	/* any nonlinears locked or reserved */
		ret = SWAP_FAIL;
		goto out;
	}

	/*
	 * We don't try to search for this page in the nonlinear vmas,
	 * and page_referenced wouldn't have found it anyway.  Instead
	 * just walk the nonlinear vmas trying to age and unmap some.
	 * The mapcount of the page we came in with is irrelevant,
	 * but even so use it as a guide to how hard we should try?
	 */
	mapcount = page_mapcount(page);
	if (!mapcount)
		goto out;
	cond_resched_lock(&mapping->i_mmap_lock);

	max_nl_size = (max_nl_size + CLUSTER_SIZE - 1) & CLUSTER_MASK;
	if (max_nl_cursor == 0)
		max_nl_cursor = CLUSTER_SIZE;

	do {
		list_for_each_entry(vma, &mapping->i_mmap_nonlinear,
						shared.vm_set.list) {
			if (vma->vm_flags & (VM_LOCKED|VM_RESERVED))
				continue;
			cursor = (unsigned long) vma->vm_private_data;
			while (vma->vm_mm->rss &&
				cursor < max_nl_cursor &&
				cursor < vma->vm_end - vma->vm_start) {
				try_to_unmap_cluster(cursor, &mapcount, vma);
				cursor += CLUSTER_SIZE;
				vma->vm_private_data = (void *) cursor;
				if ((int)mapcount <= 0)
					goto out;
			}
			vma->vm_private_data = (void *) max_nl_cursor;
		}
		cond_resched_lock(&mapping->i_mmap_lock);
		max_nl_cursor += CLUSTER_SIZE;
	} while (max_nl_cursor <= max_nl_size);

	/*
	 * Don't loop forever (perhaps all the remaining pages are
	 * in locked vmas).  Reset cursor on all unreserved nonlinear
	 * vmas, now forgetting on which ones it had fallen behind.
	 */
	list_for_each_entry(vma, &mapping->i_mmap_nonlinear,
						shared.vm_set.list) {
		if (!(vma->vm_flags & VM_RESERVED))
			vma->vm_private_data = NULL;
	}
out:
	spin_unlock(&mapping->i_mmap_lock);
	return ret;
}
/**
 * try_to_unmap - try to remove all page table mappings to a page
 * @page: the page to get unmapped
 *
 * Tries to remove all the page table entries which are mapping this
 * page, used in the pageout path.  Caller must hold the page lock.
 * Return values are:
 *
 * SWAP_SUCCESS	- we succeeded in removing all mappings
 * SWAP_AGAIN	- we missed a mapping, try again later
 * SWAP_FAIL	- the page is unswappable
 */
int try_to_unmap(struct page *page)
{
	int ret;

	BUG_ON(PageReserved(page));
	BUG_ON(!PageLocked(page));

	if (PageAnon(page))
		ret = try_to_unmap_anon(page);
	else
		ret = try_to_unmap_file(page);

	if (!page_mapped(page))
		ret = SWAP_SUCCESS;
	return ret;
}