#ifndef _LINUX_RMAP_H
#define _LINUX_RMAP_H
/*
 * Declarations for Reverse Mapping functions in mm/rmap.c
 */

#include <linux/list.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/spinlock.h>

/*
 * The anon_vma heads a list of private "related" vmas, to scan if
 * an anonymous page pointing to this anon_vma needs to be unmapped:
 * the vmas on the list will be related by forking, or by splitting.
 *
 * Since vmas come and go as they are split and merged (particularly
 * in mprotect), the mapping field of an anonymous page cannot point
 * directly to a vma: instead it points to an anon_vma, on whose list
 * the related vmas can be easily linked or unlinked.
 *
 * After unlinking the last vma on the list, we must garbage collect
 * the anon_vma object itself: we're guaranteed no page can be
 * pointing to this anon_vma once its vma list is empty.
 */
struct anon_vma {
	spinlock_t lock;	/* Serialize access to vma list */
	struct list_head head;	/* List of private "related" vmas */
};

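/*
 * Example (illustrative sketch, not an interface of this header): the
 * mapping field of an anonymous page carries the anon_vma pointer with
 * its low bit set (PAGE_MAPPING_ANON, from <linux/mm.h>), so rmap code
 * recovers and locks the anon_vma roughly like this:
 *
 *	unsigned long mapping = (unsigned long)page->mapping;
 *
 *	if (mapping & PAGE_MAPPING_ANON) {
 *		struct anon_vma *anon_vma;
 *
 *		anon_vma = (struct anon_vma *)(mapping - PAGE_MAPPING_ANON);
 *		spin_lock(&anon_vma->lock);
 *		... walk the related vmas on anon_vma->head ...
 *		spin_unlock(&anon_vma->lock);
 *	}
 */
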
#ifdef CONFIG_MMU

extern kmem_cache_t *anon_vma_cachep;

static inline struct anon_vma *anon_vma_alloc(void)
{
	return kmem_cache_alloc(anon_vma_cachep, SLAB_KERNEL);
}

static inline void anon_vma_free(struct anon_vma *anon_vma)
{
	kmem_cache_free(anon_vma_cachep, anon_vma);
}

static inline void anon_vma_lock(struct vm_area_struct *vma)
{
	struct anon_vma *anon_vma = vma->anon_vma;
	if (anon_vma)
		spin_lock(&anon_vma->lock);
}

static inline void anon_vma_unlock(struct vm_area_struct *vma)
{
	struct anon_vma *anon_vma = vma->anon_vma;
	if (anon_vma)
		spin_unlock(&anon_vma->lock);
}

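/*
 * Usage sketch: with the lock held, the "related" vmas can be scanned
 * through the list head above.  This assumes the vm_area_struct of this
 * era links itself onto the list via its anon_vma_node member:
 *
 *	struct anon_vma *anon_vma = vma->anon_vma;
 *	struct vm_area_struct *pos;
 *
 *	spin_lock(&anon_vma->lock);
 *	list_for_each_entry(pos, &anon_vma->head, anon_vma_node)
 *		... each pos may hold a pte mapping the page ...
 *	spin_unlock(&anon_vma->lock);
 */
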
/*
 * anon_vma helper functions.
 */
void anon_vma_init(void);	/* create anon_vma_cachep */
int  anon_vma_prepare(struct vm_area_struct *);
void __anon_vma_merge(struct vm_area_struct *, struct vm_area_struct *);
void anon_vma_unlink(struct vm_area_struct *);
void anon_vma_link(struct vm_area_struct *);
void __anon_vma_link(struct vm_area_struct *);

/*
 * rmap interfaces called when adding or removing pte of page
 */
void page_add_anon_rmap(struct page *, struct vm_area_struct *, unsigned long);
void page_add_new_anon_rmap(struct page *, struct vm_area_struct *, unsigned long);
void page_add_file_rmap(struct page *);
void page_remove_rmap(struct page *);

/**
 * page_dup_rmap - duplicate pte mapping to a page
 * @page:	the page to add the mapping to
 *
 * For copy_page_range only: minimal extract from page_add_rmap,
 * avoiding unnecessary tests (already checked) so it's quicker.
 */
static inline void page_dup_rmap(struct page *page)
{
	atomic_inc(&page->_mapcount);
}

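/*
 * Usage sketch (roughly the fork path in mm/memory.c, assuming the
 * copy_one_pte() helper of this era): when a parent pte is copied into
 * the child, the page simply gains one more mapping:
 *
 *	get_page(page);
 *	page_dup_rmap(page);
 */
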
/*
 * Called from mm/vmscan.c to handle paging out
 */
int page_referenced(struct page *, int is_locked);
int try_to_unmap(struct page *, int ignore_refs);

/*
 * Called from mm/filemap_xip.c to unmap empty zero page
 */
pte_t *page_check_address(struct page *, struct mm_struct *,
				unsigned long, spinlock_t **);

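/*
 * Usage sketch: on success the pte is returned mapped, with the pte lock
 * held and *ptlp pointing at it, so the caller must drop both:
 *
 *	spinlock_t *ptl;
 *	pte_t *pte = page_check_address(page, mm, address, &ptl);
 *
 *	if (pte) {
 *		... examine or modify the pte ...
 *		pte_unmap_unlock(pte, ptl);
 *	}
 */
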
/*
 * Used by swapoff to help locate where page is expected in vma.
 */
unsigned long page_address_in_vma(struct page *, struct vm_area_struct *);

/*
 * Cleans the PTEs of shared mappings.
 * (and since clean PTEs should also be readonly, write protects them too)
 *
 * returns the number of cleaned PTEs.
 */
int page_mkclean(struct page *);

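/*
 * Usage sketch (roughly what the dirty accounting path, e.g.
 * clear_page_dirty_for_io(), does before writeback of a shared mapped
 * page): if write-protecting the ptes found any dirty ones, re-mark the
 * page dirty so the data still gets written out:
 *
 *	if (page_mkclean(page))
 *		set_page_dirty(page);
 */
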
#else	/* !CONFIG_MMU */

#define anon_vma_init()		do {} while (0)
#define anon_vma_prepare(vma)	(0)
#define anon_vma_link(vma)	do {} while (0)

#define page_referenced(page,l) TestClearPageReferenced(page)
#define try_to_unmap(page, refs) SWAP_FAIL

static inline int page_mkclean(struct page *page)
{
	return 0;
}

#endif	/* CONFIG_MMU */

/*
 * Return values of try_to_unmap
 */
#define SWAP_SUCCESS	0
#define SWAP_AGAIN	1
#define SWAP_FAIL	2

#endif	/* _LINUX_RMAP_H */