include/linux/rmap.h

#ifndef _LINUX_RMAP_H
#define _LINUX_RMAP_H
/*
 * Declarations for Reverse Mapping functions in mm/rmap.c
 */

#include <linux/list.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/spinlock.h>
#include <linux/memcontrol.h>

/*
 * The anon_vma heads a list of private "related" vmas, to scan if
 * an anonymous page pointing to this anon_vma needs to be unmapped:
 * the vmas on the list will be related by forking, or by splitting.
 *
 * Since vmas come and go as they are split and merged (particularly
 * in mprotect), the mapping field of an anonymous page cannot point
 * directly to a vma: instead it points to an anon_vma, on whose list
 * the related vmas can be easily linked or unlinked.
 *
 * After unlinking the last vma on the list, we must garbage collect
 * the anon_vma object itself: we're guaranteed no page can be
 * pointing to this anon_vma once its vma list is empty.
 */
struct anon_vma {
	spinlock_t lock;	/* Serialize access to vma list */
	struct list_head head;	/* List of private "related" vmas */
};
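
/*
 * Example (illustrative sketch, not kernel source): how a reverse-map
 * walk in mm/rmap.c iterates the vmas hanging off an anon_vma. It
 * assumes the linkage field vma->anon_vma_node in struct
 * vm_area_struct, which is serialized by anon_vma->lock.
 */
#if 0
static void example_anon_vma_walk(struct anon_vma *anon_vma)
{
	struct vm_area_struct *vma;

	spin_lock(&anon_vma->lock);
	list_for_each_entry(vma, &anon_vma->head, anon_vma_node) {
		/* each vma on the list may still map the anonymous page */
	}
	spin_unlock(&anon_vma->lock);
}
#endif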

#ifdef CONFIG_MMU

extern struct kmem_cache *anon_vma_cachep;

static inline struct anon_vma *anon_vma_alloc(void)
{
	return kmem_cache_alloc(anon_vma_cachep, GFP_KERNEL);
}

static inline void anon_vma_free(struct anon_vma *anon_vma)
{
	kmem_cache_free(anon_vma_cachep, anon_vma);
}

static inline void anon_vma_lock(struct vm_area_struct *vma)
{
	struct anon_vma *anon_vma = vma->anon_vma;
	if (anon_vma)
		spin_lock(&anon_vma->lock);
}

static inline void anon_vma_unlock(struct vm_area_struct *vma)
{
	struct anon_vma *anon_vma = vma->anon_vma;
	if (anon_vma)
		spin_unlock(&anon_vma->lock);
}

/*
 * anon_vma helper functions.
 */
void anon_vma_init(void);	/* create anon_vma_cachep */
int anon_vma_prepare(struct vm_area_struct *);
void __anon_vma_merge(struct vm_area_struct *, struct vm_area_struct *);
void anon_vma_unlink(struct vm_area_struct *);
void anon_vma_link(struct vm_area_struct *);
void __anon_vma_link(struct vm_area_struct *);
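
/*
 * Example (illustrative fragment, not kernel source): a fault path
 * must call anon_vma_prepare() before mapping the vma's first
 * anonymous page, so that an anon_vma exists for the rmap calls
 * below; it returns 0 on success.
 */
#if 0
	if (unlikely(anon_vma_prepare(vma)))
		return VM_FAULT_OOM;	/* could not allocate an anon_vma */
#endif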

/*
 * rmap interfaces called when adding or removing pte of page
 */
void page_add_anon_rmap(struct page *, struct vm_area_struct *, unsigned long);
void page_add_new_anon_rmap(struct page *, struct vm_area_struct *, unsigned long);
void page_add_file_rmap(struct page *);
void page_remove_rmap(struct page *, struct vm_area_struct *);
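
/*
 * Example (illustrative fragment, not kernel source): the order in
 * which an anonymous fault handler such as do_anonymous_page() in
 * mm/memory.c wires a new page into rmap; pte construction and error
 * handling are elided.
 */
#if 0
	page = alloc_zeroed_user_highpage_movable(vma, address);
	entry = mk_pte(page, vma->vm_page_prot);
	lru_cache_add_active(page);
	page_add_new_anon_rmap(page, vma, address);	/* new page, mapped nowhere else */
	set_pte_at(mm, address, page_table, entry);
#endif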

#ifdef CONFIG_DEBUG_VM
void page_dup_rmap(struct page *page, struct vm_area_struct *vma, unsigned long address);
#else
static inline void page_dup_rmap(struct page *page, struct vm_area_struct *vma, unsigned long address)
{
	atomic_inc(&page->_mapcount);
}
#endif

/*
 * Called from mm/vmscan.c to handle paging out
 */
int page_referenced(struct page *, int is_locked, struct mem_cgroup *cnt);
int try_to_unmap(struct page *, int ignore_refs);
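
/*
 * Example (illustrative fragment, not kernel source): how a reclaim
 * loop like shrink_page_list() in mm/vmscan.c consumes these calls;
 * the SWAP_* return values are defined at the end of this header.
 */
#if 0
	if (page_referenced(page, 1, NULL))
		goto keep_locked;		/* recently used, skip it */
	switch (try_to_unmap(page, 0)) {
	case SWAP_FAIL:
		goto activate_locked;
	case SWAP_AGAIN:
		goto keep_locked;
	case SWAP_SUCCESS:
		break;				/* all ptes gone, page can be freed */
	}
#endif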

/*
 * Called from mm/filemap_xip.c to unmap empty zero page
 */
pte_t *page_check_address(struct page *, struct mm_struct *,
			  unsigned long, spinlock_t **);
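
/*
 * Example (illustrative fragment, not kernel source): on success
 * page_check_address() returns the pte mapped and with its page-table
 * lock held, so the caller must drop both when done.
 */
#if 0
	pte = page_check_address(page, mm, address, &ptl);
	if (pte) {
		/* inspect or modify the pte here, under ptl */
		pte_unmap_unlock(pte, ptl);
	}
#endif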

/*
 * Used by swapoff to help locate where page is expected in vma.
 */
unsigned long page_address_in_vma(struct page *, struct vm_area_struct *);
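
/*
 * Example (illustrative fragment, not kernel source): the address
 * comes back as -EFAULT when the page cannot be mapped in that vma.
 */
#if 0
	address = page_address_in_vma(page, vma);
	if (address == -EFAULT)
		return 0;	/* page would not be mapped here */
#endif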

/*
 * Cleans the PTEs of shared mappings.
 * (and since clean PTEs should also be readonly, write protects them too)
 *
 * returns the number of cleaned PTEs.
 */
int page_mkclean(struct page *);
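
/*
 * Example (illustrative fragment, not kernel source): the writeback
 * path (clear_page_dirty_for_io() in mm/page-writeback.c) uses the
 * returned count to re-dirty the page when any pte was still dirty.
 */
#if 0
	if (page_mkclean(page))
		set_page_dirty(page);	/* a pte was dirty, don't lose the data */
#endif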

#else	/* !CONFIG_MMU */

#define anon_vma_init()		do {} while (0)
#define anon_vma_prepare(vma)	(0)
#define anon_vma_link(vma)	do {} while (0)

#define page_referenced(page, l, cnt) TestClearPageReferenced(page)
#define try_to_unmap(page, refs) SWAP_FAIL

static inline int page_mkclean(struct page *page)
{
	return 0;
}

#endif	/* CONFIG_MMU */

/*
 * Return values of try_to_unmap
 */
#define SWAP_SUCCESS	0
#define SWAP_AGAIN	1
#define SWAP_FAIL	2

#endif	/* _LINUX_RMAP_H */