#ifndef __LINUX_KSM_H
#define __LINUX_KSM_H
/*
 * Memory merging support.
 *
 * This code enables dynamic sharing of identical pages found in different
 * memory areas, even if they are not shared by fork().
 */
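
/*
 * Userspace usage sketch (a hedged illustration, not part of this header;
 * needs <sys/mman.h>, error handling elided): an application opts an
 * anonymous region into merging with madvise(2) and MADV_MERGEABLE, after
 * which ksmd may scan it and replace identical pages with shared,
 * write-protected KSM pages:
 *
 *	char *buf = mmap(NULL, len, PROT_READ | PROT_WRITE,
 *			 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
 *	if (madvise(buf, len, MADV_MERGEABLE))
 *		perror("madvise");
 */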

#include <linux/bitops.h>
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/rmap.h>
#include <linux/sched.h>

struct stable_node;
struct mem_cgroup;

#ifdef CONFIG_KSM
int ksm_madvise(struct vm_area_struct *vma, unsigned long start,
		unsigned long end, int advice, unsigned long *vm_flags);
int __ksm_enter(struct mm_struct *mm);
void __ksm_exit(struct mm_struct *mm);

static inline int ksm_fork(struct mm_struct *mm, struct mm_struct *oldmm)
{
	if (test_bit(MMF_VM_MERGEABLE, &oldmm->flags))
		return __ksm_enter(mm);
	return 0;
}

static inline void ksm_exit(struct mm_struct *mm)
{
	if (test_bit(MMF_VM_MERGEABLE, &mm->flags))
		__ksm_exit(mm);
}
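
/*
 * Descriptive note: ksm_fork() and ksm_exit() are the hooks by which the
 * core fork and teardown paths keep an mm registered with ksmd: a child
 * mm is registered when its parent had mergeable areas, and an mm is
 * unregistered when it is finally torn down.
 */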

/*
 * A KSM page is one of those write-protected "shared pages" or "merged pages"
 * which KSM maps into multiple mms, wherever identical anonymous page content
 * is found in VM_MERGEABLE vmas.  It's a PageAnon page, pointing not to any
 * anon_vma, but to that page's node of the stable tree.
 */
static inline int PageKsm(struct page *page)
{
	return ((unsigned long)page->mapping & PAGE_MAPPING_FLAGS) ==
				(PAGE_MAPPING_ANON | PAGE_MAPPING_KSM);
}
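
/*
 * Descriptive note (assuming the usual flag values PAGE_MAPPING_ANON == 1
 * and PAGE_MAPPING_KSM == 2): an ordinary anonymous page has only the ANON
 * bit set in the low bits of page->mapping, while a KSM page has both bits
 * set, which is what the masked comparison above distinguishes.
 * page_rmapping() strips PAGE_MAPPING_FLAGS, so page_stable_node() below
 * recovers exactly the pointer stored by set_page_stable_node().
 */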

static inline struct stable_node *page_stable_node(struct page *page)
{
	return PageKsm(page) ? page_rmapping(page) : NULL;
}

static inline void set_page_stable_node(struct page *page,
					struct stable_node *stable_node)
{
	page->mapping = (void *)stable_node +
				(PAGE_MAPPING_ANON | PAGE_MAPPING_KSM);
}

/*
 * When do_swap_page() first faults in from swap what used to be a KSM page,
 * no problem, it will be assigned to this vma's anon_vma; but thereafter,
 * it might be faulted into a different anon_vma (or perhaps to a different
 * offset in the same anon_vma).  do_swap_page() cannot do all the locking
 * needed to reconstitute a cross-anon_vma KSM page: for now it has to make
 * a copy, and leave remerging the pages to a later pass of ksmd.
 *
 * We'd like to make this conditional on vma->vm_flags & VM_MERGEABLE,
 * but what if the vma was unmerged while the page was swapped out?
 */
struct page *ksm_does_need_to_copy(struct page *page,
			struct vm_area_struct *vma, unsigned long address);
static inline struct page *ksm_might_need_to_copy(struct page *page,
			struct vm_area_struct *vma, unsigned long address)
{
	struct anon_vma *anon_vma = page_anon_vma(page);

	if (!anon_vma ||
	    (anon_vma->root == vma->anon_vma->root &&
	     page->index == linear_page_index(vma, address)))
		return page;

	return ksm_does_need_to_copy(page, vma, address);
}
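
/*
 * Call-site sketch (a hedged illustration of the swap-in path described
 * above, not a verbatim quote of do_swap_page()): the fault handler simply
 * maps whatever page this helper hands back, copy or original:
 *
 *	page = ksm_might_need_to_copy(page, vma, address);
 */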

int page_referenced_ksm(struct page *page,
			struct mem_cgroup *memcg, unsigned long *vm_flags);
int try_to_unmap_ksm(struct page *page, enum ttu_flags flags);
int rmap_walk_ksm(struct page *page, int (*rmap_one)(struct page *,
		struct vm_area_struct *, unsigned long, void *), void *arg);
void ksm_migrate_page(struct page *newpage, struct page *oldpage);
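
/*
 * Example rmap_one callback for rmap_walk_ksm() (a hypothetical sketch;
 * count_one and its int-counter arg are illustrative, not kernel API).
 * The walker invokes it once per vma still mapping the page; returning
 * SWAP_AGAIN keeps the walk going:
 *
 *	static int count_one(struct page *page, struct vm_area_struct *vma,
 *			     unsigned long addr, void *arg)
 *	{
 *		(*(int *)arg)++;
 *		return SWAP_AGAIN;
 *	}
 */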

#else  /* !CONFIG_KSM */

static inline int ksm_fork(struct mm_struct *mm, struct mm_struct *oldmm)
{
	return 0;
}

static inline void ksm_exit(struct mm_struct *mm)
{
}

static inline int PageKsm(struct page *page)
{
	return 0;
}

#ifdef CONFIG_MMU
static inline int ksm_madvise(struct vm_area_struct *vma, unsigned long start,
		unsigned long end, int advice, unsigned long *vm_flags)
{
	return 0;
}

static inline struct page *ksm_might_need_to_copy(struct page *page,
			struct vm_area_struct *vma, unsigned long address)
{
	return page;
}

static inline int page_referenced_ksm(struct page *page,
			struct mem_cgroup *memcg, unsigned long *vm_flags)
{
	return 0;
}

static inline int try_to_unmap_ksm(struct page *page, enum ttu_flags flags)
{
	return SWAP_AGAIN; /* never reached: PageKsm() is 0 without CONFIG_KSM */
}

static inline int rmap_walk_ksm(struct page *page, int (*rmap_one)(struct page *,
		struct vm_area_struct *, unsigned long, void *), void *arg)
{
	return 0;
}

static inline void ksm_migrate_page(struct page *newpage, struct page *oldpage)
{
}
#endif /* CONFIG_MMU */
#endif /* !CONFIG_KSM */

#endif /* __LINUX_KSM_H */