/*
 * Memory merging support.
 *
 * This code enables dynamic sharing of identical pages found in different
 * memory areas, even if they are not shared by fork().
 */
#include <linux/bitops.h>
#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/vmstat.h>

#ifdef CONFIG_KSM
int ksm_madvise(struct vm_area_struct *vma, unsigned long start,
		unsigned long end, int advice, unsigned long *vm_flags);
int __ksm_enter(struct mm_struct *mm);
void __ksm_exit(struct mm_struct *mm,
		struct mmu_gather **tlbp, unsigned long end);
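
/*
 * Illustrative userspace sketch (an illustration, not part of this header):
 * a process opts an anonymous region in to merging via madvise(2) with
 * MADV_MERGEABLE, which is what ends up reaching ksm_madvise() above.
 * "buf" and "len" are assumed to exist in the caller.
 */
#if 0
	char *buf = mmap(NULL, len, PROT_READ | PROT_WRITE,
			 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (madvise(buf, len, MADV_MERGEABLE) != 0)
		perror("madvise(MADV_MERGEABLE)");
#endif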

static inline int ksm_fork(struct mm_struct *mm, struct mm_struct *oldmm)
{
	if (test_bit(MMF_VM_MERGEABLE, &oldmm->flags))
		return __ksm_enter(mm);
	return 0;
}
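
/*
 * Hypothetical call-site sketch (not part of this header): the fork path
 * is expected to call ksm_fork() while duplicating the mm, so the child
 * of a mergeable mm gets registered with ksm too.
 */
#if 0
	/* in fork's mm-duplication path, e.g. dup_mmap() */
	retval = ksm_fork(mm, oldmm);
	if (retval)
		goto out;
#endif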

/*
 * For KSM to handle OOM without deadlock when it's breaking COW in a
 * likely victim of the OOM killer, exit_mmap() has to serialize with
 * ksm_exit() after freeing mm's pages but before freeing its page tables.
 * That leaves a window in which KSM might refault pages which have just
 * been finally unmapped: guard against that with ksm_test_exit(), and
 * use it after getting mmap_sem in ksm.c, to check if mm is exiting.
 */
static inline bool ksm_test_exit(struct mm_struct *mm)
{
	return atomic_read(&mm->mm_users) == 0;
}
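
/*
 * Minimal sketch of the guard described above (hypothetical function name,
 * not part of this header): the scanner takes mmap_sem, then checks
 * ksm_test_exit() before touching the mm's pages.
 */
#if 0
static void hypothetical_scan_mm(struct mm_struct *mm)
{
	down_read(&mm->mmap_sem);
	if (ksm_test_exit(mm)) {
		/* mm is past exit_mmap(): do not refault its pages */
		up_read(&mm->mmap_sem);
		return;
	}
	/* ... safe to walk this mm's VM_MERGEABLE vmas here ... */
	up_read(&mm->mmap_sem);
}
#endif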

static inline void ksm_exit(struct mm_struct *mm,
			    struct mmu_gather **tlbp, unsigned long end)
{
	if (test_bit(MMF_VM_MERGEABLE, &mm->flags))
		__ksm_exit(mm, tlbp, end);
}
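
/*
 * Hypothetical ordering sketch of the serialization described above (the
 * surrounding exit_mmap() code is paraphrased, not quoted): ksm_exit()
 * runs after the mm's pages are unmapped but before its page tables go.
 */
#if 0
	/* in exit_mmap() */
	end = unmap_vmas(&tlb, vma, 0, -1, &nr_accounted, NULL);
	ksm_exit(mm, &tlb, end);
	free_pgtables(tlb, vma, FIRST_USER_ADDRESS, 0);
#endif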

/*
 * A KSM page is one of those write-protected "shared pages" or "merged pages"
 * which KSM maps into multiple mms, wherever identical anonymous page content
 * is found in VM_MERGEABLE vmas.  It's a PageAnon page, with NULL anon_vma.
 */
static inline int PageKsm(struct page *page)
{
	return ((unsigned long)page->mapping == PAGE_MAPPING_ANON);
}
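
/*
 * Why the bare comparison above suffices (reasoning from the comment
 * above): an ordinary PageAnon page stores its anon_vma pointer in
 * page->mapping with the PAGE_MAPPING_ANON bit set, so its ->mapping is
 * never the flag alone; a KSM page's NULL anon_vma leaves exactly
 * PAGE_MAPPING_ANON.  An illustrative caller:
 */
#if 0
	/* rmap walkers cannot derive an anon_vma from a KSM page */
	if (PageKsm(page))
		return SWAP_FAIL;	/* hypothetical bail-out */
#endif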

/*
 * page_add_ksm_rmap() does the same accounting as page_add_anon_rmap(),
 * but we have to avoid the checking which page_add_anon_rmap() performs:
 * those sanity checks assume a page with a real anon_vma.
 */
static inline void page_add_ksm_rmap(struct page *page)
{
	if (atomic_inc_and_test(&page->_mapcount)) {
		page->mapping = (void *) PAGE_MAPPING_ANON;
		__inc_zone_page_state(page, NR_ANON_PAGES);
	}
}
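
/*
 * Minimal call-site sketch (hypothetical variable names, not part of this
 * header): when KSM substitutes an already-merged page into a pte, it
 * accounts the new mapping with page_add_ksm_rmap() rather than
 * page_add_anon_rmap(), and installs the pte write-protected.
 */
#if 0
	pte_t entry = pte_wrprotect(mk_pte(kpage, vma->vm_page_prot));

	get_page(kpage);
	page_add_ksm_rmap(kpage);
	set_pte_at(mm, addr, ptep, entry);
#endif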

#else  /* !CONFIG_KSM */

static inline int ksm_madvise(struct vm_area_struct *vma, unsigned long start,
		unsigned long end, int advice, unsigned long *vm_flags)
{
	return 0;
}

static inline int ksm_fork(struct mm_struct *mm, struct mm_struct *oldmm)
{
	return 0;
}

static inline bool ksm_test_exit(struct mm_struct *mm)
{
	return false;
}

static inline void ksm_exit(struct mm_struct *mm,
			    struct mmu_gather **tlbp, unsigned long end)
{
}

static inline int PageKsm(struct page *page)
{
	return 0;
}

/* No stub required for page_add_ksm_rmap(page) */
#endif /* !CONFIG_KSM */