/*
 * Memory merging support.
 *
 * This code enables dynamic sharing of identical pages found in different
 * memory areas, even if they are not shared by fork().
 */

#include <linux/bitops.h>
#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/vmstat.h>

#ifdef CONFIG_KSM
int ksm_madvise(struct vm_area_struct *vma, unsigned long start,
		unsigned long end, int advice, unsigned long *vm_flags);
int __ksm_enter(struct mm_struct *mm);
void __ksm_exit(struct mm_struct *mm);
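
/*
 * Illustrative sketch: ksm_madvise() is reached from the madvise(2)
 * syscall path when userspace applies MADV_MERGEABLE or
 * MADV_UNMERGEABLE to a range.  A process might opt an anonymous
 * region into merging roughly like this (userspace code, length value
 * assumed for illustration):
 *
 *	size_t len = 2 * 1024 * 1024;
 *	char *buf = mmap(NULL, len, PROT_READ | PROT_WRITE,
 *			 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
 *	if (madvise(buf, len, MADV_MERGEABLE) != 0)
 *		perror("madvise(MADV_MERGEABLE)");
 */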

static inline int ksm_fork(struct mm_struct *mm, struct mm_struct *oldmm)
{
	if (test_bit(MMF_VM_MERGEABLE, &oldmm->flags))
		return __ksm_enter(mm);
	return 0;
}

static inline void ksm_exit(struct mm_struct *mm)
{
	if (test_bit(MMF_VM_MERGEABLE, &mm->flags))
		__ksm_exit(mm);
}
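
/*
 * Note: __ksm_enter() sets MMF_VM_MERGEABLE on the mm once any of its
 * vmas has been marked VM_MERGEABLE, so ksm_fork() registers the child
 * with the ksmd scanner exactly when the parent was registered, and
 * ksm_exit() unhooks the mm on teardown.
 */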

/*
 * A KSM page is one of those write-protected "shared pages" or "merged pages"
 * which KSM maps into multiple mms, wherever identical anonymous page content
 * is found in VM_MERGEABLE vmas.  It's a PageAnon page, with NULL anon_vma.
 */
static inline int PageKsm(struct page *page)
{
	return ((unsigned long)page->mapping == PAGE_MAPPING_ANON);
}
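
/*
 * Sketch of the distinction from PageAnon(): PageAnon() tests only the
 * PAGE_MAPPING_ANON low bit of page->mapping, so every KSM page is also
 * PageAnon; PageKsm() is the stricter test that the anon_vma pointer
 * bits above that flag are all zero:
 *
 *	PageAnon(page): ((unsigned long)page->mapping & PAGE_MAPPING_ANON) != 0
 *	PageKsm(page):  ((unsigned long)page->mapping == PAGE_MAPPING_ANON)
 */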

/*
 * This is like page_add_anon_rmap(), but we have to avoid the checking
 * which page_add_anon_rmap() performs: a KSM page has no anon_vma.
 */
static inline void page_add_ksm_rmap(struct page *page)
{
	if (atomic_inc_and_test(&page->_mapcount)) {
		page->mapping = (void *) PAGE_MAPPING_ANON;
		__inc_zone_page_state(page, NR_ANON_PAGES);
	}
}
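
/*
 * Note: _mapcount starts at -1 for an unmapped page, so
 * atomic_inc_and_test() succeeds only when the first mapping of the
 * page is added; the branch therefore sets page->mapping and bumps
 * NR_ANON_PAGES once, no matter how many mms the page is later
 * mapped into.
 */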
#else  /* !CONFIG_KSM */

static inline int ksm_madvise(struct vm_area_struct *vma, unsigned long start,
		unsigned long end, int advice, unsigned long *vm_flags)
{
	return 0;
}

static inline int ksm_fork(struct mm_struct *mm, struct mm_struct *oldmm)
{
	return 0;
}

static inline void ksm_exit(struct mm_struct *mm)
{
}

static inline int PageKsm(struct page *page)
{
	return 0;
}

/* No stub required for page_add_ksm_rmap(page) */
#endif /* !CONFIG_KSM */
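
/*
 * With CONFIG_KSM=n the stubs above let call sites such as the fork()
 * and exit() paths invoke ksm_fork()/ksm_exit() unconditionally,
 * without per-site #ifdef guards; the empty inlines compile away.
 */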