#ifndef _LINUX_KHUGEPAGED_H
#define _LINUX_KHUGEPAGED_H

#include <linux/sched.h> /* MMF_VM_HUGEPAGE */

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
extern int __khugepaged_enter(struct mm_struct *mm);
extern void __khugepaged_exit(struct mm_struct *mm);
extern int khugepaged_enter_vma_merge(struct vm_area_struct *vma);
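
/*
 * Tests against transparent_hugepage_flags: khugepaged is enabled when
 * THP is in "always" or "madvise" mode, and khugepaged_defrag() tells
 * whether defrag is enabled for khugepaged's huge page allocations.
 */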
#define khugepaged_enabled()					\
	(transparent_hugepage_flags &				\
	 ((1<<TRANSPARENT_HUGEPAGE_FLAG) |			\
	  (1<<TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG)))
#define khugepaged_always()					\
	(transparent_hugepage_flags &				\
	 (1<<TRANSPARENT_HUGEPAGE_FLAG))
#define khugepaged_req_madv()					\
	(transparent_hugepage_flags &				\
	 (1<<TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG))
#define khugepaged_defrag()					\
	(transparent_hugepage_flags &				\
	 (1<<TRANSPARENT_HUGEPAGE_DEFRAG_KHUGEPAGED_FLAG))
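
/*
 * On fork, register the new mm with khugepaged if the parent mm was
 * registered.
 */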
static inline int khugepaged_fork(struct mm_struct *mm, struct mm_struct *oldmm)
{
	if (test_bit(MMF_VM_HUGEPAGE, &oldmm->flags))
		return __khugepaged_enter(mm);
	return 0;
}
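
/*
 * On mm teardown, unregister the mm from khugepaged, but only if it was
 * registered in the first place.
 */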
static inline void khugepaged_exit(struct mm_struct *mm)
{
	if (test_bit(MMF_VM_HUGEPAGE, &mm->flags))
		__khugepaged_exit(mm);
}
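
/*
 * Register the mm of this vma with khugepaged (unless it already is) when
 * collapsing is allowed for the vma: THP in "always" mode, or in "madvise"
 * mode with VM_HUGEPAGE set on the vma, and never when VM_NOHUGEPAGE is set.
 */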
static inline int khugepaged_enter(struct vm_area_struct *vma)
{
	if (!test_bit(MMF_VM_HUGEPAGE, &vma->vm_mm->flags))
		if ((khugepaged_always() ||
		     (khugepaged_req_madv() &&
		      vma->vm_flags & VM_HUGEPAGE)) &&
		    !(vma->vm_flags & VM_NOHUGEPAGE))
			if (__khugepaged_enter(vma->vm_mm))
				return -ENOMEM;
	return 0;
}
#else /* CONFIG_TRANSPARENT_HUGEPAGE */
static inline int khugepaged_fork(struct mm_struct *mm, struct mm_struct *oldmm)
{
	return 0;
}
static inline void khugepaged_exit(struct mm_struct *mm)
{
}
static inline int khugepaged_enter(struct vm_area_struct *vma)
{
	return 0;
}
static inline int khugepaged_enter_vma_merge(struct vm_area_struct *vma)
{
	return 0;
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

#endif /* _LINUX_KHUGEPAGED_H */