include/linux/huge_mm.h
#ifndef _LINUX_HUGE_MM_H
#define _LINUX_HUGE_MM_H

extern int do_huge_pmd_anonymous_page(struct mm_struct *mm,
				      struct vm_area_struct *vma,
				      unsigned long address, pmd_t *pmd,
				      unsigned int flags);
extern int copy_huge_pmd(struct mm_struct *dst_mm, struct mm_struct *src_mm,
			 pmd_t *dst_pmd, pmd_t *src_pmd, unsigned long addr,
			 struct vm_area_struct *vma);
extern void huge_pmd_set_accessed(struct mm_struct *mm,
				  struct vm_area_struct *vma,
				  unsigned long address, pmd_t *pmd,
				  pmd_t orig_pmd, int dirty);
extern int do_huge_pmd_wp_page(struct mm_struct *mm, struct vm_area_struct *vma,
			       unsigned long address, pmd_t *pmd,
			       pmd_t orig_pmd);
extern struct page *follow_trans_huge_pmd(struct vm_area_struct *vma,
					  unsigned long addr,
					  pmd_t *pmd,
					  unsigned int flags);
extern int zap_huge_pmd(struct mmu_gather *tlb,
			struct vm_area_struct *vma,
			pmd_t *pmd, unsigned long addr);
extern int mincore_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
			    unsigned long addr, unsigned long end,
			    unsigned char *vec);
extern int move_huge_pmd(struct vm_area_struct *vma,
			 struct vm_area_struct *new_vma,
			 unsigned long old_addr,
			 unsigned long new_addr, unsigned long old_end,
			 pmd_t *old_pmd, pmd_t *new_pmd);
extern int change_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
			   unsigned long addr, pgprot_t newprot,
			   int prot_numa);
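
/*
 * Bit numbers for the global transparent_hugepage_flags bitmap declared
 * further down.  They record whether THP and THP defrag are enabled
 * always, only for madvise(MADV_HUGEPAGE) regions, or (for defrag) only
 * inside khugepaged, plus whether the huge zero page and the DEBUG_VM
 * COW debugging mode are in use.
 */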
enum transparent_hugepage_flag {
	TRANSPARENT_HUGEPAGE_FLAG,
	TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG,
	TRANSPARENT_HUGEPAGE_DEFRAG_FLAG,
	TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG,
	TRANSPARENT_HUGEPAGE_DEFRAG_KHUGEPAGED_FLAG,
	TRANSPARENT_HUGEPAGE_USE_ZERO_PAGE_FLAG,
#ifdef CONFIG_DEBUG_VM
	TRANSPARENT_HUGEPAGE_DEBUG_COW_FLAG,
#endif
};
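
/*
 * Tells page_check_address_pmd() how to treat a pmd that is in the
 * middle of being split: match either state, only a non-splitting
 * mapping, or only a splitting one.
 */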
enum page_check_address_pmd_flag {
	PAGE_CHECK_ADDRESS_PMD_FLAG,
	PAGE_CHECK_ADDRESS_PMD_NOTSPLITTING_FLAG,
	PAGE_CHECK_ADDRESS_PMD_SPLITTING_FLAG,
};
extern pmd_t *page_check_address_pmd(struct page *page,
				     struct mm_struct *mm,
				     unsigned long address,
				     enum page_check_address_pmd_flag flag);
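
/*
 * HPAGE_PMD_ORDER/NR are defined in terms of HPAGE_PMD_SHIFT, which is
 * only provided below (PMD_SHIFT with THP, a BUILD_BUG() otherwise).
 * That is fine: the macros are expanded lazily at their point of use.
 */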
#define HPAGE_PMD_ORDER (HPAGE_PMD_SHIFT-PAGE_SHIFT)
#define HPAGE_PMD_NR (1<<HPAGE_PMD_ORDER)

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
#define HPAGE_PMD_SHIFT PMD_SHIFT
#define HPAGE_PMD_SIZE	((1UL) << HPAGE_PMD_SHIFT)
#define HPAGE_PMD_MASK	(~(HPAGE_PMD_SIZE - 1))

extern bool is_vma_temporary_stack(struct vm_area_struct *vma);
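
/*
 * A vma is eligible for THP when THP is enabled system-wide, or when it
 * is enabled for madvise() and the vma is marked VM_HUGEPAGE; vmas
 * marked VM_NOHUGEPAGE or identified as a temporary stack are never
 * eligible.
 */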
#define transparent_hugepage_enabled(__vma)				\
	((transparent_hugepage_flags &					\
	  (1<<TRANSPARENT_HUGEPAGE_FLAG) ||				\
	  (transparent_hugepage_flags &					\
	   (1<<TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG) &&			\
	   ((__vma)->vm_flags & VM_HUGEPAGE))) &&			\
	 !((__vma)->vm_flags & VM_NOHUGEPAGE) &&			\
	 !is_vma_temporary_stack(__vma))
#define transparent_hugepage_defrag(__vma)				\
	((transparent_hugepage_flags &					\
	  (1<<TRANSPARENT_HUGEPAGE_DEFRAG_FLAG)) ||			\
	 (transparent_hugepage_flags &					\
	  (1<<TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG) &&		\
	  (__vma)->vm_flags & VM_HUGEPAGE))
#define transparent_hugepage_use_zero_page()				\
	(transparent_hugepage_flags &					\
	 (1<<TRANSPARENT_HUGEPAGE_USE_ZERO_PAGE_FLAG))
#ifdef CONFIG_DEBUG_VM
#define transparent_hugepage_debug_cow()				\
	(transparent_hugepage_flags &					\
	 (1<<TRANSPARENT_HUGEPAGE_DEBUG_COW_FLAG))
#else /* CONFIG_DEBUG_VM */
#define transparent_hugepage_debug_cow() 0
#endif /* CONFIG_DEBUG_VM */

extern unsigned long transparent_hugepage_flags;
extern int copy_pte_range(struct mm_struct *dst_mm, struct mm_struct *src_mm,
			  pmd_t *dst_pmd, pmd_t *src_pmd,
			  struct vm_area_struct *vma,
			  unsigned long addr, unsigned long end);
extern int handle_pte_fault(struct mm_struct *mm,
			    struct vm_area_struct *vma, unsigned long address,
			    pte_t *pte, pmd_t *pmd, unsigned int flags);
extern int split_huge_page_to_list(struct page *page, struct list_head *list);
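/* Common case: split @page without supplying a private list for the subpages. */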
static inline int split_huge_page(struct page *page)
{
	return split_huge_page_to_list(page, NULL);
}
extern void __split_huge_page_pmd(struct vm_area_struct *vma,
				  unsigned long address, pmd_t *pmd);
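/*
 * Only drop into __split_huge_page_pmd() when the pmd really maps a
 * transparent huge page; the inline check keeps the common case cheap.
 */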
#define split_huge_page_pmd(__vma, __address, __pmd)			\
	do {								\
		pmd_t *____pmd = (__pmd);				\
		if (unlikely(pmd_trans_huge(*____pmd)))			\
			__split_huge_page_pmd(__vma, __address,		\
					      ____pmd);			\
	} while (0)
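/*
 * Taking and releasing the anon_vma write lock waits for any splitter
 * that currently holds it; afterwards the pmd must be neither splitting
 * nor huge any more, which the BUG_ON() asserts.
 */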
#define wait_split_huge_page(__anon_vma, __pmd)				\
	do {								\
		pmd_t *____pmd = (__pmd);				\
		anon_vma_lock_write(__anon_vma);			\
		anon_vma_unlock_write(__anon_vma);			\
		BUG_ON(pmd_trans_splitting(*____pmd) ||			\
		       pmd_trans_huge(*____pmd));			\
	} while (0)
extern void split_huge_page_pmd_mm(struct mm_struct *mm, unsigned long address,
				   pmd_t *pmd);
#if HPAGE_PMD_ORDER >= MAX_ORDER
#error "hugepages can't be allocated by the buddy allocator"
#endif
extern int hugepage_madvise(struct vm_area_struct *vma,
			    unsigned long *vm_flags, int advice);
extern void __vma_adjust_trans_huge(struct vm_area_struct *vma,
				    unsigned long start,
				    unsigned long end,
				    long adjust_next);
extern int __pmd_trans_huge_lock(pmd_t *pmd,
				 struct vm_area_struct *vma);
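/*
 * Inline fast path for __pmd_trans_huge_lock(): skip the call, and
 * return 0, when the pmd is not mapped huge at all.
 */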
/* mmap_sem must be held on entry */
static inline int pmd_trans_huge_lock(pmd_t *pmd,
				      struct vm_area_struct *vma)
{
	VM_BUG_ON(!rwsem_is_locked(&vma->vm_mm->mmap_sem));
	if (pmd_trans_huge(*pmd))
		return __pmd_trans_huge_lock(pmd, vma);
	else
		return 0;
}
static inline void vma_adjust_trans_huge(struct vm_area_struct *vma,
					 unsigned long start,
					 unsigned long end,
					 long adjust_next)
{
	if (!vma->anon_vma || vma->vm_ops)
		return;
	__vma_adjust_trans_huge(vma, start, end, adjust_next);
}
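/* Number of base pages mapped by @page: HPAGE_PMD_NR for a THP, else 1. */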
static inline int hpage_nr_pages(struct page *page)
{
	if (unlikely(PageTransHuge(page)))
		return HPAGE_PMD_NR;
	return 1;
}
static inline struct page *compound_trans_head(struct page *page)
{
	if (PageTail(page)) {
		struct page *head;
		head = page->first_page;
		smp_rmb();
		/*
		 * head may be a dangling pointer.
		 * __split_huge_page_refcount clears PageTail before
		 * overwriting first_page, so if PageTail is still
		 * there it means the head pointer isn't dangling.
		 */
		if (PageTail(page))
			return head;
	}
	return page;
}

extern int do_huge_pmd_numa_page(struct mm_struct *mm, struct vm_area_struct *vma,
				 unsigned long addr, pmd_t pmd, pmd_t *pmdp);

#else /* CONFIG_TRANSPARENT_HUGEPAGE */
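/*
 * !CONFIG_TRANSPARENT_HUGEPAGE stubs: the HPAGE_PMD_* constants become
 * build errors if used, and the helpers above collapse to no-ops or
 * constant returns.
 */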
#define HPAGE_PMD_SHIFT ({ BUILD_BUG(); 0; })
#define HPAGE_PMD_MASK ({ BUILD_BUG(); 0; })
#define HPAGE_PMD_SIZE ({ BUILD_BUG(); 0; })

#define hpage_nr_pages(x) 1

#define transparent_hugepage_enabled(__vma) 0

#define transparent_hugepage_flags 0UL
static inline int
split_huge_page_to_list(struct page *page, struct list_head *list)
{
	return 0;
}
static inline int split_huge_page(struct page *page)
{
	return 0;
}
#define split_huge_page_pmd(__vma, __address, __pmd)	\
	do { } while (0)
#define wait_split_huge_page(__anon_vma, __pmd)	\
	do { } while (0)
#define split_huge_page_pmd_mm(__mm, __address, __pmd)	\
	do { } while (0)
#define compound_trans_head(page) compound_head(page)
static inline int hugepage_madvise(struct vm_area_struct *vma,
				   unsigned long *vm_flags, int advice)
{
	BUG();
	return 0;
}
static inline void vma_adjust_trans_huge(struct vm_area_struct *vma,
					 unsigned long start,
					 unsigned long end,
					 long adjust_next)
{
}
static inline int pmd_trans_huge_lock(pmd_t *pmd,
				      struct vm_area_struct *vma)
{
	return 0;
}

static inline int do_huge_pmd_numa_page(struct mm_struct *mm, struct vm_area_struct *vma,
					unsigned long addr, pmd_t pmd, pmd_t *pmdp)
{
	return 0;
}

#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

#endif /* _LINUX_HUGE_MM_H */