include/linux/hugetlb.h — HugeTLB page support interface (Linux 2.6)
[linux-2.6/kmemtrace.git] / include / linux / hugetlb.h
blob 4c5e610fe442eb7a4c116d84dda43bfcd9923ced
1 #ifndef _LINUX_HUGETLB_H
2 #define _LINUX_HUGETLB_H
4 #ifdef CONFIG_HUGETLB_PAGE
6 #include <linux/mempolicy.h>
7 #include <asm/tlbflush.h>
9 struct ctl_table;
11 static inline int is_vm_hugetlb_page(struct vm_area_struct *vma)
13 return vma->vm_flags & VM_HUGETLB;
16 int hugetlb_sysctl_handler(struct ctl_table *, int, struct file *, void __user *, size_t *, loff_t *);
17 int copy_hugetlb_page_range(struct mm_struct *, struct mm_struct *, struct vm_area_struct *);
18 int follow_hugetlb_page(struct mm_struct *, struct vm_area_struct *, struct page **, struct vm_area_struct **, unsigned long *, int *, int);
19 void unmap_hugepage_range(struct vm_area_struct *, unsigned long, unsigned long);
20 int hugetlb_prefault(struct address_space *, struct vm_area_struct *);
21 int hugetlb_report_meminfo(char *);
22 int hugetlb_report_node_meminfo(int, char *);
23 unsigned long hugetlb_total_pages(void);
24 int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
25 unsigned long address, int write_access);
27 extern unsigned long max_huge_pages;
28 extern const unsigned long hugetlb_zero, hugetlb_infinity;
29 extern int sysctl_hugetlb_shm_group;
31 /* arch callbacks */
33 pte_t *huge_pte_alloc(struct mm_struct *mm, unsigned long addr);
34 pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr);
35 struct page *follow_huge_addr(struct mm_struct *mm, unsigned long address,
36 int write);
37 struct page *follow_huge_pmd(struct mm_struct *mm, unsigned long address,
38 pmd_t *pmd, int write);
39 int pmd_huge(pmd_t pmd);
40 void hugetlb_change_protection(struct vm_area_struct *vma,
41 unsigned long address, unsigned long end, pgprot_t newprot);
/* Default: no address range is restricted to hugepages unless the arch
 * says otherwise. */
#ifndef ARCH_HAS_HUGEPAGE_ONLY_RANGE
#define is_hugepage_only_range(mm, addr, len)	0
#endif

/* Default: tear down hugepage page tables with the generic helper; archs
 * with special hugepage page-table layouts provide their own. */
#ifndef ARCH_HAS_HUGETLB_FREE_PGD_RANGE
#define hugetlb_free_pgd_range	free_pgd_range
#else
void hugetlb_free_pgd_range(struct mmu_gather **tlb, unsigned long addr,
			    unsigned long end, unsigned long floor,
			    unsigned long ceiling);
#endif
55 #ifndef ARCH_HAS_PREPARE_HUGEPAGE_RANGE
57 * If the arch doesn't supply something else, assume that hugepage
58 * size aligned regions are ok without further preparation.
60 static inline int prepare_hugepage_range(unsigned long addr, unsigned long len)
62 if (len & ~HPAGE_MASK)
63 return -EINVAL;
64 if (addr & ~HPAGE_MASK)
65 return -EINVAL;
66 return 0;
68 #else
69 int prepare_hugepage_range(unsigned long addr, unsigned long len);
70 #endif
/* Default: huge PTEs are set/cleared with the ordinary PTE primitives;
 * archs that need special handling override these. */
#ifndef ARCH_HAS_SETCLEAR_HUGE_PTE
#define set_huge_pte_at(mm, addr, ptep, pte)	set_pte_at(mm, addr, ptep, pte)
#define huge_ptep_get_and_clear(mm, addr, ptep) ptep_get_and_clear(mm, addr, ptep)
#else
void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
		     pte_t *ptep, pte_t pte);
pte_t huge_ptep_get_and_clear(struct mm_struct *mm, unsigned long addr,
			      pte_t *ptep);
#endif

/* Default: no arch work is needed before prefaulting hugepages. */
#ifndef ARCH_HAS_HUGETLB_PREFAULT_HOOK
#define hugetlb_prefault_arch_hook(mm)		do { } while (0)
#else
void hugetlb_prefault_arch_hook(struct mm_struct *mm);
#endif
88 #else /* !CONFIG_HUGETLB_PAGE */
90 static inline int is_vm_hugetlb_page(struct vm_area_struct *vma)
92 return 0;
/* No hugetlb pages exist when CONFIG_HUGETLB_PAGE is disabled. */
static inline unsigned long hugetlb_total_pages(void)
{
	return 0;
}
99 #define follow_hugetlb_page(m,v,p,vs,a,b,i) ({ BUG(); 0; })
100 #define follow_huge_addr(mm, addr, write) ERR_PTR(-EINVAL)
101 #define copy_hugetlb_page_range(src, dst, vma) ({ BUG(); 0; })
102 #define hugetlb_prefault(mapping, vma) ({ BUG(); 0; })
103 #define unmap_hugepage_range(vma, start, end) BUG()
104 #define hugetlb_report_meminfo(buf) 0
105 #define hugetlb_report_node_meminfo(n, buf) 0
106 #define follow_huge_pmd(mm, addr, pmd, write) NULL
107 #define prepare_hugepage_range(addr, len) (-EINVAL)
108 #define pmd_huge(x) 0
109 #define is_hugepage_only_range(mm, addr, len) 0
110 #define hugetlb_free_pgd_range(tlb, addr, end, floor, ceiling) ({BUG(); 0; })
111 #define hugetlb_fault(mm, vma, addr, write) ({ BUG(); 0; })
113 #define hugetlb_change_protection(vma, address, end, newprot)
115 #ifndef HPAGE_MASK
116 #define HPAGE_MASK PAGE_MASK /* Keep the compiler happy */
117 #define HPAGE_SIZE PAGE_SIZE
118 #endif
120 #endif /* !CONFIG_HUGETLB_PAGE */
122 #ifdef CONFIG_HUGETLBFS
123 struct hugetlbfs_config {
124 uid_t uid;
125 gid_t gid;
126 umode_t mode;
127 long nr_blocks;
128 long nr_inodes;
131 struct hugetlbfs_sb_info {
132 long max_blocks; /* blocks allowed */
133 long free_blocks; /* blocks free */
134 long max_inodes; /* inodes allowed */
135 long free_inodes; /* inodes free */
136 spinlock_t stat_lock;
140 struct hugetlbfs_inode_info {
141 struct shared_policy policy;
142 /* Protected by the (global) hugetlb_lock */
143 unsigned long prereserved_hpages;
144 struct inode vfs_inode;
147 static inline struct hugetlbfs_inode_info *HUGETLBFS_I(struct inode *inode)
149 return container_of(inode, struct hugetlbfs_inode_info, vfs_inode);
152 static inline struct hugetlbfs_sb_info *HUGETLBFS_SB(struct super_block *sb)
154 return sb->s_fs_info;
/* hugetlbfs entry points, implemented in fs/hugetlbfs/inode.c. */
extern const struct file_operations hugetlbfs_file_operations;
extern struct vm_operations_struct hugetlb_vm_ops;
struct file *hugetlb_zero_setup(size_t);
int hugetlb_extend_reservation(struct hugetlbfs_inode_info *info,
			       unsigned long atleast_hpages);
void hugetlb_truncate_reservation(struct hugetlbfs_inode_info *info,
				  unsigned long atmost_hpages);
int hugetlb_get_quota(struct address_space *mapping);
void hugetlb_put_quota(struct address_space *mapping);
167 static inline int is_file_hugepages(struct file *file)
169 return file->f_op == &hugetlbfs_file_operations;
172 static inline void set_file_hugepages(struct file *file)
174 file->f_op = &hugetlbfs_file_operations;
176 #else /* !CONFIG_HUGETLBFS */
178 #define is_file_hugepages(file) 0
179 #define set_file_hugepages(file) BUG()
180 #define hugetlb_zero_setup(size) ERR_PTR(-ENOSYS)
182 #endif /* !CONFIG_HUGETLBFS */
184 #endif /* _LINUX_HUGETLB_H */