allow coexistence of N build and AC build.
[tomato.git] release/src-rt-6.x/linux/linux-2.6/include/linux/highmem.h
#ifndef _LINUX_HIGHMEM_H
#define _LINUX_HIGHMEM_H

#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/uaccess.h>

#include <asm/cacheflush.h>

#ifndef ARCH_HAS_FLUSH_ANON_PAGE
static inline void flush_anon_page(struct vm_area_struct *vma, struct page *page, unsigned long vmaddr)
{
        /* no-op by default; architectures with aliasing caches override this */
}
#endif

#ifndef ARCH_HAS_FLUSH_KERNEL_DCACHE_PAGE
static inline void flush_kernel_dcache_page(struct page *page)
{
        /* no-op by default; architectures with aliasing caches override this */
}
#endif

#ifdef CONFIG_HIGHMEM

#include <asm/highmem.h>

/* declarations for linux/mm/highmem.c */
unsigned int nr_free_highpages(void);
extern unsigned long totalhigh_pages;

void kmap_flush_unused(void);

#else /* CONFIG_HIGHMEM */

static inline unsigned int nr_free_highpages(void) { return 0; }

#define totalhigh_pages 0

#ifndef ARCH_HAS_KMAP
/* without highmem every page is permanently mapped, so kmap() is trivial */
static inline void *kmap(struct page *page)
{
        might_sleep();
        return page_address(page);
}

#define kunmap(page) do { (void) (page); } while (0)

#include <asm/kmap_types.h>

static inline void *kmap_atomic(struct page *page, enum km_type idx)
{
        pagefault_disable();
        return page_address(page);
}

#define kmap_atomic_prot(page, idx, prot)       kmap_atomic(page, idx)

#define kunmap_atomic(addr, idx)        do { pagefault_enable(); } while (0)
#define kmap_atomic_pfn(pfn, idx)       kmap_atomic(pfn_to_page(pfn), (idx))
#define kmap_atomic_to_page(ptr)        virt_to_page(ptr)

#define kmap_flush_unused()     do {} while(0)
#endif

#endif /* CONFIG_HIGHMEM */
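/*
 * Illustrative sketch (not part of the original header): the usage pattern
 * the fallbacks above must stay compatible with. The helper name is
 * hypothetical; only kmap_atomic()/kunmap_atomic() and the KM_USER0 slot
 * from <asm/kmap_types.h> are taken from this header.
 */
static inline unsigned char example_read_first_byte(struct page *page)
{
        unsigned char val;
        void *kaddr = kmap_atomic(page, KM_USER0);      /* no sleeping until unmapped */

        val = *(unsigned char *)kaddr;
        kunmap_atomic(kaddr, KM_USER0);                 /* drop the temporary mapping */
        return val;
}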
/* when CONFIG_HIGHMEM is not set these will be plain clear/copy_page */
static inline void clear_user_highpage(struct page *page, unsigned long vaddr)
{
        void *addr = kmap_atomic(page, KM_USER0);
        clear_user_page(addr, vaddr, page);
        kunmap_atomic(addr, KM_USER0);
        /* Make sure this page is cleared on other CPU's too before using it */
        smp_wmb();
}
#ifndef __HAVE_ARCH_ALLOC_ZEROED_USER_HIGHPAGE
/**
 * __alloc_zeroed_user_highpage - Allocate a zeroed HIGHMEM page for a VMA with caller-specified movable GFP flags
 * @movableflags: The GFP flags related to the page's future ability to move like __GFP_MOVABLE
 * @vma: The VMA the page is to be allocated for
 * @vaddr: The virtual address the page will be inserted into
 *
 * This function will allocate a page for a VMA but the caller is expected
 * to specify via movableflags whether the page will be movable in the
 * future or not.
 *
 * An architecture may override this function by defining
 * __HAVE_ARCH_ALLOC_ZEROED_USER_HIGHPAGE and providing their own
 * implementation.
 */
static inline struct page *
__alloc_zeroed_user_highpage(gfp_t movableflags,
                        struct vm_area_struct *vma,
                        unsigned long vaddr)
{
        struct page *page = alloc_page_vma(GFP_HIGHUSER | movableflags,
                        vma, vaddr);

        if (page)
                clear_user_highpage(page, vaddr);

        return page;
}
#endif
/**
 * alloc_zeroed_user_highpage - Allocate a zeroed HIGHMEM page for a VMA
 * @vma: The VMA the page is to be allocated for
 * @vaddr: The virtual address the page will be inserted into
 *
 * This function will allocate a page for a VMA that the caller knows will
 * not be able to move in the future using move_pages() or reclaim. If it
 * is known that the page can move, use alloc_zeroed_user_highpage_movable
 */
static inline struct page *
alloc_zeroed_user_highpage(struct vm_area_struct *vma, unsigned long vaddr)
{
        return __alloc_zeroed_user_highpage(0, vma, vaddr);
}
/**
 * alloc_zeroed_user_highpage_movable - Allocate a zeroed HIGHMEM page for a VMA that the caller knows can move
 * @vma: The VMA the page is to be allocated for
 * @vaddr: The virtual address the page will be inserted into
 *
 * This function will allocate a page for a VMA that the caller knows will
 * be able to migrate in the future using move_pages() or be reclaimed.
 */
static inline struct page *
alloc_zeroed_user_highpage_movable(struct vm_area_struct *vma,
                                        unsigned long vaddr)
{
        return __alloc_zeroed_user_highpage(__GFP_MOVABLE, vma, vaddr);
}
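/*
 * Illustrative sketch (not part of the original header): how an anonymous
 * fault path might pick between the two wrappers above. The function name
 * and the "can_migrate" flag are hypothetical; only the two allocators come
 * from this header.
 */
static inline struct page *
example_alloc_anon_page(struct vm_area_struct *vma, unsigned long address,
                        int can_migrate)
{
        /* a movable allocation lets the page be migrated or compacted later */
        if (can_migrate)
                return alloc_zeroed_user_highpage_movable(vma, address);
        return alloc_zeroed_user_highpage(vma, address);
}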
static inline void clear_highpage(struct page *page)
{
        void *kaddr = kmap_atomic(page, KM_USER0);
        clear_page(kaddr);
        kunmap_atomic(kaddr, KM_USER0);
}

/*
 * Same but also flushes aliased cache contents to RAM.
 *
 * This must be a macro because KM_USER0 and friends aren't defined if
 * !CONFIG_HIGHMEM
 */
#define zero_user_page(page, offset, size, km_type)             \
        do {                                                    \
                void *kaddr;                                    \
                                                                \
                BUG_ON((offset) + (size) > PAGE_SIZE);          \
                                                                \
                kaddr = kmap_atomic(page, km_type);             \
                memset((char *)kaddr + (offset), 0, (size));    \
                flush_dcache_page(page);                        \
                kunmap_atomic(kaddr, (km_type));                \
        } while (0)
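/*
 * Illustrative sketch (not part of the original header): zeroing the tail of
 * a partially written page, a typical filesystem-style use of
 * zero_user_page(). The helper name and the "from" parameter are hypothetical.
 */
static inline void example_zero_page_tail(struct page *page, unsigned int from)
{
        /* wipe bytes [from, PAGE_SIZE) and flush them past any cache aliases */
        zero_user_page(page, from, PAGE_SIZE - from, KM_USER0);
}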
static inline void __deprecated memclear_highpage_flush(struct page *page,
                        unsigned int offset, unsigned int size)
{
        zero_user_page(page, offset, size, KM_USER0);
}

#ifndef __HAVE_ARCH_COPY_USER_HIGHPAGE

static inline void copy_user_highpage(struct page *to, struct page *from,
        unsigned long vaddr, struct vm_area_struct *vma)
{
        char *vfrom, *vto;

        vfrom = kmap_atomic(from, KM_USER0);
        vto = kmap_atomic(to, KM_USER1);
        copy_user_page(vto, vfrom, vaddr, to);
        kunmap_atomic(vfrom, KM_USER0);
        kunmap_atomic(vto, KM_USER1);
        /* Make sure the copied data is visible to other CPUs before this page is used */
        smp_wmb();
}

#endif
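/*
 * Illustrative sketch (not part of the original header): a copy-on-write
 * style helper built on the primitives above. The function name is
 * hypothetical and error handling is omitted.
 */
static inline struct page *
example_cow_copy(struct page *old_page, unsigned long address,
                 struct vm_area_struct *vma)
{
        struct page *new_page = alloc_page_vma(GFP_HIGHUSER, vma, address);

        if (new_page)
                copy_user_highpage(new_page, old_page, address, vma);
        return new_page;
}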
static inline void copy_highpage(struct page *to, struct page *from)
{
        char *vfrom, *vto;

        vfrom = kmap_atomic(from, KM_USER0);
        vto = kmap_atomic(to, KM_USER1);
        copy_page(vto, vfrom);
        kunmap_atomic(vfrom, KM_USER0);
        kunmap_atomic(vto, KM_USER1);
}

#endif /* _LINUX_HIGHMEM_H */