[linux-2.6/mini2440.git] / include / linux / highmem.h
blob 1fcb7126a01f1aa3e7a4c63abd46c4311946c843
#ifndef _LINUX_HIGHMEM_H
#define _LINUX_HIGHMEM_H

#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/uaccess.h>

#include <asm/cacheflush.h>

#ifndef ARCH_HAS_FLUSH_ANON_PAGE
static inline void flush_anon_page(struct vm_area_struct *vma, struct page *page, unsigned long vmaddr)
{
}
#endif

#ifndef ARCH_HAS_FLUSH_KERNEL_DCACHE_PAGE
static inline void flush_kernel_dcache_page(struct page *page)
{
}
#endif

#include <asm/kmap_types.h>

#if defined(CONFIG_DEBUG_HIGHMEM) && defined(CONFIG_TRACE_IRQFLAGS_SUPPORT)

void debug_kmap_atomic(enum km_type type);

#else

static inline void debug_kmap_atomic(enum km_type type)
{
}

#endif
#ifdef CONFIG_HIGHMEM
#include <asm/highmem.h>

/* declarations for linux/mm/highmem.c */
unsigned int nr_free_highpages(void);
extern unsigned long totalhigh_pages;

void kmap_flush_unused(void);

#else /* CONFIG_HIGHMEM */

static inline unsigned int nr_free_highpages(void) { return 0; }

#define totalhigh_pages 0

#ifndef ARCH_HAS_KMAP
static inline void *kmap(struct page *page)
{
	might_sleep();
	return page_address(page);
}

#define kunmap(page) do { (void) (page); } while (0)

static inline void *kmap_atomic(struct page *page, enum km_type idx)
{
	pagefault_disable();
	return page_address(page);
}

#define kmap_atomic_prot(page, idx, prot)	kmap_atomic(page, idx)

#define kunmap_atomic(addr, idx)	do { pagefault_enable(); } while (0)
#define kmap_atomic_pfn(pfn, idx)	kmap_atomic(pfn_to_page(pfn), (idx))
#define kmap_atomic_to_page(ptr)	virt_to_page(ptr)

#define kmap_flush_unused()	do {} while (0)
#endif

#endif /* CONFIG_HIGHMEM */
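
For context, a minimal usage sketch of the atomic mapping API above; the helper and its parameters are hypothetical and not part of this header. With CONFIG_HIGHMEM the page is temporarily mapped into a per-CPU fixmap slot; without it, per the fallbacks just shown, kmap_atomic() reduces to pagefault_disable() plus page_address():

/*
 * Hypothetical example: copy bytes out of a possibly-highmem page
 * without sleeping. KM_USER0 names the per-CPU kmap slot; the code
 * between kmap_atomic() and kunmap_atomic() must not sleep.
 */
static void my_copy_fragment(void *dst, struct page *page,
			     unsigned int offset, unsigned int len)
{
	char *kaddr = kmap_atomic(page, KM_USER0);

	memcpy(dst, kaddr + offset, len);
	kunmap_atomic(kaddr, KM_USER0);
}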
/* when CONFIG_HIGHMEM is not set these will be plain clear/copy_page */
#ifndef clear_user_highpage
static inline void clear_user_highpage(struct page *page, unsigned long vaddr)
{
	void *addr = kmap_atomic(page, KM_USER0);
	clear_user_page(addr, vaddr, page);
	kunmap_atomic(addr, KM_USER0);
}
#endif
#ifndef __HAVE_ARCH_ALLOC_ZEROED_USER_HIGHPAGE
/**
 * __alloc_zeroed_user_highpage - Allocate a zeroed HIGHMEM page for a VMA with caller-specified movable GFP flags
 * @movableflags: The GFP flags related to the page's future ability to move, such as __GFP_MOVABLE
 * @vma: The VMA the page is to be allocated for
 * @vaddr: The virtual address the page will be inserted into
 *
 * This function will allocate a page for a VMA, but the caller is expected
 * to specify via @movableflags whether the page will be movable in the
 * future or not.
 *
 * An architecture may override this function by defining
 * __HAVE_ARCH_ALLOC_ZEROED_USER_HIGHPAGE and providing its own
 * implementation.
 */
static inline struct page *
__alloc_zeroed_user_highpage(gfp_t movableflags,
			struct vm_area_struct *vma,
			unsigned long vaddr)
{
	struct page *page = alloc_page_vma(GFP_HIGHUSER | movableflags,
			vma, vaddr);

	if (page)
		clear_user_highpage(page, vaddr);

	return page;
}
#endif
/**
 * alloc_zeroed_user_highpage_movable - Allocate a zeroed HIGHMEM page for a VMA that the caller knows can move
 * @vma: The VMA the page is to be allocated for
 * @vaddr: The virtual address the page will be inserted into
 *
 * This function will allocate a page for a VMA that the caller knows will
 * be able to migrate in the future using move_pages() or to be reclaimed.
 */
static inline struct page *
alloc_zeroed_user_highpage_movable(struct vm_area_struct *vma,
					unsigned long vaddr)
{
	return __alloc_zeroed_user_highpage(__GFP_MOVABLE, vma, vaddr);
}
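
As a rough usage sketch (the caller below is hypothetical, not from this file): an anonymous page-fault path that wants a zeroed, migratable page calls the movable variant and treats allocation failure as an out-of-memory condition:

/* Hypothetical caller: zeroed, migratable page for a user VMA. */
static struct page *my_anon_alloc(struct vm_area_struct *vma,
				  unsigned long address)
{
	struct page *page = alloc_zeroed_user_highpage_movable(vma, address);

	if (!page)
		return NULL;	/* caller turns this into VM_FAULT_OOM */
	return page;
}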
static inline void clear_highpage(struct page *page)
{
	void *kaddr = kmap_atomic(page, KM_USER0);
	clear_page(kaddr);
	kunmap_atomic(kaddr, KM_USER0);
}

static inline void zero_user_segments(struct page *page,
	unsigned start1, unsigned end1,
	unsigned start2, unsigned end2)
{
	void *kaddr = kmap_atomic(page, KM_USER0);

	BUG_ON(end1 > PAGE_SIZE || end2 > PAGE_SIZE);

	if (end1 > start1)
		memset(kaddr + start1, 0, end1 - start1);

	if (end2 > start2)
		memset(kaddr + start2, 0, end2 - start2);

	kunmap_atomic(kaddr, KM_USER0);
	flush_dcache_page(page);
}

static inline void zero_user_segment(struct page *page,
	unsigned start, unsigned end)
{
	zero_user_segments(page, start, end, 0, 0);
}

static inline void zero_user(struct page *page,
	unsigned start, unsigned size)
{
	zero_user_segments(page, start, start + size, 0, 0);
}

static inline void __deprecated memclear_highpage_flush(struct page *page,
			unsigned int offset, unsigned int size)
{
	zero_user(page, offset, size);
}
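
A brief sketch of the zeroing helpers in practice; the helper and its "valid" parameter are illustrative only. A filesystem that has filled only part of a page typically zeroes the tail so stale data never reaches user space, and zero_user_segments() lets it clear two discontiguous ranges with a single map/unmap:

/* Hypothetical helper: zero everything beyond the valid bytes. */
static void my_zero_tail(struct page *page, unsigned int valid)
{
	if (valid < PAGE_SIZE)
		zero_user(page, valid, PAGE_SIZE - valid);
}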
#ifndef __HAVE_ARCH_COPY_USER_HIGHPAGE

static inline void copy_user_highpage(struct page *to, struct page *from,
	unsigned long vaddr, struct vm_area_struct *vma)
{
	char *vfrom, *vto;

	vfrom = kmap_atomic(from, KM_USER0);
	vto = kmap_atomic(to, KM_USER1);
	copy_user_page(vto, vfrom, vaddr, to);
	kunmap_atomic(vfrom, KM_USER0);
	kunmap_atomic(vto, KM_USER1);
}

#endif

static inline void copy_highpage(struct page *to, struct page *from)
{
	char *vfrom, *vto;

	vfrom = kmap_atomic(from, KM_USER0);
	vto = kmap_atomic(to, KM_USER1);
	copy_page(vto, vfrom);
	kunmap_atomic(vfrom, KM_USER0);
	kunmap_atomic(vto, KM_USER1);
}

#endif /* _LINUX_HIGHMEM_H */
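
Finally, a hedged sketch of choosing between the two copy helpers above; the wrapper is hypothetical. Copies destined for a user mapping go through copy_user_highpage() so architectures with virtually indexed caches can keep the user alias coherent, while purely kernel-internal copies can use copy_highpage():

/* Hypothetical wrapper: pick the cache-correct page copy. */
static void my_copy_page(struct page *to, struct page *from,
			 unsigned long uaddr,
			 struct vm_area_struct *vma)
{
	if (vma)
		copy_user_highpage(to, from, uaddr, vma);
	else
		copy_highpage(to, from);
}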