added 2.6.29.6 aldebaran kernel
[nao-ulib.git] / kernel / 2.6.29.6-aldebaran-rt / include / linux / pagemap.h
blob 076a7dc67c2bd25c89bec24ce036d719f1886dc3
#ifndef _LINUX_PAGEMAP_H
#define _LINUX_PAGEMAP_H

/*
 * Copyright 1995 Linus Torvalds
 */
#include <linux/mm.h>
#include <linux/fs.h>
#include <linux/list.h>
#include <linux/highmem.h>
#include <linux/compiler.h>
#include <asm/uaccess.h>
#include <linux/gfp.h>
#include <linux/bitops.h>
#include <linux/hardirq.h> /* for in_interrupt() */
/*
 * Bits in mapping->flags.  The lower __GFP_BITS_SHIFT bits are the page
 * allocation mode flags.
 */
enum mapping_flags {
	AS_EIO		= __GFP_BITS_SHIFT + 0,	/* IO error on async write */
	AS_ENOSPC	= __GFP_BITS_SHIFT + 1,	/* ENOSPC on async write */
	AS_MM_ALL_LOCKS	= __GFP_BITS_SHIFT + 2,	/* under mm_take_all_locks() */
#ifdef CONFIG_UNEVICTABLE_LRU
	AS_UNEVICTABLE	= __GFP_BITS_SHIFT + 3,	/* e.g., ramdisk, SHM_LOCK */
#endif
};

static inline void mapping_set_error(struct address_space *mapping, int error)
{
	if (unlikely(error)) {
		if (error == -ENOSPC)
			set_bit(AS_ENOSPC, &mapping->flags);
		else
			set_bit(AS_EIO, &mapping->flags);
	}
}
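
/*
 * Illustrative sketch (not part of the original header): how a writeback
 * completion path might record an error with mapping_set_error(), and how a
 * later sync might consume the sticky AS_EIO/AS_ENOSPC bits. The helper
 * names are hypothetical; the real consumers live in the fdatawait paths of
 * mm/filemap.c.
 */
static inline void example_end_writeback(struct address_space *mapping, int err)
{
	/* remember -EIO/-ENOSPC in mapping->flags for a later fsync/sync */
	mapping_set_error(mapping, err);
}

static inline int example_check_errors(struct address_space *mapping)
{
	int ret = 0;

	/* reporting clears the sticky bits, as the fdatawait paths do */
	if (test_and_clear_bit(AS_ENOSPC, &mapping->flags))
		ret = -ENOSPC;
	if (test_and_clear_bit(AS_EIO, &mapping->flags))
		ret = -EIO;
	return ret;
}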
#ifdef CONFIG_UNEVICTABLE_LRU

static inline void mapping_set_unevictable(struct address_space *mapping)
{
	set_bit(AS_UNEVICTABLE, &mapping->flags);
}

static inline void mapping_clear_unevictable(struct address_space *mapping)
{
	clear_bit(AS_UNEVICTABLE, &mapping->flags);
}

static inline int mapping_unevictable(struct address_space *mapping)
{
	if (likely(mapping))
		return test_bit(AS_UNEVICTABLE, &mapping->flags);
	return !!mapping;
}
#else
static inline void mapping_set_unevictable(struct address_space *mapping) { }
static inline void mapping_clear_unevictable(struct address_space *mapping) { }
static inline int mapping_unevictable(struct address_space *mapping)
{
	return 0;
}
#endif
static inline gfp_t mapping_gfp_mask(struct address_space * mapping)
{
	return (__force gfp_t)mapping->flags & __GFP_BITS_MASK;
}

/*
 * This is non-atomic.  Only to be used before the mapping is activated.
 * Probably needs a barrier...
 */
static inline void mapping_set_gfp_mask(struct address_space *m, gfp_t mask)
{
	m->flags = (m->flags & ~(__force unsigned long)__GFP_BITS_MASK) |
				(__force unsigned long)mask;
}
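
/*
 * Illustrative sketch (not part of the original header): a filesystem might
 * restrict pagecache allocations for an inode's mapping at setup time, e.g.
 * dropping __GFP_FS so allocations cannot recurse back into the filesystem.
 * The helper name is hypothetical.
 */
static inline void example_restrict_mapping_to_nofs(struct inode *inode)
{
	/* clear __GFP_FS from the mask used for this mapping's page allocations */
	mapping_set_gfp_mask(inode->i_mapping,
			     mapping_gfp_mask(inode->i_mapping) & ~__GFP_FS);
}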
/*
 * The page cache can be done in larger chunks than
 * one page, because it allows for more efficient
 * throughput (it can then be mapped into user
 * space in smaller chunks for same flexibility).
 *
 * Or rather, it _will_ be done in larger chunks.
 */
#define PAGE_CACHE_SHIFT	PAGE_SHIFT
#define PAGE_CACHE_SIZE		PAGE_SIZE
#define PAGE_CACHE_MASK		PAGE_MASK
#define PAGE_CACHE_ALIGN(addr)	(((addr)+PAGE_CACHE_SIZE-1)&PAGE_CACHE_MASK)

#define page_cache_get(page)		get_page(page)
#define page_cache_release(page)	put_page(page)
void release_pages(struct page **pages, int nr, int cold);
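
/*
 * Illustrative sketch (not part of the original header): the usual way the
 * PAGE_CACHE_* macros split a file position into a pagecache index and a
 * byte offset within that page. The helper name is hypothetical.
 */
static inline pgoff_t example_pos_to_page(loff_t pos, unsigned long *off)
{
	*off = pos & (PAGE_CACHE_SIZE - 1);	/* byte offset inside the page */
	return pos >> PAGE_CACHE_SHIFT;		/* index of the page within the file */
}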
/*
 * speculatively take a reference to a page.
 * If the page is free (_count == 0), then _count is untouched, and 0
 * is returned. Otherwise, _count is incremented by 1 and 1 is returned.
 *
 * This function must be called inside the same rcu_read_lock() section as has
 * been used to lookup the page in the pagecache radix-tree (or page table):
 * this allows allocators to use a synchronize_rcu() to stabilize _count.
 *
 * Unless an RCU grace period has passed, the count of all pages coming out
 * of the allocator must be considered unstable. page_count may return higher
 * than expected, and put_page must be able to do the right thing when the
 * page has been finished with, no matter what it is subsequently allocated
 * for (because put_page is what is used here to drop an invalid speculative
 * reference).
 *
 * This is the interesting part of the lockless pagecache (and lockless
 * get_user_pages) locking protocol, where the lookup-side (eg. find_get_page)
 * has the following pattern:
 * 1. find page in radix tree
 * 2. conditionally increment refcount
 * 3. check the page is still in pagecache (if no, goto 1)
 *
 * Remove-side that cares about stability of _count (eg. reclaim) has the
 * following (with tree_lock held for write):
 * A. atomically check refcount is correct and set it to 0 (atomic_cmpxchg)
 * B. remove page from pagecache
 * C. free the page
 *
 * There are 2 critical interleavings that matter:
 * - 2 runs before A: in this case, A sees elevated refcount and bails out
 * - A runs before 2: in this case, 2 sees zero refcount and retries;
 *   subsequently, B will complete and 1 will find no page, causing the
 *   lookup to return NULL.
 *
 * It is possible that between 1 and 2, the page is removed then the exact same
 * page is inserted into the same position in pagecache. That's OK: the
 * old find_get_page using tree_lock could equally have run before or after
 * such a re-insertion, depending on order that locks are granted.
 *
 * Lookups racing against pagecache insertion isn't a big problem: either 1
 * will find the page or it will not. Likewise, the old find_get_page could run
 * either before the insertion or afterwards, depending on timing.
 */
static inline int page_cache_get_speculative(struct page *page)
{
	VM_BUG_ON(in_interrupt());

#if !defined(CONFIG_SMP) && defined(CONFIG_CLASSIC_RCU)
# ifdef CONFIG_PREEMPT
	VM_BUG_ON(!in_atomic());
# endif
	/*
	 * Preempt must be disabled here - we rely on rcu_read_lock doing
	 * this for us.
	 *
	 * Pagecache won't be truncated from interrupt context, so if we have
	 * found a page in the radix tree here, we have pinned its refcount by
	 * disabling preempt, and hence no need for the "speculative get" that
	 * SMP requires.
	 */
	VM_BUG_ON(page_count(page) == 0);
	atomic_inc(&page->_count);

#else
	if (unlikely(!get_page_unless_zero(page))) {
		/*
		 * Either the page has been freed, or will be freed.
		 * In either case, retry here and the caller should
		 * do the right thing (see comments above).
		 */
		return 0;
	}
#endif
	VM_BUG_ON(PageTail(page));

	return 1;
}
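
/*
 * Illustrative sketch (not part of the original header) of the lookup-side
 * pattern described above, simplified from find_get_page() in mm/filemap.c.
 * Assumes <linux/radix-tree.h> (pulled in via <linux/fs.h>) for the radix
 * tree calls; the function name is hypothetical.
 */
static inline struct page *example_lockless_lookup(struct address_space *mapping,
						   pgoff_t index)
{
	struct page *page;

	rcu_read_lock();
repeat:
	/* 1. find page in radix tree */
	page = radix_tree_lookup(&mapping->page_tree, index);
	if (page) {
		/* 2. conditionally increment refcount */
		if (!page_cache_get_speculative(page))
			goto repeat;
		/* 3. check the page is still in pagecache at this index */
		if (unlikely(page != radix_tree_lookup(&mapping->page_tree, index))) {
			page_cache_release(page);
			goto repeat;
		}
	}
	rcu_read_unlock();
	return page;
}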
/*
 * Same as above, but add instead of inc (could just be merged)
 */
static inline int page_cache_add_speculative(struct page *page, int count)
{
	VM_BUG_ON(in_interrupt());

#if !defined(CONFIG_SMP) && defined(CONFIG_CLASSIC_RCU)
# ifdef CONFIG_PREEMPT
	VM_BUG_ON(!in_atomic());
# endif
	VM_BUG_ON(page_count(page) == 0);
	atomic_add(count, &page->_count);

#else
	if (unlikely(!atomic_add_unless(&page->_count, count, 0)))
		return 0;
#endif
	VM_BUG_ON(PageCompound(page) && page != compound_head(page));

	return 1;
}
static inline int page_freeze_refs(struct page *page, int count)
{
	return likely(atomic_cmpxchg(&page->_count, count, 0) == count);
}

static inline void page_unfreeze_refs(struct page *page, int count)
{
	VM_BUG_ON(page_count(page) != 0);
	VM_BUG_ON(count == 0);

	atomic_set(&page->_count, count);
}
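
/*
 * Illustrative sketch (not part of the original header) of the remove-side
 * protocol (steps A-C above), loosely modelled on __remove_mapping() in
 * mm/vmscan.c but heavily simplified (no accounting, no swapcache handling).
 * Assumes the caller holds mapping->tree_lock for write and holds one page
 * reference itself, so the expected refcount is 2 (pagecache + caller). The
 * function name is hypothetical.
 */
static inline int example_try_remove_page(struct address_space *mapping,
					  struct page *page)
{
	/* A. atomically check the refcount is exactly 2 and freeze it to 0 */
	if (!page_freeze_refs(page, 2))
		return 0;			/* someone holds a speculative ref */
	if (unlikely(PageDirty(page))) {
		/* cannot free after all: hand the references back */
		page_unfreeze_refs(page, 2);
		return 0;
	}
	/* B. remove the page from the pagecache radix tree (simplified) */
	radix_tree_delete(&mapping->page_tree, page->index);
	mapping->nrpages--;
	return 1;				/* C. the caller now frees the page */
}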
#ifdef CONFIG_NUMA
extern struct page *__page_cache_alloc(gfp_t gfp);
#else
static inline struct page *__page_cache_alloc(gfp_t gfp)
{
	return alloc_pages(gfp, 0);
}
#endif

static inline struct page *page_cache_alloc(struct address_space *x)
{
	return __page_cache_alloc(mapping_gfp_mask(x));
}

static inline struct page *page_cache_alloc_cold(struct address_space *x)
{
	return __page_cache_alloc(mapping_gfp_mask(x)|__GFP_COLD);
}
typedef int filler_t(void *, struct page *);

extern struct page * find_get_page(struct address_space *mapping,
				pgoff_t index);
extern struct page * find_lock_page(struct address_space *mapping,
				pgoff_t index);
extern struct page * find_or_create_page(struct address_space *mapping,
				pgoff_t index, gfp_t gfp_mask);
unsigned find_get_pages(struct address_space *mapping, pgoff_t start,
			unsigned int nr_pages, struct page **pages);
unsigned find_get_pages_contig(struct address_space *mapping, pgoff_t start,
			       unsigned int nr_pages, struct page **pages);
unsigned find_get_pages_tag(struct address_space *mapping, pgoff_t *index,
			int tag, unsigned int nr_pages, struct page **pages);
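
/*
 * Illustrative sketch (not part of the original header): probing the
 * pagecache with find_get_page(). On success the returned page carries a
 * reference that must be dropped with page_cache_release(). The helper name
 * is hypothetical.
 */
static inline int example_page_is_cached(struct address_space *mapping,
					 pgoff_t index)
{
	struct page *page = find_get_page(mapping, index);

	if (!page)
		return 0;		/* no page cached at this index */
	/* the reference pins the page; real callers would use it here */
	page_cache_release(page);
	return 1;
}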
struct page *grab_cache_page_write_begin(struct address_space *mapping,
			pgoff_t index, unsigned flags);

/*
 * Returns locked page at given index in given cache, creating it if needed.
 */
static inline struct page *grab_cache_page(struct address_space *mapping,
								pgoff_t index)
{
	return find_or_create_page(mapping, index, mapping_gfp_mask(mapping));
}
extern struct page * grab_cache_page_nowait(struct address_space *mapping,
				pgoff_t index);
extern struct page * read_cache_page_async(struct address_space *mapping,
				pgoff_t index, filler_t *filler,
				void *data);
extern struct page * read_cache_page(struct address_space *mapping,
				pgoff_t index, filler_t *filler,
				void *data);
extern int read_cache_pages(struct address_space *mapping,
		struct list_head *pages, filler_t *filler, void *data);

static inline struct page *read_mapping_page_async(
						struct address_space *mapping,
						pgoff_t index, void *data)
{
	filler_t *filler = (filler_t *)mapping->a_ops->readpage;
	return read_cache_page_async(mapping, index, filler, data);
}

static inline struct page *read_mapping_page(struct address_space *mapping,
					     pgoff_t index, void *data)
{
	filler_t *filler = (filler_t *)mapping->a_ops->readpage;
	return read_cache_page(mapping, index, filler, data);
}
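
/*
 * Illustrative sketch (not part of the original header): reading one page of
 * a file through its mapping. read_mapping_page() returns an uptodate,
 * referenced (but unlocked) page or an ERR_PTR() value, so this assumes
 * <linux/err.h> is available for IS_ERR()/PTR_ERR(). The helper name is
 * hypothetical.
 */
static inline int example_read_one_page(struct address_space *mapping,
					pgoff_t index)
{
	struct page *page = read_mapping_page(mapping, index, NULL);

	if (IS_ERR(page))
		return PTR_ERR(page);	/* I/O or allocation error */
	/* real callers would kmap() and copy data out of the page here */
	page_cache_release(page);
	return 0;
}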
/*
 * Return byte-offset into filesystem object for page.
 */
static inline loff_t page_offset(struct page *page)
{
	return ((loff_t)page->index) << PAGE_CACHE_SHIFT;
}

static inline pgoff_t linear_page_index(struct vm_area_struct *vma,
					unsigned long address)
{
	pgoff_t pgoff = (address - vma->vm_start) >> PAGE_SHIFT;
	pgoff += vma->vm_pgoff;
	return pgoff >> (PAGE_CACHE_SHIFT - PAGE_SHIFT);
}
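
/*
 * Illustrative sketch (not part of the original header): page_offset() gives
 * the byte position of a file page within the file, while linear_page_index()
 * maps a user address in an ordinary file-backed VMA to the pagecache index
 * behind it. The helper name is hypothetical.
 */
static inline int example_addr_maps_to_page(struct vm_area_struct *vma,
					    unsigned long address,
					    struct page *page)
{
	/* true if 'address' in this linear VMA is backed by this file page */
	return linear_page_index(vma, address) == page->index;
}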
extern void __lock_page(struct page *page);
extern int __lock_page_killable(struct page *page);
extern void __lock_page_nosync(struct page *page);
extern void unlock_page(struct page *page);

static inline void __set_page_locked(struct page *page)
{
	__set_bit(PG_locked, &page->flags);
}

static inline void __clear_page_locked(struct page *page)
{
	__clear_bit(PG_locked, &page->flags);
}

static inline int trylock_page(struct page *page)
{
	return (likely(!test_and_set_bit_lock(PG_locked, &page->flags)));
}
/*
 * lock_page may only be called if we have the page's inode pinned.
 */
static inline void lock_page(struct page *page)
{
	might_sleep();
	if (!trylock_page(page))
		__lock_page(page);
}

/*
 * lock_page_killable is like lock_page but can be interrupted by fatal
 * signals.  It returns 0 if it locked the page and -EINTR if it was
 * killed while waiting.
 */
static inline int lock_page_killable(struct page *page)
{
	might_sleep();
	if (!trylock_page(page))
		return __lock_page_killable(page);
	return 0;
}
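
/*
 * Illustrative sketch (not part of the original header): the usual pairing of
 * lock_page_killable() with unlock_page(). The caller must already hold a
 * reference to the page. The helper name is hypothetical.
 */
static inline int example_with_page_locked(struct page *page)
{
	if (lock_page_killable(page))
		return -EINTR;		/* a fatal signal arrived while waiting */
	/*
	 * The page is locked here: it cannot be truncated or have its
	 * mapping torn down until we unlock it.
	 */
	unlock_page(page);
	return 0;
}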
/*
 * lock_page_nosync should only be used if we can't pin the page's inode.
 * Doesn't play quite so well with block device plugging.
 */
static inline void lock_page_nosync(struct page *page)
{
	might_sleep();
	if (!trylock_page(page))
		__lock_page_nosync(page);
}

/*
 * This is exported only for wait_on_page_locked/wait_on_page_writeback.
 * Never use this directly!
 */
extern void wait_on_page_bit(struct page *page, int bit_nr);

/*
 * Wait for a page to be unlocked.
 *
 * This must be called with the caller "holding" the page,
 * ie with increased "page->count" so that the page won't
 * go away during the wait..
 */
static inline void wait_on_page_locked(struct page *page)
{
	if (PageLocked(page))
		wait_on_page_bit(page, PG_locked);
}

/*
 * Wait for a page to complete writeback
 */
static inline void wait_on_page_writeback(struct page *page)
{
	if (PageWriteback(page))
		wait_on_page_bit(page, PG_writeback);
}
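
/*
 * Illustrative sketch (not part of the original header): a common quiescing
 * pattern (as in truncation paths) that locks a page and then waits for any
 * in-flight writeback, so neither new I/O under the lock nor older writeback
 * is pending afterwards. The caller holds a page reference; the helper name
 * is hypothetical.
 */
static inline void example_quiesce_page(struct page *page)
{
	lock_page(page);
	wait_on_page_writeback(page);
	/* the page is now locked and not under writeback */
	unlock_page(page);
}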
extern void end_page_writeback(struct page *page);

/*
 * Fault a userspace page into pagetables.  Return non-zero on a fault.
 *
 * This assumes that two userspace pages are always sufficient.  That's
 * not true if PAGE_CACHE_SIZE > PAGE_SIZE.
 */
static inline int fault_in_pages_writeable(char __user *uaddr, int size)
{
	int ret;

	if (unlikely(size == 0))
		return 0;

	/*
	 * Writing zeroes into userspace here is OK, because we know that if
	 * the zero gets there, we'll be overwriting it.
	 */
	ret = __put_user(0, uaddr);
	if (ret == 0) {
		char __user *end = uaddr + size - 1;

		/*
		 * If the page was already mapped, this will get a cache miss
		 * for sure, so try to avoid doing it.
		 */
		if (((unsigned long)uaddr & PAGE_MASK) !=
				((unsigned long)end & PAGE_MASK))
			ret = __put_user(0, end);
	}
	return ret;
}

static inline int fault_in_pages_readable(const char __user *uaddr, int size)
{
	volatile char c;
	int ret;

	if (unlikely(size == 0))
		return 0;

	ret = __get_user(c, uaddr);
	if (ret == 0) {
		const char __user *end = uaddr + size - 1;

		if (((unsigned long)uaddr & PAGE_MASK) !=
				((unsigned long)end & PAGE_MASK))
			ret = __get_user(c, end);
	}
	return ret;
}
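
/*
 * Illustrative sketch (not part of the original header): the write-path
 * deadlock-avoidance idiom (compare generic_perform_write() in mm/filemap.c).
 * Before locking a pagecache page that a copy from user space will target,
 * the source buffer is touched so a later atomic copy does not have to fault
 * it in while the page lock is held. The helper name is hypothetical.
 */
static inline int example_prefault_source(const char __user *buf, int bytes)
{
	if (fault_in_pages_readable(buf, bytes))
		return -EFAULT;		/* source buffer is not readable */
	return 0;			/* safe to lock pages and copy */
}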
int add_to_page_cache_locked(struct page *page, struct address_space *mapping,
				pgoff_t index, gfp_t gfp_mask);
int add_to_page_cache_lru(struct page *page, struct address_space *mapping,
				pgoff_t index, gfp_t gfp_mask);
extern void remove_from_page_cache(struct page *page);
extern void __remove_from_page_cache(struct page *page);

/*
 * Like add_to_page_cache_locked, but used to add newly allocated pages:
 * the page is new, so we can just run __set_page_locked() against it.
 */
static inline int add_to_page_cache(struct page *page,
		struct address_space *mapping, pgoff_t offset, gfp_t gfp_mask)
{
	int error;

	__set_page_locked(page);
	error = add_to_page_cache_locked(page, mapping, offset, gfp_mask);
	if (unlikely(error))
		__clear_page_locked(page);
	return error;
}
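
/*
 * Illustrative sketch (not part of the original header): allocating a fresh
 * page and inserting it into the pagecache and the LRU, as readahead-style
 * code does. On success the page is returned locked and referenced; on any
 * failure the new page is released. The helper name is hypothetical.
 */
static inline struct page *example_add_new_page(struct address_space *mapping,
						pgoff_t index)
{
	struct page *page = page_cache_alloc_cold(mapping);

	if (!page)
		return NULL;
	if (add_to_page_cache_lru(page, mapping, index,
				  mapping_gfp_mask(mapping))) {
		/* -EEXIST (someone raced us) or -ENOMEM: drop our new page */
		page_cache_release(page);
		return NULL;
	}
	return page;	/* locked; the caller must unlock_page() when done */
}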
#endif /* _LINUX_PAGEMAP_H */