#ifndef _LINUX_PAGEMAP_H
#define _LINUX_PAGEMAP_H

/*
 * Copyright 1995 Linus Torvalds
 */

#include <linux/mm.h>
#include <linux/fs.h>
#include <linux/list.h>
#include <linux/highmem.h>
#include <linux/compiler.h>
#include <asm/uaccess.h>
#include <linux/gfp.h>
#include <linux/bitops.h>
#include <linux/hardirq.h> /* for in_interrupt() */

/*
 * Bits in mapping->flags.  The lower __GFP_BITS_SHIFT bits are the page
 * allocation mode flags.
 */
#define AS_EIO		(__GFP_BITS_SHIFT + 0)	/* IO error on async write */
#define AS_ENOSPC	(__GFP_BITS_SHIFT + 1)	/* ENOSPC on async write */
#define AS_MM_ALL_LOCKS	(__GFP_BITS_SHIFT + 2)	/* under mm_take_all_locks() */

static inline void mapping_set_error(struct address_space *mapping, int error)
{
	if (unlikely(error)) {
		if (error == -ENOSPC)
			set_bit(AS_ENOSPC, &mapping->flags);
		else
			set_bit(AS_EIO, &mapping->flags);
	}
}

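/*
 * Example (illustrative sketch; "err" is a hypothetical status from a
 * completed asynchronous write): a writeback path that hits an error
 * can record it on the mapping so that a later fsync() still sees it:
 *
 *	if (err)
 *		mapping_set_error(page->mapping, err);
 *
 * The AS_EIO/AS_ENOSPC bits are then tested and cleared on the
 * fsync/fdatawait side, which turns them back into -EIO or -ENOSPC.
 */
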
static inline gfp_t mapping_gfp_mask(struct address_space * mapping)
{
	return (__force gfp_t)mapping->flags & __GFP_BITS_MASK;
}

/*
 * This is non-atomic.  Only to be used before the mapping is activated.
 * Probably needs a barrier...
 */
static inline void mapping_set_gfp_mask(struct address_space *m, gfp_t mask)
{
	m->flags = (m->flags & ~(__force unsigned long)__GFP_BITS_MASK) |
				(__force unsigned long)mask;
}

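/*
 * Example (illustrative sketch): a filesystem that must not re-enter
 * itself from reclaim can mask __GFP_FS off its pagecache allocations
 * while setting up a new inode, before the mapping sees any use:
 *
 *	mapping_set_gfp_mask(inode->i_mapping,
 *			mapping_gfp_mask(inode->i_mapping) & ~__GFP_FS);
 */
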
/*
 * The page cache can be done in larger chunks than
 * one page, because it allows for more efficient
 * throughput (it can then be mapped into user
 * space in smaller chunks for the same flexibility).
 *
 * Or rather, it _will_ be done in larger chunks.
 */
#define PAGE_CACHE_SHIFT	PAGE_SHIFT
#define PAGE_CACHE_SIZE		PAGE_SIZE
#define PAGE_CACHE_MASK		PAGE_MASK
#define PAGE_CACHE_ALIGN(addr)	(((addr)+PAGE_CACHE_SIZE-1)&PAGE_CACHE_MASK)

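/*
 * Example (worked, assuming PAGE_CACHE_SIZE == 4096): PAGE_CACHE_ALIGN
 * rounds up to the next page boundary, so PAGE_CACHE_ALIGN(0x1234) ==
 * 0x2000 while PAGE_CACHE_ALIGN(0x2000) stays 0x2000.
 */
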
#define page_cache_get(page)		get_page(page)
#define page_cache_release(page)	put_page(page)
void release_pages(struct page **pages, int nr, int cold);

/*
 * speculatively take a reference to a page.
 * If the page is free (_count == 0), then _count is untouched, and 0
 * is returned. Otherwise, _count is incremented by 1 and 1 is returned.
 *
 * This function must be called inside the same rcu_read_lock() section as has
 * been used to lookup the page in the pagecache radix-tree (or page table):
 * this allows allocators to use a synchronize_rcu() to stabilize _count.
 *
 * Unless an RCU grace period has passed, the count of all pages coming out
 * of the allocator must be considered unstable. page_count may return higher
 * than expected, and put_page must be able to do the right thing when the
 * page has been finished with, no matter what it is subsequently allocated
 * for (because put_page is what is used here to drop an invalid speculative
 * reference).
 *
 * This is the interesting part of the lockless pagecache (and lockless
 * get_user_pages) locking protocol, where the lookup-side (eg. find_get_page)
 * has the following pattern:
 * 1. find page in radix tree
 * 2. conditionally increment refcount
 * 3. check the page is still in pagecache (if no, goto 1)
 *
 * Remove-side that cares about stability of _count (eg. reclaim) has the
 * following (with tree_lock held for write):
 * A. atomically check refcount is correct and set it to 0 (atomic_cmpxchg)
 * B. remove page from pagecache
 * C. free the page
 *
 * There are 2 critical interleavings that matter:
 * - 2 runs before A: in this case, A sees elevated refcount and bails out
 * - A runs before 2: in this case, 2 sees zero refcount and retries;
 *   subsequently, B will complete and 1 will find no page, causing the
 *   lookup to return NULL.
 *
 * It is possible that between 1 and 2, the page is removed then the exact same
 * page is inserted into the same position in pagecache. That's OK: the
 * old find_get_page using tree_lock could equally have run before or after
 * such a re-insertion, depending on order that locks are granted.
 *
 * Lookups racing against pagecache insertion isn't a big problem: either 1
 * will find the page or it will not. Likewise, the old find_get_page could run
 * either before the insertion or afterwards, depending on timing.
 */
static inline int page_cache_get_speculative(struct page *page)
{
	VM_BUG_ON(in_interrupt());

#if !defined(CONFIG_SMP) && defined(CONFIG_CLASSIC_RCU)
# ifdef CONFIG_PREEMPT
	VM_BUG_ON(!in_atomic());
# endif
	/*
	 * Preempt must be disabled here - we rely on rcu_read_lock doing
	 * this for us.
	 *
	 * Pagecache won't be truncated from interrupt context, so if we have
	 * found a page in the radix tree here, we have pinned its refcount by
	 * disabling preempt, and hence no need for the "speculative get" that
	 * SMP requires.
	 */
	VM_BUG_ON(page_count(page) == 0);
	atomic_inc(&page->_count);

#else
	if (unlikely(!get_page_unless_zero(page))) {
		/*
		 * Either the page has been freed, or will be freed.
		 * In either case, retry here and the caller should
		 * do the right thing (see comments above).
		 */
		return 0;
	}
#endif
	VM_BUG_ON(PageTail(page));

	return 1;
}

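/*
 * Example (illustrative sketch of the lookup side described above,
 * with the radix-tree access simplified relative to the real
 * find_get_page()):
 *
 *	struct page *page;
 *
 *	rcu_read_lock();
 * repeat:
 *	page = radix_tree_lookup(&mapping->page_tree, offset);	// 1.
 *	if (page) {
 *		if (!page_cache_get_speculative(page))		// 2.
 *			goto repeat;
 *		// 3. recheck it is still the page at this slot
 *		if (page != radix_tree_lookup(&mapping->page_tree, offset)) {
 *			page_cache_release(page);
 *			goto repeat;
 *		}
 *	}
 *	rcu_read_unlock();
 */
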
static inline int page_freeze_refs(struct page *page, int count)
{
	return likely(atomic_cmpxchg(&page->_count, count, 0) == count);
}

static inline void page_unfreeze_refs(struct page *page, int count)
{
	VM_BUG_ON(page_count(page) != 0);
	VM_BUG_ON(count == 0);

	atomic_set(&page->_count, count);
}

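/*
 * Example (illustrative sketch of the remove side described above, in
 * the spirit of reclaim; tree_lock is held for write, and "2" is the
 * expected refcount: one for the pagecache plus one held by us):
 *
 *	if (!page_freeze_refs(page, 2))
 *		goto cannot_free;	// A failed: extra ref, bail out
 *	if (unlikely(PageDirty(page))) {
 *		page_unfreeze_refs(page, 2);	// restore and bail out
 *		goto cannot_free;
 *	}
 *	__remove_from_page_cache(page);	// B: gone from the radix tree
 *	// C: the frozen (zero) refcount lets the caller free the page
 */
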
#ifdef CONFIG_NUMA
extern struct page *__page_cache_alloc(gfp_t gfp);
#else
static inline struct page *__page_cache_alloc(gfp_t gfp)
{
	return alloc_pages(gfp, 0);
}
#endif

static inline struct page *page_cache_alloc(struct address_space *x)
{
	return __page_cache_alloc(mapping_gfp_mask(x));
}

static inline struct page *page_cache_alloc_cold(struct address_space *x)
{
	return __page_cache_alloc(mapping_gfp_mask(x)|__GFP_COLD);
}

typedef int filler_t(void *, struct page *);

extern struct page * find_get_page(struct address_space *mapping,
				pgoff_t index);
extern struct page * find_lock_page(struct address_space *mapping,
				pgoff_t index);
extern struct page * find_or_create_page(struct address_space *mapping,
				pgoff_t index, gfp_t gfp_mask);
unsigned find_get_pages(struct address_space *mapping, pgoff_t start,
			unsigned int nr_pages, struct page **pages);
unsigned find_get_pages_contig(struct address_space *mapping, pgoff_t start,
			       unsigned int nr_pages, struct page **pages);
unsigned find_get_pages_tag(struct address_space *mapping, pgoff_t *index,
			int tag, unsigned int nr_pages, struct page **pages);

struct page *__grab_cache_page(struct address_space *mapping, pgoff_t index);

/*
 * Returns locked page at given index in given cache, creating it if needed.
 */
static inline struct page *grab_cache_page(struct address_space *mapping,
								pgoff_t index)
{
	return find_or_create_page(mapping, index, mapping_gfp_mask(mapping));
}

extern struct page * grab_cache_page_nowait(struct address_space *mapping,
				pgoff_t index);
extern struct page * read_cache_page_async(struct address_space *mapping,
				pgoff_t index, filler_t *filler,
				void *data);
extern struct page * read_cache_page(struct address_space *mapping,
				pgoff_t index, filler_t *filler,
				void *data);
extern int read_cache_pages(struct address_space *mapping,
		struct list_head *pages, filler_t *filler, void *data);

static inline struct page *read_mapping_page_async(
						struct address_space *mapping,
						pgoff_t index, void *data)
{
	filler_t *filler = (filler_t *)mapping->a_ops->readpage;
	return read_cache_page_async(mapping, index, filler, data);
}

static inline struct page *read_mapping_page(struct address_space *mapping,
					     pgoff_t index, void *data)
{
	filler_t *filler = (filler_t *)mapping->a_ops->readpage;
	return read_cache_page(mapping, index, filler, data);
}

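/*
 * Example (illustrative sketch): read_cache_page(), and hence
 * read_mapping_page(), returns an ERR_PTR() value on failure and an
 * uptodate page on success, so a typical caller looks like:
 *
 *	struct page *page = read_mapping_page(mapping, n, NULL);
 *	if (IS_ERR(page))
 *		return PTR_ERR(page);
 *	// ... use the page contents ...
 *	page_cache_release(page);
 */
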
int add_to_page_cache_locked(struct page *page, struct address_space *mapping,
				pgoff_t index, gfp_t gfp_mask);
int add_to_page_cache_lru(struct page *page, struct address_space *mapping,
				pgoff_t index, gfp_t gfp_mask);
extern void remove_from_page_cache(struct page *page);
extern void __remove_from_page_cache(struct page *page);

/*
 * Like add_to_page_cache_locked, but used to add newly allocated pages:
 * the page is new, so we can just run SetPageLocked() against it.
 */
static inline int add_to_page_cache(struct page *page,
		struct address_space *mapping, pgoff_t offset, gfp_t gfp_mask)
{
	int error;

	SetPageLocked(page);
	error = add_to_page_cache_locked(page, mapping, offset, gfp_mask);
	if (unlikely(error))
		ClearPageLocked(page);
	return error;
}

/*
 * Return byte-offset into filesystem object for page.
 */
static inline loff_t page_offset(struct page *page)
{
	return ((loff_t)page->index) << PAGE_CACHE_SHIFT;
}

static inline pgoff_t linear_page_index(struct vm_area_struct *vma,
					unsigned long address)
{
	pgoff_t pgoff = (address - vma->vm_start) >> PAGE_SHIFT;
	pgoff += vma->vm_pgoff;
	return pgoff >> (PAGE_CACHE_SHIFT - PAGE_SHIFT);
}

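/*
 * Example (worked, assuming PAGE_SHIFT == 12 so PAGE_CACHE_SHIFT ==
 * PAGE_SHIFT): for a vma with vm_start == 0x10000 and vm_pgoff == 3,
 * address 0x12000 lies (0x12000 - 0x10000) >> 12 == 2 pages into the
 * vma, so linear_page_index() returns 2 + 3 == 5.
 */
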
extern void __lock_page(struct page *page);
extern int __lock_page_killable(struct page *page);
extern void __lock_page_nosync(struct page *page);
extern void unlock_page(struct page *page);

/*
 * lock_page may only be called if we have the page's inode pinned.
 */
static inline void lock_page(struct page *page)
{
	might_sleep();
	if (TestSetPageLocked(page))
		__lock_page(page);
}

/*
 * lock_page_killable is like lock_page but can be interrupted by fatal
 * signals.  It returns 0 if it locked the page and -EINTR if it was
 * killed while waiting.
 */
static inline int lock_page_killable(struct page *page)
{
	might_sleep();
	if (TestSetPageLocked(page))
		return __lock_page_killable(page);
	return 0;
}

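/*
 * Example (illustrative sketch): a read path that wants the page lock
 * but must not turn a fatal signal into an unkillable wait:
 *
 *	error = lock_page_killable(page);
 *	if (error) {
 *		page_cache_release(page);
 *		return error;		// -EINTR: caller unwinds
 *	}
 *	// ... operate on the locked page ...
 *	unlock_page(page);
 */
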
/*
 * lock_page_nosync should only be used if we can't pin the page's inode.
 * Doesn't play quite so well with block device plugging.
 */
static inline void lock_page_nosync(struct page *page)
{
	might_sleep();
	if (TestSetPageLocked(page))
		__lock_page_nosync(page);
}

/*
 * This is exported only for wait_on_page_locked/wait_on_page_writeback.
 * Never use this directly!
 */
extern void wait_on_page_bit(struct page *page, int bit_nr);

/*
 * Wait for a page to be unlocked.
 *
 * This must be called with the caller "holding" the page,
 * ie with increased "page->count" so that the page won't
 * go away during the wait.
 */
static inline void wait_on_page_locked(struct page *page)
{
	if (PageLocked(page))
		wait_on_page_bit(page, PG_locked);
}

/*
 * Wait for a page to complete writeback.
 */
static inline void wait_on_page_writeback(struct page *page)
{
	if (PageWriteback(page))
		wait_on_page_bit(page, PG_writeback);
}

extern void end_page_writeback(struct page *page);

/*
 * Fault a userspace page into pagetables.  Return non-zero on a fault.
 *
 * This assumes that two userspace pages are always sufficient.  That's
 * not true if PAGE_CACHE_SIZE > PAGE_SIZE.
 */
static inline int fault_in_pages_writeable(char __user *uaddr, int size)
{
	int ret;

	if (unlikely(size == 0))
		return 0;

	/*
	 * Writing zeroes into userspace here is OK, because we know that if
	 * the zero gets there, we'll be overwriting it.
	 */
	ret = __put_user(0, uaddr);
	if (ret == 0) {
		char __user *end = uaddr + size - 1;

		/*
		 * If the page was already mapped, this will get a cache miss
		 * for sure, so try to avoid doing it.
		 */
		if (((unsigned long)uaddr & PAGE_MASK) !=
				((unsigned long)end & PAGE_MASK))
			ret = __put_user(0, end);
	}
	return ret;
}

static inline int fault_in_pages_readable(const char __user *uaddr, int size)
{
	volatile char c;
	int ret;

	if (unlikely(size == 0))
		return 0;

	ret = __get_user(c, uaddr);
	if (ret == 0) {
		const char __user *end = uaddr + size - 1;

		if (((unsigned long)uaddr & PAGE_MASK) !=
				((unsigned long)end & PAGE_MASK))
			ret = __get_user(c, end);
	}
	return ret;
}

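/*
 * Example (illustrative sketch): the buffered write path pre-faults
 * the source buffer before taking the page lock, so that a later
 * pagefault-disabled copy from userspace cannot deadlock when the
 * source buffer overlaps the very page being written:
 *
 *	if (fault_in_pages_readable(buf, bytes))
 *		return -EFAULT;		// or retry with fewer bytes
 *	lock_page(page);
 *	// ... copy from buf with pagefaults disabled ...
 */
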
#endif /* _LINUX_PAGEMAP_H */