include/linux/pagemap.h

#ifndef _LINUX_PAGEMAP_H
#define _LINUX_PAGEMAP_H

/*
 * Copyright 1995 Linus Torvalds
 */
#include <linux/mm.h>
#include <linux/fs.h>
#include <linux/list.h>
#include <linux/highmem.h>
#include <linux/compiler.h>
#include <asm/uaccess.h>
#include <linux/gfp.h>

/*
 * Bits in mapping->flags.  The lower __GFP_BITS_SHIFT bits are the page
 * allocation mode flags.
 */
#define AS_EIO		(__GFP_BITS_SHIFT + 0)	/* IO error on async write */
#define AS_ENOSPC	(__GFP_BITS_SHIFT + 1)	/* ENOSPC on async write */

static inline gfp_t mapping_gfp_mask(struct address_space * mapping)
{
	return (__force gfp_t)mapping->flags & __GFP_BITS_MASK;
}

/*
 * This is non-atomic.  Only to be used before the mapping is activated.
 * Probably needs a barrier...
 */
static inline void mapping_set_gfp_mask(struct address_space *m, gfp_t mask)
{
	m->flags = (m->flags & ~(__force unsigned long)__GFP_BITS_MASK) |
				(__force unsigned long)mask;
}
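
/*
 * Example (illustrative, not part of the original header): a filesystem
 * would typically pin its mapping's allocation mode once, at inode-init
 * time, before the mapping can be seen by other CPUs:
 *
 *	mapping_set_gfp_mask(inode->i_mapping, GFP_HIGHUSER);
 *	BUG_ON(mapping_gfp_mask(inode->i_mapping) != GFP_HIGHUSER);
 */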

/*
 * The page cache can be done in larger chunks than
 * one page, because it allows for more efficient
 * throughput (it can then be mapped into user
 * space in smaller chunks for the same flexibility).
 *
 * Or rather, it _will_ be done in larger chunks.
 */
#define PAGE_CACHE_SHIFT	PAGE_SHIFT
#define PAGE_CACHE_SIZE		PAGE_SIZE
#define PAGE_CACHE_MASK		PAGE_MASK
#define PAGE_CACHE_ALIGN(addr)	(((addr)+PAGE_CACHE_SIZE-1)&PAGE_CACHE_MASK)
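
/*
 * Worked example (illustrative): with PAGE_CACHE_SIZE == 4096,
 * PAGE_CACHE_ALIGN(1) and PAGE_CACHE_ALIGN(4096) both evaluate to 4096,
 * while PAGE_CACHE_ALIGN(4097) rounds up to 8192.
 */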

#define page_cache_get(page)		get_page(page)
#define page_cache_release(page)	put_page(page)
void release_pages(struct page **pages, int nr, int cold);

#ifdef CONFIG_NUMA
extern struct page *page_cache_alloc(struct address_space *x);
extern struct page *page_cache_alloc_cold(struct address_space *x);
#else
static inline struct page *page_cache_alloc(struct address_space *x)
{
	return alloc_pages(mapping_gfp_mask(x), 0);
}

static inline struct page *page_cache_alloc_cold(struct address_space *x)
{
	return alloc_pages(mapping_gfp_mask(x)|__GFP_COLD, 0);
}
#endif

typedef int filler_t(void *, struct page *);

extern struct page * find_get_page(struct address_space *mapping,
				unsigned long index);
extern struct page * find_lock_page(struct address_space *mapping,
				unsigned long index);
extern __deprecated_for_modules struct page * find_trylock_page(
			struct address_space *mapping, unsigned long index);
extern struct page * find_or_create_page(struct address_space *mapping,
				unsigned long index, gfp_t gfp_mask);
unsigned find_get_pages(struct address_space *mapping, pgoff_t start,
			unsigned int nr_pages, struct page **pages);
unsigned find_get_pages_contig(struct address_space *mapping, pgoff_t start,
			unsigned int nr_pages, struct page **pages);
unsigned find_get_pages_tag(struct address_space *mapping, pgoff_t *index,
			int tag, unsigned int nr_pages, struct page **pages);

/*
 * Returns locked page at given index in given cache, creating it if needed.
 */
static inline struct page *grab_cache_page(struct address_space *mapping, unsigned long index)
{
	return find_or_create_page(mapping, index, mapping_gfp_mask(mapping));
}
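
/*
 * Sketch of typical use (illustrative only): the page comes back locked
 * and with an elevated refcount, so the caller must unlock and drop it:
 *
 *	struct page *page = grab_cache_page(mapping, index);
 *	if (page) {
 *		... fill or modify the page ...
 *		unlock_page(page);
 *		page_cache_release(page);
 *	}
 */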

extern struct page * grab_cache_page_nowait(struct address_space *mapping,
				unsigned long index);
extern struct page * read_cache_page(struct address_space *mapping,
				unsigned long index, filler_t *filler,
				void *data);
extern int read_cache_pages(struct address_space *mapping,
		struct list_head *pages, filler_t *filler, void *data);

static inline struct page *read_mapping_page(struct address_space *mapping,
					     unsigned long index, void *data)
{
	filler_t *filler = (filler_t *)mapping->a_ops->readpage;
	return read_cache_page(mapping, index, filler, data);
}
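
/*
 * Illustrative caller pattern (assumed, not from this header):
 * read_cache_page() returns an ERR_PTR() on failure, and the page it
 * returns may still be under I/O, so a caller would typically do:
 *
 *	struct page *page = read_mapping_page(mapping, index, NULL);
 *	if (IS_ERR(page))
 *		return PTR_ERR(page);
 *	wait_on_page_locked(page);
 *	if (!PageUptodate(page)) {
 *		page_cache_release(page);
 *		return -EIO;
 *	}
 */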

int add_to_page_cache(struct page *page, struct address_space *mapping,
				unsigned long index, gfp_t gfp_mask);
int add_to_page_cache_lru(struct page *page, struct address_space *mapping,
				unsigned long index, gfp_t gfp_mask);
extern void remove_from_page_cache(struct page *page);
extern void __remove_from_page_cache(struct page *page);

extern atomic_t nr_pagecache;

#ifdef CONFIG_SMP

#define PAGECACHE_ACCT_THRESHOLD	max(16, NR_CPUS * 2)
DECLARE_PER_CPU(long, nr_pagecache_local);

/*
 * pagecache_acct implements approximate accounting for pagecache.
 * vm_enough_memory() does not need high accuracy.  Writers will keep
 * an offset in their per-cpu arena and will spill that into the
 * global count whenever the absolute value of the local count
 * exceeds the counter's threshold.
 *
 * MUST be protected from preemption.
 * Current protection is mapping->page_lock.
 */
static inline void pagecache_acct(int count)
{
	long *local;

	local = &__get_cpu_var(nr_pagecache_local);
	*local += count;
	if (*local > PAGECACHE_ACCT_THRESHOLD || *local < -PAGECACHE_ACCT_THRESHOLD) {
		atomic_add(*local, &nr_pagecache);
		*local = 0;
	}
}

#else

static inline void pagecache_acct(int count)
{
	atomic_add(count, &nr_pagecache);
}
#endif
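
/*
 * Worked example (illustrative): with NR_CPUS == 4 the threshold is
 * max(16, 8) == 16.  A writer adding one page at a time only touches its
 * per-cpu counter until that counter reaches 17, at which point the whole
 * batch is folded into nr_pagecache with a single atomic_add() and the
 * local counter resets to zero.  The global count can therefore lag the
 * true total by up to NR_CPUS * PAGECACHE_ACCT_THRESHOLD pages.
 */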

static inline unsigned long get_page_cache_size(void)
{
	int ret = atomic_read(&nr_pagecache);
	if (unlikely(ret < 0))
		ret = 0;
	return ret;
}

/*
 * Return byte-offset into filesystem object for page.
 */
static inline loff_t page_offset(struct page *page)
{
	return ((loff_t)page->index) << PAGE_CACHE_SHIFT;
}

static inline pgoff_t linear_page_index(struct vm_area_struct *vma,
					unsigned long address)
{
	pgoff_t pgoff = (address - vma->vm_start) >> PAGE_SHIFT;
	pgoff += vma->vm_pgoff;
	return pgoff >> (PAGE_CACHE_SHIFT - PAGE_SHIFT);
}
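
/*
 * Worked example (illustrative): with 4K pages, a vma starting at
 * 0x10000000 with vm_pgoff == 16, address 0x10003000 lies three pages
 * into the vma, so linear_page_index() returns (16 + 3) == 19 (the
 * final shift is zero while PAGE_CACHE_SHIFT == PAGE_SHIFT).
 */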

extern void FASTCALL(__lock_page(struct page *page));
extern void FASTCALL(unlock_page(struct page *page));

static inline void lock_page(struct page *page)
{
	might_sleep();
	if (TestSetPageLocked(page))
		__lock_page(page);
}
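
/*
 * Illustrative pattern (not part of the original header): lock_page()
 * may sleep, so it must not be used in atomic context.  A typical
 * sequence on a page the caller already holds a reference to:
 *
 *	lock_page(page);
 *	... page is now exclusively locked; modify it ...
 *	unlock_page(page);
 */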

/*
 * This is exported only for wait_on_page_locked/wait_on_page_writeback.
 * Never use this directly!
 */
extern void FASTCALL(wait_on_page_bit(struct page *page, int bit_nr));

/*
 * Wait for a page to be unlocked.
 *
 * This must be called with the caller "holding" the page,
 * i.e. with an increased "page->count" so that the page won't
 * go away during the wait.
 */
static inline void wait_on_page_locked(struct page *page)
{
	if (PageLocked(page))
		wait_on_page_bit(page, PG_locked);
}

/*
 * Wait for a page to complete writeback.
 */
static inline void wait_on_page_writeback(struct page *page)
{
	if (PageWriteback(page))
		wait_on_page_bit(page, PG_writeback);
}

extern void end_page_writeback(struct page *page);

/*
 * Fault a userspace page into pagetables.  Return non-zero on a fault.
 *
 * This assumes that two userspace pages are always sufficient.  That's
 * not true if PAGE_CACHE_SIZE > PAGE_SIZE.
 */
static inline int fault_in_pages_writeable(char __user *uaddr, int size)
{
	int ret;

	/*
	 * Writing zeroes into userspace here is OK, because we know that if
	 * the zero gets there, we'll be overwriting it.
	 */
	ret = __put_user(0, uaddr);
	if (ret == 0) {
		char __user *end = uaddr + size - 1;

		/*
		 * If the page was already mapped, this will get a cache miss
		 * for sure, so try to avoid doing it.
		 */
		if (((unsigned long)uaddr & PAGE_MASK) !=
				((unsigned long)end & PAGE_MASK))
			ret = __put_user(0, end);
	}
	return ret;
}
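
/*
 * Illustrative use (assumed, not from this header): before copying file
 * data to a user buffer while holding locks that a page fault might also
 * need, pre-fault the destination so the copy itself is unlikely to fault:
 *
 *	if (fault_in_pages_writeable(buf, count))
 *		return -EFAULT;
 *	... copy_to_user(buf, kaddr, count) is now unlikely to fault ...
 */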

static inline void fault_in_pages_readable(const char __user *uaddr, int size)
{
	volatile char c;
	int ret;

	ret = __get_user(c, uaddr);
	if (ret == 0) {
		const char __user *end = uaddr + size - 1;

		if (((unsigned long)uaddr & PAGE_MASK) !=
				((unsigned long)end & PAGE_MASK))
			__get_user(c, end);
	}
}

#endif /* _LINUX_PAGEMAP_H */