MOXA linux-2.6.x / linux-2.6.19-uc1 from UC-7110-LX-BOOTLOADER-1.9_VERSION-4.2.tgz
[linux-2.6.19-moxart.git] fs/nfs/pagelist.c
/*
 * linux/fs/nfs/pagelist.c
 *
 * A set of helper functions for managing NFS read and write requests.
 * The main purpose of these routines is to provide support for the
 * coalescing of several requests into a single RPC call.
 *
 * Copyright 2000, 2001 (c) Trond Myklebust <trond.myklebust@fys.uio.no>
 */

#include <linux/slab.h>
#include <linux/file.h>
#include <linux/sunrpc/clnt.h>
#include <linux/nfs3.h>
#include <linux/nfs4.h>
#include <linux/nfs_page.h>
#include <linux/nfs_fs.h>
#include <linux/nfs_mount.h>

#define NFS_PARANOIA 1

static kmem_cache_t *nfs_page_cachep;
static inline struct nfs_page *
nfs_page_alloc(void)
{
        struct nfs_page *p;
        p = kmem_cache_alloc(nfs_page_cachep, SLAB_KERNEL);
        if (p) {
                memset(p, 0, sizeof(*p));
                INIT_LIST_HEAD(&p->wb_list);
        }
        return p;
}

static inline void
nfs_page_free(struct nfs_page *p)
{
        kmem_cache_free(nfs_page_cachep, p);
}
/**
 * nfs_create_request - Create an NFS read/write request.
 * @ctx: open context (credentials and state) to use
 * @inode: inode to which the request is attached
 * @page: page to write
 * @offset: starting offset within the page for the write
 * @count: number of bytes to read/write
 *
 * The page must be locked by the caller. This makes sure we never
 * create two different requests for the same page, and avoids
 * a possible deadlock when we reach the hard limit on the number
 * of dirty pages.
 * The caller should ensure it is safe to sleep in this function.
 */
struct nfs_page *
nfs_create_request(struct nfs_open_context *ctx, struct inode *inode,
                   struct page *page,
                   unsigned int offset, unsigned int count)
{
        struct nfs_server *server = NFS_SERVER(inode);
        struct nfs_page *req;

        /* Deal with hard limits. */
        for (;;) {
                /* try to allocate the request struct */
                req = nfs_page_alloc();
                if (req != NULL)
                        break;

                /* Try to free up at least one request in order to stay
                 * below the hard limit
                 */
                if (signalled() && (server->flags & NFS_MOUNT_INTR))
                        return ERR_PTR(-ERESTARTSYS);
                yield();
        }

        /* Initialize the request struct. Initially, we assume a
         * long write-back delay. This will be adjusted in
         * update_nfs_request below if the region is not locked. */
        req->wb_page    = page;
        atomic_set(&req->wb_complete, 0);
        req->wb_index   = page->index;
        page_cache_get(page);
        BUG_ON(PagePrivate(page));
        BUG_ON(!PageLocked(page));
#if 0 // mask by Victor Yu. 02-12-2007
        BUG_ON(page->mapping->host != inode);
#else
        BUG_ON(page->u.xx.mapping->host != inode);
#endif
        req->wb_offset  = offset;
        req->wb_pgbase  = offset;
        req->wb_bytes   = count;
        atomic_set(&req->wb_count, 1);
        req->wb_context = get_nfs_open_context(ctx);

        return req;
}
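#if 0
/*
 * Illustrative sketch only (not part of the original file).  It shows how a
 * caller might obtain and drop a request; the function name
 * example_write_one_page() and the "hand the request to the I/O path" step
 * are hypothetical, while the locking and error-handling rules come from
 * the comment above: the page must already be locked, and
 * nfs_create_request() can return ERR_PTR(-ERESTARTSYS) on an "intr" mount.
 */
static int example_write_one_page(struct nfs_open_context *ctx,
                                  struct inode *inode, struct page *page,
                                  unsigned int offset, unsigned int count)
{
        struct nfs_page *req;

        BUG_ON(!PageLocked(page));              /* caller holds the page lock */
        req = nfs_create_request(ctx, inode, page, offset, count);
        if (IS_ERR(req))
                return PTR_ERR(req);            /* e.g. -ERESTARTSYS */
        /* ... hand the request to the read/write path here ... */
        nfs_release_request(req);               /* drop the reference taken at creation */
        return 0;
}
#endif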
/**
 * nfs_unlock_request - Unlock request and wake up sleepers.
 * @req: request to unlock
 */
void nfs_unlock_request(struct nfs_page *req)
{
        if (!NFS_WBACK_BUSY(req)) {
                printk(KERN_ERR "NFS: Invalid unlock attempted\n");
                BUG();
        }
        smp_mb__before_clear_bit();
        clear_bit(PG_BUSY, &req->wb_flags);
        smp_mb__after_clear_bit();
        wake_up_bit(&req->wb_flags, PG_BUSY);
        nfs_release_request(req);
}

/**
 * nfs_set_page_writeback_locked - Lock a request for writeback
 * @req: request to lock and tag for writeback
 */
int nfs_set_page_writeback_locked(struct nfs_page *req)
{
        struct nfs_inode *nfsi = NFS_I(req->wb_context->dentry->d_inode);

        if (!nfs_lock_request(req))
                return 0;
        radix_tree_tag_set(&nfsi->nfs_page_tree, req->wb_index, NFS_PAGE_TAG_WRITEBACK);
        return 1;
}

/**
 * nfs_clear_page_writeback - Unlock request and wake up sleepers
 * @req: request to unlock
 */
void nfs_clear_page_writeback(struct nfs_page *req)
{
        struct nfs_inode *nfsi = NFS_I(req->wb_context->dentry->d_inode);

        if (req->wb_page != NULL) {
                spin_lock(&nfsi->req_lock);
                radix_tree_tag_clear(&nfsi->nfs_page_tree, req->wb_index, NFS_PAGE_TAG_WRITEBACK);
                spin_unlock(&nfsi->req_lock);
        }
        nfs_unlock_request(req);
}
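#if 0
/*
 * Illustrative sketch only (not part of the original file).  It shows the
 * intended pairing of the writeback helpers: take the inode's req_lock,
 * lock and tag the request with nfs_set_page_writeback_locked(), drop the
 * lock, perform the I/O, then clear the tag and wake waiters with
 * nfs_clear_page_writeback().  example_start_writeback() is hypothetical.
 */
static void example_start_writeback(struct nfs_inode *nfsi, struct nfs_page *req)
{
        spin_lock(&nfsi->req_lock);
        if (!nfs_set_page_writeback_locked(req)) {
                /* someone else already holds PG_BUSY for this request */
                spin_unlock(&nfsi->req_lock);
                return;
        }
        spin_unlock(&nfsi->req_lock);

        /* ... issue the RPC for this request ... */

        nfs_clear_page_writeback(req);  /* clears the tag, unlocks, wakes sleepers */
}
#endif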
/**
 * nfs_clear_request - Free up all resources allocated to the request
 * @req: request to clean up
 *
 * Release page resources associated with a write request after it
 * has completed.
 */
void nfs_clear_request(struct nfs_page *req)
{
        struct page *page = req->wb_page;
        if (page != NULL) {
                page_cache_release(page);
                req->wb_page = NULL;
        }
}

/**
 * nfs_release_request - Release the count on an NFS read/write request
 * @req: request to release
 *
 * Note: Should never be called with the spinlock held!
 */
void
nfs_release_request(struct nfs_page *req)
{
        if (!atomic_dec_and_test(&req->wb_count))
                return;

#ifdef NFS_PARANOIA
        BUG_ON(!list_empty(&req->wb_list));
        BUG_ON(NFS_WBACK_BUSY(req));
#endif

        /* Release struct file or cached credential */
        nfs_clear_request(req);
        put_nfs_open_context(req->wb_context);
        nfs_page_free(req);
}
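/*
 * Wait-queue callback used by nfs_wait_on_request() below: abort the wait
 * with -ERESTARTSYS if a signal is pending, otherwise sleep again.
 */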
static int nfs_wait_bit_interruptible(void *word)
{
        int ret = 0;

        if (signal_pending(current))
                ret = -ERESTARTSYS;
        else
                schedule();
        return ret;
}

/**
 * nfs_wait_on_request - Wait for a request to complete.
 * @req: request to wait upon.
 *
 * Interruptible by signals only if mounted with intr flag.
 * The user is responsible for holding a count on the request.
 */
int
nfs_wait_on_request(struct nfs_page *req)
{
        struct rpc_clnt *clnt = NFS_CLIENT(req->wb_context->dentry->d_inode);
        sigset_t oldmask;
        int ret = 0;

        if (!test_bit(PG_BUSY, &req->wb_flags))
                goto out;
        /*
         * Note: the call to rpc_clnt_sigmask() suffices to ensure that we
         *       are not interrupted if the intr flag is not set
         */
        rpc_clnt_sigmask(clnt, &oldmask);
        ret = out_of_line_wait_on_bit(&req->wb_flags, PG_BUSY,
                        nfs_wait_bit_interruptible, TASK_INTERRUPTIBLE);
        rpc_clnt_sigunmask(clnt, &oldmask);
out:
        return ret;
}
/**
 * nfs_coalesce_requests - Split coalesced requests out from a list.
 * @head: source list
 * @dst: destination list
 * @nmax: maximum number of requests to coalesce
 *
 * Moves a maximum of 'nmax' elements from one list to another.
 * The elements are checked to ensure that they form a contiguous set
 * of pages, and that the RPC credentials are the same.
 */
int
nfs_coalesce_requests(struct list_head *head, struct list_head *dst,
                      unsigned int nmax)
{
        struct nfs_page *req = NULL;
        unsigned int npages = 0;

        while (!list_empty(head)) {
                struct nfs_page *prev = req;

                req = nfs_list_entry(head->next);
                if (prev) {
                        if (req->wb_context->cred != prev->wb_context->cred)
                                break;
                        if (req->wb_context->lockowner != prev->wb_context->lockowner)
                                break;
                        if (req->wb_context->state != prev->wb_context->state)
                                break;
                        if (req->wb_index != (prev->wb_index + 1))
                                break;

                        if (req->wb_pgbase != 0)
                                break;
                }
                nfs_list_remove_request(req);
                nfs_list_add_request(req, dst);
                npages++;
                if (req->wb_pgbase + req->wb_bytes != PAGE_CACHE_SIZE)
                        break;
                if (npages >= nmax)
                        break;
        }
        return npages;
}
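#if 0
/*
 * Illustrative sketch only (not part of the original file).  It shows how a
 * flush routine might drain a request list into RPC-sized batches; the real
 * callers live in fs/nfs/read.c and fs/nfs/write.c.  The name
 * example_flush_list() and the 'send' callback are hypothetical; 'send' is
 * assumed to take ownership of the requests placed on 'batch'.
 */
static void example_flush_list(struct list_head *head, unsigned int nmax,
                               void (*send)(struct list_head *batch))
{
        LIST_HEAD(batch);

        while (!list_empty(head)) {
                if (nfs_coalesce_requests(head, &batch, nmax) == 0)
                        break;
                /* 'batch' now holds up to nmax contiguous requests sharing
                 * credentials, lock owner and open state. */
                send(&batch);
                INIT_LIST_HEAD(&batch);
        }
}
#endif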
#define NFS_SCAN_MAXENTRIES 16
/**
 * nfs_scan_lock_dirty - Scan the radix tree for dirty requests
 * @nfsi: NFS inode
 * @dst: Destination list
 * @idx_start: lower bound of page->index to scan
 * @npages: idx_start + npages sets the upper bound to scan.
 *
 * Moves elements from one of the inode request lists.
 * If the number of requests is set to 0, the entire address_space
 * starting at index idx_start, is scanned.
 * The requests are *not* checked to ensure that they form a contiguous set.
 * You must be holding the inode's req_lock when calling this function.
 */
int
nfs_scan_lock_dirty(struct nfs_inode *nfsi, struct list_head *dst,
              unsigned long idx_start, unsigned int npages)
{
        struct nfs_page *pgvec[NFS_SCAN_MAXENTRIES];
        struct nfs_page *req;
        unsigned long idx_end;
        int found, i;
        int res;

        res = 0;
        if (npages == 0)
                idx_end = ~0;
        else
                idx_end = idx_start + npages - 1;

        for (;;) {
                found = radix_tree_gang_lookup_tag(&nfsi->nfs_page_tree,
                                (void **)&pgvec[0], idx_start, NFS_SCAN_MAXENTRIES,
                                NFS_PAGE_TAG_DIRTY);
                if (found <= 0)
                        break;
                for (i = 0; i < found; i++) {
                        req = pgvec[i];
                        if (req->wb_index > idx_end)
                                goto out;

                        idx_start = req->wb_index + 1;

                        if (nfs_set_page_writeback_locked(req)) {
                                radix_tree_tag_clear(&nfsi->nfs_page_tree,
                                                req->wb_index, NFS_PAGE_TAG_DIRTY);
                                nfs_list_remove_request(req);
                                nfs_list_add_request(req, dst);
                                dec_zone_page_state(req->wb_page, NR_FILE_DIRTY);
                                res++;
                        }
                }
        }
out:
        return res;
}
/**
 * nfs_scan_list - Scan a list for matching requests
 * @nfsi: NFS inode
 * @head: One of the NFS inode request lists
 * @dst: Destination list
 * @idx_start: lower bound of page->index to scan
 * @npages: idx_start + npages sets the upper bound to scan.
 *
 * Moves elements from one of the inode request lists.
 * If the number of requests is set to 0, the entire address_space
 * starting at index idx_start, is scanned.
 * The requests are *not* checked to ensure that they form a contiguous set.
 * You must be holding the inode's req_lock when calling this function.
 */
int nfs_scan_list(struct nfs_inode *nfsi, struct list_head *head,
                struct list_head *dst, unsigned long idx_start,
                unsigned int npages)
{
        struct nfs_page *pgvec[NFS_SCAN_MAXENTRIES];
        struct nfs_page *req;
        unsigned long idx_end;
        int found, i;
        int res;

        res = 0;
        if (npages == 0)
                idx_end = ~0;
        else
                idx_end = idx_start + npages - 1;

        for (;;) {
                found = radix_tree_gang_lookup(&nfsi->nfs_page_tree,
                                (void **)&pgvec[0], idx_start,
                                NFS_SCAN_MAXENTRIES);
                if (found <= 0)
                        break;
                for (i = 0; i < found; i++) {
                        req = pgvec[i];
                        if (req->wb_index > idx_end)
                                goto out;
                        idx_start = req->wb_index + 1;
                        if (req->wb_list_head != head)
                                continue;
                        if (nfs_set_page_writeback_locked(req)) {
                                nfs_list_remove_request(req);
                                nfs_list_add_request(req, dst);
                                res++;
                        }
                }
        }
out:
        return res;
}
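#if 0
/*
 * Illustrative sketch only (not part of the original file).  It shows how a
 * flush path might use nfs_scan_list() under the inode's req_lock (as the
 * comment above requires) and then hand the locked requests on to
 * nfs_coalesce_requests().  example_scan_and_flush() and the 'src' list are
 * hypothetical.
 */
static int example_scan_and_flush(struct nfs_inode *nfsi, struct list_head *src)
{
        LIST_HEAD(pending);
        int moved;

        spin_lock(&nfsi->req_lock);
        moved = nfs_scan_list(nfsi, src, &pending, 0, 0);       /* 0 pages = whole file */
        spin_unlock(&nfsi->req_lock);

        /* every request on 'pending' is now PG_BUSY and tagged for writeback;
         * it can be batched into RPCs with nfs_coalesce_requests(). */
        return moved;
}
#endif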
/* Set up the slab cache used for struct nfs_page allocations. */
int __init nfs_init_nfspagecache(void)
{
        nfs_page_cachep = kmem_cache_create("nfs_page",
                                            sizeof(struct nfs_page),
                                            0, SLAB_HWCACHE_ALIGN,
                                            NULL, NULL);
        if (nfs_page_cachep == NULL)
                return -ENOMEM;

        return 0;
}

/* Tear down the slab cache created by nfs_init_nfspagecache(). */
void nfs_destroy_nfspagecache(void)
{
        kmem_cache_destroy(nfs_page_cachep);
}