MOXA linux-2.6.x / linux-2.6.19-uc1 from UC-7110-LX-BOOTLOADER-1.9_VERSION-4.2.tgz
[linux-2.6.19-moxart.git] fs/nfs/write.c
/*
 * linux/fs/nfs/write.c
 *
 * Writing file data over NFS.
 *
 * We do it like this: When a (user) process wishes to write data to an
 * NFS file, a write request is allocated that contains the RPC task data
 * plus some info on the page to be written, and added to the inode's
 * write chain. If the process writes past the end of the page, an async
 * RPC call to write the page is scheduled immediately; otherwise, the call
 * is delayed for a few seconds.
 *
 * Just like readahead, no async I/O is performed if wsize < PAGE_SIZE.
 *
 * Write requests are kept on the inode's writeback list. Each entry in
 * that list references the page (portion) to be written. When the
 * cache timeout has expired, the RPC task is woken up, and tries to
 * lock the page. As soon as it manages to do so, the request is moved
 * from the writeback list to the writelock list.
 *
 * Note: we must make sure never to confuse the inode passed in the
 * write_page request with the one in page->inode. As far as I understand
 * it, these are different when doing a swap-out.
 *
 * To understand everything that goes on here and in the NFS read code,
 * one should be aware that a page is locked in exactly one of the following
 * cases:
 *
 *  - A write request is in progress.
 *  - A user process is in generic_file_write/nfs_update_page
 *  - A user process is in generic_file_read
 *
 * Also note that because of the way pages are invalidated in
 * nfs_revalidate_inode, the following assertions hold:
 *
 *  - If a page is dirty, there will be no read requests (a page will
 *    not be re-read unless invalidated by nfs_revalidate_inode).
 *  - If the page is not uptodate, there will be no pending write
 *    requests, and no process will be in nfs_update_page.
 *
 * FIXME: Interaction with the vmscan routines is not optimal yet.
 * Either vmscan must be made nfs-savvy, or we need a different page
 * reclaim concept that supports something like FS-independent
 * buffer_heads with a b_ops-> field.
 *
 * Copyright (C) 1996, 1997, Olaf Kirch <okir@monad.swb.de>
 */
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/file.h>
#include <linux/writeback.h>

#include <linux/sunrpc/clnt.h>
#include <linux/nfs_fs.h>
#include <linux/nfs_mount.h>
#include <linux/nfs_page.h>
#include <linux/backing-dev.h>

#include <asm/uaccess.h>
#include <linux/smp_lock.h>

#include "delegation.h"
#include "iostat.h"

#define NFSDBG_FACILITY NFSDBG_PAGECACHE

#define MIN_POOL_WRITE (32)
#define MIN_POOL_COMMIT (4)
/*
 * Local function declarations
 */
static struct nfs_page * nfs_update_request(struct nfs_open_context*,
					    struct inode *,
					    struct page *,
					    unsigned int, unsigned int);
static int nfs_wait_on_write_congestion(struct address_space *, int);
static int nfs_wait_on_requests(struct inode *, unsigned long, unsigned int);
static int nfs_flush_inode(struct inode *inode, unsigned long idx_start,
			   unsigned int npages, int how);
static const struct rpc_call_ops nfs_write_partial_ops;
static const struct rpc_call_ops nfs_write_full_ops;
static const struct rpc_call_ops nfs_commit_ops;

static kmem_cache_t *nfs_wdata_cachep;
static mempool_t *nfs_wdata_mempool;
static mempool_t *nfs_commit_mempool;

static DECLARE_WAIT_QUEUE_HEAD(nfs_write_congestion);
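/*
 * Allocate an nfs_write_data struct for a COMMIT call. The allocation
 * comes from a mempool with SLAB_NOFS, so it makes forward progress
 * under memory pressure without recursing into the filesystem.
 */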
struct nfs_write_data *nfs_commit_alloc(void)
{
	struct nfs_write_data *p = mempool_alloc(nfs_commit_mempool, SLAB_NOFS);

	if (p) {
		memset(p, 0, sizeof(*p));
		INIT_LIST_HEAD(&p->pages);
	}
	return p;
}
void nfs_commit_free(struct nfs_write_data *p)
{
	if (p && (p->pagevec != &p->page_array[0]))
		kfree(p->pagevec);
	mempool_free(p, nfs_commit_mempool);
}
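/*
 * Allocate an nfs_write_data struct covering @len bytes of page data.
 * The embedded page_array is used when it is large enough; otherwise a
 * separate page vector is allocated (and freed in nfs_writedata_free()).
 */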
struct nfs_write_data *nfs_writedata_alloc(size_t len)
{
	unsigned int pagecount = (len + PAGE_SIZE - 1) >> PAGE_SHIFT;
	struct nfs_write_data *p = mempool_alloc(nfs_wdata_mempool, SLAB_NOFS);

	if (p) {
		memset(p, 0, sizeof(*p));
		INIT_LIST_HEAD(&p->pages);
		p->npages = pagecount;
		if (pagecount <= ARRAY_SIZE(p->page_array))
			p->pagevec = p->page_array;
		else {
			p->pagevec = kcalloc(pagecount, sizeof(struct page *), GFP_NOFS);
			if (!p->pagevec) {
				mempool_free(p, nfs_wdata_mempool);
				p = NULL;
			}
		}
	}
	return p;
}
static void nfs_writedata_free(struct nfs_write_data *p)
{
	if (p && (p->pagevec != &p->page_array[0]))
		kfree(p->pagevec);
	mempool_free(p, nfs_wdata_mempool);
}

void nfs_writedata_release(void *wdata)
{
	nfs_writedata_free(wdata);
}
/* Adjust the file length if we're writing beyond the end */
static void nfs_grow_file(struct page *page, unsigned int offset, unsigned int count)
{
#if 0 // mask by Victor Yu. 02-12-2007
	struct inode *inode = page->mapping->host;
#else
	struct inode *inode = page->u.xx.mapping->host;
#endif
	loff_t end, i_size = i_size_read(inode);
	unsigned long end_index = (i_size - 1) >> PAGE_CACHE_SHIFT;

	if (i_size > 0 && page->index < end_index)
		return;
	end = ((loff_t)page->index << PAGE_CACHE_SHIFT) + ((loff_t)offset+count);
	if (i_size >= end)
		return;
	nfs_inc_stats(inode, NFSIOS_EXTENDWRITE);
	i_size_write(inode, end);
}
/* We can set the PG_uptodate flag if we see that a write request
 * covers the full page.
 */
static void nfs_mark_uptodate(struct page *page, unsigned int base, unsigned int count)
{
	loff_t end_offs;

	if (PageUptodate(page))
		return;
	if (base != 0)
		return;
	if (count == PAGE_CACHE_SIZE) {
		SetPageUptodate(page);
		return;
	}

#if 0 // mask by Victor Yu. 02-12-2007
	end_offs = i_size_read(page->mapping->host) - 1;
#else
	end_offs = i_size_read(page->u.xx.mapping->host) - 1;
#endif
	if (end_offs < 0)
		return;
	/* Is this the last page? */
	if (page->index != (unsigned long)(end_offs >> PAGE_CACHE_SHIFT))
		return;
	/* This is the last page: set PG_uptodate if we cover the entire
	 * extent of the data, then zero the rest of the page.
	 */
	if (count == (unsigned int)(end_offs & (PAGE_CACHE_SIZE - 1)) + 1) {
		memclear_highpage_flush(page, count, PAGE_CACHE_SIZE - count);
		SetPageUptodate(page);
	}
}
/*
 * Write a page synchronously.
 * Offset is the data offset within the page.
 */
static int nfs_writepage_sync(struct nfs_open_context *ctx, struct inode *inode,
		struct page *page, unsigned int offset, unsigned int count,
		int how)
{
	unsigned int wsize = NFS_SERVER(inode)->wsize;
	int result, written = 0;
	struct nfs_write_data *wdata;

	wdata = nfs_writedata_alloc(wsize);
	if (!wdata)
		return -ENOMEM;

	wdata->flags = how;
	wdata->cred = ctx->cred;
	wdata->inode = inode;
	wdata->args.fh = NFS_FH(inode);
	wdata->args.context = ctx;
	wdata->args.pages = &page;
	wdata->args.stable = NFS_FILE_SYNC;
	wdata->args.pgbase = offset;
	wdata->args.count = wsize;
	wdata->res.fattr = &wdata->fattr;
	wdata->res.verf = &wdata->verf;

	dprintk("NFS: nfs_writepage_sync(%s/%Ld %d@%Ld)\n",
		inode->i_sb->s_id,
		(long long)NFS_FILEID(inode),
		count, (long long)(page_offset(page) + offset));

	set_page_writeback(page);
	nfs_begin_data_update(inode);
	do {
		if (count < wsize)
			wdata->args.count = count;
		wdata->args.offset = page_offset(page) + wdata->args.pgbase;

		result = NFS_PROTO(inode)->write(wdata);

		if (result < 0) {
			/* Must mark the page invalid after I/O error */
			ClearPageUptodate(page);
			goto io_error;
		}
		if (result < wdata->args.count)
			printk(KERN_WARNING "NFS: short write, count=%u, result=%d\n",
					wdata->args.count, result);

		wdata->args.offset += result;
		wdata->args.pgbase += result;
		written += result;
		count -= result;
		nfs_add_stats(inode, NFSIOS_SERVERWRITTENBYTES, result);
	} while (count);
	/* Update file length */
	nfs_grow_file(page, offset, written);
	/* Set the PG_uptodate flag? */
	nfs_mark_uptodate(page, offset, written);

	if (PageError(page))
		ClearPageError(page);

io_error:
	nfs_end_data_update(inode);
	end_page_writeback(page);
	nfs_writedata_free(wdata);
	return written ? written : result;
}
static int nfs_writepage_async(struct nfs_open_context *ctx,
		struct inode *inode, struct page *page,
		unsigned int offset, unsigned int count)
{
	struct nfs_page *req;

	req = nfs_update_request(ctx, inode, page, offset, count);
	if (IS_ERR(req))
		return PTR_ERR(req);
	/* Update file length */
	nfs_grow_file(page, offset, count);
	/* Set the PG_uptodate flag? */
	nfs_mark_uptodate(page, offset, count);
	nfs_unlock_request(req);
	return 0;
}
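/*
 * Map the reason for this writeback onto an RPC flush priority:
 * reclaim-driven writeback is urgent, kupdate-driven writeback is
 * background work, and everything else runs at normal priority.
 */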
static int wb_priority(struct writeback_control *wbc)
{
	if (wbc->for_reclaim)
		return FLUSH_HIGHPRI;
	if (wbc->for_kupdate)
		return FLUSH_LOWPRI;
	return 0;
}
/*
 * Write an mmapped page to the server.
 */
int nfs_writepage(struct page *page, struct writeback_control *wbc)
{
	struct nfs_open_context *ctx;
#if 0 // mask by Victor Yu. 02-12-2007
	struct inode *inode = page->mapping->host;
#else
	struct inode *inode = page->u.xx.mapping->host;
#endif
	unsigned long end_index;
	unsigned offset = PAGE_CACHE_SIZE;
	loff_t i_size = i_size_read(inode);
	int inode_referenced = 0;
	int priority = wb_priority(wbc);
	int err;

	nfs_inc_stats(inode, NFSIOS_VFSWRITEPAGE);
	nfs_add_stats(inode, NFSIOS_WRITEPAGES, 1);

	/*
	 * Note: We need to ensure that we have a reference to the inode
	 *       if we are to do asynchronous writes. If not, waiting
	 *       in nfs_wait_on_request() may deadlock with clear_inode().
	 *
	 *       If igrab() fails here, then it is in any case safe to
	 *       call nfs_wb_page(), since there will be no pending writes.
	 */
	if (igrab(inode) != 0)
		inode_referenced = 1;
	end_index = i_size >> PAGE_CACHE_SHIFT;

	/* Ensure we've flushed out any previous writes */
	nfs_wb_page_priority(inode, page, priority);

	/* easy case */
	if (page->index < end_index)
		goto do_it;
	/* things got complicated... */
	offset = i_size & (PAGE_CACHE_SIZE-1);

	/* OK, are we completely out? */
	err = 0; /* potential race with truncate - ignore */
	if (page->index >= end_index+1 || !offset)
		goto out;
do_it:
	ctx = nfs_find_open_context(inode, NULL, FMODE_WRITE);
	if (ctx == NULL) {
		err = -EBADF;
		goto out;
	}
	lock_kernel();
	if (!IS_SYNC(inode) && inode_referenced) {
		err = nfs_writepage_async(ctx, inode, page, 0, offset);
		if (!wbc->for_writepages)
			nfs_flush_inode(inode, 0, 0, wb_priority(wbc));
	} else {
		err = nfs_writepage_sync(ctx, inode, page, 0,
						offset, priority);
		if (err >= 0) {
			if (err != offset)
				redirty_page_for_writepage(wbc, page);
			err = 0;
		}
	}
	unlock_kernel();
	put_nfs_open_context(ctx);
out:
	unlock_page(page);
	if (inode_referenced)
		iput(inode);
	return err;
}
/*
 * Note: causes nfs_update_request() to block on the assumption
 * that the writeback is generated due to memory pressure.
 */
int nfs_writepages(struct address_space *mapping, struct writeback_control *wbc)
{
	struct backing_dev_info *bdi = mapping->backing_dev_info;
	struct inode *inode = mapping->host;
	int err;

	nfs_inc_stats(inode, NFSIOS_VFSWRITEPAGES);

	err = generic_writepages(mapping, wbc);
	if (err)
		return err;
	while (test_and_set_bit(BDI_write_congested, &bdi->state) != 0) {
		if (wbc->nonblocking)
			return 0;
		nfs_wait_on_write_congestion(mapping, 0);
	}
	err = nfs_flush_inode(inode, 0, 0, wb_priority(wbc));
	if (err < 0)
		goto out;
	nfs_add_stats(inode, NFSIOS_WRITEPAGES, err);
	wbc->nr_to_write -= err;
	if (!wbc->nonblocking && wbc->sync_mode == WB_SYNC_ALL) {
		err = nfs_wait_on_requests(inode, 0, 0);
		if (err < 0)
			goto out;
	}
	err = nfs_commit_inode(inode, wb_priority(wbc));
	if (err > 0) {
		wbc->nr_to_write -= err;
		err = 0;
	}
out:
	clear_bit(BDI_write_congested, &bdi->state);
	wake_up_all(&nfs_write_congestion);
	congestion_end(WRITE);
	return err;
}
/*
 * Insert a write request into an inode
 */
static int nfs_inode_add_request(struct inode *inode, struct nfs_page *req)
{
	struct nfs_inode *nfsi = NFS_I(inode);
	int error;

	error = radix_tree_insert(&nfsi->nfs_page_tree, req->wb_index, req);
	BUG_ON(error == -EEXIST);
	if (error)
		return error;
	if (!nfsi->npages) {
		igrab(inode);
		nfs_begin_data_update(inode);
		if (nfs_have_delegation(inode, FMODE_WRITE))
			nfsi->change_attr++;
	}
	SetPagePrivate(req->wb_page);
	nfsi->npages++;
	atomic_inc(&req->wb_count);
	return 0;
}
/*
 * Remove a write request from an inode
 */
static void nfs_inode_remove_request(struct nfs_page *req)
{
	struct inode *inode = req->wb_context->dentry->d_inode;
	struct nfs_inode *nfsi = NFS_I(inode);

	BUG_ON(!NFS_WBACK_BUSY(req));

	spin_lock(&nfsi->req_lock);
	ClearPagePrivate(req->wb_page);
	radix_tree_delete(&nfsi->nfs_page_tree, req->wb_index);
	nfsi->npages--;
	if (!nfsi->npages) {
		spin_unlock(&nfsi->req_lock);
		nfs_end_data_update(inode);
		iput(inode);
	} else
		spin_unlock(&nfsi->req_lock);
	nfs_clear_request(req);
	nfs_release_request(req);
}
/*
 * Find a request
 */
static inline struct nfs_page *
_nfs_find_request(struct inode *inode, unsigned long index)
{
	struct nfs_inode *nfsi = NFS_I(inode);
	struct nfs_page *req;

	req = (struct nfs_page*)radix_tree_lookup(&nfsi->nfs_page_tree, index);
	if (req)
		atomic_inc(&req->wb_count);
	return req;
}

static struct nfs_page *
nfs_find_request(struct inode *inode, unsigned long index)
{
	struct nfs_page *req;
	struct nfs_inode *nfsi = NFS_I(inode);

	spin_lock(&nfsi->req_lock);
	req = _nfs_find_request(inode, index);
	spin_unlock(&nfsi->req_lock);
	return req;
}
/*
 * Add a request to the inode's dirty list.
 */
static void
nfs_mark_request_dirty(struct nfs_page *req)
{
	struct inode *inode = req->wb_context->dentry->d_inode;
	struct nfs_inode *nfsi = NFS_I(inode);

	spin_lock(&nfsi->req_lock);
	radix_tree_tag_set(&nfsi->nfs_page_tree,
			req->wb_index, NFS_PAGE_TAG_DIRTY);
	nfs_list_add_request(req, &nfsi->dirty);
	nfsi->ndirty++;
	spin_unlock(&nfsi->req_lock);
	inc_zone_page_state(req->wb_page, NR_FILE_DIRTY);
	mark_inode_dirty(inode);
}
/*
 * Check if a request is dirty
 */
static inline int
nfs_dirty_request(struct nfs_page *req)
{
	struct nfs_inode *nfsi = NFS_I(req->wb_context->dentry->d_inode);
	return !list_empty(&req->wb_list) && req->wb_list_head == &nfsi->dirty;
}
#if defined(CONFIG_NFS_V3) || defined(CONFIG_NFS_V4)
/*
 * Add a request to the inode's commit list.
 */
static void
nfs_mark_request_commit(struct nfs_page *req)
{
	struct inode *inode = req->wb_context->dentry->d_inode;
	struct nfs_inode *nfsi = NFS_I(inode);

	spin_lock(&nfsi->req_lock);
	nfs_list_add_request(req, &nfsi->commit);
	nfsi->ncommit++;
	spin_unlock(&nfsi->req_lock);
	inc_zone_page_state(req->wb_page, NR_UNSTABLE_NFS);
	mark_inode_dirty(inode);
}
#endif
/*
 * Wait for a request to complete.
 *
 * Interruptible by signals only if mounted with intr flag.
 */
static int nfs_wait_on_requests_locked(struct inode *inode, unsigned long idx_start, unsigned int npages)
{
	struct nfs_inode *nfsi = NFS_I(inode);
	struct nfs_page *req;
	unsigned long idx_end, next;
	unsigned int res = 0;
	int error;

	if (npages == 0)
		idx_end = ~0;
	else
		idx_end = idx_start + npages - 1;

	next = idx_start;
	while (radix_tree_gang_lookup_tag(&nfsi->nfs_page_tree, (void **)&req, next, 1, NFS_PAGE_TAG_WRITEBACK)) {
		if (req->wb_index > idx_end)
			break;

		next = req->wb_index + 1;
		BUG_ON(!NFS_WBACK_BUSY(req));

		atomic_inc(&req->wb_count);
		spin_unlock(&nfsi->req_lock);
		error = nfs_wait_on_request(req);
		nfs_release_request(req);
		spin_lock(&nfsi->req_lock);
		if (error < 0)
			return error;
		res++;
	}
	return res;
}
static int nfs_wait_on_requests(struct inode *inode, unsigned long idx_start, unsigned int npages)
{
	struct nfs_inode *nfsi = NFS_I(inode);
	int ret;

	spin_lock(&nfsi->req_lock);
	ret = nfs_wait_on_requests_locked(inode, idx_start, npages);
	spin_unlock(&nfsi->req_lock);
	return ret;
}
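/*
 * The two helpers below tear down pending requests without issuing any
 * I/O; they are used on the FLUSH_INVALIDATE paths in
 * nfs_sync_inode_wait(), where the cached data is being discarded.
 */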
static void nfs_cancel_dirty_list(struct list_head *head)
{
	struct nfs_page *req;
	while(!list_empty(head)) {
		req = nfs_list_entry(head->next);
		nfs_list_remove_request(req);
		nfs_inode_remove_request(req);
		nfs_clear_page_writeback(req);
	}
}

static void nfs_cancel_commit_list(struct list_head *head)
{
	struct nfs_page *req;

	while(!list_empty(head)) {
		req = nfs_list_entry(head->next);
		dec_zone_page_state(req->wb_page, NR_UNSTABLE_NFS);
		nfs_list_remove_request(req);
		nfs_inode_remove_request(req);
		nfs_unlock_request(req);
	}
}
/*
 * nfs_scan_dirty - Scan an inode for dirty requests
 * @inode: NFS inode to scan
 * @dst: destination list
 * @idx_start: lower bound of page->index to scan.
 * @npages: idx_start + npages sets the upper bound to scan.
 *
 * Moves requests from the inode's dirty page list.
 * The requests are *not* checked to ensure that they form a contiguous set.
 */
static int
nfs_scan_dirty(struct inode *inode, struct list_head *dst, unsigned long idx_start, unsigned int npages)
{
	struct nfs_inode *nfsi = NFS_I(inode);
	int res = 0;

	if (nfsi->ndirty != 0) {
		res = nfs_scan_lock_dirty(nfsi, dst, idx_start, npages);
		nfsi->ndirty -= res;
		if ((nfsi->ndirty == 0) != list_empty(&nfsi->dirty))
			printk(KERN_ERR "NFS: desynchronized value of nfs_i.ndirty.\n");
	}
	return res;
}
#if defined(CONFIG_NFS_V3) || defined(CONFIG_NFS_V4)
/*
 * nfs_scan_commit - Scan an inode for commit requests
 * @inode: NFS inode to scan
 * @dst: destination list
 * @idx_start: lower bound of page->index to scan.
 * @npages: idx_start + npages sets the upper bound to scan.
 *
 * Moves requests from the inode's 'commit' request list.
 * The requests are *not* checked to ensure that they form a contiguous set.
 */
static int
nfs_scan_commit(struct inode *inode, struct list_head *dst, unsigned long idx_start, unsigned int npages)
{
	struct nfs_inode *nfsi = NFS_I(inode);
	int res = 0;

	if (nfsi->ncommit != 0) {
		res = nfs_scan_list(nfsi, &nfsi->commit, dst, idx_start, npages);
		nfsi->ncommit -= res;
		if ((nfsi->ncommit == 0) != list_empty(&nfsi->commit))
			printk(KERN_ERR "NFS: desynchronized value of nfs_i.ncommit.\n");
	}
	return res;
}
#else
static inline int nfs_scan_commit(struct inode *inode, struct list_head *dst, unsigned long idx_start, unsigned int npages)
{
	return 0;
}
#endif
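/*
 * Throttle writers while the backing device is congested. When @intr is
 * set (the "intr" mount option), the sleep is interruptible and is
 * performed under the RPC client's signal mask.
 */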
static int nfs_wait_on_write_congestion(struct address_space *mapping, int intr)
{
	struct backing_dev_info *bdi = mapping->backing_dev_info;
	DEFINE_WAIT(wait);
	int ret = 0;

	might_sleep();

	if (!bdi_write_congested(bdi))
		return 0;

	nfs_inc_stats(mapping->host, NFSIOS_CONGESTIONWAIT);

	if (intr) {
		struct rpc_clnt *clnt = NFS_CLIENT(mapping->host);
		sigset_t oldset;

		rpc_clnt_sigmask(clnt, &oldset);
		prepare_to_wait(&nfs_write_congestion, &wait, TASK_INTERRUPTIBLE);
		if (bdi_write_congested(bdi)) {
			if (signalled())
				ret = -ERESTARTSYS;
			else
				schedule();
		}
		rpc_clnt_sigunmask(clnt, &oldset);
	} else {
		prepare_to_wait(&nfs_write_congestion, &wait, TASK_UNINTERRUPTIBLE);
		if (bdi_write_congested(bdi))
			schedule();
	}
	finish_wait(&nfs_write_congestion, &wait);
	return ret;
}
/*
 * Try to update any existing write request, or create one if there is none.
 * In order to match, the request's credentials must match those of
 * the calling process.
 *
 * Note: Should always be called with the Page Lock held!
 */
static struct nfs_page * nfs_update_request(struct nfs_open_context* ctx,
		struct inode *inode, struct page *page,
		unsigned int offset, unsigned int bytes)
{
	struct nfs_server *server = NFS_SERVER(inode);
	struct nfs_inode *nfsi = NFS_I(inode);
	struct nfs_page *req, *new = NULL;
	unsigned long rqend, end;

	end = offset + bytes;

#if 0 // mask by Victor Yu. 02-12-2007
	if (nfs_wait_on_write_congestion(page->mapping, server->flags & NFS_MOUNT_INTR))
#else
	if (nfs_wait_on_write_congestion(page->u.xx.mapping, server->flags & NFS_MOUNT_INTR))
#endif
		return ERR_PTR(-ERESTARTSYS);
	for (;;) {
		/* Loop over all inode entries and see if we find
		 * a request for the page we wish to update
		 */
		spin_lock(&nfsi->req_lock);
		req = _nfs_find_request(inode, page->index);
		if (req) {
			if (!nfs_lock_request_dontget(req)) {
				int error;

				spin_unlock(&nfsi->req_lock);
				error = nfs_wait_on_request(req);
				nfs_release_request(req);
				if (error < 0) {
					if (new)
						nfs_release_request(new);
					return ERR_PTR(error);
				}
				continue;
			}
			spin_unlock(&nfsi->req_lock);
			if (new)
				nfs_release_request(new);
			break;
		}

		if (new) {
			int error;

			nfs_lock_request_dontget(new);
			error = nfs_inode_add_request(inode, new);
			if (error) {
				spin_unlock(&nfsi->req_lock);
				nfs_unlock_request(new);
				return ERR_PTR(error);
			}
			spin_unlock(&nfsi->req_lock);
			nfs_mark_request_dirty(new);
			return new;
		}
		spin_unlock(&nfsi->req_lock);

		new = nfs_create_request(ctx, inode, page, offset, bytes);
		if (IS_ERR(new))
			return new;
	}

	/* We have a request for our page.
	 * If the creds don't match, or the
	 * page addresses don't match,
	 * tell the caller to wait on the conflicting
	 * request.
	 */
	rqend = req->wb_offset + req->wb_bytes;
	if (req->wb_context != ctx
	    || req->wb_page != page
	    || !nfs_dirty_request(req)
	    || offset > rqend || end < req->wb_offset) {
		nfs_unlock_request(req);
		return ERR_PTR(-EBUSY);
	}

	/* Okay, the request matches. Update the region */
	if (offset < req->wb_offset) {
		req->wb_offset = offset;
		req->wb_pgbase = offset;
		req->wb_bytes = rqend - req->wb_offset;
	}

	if (end > rqend)
		req->wb_bytes = end - req->wb_offset;

	return req;
}
int nfs_flush_incompatible(struct file *file, struct page *page)
{
	struct nfs_open_context *ctx = (struct nfs_open_context *)file->private_data;
#if 0 // mask by Victor Yu. 02-12-2007
	struct inode *inode = page->mapping->host;
#else
	struct inode *inode = page->u.xx.mapping->host;
#endif
	struct nfs_page *req;
	int status = 0;
	/*
	 * Look for a request corresponding to this page. If there
	 * is one, and it belongs to another file, we flush it out
	 * before we try to copy anything into the page. Do this
	 * due to the lack of an ACCESS-type call in NFSv2.
	 * Also do the same if we find a request from an existing
	 * dropped page.
	 */
	req = nfs_find_request(inode, page->index);
	if (req) {
		if (req->wb_page != page || ctx != req->wb_context)
			status = nfs_wb_page(inode, page);
		nfs_release_request(req);
	}
	return (status < 0) ? status : 0;
}
/*
 * Update and possibly write a cached page of an NFS file.
 *
 * XXX: Keep an eye on generic_file_read to make sure it doesn't do bad
 * things with a page scheduled for an RPC call (e.g. invalidate it).
 */
int nfs_updatepage(struct file *file, struct page *page,
		unsigned int offset, unsigned int count)
{
	struct nfs_open_context *ctx = (struct nfs_open_context *)file->private_data;
#if 0 // mask by Victor Yu. 02-12-2007
	struct inode *inode = page->mapping->host;
#else
	struct inode *inode = page->u.xx.mapping->host;
#endif
	struct nfs_page *req;
	int status = 0;

	nfs_inc_stats(inode, NFSIOS_VFSUPDATEPAGE);

	dprintk("NFS: nfs_updatepage(%s/%s %d@%Ld)\n",
		file->f_dentry->d_parent->d_name.name,
		file->f_dentry->d_name.name, count,
		(long long)(page_offset(page) + offset));

	if (IS_SYNC(inode)) {
		status = nfs_writepage_sync(ctx, inode, page, offset, count, 0);
		if (status > 0) {
			if (offset == 0 && status == PAGE_CACHE_SIZE)
				SetPageUptodate(page);
			return 0;
		}
		return status;
	}

	/* If we're not using byte range locks, and we know the page
	 * is entirely in cache, it may be more efficient to avoid
	 * fragmenting write requests.
	 */
	if (PageUptodate(page) && inode->i_flock == NULL && !(file->f_mode & O_SYNC)) {
		loff_t end_offs = i_size_read(inode) - 1;
		unsigned long end_index = end_offs >> PAGE_CACHE_SHIFT;

		count += offset;
		offset = 0;
		if (unlikely(end_offs < 0)) {
			/* Do nothing */
		} else if (page->index == end_index) {
			unsigned int pglen;
			pglen = (unsigned int)(end_offs & (PAGE_CACHE_SIZE-1)) + 1;
			if (count < pglen)
				count = pglen;
		} else if (page->index < end_index)
			count = PAGE_CACHE_SIZE;
	}

	/*
	 * Try to find an NFS request corresponding to this page
	 * and update it.
	 * If the existing request cannot be updated, we must flush
	 * it out now.
	 */
	do {
		req = nfs_update_request(ctx, inode, page, offset, count);
		status = (IS_ERR(req)) ? PTR_ERR(req) : 0;
		if (status != -EBUSY)
			break;
		/* Request could not be updated. Flush it out and try again */
		status = nfs_wb_page(inode, page);
	} while (status >= 0);
	if (status < 0)
		goto done;

	status = 0;

	/* Update file length */
	nfs_grow_file(page, offset, count);
	/* Set the PG_uptodate flag? */
	nfs_mark_uptodate(page, req->wb_pgbase, req->wb_bytes);
	nfs_unlock_request(req);
done:
	dprintk("NFS: nfs_updatepage returns %d (isize %Ld)\n",
			status, (long long)i_size_read(inode));
	if (status < 0)
		ClearPageUptodate(page);
	return status;
}
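/*
 * Dispose of a request once its WRITE RPC has completed: requeue it as
 * dirty if it must be rescheduled (verifier mismatch, i.e. an apparent
 * server reboot), move it to the commit list if the write was unstable,
 * otherwise remove it from the inode.
 */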
static void nfs_writepage_release(struct nfs_page *req)
{
	end_page_writeback(req->wb_page);

#if defined(CONFIG_NFS_V3) || defined(CONFIG_NFS_V4)
	if (!PageError(req->wb_page)) {
		if (NFS_NEED_RESCHED(req)) {
			nfs_mark_request_dirty(req);
			goto out;
		} else if (NFS_NEED_COMMIT(req)) {
			nfs_mark_request_commit(req);
			goto out;
		}
	}
	nfs_inode_remove_request(req);
out:
	nfs_clear_commit(req);
	nfs_clear_reschedule(req);
#else
	nfs_inode_remove_request(req);
#endif
	nfs_clear_page_writeback(req);
}
static inline int flush_task_priority(int how)
{
	switch (how & (FLUSH_HIGHPRI|FLUSH_LOWPRI)) {
		case FLUSH_HIGHPRI:
			return RPC_PRIORITY_HIGH;
		case FLUSH_LOWPRI:
			return RPC_PRIORITY_LOW;
	}
	return RPC_PRIORITY_NORMAL;
}
/*
 * Set up the argument/result storage required for the RPC call.
 */
static void nfs_write_rpcsetup(struct nfs_page *req,
		struct nfs_write_data *data,
		const struct rpc_call_ops *call_ops,
		unsigned int count, unsigned int offset,
		int how)
{
	struct inode *inode;
	int flags;

	/* Set up the RPC argument and reply structs
	 * NB: take care not to mess about with data->commit et al. */

	data->req = req;
	data->inode = inode = req->wb_context->dentry->d_inode;
	data->cred = req->wb_context->cred;

	data->args.fh = NFS_FH(inode);
	data->args.offset = req_offset(req) + offset;
	data->args.pgbase = req->wb_pgbase + offset;
	data->args.pages = data->pagevec;
	data->args.count = count;
	data->args.context = req->wb_context;

	data->res.fattr = &data->fattr;
	data->res.count = count;
	data->res.verf = &data->verf;
	nfs_fattr_init(&data->fattr);

	/* Set up the initial task struct. */
	flags = (how & FLUSH_SYNC) ? 0 : RPC_TASK_ASYNC;
	rpc_init_task(&data->task, NFS_CLIENT(inode), flags, call_ops, data);
	NFS_PROTO(inode)->write_setup(data, how);

	data->task.tk_priority = flush_task_priority(how);
	data->task.tk_cookie = (unsigned long)inode;

	dprintk("NFS: %4d initiated write call (req %s/%Ld, %u bytes @ offset %Lu)\n",
		data->task.tk_pid,
		inode->i_sb->s_id,
		(long long)NFS_FILEID(inode),
		count,
		(unsigned long long)data->args.offset);
}
static void nfs_execute_write(struct nfs_write_data *data)
{
	struct rpc_clnt *clnt = NFS_CLIENT(data->inode);
	sigset_t oldset;

	rpc_clnt_sigmask(clnt, &oldset);
	lock_kernel();
	rpc_execute(&data->task);
	unlock_kernel();
	rpc_clnt_sigunmask(clnt, &oldset);
}
/*
 * Generate multiple small requests to write out a single
 * contiguous dirty area on one page.
 */
static int nfs_flush_multi(struct inode *inode, struct list_head *head, int how)
{
	struct nfs_page *req = nfs_list_entry(head->next);
	struct page *page = req->wb_page;
	struct nfs_write_data *data;
	size_t wsize = NFS_SERVER(inode)->wsize, nbytes;
	unsigned int offset;
	int requests = 0;
	LIST_HEAD(list);

	nfs_list_remove_request(req);

	nbytes = req->wb_bytes;
	do {
		size_t len = min(nbytes, wsize);

		data = nfs_writedata_alloc(len);
		if (!data)
			goto out_bad;
		list_add(&data->pages, &list);
		requests++;
		nbytes -= len;
	} while (nbytes != 0);
	atomic_set(&req->wb_complete, requests);

	ClearPageError(page);
	set_page_writeback(page);
	offset = 0;
	nbytes = req->wb_bytes;
	do {
		data = list_entry(list.next, struct nfs_write_data, pages);
		list_del_init(&data->pages);

		data->pagevec[0] = page;

		if (nbytes > wsize) {
			nfs_write_rpcsetup(req, data, &nfs_write_partial_ops,
					wsize, offset, how);
			offset += wsize;
			nbytes -= wsize;
		} else {
			nfs_write_rpcsetup(req, data, &nfs_write_partial_ops,
					nbytes, offset, how);
			nbytes = 0;
		}
		nfs_execute_write(data);
	} while (nbytes != 0);

	return 0;

out_bad:
	while (!list_empty(&list)) {
		data = list_entry(list.next, struct nfs_write_data, pages);
		list_del(&data->pages);
		nfs_writedata_free(data);
	}
	nfs_mark_request_dirty(req);
	nfs_clear_page_writeback(req);
	return -ENOMEM;
}
/*
 * Create an RPC task for the given write request and kick it.
 * The page must have been locked by the caller.
 *
 * It may happen that the page we're passed is not marked dirty.
 * This is the case if nfs_updatepage detects a conflicting request
 * that has been written but not committed.
 */
static int nfs_flush_one(struct inode *inode, struct list_head *head, int how)
{
	struct nfs_page *req;
	struct page **pages;
	struct nfs_write_data *data;
	unsigned int count;

	data = nfs_writedata_alloc(NFS_SERVER(inode)->wsize);
	if (!data)
		goto out_bad;

	pages = data->pagevec;
	count = 0;
	while (!list_empty(head)) {
		req = nfs_list_entry(head->next);
		nfs_list_remove_request(req);
		nfs_list_add_request(req, &data->pages);
		ClearPageError(req->wb_page);
		set_page_writeback(req->wb_page);
		*pages++ = req->wb_page;
		count += req->wb_bytes;
	}
	req = nfs_list_entry(data->pages.next);

	/* Set up the argument struct */
	nfs_write_rpcsetup(req, data, &nfs_write_full_ops, count, 0, how);

	nfs_execute_write(data);
	return 0;
out_bad:
	while (!list_empty(head)) {
		struct nfs_page *req = nfs_list_entry(head->next);
		nfs_list_remove_request(req);
		nfs_mark_request_dirty(req);
		nfs_clear_page_writeback(req);
	}
	return -ENOMEM;
}
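/*
 * Flush a list of dirty requests: coalesce contiguous requests into
 * batches of at most wpages pages and kick off an RPC for each batch,
 * using nfs_flush_multi() when wsize is smaller than a page.
 */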
static int nfs_flush_list(struct inode *inode, struct list_head *head, int npages, int how)
{
	LIST_HEAD(one_request);
	int (*flush_one)(struct inode *, struct list_head *, int);
	struct nfs_page *req;
	int wpages = NFS_SERVER(inode)->wpages;
	int wsize = NFS_SERVER(inode)->wsize;
	int error;

	flush_one = nfs_flush_one;
	if (wsize < PAGE_CACHE_SIZE)
		flush_one = nfs_flush_multi;
	/* For single writes, FLUSH_STABLE is more efficient */
	if (npages <= wpages && npages == NFS_I(inode)->npages
			&& nfs_list_entry(head->next)->wb_bytes <= wsize)
		how |= FLUSH_STABLE;

	do {
		nfs_coalesce_requests(head, &one_request, wpages);
		req = nfs_list_entry(one_request.next);
		error = flush_one(inode, &one_request, how);
		if (error < 0)
			goto out_err;
	} while (!list_empty(head));
	return 0;
out_err:
	while (!list_empty(head)) {
		req = nfs_list_entry(head->next);
		nfs_list_remove_request(req);
		nfs_mark_request_dirty(req);
		nfs_clear_page_writeback(req);
	}
	return error;
}
/*
 * Handle a write reply that flushed part of a page.
 */
static void nfs_writeback_done_partial(struct rpc_task *task, void *calldata)
{
	struct nfs_write_data *data = calldata;
	struct nfs_page *req = data->req;
	struct page *page = req->wb_page;

	dprintk("NFS: write (%s/%Ld %d@%Ld)",
		req->wb_context->dentry->d_inode->i_sb->s_id,
		(long long)NFS_FILEID(req->wb_context->dentry->d_inode),
		req->wb_bytes,
		(long long)req_offset(req));

	if (nfs_writeback_done(task, data) != 0)
		return;

	if (task->tk_status < 0) {
		ClearPageUptodate(page);
		SetPageError(page);
		req->wb_context->error = task->tk_status;
		dprintk(", error = %d\n", task->tk_status);
	} else {
#if defined(CONFIG_NFS_V3) || defined(CONFIG_NFS_V4)
		if (data->verf.committed < NFS_FILE_SYNC) {
			if (!NFS_NEED_COMMIT(req)) {
				nfs_defer_commit(req);
				memcpy(&req->wb_verf, &data->verf, sizeof(req->wb_verf));
				dprintk(" defer commit\n");
			} else if (memcmp(&req->wb_verf, &data->verf, sizeof(req->wb_verf))) {
				nfs_defer_reschedule(req);
				dprintk(" server reboot detected\n");
			}
		} else
#endif
			dprintk(" OK\n");
	}

	if (atomic_dec_and_test(&req->wb_complete))
		nfs_writepage_release(req);
}

static const struct rpc_call_ops nfs_write_partial_ops = {
	.rpc_call_done = nfs_writeback_done_partial,
	.rpc_release = nfs_writedata_release,
};
/*
 * Handle a write reply that flushes a whole page.
 *
 * FIXME: There is an inherent race with invalidate_inode_pages and
 *	  writebacks since the page->count is kept > 1 for as long
 *	  as the page has a write request pending.
 */
static void nfs_writeback_done_full(struct rpc_task *task, void *calldata)
{
	struct nfs_write_data *data = calldata;
	struct nfs_page *req;
	struct page *page;

	if (nfs_writeback_done(task, data) != 0)
		return;

	/* Update attributes as result of writeback. */
	while (!list_empty(&data->pages)) {
		req = nfs_list_entry(data->pages.next);
		nfs_list_remove_request(req);
		page = req->wb_page;

		dprintk("NFS: write (%s/%Ld %d@%Ld)",
			req->wb_context->dentry->d_inode->i_sb->s_id,
			(long long)NFS_FILEID(req->wb_context->dentry->d_inode),
			req->wb_bytes,
			(long long)req_offset(req));

		if (task->tk_status < 0) {
			ClearPageUptodate(page);
			SetPageError(page);
			req->wb_context->error = task->tk_status;
			end_page_writeback(page);
			nfs_inode_remove_request(req);
			dprintk(", error = %d\n", task->tk_status);
			goto next;
		}
		end_page_writeback(page);

#if defined(CONFIG_NFS_V3) || defined(CONFIG_NFS_V4)
		if (data->args.stable != NFS_UNSTABLE || data->verf.committed == NFS_FILE_SYNC) {
			nfs_inode_remove_request(req);
			dprintk(" OK\n");
			goto next;
		}
		memcpy(&req->wb_verf, &data->verf, sizeof(req->wb_verf));
		nfs_mark_request_commit(req);
		dprintk(" marked for commit\n");
#else
		nfs_inode_remove_request(req);
#endif
	next:
		nfs_clear_page_writeback(req);
	}
}

static const struct rpc_call_ops nfs_write_full_ops = {
	.rpc_call_done = nfs_writeback_done_full,
	.rpc_release = nfs_writedata_release,
};
/*
 * This function is called when the WRITE call is complete.
 */
int nfs_writeback_done(struct rpc_task *task, struct nfs_write_data *data)
{
	struct nfs_writeargs *argp = &data->args;
	struct nfs_writeres *resp = &data->res;
	int status;

	dprintk("NFS: %4d nfs_writeback_done (status %d)\n",
		task->tk_pid, task->tk_status);

	/*
	 * ->write_done will attempt to use post-op attributes to detect
	 * conflicting writes by other clients. A strict interpretation
	 * of close-to-open would allow us to continue caching even if
	 * another writer had changed the file, but some applications
	 * depend on tighter cache coherency when writing.
	 */
	status = NFS_PROTO(data->inode)->write_done(task, data);
	if (status != 0)
		return status;
	nfs_add_stats(data->inode, NFSIOS_SERVERWRITTENBYTES, resp->count);

#if defined(CONFIG_NFS_V3) || defined(CONFIG_NFS_V4)
	if (resp->verf->committed < argp->stable && task->tk_status >= 0) {
		/* We tried a write call, but the server did not
		 * commit data to stable storage even though we
		 * requested it.
		 * Note: There is a known bug in Tru64 < 5.0 in which
		 *	 the server reports NFS_DATA_SYNC, but performs
		 *	 NFS_FILE_SYNC. We therefore implement this checking
		 *	 as a dprintk() in order to avoid filling syslog.
		 */
		static unsigned long complain;

		if (time_before(complain, jiffies)) {
			dprintk("NFS: faulty NFS server %s:"
				" (committed = %d) != (stable = %d)\n",
				NFS_SERVER(data->inode)->nfs_client->cl_hostname,
				resp->verf->committed, argp->stable);
			complain = jiffies + 300 * HZ;
		}
	}
#endif
	/* Is this a short write? */
	if (task->tk_status >= 0 && resp->count < argp->count) {
		static unsigned long complain;

		nfs_inc_stats(data->inode, NFSIOS_SHORTWRITE);

		/* Has the server at least made some progress? */
		if (resp->count != 0) {
			/* Was this an NFSv2 write or an NFSv3 stable write? */
			if (resp->verf->committed != NFS_UNSTABLE) {
				/* Resend from where the server left off */
				argp->offset += resp->count;
				argp->pgbase += resp->count;
				argp->count -= resp->count;
			} else {
				/* Resend as a stable write in order to avoid
				 * headaches in the case of a server crash.
				 */
				argp->stable = NFS_FILE_SYNC;
			}
			rpc_restart_call(task);
			return -EAGAIN;
		}
		if (time_before(complain, jiffies)) {
			printk(KERN_WARNING
			       "NFS: Server wrote zero bytes, expected %u.\n",
					argp->count);
			complain = jiffies + 300 * HZ;
		}
		/* Can't do anything about it except throw an error. */
		task->tk_status = -EIO;
	}
	return 0;
}
#if defined(CONFIG_NFS_V3) || defined(CONFIG_NFS_V4)
void nfs_commit_release(void *wdata)
{
	nfs_commit_free(wdata);
}

/*
 * Set up the argument/result storage required for the RPC call.
 */
static void nfs_commit_rpcsetup(struct list_head *head,
		struct nfs_write_data *data,
		int how)
{
	struct nfs_page *first;
	struct inode *inode;
	int flags;

	/* Set up the RPC argument and reply structs
	 * NB: take care not to mess about with data->commit et al. */

	list_splice_init(head, &data->pages);
	first = nfs_list_entry(data->pages.next);
	inode = first->wb_context->dentry->d_inode;

	data->inode = inode;
	data->cred = first->wb_context->cred;

	data->args.fh = NFS_FH(data->inode);
	/* Note: we always request a commit of the entire inode */
	data->args.offset = 0;
	data->args.count = 0;
	data->res.count = 0;
	data->res.fattr = &data->fattr;
	data->res.verf = &data->verf;
	nfs_fattr_init(&data->fattr);

	/* Set up the initial task struct. */
	flags = (how & FLUSH_SYNC) ? 0 : RPC_TASK_ASYNC;
	rpc_init_task(&data->task, NFS_CLIENT(inode), flags, &nfs_commit_ops, data);
	NFS_PROTO(inode)->commit_setup(data, how);

	data->task.tk_priority = flush_task_priority(how);
	data->task.tk_cookie = (unsigned long)inode;

	dprintk("NFS: %4d initiated commit call\n", data->task.tk_pid);
}
/*
 * Commit dirty pages
 */
static int
nfs_commit_list(struct inode *inode, struct list_head *head, int how)
{
	struct nfs_write_data *data;
	struct nfs_page *req;

	data = nfs_commit_alloc();

	if (!data)
		goto out_bad;

	/* Set up the argument struct */
	nfs_commit_rpcsetup(head, data, how);

	nfs_execute_write(data);
	return 0;
out_bad:
	while (!list_empty(head)) {
		req = nfs_list_entry(head->next);
		nfs_list_remove_request(req);
		nfs_mark_request_commit(req);
		dec_zone_page_state(req->wb_page, NR_UNSTABLE_NFS);
		nfs_clear_page_writeback(req);
	}
	return -ENOMEM;
}
/*
 * COMMIT call returned
 */
static void nfs_commit_done(struct rpc_task *task, void *calldata)
{
	struct nfs_write_data *data = calldata;
	struct nfs_page *req;

	dprintk("NFS: %4d nfs_commit_done (status %d)\n",
		task->tk_pid, task->tk_status);

	/* Call the NFS version-specific code */
	if (NFS_PROTO(data->inode)->commit_done(task, data) != 0)
		return;

	while (!list_empty(&data->pages)) {
		req = nfs_list_entry(data->pages.next);
		nfs_list_remove_request(req);
		dec_zone_page_state(req->wb_page, NR_UNSTABLE_NFS);

		dprintk("NFS: commit (%s/%Ld %d@%Ld)",
			req->wb_context->dentry->d_inode->i_sb->s_id,
			(long long)NFS_FILEID(req->wb_context->dentry->d_inode),
			req->wb_bytes,
			(long long)req_offset(req));
		if (task->tk_status < 0) {
			req->wb_context->error = task->tk_status;
			nfs_inode_remove_request(req);
			dprintk(", error = %d\n", task->tk_status);
			goto next;
		}

		/* Okay, COMMIT succeeded, apparently. Check the verifier
		 * returned by the server against all stored verfs. */
		if (!memcmp(req->wb_verf.verifier, data->verf.verifier, sizeof(data->verf.verifier))) {
			/* We have a match */
			nfs_inode_remove_request(req);
			dprintk(" OK\n");
			goto next;
		}
		/* We have a mismatch. Write the page again */
		dprintk(" mismatch\n");
		nfs_mark_request_dirty(req);
	next:
		nfs_clear_page_writeback(req);
	}
}

static const struct rpc_call_ops nfs_commit_ops = {
	.rpc_call_done = nfs_commit_done,
	.rpc_release = nfs_commit_release,
};
#else
static inline int nfs_commit_list(struct inode *inode, struct list_head *head, int how)
{
	return 0;
}
#endif
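/*
 * Scan the inode for dirty requests in the given range and flush them
 * out. Returns the number of requests flushed, or a negative error.
 */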
static int nfs_flush_inode(struct inode *inode, unsigned long idx_start,
			   unsigned int npages, int how)
{
	struct nfs_inode *nfsi = NFS_I(inode);
	LIST_HEAD(head);
	int res;

	spin_lock(&nfsi->req_lock);
	res = nfs_scan_dirty(inode, &head, idx_start, npages);
	spin_unlock(&nfsi->req_lock);
	if (res) {
		int error = nfs_flush_list(inode, &head, res, how);
		if (error < 0)
			return error;
	}
	return res;
}
#if defined(CONFIG_NFS_V3) || defined(CONFIG_NFS_V4)
int nfs_commit_inode(struct inode *inode, int how)
{
	struct nfs_inode *nfsi = NFS_I(inode);
	LIST_HEAD(head);
	int res;

	spin_lock(&nfsi->req_lock);
	res = nfs_scan_commit(inode, &head, 0, 0);
	spin_unlock(&nfsi->req_lock);
	if (res) {
		int error = nfs_commit_list(inode, &head, how);
		if (error < 0)
			return error;
	}
	return res;
}
#endif
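/*
 * Flush and wait: loop until every request in the range has been
 * written out and either committed or (with FLUSH_INVALIDATE)
 * cancelled. FLUSH_NOCOMMIT skips the commit phase entirely.
 */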
int nfs_sync_inode_wait(struct inode *inode, unsigned long idx_start,
		unsigned int npages, int how)
{
	struct nfs_inode *nfsi = NFS_I(inode);
	LIST_HEAD(head);
	int nocommit = how & FLUSH_NOCOMMIT;
	int pages, ret;

	how &= ~FLUSH_NOCOMMIT;
	spin_lock(&nfsi->req_lock);
	do {
		ret = nfs_wait_on_requests_locked(inode, idx_start, npages);
		if (ret != 0)
			continue;
		pages = nfs_scan_dirty(inode, &head, idx_start, npages);
		if (pages != 0) {
			spin_unlock(&nfsi->req_lock);
			if (how & FLUSH_INVALIDATE)
				nfs_cancel_dirty_list(&head);
			else
				ret = nfs_flush_list(inode, &head, pages, how);
			spin_lock(&nfsi->req_lock);
			continue;
		}
		if (nocommit)
			break;
		pages = nfs_scan_commit(inode, &head, idx_start, npages);
		if (pages == 0)
			break;
		if (how & FLUSH_INVALIDATE) {
			spin_unlock(&nfsi->req_lock);
			nfs_cancel_commit_list(&head);
			spin_lock(&nfsi->req_lock);
			continue;
		}
		pages += nfs_scan_commit(inode, &head, 0, 0);
		spin_unlock(&nfsi->req_lock);
		ret = nfs_commit_list(inode, &head, how);
		spin_lock(&nfsi->req_lock);
	} while (ret >= 0);
	spin_unlock(&nfsi->req_lock);
	return ret;
}
int __init nfs_init_writepagecache(void)
{
	nfs_wdata_cachep = kmem_cache_create("nfs_write_data",
					     sizeof(struct nfs_write_data),
					     0, SLAB_HWCACHE_ALIGN,
					     NULL, NULL);
	if (nfs_wdata_cachep == NULL)
		return -ENOMEM;

	nfs_wdata_mempool = mempool_create_slab_pool(MIN_POOL_WRITE,
						     nfs_wdata_cachep);
	if (nfs_wdata_mempool == NULL)
		return -ENOMEM;

	nfs_commit_mempool = mempool_create_slab_pool(MIN_POOL_COMMIT,
						      nfs_wdata_cachep);
	if (nfs_commit_mempool == NULL)
		return -ENOMEM;

	return 0;
}

void nfs_destroy_writepagecache(void)
{
	mempool_destroy(nfs_commit_mempool);
	mempool_destroy(nfs_wdata_mempool);
	kmem_cache_destroy(nfs_wdata_cachep);
}