/*
 * linux/fs/nfs/write.c
 *
 * Writing file data over NFS.
 *
 * We do it like this: When a (user) process wishes to write data to an
 * NFS file, a write request is allocated that contains the RPC task data
 * plus some info on the page to be written, and added to the inode's
 * write chain. If the process writes past the end of the page, an async
 * RPC call to write the page is scheduled immediately; otherwise, the call
 * is delayed for a few seconds.
 *
 * Just like readahead, no async I/O is performed if wsize < PAGE_SIZE.
 *
 * Write requests are kept on the inode's writeback list. Each entry in
 * that list references the page (portion) to be written. When the
 * cache timeout has expired, the RPC task is woken up, and tries to
 * lock the page. As soon as it manages to do so, the request is moved
 * from the writeback list to the writelock list.
 *
 * Note: we must make sure never to confuse the inode passed in the
 * write_page request with the one in page->inode. As far as I understand
 * it, these are different when doing a swap-out.
 *
 * To understand everything that goes on here and in the NFS read code,
 * one should be aware that a page is locked in exactly one of the following
 * cases:
 *
 *  - A write request is in progress.
 *  - A user process is in generic_file_write/nfs_update_page
 *  - A user process is in generic_file_read
 *
 * Also note that because of the way pages are invalidated in
 * nfs_revalidate_inode, the following assertions hold:
 *
 *  - If a page is dirty, there will be no read requests (a page will
 *    not be re-read unless invalidated by nfs_revalidate_inode).
 *  - If the page is not uptodate, there will be no pending write
 *    requests, and no process will be in nfs_update_page.
 *
 * FIXME: Interaction with the vmscan routines is not optimal yet.
 * Either vmscan must be made nfs-savvy, or we need a different page
 * reclaim concept that supports something like FS-independent
 * buffer_heads with a b_ops-> field.
 *
 * Copyright (C) 1996, 1997, Olaf Kirch <okir@monad.swb.de>
 */

#include <linux/types.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/file.h>
#include <linux/writeback.h>

#include <linux/sunrpc/clnt.h>
#include <linux/nfs_fs.h>
#include <linux/nfs_mount.h>
#include <linux/nfs_page.h>
#include <asm/uaccess.h>
#include <linux/smp_lock.h>

#include "delegation.h"
#include "iostat.h"

#define NFSDBG_FACILITY		NFSDBG_PAGECACHE

#define MIN_POOL_WRITE		(32)
#define MIN_POOL_COMMIT		(4)

/*
 * Local function declarations
 */
static struct nfs_page *nfs_update_request(struct nfs_open_context *,
					    struct inode *,
					    struct page *,
					    unsigned int, unsigned int);
static int nfs_wait_on_write_congestion(struct address_space *, int);
static int nfs_wait_on_requests(struct inode *, unsigned long, unsigned int);
static int nfs_flush_inode(struct inode *inode, unsigned long idx_start,
			   unsigned int npages, int how);
static const struct rpc_call_ops nfs_write_partial_ops;
static const struct rpc_call_ops nfs_write_full_ops;
static const struct rpc_call_ops nfs_commit_ops;

static kmem_cache_t *nfs_wdata_cachep;
static mempool_t *nfs_wdata_mempool;
static mempool_t *nfs_commit_mempool;

static DECLARE_WAIT_QUEUE_HEAD(nfs_write_congestion);

struct nfs_write_data *nfs_commit_alloc(void)
{
	struct nfs_write_data *p = mempool_alloc(nfs_commit_mempool, SLAB_NOFS);

	if (p) {
		memset(p, 0, sizeof(*p));
		INIT_LIST_HEAD(&p->pages);
	}
	return p;
}

void nfs_commit_free(struct nfs_write_data *p)
{
	if (p && (p->pagevec != &p->page_array[0]))
		kfree(p->pagevec);
	mempool_free(p, nfs_commit_mempool);
}

struct nfs_write_data *nfs_writedata_alloc(size_t len)
{
	unsigned int pagecount = (len + PAGE_SIZE - 1) >> PAGE_SHIFT;
	struct nfs_write_data *p = mempool_alloc(nfs_wdata_mempool, SLAB_NOFS);

	if (p) {
		memset(p, 0, sizeof(*p));
		INIT_LIST_HEAD(&p->pages);
		p->npages = pagecount;
		if (pagecount <= ARRAY_SIZE(p->page_array))
			p->pagevec = p->page_array;
		else {
			p->pagevec = kcalloc(pagecount, sizeof(struct page *), GFP_NOFS);
			if (!p->pagevec) {
				mempool_free(p, nfs_wdata_mempool);
				p = NULL;
			}
		}
	}
	return p;
}

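/*
 * Worked example (illustrative only; assumes PAGE_SIZE == 4096, i.e.
 * PAGE_SHIFT == 12): the round-up above computes how many page pointers
 * a write of 'len' bytes needs:
 *
 *	len = 4096  -> pagecount = 1
 *	len = 8193  -> pagecount = 3
 *
 * Only when pagecount exceeds ARRAY_SIZE(p->page_array) does the
 * allocation fall back to a separately kcalloc()ed vector, so the
 * common small-write path needs no second allocation.
 */
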
static void nfs_writedata_free(struct nfs_write_data *p)
{
	if (p && (p->pagevec != &p->page_array[0]))
		kfree(p->pagevec);
	mempool_free(p, nfs_wdata_mempool);
}

void nfs_writedata_release(void *wdata)
{
	nfs_writedata_free(wdata);
}

/* Adjust the file length if we're writing beyond the end */
static void nfs_grow_file(struct page *page, unsigned int offset, unsigned int count)
{
	struct inode *inode = page->mapping->host;
	loff_t end, i_size = i_size_read(inode);
	unsigned long end_index = (i_size - 1) >> PAGE_CACHE_SHIFT;

	if (i_size > 0 && page->index < end_index)
		return;
	end = ((loff_t)page->index << PAGE_CACHE_SHIFT) + ((loff_t)offset+count);
	if (i_size >= end)
		return;
	nfs_inc_stats(inode, NFSIOS_EXTENDWRITE);
	i_size_write(inode, end);
}

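/*
 * Worked example (illustrative; assumes 4096-byte pages): a write of
 * count = 200 bytes at offset = 100 into the page with index 3 gives
 *
 *	end = (3 << 12) + 100 + 200 = 12588
 *
 * so if i_size was anything smaller, the cached file size is advanced
 * to 12588. The early return keeps writes that land strictly before
 * the last page from ever touching i_size.
 */
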
/* We can set the PG_uptodate flag if we see that a write request
 * covers the full page.
 */
static void nfs_mark_uptodate(struct page *page, unsigned int base, unsigned int count)
{
	loff_t end_offs;

	if (PageUptodate(page))
		return;
	if (base != 0)
		return;
	if (count == PAGE_CACHE_SIZE) {
		SetPageUptodate(page);
		return;
	}

	end_offs = i_size_read(page->mapping->host) - 1;
	if (end_offs < 0)
		return;
	/* Is this the last page? */
	if (page->index != (unsigned long)(end_offs >> PAGE_CACHE_SHIFT))
		return;
	/* This is the last page: set PG_uptodate if we cover the entire
	 * extent of the data, then zero the rest of the page.
	 */
	if (count == (unsigned int)(end_offs & (PAGE_CACHE_SIZE - 1)) + 1) {
		memclear_highpage_flush(page, count, PAGE_CACHE_SIZE - count);
		SetPageUptodate(page);
	}
}

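/*
 * Worked example (illustrative; assumes 4096-byte pages): with
 * i_size = 10000, end_offs = 9999 and the last page has index
 * 9999 >> 12 = 2, holding (9999 & 4095) + 1 = 1808 valid bytes.
 * A request covering base 0, count 1808 on that page zeroes the
 * remaining 2288 bytes and marks the page uptodate; any smaller or
 * unaligned request leaves the flag alone.
 */
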
/*
 * Write a page synchronously.
 * Offset is the data offset within the page.
 */
static int nfs_writepage_sync(struct nfs_open_context *ctx, struct inode *inode,
		struct page *page, unsigned int offset, unsigned int count,
		int how)
{
	unsigned int	wsize = NFS_SERVER(inode)->wsize;
	int		result, written = 0;
	struct nfs_write_data *wdata;

	wdata = nfs_writedata_alloc(wsize);
	if (!wdata)
		return -ENOMEM;

	wdata->flags = how;
	wdata->cred = ctx->cred;
	wdata->inode = inode;
	wdata->args.fh = NFS_FH(inode);
	wdata->args.context = ctx;
	wdata->args.pages = &page;
	wdata->args.stable = NFS_FILE_SYNC;
	wdata->args.pgbase = offset;
	wdata->args.count = wsize;
	wdata->res.fattr = &wdata->fattr;
	wdata->res.verf = &wdata->verf;

	dprintk("NFS:      nfs_writepage_sync(%s/%Ld %d@%Ld)\n",
		inode->i_sb->s_id,
		(long long)NFS_FILEID(inode),
		count, (long long)(page_offset(page) + offset));

	set_page_writeback(page);
	nfs_begin_data_update(inode);
	do {
		if (count < wsize)
			wdata->args.count = count;
		wdata->args.offset = page_offset(page) + wdata->args.pgbase;

		result = NFS_PROTO(inode)->write(wdata);

		if (result < 0) {
			/* Must mark the page invalid after I/O error */
			ClearPageUptodate(page);
			goto io_error;
		}
		if (result < wdata->args.count)
			printk(KERN_WARNING "NFS: short write, count=%u, result=%d\n",
					wdata->args.count, result);
		wdata->args.offset += result;
		wdata->args.pgbase += result;
		written += result;
		count -= result;
		nfs_add_stats(inode, NFSIOS_SERVERWRITTENBYTES, result);
	} while (count);
	/* Update file length */
	nfs_grow_file(page, offset, written);
	/* Set the PG_uptodate flag? */
	nfs_mark_uptodate(page, offset, written);

	if (PageError(page))
		ClearPageError(page);

io_error:
	nfs_end_data_update(inode);
	end_page_writeback(page);
	nfs_writedata_free(wdata);
	return written ? written : result;
}

static int nfs_writepage_async(struct nfs_open_context *ctx,
		struct inode *inode, struct page *page,
		unsigned int offset, unsigned int count)
{
	struct nfs_page	*req;

	req = nfs_update_request(ctx, inode, page, offset, count);
	if (IS_ERR(req))
		return PTR_ERR(req);
	/* Update file length */
	nfs_grow_file(page, offset, count);
	/* Set the PG_uptodate flag? */
	nfs_mark_uptodate(page, offset, count);
	nfs_unlock_request(req);
	return 0;
}

static int wb_priority(struct writeback_control *wbc)
{
	if (wbc->for_reclaim)
		return FLUSH_HIGHPRI;
	if (wbc->for_kupdate)
		return FLUSH_LOWPRI;
	return 0;
}

/*
 * Write an mmapped page to the server.
 */
int nfs_writepage(struct page *page, struct writeback_control *wbc)
{
	struct nfs_open_context *ctx;
	struct inode *inode = page->mapping->host;
	unsigned long end_index;
	unsigned offset = PAGE_CACHE_SIZE;
	loff_t i_size = i_size_read(inode);
	int inode_referenced = 0;
	int priority = wb_priority(wbc);
	int err;

	nfs_inc_stats(inode, NFSIOS_VFSWRITEPAGE);
	nfs_add_stats(inode, NFSIOS_WRITEPAGES, 1);

	/*
	 * Note: We need to ensure that we have a reference to the inode
	 *       if we are to do asynchronous writes. If not, waiting
	 *       in nfs_wait_on_request() may deadlock with clear_inode().
	 *
	 *       If igrab() fails here, then it is in any case safe to
	 *       call nfs_wb_page(), since there will be no pending writes.
	 */
	if (igrab(inode) != 0)
		inode_referenced = 1;
	end_index = i_size >> PAGE_CACHE_SHIFT;

	/* Ensure we've flushed out any previous writes */
	nfs_wb_page_priority(inode, page, priority);

	/* easy case */
	if (page->index < end_index)
		goto do_it;
	/* things got complicated... */
	offset = i_size & (PAGE_CACHE_SIZE-1);

	/* OK, are we completely out? */
	err = 0; /* potential race with truncate - ignore */
	if (page->index >= end_index+1 || !offset)
		goto out;
do_it:
	ctx = nfs_find_open_context(inode, NULL, FMODE_WRITE);
	if (ctx == NULL) {
		err = -EBADF;
		goto out;
	}
	lock_kernel();
	if (!IS_SYNC(inode) && inode_referenced) {
		err = nfs_writepage_async(ctx, inode, page, 0, offset);
		if (!wbc->for_writepages)
			nfs_flush_inode(inode, 0, 0, wb_priority(wbc));
	} else {
		err = nfs_writepage_sync(ctx, inode, page, 0,
						offset, priority);
		if (err >= 0) {
			if (err != offset)
				redirty_page_for_writepage(wbc, page);
			err = 0;
		}
	}
	unlock_kernel();
	put_nfs_open_context(ctx);
out:
	unlock_page(page);
	if (inode_referenced)
		iput(inode);
	return err;
}

/*
 * Note: causes nfs_update_request() to block on the assumption
 *	 that the writeback is generated due to memory pressure.
 */
int nfs_writepages(struct address_space *mapping, struct writeback_control *wbc)
{
	struct backing_dev_info *bdi = mapping->backing_dev_info;
	struct inode *inode = mapping->host;
	int err;

	nfs_inc_stats(inode, NFSIOS_VFSWRITEPAGES);

	err = generic_writepages(mapping, wbc);
	if (err)
		return err;
	while (test_and_set_bit(BDI_write_congested, &bdi->state) != 0) {
		if (wbc->nonblocking)
			return 0;
		nfs_wait_on_write_congestion(mapping, 0);
	}
	err = nfs_flush_inode(inode, 0, 0, wb_priority(wbc));
	if (err < 0)
		goto out;
	nfs_add_stats(inode, NFSIOS_WRITEPAGES, err);
	wbc->nr_to_write -= err;
	if (!wbc->nonblocking && wbc->sync_mode == WB_SYNC_ALL) {
		err = nfs_wait_on_requests(inode, 0, 0);
		if (err < 0)
			goto out;
	}
	err = nfs_commit_inode(inode, wb_priority(wbc));
	if (err > 0) {
		wbc->nr_to_write -= err;
		err = 0;
	}
out:
	clear_bit(BDI_write_congested, &bdi->state);
	wake_up_all(&nfs_write_congestion);
	writeback_congestion_end();
	return err;
}

/*
 * Insert a write request into an inode
 */
static int nfs_inode_add_request(struct inode *inode, struct nfs_page *req)
{
	struct nfs_inode *nfsi = NFS_I(inode);
	int error;

	error = radix_tree_insert(&nfsi->nfs_page_tree, req->wb_index, req);
	BUG_ON(error == -EEXIST);
	if (error)
		return error;
	if (!nfsi->npages) {
		igrab(inode);
		nfs_begin_data_update(inode);
		if (nfs_have_delegation(inode, FMODE_WRITE))
			nfsi->change_attr++;
	}
	SetPagePrivate(req->wb_page);
	nfsi->npages++;
	atomic_inc(&req->wb_count);
	return 0;
}

/*
 * Remove a write request from an inode
 */
static void nfs_inode_remove_request(struct nfs_page *req)
{
	struct inode *inode = req->wb_context->dentry->d_inode;
	struct nfs_inode *nfsi = NFS_I(inode);

	BUG_ON(!NFS_WBACK_BUSY(req));

	spin_lock(&nfsi->req_lock);
	ClearPagePrivate(req->wb_page);
	radix_tree_delete(&nfsi->nfs_page_tree, req->wb_index);
	nfsi->npages--;
	if (!nfsi->npages) {
		spin_unlock(&nfsi->req_lock);
		nfs_end_data_update(inode);
		iput(inode);
	} else
		spin_unlock(&nfsi->req_lock);
	nfs_clear_request(req);
	nfs_release_request(req);
}

/*
 * Find a request
 */
static inline struct nfs_page *
_nfs_find_request(struct inode *inode, unsigned long index)
{
	struct nfs_inode *nfsi = NFS_I(inode);
	struct nfs_page *req;

	req = (struct nfs_page *)radix_tree_lookup(&nfsi->nfs_page_tree, index);
	if (req)
		atomic_inc(&req->wb_count);
	return req;
}

static struct nfs_page *
nfs_find_request(struct inode *inode, unsigned long index)
{
	struct nfs_page		*req;
	struct nfs_inode	*nfsi = NFS_I(inode);

	spin_lock(&nfsi->req_lock);
	req = _nfs_find_request(inode, index);
	spin_unlock(&nfsi->req_lock);
	return req;
}

/*
 * Add a request to the inode's dirty list.
 */
static void
nfs_mark_request_dirty(struct nfs_page *req)
{
	struct inode *inode = req->wb_context->dentry->d_inode;
	struct nfs_inode *nfsi = NFS_I(inode);

	spin_lock(&nfsi->req_lock);
	radix_tree_tag_set(&nfsi->nfs_page_tree,
			req->wb_index, NFS_PAGE_TAG_DIRTY);
	nfs_list_add_request(req, &nfsi->dirty);
	nfsi->ndirty++;
	spin_unlock(&nfsi->req_lock);
	inc_zone_page_state(req->wb_page, NR_FILE_DIRTY);
	mark_inode_dirty(inode);
}

/*
 * Check if a request is dirty
 */
static inline int
nfs_dirty_request(struct nfs_page *req)
{
	struct nfs_inode *nfsi = NFS_I(req->wb_context->dentry->d_inode);
	return !list_empty(&req->wb_list) && req->wb_list_head == &nfsi->dirty;
}

#if defined(CONFIG_NFS_V3) || defined(CONFIG_NFS_V4)
/*
 * Add a request to the inode's commit list.
 */
static void
nfs_mark_request_commit(struct nfs_page *req)
{
	struct inode *inode = req->wb_context->dentry->d_inode;
	struct nfs_inode *nfsi = NFS_I(inode);

	spin_lock(&nfsi->req_lock);
	nfs_list_add_request(req, &nfsi->commit);
	nfsi->ncommit++;
	spin_unlock(&nfsi->req_lock);
	inc_zone_page_state(req->wb_page, NR_UNSTABLE_NFS);
	mark_inode_dirty(inode);
}
#endif

/*
 * Wait for a request to complete.
 *
 * Interruptible by signals only if mounted with intr flag.
 */
static int nfs_wait_on_requests_locked(struct inode *inode, unsigned long idx_start, unsigned int npages)
{
	struct nfs_inode *nfsi = NFS_I(inode);
	struct nfs_page *req;
	unsigned long		idx_end, next;
	unsigned int		res = 0;
	int			error;

	if (npages == 0)
		idx_end = ~0;
	else
		idx_end = idx_start + npages - 1;

	next = idx_start;
	while (radix_tree_gang_lookup_tag(&nfsi->nfs_page_tree, (void **)&req, next, 1, NFS_PAGE_TAG_WRITEBACK)) {
		if (req->wb_index > idx_end)
			break;

		next = req->wb_index + 1;
		BUG_ON(!NFS_WBACK_BUSY(req));

		atomic_inc(&req->wb_count);
		spin_unlock(&nfsi->req_lock);
		error = nfs_wait_on_request(req);
		nfs_release_request(req);
		spin_lock(&nfsi->req_lock);
		if (error < 0)
			return error;
		res++;
	}
	return res;
}

static int nfs_wait_on_requests(struct inode *inode, unsigned long idx_start, unsigned int npages)
{
	struct nfs_inode *nfsi = NFS_I(inode);
	int ret;

	spin_lock(&nfsi->req_lock);
	ret = nfs_wait_on_requests_locked(inode, idx_start, npages);
	spin_unlock(&nfsi->req_lock);
	return ret;
}

static void nfs_cancel_dirty_list(struct list_head *head)
{
	struct nfs_page *req;

	while (!list_empty(head)) {
		req = nfs_list_entry(head->next);
		nfs_list_remove_request(req);
		nfs_inode_remove_request(req);
		nfs_clear_page_writeback(req);
	}
}

static void nfs_cancel_commit_list(struct list_head *head)
{
	struct nfs_page *req;

	while (!list_empty(head)) {
		req = nfs_list_entry(head->next);
		nfs_list_remove_request(req);
		nfs_inode_remove_request(req);
		dec_zone_page_state(req->wb_page, NR_UNSTABLE_NFS);
		nfs_clear_page_writeback(req);
	}
}

/*
 * nfs_scan_dirty - Scan an inode for dirty requests
 * @inode: NFS inode to scan
 * @dst: destination list
 * @idx_start: lower bound of page->index to scan.
 * @npages: idx_start + npages sets the upper bound to scan.
 *
 * Moves requests from the inode's dirty page list.
 * The requests are *not* checked to ensure that they form a contiguous set.
 */
static int
nfs_scan_dirty(struct inode *inode, struct list_head *dst, unsigned long idx_start, unsigned int npages)
{
	struct nfs_inode *nfsi = NFS_I(inode);
	int res = 0;

	if (nfsi->ndirty != 0) {
		res = nfs_scan_lock_dirty(nfsi, dst, idx_start, npages);
		nfsi->ndirty -= res;
		if ((nfsi->ndirty == 0) != list_empty(&nfsi->dirty))
			printk(KERN_ERR "NFS: desynchronized value of nfs_i.ndirty.\n");
	}
	return res;
}

#if defined(CONFIG_NFS_V3) || defined(CONFIG_NFS_V4)
/*
 * nfs_scan_commit - Scan an inode for commit requests
 * @inode: NFS inode to scan
 * @dst: destination list
 * @idx_start: lower bound of page->index to scan.
 * @npages: idx_start + npages sets the upper bound to scan.
 *
 * Moves requests from the inode's 'commit' request list.
 * The requests are *not* checked to ensure that they form a contiguous set.
 */
static int
nfs_scan_commit(struct inode *inode, struct list_head *dst, unsigned long idx_start, unsigned int npages)
{
	struct nfs_inode *nfsi = NFS_I(inode);
	int res = 0;

	if (nfsi->ncommit != 0) {
		res = nfs_scan_list(nfsi, &nfsi->commit, dst, idx_start, npages);
		nfsi->ncommit -= res;
		if ((nfsi->ncommit == 0) != list_empty(&nfsi->commit))
			printk(KERN_ERR "NFS: desynchronized value of nfs_i.ncommit.\n");
	}
	return res;
}
#else
static inline int nfs_scan_commit(struct inode *inode, struct list_head *dst, unsigned long idx_start, unsigned int npages)
{
	return 0;
}
#endif

static int nfs_wait_on_write_congestion(struct address_space *mapping, int intr)
{
	struct backing_dev_info *bdi = mapping->backing_dev_info;
	DEFINE_WAIT(wait);
	int ret = 0;

	might_sleep();

	if (!bdi_write_congested(bdi))
		return 0;

	nfs_inc_stats(mapping->host, NFSIOS_CONGESTIONWAIT);

	if (intr) {
		struct rpc_clnt *clnt = NFS_CLIENT(mapping->host);
		sigset_t oldset;

		rpc_clnt_sigmask(clnt, &oldset);
		prepare_to_wait(&nfs_write_congestion, &wait, TASK_INTERRUPTIBLE);
		if (bdi_write_congested(bdi)) {
			if (signalled())
				ret = -ERESTARTSYS;
			else
				schedule();
		}
		rpc_clnt_sigunmask(clnt, &oldset);
	} else {
		prepare_to_wait(&nfs_write_congestion, &wait, TASK_UNINTERRUPTIBLE);
		if (bdi_write_congested(bdi))
			schedule();
	}
	finish_wait(&nfs_write_congestion, &wait);
	return ret;
}

/*
 * Try to update any existing write request, or create one if there is none.
 * In order to match, the request's credentials must match those of
 * the calling process.
 *
 * Note: Should always be called with the Page Lock held!
 */
static struct nfs_page *nfs_update_request(struct nfs_open_context *ctx,
		struct inode *inode, struct page *page,
		unsigned int offset, unsigned int bytes)
{
	struct nfs_server *server = NFS_SERVER(inode);
	struct nfs_inode *nfsi = NFS_I(inode);
	struct nfs_page		*req, *new = NULL;
	unsigned long		rqend, end;

	end = offset + bytes;

	if (nfs_wait_on_write_congestion(page->mapping, server->flags & NFS_MOUNT_INTR))
		return ERR_PTR(-ERESTARTSYS);
	for (;;) {
		/* Loop over all inode entries and see if we find
		 * a request for the page we wish to update
		 */
		spin_lock(&nfsi->req_lock);
		req = _nfs_find_request(inode, page->index);
		if (req) {
			if (!nfs_lock_request_dontget(req)) {
				int error;

				spin_unlock(&nfsi->req_lock);
				error = nfs_wait_on_request(req);
				nfs_release_request(req);
				if (error < 0) {
					if (new)
						nfs_release_request(new);
					return ERR_PTR(error);
				}
				continue;
			}
			spin_unlock(&nfsi->req_lock);
			if (new)
				nfs_release_request(new);
			break;
		}

		if (new) {
			int error;

			nfs_lock_request_dontget(new);
			error = nfs_inode_add_request(inode, new);
			if (error) {
				spin_unlock(&nfsi->req_lock);
				nfs_unlock_request(new);
				return ERR_PTR(error);
			}
			spin_unlock(&nfsi->req_lock);
			nfs_mark_request_dirty(new);
			return new;
		}
		spin_unlock(&nfsi->req_lock);

		new = nfs_create_request(ctx, inode, page, offset, bytes);
		if (IS_ERR(new))
			return new;
	}

	/* We have a request for our page.
	 * If the creds don't match, or the
	 * page addresses don't match,
	 * tell the caller to wait on the conflicting
	 * request.
	 */
	rqend = req->wb_offset + req->wb_bytes;
	if (req->wb_context != ctx
	    || req->wb_page != page
	    || !nfs_dirty_request(req)
	    || offset > rqend || end < req->wb_offset) {
		nfs_unlock_request(req);
		return ERR_PTR(-EBUSY);
	}

	/* Okay, the request matches. Update the region */
	if (offset < req->wb_offset) {
		req->wb_offset = offset;
		req->wb_pgbase = offset;
		req->wb_bytes = rqend - req->wb_offset;
	}

	if (end > rqend)
		req->wb_bytes = end - req->wb_offset;

	return req;
}

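/*
 * Worked example of the merge arithmetic above (illustrative): an
 * existing request with wb_offset = 1000 and wb_bytes = 500 has
 * rqend = 1500. An overlapping update with offset = 1400 and
 * bytes = 300 (end = 1700) passes the compatibility test, leaves
 * wb_offset untouched, and grows the request to
 * wb_bytes = 1700 - 1000 = 700. A disjoint update, e.g. one at
 * offset = 2000, would instead return -EBUSY and force a flush.
 */
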
int nfs_flush_incompatible(struct file *file, struct page *page)
{
	struct nfs_open_context *ctx = (struct nfs_open_context *)file->private_data;
	struct inode	*inode = page->mapping->host;
	struct nfs_page	*req;
	int		status = 0;
	/*
	 * Look for a request corresponding to this page. If there
	 * is one, and it belongs to another file, we flush it out
	 * before we try to copy anything into the page. Do this
	 * due to the lack of an ACCESS-type call in NFSv2.
	 * Also do the same if we find a request from an existing
	 * dropped page.
	 */
	req = nfs_find_request(inode, page->index);
	if (req) {
		if (req->wb_page != page || ctx != req->wb_context)
			status = nfs_wb_page(inode, page);
		nfs_release_request(req);
	}
	return (status < 0) ? status : 0;
}

/*
 * Update and possibly write a cached page of an NFS file.
 *
 * XXX: Keep an eye on generic_file_read to make sure it doesn't do bad
 * things with a page scheduled for an RPC call (e.g. invalidate it).
 */
int nfs_updatepage(struct file *file, struct page *page,
		unsigned int offset, unsigned int count)
{
	struct nfs_open_context *ctx = (struct nfs_open_context *)file->private_data;
	struct inode	*inode = page->mapping->host;
	struct nfs_page	*req;
	int		status = 0;

	nfs_inc_stats(inode, NFSIOS_VFSUPDATEPAGE);

	dprintk("NFS:      nfs_updatepage(%s/%s %d@%Ld)\n",
		file->f_dentry->d_parent->d_name.name,
		file->f_dentry->d_name.name, count,
		(long long)(page_offset(page) + offset));

	if (IS_SYNC(inode)) {
		status = nfs_writepage_sync(ctx, inode, page, offset, count, 0);
		if (status > 0) {
			if (offset == 0 && status == PAGE_CACHE_SIZE)
				SetPageUptodate(page);
			return 0;
		}
		return status;
	}

	/* If we're not using byte range locks, and we know the page
	 * is entirely in cache, it may be more efficient to avoid
	 * fragmenting write requests.
	 */
	if (PageUptodate(page) && inode->i_flock == NULL && !(file->f_mode & O_SYNC)) {
		loff_t end_offs = i_size_read(inode) - 1;
		unsigned long end_index = end_offs >> PAGE_CACHE_SHIFT;

		count += offset;
		offset = 0;
		if (unlikely(end_offs < 0)) {
			/* Do nothing */
		} else if (page->index == end_index) {
			unsigned int pglen;
			pglen = (unsigned int)(end_offs & (PAGE_CACHE_SIZE-1)) + 1;
			if (count < pglen)
				count = pglen;
		} else if (page->index < end_index)
			count = PAGE_CACHE_SIZE;
	}

	/*
	 * Try to find an NFS request corresponding to this page
	 * and update it.
	 * If the existing request cannot be updated, we must flush
	 * it out now.
	 */
	do {
		req = nfs_update_request(ctx, inode, page, offset, count);
		status = (IS_ERR(req)) ? PTR_ERR(req) : 0;
		if (status != -EBUSY)
			break;
		/* Request could not be updated. Flush it out and try again */
		status = nfs_wb_page(inode, page);
	} while (status >= 0);
	if (status < 0)
		goto done;

	status = 0;

	/* Update file length */
	nfs_grow_file(page, offset, count);
	/* Set the PG_uptodate flag? */
	nfs_mark_uptodate(page, req->wb_pgbase, req->wb_bytes);
	nfs_unlock_request(req);
done:
	dprintk("NFS:      nfs_updatepage returns %d (isize %Ld)\n",
			status, (long long)i_size_read(inode));
	if (status < 0)
		ClearPageUptodate(page);
	return status;
}

static void nfs_writepage_release(struct nfs_page *req)
{
	end_page_writeback(req->wb_page);

#if defined(CONFIG_NFS_V3) || defined(CONFIG_NFS_V4)
	if (!PageError(req->wb_page)) {
		if (NFS_NEED_RESCHED(req)) {
			nfs_mark_request_dirty(req);
			goto out;
		} else if (NFS_NEED_COMMIT(req)) {
			nfs_mark_request_commit(req);
			goto out;
		}
	}
	nfs_inode_remove_request(req);
out:
	nfs_clear_commit(req);
	nfs_clear_reschedule(req);
#else
	nfs_inode_remove_request(req);
#endif
	nfs_clear_page_writeback(req);
}

static inline int flush_task_priority(int how)
{
	switch (how & (FLUSH_HIGHPRI|FLUSH_LOWPRI)) {
		case FLUSH_HIGHPRI:
			return RPC_PRIORITY_HIGH;
		case FLUSH_LOWPRI:
			return RPC_PRIORITY_LOW;
	}
	return RPC_PRIORITY_NORMAL;
}

/*
 * Set up the argument/result storage required for the RPC call.
 */
static void nfs_write_rpcsetup(struct nfs_page *req,
		struct nfs_write_data *data,
		const struct rpc_call_ops *call_ops,
		unsigned int count, unsigned int offset,
		int how)
{
	struct inode		*inode;
	int flags;

	/* Set up the RPC argument and reply structs
	 * NB: take care not to mess about with data->commit et al. */

	data->req = req;
	data->inode = inode = req->wb_context->dentry->d_inode;
	data->cred = req->wb_context->cred;

	data->args.fh     = NFS_FH(inode);
	data->args.offset = req_offset(req) + offset;
	data->args.pgbase = req->wb_pgbase + offset;
	data->args.pages  = data->pagevec;
	data->args.count  = count;
	data->args.context = req->wb_context;

	data->res.fattr   = &data->fattr;
	data->res.count   = count;
	data->res.verf    = &data->verf;
	nfs_fattr_init(&data->fattr);

	/* Set up the initial task struct. */
	flags = (how & FLUSH_SYNC) ? 0 : RPC_TASK_ASYNC;
	rpc_init_task(&data->task, NFS_CLIENT(inode), flags, call_ops, data);
	NFS_PROTO(inode)->write_setup(data, how);

	data->task.tk_priority = flush_task_priority(how);
	data->task.tk_cookie = (unsigned long)inode;

	dprintk("NFS: %4d initiated write call (req %s/%Ld, %u bytes @ offset %Lu)\n",
		data->task.tk_pid,
		inode->i_sb->s_id,
		(long long)NFS_FILEID(inode),
		count,
		(unsigned long long)data->args.offset);
}

static void nfs_execute_write(struct nfs_write_data *data)
{
	struct rpc_clnt *clnt = NFS_CLIENT(data->inode);
	sigset_t oldset;

	rpc_clnt_sigmask(clnt, &oldset);
	lock_kernel();
	rpc_execute(&data->task);
	unlock_kernel();
	rpc_clnt_sigunmask(clnt, &oldset);
}

/*
 * Generate multiple small requests to write out a single
 * contiguous dirty area on one page.
 */
static int nfs_flush_multi(struct inode *inode, struct list_head *head, int how)
{
	struct nfs_page *req = nfs_list_entry(head->next);
	struct page *page = req->wb_page;
	struct nfs_write_data *data;
	size_t wsize = NFS_SERVER(inode)->wsize, nbytes;
	unsigned int offset;
	int requests = 0;
	LIST_HEAD(list);

	nfs_list_remove_request(req);

	nbytes = req->wb_bytes;
	do {
		size_t len = min(nbytes, wsize);

		data = nfs_writedata_alloc(len);
		if (!data)
			goto out_bad;
		list_add(&data->pages, &list);
		requests++;
		nbytes -= len;
	} while (nbytes != 0);
	atomic_set(&req->wb_complete, requests);

	ClearPageError(page);
	set_page_writeback(page);
	offset = 0;
	nbytes = req->wb_bytes;
	do {
		data = list_entry(list.next, struct nfs_write_data, pages);
		list_del_init(&data->pages);

		data->pagevec[0] = page;

		if (nbytes > wsize) {
			nfs_write_rpcsetup(req, data, &nfs_write_partial_ops,
					wsize, offset, how);
			offset += wsize;
			nbytes -= wsize;
		} else {
			nfs_write_rpcsetup(req, data, &nfs_write_partial_ops,
					nbytes, offset, how);
			nbytes = 0;
		}
		nfs_execute_write(data);
	} while (nbytes != 0);

	return 0;

out_bad:
	while (!list_empty(&list)) {
		data = list_entry(list.next, struct nfs_write_data, pages);
		list_del(&data->pages);
		nfs_writedata_free(data);
	}
	nfs_mark_request_dirty(req);
	nfs_clear_page_writeback(req);
	return -ENOMEM;
}

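/*
 * Worked example (illustrative): this path is taken when
 * wsize < PAGE_CACHE_SIZE. With wsize = 1024 and a request covering
 * wb_bytes = 2500 dirty bytes of one page, the first loop allocates
 * three nfs_write_data structures and the second issues three RPCs:
 * 1024 @ 0, 1024 @ 1024 and 452 @ 2048. wb_complete starts at 3, so
 * nfs_writepage_release() runs only once the last reply has arrived.
 */
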
/*
 * Create an RPC task for the given write request and kick it.
 * The page must have been locked by the caller.
 *
 * It may happen that the page we're passed is not marked dirty.
 * This is the case if nfs_updatepage detects a conflicting request
 * that has been written but not committed.
 */
static int nfs_flush_one(struct inode *inode, struct list_head *head, int how)
{
	struct nfs_page		*req;
	struct page		**pages;
	struct nfs_write_data	*data;
	unsigned int		count;

	data = nfs_writedata_alloc(NFS_SERVER(inode)->wsize);
	if (!data)
		goto out_bad;

	pages = data->pagevec;
	count = 0;
	while (!list_empty(head)) {
		req = nfs_list_entry(head->next);
		nfs_list_remove_request(req);
		nfs_list_add_request(req, &data->pages);
		ClearPageError(req->wb_page);
		set_page_writeback(req->wb_page);
		*pages++ = req->wb_page;
		count += req->wb_bytes;
	}
	req = nfs_list_entry(data->pages.next);

	/* Set up the argument struct */
	nfs_write_rpcsetup(req, data, &nfs_write_full_ops, count, 0, how);

	nfs_execute_write(data);
	return 0;
out_bad:
	while (!list_empty(head)) {
		struct nfs_page *req = nfs_list_entry(head->next);
		nfs_list_remove_request(req);
		nfs_mark_request_dirty(req);
		nfs_clear_page_writeback(req);
	}
	return -ENOMEM;
}

static int nfs_flush_list(struct inode *inode, struct list_head *head, int npages, int how)
{
	LIST_HEAD(one_request);
	int (*flush_one)(struct inode *, struct list_head *, int);
	struct nfs_page	*req;
	int wpages = NFS_SERVER(inode)->wpages;
	int wsize = NFS_SERVER(inode)->wsize;
	int error;

	flush_one = nfs_flush_one;
	if (wsize < PAGE_CACHE_SIZE)
		flush_one = nfs_flush_multi;
	/* For single writes, FLUSH_STABLE is more efficient */
	if (npages <= wpages && npages == NFS_I(inode)->npages
			&& nfs_list_entry(head->next)->wb_bytes <= wsize)
		how |= FLUSH_STABLE;

	do {
		nfs_coalesce_requests(head, &one_request, wpages);
		req = nfs_list_entry(one_request.next);
		error = flush_one(inode, &one_request, how);
		if (error < 0)
			goto out_err;
	} while (!list_empty(head));
	return 0;
out_err:
	while (!list_empty(head)) {
		req = nfs_list_entry(head->next);
		nfs_list_remove_request(req);
		nfs_mark_request_dirty(req);
		nfs_clear_page_writeback(req);
	}
	return error;
}

/*
 * Handle a write reply that flushed part of a page.
 */
static void nfs_writeback_done_partial(struct rpc_task *task, void *calldata)
{
	struct nfs_write_data	*data = calldata;
	struct nfs_page		*req = data->req;
	struct page		*page = req->wb_page;

	dprintk("NFS: write (%s/%Ld %d@%Ld)",
		req->wb_context->dentry->d_inode->i_sb->s_id,
		(long long)NFS_FILEID(req->wb_context->dentry->d_inode),
		req->wb_bytes,
		(long long)req_offset(req));

	if (nfs_writeback_done(task, data) != 0)
		return;

	if (task->tk_status < 0) {
		ClearPageUptodate(page);
		SetPageError(page);
		req->wb_context->error = task->tk_status;
		dprintk(", error = %d\n", task->tk_status);
	} else {
#if defined(CONFIG_NFS_V3) || defined(CONFIG_NFS_V4)
		if (data->verf.committed < NFS_FILE_SYNC) {
			if (!NFS_NEED_COMMIT(req)) {
				nfs_defer_commit(req);
				memcpy(&req->wb_verf, &data->verf, sizeof(req->wb_verf));
				dprintk(" defer commit\n");
			} else if (memcmp(&req->wb_verf, &data->verf, sizeof(req->wb_verf))) {
				nfs_defer_reschedule(req);
				dprintk(" server reboot detected\n");
			}
		} else
#endif
			dprintk(" OK\n");
	}

	if (atomic_dec_and_test(&req->wb_complete))
		nfs_writepage_release(req);
}

static const struct rpc_call_ops nfs_write_partial_ops = {
	.rpc_call_done = nfs_writeback_done_partial,
	.rpc_release = nfs_writedata_release,
};

/*
 * Handle a write reply that flushes a whole page.
 *
 * FIXME: There is an inherent race with invalidate_inode_pages and
 *	  writebacks since the page->count is kept > 1 for as long
 *	  as the page has a write request pending.
 */
static void nfs_writeback_done_full(struct rpc_task *task, void *calldata)
{
	struct nfs_write_data	*data = calldata;
	struct nfs_page		*req;
	struct page		*page;

	if (nfs_writeback_done(task, data) != 0)
		return;

	/* Update attributes as result of writeback. */
	while (!list_empty(&data->pages)) {
		req = nfs_list_entry(data->pages.next);
		nfs_list_remove_request(req);
		page = req->wb_page;

		dprintk("NFS: write (%s/%Ld %d@%Ld)",
			req->wb_context->dentry->d_inode->i_sb->s_id,
			(long long)NFS_FILEID(req->wb_context->dentry->d_inode),
			req->wb_bytes,
			(long long)req_offset(req));

		if (task->tk_status < 0) {
			ClearPageUptodate(page);
			SetPageError(page);
			req->wb_context->error = task->tk_status;
			end_page_writeback(page);
			nfs_inode_remove_request(req);
			dprintk(", error = %d\n", task->tk_status);
			goto next;
		}
		end_page_writeback(page);

#if defined(CONFIG_NFS_V3) || defined(CONFIG_NFS_V4)
		if (data->args.stable != NFS_UNSTABLE || data->verf.committed == NFS_FILE_SYNC) {
			nfs_inode_remove_request(req);
			dprintk(" OK\n");
			goto next;
		}
		memcpy(&req->wb_verf, &data->verf, sizeof(req->wb_verf));
		nfs_mark_request_commit(req);
		dprintk(" marked for commit\n");
#else
		nfs_inode_remove_request(req);
#endif
	next:
		nfs_clear_page_writeback(req);
	}
}

static const struct rpc_call_ops nfs_write_full_ops = {
	.rpc_call_done = nfs_writeback_done_full,
	.rpc_release = nfs_writedata_release,
};

/*
 * This function is called when the WRITE call is complete.
 */
int nfs_writeback_done(struct rpc_task *task, struct nfs_write_data *data)
{
	struct nfs_writeargs	*argp = &data->args;
	struct nfs_writeres	*resp = &data->res;
	int status;

	dprintk("NFS: %4d nfs_writeback_done (status %d)\n",
		task->tk_pid, task->tk_status);

	/*
	 * ->write_done will attempt to use post-op attributes to detect
	 * conflicting writes by other clients.  A strict interpretation
	 * of close-to-open would allow us to continue caching even if
	 * another writer had changed the file, but some applications
	 * depend on tighter cache coherency when writing.
	 */
	status = NFS_PROTO(data->inode)->write_done(task, data);
	if (status != 0)
		return status;
	nfs_add_stats(data->inode, NFSIOS_SERVERWRITTENBYTES, resp->count);

#if defined(CONFIG_NFS_V3) || defined(CONFIG_NFS_V4)
	if (resp->verf->committed < argp->stable && task->tk_status >= 0) {
		/* We tried a write call, but the server did not
		 * commit data to stable storage even though we
		 * requested it.
		 * Note: There is a known bug in Tru64 < 5.0 in which
		 *	 the server reports NFS_DATA_SYNC, but performs
		 *	 NFS_FILE_SYNC. We therefore implement this checking
		 *	 as a dprintk() in order to avoid filling syslog.
		 */
		static unsigned long	complain;

		if (time_before(complain, jiffies)) {
			dprintk("NFS: faulty NFS server %s:"
				" (committed = %d) != (stable = %d)\n",
				NFS_SERVER(data->inode)->nfs_client->cl_hostname,
				resp->verf->committed, argp->stable);
			complain = jiffies + 300 * HZ;
		}
	}
#endif
	/* Is this a short write? */
	if (task->tk_status >= 0 && resp->count < argp->count) {
		static unsigned long	complain;

		nfs_inc_stats(data->inode, NFSIOS_SHORTWRITE);

		/* Has the server at least made some progress? */
		if (resp->count != 0) {
			/* Was this an NFSv2 write or an NFSv3 stable write? */
			if (resp->verf->committed != NFS_UNSTABLE) {
				/* Resend from where the server left off */
				argp->offset += resp->count;
				argp->pgbase += resp->count;
				argp->count -= resp->count;
			} else {
				/* Resend as a stable write in order to avoid
				 * headaches in the case of a server crash.
				 */
				argp->stable = NFS_FILE_SYNC;
			}
			rpc_restart_call(task);
			return -EAGAIN;
		}
		if (time_before(complain, jiffies)) {
			printk(KERN_WARNING
			       "NFS: Server wrote zero bytes, expected %u.\n",
					argp->count);
			complain = jiffies + 300 * HZ;
		}
		/* Can't do anything about it except throw an error. */
		task->tk_status = -EIO;
	}
	return 0;
}

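/*
 * Worked example of the short-write resend above (illustrative): if we
 * asked for argp->count = 8192 bytes and the server wrote only
 * resp->count = 4096 of a stable (non-UNSTABLE) write, the arguments
 * become offset += 4096, pgbase += 4096, count = 4096, and the call is
 * restarted from the middle of the buffer. If the short write was
 * unstable, the whole range is instead resent as NFS_FILE_SYNC so that
 * a server crash cannot leave a hole that a later COMMIT would miss.
 */
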
#if defined(CONFIG_NFS_V3) || defined(CONFIG_NFS_V4)
void nfs_commit_release(void *wdata)
{
	nfs_commit_free(wdata);
}

/*
 * Set up the argument/result storage required for the RPC call.
 */
static void nfs_commit_rpcsetup(struct list_head *head,
		struct nfs_write_data *data,
		int how)
{
	struct nfs_page		*first;
	struct inode		*inode;
	int flags;

	/* Set up the RPC argument and reply structs
	 * NB: take care not to mess about with data->commit et al. */

	list_splice_init(head, &data->pages);
	first = nfs_list_entry(data->pages.next);
	inode = first->wb_context->dentry->d_inode;

	data->inode	  = inode;
	data->cred	  = first->wb_context->cred;

	data->args.fh     = NFS_FH(data->inode);
	/* Note: we always request a commit of the entire inode */
	data->args.offset = 0;
	data->args.count  = 0;
	data->res.count   = 0;
	data->res.fattr   = &data->fattr;
	data->res.verf    = &data->verf;
	nfs_fattr_init(&data->fattr);

	/* Set up the initial task struct. */
	flags = (how & FLUSH_SYNC) ? 0 : RPC_TASK_ASYNC;
	rpc_init_task(&data->task, NFS_CLIENT(inode), flags, &nfs_commit_ops, data);
	NFS_PROTO(inode)->commit_setup(data, how);

	data->task.tk_priority = flush_task_priority(how);
	data->task.tk_cookie = (unsigned long)inode;

	dprintk("NFS: %4d initiated commit call\n", data->task.tk_pid);
}

/*
 * Commit dirty pages
 */
static int
nfs_commit_list(struct inode *inode, struct list_head *head, int how)
{
	struct nfs_write_data	*data;
	struct nfs_page		*req;

	data = nfs_commit_alloc();

	if (!data)
		goto out_bad;

	/* Set up the argument struct */
	nfs_commit_rpcsetup(head, data, how);

	nfs_execute_write(data);
	return 0;
out_bad:
	while (!list_empty(head)) {
		req = nfs_list_entry(head->next);
		nfs_list_remove_request(req);
		nfs_mark_request_commit(req);
		dec_zone_page_state(req->wb_page, NR_UNSTABLE_NFS);
		nfs_clear_page_writeback(req);
	}
	return -ENOMEM;
}

/*
 * COMMIT call returned
 */
static void nfs_commit_done(struct rpc_task *task, void *calldata)
{
	struct nfs_write_data	*data = calldata;
	struct nfs_page		*req;

	dprintk("NFS: %4d nfs_commit_done (status %d)\n",
			task->tk_pid, task->tk_status);

	/* Call the NFS version-specific code */
	if (NFS_PROTO(data->inode)->commit_done(task, data) != 0)
		return;

	while (!list_empty(&data->pages)) {
		req = nfs_list_entry(data->pages.next);
		nfs_list_remove_request(req);
		dec_zone_page_state(req->wb_page, NR_UNSTABLE_NFS);

		dprintk("NFS: commit (%s/%Ld %d@%Ld)",
			req->wb_context->dentry->d_inode->i_sb->s_id,
			(long long)NFS_FILEID(req->wb_context->dentry->d_inode),
			req->wb_bytes,
			(long long)req_offset(req));
		if (task->tk_status < 0) {
			req->wb_context->error = task->tk_status;
			nfs_inode_remove_request(req);
			dprintk(", error = %d\n", task->tk_status);
			goto next;
		}

		/* Okay, COMMIT succeeded, apparently. Check the verifier
		 * returned by the server against all stored verfs. */
		if (!memcmp(req->wb_verf.verifier, data->verf.verifier, sizeof(data->verf.verifier))) {
			/* We have a match */
			nfs_inode_remove_request(req);
			dprintk(" OK\n");
			goto next;
		}
		/* We have a mismatch. Write the page again */
		dprintk(" mismatch\n");
		nfs_mark_request_dirty(req);
	next:
		nfs_clear_page_writeback(req);
	}
}

static const struct rpc_call_ops nfs_commit_ops = {
	.rpc_call_done = nfs_commit_done,
	.rpc_release = nfs_commit_release,
};
#else
static inline int nfs_commit_list(struct inode *inode, struct list_head *head, int how)
{
	return 0;
}
#endif

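/*
 * Note on the verifier check above (illustrative summary): the write
 * verifier is an opaque cookie that an NFSv3/v4 server changes when it
 * reboots and may have lost uncommitted data.  If the verifier returned
 * by COMMIT differs from the one stored with the request at WRITE time,
 * the unstable data may be gone, so the page is simply marked dirty and
 * written again rather than being completed.
 */
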
static int nfs_flush_inode(struct inode *inode, unsigned long idx_start,
			   unsigned int npages, int how)
{
	struct nfs_inode *nfsi = NFS_I(inode);
	LIST_HEAD(head);
	int res;

	spin_lock(&nfsi->req_lock);
	res = nfs_scan_dirty(inode, &head, idx_start, npages);
	spin_unlock(&nfsi->req_lock);
	if (res) {
		int error = nfs_flush_list(inode, &head, res, how);
		if (error < 0)
			return error;
	}
	return res;
}

#if defined(CONFIG_NFS_V3) || defined(CONFIG_NFS_V4)
int nfs_commit_inode(struct inode *inode, int how)
{
	struct nfs_inode *nfsi = NFS_I(inode);
	LIST_HEAD(head);
	int res;

	spin_lock(&nfsi->req_lock);
	res = nfs_scan_commit(inode, &head, 0, 0);
	spin_unlock(&nfsi->req_lock);
	if (res) {
		int error = nfs_commit_list(inode, &head, how);
		if (error < 0)
			return error;
	}
	return res;
}
#endif

int nfs_sync_inode_wait(struct inode *inode, unsigned long idx_start,
		unsigned int npages, int how)
{
	struct nfs_inode *nfsi = NFS_I(inode);
	LIST_HEAD(head);
	int nocommit = how & FLUSH_NOCOMMIT;
	int pages, ret;

	how &= ~FLUSH_NOCOMMIT;
	spin_lock(&nfsi->req_lock);
	do {
		ret = nfs_wait_on_requests_locked(inode, idx_start, npages);
		if (ret != 0)
			continue;
		pages = nfs_scan_dirty(inode, &head, idx_start, npages);
		if (pages != 0) {
			spin_unlock(&nfsi->req_lock);
			if (how & FLUSH_INVALIDATE)
				nfs_cancel_dirty_list(&head);
			else
				ret = nfs_flush_list(inode, &head, pages, how);
			spin_lock(&nfsi->req_lock);
			continue;
		}
		if (nocommit)
			break;
		pages = nfs_scan_commit(inode, &head, idx_start, npages);
		if (pages == 0)
			break;
		if (how & FLUSH_INVALIDATE) {
			spin_unlock(&nfsi->req_lock);
			nfs_cancel_commit_list(&head);
			spin_lock(&nfsi->req_lock);
			continue;
		}
		pages += nfs_scan_commit(inode, &head, 0, 0);
		spin_unlock(&nfsi->req_lock);
		ret = nfs_commit_list(inode, &head, how);
		spin_lock(&nfsi->req_lock);
	} while (ret >= 0);
	spin_unlock(&nfsi->req_lock);
	return ret;
}

int __init nfs_init_writepagecache(void)
{
	nfs_wdata_cachep = kmem_cache_create("nfs_write_data",
					     sizeof(struct nfs_write_data),
					     0, SLAB_HWCACHE_ALIGN,
					     NULL, NULL);
	if (nfs_wdata_cachep == NULL)
		return -ENOMEM;

	nfs_wdata_mempool = mempool_create_slab_pool(MIN_POOL_WRITE,
						     nfs_wdata_cachep);
	if (nfs_wdata_mempool == NULL)
		return -ENOMEM;

	nfs_commit_mempool = mempool_create_slab_pool(MIN_POOL_COMMIT,
						      nfs_wdata_cachep);
	if (nfs_commit_mempool == NULL)
		return -ENOMEM;

	return 0;
}

void nfs_destroy_writepagecache(void)
{
	mempool_destroy(nfs_commit_mempool);
	mempool_destroy(nfs_wdata_mempool);
	kmem_cache_destroy(nfs_wdata_cachep);
}