/*
 * linux/fs/nfs/write.c
 *
 * Writing file data over NFS.
 *
 * We do it like this: When a (user) process wishes to write data to an
 * NFS file, a write request is allocated that contains the RPC task data
 * plus some info on the page to be written, and added to the inode's
 * write chain. If the process writes past the end of the page, an async
 * RPC call to write the page is scheduled immediately; otherwise, the call
 * is delayed for a few seconds.
 *
 * Just like readahead, no async I/O is performed if wsize < PAGE_SIZE.
 *
 * Write requests are kept on the inode's writeback list. Each entry in
 * that list references the page (portion) to be written. When the
 * cache timeout has expired, the RPC task is woken up, and tries to
 * lock the page. As soon as it manages to do so, the request is moved
 * from the writeback list to the writelock list.
 *
 * Note: we must make sure never to confuse the inode passed in the
 * write_page request with the one in page->inode. As far as I understand
 * it, these are different when doing a swap-out.
 *
 * To understand everything that goes on here and in the NFS read code,
 * one should be aware that a page is locked in exactly one of the following
 * cases:
 *
 *  - A write request is in progress.
 *  - A user process is in generic_file_write/nfs_update_page
 *  - A user process is in generic_file_read
 *
 * Also note that because of the way pages are invalidated in
 * nfs_revalidate_inode, the following assertions hold:
 *
 *  - If a page is dirty, there will be no read requests (a page will
 *    not be re-read unless invalidated by nfs_revalidate_inode).
 *  - If the page is not uptodate, there will be no pending write
 *    requests, and no process will be in nfs_update_page.
 *
 * FIXME: Interaction with the vmscan routines is not optimal yet.
 * Either vmscan must be made nfs-savvy, or we need a different page
 * reclaim concept that supports something like FS-independent
 * buffer_heads with a b_ops-> field.
 *
 * Copyright (C) 1996, 1997, Olaf Kirch <okir@monad.swb.de>
 */
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/file.h>
#include <linux/mpage.h>
#include <linux/writeback.h>

#include <linux/sunrpc/clnt.h>
#include <linux/nfs_fs.h>
#include <linux/nfs_mount.h>
#include <linux/nfs_page.h>
#include <asm/uaccess.h>
#include <linux/smp_lock.h>

#include "delegation.h"
#include "iostat.h"
#define NFSDBG_FACILITY		NFSDBG_PAGECACHE

#define MIN_POOL_WRITE		(32)
#define MIN_POOL_COMMIT		(4)
/*
 * Local function declarations
 */
static struct nfs_page * nfs_update_request(struct nfs_open_context*,
					    struct inode *,
					    struct page *,
					    unsigned int, unsigned int);
static int nfs_wait_on_write_congestion(struct address_space *, int);
static int nfs_wait_on_requests(struct inode *, unsigned long, unsigned int);
static int nfs_flush_inode(struct inode *inode, unsigned long idx_start,
			   unsigned int npages, int how);
static const struct rpc_call_ops nfs_write_partial_ops;
static const struct rpc_call_ops nfs_write_full_ops;
static const struct rpc_call_ops nfs_commit_ops;

static kmem_cache_t *nfs_wdata_cachep;
static mempool_t *nfs_wdata_mempool;
static mempool_t *nfs_commit_mempool;

static DECLARE_WAIT_QUEUE_HEAD(nfs_write_congestion);
struct nfs_write_data *nfs_commit_alloc(void)
{
	struct nfs_write_data *p = mempool_alloc(nfs_commit_mempool, SLAB_NOFS);

	if (p) {
		memset(p, 0, sizeof(*p));
		INIT_LIST_HEAD(&p->pages);
	}
	return p;
}
void nfs_commit_free(struct nfs_write_data *p)
{
	if (p && (p->pagevec != &p->page_array[0]))
		kfree(p->pagevec);
	mempool_free(p, nfs_commit_mempool);
}
struct nfs_write_data *nfs_writedata_alloc(size_t len)
{
	unsigned int pagecount = (len + PAGE_SIZE - 1) >> PAGE_SHIFT;
	struct nfs_write_data *p = mempool_alloc(nfs_wdata_mempool, SLAB_NOFS);

	if (p) {
		memset(p, 0, sizeof(*p));
		INIT_LIST_HEAD(&p->pages);
		p->npages = pagecount;
		if (pagecount <= ARRAY_SIZE(p->page_array))
			p->pagevec = p->page_array;
		else {
			p->pagevec = kcalloc(pagecount, sizeof(struct page *), GFP_NOFS);
			if (!p->pagevec) {
				mempool_free(p, nfs_wdata_mempool);
				p = NULL;
			}
		}
	}
	return p;
}
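/*
 * Sizing example (illustrative, assuming PAGE_SIZE == 4096): a request
 * of len = 9000 bytes gives pagecount = (9000 + 4095) >> PAGE_SHIFT = 3.
 * Small requests are served from the embedded page_array[]; only larger
 * ones pay for a separately kcalloc()ed page vector.
 */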
static void nfs_writedata_free(struct nfs_write_data *p)
{
	if (p && (p->pagevec != &p->page_array[0]))
		kfree(p->pagevec);
	mempool_free(p, nfs_wdata_mempool);
}
void nfs_writedata_release(void *wdata)
{
	nfs_writedata_free(wdata);
}
/* Adjust the file length if we're writing beyond the end */
static void nfs_grow_file(struct page *page, unsigned int offset, unsigned int count)
{
	struct inode *inode = page->mapping->host;
	loff_t end, i_size = i_size_read(inode);
	unsigned long end_index = (i_size - 1) >> PAGE_CACHE_SHIFT;

	if (i_size > 0 && page->index < end_index)
		return;
	end = ((loff_t)page->index << PAGE_CACHE_SHIFT) + ((loff_t)offset + count);
	if (i_size >= end)
		return;
	nfs_inc_stats(inode, NFSIOS_EXTENDWRITE);
	i_size_write(inode, end);
}
/* We can set the PG_uptodate flag if we see that a write request
 * covers the full page.
 */
static void nfs_mark_uptodate(struct page *page, unsigned int base, unsigned int count)
{
	loff_t end_offs;

	if (PageUptodate(page))
		return;
	if (base != 0)
		return;
	if (count == PAGE_CACHE_SIZE) {
		SetPageUptodate(page);
		return;
	}

	end_offs = i_size_read(page->mapping->host) - 1;
	if (end_offs < 0)
		return;
	/* Is this the last page? */
	if (page->index != (unsigned long)(end_offs >> PAGE_CACHE_SHIFT))
		return;
	/* This is the last page: set PG_uptodate if we cover the entire
	 * extent of the data, then zero the rest of the page.
	 */
	if (count == (unsigned int)(end_offs & (PAGE_CACHE_SIZE - 1)) + 1) {
		memclear_highpage_flush(page, count, PAGE_CACHE_SIZE - count);
		SetPageUptodate(page);
	}
}
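/*
 * Worked example for the last-page test above (PAGE_CACHE_SIZE == 4096):
 * with i_size = 10000, end_offs = 9999 and the last page has index 2;
 * the valid bytes on that page number (9999 & 4095) + 1 = 1808. A write
 * with base = 0, count = 1808 therefore covers the whole valid extent,
 * so the remainder of the page is zeroed and PG_uptodate can be set.
 */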
/*
 * Write a page synchronously.
 * Offset is the data offset within the page.
 */
static int nfs_writepage_sync(struct nfs_open_context *ctx, struct inode *inode,
		struct page *page, unsigned int offset, unsigned int count,
		int how)
{
	unsigned int wsize = NFS_SERVER(inode)->wsize;
	int result, written = 0;
	struct nfs_write_data *wdata;

	wdata = nfs_writedata_alloc(wsize);
	if (!wdata)
		return -ENOMEM;

	wdata->flags = how;
	wdata->cred = ctx->cred;
	wdata->inode = inode;
	wdata->args.fh = NFS_FH(inode);
	wdata->args.context = ctx;
	wdata->args.pages = &page;
	wdata->args.stable = NFS_FILE_SYNC;
	wdata->args.pgbase = offset;
	wdata->args.count = wsize;
	wdata->res.fattr = &wdata->fattr;
	wdata->res.verf = &wdata->verf;

	dprintk("NFS: nfs_writepage_sync(%s/%Ld %d@%Ld)\n",
		inode->i_sb->s_id,
		(long long)NFS_FILEID(inode),
		count, (long long)(page_offset(page) + offset));

	set_page_writeback(page);
	nfs_begin_data_update(inode);
	do {
		if (count < wsize)
			wdata->args.count = count;
		wdata->args.offset = page_offset(page) + wdata->args.pgbase;

		result = NFS_PROTO(inode)->write(wdata);

		if (result < 0) {
			/* Must mark the page invalid after I/O error */
			ClearPageUptodate(page);
			goto io_error;
		}
		if (result < wdata->args.count)
			printk(KERN_WARNING "NFS: short write, count=%u, result=%d\n",
					wdata->args.count, result);
		wdata->args.offset += result;
		wdata->args.pgbase += result;
		written += result;
		count -= result;
		nfs_add_stats(inode, NFSIOS_SERVERWRITTENBYTES, result);
	} while (count);
	/* Update file length */
	nfs_grow_file(page, offset, written);
	/* Set the PG_uptodate flag? */
	nfs_mark_uptodate(page, offset, written);

	if (PageError(page))
		ClearPageError(page);

io_error:
	nfs_end_data_update(inode);
	end_page_writeback(page);
	nfs_writedata_free(wdata);
	return written ? written : result;
}
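/*
 * Illustration of the loop above: with wsize = 1024 and count = 4096,
 * four stable (NFS_FILE_SYNC) WRITEs of 1024 bytes are issued in turn,
 * advancing args.offset/args.pgbase by each server-confirmed result,
 * so a short reply simply continues from where the server left off.
 */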
static int nfs_writepage_async(struct nfs_open_context *ctx,
		struct inode *inode, struct page *page,
		unsigned int offset, unsigned int count)
{
	struct nfs_page *req;

	req = nfs_update_request(ctx, inode, page, offset, count);
	if (IS_ERR(req))
		return PTR_ERR(req);
	/* Update file length */
	nfs_grow_file(page, offset, count);
	/* Set the PG_uptodate flag? */
	nfs_mark_uptodate(page, offset, count);
	nfs_unlock_request(req);
	return 0;
}
static int wb_priority(struct writeback_control *wbc)
{
	if (wbc->for_reclaim)
		return FLUSH_HIGHPRI;
	if (wbc->for_kupdate)
		return FLUSH_LOWPRI;
	return 0;
}
/*
 * Write an mmapped page to the server.
 */
int nfs_writepage(struct page *page, struct writeback_control *wbc)
{
	struct nfs_open_context *ctx;
	struct inode *inode = page->mapping->host;
	unsigned long end_index;
	unsigned offset = PAGE_CACHE_SIZE;
	loff_t i_size = i_size_read(inode);
	int inode_referenced = 0;
	int priority = wb_priority(wbc);
	int err;

	nfs_inc_stats(inode, NFSIOS_VFSWRITEPAGE);
	nfs_add_stats(inode, NFSIOS_WRITEPAGES, 1);

	/*
	 * Note: We need to ensure that we have a reference to the inode
	 *       if we are to do asynchronous writes. If not, waiting
	 *       in nfs_wait_on_request() may deadlock with clear_inode().
	 *
	 *       If igrab() fails here, then it is in any case safe to
	 *       call nfs_wb_page(), since there will be no pending writes.
	 */
	if (igrab(inode) != 0)
		inode_referenced = 1;
	end_index = i_size >> PAGE_CACHE_SHIFT;

	/* Ensure we've flushed out any previous writes */
	nfs_wb_page_priority(inode, page, priority);

	/* easy case */
	if (page->index < end_index)
		goto do_it;
	/* things got complicated... */
	offset = i_size & (PAGE_CACHE_SIZE-1);

	/* OK, are we completely out? */
	err = 0; /* potential race with truncate - ignore */
	if (page->index >= end_index+1 || !offset)
		goto out;
do_it:
	ctx = nfs_find_open_context(inode, NULL, FMODE_WRITE);
	if (ctx == NULL) {
		err = -EBADF;
		goto out;
	}
	lock_kernel();
	if (!IS_SYNC(inode) && inode_referenced) {
		err = nfs_writepage_async(ctx, inode, page, 0, offset);
		if (!wbc->for_writepages)
			nfs_flush_inode(inode, 0, 0, wb_priority(wbc));
	} else {
		err = nfs_writepage_sync(ctx, inode, page, 0,
						offset, priority);
		if (err >= 0) {
			if (err != offset)
				redirty_page_for_writepage(wbc, page);
			err = 0;
		}
	}
	unlock_kernel();
	put_nfs_open_context(ctx);
out:
	unlock_page(page);
	if (inode_referenced)
		iput(inode);
	return err;
}
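/*
 * Boundary example for the logic above (PAGE_CACHE_SIZE == 4096): with
 * i_size = 10000, end_index = 2, so pages 0 and 1 take the "easy case"
 * and are written whole, while page 2 is trimmed to
 * offset = 10000 & 4095 = 1808 valid bytes. Pages with index >= 3, or a
 * final page whose offset computes to 0, lie wholly beyond EOF and are
 * skipped as a benign race with truncate.
 */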
/*
 * Note: causes nfs_update_request() to block on the assumption
 *	 that the writeback is generated due to memory pressure.
 */
int nfs_writepages(struct address_space *mapping, struct writeback_control *wbc)
{
	struct backing_dev_info *bdi = mapping->backing_dev_info;
	struct inode *inode = mapping->host;
	int err;

	nfs_inc_stats(inode, NFSIOS_VFSWRITEPAGES);

	err = generic_writepages(mapping, wbc);
	if (err)
		return err;
	while (test_and_set_bit(BDI_write_congested, &bdi->state) != 0) {
		if (wbc->nonblocking)
			return 0;
		nfs_wait_on_write_congestion(mapping, 0);
	}
	err = nfs_flush_inode(inode, 0, 0, wb_priority(wbc));
	if (err < 0)
		goto out;
	nfs_add_stats(inode, NFSIOS_WRITEPAGES, err);
	wbc->nr_to_write -= err;
	if (!wbc->nonblocking && wbc->sync_mode == WB_SYNC_ALL) {
		err = nfs_wait_on_requests(inode, 0, 0);
		if (err < 0)
			goto out;
	}
	err = nfs_commit_inode(inode, wb_priority(wbc));
	if (err > 0) {
		wbc->nr_to_write -= err;
		err = 0;
	}
out:
	clear_bit(BDI_write_congested, &bdi->state);
	wake_up_all(&nfs_write_congestion);
	writeback_congestion_end();
	return err;
}
/*
 * Insert a write request into an inode
 */
static int nfs_inode_add_request(struct inode *inode, struct nfs_page *req)
{
	struct nfs_inode *nfsi = NFS_I(inode);
	int error;

	error = radix_tree_insert(&nfsi->nfs_page_tree, req->wb_index, req);
	BUG_ON(error == -EEXIST);
	if (error)
		return error;
	if (!nfsi->npages) {
		igrab(inode);
		nfs_begin_data_update(inode);
		if (nfs_have_delegation(inode, FMODE_WRITE))
			nfsi->change_attr++;
	}
	SetPagePrivate(req->wb_page);
	nfsi->npages++;
	atomic_inc(&req->wb_count);
	return 0;
}
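/*
 * Note: the radix tree holds its own reference (wb_count), taken above,
 * in addition to the caller's, and the first request added also pins
 * the inode via igrab() so that asynchronous writeback cannot race with
 * clear_inode(). Both references are dropped again in
 * nfs_inode_remove_request().
 */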
/*
 * Remove a write request from an inode
 */
static void nfs_inode_remove_request(struct nfs_page *req)
{
	struct inode *inode = req->wb_context->dentry->d_inode;
	struct nfs_inode *nfsi = NFS_I(inode);

	BUG_ON(!NFS_WBACK_BUSY(req));

	spin_lock(&nfsi->req_lock);
	ClearPagePrivate(req->wb_page);
	radix_tree_delete(&nfsi->nfs_page_tree, req->wb_index);
	nfsi->npages--;
	if (!nfsi->npages) {
		spin_unlock(&nfsi->req_lock);
		nfs_end_data_update(inode);
		iput(inode);
	} else
		spin_unlock(&nfsi->req_lock);
	nfs_clear_request(req);
	nfs_release_request(req);
}
/*
 * Find a request
 */
static inline struct nfs_page *
_nfs_find_request(struct inode *inode, unsigned long index)
{
	struct nfs_inode *nfsi = NFS_I(inode);
	struct nfs_page *req;

	req = (struct nfs_page *)radix_tree_lookup(&nfsi->nfs_page_tree, index);
	if (req)
		atomic_inc(&req->wb_count);
	return req;
}
static struct nfs_page *
nfs_find_request(struct inode *inode, unsigned long index)
{
	struct nfs_page *req;
	struct nfs_inode *nfsi = NFS_I(inode);

	spin_lock(&nfsi->req_lock);
	req = _nfs_find_request(inode, index);
	spin_unlock(&nfsi->req_lock);
	return req;
}
/*
 * Add a request to the inode's dirty list.
 */
static void
nfs_mark_request_dirty(struct nfs_page *req)
{
	struct inode *inode = req->wb_context->dentry->d_inode;
	struct nfs_inode *nfsi = NFS_I(inode);

	spin_lock(&nfsi->req_lock);
	radix_tree_tag_set(&nfsi->nfs_page_tree,
			req->wb_index, NFS_PAGE_TAG_DIRTY);
	nfs_list_add_request(req, &nfsi->dirty);
	nfsi->ndirty++;
	spin_unlock(&nfsi->req_lock);
	inc_zone_page_state(req->wb_page, NR_FILE_DIRTY);
	mark_inode_dirty(inode);
}
/*
 * Check if a request is dirty
 */
static inline int
nfs_dirty_request(struct nfs_page *req)
{
	struct nfs_inode *nfsi = NFS_I(req->wb_context->dentry->d_inode);
	return !list_empty(&req->wb_list) && req->wb_list_head == &nfsi->dirty;
}
#if defined(CONFIG_NFS_V3) || defined(CONFIG_NFS_V4)
/*
 * Add a request to the inode's commit list.
 */
static void
nfs_mark_request_commit(struct nfs_page *req)
{
	struct inode *inode = req->wb_context->dentry->d_inode;
	struct nfs_inode *nfsi = NFS_I(inode);

	spin_lock(&nfsi->req_lock);
	nfs_list_add_request(req, &nfsi->commit);
	nfsi->ncommit++;
	spin_unlock(&nfsi->req_lock);
	inc_zone_page_state(req->wb_page, NR_UNSTABLE_NFS);
	mark_inode_dirty(inode);
}
#endif
/*
 * Wait for a request to complete.
 *
 * Interruptible by signals only if mounted with intr flag.
 */
static int nfs_wait_on_requests_locked(struct inode *inode, unsigned long idx_start, unsigned int npages)
{
	struct nfs_inode *nfsi = NFS_I(inode);
	struct nfs_page *req;
	unsigned long idx_end, next;
	unsigned int res = 0;
	int error;

	if (npages == 0)
		idx_end = ~0;
	else
		idx_end = idx_start + npages - 1;

	next = idx_start;
	while (radix_tree_gang_lookup_tag(&nfsi->nfs_page_tree, (void **)&req, next, 1, NFS_PAGE_TAG_WRITEBACK)) {
		if (req->wb_index > idx_end)
			break;

		next = req->wb_index + 1;
		BUG_ON(!NFS_WBACK_BUSY(req));

		atomic_inc(&req->wb_count);
		spin_unlock(&nfsi->req_lock);
		error = nfs_wait_on_request(req);
		nfs_release_request(req);
		spin_lock(&nfsi->req_lock);
		if (error < 0)
			return error;
		res++;
	}
	return res;
}
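/*
 * Note on the loop above: nfs_wait_on_request() sleeps, so req_lock is
 * dropped around it. The extra wb_count reference keeps the request
 * alive across the unlocked window, and the gang lookup restarts from
 * 'next' rather than trusting any state gathered before the lock was
 * released.
 */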
static int nfs_wait_on_requests(struct inode *inode, unsigned long idx_start, unsigned int npages)
{
	struct nfs_inode *nfsi = NFS_I(inode);
	int ret;

	spin_lock(&nfsi->req_lock);
	ret = nfs_wait_on_requests_locked(inode, idx_start, npages);
	spin_unlock(&nfsi->req_lock);
	return ret;
}
static void nfs_cancel_dirty_list(struct list_head *head)
{
	struct nfs_page *req;

	while (!list_empty(head)) {
		req = nfs_list_entry(head->next);
		nfs_list_remove_request(req);
		nfs_inode_remove_request(req);
		nfs_clear_page_writeback(req);
	}
}
static void nfs_cancel_commit_list(struct list_head *head)
{
	struct nfs_page *req;

	while (!list_empty(head)) {
		req = nfs_list_entry(head->next);
		nfs_list_remove_request(req);
		nfs_inode_remove_request(req);
		dec_zone_page_state(req->wb_page, NR_UNSTABLE_NFS);
		nfs_clear_page_writeback(req);
	}
}
/*
 * nfs_scan_dirty - Scan an inode for dirty requests
 * @inode: NFS inode to scan
 * @dst: destination list
 * @idx_start: lower bound of page->index to scan.
 * @npages: idx_start + npages sets the upper bound to scan.
 *
 * Moves requests from the inode's dirty page list.
 * The requests are *not* checked to ensure that they form a contiguous set.
 */
static int
nfs_scan_dirty(struct inode *inode, struct list_head *dst, unsigned long idx_start, unsigned int npages)
{
	struct nfs_inode *nfsi = NFS_I(inode);
	int res = 0;

	if (nfsi->ndirty != 0) {
		res = nfs_scan_lock_dirty(nfsi, dst, idx_start, npages);
		nfsi->ndirty -= res;
		if ((nfsi->ndirty == 0) != list_empty(&nfsi->dirty))
			printk(KERN_ERR "NFS: desynchronized value of nfs_i.ndirty.\n");
	}
	return res;
}
#if defined(CONFIG_NFS_V3) || defined(CONFIG_NFS_V4)
/*
 * nfs_scan_commit - Scan an inode for commit requests
 * @inode: NFS inode to scan
 * @dst: destination list
 * @idx_start: lower bound of page->index to scan.
 * @npages: idx_start + npages sets the upper bound to scan.
 *
 * Moves requests from the inode's 'commit' request list.
 * The requests are *not* checked to ensure that they form a contiguous set.
 */
static int
nfs_scan_commit(struct inode *inode, struct list_head *dst, unsigned long idx_start, unsigned int npages)
{
	struct nfs_inode *nfsi = NFS_I(inode);
	int res = 0;

	if (nfsi->ncommit != 0) {
		res = nfs_scan_list(nfsi, &nfsi->commit, dst, idx_start, npages);
		nfsi->ncommit -= res;
		if ((nfsi->ncommit == 0) != list_empty(&nfsi->commit))
			printk(KERN_ERR "NFS: desynchronized value of nfs_i.ncommit.\n");
	}
	return res;
}
#else
static inline int nfs_scan_commit(struct inode *inode, struct list_head *dst, unsigned long idx_start, unsigned int npages)
{
	return 0;
}
#endif
static int nfs_wait_on_write_congestion(struct address_space *mapping, int intr)
{
	struct backing_dev_info *bdi = mapping->backing_dev_info;
	DEFINE_WAIT(wait);
	int ret = 0;

	might_sleep();

	if (!bdi_write_congested(bdi))
		return 0;

	nfs_inc_stats(mapping->host, NFSIOS_CONGESTIONWAIT);

	if (intr) {
		struct rpc_clnt *clnt = NFS_CLIENT(mapping->host);
		sigset_t oldset;

		rpc_clnt_sigmask(clnt, &oldset);
		prepare_to_wait(&nfs_write_congestion, &wait, TASK_INTERRUPTIBLE);
		if (bdi_write_congested(bdi)) {
			if (signalled())
				ret = -ERESTARTSYS;
			else
				schedule();
		}
		rpc_clnt_sigunmask(clnt, &oldset);
	} else {
		prepare_to_wait(&nfs_write_congestion, &wait, TASK_UNINTERRUPTIBLE);
		if (bdi_write_congested(bdi))
			schedule();
	}
	finish_wait(&nfs_write_congestion, &wait);
	return ret;
}
/*
 * Try to update any existing write request, or create one if there is none.
 * In order to match, the request's credentials must match those of
 * the calling process.
 *
 * Note: Should always be called with the Page Lock held!
 */
static struct nfs_page * nfs_update_request(struct nfs_open_context* ctx,
		struct inode *inode, struct page *page,
		unsigned int offset, unsigned int bytes)
{
	struct nfs_server *server = NFS_SERVER(inode);
	struct nfs_inode *nfsi = NFS_I(inode);
	struct nfs_page *req, *new = NULL;
	unsigned long rqend, end;

	end = offset + bytes;

	if (nfs_wait_on_write_congestion(page->mapping, server->flags & NFS_MOUNT_INTR))
		return ERR_PTR(-ERESTARTSYS);
	for (;;) {
		/* Loop over all inode entries and see if we find
		 * a request for the page we wish to update
		 */
		spin_lock(&nfsi->req_lock);
		req = _nfs_find_request(inode, page->index);
		if (req) {
			if (!nfs_lock_request_dontget(req)) {
				int error;

				spin_unlock(&nfsi->req_lock);
				error = nfs_wait_on_request(req);
				nfs_release_request(req);
				if (error < 0) {
					if (new)
						nfs_release_request(new);
					return ERR_PTR(error);
				}
				continue;
			}
			spin_unlock(&nfsi->req_lock);
			if (new)
				nfs_release_request(new);
			break;
		}

		if (new) {
			int error;

			nfs_lock_request_dontget(new);
			error = nfs_inode_add_request(inode, new);
			if (error) {
				spin_unlock(&nfsi->req_lock);
				nfs_unlock_request(new);
				return ERR_PTR(error);
			}
			spin_unlock(&nfsi->req_lock);
			nfs_mark_request_dirty(new);
			return new;
		}
		spin_unlock(&nfsi->req_lock);

		new = nfs_create_request(ctx, inode, page, offset, bytes);
		if (IS_ERR(new))
			return new;
	}

	/* We have a request for our page.
	 * If the creds don't match, or the
	 * page addresses don't match,
	 * tell the caller to wait on the conflicting
	 * request.
	 */
	rqend = req->wb_offset + req->wb_bytes;
	if (req->wb_context != ctx
	    || req->wb_page != page
	    || !nfs_dirty_request(req)
	    || offset > rqend || end < req->wb_offset) {
		nfs_unlock_request(req);
		return ERR_PTR(-EBUSY);
	}

	/* Okay, the request matches. Update the region */
	if (offset < req->wb_offset) {
		req->wb_offset = offset;
		req->wb_pgbase = offset;
		req->wb_bytes = rqend - req->wb_offset;
	}
	if (end > rqend)
		req->wb_bytes = end - req->wb_offset;

	return req;
}
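/*
 * Merge example (illustrative): if the existing request covers bytes
 * [1024, 2048) of the page and the new write is [1536, 3072), the
 * checks above accept it (the ranges overlap), offset (1536) is not
 * below wb_offset, and end (3072) > rqend (2048), so wb_bytes becomes
 * 3072 - 1024 = 2048 and the single request now covers [1024, 3072).
 */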
int nfs_flush_incompatible(struct file *file, struct page *page)
{
	struct nfs_open_context *ctx = (struct nfs_open_context *)file->private_data;
	struct inode *inode = page->mapping->host;
	struct nfs_page *req;
	int status = 0;
	/*
	 * Look for a request corresponding to this page. If there
	 * is one, and it belongs to another file, we flush it out
	 * before we try to copy anything into the page. Do this
	 * due to the lack of an ACCESS-type call in NFSv2.
	 * Also do the same if we find a request from an existing
	 * dropped page.
	 */
	req = nfs_find_request(inode, page->index);
	if (req) {
		if (req->wb_page != page || ctx != req->wb_context)
			status = nfs_wb_page(inode, page);
		nfs_release_request(req);
	}
	return (status < 0) ? status : 0;
}
/*
 * Update and possibly write a cached page of an NFS file.
 *
 * XXX: Keep an eye on generic_file_read to make sure it doesn't do bad
 * things with a page scheduled for an RPC call (e.g. invalidate it).
 */
int nfs_updatepage(struct file *file, struct page *page,
		unsigned int offset, unsigned int count)
{
	struct nfs_open_context *ctx = (struct nfs_open_context *)file->private_data;
	struct inode *inode = page->mapping->host;
	struct nfs_page *req;
	int status = 0;

	nfs_inc_stats(inode, NFSIOS_VFSUPDATEPAGE);

	dprintk("NFS: nfs_updatepage(%s/%s %d@%Ld)\n",
		file->f_dentry->d_parent->d_name.name,
		file->f_dentry->d_name.name, count,
		(long long)(page_offset(page) + offset));

	if (IS_SYNC(inode)) {
		status = nfs_writepage_sync(ctx, inode, page, offset, count, 0);
		if (status > 0) {
			if (offset == 0 && status == PAGE_CACHE_SIZE)
				SetPageUptodate(page);
			return 0;
		}
		return status;
	}

	/* If we're not using byte range locks, and we know the page
	 * is entirely in cache, it may be more efficient to avoid
	 * fragmenting write requests.
	 */
	if (PageUptodate(page) && inode->i_flock == NULL && !(file->f_mode & O_SYNC)) {
		loff_t end_offs = i_size_read(inode) - 1;
		unsigned long end_index = end_offs >> PAGE_CACHE_SHIFT;

		count += offset;
		offset = 0;
		if (unlikely(end_offs < 0)) {
			/* Do nothing */
		} else if (page->index == end_index) {
			unsigned int pglen;
			pglen = (unsigned int)(end_offs & (PAGE_CACHE_SIZE-1)) + 1;
			if (count < pglen)
				count = pglen;
		} else if (page->index < end_index)
			count = PAGE_CACHE_SIZE;
	}

	/*
	 * Try to find an NFS request corresponding to this page
	 * and update it.
	 * If the existing request cannot be updated, we must flush
	 * it out now.
	 */
	do {
		req = nfs_update_request(ctx, inode, page, offset, count);
		status = (IS_ERR(req)) ? PTR_ERR(req) : 0;
		if (status != -EBUSY)
			break;
		/* Request could not be updated. Flush it out and try again */
		status = nfs_wb_page(inode, page);
	} while (status >= 0);
	if (status < 0)
		goto done;

	status = 0;

	/* Update file length */
	nfs_grow_file(page, offset, count);
	/* Set the PG_uptodate flag? */
	nfs_mark_uptodate(page, req->wb_pgbase, req->wb_bytes);
	nfs_unlock_request(req);
done:
	dprintk("NFS: nfs_updatepage returns %d (isize %Ld)\n",
			status, (long long)i_size_read(inode));
	if (status < 0)
		ClearPageUptodate(page);
	return status;
}
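/*
 * Example of the write coalescing above: for a byte-range-lock-free,
 * fully cached middle page (index < end_index), a 100-byte update at
 * offset 200 is widened to offset = 0, count = PAGE_CACHE_SIZE, so one
 * request covers the whole page instead of fragmenting it.
 */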
static void nfs_writepage_release(struct nfs_page *req)
{
	end_page_writeback(req->wb_page);

#if defined(CONFIG_NFS_V3) || defined(CONFIG_NFS_V4)
	if (!PageError(req->wb_page)) {
		if (NFS_NEED_RESCHED(req)) {
			nfs_mark_request_dirty(req);
			goto out;
		} else if (NFS_NEED_COMMIT(req)) {
			nfs_mark_request_commit(req);
			goto out;
		}
	}
	nfs_inode_remove_request(req);
out:
	nfs_clear_commit(req);
	nfs_clear_reschedule(req);
#else
	nfs_inode_remove_request(req);
#endif
	nfs_clear_page_writeback(req);
}
static inline int flush_task_priority(int how)
{
	switch (how & (FLUSH_HIGHPRI|FLUSH_LOWPRI)) {
		case FLUSH_HIGHPRI:
			return RPC_PRIORITY_HIGH;
		case FLUSH_LOWPRI:
			return RPC_PRIORITY_LOW;
	}
	return RPC_PRIORITY_NORMAL;
}
/*
 * Set up the argument/result storage required for the RPC call.
 */
static void nfs_write_rpcsetup(struct nfs_page *req,
		struct nfs_write_data *data,
		const struct rpc_call_ops *call_ops,
		unsigned int count, unsigned int offset,
		int how)
{
	struct inode *inode;
	int flags;

	/* Set up the RPC argument and reply structs
	 * NB: take care not to mess about with data->commit et al. */

	data->req = req;
	data->inode = inode = req->wb_context->dentry->d_inode;
	data->cred = req->wb_context->cred;

	data->args.fh = NFS_FH(inode);
	data->args.offset = req_offset(req) + offset;
	data->args.pgbase = req->wb_pgbase + offset;
	data->args.pages = data->pagevec;
	data->args.count = count;
	data->args.context = req->wb_context;

	data->res.fattr = &data->fattr;
	data->res.count = count;
	data->res.verf = &data->verf;
	nfs_fattr_init(&data->fattr);

	/* Set up the initial task struct. */
	flags = (how & FLUSH_SYNC) ? 0 : RPC_TASK_ASYNC;
	rpc_init_task(&data->task, NFS_CLIENT(inode), flags, call_ops, data);
	NFS_PROTO(inode)->write_setup(data, how);

	data->task.tk_priority = flush_task_priority(how);
	data->task.tk_cookie = (unsigned long)inode;

	dprintk("NFS: %4d initiated write call (req %s/%Ld, %u bytes @ offset %Lu)\n",
		data->task.tk_pid,
		inode->i_sb->s_id,
		(long long)NFS_FILEID(inode),
		count,
		(unsigned long long)data->args.offset);
}
static void nfs_execute_write(struct nfs_write_data *data)
{
	struct rpc_clnt *clnt = NFS_CLIENT(data->inode);
	sigset_t oldset;

	rpc_clnt_sigmask(clnt, &oldset);
	lock_kernel();
	rpc_execute(&data->task);
	unlock_kernel();
	rpc_clnt_sigunmask(clnt, &oldset);
}
/*
 * Generate multiple small requests to write out a single
 * contiguous dirty area on one page.
 */
static int nfs_flush_multi(struct inode *inode, struct list_head *head, int how)
{
	struct nfs_page *req = nfs_list_entry(head->next);
	struct page *page = req->wb_page;
	struct nfs_write_data *data;
	size_t wsize = NFS_SERVER(inode)->wsize, nbytes;
	unsigned int offset;
	int requests = 0;
	LIST_HEAD(list);

	nfs_list_remove_request(req);

	nbytes = req->wb_bytes;
	do {
		size_t len = min(nbytes, wsize);

		data = nfs_writedata_alloc(len);
		if (!data)
			goto out_bad;
		list_add(&data->pages, &list);
		requests++;
		nbytes -= len;
	} while (nbytes != 0);
	atomic_set(&req->wb_complete, requests);

	ClearPageError(page);
	set_page_writeback(page);
	offset = 0;
	nbytes = req->wb_bytes;
	do {
		data = list_entry(list.next, struct nfs_write_data, pages);
		list_del_init(&data->pages);

		data->pagevec[0] = page;

		if (nbytes > wsize) {
			nfs_write_rpcsetup(req, data, &nfs_write_partial_ops,
					wsize, offset, how);
			offset += wsize;
			nbytes -= wsize;
		} else {
			nfs_write_rpcsetup(req, data, &nfs_write_partial_ops,
					nbytes, offset, how);
			nbytes = 0;
		}
		nfs_execute_write(data);
	} while (nbytes != 0);

	return 0;

out_bad:
	while (!list_empty(&list)) {
		data = list_entry(list.next, struct nfs_write_data, pages);
		list_del(&data->pages);
		nfs_writedata_free(data);
	}
	nfs_mark_request_dirty(req);
	nfs_clear_page_writeback(req);
	return -ENOMEM;
}
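/*
 * Split example (illustrative): with wsize = 1024 and a 4096-byte dirty
 * region, the loops above allocate four nfs_write_data, set
 * wb_complete = 4, and issue WRITEs at page offsets 0, 1024, 2048 and
 * 3072; nfs_writepage_release() runs only once the last of the four
 * partial replies has come back.
 */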
/*
 * Create an RPC task for the given write request and kick it.
 * The page must have been locked by the caller.
 *
 * It may happen that the page we're passed is not marked dirty.
 * This is the case if nfs_updatepage detects a conflicting request
 * that has been written but not committed.
 */
static int nfs_flush_one(struct inode *inode, struct list_head *head, int how)
{
	struct nfs_page *req;
	struct page **pages;
	struct nfs_write_data *data;
	unsigned int count;

	data = nfs_writedata_alloc(NFS_SERVER(inode)->wsize);
	if (!data)
		goto out_bad;

	pages = data->pagevec;
	count = 0;
	while (!list_empty(head)) {
		req = nfs_list_entry(head->next);
		nfs_list_remove_request(req);
		nfs_list_add_request(req, &data->pages);
		ClearPageError(req->wb_page);
		set_page_writeback(req->wb_page);
		*pages++ = req->wb_page;
		count += req->wb_bytes;
	}
	req = nfs_list_entry(data->pages.next);

	/* Set up the argument struct */
	nfs_write_rpcsetup(req, data, &nfs_write_full_ops, count, 0, how);

	nfs_execute_write(data);
	return 0;
out_bad:
	while (!list_empty(head)) {
		struct nfs_page *req = nfs_list_entry(head->next);
		nfs_list_remove_request(req);
		nfs_mark_request_dirty(req);
		nfs_clear_page_writeback(req);
	}
	return -ENOMEM;
}
static int nfs_flush_list(struct inode *inode, struct list_head *head, int npages, int how)
{
	LIST_HEAD(one_request);
	int (*flush_one)(struct inode *, struct list_head *, int);
	struct nfs_page *req;
	int wpages = NFS_SERVER(inode)->wpages;
	int wsize = NFS_SERVER(inode)->wsize;
	int error;

	flush_one = nfs_flush_one;
	if (wsize < PAGE_CACHE_SIZE)
		flush_one = nfs_flush_multi;
	/* For single writes, FLUSH_STABLE is more efficient */
	if (npages <= wpages && npages == NFS_I(inode)->npages
			&& nfs_list_entry(head->next)->wb_bytes <= wsize)
		how |= FLUSH_STABLE;

	do {
		nfs_coalesce_requests(head, &one_request, wpages);
		req = nfs_list_entry(one_request.next);
		error = flush_one(inode, &one_request, how);
		if (error < 0)
			goto out_err;
	} while (!list_empty(head));
	return 0;
out_err:
	while (!list_empty(head)) {
		req = nfs_list_entry(head->next);
		nfs_list_remove_request(req);
		nfs_mark_request_dirty(req);
		nfs_clear_page_writeback(req);
	}
	return error;
}
/*
 * Handle a write reply that flushed part of a page.
 */
static void nfs_writeback_done_partial(struct rpc_task *task, void *calldata)
{
	struct nfs_write_data *data = calldata;
	struct nfs_page *req = data->req;
	struct page *page = req->wb_page;

	dprintk("NFS: write (%s/%Ld %d@%Ld)",
		req->wb_context->dentry->d_inode->i_sb->s_id,
		(long long)NFS_FILEID(req->wb_context->dentry->d_inode),
		req->wb_bytes,
		(long long)req_offset(req));

	if (nfs_writeback_done(task, data) != 0)
		return;

	if (task->tk_status < 0) {
		ClearPageUptodate(page);
		SetPageError(page);
		req->wb_context->error = task->tk_status;
		dprintk(", error = %d\n", task->tk_status);
	} else {
#if defined(CONFIG_NFS_V3) || defined(CONFIG_NFS_V4)
		if (data->verf.committed < NFS_FILE_SYNC) {
			if (!NFS_NEED_COMMIT(req)) {
				nfs_defer_commit(req);
				memcpy(&req->wb_verf, &data->verf, sizeof(req->wb_verf));
				dprintk(" defer commit\n");
			} else if (memcmp(&req->wb_verf, &data->verf, sizeof(req->wb_verf))) {
				nfs_defer_reschedule(req);
				dprintk(" server reboot detected\n");
			}
		} else
#endif
			dprintk(" OK\n");
	}

	if (atomic_dec_and_test(&req->wb_complete))
		nfs_writepage_release(req);
}
static const struct rpc_call_ops nfs_write_partial_ops = {
	.rpc_call_done = nfs_writeback_done_partial,
	.rpc_release = nfs_writedata_release,
};
/*
 * Handle a write reply that flushes a whole page.
 *
 * FIXME: There is an inherent race with invalidate_inode_pages and
 *	  writebacks since the page->count is kept > 1 for as long
 *	  as the page has a write request pending.
 */
static void nfs_writeback_done_full(struct rpc_task *task, void *calldata)
{
	struct nfs_write_data *data = calldata;
	struct nfs_page *req;
	struct page *page;

	if (nfs_writeback_done(task, data) != 0)
		return;

	/* Update attributes as result of writeback. */
	while (!list_empty(&data->pages)) {
		req = nfs_list_entry(data->pages.next);
		nfs_list_remove_request(req);
		page = req->wb_page;

		dprintk("NFS: write (%s/%Ld %d@%Ld)",
			req->wb_context->dentry->d_inode->i_sb->s_id,
			(long long)NFS_FILEID(req->wb_context->dentry->d_inode),
			req->wb_bytes,
			(long long)req_offset(req));

		if (task->tk_status < 0) {
			ClearPageUptodate(page);
			SetPageError(page);
			req->wb_context->error = task->tk_status;
			end_page_writeback(page);
			nfs_inode_remove_request(req);
			dprintk(", error = %d\n", task->tk_status);
			goto next;
		}
		end_page_writeback(page);

#if defined(CONFIG_NFS_V3) || defined(CONFIG_NFS_V4)
		if (data->args.stable != NFS_UNSTABLE || data->verf.committed == NFS_FILE_SYNC) {
			nfs_inode_remove_request(req);
			dprintk(" OK\n");
			goto next;
		}
		memcpy(&req->wb_verf, &data->verf, sizeof(req->wb_verf));
		nfs_mark_request_commit(req);
		dprintk(" marked for commit\n");
#else
		nfs_inode_remove_request(req);
#endif
	next:
		nfs_clear_page_writeback(req);
	}
}
static const struct rpc_call_ops nfs_write_full_ops = {
	.rpc_call_done = nfs_writeback_done_full,
	.rpc_release = nfs_writedata_release,
};
/*
 * This function is called when the WRITE call is complete.
 */
int nfs_writeback_done(struct rpc_task *task, struct nfs_write_data *data)
{
	struct nfs_writeargs *argp = &data->args;
	struct nfs_writeres *resp = &data->res;
	int status;

	dprintk("NFS: %4d nfs_writeback_done (status %d)\n",
		task->tk_pid, task->tk_status);

	/*
	 * ->write_done will attempt to use post-op attributes to detect
	 * conflicting writes by other clients.  A strict interpretation
	 * of close-to-open would allow us to continue caching even if
	 * another writer had changed the file, but some applications
	 * depend on tighter cache coherency when writing.
	 */
	status = NFS_PROTO(data->inode)->write_done(task, data);
	if (status != 0)
		return status;
	nfs_add_stats(data->inode, NFSIOS_SERVERWRITTENBYTES, resp->count);

#if defined(CONFIG_NFS_V3) || defined(CONFIG_NFS_V4)
	if (resp->verf->committed < argp->stable && task->tk_status >= 0) {
		/* We tried a write call, but the server did not
		 * commit data to stable storage even though we
		 * requested it.
		 * Note: There is a known bug in Tru64 < 5.0 in which
		 *	 the server reports NFS_DATA_SYNC, but performs
		 *	 NFS_FILE_SYNC. We therefore implement this checking
		 *	 as a dprintk() in order to avoid filling syslog.
		 */
		static unsigned long complain;

		if (time_before(complain, jiffies)) {
			dprintk("NFS: faulty NFS server %s:"
				" (committed = %d) != (stable = %d)\n",
				NFS_SERVER(data->inode)->nfs_client->cl_hostname,
				resp->verf->committed, argp->stable);
			complain = jiffies + 300 * HZ;
		}
	}
#endif
	/* Is this a short write? */
	if (task->tk_status >= 0 && resp->count < argp->count) {
		static unsigned long complain;

		nfs_inc_stats(data->inode, NFSIOS_SHORTWRITE);

		/* Has the server at least made some progress? */
		if (resp->count != 0) {
			/* Was this an NFSv2 write or an NFSv3 stable write? */
			if (resp->verf->committed != NFS_UNSTABLE) {
				/* Resend from where the server left off */
				argp->offset += resp->count;
				argp->pgbase += resp->count;
				argp->count -= resp->count;
			} else {
				/* Resend as a stable write in order to avoid
				 * headaches in the case of a server crash.
				 */
				argp->stable = NFS_FILE_SYNC;
			}
			rpc_restart_call(task);
			return -EAGAIN;
		}
		if (time_before(complain, jiffies)) {
			printk(KERN_WARNING
			       "NFS: Server wrote zero bytes, expected %u.\n",
					argp->count);
			complain = jiffies + 300 * HZ;
		}
		/* Can't do anything about it except throw an error. */
		task->tk_status = -EIO;
	}
	return 0;
}
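/*
 * Short-write example: if argp->count = 8192 but the server reports
 * resp->count = 4096 on a stable (non-UNSTABLE) write, the code above
 * bumps offset/pgbase by 4096, shrinks count to 4096 and restarts the
 * RPC; an UNSTABLE short write is instead retried from the start as
 * NFS_FILE_SYNC, avoiding the headaches of partial verifiers across a
 * server crash.
 */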
#if defined(CONFIG_NFS_V3) || defined(CONFIG_NFS_V4)
void nfs_commit_release(void *wdata)
{
	nfs_commit_free(wdata);
}
/*
 * Set up the argument/result storage required for the RPC call.
 */
static void nfs_commit_rpcsetup(struct list_head *head,
		struct nfs_write_data *data,
		int how)
{
	struct nfs_page *first;
	struct inode *inode;
	int flags;

	/* Set up the RPC argument and reply structs
	 * NB: take care not to mess about with data->commit et al. */

	list_splice_init(head, &data->pages);
	first = nfs_list_entry(data->pages.next);
	inode = first->wb_context->dentry->d_inode;

	data->inode = inode;
	data->cred = first->wb_context->cred;

	data->args.fh = NFS_FH(data->inode);
	/* Note: we always request a commit of the entire inode */
	data->args.offset = 0;
	data->args.count = 0;
	data->res.count = 0;
	data->res.fattr = &data->fattr;
	data->res.verf = &data->verf;
	nfs_fattr_init(&data->fattr);

	/* Set up the initial task struct. */
	flags = (how & FLUSH_SYNC) ? 0 : RPC_TASK_ASYNC;
	rpc_init_task(&data->task, NFS_CLIENT(inode), flags, &nfs_commit_ops, data);
	NFS_PROTO(inode)->commit_setup(data, how);

	data->task.tk_priority = flush_task_priority(how);
	data->task.tk_cookie = (unsigned long)inode;

	dprintk("NFS: %4d initiated commit call\n", data->task.tk_pid);
}
/*
 * Commit dirty pages
 */
static int
nfs_commit_list(struct inode *inode, struct list_head *head, int how)
{
	struct nfs_write_data *data;
	struct nfs_page *req;

	data = nfs_commit_alloc();

	if (!data)
		goto out_bad;

	/* Set up the argument struct */
	nfs_commit_rpcsetup(head, data, how);

	nfs_execute_write(data);
	return 0;
out_bad:
	while (!list_empty(head)) {
		req = nfs_list_entry(head->next);
		nfs_list_remove_request(req);
		nfs_mark_request_commit(req);
		dec_zone_page_state(req->wb_page, NR_UNSTABLE_NFS);
		nfs_clear_page_writeback(req);
	}
	return -ENOMEM;
}
/*
 * COMMIT call returned
 */
static void nfs_commit_done(struct rpc_task *task, void *calldata)
{
	struct nfs_write_data *data = calldata;
	struct nfs_page *req;

	dprintk("NFS: %4d nfs_commit_done (status %d)\n",
		task->tk_pid, task->tk_status);

	/* Call the NFS version-specific code */
	if (NFS_PROTO(data->inode)->commit_done(task, data) != 0)
		return;

	while (!list_empty(&data->pages)) {
		req = nfs_list_entry(data->pages.next);
		nfs_list_remove_request(req);
		dec_zone_page_state(req->wb_page, NR_UNSTABLE_NFS);

		dprintk("NFS: commit (%s/%Ld %d@%Ld)",
			req->wb_context->dentry->d_inode->i_sb->s_id,
			(long long)NFS_FILEID(req->wb_context->dentry->d_inode),
			req->wb_bytes,
			(long long)req_offset(req));
		if (task->tk_status < 0) {
			req->wb_context->error = task->tk_status;
			nfs_inode_remove_request(req);
			dprintk(", error = %d\n", task->tk_status);
			goto next;
		}

		/* Okay, COMMIT succeeded, apparently. Check the verifier
		 * returned by the server against all stored verfs. */
		if (!memcmp(req->wb_verf.verifier, data->verf.verifier, sizeof(data->verf.verifier))) {
			/* We have a match */
			nfs_inode_remove_request(req);
			dprintk(" OK\n");
			goto next;
		}
		/* We have a mismatch. Write the page again */
		dprintk(" mismatch\n");
		nfs_mark_request_dirty(req);
	next:
		nfs_clear_page_writeback(req);
	}
}
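/*
 * The verifier check above is what makes UNSTABLE writes safe: each
 * request stored the write verifier it received in wb_verf, and if the
 * server's COMMIT verifier differs (typically after a server reboot),
 * the data may have been lost from the server's cache, so the page is
 * simply marked dirty and written again.
 */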
static const struct rpc_call_ops nfs_commit_ops = {
	.rpc_call_done = nfs_commit_done,
	.rpc_release = nfs_commit_release,
};
#else
static inline int nfs_commit_list(struct inode *inode, struct list_head *head, int how)
{
	return 0;
}
#endif
static int nfs_flush_inode(struct inode *inode, unsigned long idx_start,
			   unsigned int npages, int how)
{
	struct nfs_inode *nfsi = NFS_I(inode);
	LIST_HEAD(head);
	int res;

	spin_lock(&nfsi->req_lock);
	res = nfs_scan_dirty(inode, &head, idx_start, npages);
	spin_unlock(&nfsi->req_lock);
	if (res) {
		int error = nfs_flush_list(inode, &head, res, how);
		if (error < 0)
			return error;
	}
	return res;
}
#if defined(CONFIG_NFS_V3) || defined(CONFIG_NFS_V4)
int nfs_commit_inode(struct inode *inode, int how)
{
	struct nfs_inode *nfsi = NFS_I(inode);
	LIST_HEAD(head);
	int res;

	spin_lock(&nfsi->req_lock);
	res = nfs_scan_commit(inode, &head, 0, 0);
	spin_unlock(&nfsi->req_lock);
	if (res) {
		int error = nfs_commit_list(inode, &head, how);
		if (error < 0)
			return error;
	}
	return res;
}
#endif
int nfs_sync_inode_wait(struct inode *inode, unsigned long idx_start,
		unsigned int npages, int how)
{
	struct nfs_inode *nfsi = NFS_I(inode);
	LIST_HEAD(head);
	int nocommit = how & FLUSH_NOCOMMIT;
	int pages, ret;

	how &= ~FLUSH_NOCOMMIT;
	spin_lock(&nfsi->req_lock);
	do {
		ret = nfs_wait_on_requests_locked(inode, idx_start, npages);
		if (ret != 0)
			continue;
		pages = nfs_scan_dirty(inode, &head, idx_start, npages);
		if (pages != 0) {
			spin_unlock(&nfsi->req_lock);
			if (how & FLUSH_INVALIDATE)
				nfs_cancel_dirty_list(&head);
			else
				ret = nfs_flush_list(inode, &head, pages, how);
			spin_lock(&nfsi->req_lock);
			continue;
		}
		if (nocommit)
			break;
		pages = nfs_scan_commit(inode, &head, idx_start, npages);
		if (pages == 0)
			break;
		if (how & FLUSH_INVALIDATE) {
			spin_unlock(&nfsi->req_lock);
			nfs_cancel_commit_list(&head);
			spin_lock(&nfsi->req_lock);
			continue;
		}
		pages += nfs_scan_commit(inode, &head, 0, 0);
		spin_unlock(&nfsi->req_lock);
		ret = nfs_commit_list(inode, &head, how);
		spin_lock(&nfsi->req_lock);
	} while (ret >= 0);
	spin_unlock(&nfsi->req_lock);
	return ret;
}
int __init nfs_init_writepagecache(void)
{
	nfs_wdata_cachep = kmem_cache_create("nfs_write_data",
					     sizeof(struct nfs_write_data),
					     0, SLAB_HWCACHE_ALIGN,
					     NULL, NULL);
	if (nfs_wdata_cachep == NULL)
		return -ENOMEM;

	nfs_wdata_mempool = mempool_create_slab_pool(MIN_POOL_WRITE,
						     nfs_wdata_cachep);
	if (nfs_wdata_mempool == NULL)
		return -ENOMEM;

	nfs_commit_mempool = mempool_create_slab_pool(MIN_POOL_COMMIT,
						      nfs_wdata_cachep);
	if (nfs_commit_mempool == NULL)
		return -ENOMEM;

	return 0;
}
void nfs_destroy_writepagecache(void)
{
	mempool_destroy(nfs_commit_mempool);
	mempool_destroy(nfs_wdata_mempool);
	kmem_cache_destroy(nfs_wdata_cachep);
}