/*
 * linux/fs/nfs/write.c
 *
 * Write file data over NFS.
 *
 * Copyright (C) 1996, 1997, Olaf Kirch <okir@monad.swb.de>
 */

#include <linux/types.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/file.h>
#include <linux/writeback.h>
#include <linux/swap.h>

#include <linux/sunrpc/clnt.h>
#include <linux/nfs_fs.h>
#include <linux/nfs_mount.h>
#include <linux/nfs_page.h>
#include <linux/backing-dev.h>

#include <asm/uaccess.h>

#include "delegation.h"
#include "internal.h"
#include "iostat.h"

#define NFSDBG_FACILITY		NFSDBG_PAGECACHE

#define MIN_POOL_WRITE		(32)
#define MIN_POOL_COMMIT		(4)
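/*
 * Overview: dirty pages are tracked as nfs_page requests and flushed
 * to the server as WRITE calls.  If the server replies with anything
 * weaker than NFS_FILE_SYNC, the requests are kept on a commit list
 * until a later COMMIT forces the data to stable storage.
 */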
/*
 * Local function declarations
 */
static struct nfs_page * nfs_update_request(struct nfs_open_context*,
					    struct page *,
					    unsigned int, unsigned int);
static void nfs_pageio_init_write(struct nfs_pageio_descriptor *desc,
				  struct inode *inode, int ioflags);
static const struct rpc_call_ops nfs_write_partial_ops;
static const struct rpc_call_ops nfs_write_full_ops;
static const struct rpc_call_ops nfs_commit_ops;

static struct kmem_cache *nfs_wdata_cachep;
static mempool_t *nfs_wdata_mempool;
static mempool_t *nfs_commit_mempool;
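/*
 * Write and commit data are carved from dedicated mempools so that
 * writeback can always make some forward progress, even when the
 * system is too short on memory for ordinary allocations to succeed.
 */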
struct nfs_write_data *nfs_commit_alloc(void)
{
	struct nfs_write_data *p = mempool_alloc(nfs_commit_mempool, GFP_NOFS);

	if (p) {
		memset(p, 0, sizeof(*p));
		INIT_LIST_HEAD(&p->pages);
	}
	return p;
}
static void nfs_commit_rcu_free(struct rcu_head *head)
{
	struct nfs_write_data *p = container_of(head, struct nfs_write_data, task.u.tk_rcu);
	if (p && (p->pagevec != &p->page_array[0]))
		kfree(p->pagevec);
	mempool_free(p, nfs_commit_mempool);
}
void nfs_commit_free(struct nfs_write_data *wdata)
{
	call_rcu_bh(&wdata->task.u.tk_rcu, nfs_commit_rcu_free);
}
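/*
 * Allocate write data for up to @pagecount pages.  Small requests use
 * the page_array embedded in struct nfs_write_data; larger requests
 * fall back to a separately allocated page vector.
 */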
struct nfs_write_data *nfs_writedata_alloc(unsigned int pagecount)
{
	struct nfs_write_data *p = mempool_alloc(nfs_wdata_mempool, GFP_NOFS);

	if (p) {
		memset(p, 0, sizeof(*p));
		INIT_LIST_HEAD(&p->pages);
		p->npages = pagecount;
		if (pagecount <= ARRAY_SIZE(p->page_array))
			p->pagevec = p->page_array;
		else {
			p->pagevec = kcalloc(pagecount, sizeof(struct page *), GFP_NOFS);
			if (!p->pagevec) {
				mempool_free(p, nfs_wdata_mempool);
				p = NULL;
			}
		}
	}
	return p;
}
static void nfs_writedata_rcu_free(struct rcu_head *head)
{
	struct nfs_write_data *p = container_of(head, struct nfs_write_data, task.u.tk_rcu);
	if (p && (p->pagevec != &p->page_array[0]))
		kfree(p->pagevec);
	mempool_free(p, nfs_wdata_mempool);
}
static void nfs_writedata_free(struct nfs_write_data *wdata)
{
	call_rcu_bh(&wdata->task.u.tk_rcu, nfs_writedata_rcu_free);
}

void nfs_writedata_release(void *wdata)
{
	nfs_writedata_free(wdata);
}
static struct nfs_page *nfs_page_find_request_locked(struct page *page)
{
	struct nfs_page *req = NULL;

	if (PagePrivate(page)) {
		req = (struct nfs_page *)page_private(page);
		if (req != NULL)
			kref_get(&req->wb_kref);
	}
	return req;
}
static struct nfs_page *nfs_page_find_request(struct page *page)
{
	struct inode *inode = page->mapping->host;
	struct nfs_page *req = NULL;

	spin_lock(&inode->i_lock);
	req = nfs_page_find_request_locked(page);
	spin_unlock(&inode->i_lock);
	return req;
}
/* Adjust the file length if we're writing beyond the end */
static void nfs_grow_file(struct page *page, unsigned int offset, unsigned int count)
{
	struct inode *inode = page->mapping->host;
	loff_t end, i_size = i_size_read(inode);
	pgoff_t end_index = (i_size - 1) >> PAGE_CACHE_SHIFT;

	if (i_size > 0 && page->index < end_index)
		return;
	end = ((loff_t)page->index << PAGE_CACHE_SHIFT) + ((loff_t)offset + count);
	if (i_size >= end)
		return;
	nfs_inc_stats(inode, NFSIOS_EXTENDWRITE);
	i_size_write(inode, end);
}
/* A writeback failed: mark the page as bad, and invalidate the page cache */
static void nfs_set_pageerror(struct page *page)
{
	SetPageError(page);
	nfs_zap_mapping(page->mapping->host, page->mapping);
}
/* We can set the PG_uptodate flag if we see that a write request
 * covers the full page.
 */
static void nfs_mark_uptodate(struct page *page, unsigned int base, unsigned int count)
{
	if (PageUptodate(page))
		return;
	if (base != 0)
		return;
	if (count != nfs_page_length(page))
		return;
	SetPageUptodate(page);
}
static int nfs_writepage_setup(struct nfs_open_context *ctx, struct page *page,
		unsigned int offset, unsigned int count)
{
	struct nfs_page	*req;
	int ret;

	for (;;) {
		req = nfs_update_request(ctx, page, offset, count);
		if (!IS_ERR(req))
			break;
		ret = PTR_ERR(req);
		if (ret != -EBUSY)
			return ret;
		ret = nfs_wb_page(page->mapping->host, page);
		if (ret != 0)
			return ret;
	}
	/* Update file length */
	nfs_grow_file(page, offset, count);
	nfs_unlock_request(req);
	return 0;
}
static int wb_priority(struct writeback_control *wbc)
{
	if (wbc->for_reclaim)
		return FLUSH_HIGHPRI | FLUSH_STABLE;
	if (wbc->for_kupdate)
		return FLUSH_LOWPRI;
	return 0;
}
/*
 * NFS congestion control
 */

int nfs_congestion_kb;

#define NFS_CONGESTION_ON_THRESH	(nfs_congestion_kb >> (PAGE_SHIFT-10))
#define NFS_CONGESTION_OFF_THRESH	\
	(NFS_CONGESTION_ON_THRESH - (NFS_CONGESTION_ON_THRESH >> 2))
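/*
 * The OFF threshold is three quarters of the ON threshold, so that a
 * bdi does not flip between congested and uncongested on every page
 * completion.
 */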
static int nfs_set_page_writeback(struct page *page)
{
	int ret = test_set_page_writeback(page);

	if (!ret) {
		struct inode *inode = page->mapping->host;
		struct nfs_server *nfss = NFS_SERVER(inode);

		if (atomic_long_inc_return(&nfss->writeback) >
				NFS_CONGESTION_ON_THRESH)
			set_bdi_congested(&nfss->backing_dev_info, WRITE);
	}
	return ret;
}
static void nfs_end_page_writeback(struct page *page)
{
	struct inode *inode = page->mapping->host;
	struct nfs_server *nfss = NFS_SERVER(inode);

	end_page_writeback(page);
	if (atomic_long_dec_return(&nfss->writeback) < NFS_CONGESTION_OFF_THRESH) {
		clear_bdi_congested(&nfss->backing_dev_info, WRITE);
		congestion_end(WRITE);
	}
}
/*
 * Find an associated nfs write request, and prepare to flush it out
 * Returns 1 if there was no write request, or if the request was
 * already tagged by nfs_set_page_dirty. Returns 0 if the request
 * was not tagged.
 * May also return an error if the user signalled nfs_wait_on_request().
 */
static int nfs_page_async_flush(struct nfs_pageio_descriptor *pgio,
				struct page *page)
{
	struct inode *inode = page->mapping->host;
	struct nfs_inode *nfsi = NFS_I(inode);
	struct nfs_page *req;
	int ret;

	spin_lock(&inode->i_lock);
	for (;;) {
		req = nfs_page_find_request_locked(page);
		if (req == NULL) {
			spin_unlock(&inode->i_lock);
			return 1;
		}
		if (nfs_lock_request_dontget(req))
			break;
		/* Note: If we hold the page lock, as is the case in nfs_writepage,
		 *	 then the call to nfs_lock_request_dontget() will always
		 *	 succeed provided that someone hasn't already marked the
		 *	 request as dirty (in which case we don't care).
		 */
		spin_unlock(&inode->i_lock);
		ret = nfs_wait_on_request(req);
		nfs_release_request(req);
		if (ret != 0)
			return ret;
		spin_lock(&inode->i_lock);
	}
	if (test_bit(PG_NEED_COMMIT, &req->wb_flags)) {
		/* This request is marked for commit */
		spin_unlock(&inode->i_lock);
		nfs_unlock_request(req);
		nfs_pageio_complete(pgio);
		return 1;
	}
	if (nfs_set_page_writeback(page) != 0) {
		spin_unlock(&inode->i_lock);
		BUG();
	}
	radix_tree_tag_set(&nfsi->nfs_page_tree, req->wb_index,
			NFS_PAGE_TAG_LOCKED);
	ret = test_bit(PG_NEED_FLUSH, &req->wb_flags);
	spin_unlock(&inode->i_lock);
	nfs_pageio_add_request(pgio, req);
	return ret;
}
/*
 * Write an mmapped page to the server.
 */
static int nfs_writepage_locked(struct page *page, struct writeback_control *wbc)
{
	struct nfs_pageio_descriptor mypgio, *pgio;
	struct nfs_open_context *ctx;
	struct inode *inode = page->mapping->host;
	unsigned offset;
	int err;

	nfs_inc_stats(inode, NFSIOS_VFSWRITEPAGE);
	nfs_add_stats(inode, NFSIOS_WRITEPAGES, 1);

	if (wbc->for_writepages)
		pgio = wbc->fs_private;
	else {
		nfs_pageio_init_write(&mypgio, inode, wb_priority(wbc));
		pgio = &mypgio;
	}

	nfs_pageio_cond_complete(pgio, page->index);

	err = nfs_page_async_flush(pgio, page);
	if (err <= 0)
		goto out;
	err = 0;
	offset = nfs_page_length(page);
	if (!offset)
		goto out;

	nfs_pageio_cond_complete(pgio, page->index);

	ctx = nfs_find_open_context(inode, NULL, FMODE_WRITE);
	if (ctx == NULL) {
		err = -EBADF;
		goto out;
	}
	err = nfs_writepage_setup(ctx, page, 0, offset);
	put_nfs_open_context(ctx);
	if (err != 0)
		goto out;
	err = nfs_page_async_flush(pgio, page);
	if (err > 0)
		err = 0;
out:
	if (!wbc->for_writepages)
		nfs_pageio_complete(pgio);
	return err;
}
int nfs_writepage(struct page *page, struct writeback_control *wbc)
{
	int err;

	err = nfs_writepage_locked(page, wbc);
	unlock_page(page);
	return err;
}
int nfs_writepages(struct address_space *mapping, struct writeback_control *wbc)
{
	struct inode *inode = mapping->host;
	struct nfs_pageio_descriptor pgio;
	int err;

	nfs_inc_stats(inode, NFSIOS_VFSWRITEPAGES);

	nfs_pageio_init_write(&pgio, inode, wb_priority(wbc));
	wbc->fs_private = &pgio;
	err = generic_writepages(mapping, wbc);
	nfs_pageio_complete(&pgio);
	if (err)
		return err;

	return pgio.pg_error;
}
/*
 * Insert a write request into an inode
 */
static int nfs_inode_add_request(struct inode *inode, struct nfs_page *req)
{
	struct nfs_inode *nfsi = NFS_I(inode);
	int error;

	error = radix_tree_insert(&nfsi->nfs_page_tree, req->wb_index, req);
	BUG_ON(error == -EEXIST);
	if (error)
		return error;
	if (!nfsi->npages) {
		igrab(inode);
		nfs_begin_data_update(inode);
		if (nfs_have_delegation(inode, FMODE_WRITE))
			nfsi->change_attr++;
	}
	SetPagePrivate(req->wb_page);
	set_page_private(req->wb_page, (unsigned long)req);
	if (PageDirty(req->wb_page))
		set_bit(PG_NEED_FLUSH, &req->wb_flags);
	nfsi->npages++;
	kref_get(&req->wb_kref);
	return 0;
}
/*
 * Remove a write request from an inode
 */
static void nfs_inode_remove_request(struct nfs_page *req)
{
	struct inode *inode = req->wb_context->path.dentry->d_inode;
	struct nfs_inode *nfsi = NFS_I(inode);

	BUG_ON (!NFS_WBACK_BUSY(req));

	spin_lock(&inode->i_lock);
	set_page_private(req->wb_page, 0);
	ClearPagePrivate(req->wb_page);
	radix_tree_delete(&nfsi->nfs_page_tree, req->wb_index);
	if (test_and_clear_bit(PG_NEED_FLUSH, &req->wb_flags))
		__set_page_dirty_nobuffers(req->wb_page);
	nfsi->npages--;
	if (!nfsi->npages) {
		spin_unlock(&inode->i_lock);
		nfs_end_data_update(inode);
		iput(inode);
	} else
		spin_unlock(&inode->i_lock);
	nfs_clear_request(req);
	nfs_release_request(req);
}
static void
nfs_redirty_request(struct nfs_page *req)
{
	__set_page_dirty_nobuffers(req->wb_page);
}
/*
 * Check if a request is dirty
 */
static inline int
nfs_dirty_request(struct nfs_page *req)
{
	struct page *page = req->wb_page;

	if (page == NULL || test_bit(PG_NEED_COMMIT, &req->wb_flags))
		return 0;
	return !PageWriteback(req->wb_page);
}
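/*
 * The commit machinery below is only compiled for NFSv3 and NFSv4:
 * an NFSv2 server must write through to stable storage, so there is
 * never any unstable data left over to commit.
 */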
#if defined(CONFIG_NFS_V3) || defined(CONFIG_NFS_V4)
/*
 * Add a request to the inode's commit list.
 */
static void
nfs_mark_request_commit(struct nfs_page *req)
{
	struct inode *inode = req->wb_context->path.dentry->d_inode;
	struct nfs_inode *nfsi = NFS_I(inode);

	spin_lock(&inode->i_lock);
	nfsi->ncommit++;
	set_bit(PG_NEED_COMMIT, &(req)->wb_flags);
	radix_tree_tag_set(&nfsi->nfs_page_tree,
			req->wb_index,
			NFS_PAGE_TAG_COMMIT);
	spin_unlock(&inode->i_lock);
	inc_zone_page_state(req->wb_page, NR_UNSTABLE_NFS);
	__mark_inode_dirty(inode, I_DIRTY_DATASYNC);
}
static inline
int nfs_write_need_commit(struct nfs_write_data *data)
{
	return data->verf.committed != NFS_FILE_SYNC;
}
static inline
int nfs_reschedule_unstable_write(struct nfs_page *req)
{
	if (test_bit(PG_NEED_COMMIT, &req->wb_flags)) {
		nfs_mark_request_commit(req);
		return 1;
	}
	if (test_and_clear_bit(PG_NEED_RESCHED, &req->wb_flags)) {
		nfs_redirty_request(req);
		return 1;
	}
	return 0;
}
#else
static inline void
nfs_mark_request_commit(struct nfs_page *req)
{
}

static inline
int nfs_write_need_commit(struct nfs_write_data *data)
{
	return 0;
}

static inline
int nfs_reschedule_unstable_write(struct nfs_page *req)
{
	return 0;
}
#endif
/*
 * Wait for a request to complete.
 *
 * Interruptible by signals only if mounted with intr flag.
 */
static int nfs_wait_on_requests_locked(struct inode *inode, pgoff_t idx_start, unsigned int npages)
{
	struct nfs_inode *nfsi = NFS_I(inode);
	struct nfs_page *req;
	pgoff_t idx_end, next;
	unsigned int res = 0;
	int error;

	if (npages == 0)
		idx_end = ~0;
	else
		idx_end = idx_start + npages - 1;

	next = idx_start;
	while (radix_tree_gang_lookup_tag(&nfsi->nfs_page_tree, (void **)&req, next, 1, NFS_PAGE_TAG_LOCKED)) {
		if (req->wb_index > idx_end)
			break;

		next = req->wb_index + 1;
		BUG_ON(!NFS_WBACK_BUSY(req));

		kref_get(&req->wb_kref);
		spin_unlock(&inode->i_lock);
		error = nfs_wait_on_request(req);
		nfs_release_request(req);
		spin_lock(&inode->i_lock);
		if (error < 0)
			return error;
		res++;
	}
	return res;
}
static void nfs_cancel_commit_list(struct list_head *head)
{
	struct nfs_page *req;

	while(!list_empty(head)) {
		req = nfs_list_entry(head->next);
		dec_zone_page_state(req->wb_page, NR_UNSTABLE_NFS);
		nfs_list_remove_request(req);
		clear_bit(PG_NEED_COMMIT, &(req)->wb_flags);
		nfs_inode_remove_request(req);
		nfs_unlock_request(req);
	}
}
#if defined(CONFIG_NFS_V3) || defined(CONFIG_NFS_V4)
/*
 * nfs_scan_commit - Scan an inode for commit requests
 * @inode: NFS inode to scan
 * @dst: destination list
 * @idx_start: lower bound of page->index to scan.
 * @npages: idx_start + npages sets the upper bound to scan.
 *
 * Moves requests from the inode's 'commit' request list.
 * The requests are *not* checked to ensure that they form a contiguous set.
 */
static int
nfs_scan_commit(struct inode *inode, struct list_head *dst, pgoff_t idx_start, unsigned int npages)
{
	struct nfs_inode *nfsi = NFS_I(inode);
	int res = 0;

	if (nfsi->ncommit != 0) {
		res = nfs_scan_list(nfsi, dst, idx_start, npages,
				NFS_PAGE_TAG_COMMIT);
		nfsi->ncommit -= res;
	}
	return res;
}
#else
static inline int nfs_scan_commit(struct inode *inode, struct list_head *dst, pgoff_t idx_start, unsigned int npages)
{
	return 0;
}
#endif
/*
 * Try to update any existing write request, or create one if there is none.
 * In order to match, the request's credentials must match those of
 * the calling process.
 *
 * Note: Should always be called with the Page Lock held!
 */
static struct nfs_page * nfs_update_request(struct nfs_open_context* ctx,
		struct page *page, unsigned int offset, unsigned int bytes)
{
	struct address_space *mapping = page->mapping;
	struct inode *inode = mapping->host;
	struct nfs_page		*req, *new = NULL;
	pgoff_t		rqend, end;

	end = offset + bytes;

	for (;;) {
		/* Loop over all inode entries and see if we find
		 * A request for the page we wish to update
		 */
		spin_lock(&inode->i_lock);
		req = nfs_page_find_request_locked(page);
		if (req) {
			if (!nfs_lock_request_dontget(req)) {
				int error;

				spin_unlock(&inode->i_lock);
				error = nfs_wait_on_request(req);
				nfs_release_request(req);
				if (error < 0) {
					if (new)
						nfs_release_request(new);
					return ERR_PTR(error);
				}
				continue;
			}
			spin_unlock(&inode->i_lock);
			if (new)
				nfs_release_request(new);
			break;
		}

		if (new) {
			int error;
			nfs_lock_request_dontget(new);
			error = nfs_inode_add_request(inode, new);
			if (error) {
				spin_unlock(&inode->i_lock);
				nfs_unlock_request(new);
				return ERR_PTR(error);
			}
			spin_unlock(&inode->i_lock);
			return new;
		}
		spin_unlock(&inode->i_lock);

		new = nfs_create_request(ctx, inode, page, offset, bytes);
		if (IS_ERR(new))
			return new;
	}

	/* We have a request for our page.
	 * If the creds don't match, or the
	 * page addresses don't match,
	 * tell the caller to wait on the conflicting
	 * request.
	 */
	rqend = req->wb_offset + req->wb_bytes;
	if (req->wb_context != ctx
	    || req->wb_page != page
	    || !nfs_dirty_request(req)
	    || offset > rqend || end < req->wb_offset) {
		nfs_unlock_request(req);
		return ERR_PTR(-EBUSY);
	}

	/* Okay, the request matches. Update the region */
	if (offset < req->wb_offset) {
		req->wb_offset = offset;
		req->wb_pgbase = offset;
		req->wb_bytes = max(end, rqend) - req->wb_offset;
	} else if (end > rqend)
		req->wb_bytes = end - req->wb_offset;

	/* If this page might potentially be marked as up to date,
	 * then we need to zero any uninitialised data. */
	if (req->wb_pgbase == 0 && req->wb_bytes != PAGE_CACHE_SIZE
			&& !PageUptodate(req->wb_page))
		zero_user_page(req->wb_page, req->wb_bytes,
				PAGE_CACHE_SIZE - req->wb_bytes,
				KM_USER0);
	return req;
}
int nfs_flush_incompatible(struct file *file, struct page *page)
{
	struct nfs_open_context *ctx = (struct nfs_open_context *)file->private_data;
	struct nfs_page	*req;
	int do_flush, status;
	/*
	 * Look for a request corresponding to this page. If there
	 * is one, and it belongs to another file, we flush it out
	 * before we try to copy anything into the page. Do this
	 * due to the lack of an ACCESS-type call in NFSv2.
	 * Also do the same if we find a request from an existing
	 * dropped page.
	 */
	do {
		req = nfs_page_find_request(page);
		if (req == NULL)
			return 0;
		do_flush = req->wb_page != page || req->wb_context != ctx
			|| !nfs_dirty_request(req);
		nfs_release_request(req);
		if (!do_flush)
			return 0;
		status = nfs_wb_page(page->mapping->host, page);
	} while (status == 0);
	return status;
}
/*
 * If the page cache is marked as unsafe or invalid, then we can't rely on
 * the PageUptodate() flag. In this case, we will need to turn off
 * write optimisations that depend on the page contents being correct.
 */
static int nfs_write_pageuptodate(struct page *page, struct inode *inode)
{
	return PageUptodate(page) &&
		!(NFS_I(inode)->cache_validity & (NFS_INO_REVAL_PAGECACHE|NFS_INO_INVALID_DATA));
}
/*
 * Update and possibly write a cached page of an NFS file.
 *
 * XXX: Keep an eye on generic_file_read to make sure it doesn't do bad
 * things with a page scheduled for an RPC call (e.g. invalidate it).
 */
int nfs_updatepage(struct file *file, struct page *page,
		unsigned int offset, unsigned int count)
{
	struct nfs_open_context *ctx = (struct nfs_open_context *)file->private_data;
	struct inode	*inode = page->mapping->host;
	int		status = 0;

	nfs_inc_stats(inode, NFSIOS_VFSUPDATEPAGE);

	dprintk("NFS: nfs_updatepage(%s/%s %d@%Ld)\n",
		file->f_path.dentry->d_parent->d_name.name,
		file->f_path.dentry->d_name.name, count,
		(long long)(page_offset(page) + offset));

	/* If we're not using byte range locks, and we know the page
	 * is up to date, it may be more efficient to extend the write
	 * to cover the entire page in order to avoid fragmentation
	 * inefficiencies.
	 */
	if (nfs_write_pageuptodate(page, inode) &&
			inode->i_flock == NULL &&
			!(file->f_mode & O_SYNC)) {
		count = max(count + offset, nfs_page_length(page));
		offset = 0;
	}

	status = nfs_writepage_setup(ctx, page, offset, count);
	__set_page_dirty_nobuffers(page);

	dprintk("NFS: nfs_updatepage returns %d (isize %Ld)\n",
			status, (long long)i_size_read(inode));
	if (status < 0)
		nfs_set_pageerror(page);
	return status;
}
static void nfs_writepage_release(struct nfs_page *req)
{
	if (PageError(req->wb_page)) {
		nfs_end_page_writeback(req->wb_page);
		nfs_inode_remove_request(req);
	} else if (!nfs_reschedule_unstable_write(req)) {
		/* Set the PG_uptodate flag */
		nfs_mark_uptodate(req->wb_page, req->wb_pgbase, req->wb_bytes);
		nfs_end_page_writeback(req->wb_page);
		nfs_inode_remove_request(req);
	} else
		nfs_end_page_writeback(req->wb_page);
	nfs_clear_page_tag_locked(req);
}
static inline int flush_task_priority(int how)
{
	switch (how & (FLUSH_HIGHPRI|FLUSH_LOWPRI)) {
		case FLUSH_HIGHPRI:
			return RPC_PRIORITY_HIGH;
		case FLUSH_LOWPRI:
			return RPC_PRIORITY_LOW;
	}
	return RPC_PRIORITY_NORMAL;
}
/*
 * Set up the argument/result storage required for the RPC call.
 */
static void nfs_write_rpcsetup(struct nfs_page *req,
		struct nfs_write_data *data,
		const struct rpc_call_ops *call_ops,
		unsigned int count, unsigned int offset,
		int how)
{
	struct inode		*inode;
	int flags;

	/* Set up the RPC argument and reply structs
	 * NB: take care not to mess about with data->commit et al. */

	data->req = req;
	data->inode = inode = req->wb_context->path.dentry->d_inode;
	data->cred = req->wb_context->cred;

	data->args.fh     = NFS_FH(inode);
	data->args.offset = req_offset(req) + offset;
	data->args.pgbase = req->wb_pgbase + offset;
	data->args.pages  = data->pagevec;
	data->args.count  = count;
	data->args.context = req->wb_context;

	data->res.fattr   = &data->fattr;
	data->res.count   = count;
	data->res.verf    = &data->verf;
	nfs_fattr_init(&data->fattr);

	/* Set up the initial task struct. */
	flags = (how & FLUSH_SYNC) ? 0 : RPC_TASK_ASYNC;
	rpc_init_task(&data->task, NFS_CLIENT(inode), flags, call_ops, data);
	NFS_PROTO(inode)->write_setup(data, how);

	data->task.tk_priority = flush_task_priority(how);
	data->task.tk_cookie = (unsigned long)inode;

	dprintk("NFS: %5u initiated write call "
		"(req %s/%Ld, %u bytes @ offset %Lu)\n",
		data->task.tk_pid,
		inode->i_sb->s_id,
		(long long)NFS_FILEID(inode),
		count,
		(unsigned long long)data->args.offset);
}
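/*
 * Execute the write RPC with signals masked according to the RPC
 * client's settings, so that 'intr' mount semantics are preserved.
 */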
static void nfs_execute_write(struct nfs_write_data *data)
{
	struct rpc_clnt *clnt = NFS_CLIENT(data->inode);
	sigset_t oldset;

	rpc_clnt_sigmask(clnt, &oldset);
	rpc_execute(&data->task);
	rpc_clnt_sigunmask(clnt, &oldset);
}
/*
 * Generate multiple small requests to write out a single
 * contiguous dirty area on one page.
 */
static int nfs_flush_multi(struct inode *inode, struct list_head *head, unsigned int npages, size_t count, int how)
{
	struct nfs_page *req = nfs_list_entry(head->next);
	struct page *page = req->wb_page;
	struct nfs_write_data *data;
	size_t wsize = NFS_SERVER(inode)->wsize, nbytes;
	unsigned int offset;
	int requests = 0;
	LIST_HEAD(list);

	nfs_list_remove_request(req);

	nbytes = count;
	do {
		size_t len = min(nbytes, wsize);

		data = nfs_writedata_alloc(1);
		if (!data)
			goto out_bad;
		list_add(&data->pages, &list);
		requests++;
		nbytes -= len;
	} while (nbytes != 0);
	atomic_set(&req->wb_complete, requests);

	ClearPageError(page);
	offset = 0;
	nbytes = count;
	do {
		size_t len = min(nbytes, wsize);

		data = list_entry(list.next, struct nfs_write_data, pages);
		list_del_init(&data->pages);

		data->pagevec[0] = page;

		nfs_write_rpcsetup(req, data, &nfs_write_partial_ops,
				   len, offset, how);
		offset += len;
		nbytes -= len;
		nfs_execute_write(data);
	} while (nbytes != 0);

	return 0;

out_bad:
	while (!list_empty(&list)) {
		data = list_entry(list.next, struct nfs_write_data, pages);
		list_del(&data->pages);
		nfs_writedata_release(data);
	}
	nfs_redirty_request(req);
	nfs_end_page_writeback(req->wb_page);
	nfs_clear_page_tag_locked(req);
	return -ENOMEM;
}
/*
 * Create an RPC task for the given write request and kick it.
 * The page must have been locked by the caller.
 *
 * It may happen that the page we're passed is not marked dirty.
 * This is the case if nfs_updatepage detects a conflicting request
 * that has been written but not committed.
 */
static int nfs_flush_one(struct inode *inode, struct list_head *head, unsigned int npages, size_t count, int how)
{
	struct nfs_page		*req;
	struct page		**pages;
	struct nfs_write_data	*data;

	data = nfs_writedata_alloc(npages);
	if (!data)
		goto out_bad;

	pages = data->pagevec;
	while (!list_empty(head)) {
		req = nfs_list_entry(head->next);
		nfs_list_remove_request(req);
		nfs_list_add_request(req, &data->pages);
		ClearPageError(req->wb_page);
		*pages++ = req->wb_page;
	}
	req = nfs_list_entry(data->pages.next);

	/* Set up the argument struct */
	nfs_write_rpcsetup(req, data, &nfs_write_full_ops, count, 0, how);

	nfs_execute_write(data);
	return 0;
 out_bad:
	while (!list_empty(head)) {
		req = nfs_list_entry(head->next);
		nfs_list_remove_request(req);
		nfs_redirty_request(req);
		nfs_end_page_writeback(req->wb_page);
		nfs_clear_page_tag_locked(req);
	}
	return -ENOMEM;
}
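/*
 * Choose a flush strategy: if the server's write size is smaller than
 * a page, each page must be split across several WRITE calls
 * (nfs_flush_multi); otherwise whole pages can be batched into a
 * single call (nfs_flush_one).
 */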
static void nfs_pageio_init_write(struct nfs_pageio_descriptor *pgio,
				  struct inode *inode, int ioflags)
{
	int wsize = NFS_SERVER(inode)->wsize;

	if (wsize < PAGE_CACHE_SIZE)
		nfs_pageio_init(pgio, inode, nfs_flush_multi, wsize, ioflags);
	else
		nfs_pageio_init(pgio, inode, nfs_flush_one, wsize, ioflags);
}
/*
 * Handle a write reply that flushed part of a page.
 */
static void nfs_writeback_done_partial(struct rpc_task *task, void *calldata)
{
	struct nfs_write_data	*data = calldata;
	struct nfs_page		*req = data->req;
	struct page		*page = req->wb_page;

	dprintk("NFS: write (%s/%Ld %d@%Ld)",
		req->wb_context->path.dentry->d_inode->i_sb->s_id,
		(long long)NFS_FILEID(req->wb_context->path.dentry->d_inode),
		req->wb_bytes,
		(long long)req_offset(req));

	if (nfs_writeback_done(task, data) != 0)
		return;

	if (task->tk_status < 0) {
		nfs_set_pageerror(page);
		req->wb_context->error = task->tk_status;
		dprintk(", error = %d\n", task->tk_status);
		goto out;
	}

	if (nfs_write_need_commit(data)) {
		struct inode *inode = page->mapping->host;

		spin_lock(&inode->i_lock);
		if (test_bit(PG_NEED_RESCHED, &req->wb_flags)) {
			/* Do nothing we need to resend the writes */
		} else if (!test_and_set_bit(PG_NEED_COMMIT, &req->wb_flags)) {
			memcpy(&req->wb_verf, &data->verf, sizeof(req->wb_verf));
			dprintk(" defer commit\n");
		} else if (memcmp(&req->wb_verf, &data->verf, sizeof(req->wb_verf))) {
			set_bit(PG_NEED_RESCHED, &req->wb_flags);
			clear_bit(PG_NEED_COMMIT, &req->wb_flags);
			dprintk(" server reboot detected\n");
		}
		spin_unlock(&inode->i_lock);
	} else
		dprintk(" OK\n");

out:
	if (atomic_dec_and_test(&req->wb_complete))
		nfs_writepage_release(req);
}

static const struct rpc_call_ops nfs_write_partial_ops = {
	.rpc_call_done = nfs_writeback_done_partial,
	.rpc_release = nfs_writedata_release,
};
/*
 * Handle a write reply that flushes a whole page.
 *
 * FIXME: There is an inherent race with invalidate_inode_pages and
 *	  writebacks since the page->count is kept > 1 for as long
 *	  as the page has a write request pending.
 */
static void nfs_writeback_done_full(struct rpc_task *task, void *calldata)
{
	struct nfs_write_data	*data = calldata;
	struct nfs_page		*req;
	struct page		*page;

	if (nfs_writeback_done(task, data) != 0)
		return;

	/* Update attributes as result of writeback. */
	while (!list_empty(&data->pages)) {
		req = nfs_list_entry(data->pages.next);
		nfs_list_remove_request(req);
		page = req->wb_page;

		dprintk("NFS: write (%s/%Ld %d@%Ld)",
			req->wb_context->path.dentry->d_inode->i_sb->s_id,
			(long long)NFS_FILEID(req->wb_context->path.dentry->d_inode),
			req->wb_bytes,
			(long long)req_offset(req));

		if (task->tk_status < 0) {
			nfs_set_pageerror(page);
			req->wb_context->error = task->tk_status;
			dprintk(", error = %d\n", task->tk_status);
			goto remove_request;
		}

		if (nfs_write_need_commit(data)) {
			memcpy(&req->wb_verf, &data->verf, sizeof(req->wb_verf));
			nfs_mark_request_commit(req);
			nfs_end_page_writeback(page);
			dprintk(" marked for commit\n");
			goto next;
		}
		/* Set the PG_uptodate flag? */
		nfs_mark_uptodate(page, req->wb_pgbase, req->wb_bytes);
		dprintk(" OK\n");
remove_request:
		nfs_end_page_writeback(page);
		nfs_inode_remove_request(req);
	next:
		nfs_clear_page_tag_locked(req);
	}
}

static const struct rpc_call_ops nfs_write_full_ops = {
	.rpc_call_done = nfs_writeback_done_full,
	.rpc_release = nfs_writedata_release,
};
/*
 * This function is called when the WRITE call is complete.
 */
int nfs_writeback_done(struct rpc_task *task, struct nfs_write_data *data)
{
	struct nfs_writeargs	*argp = &data->args;
	struct nfs_writeres	*resp = &data->res;
	int status;

	dprintk("NFS: %5u nfs_writeback_done (status %d)\n",
		task->tk_pid, task->tk_status);

	/*
	 * ->write_done will attempt to use post-op attributes to detect
	 * conflicting writes by other clients.  A strict interpretation
	 * of close-to-open would allow us to continue caching even if
	 * another writer had changed the file, but some applications
	 * depend on tighter cache coherency when writing.
	 */
	status = NFS_PROTO(data->inode)->write_done(task, data);
	if (status != 0)
		return status;
	nfs_add_stats(data->inode, NFSIOS_SERVERWRITTENBYTES, resp->count);

#if defined(CONFIG_NFS_V3) || defined(CONFIG_NFS_V4)
	if (resp->verf->committed < argp->stable && task->tk_status >= 0) {
		/* We tried a write call, but the server did not
		 * commit data to stable storage even though we
		 * requested it.
		 * Note: There is a known bug in Tru64 < 5.0 in which
		 *	 the server reports NFS_DATA_SYNC, but performs
		 *	 NFS_FILE_SYNC. We therefore implement this checking
		 *	 as a dprintk() in order to avoid filling syslog.
		 */
		static unsigned long	complain;

		if (time_before(complain, jiffies)) {
			dprintk("NFS: faulty NFS server %s:"
				" (committed = %d) != (stable = %d)\n",
				NFS_SERVER(data->inode)->nfs_client->cl_hostname,
				resp->verf->committed, argp->stable);
			complain = jiffies + 300 * HZ;
		}
	}
#endif
	/* Is this a short write? */
	if (task->tk_status >= 0 && resp->count < argp->count) {
		static unsigned long	complain;

		nfs_inc_stats(data->inode, NFSIOS_SHORTWRITE);

		/* Has the server at least made some progress? */
		if (resp->count != 0) {
			/* Was this an NFSv2 write or an NFSv3 stable write? */
			if (resp->verf->committed != NFS_UNSTABLE) {
				/* Resend from where the server left off */
				argp->offset += resp->count;
				argp->pgbase += resp->count;
				argp->count -= resp->count;
			} else {
				/* Resend as a stable write in order to avoid
				 * headaches in the case of a server crash.
				 */
				argp->stable = NFS_FILE_SYNC;
			}
			rpc_restart_call(task);
			return -EAGAIN;
		}
		if (time_before(complain, jiffies)) {
			printk(KERN_WARNING
			       "NFS: Server wrote zero bytes, expected %u.\n",
					argp->count);
			complain = jiffies + 300 * HZ;
		}
		/* Can't do anything about it except throw an error. */
		task->tk_status = -EIO;
	}
	return 0;
}
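/*
 * COMMIT handling: ask the server to flush previously written
 * unstable data to disk.  The write verifier returned by the server
 * is compared against the verifier stored with each request; a
 * mismatch means the server rebooted before committing, and the data
 * must be resent.
 */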
#if defined(CONFIG_NFS_V3) || defined(CONFIG_NFS_V4)
void nfs_commit_release(void *wdata)
{
	nfs_commit_free(wdata);
}
/*
 * Set up the argument/result storage required for the RPC call.
 */
static void nfs_commit_rpcsetup(struct list_head *head,
		struct nfs_write_data *data,
		int how)
{
	struct nfs_page		*first;
	struct inode		*inode;
	int flags;

	/* Set up the RPC argument and reply structs
	 * NB: take care not to mess about with data->commit et al. */

	list_splice_init(head, &data->pages);
	first = nfs_list_entry(data->pages.next);
	inode = first->wb_context->path.dentry->d_inode;

	data->inode	  = inode;
	data->cred	  = first->wb_context->cred;

	data->args.fh     = NFS_FH(data->inode);
	/* Note: we always request a commit of the entire inode */
	data->args.offset = 0;
	data->args.count  = 0;
	data->res.count   = 0;
	data->res.fattr   = &data->fattr;
	data->res.verf    = &data->verf;
	nfs_fattr_init(&data->fattr);

	/* Set up the initial task struct. */
	flags = (how & FLUSH_SYNC) ? 0 : RPC_TASK_ASYNC;
	rpc_init_task(&data->task, NFS_CLIENT(inode), flags, &nfs_commit_ops, data);
	NFS_PROTO(inode)->commit_setup(data, how);

	data->task.tk_priority = flush_task_priority(how);
	data->task.tk_cookie = (unsigned long)inode;

	dprintk("NFS: %5u initiated commit call\n", data->task.tk_pid);
}
/*
 * Commit dirty pages
 */
static int
nfs_commit_list(struct inode *inode, struct list_head *head, int how)
{
	struct nfs_write_data	*data;
	struct nfs_page		*req;

	data = nfs_commit_alloc();

	if (!data)
		goto out_bad;

	/* Set up the argument struct */
	nfs_commit_rpcsetup(head, data, how);

	nfs_execute_write(data);
	return 0;
 out_bad:
	while (!list_empty(head)) {
		req = nfs_list_entry(head->next);
		nfs_list_remove_request(req);
		nfs_mark_request_commit(req);
		dec_zone_page_state(req->wb_page, NR_UNSTABLE_NFS);
		nfs_clear_page_tag_locked(req);
	}
	return -ENOMEM;
}
/*
 * COMMIT call returned
 */
static void nfs_commit_done(struct rpc_task *task, void *calldata)
{
	struct nfs_write_data	*data = calldata;
	struct nfs_page		*req;

	dprintk("NFS: %5u nfs_commit_done (status %d)\n",
		task->tk_pid, task->tk_status);

	/* Call the NFS version-specific code */
	if (NFS_PROTO(data->inode)->commit_done(task, data) != 0)
		return;

	while (!list_empty(&data->pages)) {
		req = nfs_list_entry(data->pages.next);
		nfs_list_remove_request(req);
		clear_bit(PG_NEED_COMMIT, &(req)->wb_flags);
		dec_zone_page_state(req->wb_page, NR_UNSTABLE_NFS);

		dprintk("NFS: commit (%s/%Ld %d@%Ld)",
			req->wb_context->path.dentry->d_inode->i_sb->s_id,
			(long long)NFS_FILEID(req->wb_context->path.dentry->d_inode),
			req->wb_bytes,
			(long long)req_offset(req));
		if (task->tk_status < 0) {
			req->wb_context->error = task->tk_status;
			nfs_inode_remove_request(req);
			dprintk(", error = %d\n", task->tk_status);
			goto next;
		}

		/* Okay, COMMIT succeeded, apparently. Check the verifier
		 * returned by the server against all stored verfs. */
		if (!memcmp(req->wb_verf.verifier, data->verf.verifier, sizeof(data->verf.verifier))) {
			/* We have a match */
			/* Set the PG_uptodate flag */
			nfs_mark_uptodate(req->wb_page, req->wb_pgbase,
					req->wb_bytes);
			nfs_inode_remove_request(req);
			dprintk(" OK\n");
			goto next;
		}
		/* We have a mismatch. Write the page again */
		dprintk(" mismatch\n");
		nfs_redirty_request(req);
	next:
		nfs_clear_page_tag_locked(req);
	}
}
static const struct rpc_call_ops nfs_commit_ops = {
	.rpc_call_done = nfs_commit_done,
	.rpc_release = nfs_commit_release,
};
int nfs_commit_inode(struct inode *inode, int how)
{
	LIST_HEAD(head);
	int res;

	spin_lock(&inode->i_lock);
	res = nfs_scan_commit(inode, &head, 0, 0);
	spin_unlock(&inode->i_lock);
	if (res) {
		int error = nfs_commit_list(inode, &head, how);
		if (error < 0)
			return error;
	}
	return res;
}
#else
static inline int nfs_commit_list(struct inode *inode, struct list_head *head, int how)
{
	return 0;
}
#endif
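/*
 * Wait for all write requests in the given range to complete, and
 * commit (or, with FLUSH_INVALIDATE, cancel) any unstable data found
 * along the way.
 */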
long nfs_sync_mapping_wait(struct address_space *mapping, struct writeback_control *wbc, int how)
{
	struct inode *inode = mapping->host;
	pgoff_t idx_start, idx_end;
	unsigned int npages = 0;
	LIST_HEAD(head);
	int nocommit = how & FLUSH_NOCOMMIT;
	long pages, ret;

	/* FIXME */
	if (wbc->range_cyclic)
		idx_start = 0;
	else {
		idx_start = wbc->range_start >> PAGE_CACHE_SHIFT;
		idx_end = wbc->range_end >> PAGE_CACHE_SHIFT;
		if (idx_end > idx_start) {
			pgoff_t l_npages = 1 + idx_end - idx_start;
			npages = l_npages;
			if (sizeof(npages) != sizeof(l_npages) &&
					(pgoff_t)npages != l_npages)
				npages = 0;
		}
	}
	how &= ~FLUSH_NOCOMMIT;
	spin_lock(&inode->i_lock);
	do {
		ret = nfs_wait_on_requests_locked(inode, idx_start, npages);
		if (ret != 0)
			continue;
		if (nocommit)
			break;
		pages = nfs_scan_commit(inode, &head, idx_start, npages);
		if (pages == 0)
			break;
		if (how & FLUSH_INVALIDATE) {
			spin_unlock(&inode->i_lock);
			nfs_cancel_commit_list(&head);
			ret = pages;
			spin_lock(&inode->i_lock);
			continue;
		}
		pages += nfs_scan_commit(inode, &head, 0, 0);
		spin_unlock(&inode->i_lock);
		ret = nfs_commit_list(inode, &head, how);
		spin_lock(&inode->i_lock);
	} while (ret >= 0);
	spin_unlock(&inode->i_lock);
	return ret;
}
/*
 * flush the inode to disk.
 */
int nfs_wb_all(struct inode *inode)
{
	struct address_space *mapping = inode->i_mapping;
	struct writeback_control wbc = {
		.bdi = mapping->backing_dev_info,
		.sync_mode = WB_SYNC_ALL,
		.nr_to_write = LONG_MAX,
		.for_writepages = 1,
		.range_cyclic = 1,
	};
	int ret;

	ret = nfs_writepages(mapping, &wbc);
	if (ret < 0)
		goto out;
	ret = nfs_sync_mapping_wait(mapping, &wbc, 0);
	if (ret >= 0)
		return 0;
out:
	__mark_inode_dirty(mapping->host, I_DIRTY_PAGES);
	return ret;
}
int nfs_sync_mapping_range(struct address_space *mapping, loff_t range_start, loff_t range_end, int how)
{
	struct writeback_control wbc = {
		.bdi = mapping->backing_dev_info,
		.sync_mode = WB_SYNC_ALL,
		.nr_to_write = LONG_MAX,
		.range_start = range_start,
		.range_end = range_end,
		.for_writepages = 1,
	};
	int ret;

	ret = nfs_writepages(mapping, &wbc);
	if (ret < 0)
		goto out;
	ret = nfs_sync_mapping_wait(mapping, &wbc, how);
	if (ret >= 0)
		return 0;
out:
	__mark_inode_dirty(mapping->host, I_DIRTY_PAGES);
	return ret;
}
int nfs_wb_page_cancel(struct inode *inode, struct page *page)
{
	struct nfs_page *req;
	loff_t range_start = page_offset(page);
	loff_t range_end = range_start + (loff_t)(PAGE_CACHE_SIZE - 1);
	struct writeback_control wbc = {
		.bdi = page->mapping->backing_dev_info,
		.sync_mode = WB_SYNC_ALL,
		.nr_to_write = LONG_MAX,
		.range_start = range_start,
		.range_end = range_end,
	};
	int ret = 0;

	BUG_ON(!PageLocked(page));
	for (;;) {
		req = nfs_page_find_request(page);
		if (req == NULL)
			goto out;
		if (test_bit(PG_NEED_COMMIT, &req->wb_flags)) {
			nfs_release_request(req);
			break;
		}
		if (nfs_lock_request_dontget(req)) {
			nfs_inode_remove_request(req);
			/*
			 * In case nfs_inode_remove_request has marked the
			 * page as being dirty
			 */
			cancel_dirty_page(page, PAGE_CACHE_SIZE);
			nfs_unlock_request(req);
			break;
		}
		ret = nfs_wait_on_request(req);
		if (ret < 0)
			goto out;
	}
	if (!PagePrivate(page))
		return 0;
	ret = nfs_sync_mapping_wait(page->mapping, &wbc, FLUSH_INVALIDATE);
out:
	return ret;
}
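/*
 * Flush any dirty data on a single page and wait for the resulting
 * requests to complete, honouring the given priority flags.
 */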
int nfs_wb_page_priority(struct inode *inode, struct page *page, int how)
{
	loff_t range_start = page_offset(page);
	loff_t range_end = range_start + (loff_t)(PAGE_CACHE_SIZE - 1);
	struct writeback_control wbc = {
		.bdi = page->mapping->backing_dev_info,
		.sync_mode = WB_SYNC_ALL,
		.nr_to_write = LONG_MAX,
		.range_start = range_start,
		.range_end = range_end,
	};
	int ret;

	BUG_ON(!PageLocked(page));
	if (clear_page_dirty_for_io(page)) {
		ret = nfs_writepage_locked(page, &wbc);
		if (ret < 0)
			goto out;
	}
	if (!PagePrivate(page))
		return 0;
	ret = nfs_sync_mapping_wait(page->mapping, &wbc, how);
	if (ret >= 0)
		return 0;
out:
	__mark_inode_dirty(inode, I_DIRTY_PAGES);
	return ret;
}
/*
 * Write back all requests on one page - we do this before reading it.
 */
int nfs_wb_page(struct inode *inode, struct page* page)
{
	return nfs_wb_page_priority(inode, page, FLUSH_STABLE);
}
int nfs_set_page_dirty(struct page *page)
{
	struct address_space *mapping = page->mapping;
	struct inode *inode;
	struct nfs_page *req;
	int ret;

	if (!mapping)
		goto out_raced;
	inode = mapping->host;
	if (!inode)
		goto out_raced;
	spin_lock(&inode->i_lock);
	req = nfs_page_find_request_locked(page);
	if (req != NULL) {
		/* Mark any existing write requests for flushing */
		ret = !test_and_set_bit(PG_NEED_FLUSH, &req->wb_flags);
		spin_unlock(&inode->i_lock);
		nfs_release_request(req);
		return ret;
	}
	ret = __set_page_dirty_nobuffers(page);
	spin_unlock(&inode->i_lock);
	return ret;
out_raced:
	return !TestSetPageDirty(page);
}
int __init nfs_init_writepagecache(void)
{
	nfs_wdata_cachep = kmem_cache_create("nfs_write_data",
					     sizeof(struct nfs_write_data),
					     0, SLAB_HWCACHE_ALIGN,
					     NULL);
	if (nfs_wdata_cachep == NULL)
		return -ENOMEM;

	nfs_wdata_mempool = mempool_create_slab_pool(MIN_POOL_WRITE,
						     nfs_wdata_cachep);
	if (nfs_wdata_mempool == NULL)
		return -ENOMEM;

	nfs_commit_mempool = mempool_create_slab_pool(MIN_POOL_COMMIT,
						      nfs_wdata_cachep);
	if (nfs_commit_mempool == NULL)
		return -ENOMEM;

	/*
	 * NFS congestion size, scale with available memory.
	 *
	 * This allows larger machines to have larger/more transfers.
	 * Limit the default to 256M
	 */
	nfs_congestion_kb = (16*int_sqrt(totalram_pages)) << (PAGE_SHIFT-10);
	if (nfs_congestion_kb > 256*1024)
		nfs_congestion_kb = 256*1024;

	return 0;
}
void nfs_destroy_writepagecache(void)
{
	mempool_destroy(nfs_commit_mempool);
	mempool_destroy(nfs_wdata_mempool);
	kmem_cache_destroy(nfs_wdata_cachep);
}