/*
 * Write file data over NFS.
 *
 * Copyright (C) 1996, 1997, Olaf Kirch <okir@monad.swb.de>
 */

#include <linux/types.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/file.h>
#include <linux/writeback.h>
#include <linux/swap.h>

#include <linux/sunrpc/clnt.h>
#include <linux/nfs_fs.h>
#include <linux/nfs_mount.h>
#include <linux/nfs_page.h>
#include <linux/backing-dev.h>

#include <asm/uaccess.h>

#include "delegation.h"
#include "internal.h"
#include "iostat.h"

#define NFSDBG_FACILITY		NFSDBG_PAGECACHE

#define MIN_POOL_WRITE		(32)
#define MIN_POOL_COMMIT		(4)
/*
 * Local function declarations
 */
static struct nfs_page * nfs_update_request(struct nfs_open_context*,
					    struct page *,
					    unsigned int, unsigned int);
static void nfs_pageio_init_write(struct nfs_pageio_descriptor *desc,
				  struct inode *inode, int ioflags);
static const struct rpc_call_ops nfs_write_partial_ops;
static const struct rpc_call_ops nfs_write_full_ops;
static const struct rpc_call_ops nfs_commit_ops;

static struct kmem_cache *nfs_wdata_cachep;
static mempool_t *nfs_wdata_mempool;
static mempool_t *nfs_commit_mempool;
struct nfs_write_data *nfs_commit_alloc(void)
{
	struct nfs_write_data *p = mempool_alloc(nfs_commit_mempool, GFP_NOFS);

	if (p) {
		memset(p, 0, sizeof(*p));
		INIT_LIST_HEAD(&p->pages);
	}
	return p;
}
static void nfs_commit_rcu_free(struct rcu_head *head)
{
	struct nfs_write_data *p = container_of(head, struct nfs_write_data, task.u.tk_rcu);
	if (p && (p->pagevec != &p->page_array[0]))
		kfree(p->pagevec);
	mempool_free(p, nfs_commit_mempool);
}

void nfs_commit_free(struct nfs_write_data *wdata)
{
	call_rcu_bh(&wdata->task.u.tk_rcu, nfs_commit_rcu_free);
}
struct nfs_write_data *nfs_writedata_alloc(unsigned int pagecount)
{
	struct nfs_write_data *p = mempool_alloc(nfs_wdata_mempool, GFP_NOFS);

	if (p) {
		memset(p, 0, sizeof(*p));
		INIT_LIST_HEAD(&p->pages);
		p->npages = pagecount;
		if (pagecount <= ARRAY_SIZE(p->page_array))
			p->pagevec = p->page_array;
		else {
			p->pagevec = kcalloc(pagecount, sizeof(struct page *), GFP_NOFS);
			if (!p->pagevec) {
				mempool_free(p, nfs_wdata_mempool);
				p = NULL;
			}
		}
	}
	return p;
}
static void nfs_writedata_rcu_free(struct rcu_head *head)
{
	struct nfs_write_data *p = container_of(head, struct nfs_write_data, task.u.tk_rcu);
	if (p && (p->pagevec != &p->page_array[0]))
		kfree(p->pagevec);
	mempool_free(p, nfs_wdata_mempool);
}

static void nfs_writedata_free(struct nfs_write_data *wdata)
{
	call_rcu_bh(&wdata->task.u.tk_rcu, nfs_writedata_rcu_free);
}

void nfs_writedata_release(void *wdata)
{
	nfs_writedata_free(wdata);
}
static struct nfs_page *nfs_page_find_request_locked(struct page *page)
{
	struct nfs_page *req = NULL;

	if (PagePrivate(page)) {
		req = (struct nfs_page *)page_private(page);
		if (req != NULL)
			kref_get(&req->wb_kref);
	}
	return req;
}
static struct nfs_page *nfs_page_find_request(struct page *page)
{
	struct inode *inode = page->mapping->host;
	struct nfs_page *req = NULL;

	spin_lock(&inode->i_lock);
	req = nfs_page_find_request_locked(page);
	spin_unlock(&inode->i_lock);
	return req;
}
/* Adjust the file length if we're writing beyond the end */
static void nfs_grow_file(struct page *page, unsigned int offset, unsigned int count)
{
	struct inode *inode = page->mapping->host;
	loff_t end, i_size = i_size_read(inode);
	pgoff_t end_index = (i_size - 1) >> PAGE_CACHE_SHIFT;

	if (i_size > 0 && page->index < end_index)
		return;
	end = ((loff_t)page->index << PAGE_CACHE_SHIFT) + ((loff_t)offset + count);
	if (i_size >= end)
		return;
	nfs_inc_stats(inode, NFSIOS_EXTENDWRITE);
	i_size_write(inode, end);
}
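
/*
 * Worked example for nfs_grow_file() above (illustrative numbers, not from
 * the original source): with 4096-byte pages, an i_size of 10000 bytes and
 * a write of count = 500 at offset = 0 into page index 3, end_index is
 * (10000 - 1) >> 12 = 2, so the early return is skipped; end becomes
 * (3 << 12) + 500 = 12788, which exceeds i_size, so i_size is extended to
 * 12788 and NFSIOS_EXTENDWRITE is bumped.
 */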
/* A writeback failed: mark the page as bad, and invalidate the page cache */
static void nfs_set_pageerror(struct page *page)
{
	SetPageError(page);
	nfs_zap_mapping(page->mapping->host, page->mapping);
}
/* We can set the PG_uptodate flag if we see that a write request
 * covers the full page.
 */
static void nfs_mark_uptodate(struct page *page, unsigned int base, unsigned int count)
{
	if (PageUptodate(page))
		return;
	if (base != 0)
		return;
	if (count != nfs_page_length(page))
		return;
	if (count != PAGE_CACHE_SIZE)
		zero_user_page(page, count, PAGE_CACHE_SIZE - count, KM_USER0);
	SetPageUptodate(page);
}
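
/*
 * Illustrative case for nfs_mark_uptodate() above (example numbers, not
 * from the original source): for a write of base = 0, count = 3000 into
 * the last page of a file whose valid length on that page is also 3000,
 * nfs_page_length() matches, so bytes 3000..4095 are zeroed with
 * zero_user_page() and the page can safely be marked PG_uptodate even
 * though the request did not cover the full PAGE_CACHE_SIZE.
 */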
static int nfs_writepage_setup(struct nfs_open_context *ctx, struct page *page,
		unsigned int offset, unsigned int count)
{
	struct nfs_page	*req;
	int ret;

	for (;;) {
		req = nfs_update_request(ctx, page, offset, count);
		if (!IS_ERR(req))
			break;
		ret = PTR_ERR(req);
		if (ret != -EBUSY)
			return ret;
		ret = nfs_wb_page(page->mapping->host, page);
		if (ret != 0)
			return ret;
	}
	/* Update file length */
	nfs_grow_file(page, offset, count);
	nfs_unlock_request(req);
	return 0;
}
static int wb_priority(struct writeback_control *wbc)
{
	if (wbc->for_reclaim)
		return FLUSH_HIGHPRI | FLUSH_STABLE;
	if (wbc->for_kupdate)
		return FLUSH_LOWPRI;
	return 0;
}
/*
 * NFS congestion control
 */

int nfs_congestion_kb;

#define NFS_CONGESTION_ON_THRESH	(nfs_congestion_kb >> (PAGE_SHIFT-10))
#define NFS_CONGESTION_OFF_THRESH	\
	(NFS_CONGESTION_ON_THRESH - (NFS_CONGESTION_ON_THRESH >> 2))
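
/*
 * Illustrative arithmetic for the thresholds above (example numbers, not
 * from the original source): with 4 KiB pages (PAGE_SHIFT == 12) and
 * nfs_congestion_kb == 65536 (64 MiB),
 *
 *	NFS_CONGESTION_ON_THRESH  = 65536 >> 2 = 16384 pages under writeback,
 *	NFS_CONGESTION_OFF_THRESH = 16384 - 4096 = 12288 pages,
 *
 * so the bdi is marked congested once roughly 64 MiB of pages are in
 * flight and uncongested again when that drops below about 48 MiB.
 */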
static int nfs_set_page_writeback(struct page *page)
{
	int ret = test_set_page_writeback(page);

	if (!ret) {
		struct inode *inode = page->mapping->host;
		struct nfs_server *nfss = NFS_SERVER(inode);

		if (atomic_long_inc_return(&nfss->writeback) >
				NFS_CONGESTION_ON_THRESH)
			set_bdi_congested(&nfss->backing_dev_info, WRITE);
	}
	return ret;
}
static void nfs_end_page_writeback(struct page *page)
{
	struct inode *inode = page->mapping->host;
	struct nfs_server *nfss = NFS_SERVER(inode);

	end_page_writeback(page);
	if (atomic_long_dec_return(&nfss->writeback) < NFS_CONGESTION_OFF_THRESH) {
		clear_bdi_congested(&nfss->backing_dev_info, WRITE);
		congestion_end(WRITE);
	}
}
/*
 * Find an associated nfs write request, and prepare to flush it out
 * Returns 1 if there was no write request, or if the request was
 * already tagged by nfs_set_page_dirty. Returns 0 if the request was
 * flushed out.
 * May also return an error if the user signalled nfs_wait_on_request().
 */
static int nfs_page_async_flush(struct nfs_pageio_descriptor *pgio,
				struct page *page)
{
	struct inode *inode = page->mapping->host;
	struct nfs_inode *nfsi = NFS_I(inode);
	struct nfs_page *req;
	int ret;

	spin_lock(&inode->i_lock);
	for (;;) {
		req = nfs_page_find_request_locked(page);
		if (req == NULL) {
			spin_unlock(&inode->i_lock);
			return 1;
		}
		if (nfs_lock_request_dontget(req))
			break;
		/* Note: If we hold the page lock, as is the case in nfs_writepage,
		 *	 then the call to nfs_lock_request_dontget() will always
		 *	 succeed provided that someone hasn't already marked the
		 *	 request as dirty (in which case we don't care).
		 */
		spin_unlock(&inode->i_lock);
		ret = nfs_wait_on_request(req);
		nfs_release_request(req);
		if (ret != 0)
			return ret;
		spin_lock(&inode->i_lock);
	}
	if (test_bit(PG_NEED_COMMIT, &req->wb_flags)) {
		/* This request is marked for commit */
		spin_unlock(&inode->i_lock);
		nfs_unlock_request(req);
		nfs_pageio_complete(pgio);
		return 1;
	}
	if (nfs_set_page_writeback(page) != 0) {
		spin_unlock(&inode->i_lock);
		BUG();
	}
	radix_tree_tag_set(&nfsi->nfs_page_tree, req->wb_index,
			NFS_PAGE_TAG_LOCKED);
	ret = test_bit(PG_NEED_FLUSH, &req->wb_flags);
	spin_unlock(&inode->i_lock);
	nfs_pageio_add_request(pgio, req);
	return ret;
}
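
/*
 * Added note on the locking above (commentary, not from the original
 * source): inode->i_lock is a spinlock, so it must be dropped before
 * sleeping in nfs_wait_on_request() and re-taken before the lookup is
 * retried; the reference taken by nfs_page_find_request_locked() keeps
 * the nfs_page alive across that unlocked window.
 */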
/*
 * Write an mmapped page to the server.
 */
static int nfs_writepage_locked(struct page *page, struct writeback_control *wbc)
{
	struct nfs_pageio_descriptor mypgio, *pgio;
	struct nfs_open_context *ctx;
	struct inode *inode = page->mapping->host;
	unsigned offset;
	int err;

	nfs_inc_stats(inode, NFSIOS_VFSWRITEPAGE);
	nfs_add_stats(inode, NFSIOS_WRITEPAGES, 1);

	if (wbc->for_writepages)
		pgio = wbc->fs_private;
	else {
		nfs_pageio_init_write(&mypgio, inode, wb_priority(wbc));
		pgio = &mypgio;
	}

	nfs_pageio_cond_complete(pgio, page->index);

	err = nfs_page_async_flush(pgio, page);
	if (err <= 0)
		goto out;
	err = 0;
	offset = nfs_page_length(page);
	if (!offset)
		goto out;

	nfs_pageio_cond_complete(pgio, page->index);

	ctx = nfs_find_open_context(inode, NULL, FMODE_WRITE);
	if (ctx == NULL) {
		err = -EBADF;
		goto out;
	}
	err = nfs_writepage_setup(ctx, page, 0, offset);
	put_nfs_open_context(ctx);
	if (err != 0)
		goto out;
	err = nfs_page_async_flush(pgio, page);
	if (err > 0)
		err = 0;
out:
	if (!wbc->for_writepages)
		nfs_pageio_complete(pgio);
	return err;
}
int nfs_writepage(struct page *page, struct writeback_control *wbc)
{
	int err;

	err = nfs_writepage_locked(page, wbc);
	unlock_page(page);
	return err;
}
int nfs_writepages(struct address_space *mapping, struct writeback_control *wbc)
{
	struct inode *inode = mapping->host;
	struct nfs_pageio_descriptor pgio;
	int err;

	nfs_inc_stats(inode, NFSIOS_VFSWRITEPAGES);

	nfs_pageio_init_write(&pgio, inode, wb_priority(wbc));
	wbc->fs_private = &pgio;
	err = generic_writepages(mapping, wbc);
	nfs_pageio_complete(&pgio);
	if (err)
		return err;
	return pgio.pg_error;
}
/*
 * Insert a write request into an inode
 */
static int nfs_inode_add_request(struct inode *inode, struct nfs_page *req)
{
	struct nfs_inode *nfsi = NFS_I(inode);
	int error;

	error = radix_tree_insert(&nfsi->nfs_page_tree, req->wb_index, req);
	BUG_ON(error == -EEXIST);
	if (error)
		return error;
	if (!nfsi->npages) {
		igrab(inode);
		nfs_begin_data_update(inode);
		if (nfs_have_delegation(inode, FMODE_WRITE))
			nfsi->change_attr++;
	}
	SetPagePrivate(req->wb_page);
	set_page_private(req->wb_page, (unsigned long)req);
	if (PageDirty(req->wb_page))
		set_bit(PG_NEED_FLUSH, &req->wb_flags);
	nfsi->npages++;
	kref_get(&req->wb_kref);
	return 0;
}
/*
 * Remove a write request from an inode
 */
static void nfs_inode_remove_request(struct nfs_page *req)
{
	struct inode *inode = req->wb_context->path.dentry->d_inode;
	struct nfs_inode *nfsi = NFS_I(inode);

	BUG_ON (!NFS_WBACK_BUSY(req));

	spin_lock(&inode->i_lock);
	set_page_private(req->wb_page, 0);
	ClearPagePrivate(req->wb_page);
	radix_tree_delete(&nfsi->nfs_page_tree, req->wb_index);
	if (test_and_clear_bit(PG_NEED_FLUSH, &req->wb_flags))
		__set_page_dirty_nobuffers(req->wb_page);
	nfsi->npages--;
	if (!nfsi->npages) {
		spin_unlock(&inode->i_lock);
		nfs_end_data_update(inode);
		iput(inode);
	} else
		spin_unlock(&inode->i_lock);
	nfs_clear_request(req);
	nfs_release_request(req);
}
static void
nfs_redirty_request(struct nfs_page *req)
{
	__set_page_dirty_nobuffers(req->wb_page);
}
/*
 * Check if a request is dirty
 */
static int
nfs_dirty_request(struct nfs_page *req)
{
	struct page *page = req->wb_page;

	if (page == NULL || test_bit(PG_NEED_COMMIT, &req->wb_flags))
		return 0;
	return !PageWriteback(req->wb_page);
}
#if defined(CONFIG_NFS_V3) || defined(CONFIG_NFS_V4)
/*
 * Add a request to the inode's commit list.
 */
static void
nfs_mark_request_commit(struct nfs_page *req)
{
	struct inode *inode = req->wb_context->path.dentry->d_inode;
	struct nfs_inode *nfsi = NFS_I(inode);

	spin_lock(&inode->i_lock);
	nfsi->ncommit++;
	set_bit(PG_NEED_COMMIT, &(req)->wb_flags);
	radix_tree_tag_set(&nfsi->nfs_page_tree,
			req->wb_index,
			NFS_PAGE_TAG_COMMIT);
	spin_unlock(&inode->i_lock);
	inc_zone_page_state(req->wb_page, NR_UNSTABLE_NFS);
	__mark_inode_dirty(inode, I_DIRTY_DATASYNC);
}

static inline
int nfs_write_need_commit(struct nfs_write_data *data)
{
	return data->verf.committed != NFS_FILE_SYNC;
}

static inline
int nfs_reschedule_unstable_write(struct nfs_page *req)
{
	if (test_bit(PG_NEED_COMMIT, &req->wb_flags)) {
		nfs_mark_request_commit(req);
		return 1;
	}
	if (test_and_clear_bit(PG_NEED_RESCHED, &req->wb_flags)) {
		nfs_redirty_request(req);
		return 1;
	}
	return 0;
}
#else
static inline void
nfs_mark_request_commit(struct nfs_page *req)
{
}

static inline
int nfs_write_need_commit(struct nfs_write_data *data)
{
	return 0;
}

static inline
int nfs_reschedule_unstable_write(struct nfs_page *req)
{
	return 0;
}
#endif
/*
 * Wait for a request to complete.
 *
 * Interruptible by signals only if mounted with intr flag.
 */
static int nfs_wait_on_requests_locked(struct inode *inode, pgoff_t idx_start, unsigned int npages)
{
	struct nfs_inode *nfsi = NFS_I(inode);
	struct nfs_page *req;
	pgoff_t idx_end, next;
	unsigned int res = 0;
	int error;

	if (npages == 0)
		idx_end = ~0;
	else
		idx_end = idx_start + npages - 1;

	next = idx_start;
	while (radix_tree_gang_lookup_tag(&nfsi->nfs_page_tree, (void **)&req, next, 1, NFS_PAGE_TAG_LOCKED)) {
		if (req->wb_index > idx_end)
			break;

		next = req->wb_index + 1;
		BUG_ON(!NFS_WBACK_BUSY(req));

		kref_get(&req->wb_kref);
		spin_unlock(&inode->i_lock);
		error = nfs_wait_on_request(req);
		nfs_release_request(req);
		spin_lock(&inode->i_lock);
		if (error < 0)
			return error;
		res++;
	}
	return res;
}
static void nfs_cancel_commit_list(struct list_head *head)
{
	struct nfs_page *req;

	while (!list_empty(head)) {
		req = nfs_list_entry(head->next);
		dec_zone_page_state(req->wb_page, NR_UNSTABLE_NFS);
		nfs_list_remove_request(req);
		clear_bit(PG_NEED_COMMIT, &(req)->wb_flags);
		nfs_inode_remove_request(req);
		nfs_unlock_request(req);
	}
}
#if defined(CONFIG_NFS_V3) || defined(CONFIG_NFS_V4)
/*
 * nfs_scan_commit - Scan an inode for commit requests
 * @inode: NFS inode to scan
 * @dst: destination list
 * @idx_start: lower bound of page->index to scan.
 * @npages: idx_start + npages sets the upper bound to scan.
 *
 * Moves requests from the inode's 'commit' request list.
 * The requests are *not* checked to ensure that they form a contiguous set.
 */
static int
nfs_scan_commit(struct inode *inode, struct list_head *dst, pgoff_t idx_start, unsigned int npages)
{
	struct nfs_inode *nfsi = NFS_I(inode);
	int res = 0;

	if (nfsi->ncommit != 0) {
		res = nfs_scan_list(nfsi, dst, idx_start, npages,
				NFS_PAGE_TAG_COMMIT);
		nfsi->ncommit -= res;
	}
	return res;
}
#else
static inline int nfs_scan_commit(struct inode *inode, struct list_head *dst, pgoff_t idx_start, unsigned int npages)
{
	return 0;
}
#endif
/*
 * Try to update any existing write request, or create one if there is none.
 * In order to match, the request's credentials must match those of
 * the calling process.
 *
 * Note: Should always be called with the Page Lock held!
 */
static struct nfs_page * nfs_update_request(struct nfs_open_context* ctx,
		struct page *page, unsigned int offset, unsigned int bytes)
{
	struct address_space *mapping = page->mapping;
	struct inode *inode = mapping->host;
	struct nfs_page	*req, *new = NULL;
	pgoff_t rqend, end;

	end = offset + bytes;

	for (;;) {
		/* Loop over all inode entries and see if we find
		 * A request for the page we wish to update
		 */
		spin_lock(&inode->i_lock);
		req = nfs_page_find_request_locked(page);
		if (req) {
			if (!nfs_lock_request_dontget(req)) {
				int error;

				spin_unlock(&inode->i_lock);
				error = nfs_wait_on_request(req);
				nfs_release_request(req);
				if (error < 0) {
					if (new)
						nfs_release_request(new);
					return ERR_PTR(error);
				}
				continue;
			}
			spin_unlock(&inode->i_lock);
			if (new)
				nfs_release_request(new);
			break;
		}

		if (new) {
			int error;
			nfs_lock_request_dontget(new);
			error = nfs_inode_add_request(inode, new);
			if (error) {
				spin_unlock(&inode->i_lock);
				nfs_unlock_request(new);
				return ERR_PTR(error);
			}
			spin_unlock(&inode->i_lock);
			return new;
		}
		spin_unlock(&inode->i_lock);

		new = nfs_create_request(ctx, inode, page, offset, bytes);
		if (IS_ERR(new))
			return new;
	}

	/* We have a request for our page.
	 * If the creds don't match, or the
	 * page addresses don't match,
	 * tell the caller to wait on the conflicting
	 * request.
	 */
	rqend = req->wb_offset + req->wb_bytes;
	if (req->wb_context != ctx
	    || req->wb_page != page
	    || !nfs_dirty_request(req)
	    || offset > rqend || end < req->wb_offset) {
		nfs_unlock_request(req);
		return ERR_PTR(-EBUSY);
	}

	/* Okay, the request matches. Update the region */
	if (offset < req->wb_offset) {
		req->wb_offset = offset;
		req->wb_pgbase = offset;
		req->wb_bytes = rqend - req->wb_offset;
	}

	if (end > rqend)
		req->wb_bytes = end - req->wb_offset;

	return req;
}
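
/*
 * Worked example for the region merge above (illustrative numbers, not
 * from the original source): suppose the existing request covers bytes
 * 1024-2047 of the page (wb_offset = wb_pgbase = 1024, wb_bytes = 1024,
 * so rqend = 2048) and the new write is offset = 512, bytes = 1024
 * (end = 1536).  The ranges overlap, so the request is extended downwards:
 * wb_offset and wb_pgbase become 512 and wb_bytes becomes 2048 - 512 =
 * 1536.  Since end (1536) is not beyond rqend (2048), the second
 * adjustment is skipped.
 */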
int nfs_flush_incompatible(struct file *file, struct page *page)
{
	struct nfs_open_context *ctx = (struct nfs_open_context *)file->private_data;
	struct nfs_page	*req;
	int do_flush, status;
	/*
	 * Look for a request corresponding to this page. If there
	 * is one, and it belongs to another file, we flush it out
	 * before we try to copy anything into the page. Do this
	 * due to the lack of an ACCESS-type call in NFSv2.
	 * Also do the same if we find a request from an existing
	 * dropped page.
	 */
	do {
		req = nfs_page_find_request(page);
		if (req == NULL)
			return 0;
		do_flush = req->wb_page != page || req->wb_context != ctx
			|| !nfs_dirty_request(req);
		nfs_release_request(req);
		if (!do_flush)
			return 0;
		status = nfs_wb_page(page->mapping->host, page);
	} while (status == 0);
	return status;
}
/*
 * Update and possibly write a cached page of an NFS file.
 *
 * XXX: Keep an eye on generic_file_read to make sure it doesn't do bad
 * things with a page scheduled for an RPC call (e.g. invalidate it).
 */
int nfs_updatepage(struct file *file, struct page *page,
		unsigned int offset, unsigned int count)
{
	struct nfs_open_context *ctx = (struct nfs_open_context *)file->private_data;
	struct inode *inode = page->mapping->host;
	int status = 0;

	nfs_inc_stats(inode, NFSIOS_VFSUPDATEPAGE);

	dprintk("NFS: nfs_updatepage(%s/%s %d@%Ld)\n",
		file->f_path.dentry->d_parent->d_name.name,
		file->f_path.dentry->d_name.name, count,
		(long long)(page_offset(page) + offset));

	/* If we're not using byte range locks, and we know the page
	 * is entirely in cache, it may be more efficient to avoid
	 * fragmenting write requests.
	 */
	if (PageUptodate(page) && inode->i_flock == NULL && !(file->f_mode & O_SYNC)) {
		count = max(count + offset, nfs_page_length(page));
		offset = 0;
	}

	status = nfs_writepage_setup(ctx, page, offset, count);
	__set_page_dirty_nobuffers(page);

	dprintk("NFS: nfs_updatepage returns %d (isize %Ld)\n",
			status, (long long)i_size_read(inode));
	if (status < 0)
		nfs_set_pageerror(page);
	return status;
}
static void nfs_writepage_release(struct nfs_page *req)
{

	if (PageError(req->wb_page)) {
		nfs_end_page_writeback(req->wb_page);
		nfs_inode_remove_request(req);
	} else if (!nfs_reschedule_unstable_write(req)) {
		/* Set the PG_uptodate flag */
		nfs_mark_uptodate(req->wb_page, req->wb_pgbase, req->wb_bytes);
		nfs_end_page_writeback(req->wb_page);
		nfs_inode_remove_request(req);
	} else
		nfs_end_page_writeback(req->wb_page);
	nfs_clear_page_tag_locked(req);
}
static inline int flush_task_priority(int how)
{
	switch (how & (FLUSH_HIGHPRI|FLUSH_LOWPRI)) {
		case FLUSH_HIGHPRI:
			return RPC_PRIORITY_HIGH;
		case FLUSH_LOWPRI:
			return RPC_PRIORITY_LOW;
	}
	return RPC_PRIORITY_NORMAL;
}
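
/*
 * Illustrative flow (added commentary, not from the original source): a
 * page flushed for memory reclaim gets wb_priority() ==
 * FLUSH_HIGHPRI|FLUSH_STABLE, so flush_task_priority() selects
 * RPC_PRIORITY_HIGH for the WRITE task, while kupdate-style background
 * writeback maps FLUSH_LOWPRI to RPC_PRIORITY_LOW; everything else runs
 * at RPC_PRIORITY_NORMAL.
 */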
/*
 * Set up the argument/result storage required for the RPC call.
 */
static void nfs_write_rpcsetup(struct nfs_page *req,
		struct nfs_write_data *data,
		const struct rpc_call_ops *call_ops,
		unsigned int count, unsigned int offset,
		int how)
{
	struct inode *inode;
	int flags;

	/* Set up the RPC argument and reply structs
	 * NB: take care not to mess about with data->commit et al. */

	data->req = req;
	data->inode = inode = req->wb_context->path.dentry->d_inode;
	data->cred = req->wb_context->cred;

	data->args.fh     = NFS_FH(inode);
	data->args.offset = req_offset(req) + offset;
	data->args.pgbase = req->wb_pgbase + offset;
	data->args.pages  = data->pagevec;
	data->args.count  = count;
	data->args.context = req->wb_context;

	data->res.fattr   = &data->fattr;
	data->res.count   = count;
	data->res.verf    = &data->verf;
	nfs_fattr_init(&data->fattr);

	/* Set up the initial task struct. */
	flags = (how & FLUSH_SYNC) ? 0 : RPC_TASK_ASYNC;
	rpc_init_task(&data->task, NFS_CLIENT(inode), flags, call_ops, data);
	NFS_PROTO(inode)->write_setup(data, how);

	data->task.tk_priority = flush_task_priority(how);
	data->task.tk_cookie = (unsigned long)inode;

	dprintk("NFS: %5u initiated write call "
		"(req %s/%Ld, %u bytes @ offset %Lu)\n",
		data->task.tk_pid,
		inode->i_sb->s_id,
		(long long)NFS_FILEID(inode),
		count,
		(unsigned long long)data->args.offset);
}
static void nfs_execute_write(struct nfs_write_data *data)
{
	struct rpc_clnt *clnt = NFS_CLIENT(data->inode);
	sigset_t oldset;

	rpc_clnt_sigmask(clnt, &oldset);
	rpc_execute(&data->task);
	rpc_clnt_sigunmask(clnt, &oldset);
}
/*
 * Generate multiple small requests to write out a single
 * contiguous dirty area on one page.
 */
static int nfs_flush_multi(struct inode *inode, struct list_head *head, unsigned int npages, size_t count, int how)
{
	struct nfs_page *req = nfs_list_entry(head->next);
	struct page *page = req->wb_page;
	struct nfs_write_data *data;
	size_t wsize = NFS_SERVER(inode)->wsize, nbytes;
	unsigned int offset;
	int requests = 0;
	LIST_HEAD(list);

	nfs_list_remove_request(req);

	nbytes = count;
	do {
		size_t len = min(nbytes, wsize);

		data = nfs_writedata_alloc(1);
		if (!data)
			goto out_bad;
		list_add(&data->pages, &list);
		requests++;
		nbytes -= len;
	} while (nbytes != 0);
	atomic_set(&req->wb_complete, requests);

	ClearPageError(page);
	offset = 0;
	nbytes = count;
	do {
		data = list_entry(list.next, struct nfs_write_data, pages);
		list_del_init(&data->pages);

		data->pagevec[0] = page;

		if (nbytes < wsize)
			wsize = nbytes;
		nfs_write_rpcsetup(req, data, &nfs_write_partial_ops,
				wsize, offset, how);
		offset += wsize;
		nbytes -= wsize;
		nfs_execute_write(data);
	} while (nbytes != 0);

	return 0;

out_bad:
	while (!list_empty(&list)) {
		data = list_entry(list.next, struct nfs_write_data, pages);
		list_del(&data->pages);
		nfs_writedata_release(data);
	}
	nfs_redirty_request(req);
	nfs_end_page_writeback(req->wb_page);
	nfs_clear_page_tag_locked(req);
	return -ENOMEM;
}
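
/*
 * Worked example for nfs_flush_multi() above (illustrative numbers, not
 * from the original source): with a 4096-byte dirty range on one page and
 * wsize == 1024, the first loop allocates 4 nfs_write_data structures and
 * sets req->wb_complete to 4; the second loop then issues four WRITEs of
 * 1024 bytes at page offsets 0, 1024, 2048 and 3072, all sharing the same
 * struct page and parent nfs_page.
 */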
/*
 * Create an RPC task for the given write request and kick it.
 * The page must have been locked by the caller.
 *
 * It may happen that the page we're passed is not marked dirty.
 * This is the case if nfs_updatepage detects a conflicting request
 * that has been written but not committed.
 */
static int nfs_flush_one(struct inode *inode, struct list_head *head, unsigned int npages, size_t count, int how)
{
	struct nfs_page *req;
	struct page **pages;
	struct nfs_write_data *data;

	data = nfs_writedata_alloc(npages);
	if (!data)
		goto out_bad;

	pages = data->pagevec;
	while (!list_empty(head)) {
		req = nfs_list_entry(head->next);
		nfs_list_remove_request(req);
		nfs_list_add_request(req, &data->pages);
		ClearPageError(req->wb_page);
		*pages++ = req->wb_page;
	}
	req = nfs_list_entry(data->pages.next);

	/* Set up the argument struct */
	nfs_write_rpcsetup(req, data, &nfs_write_full_ops, count, 0, how);

	nfs_execute_write(data);
	return 0;
out_bad:
	while (!list_empty(head)) {
		req = nfs_list_entry(head->next);
		nfs_list_remove_request(req);
		nfs_redirty_request(req);
		nfs_end_page_writeback(req->wb_page);
		nfs_clear_page_tag_locked(req);
	}
	return -ENOMEM;
}
static void nfs_pageio_init_write(struct nfs_pageio_descriptor *pgio,
				  struct inode *inode, int ioflags)
{
	int wsize = NFS_SERVER(inode)->wsize;

	if (wsize < PAGE_CACHE_SIZE)
		nfs_pageio_init(pgio, inode, nfs_flush_multi, wsize, ioflags);
	else
		nfs_pageio_init(pgio, inode, nfs_flush_one, wsize, ioflags);
}
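
/*
 * Added note (not from the original source): in the common case of
 * wsize >= PAGE_CACHE_SIZE (e.g. wsize == 32768 against 4096-byte pages)
 * whole pages are coalesced and sent via nfs_flush_one(); only when the
 * server advertises a write size smaller than a page (say wsize == 1024)
 * does each page have to be split into several RPCs by nfs_flush_multi().
 */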
/*
 * Handle a write reply that flushed part of a page.
 */
static void nfs_writeback_done_partial(struct rpc_task *task, void *calldata)
{
	struct nfs_write_data *data = calldata;
	struct nfs_page *req = data->req;
	struct page *page = req->wb_page;

	dprintk("NFS: write (%s/%Ld %d@%Ld)",
		req->wb_context->path.dentry->d_inode->i_sb->s_id,
		(long long)NFS_FILEID(req->wb_context->path.dentry->d_inode),
		req->wb_bytes,
		(long long)req_offset(req));

	if (nfs_writeback_done(task, data) != 0)
		return;

	if (task->tk_status < 0) {
		nfs_set_pageerror(page);
		req->wb_context->error = task->tk_status;
		dprintk(", error = %d\n", task->tk_status);
		goto out;
	}

	if (nfs_write_need_commit(data)) {
		struct inode *inode = page->mapping->host;

		spin_lock(&inode->i_lock);
		if (test_bit(PG_NEED_RESCHED, &req->wb_flags)) {
			/* Do nothing we need to resend the writes */
		} else if (!test_and_set_bit(PG_NEED_COMMIT, &req->wb_flags)) {
			memcpy(&req->wb_verf, &data->verf, sizeof(req->wb_verf));
			dprintk(" defer commit\n");
		} else if (memcmp(&req->wb_verf, &data->verf, sizeof(req->wb_verf))) {
			set_bit(PG_NEED_RESCHED, &req->wb_flags);
			clear_bit(PG_NEED_COMMIT, &req->wb_flags);
			dprintk(" server reboot detected\n");
		}
		spin_unlock(&inode->i_lock);
	} else
		dprintk(" OK\n");

out:
	if (atomic_dec_and_test(&req->wb_complete))
		nfs_writepage_release(req);
}

static const struct rpc_call_ops nfs_write_partial_ops = {
	.rpc_call_done = nfs_writeback_done_partial,
	.rpc_release = nfs_writedata_release,
};
/*
 * Handle a write reply that flushes a whole page.
 *
 * FIXME: There is an inherent race with invalidate_inode_pages and
 *	  writebacks since the page->count is kept > 1 for as long
 *	  as the page has a write request pending.
 */
static void nfs_writeback_done_full(struct rpc_task *task, void *calldata)
{
	struct nfs_write_data *data = calldata;
	struct nfs_page *req;
	struct page *page;

	if (nfs_writeback_done(task, data) != 0)
		return;

	/* Update attributes as result of writeback. */
	while (!list_empty(&data->pages)) {
		req = nfs_list_entry(data->pages.next);
		nfs_list_remove_request(req);
		page = req->wb_page;

		dprintk("NFS: write (%s/%Ld %d@%Ld)",
			req->wb_context->path.dentry->d_inode->i_sb->s_id,
			(long long)NFS_FILEID(req->wb_context->path.dentry->d_inode),
			req->wb_bytes,
			(long long)req_offset(req));

		if (task->tk_status < 0) {
			nfs_set_pageerror(page);
			req->wb_context->error = task->tk_status;
			dprintk(", error = %d\n", task->tk_status);
			goto remove_request;
		}

		if (nfs_write_need_commit(data)) {
			memcpy(&req->wb_verf, &data->verf, sizeof(req->wb_verf));
			nfs_mark_request_commit(req);
			nfs_end_page_writeback(page);
			dprintk(" marked for commit\n");
			goto next;
		}
		/* Set the PG_uptodate flag? */
		nfs_mark_uptodate(page, req->wb_pgbase, req->wb_bytes);
		dprintk(" OK\n");
remove_request:
		nfs_end_page_writeback(page);
		nfs_inode_remove_request(req);
	next:
		nfs_clear_page_tag_locked(req);
	}
}

static const struct rpc_call_ops nfs_write_full_ops = {
	.rpc_call_done = nfs_writeback_done_full,
	.rpc_release = nfs_writedata_release,
};
/*
 * This function is called when the WRITE call is complete.
 */
int nfs_writeback_done(struct rpc_task *task, struct nfs_write_data *data)
{
	struct nfs_writeargs *argp = &data->args;
	struct nfs_writeres *resp = &data->res;
	int status;

	dprintk("NFS: %5u nfs_writeback_done (status %d)\n",
		task->tk_pid, task->tk_status);

	/*
	 * ->write_done will attempt to use post-op attributes to detect
	 * conflicting writes by other clients.  A strict interpretation
	 * of close-to-open would allow us to continue caching even if
	 * another writer had changed the file, but some applications
	 * depend on tighter cache coherency when writing.
	 */
	status = NFS_PROTO(data->inode)->write_done(task, data);
	if (status != 0)
		return status;
	nfs_add_stats(data->inode, NFSIOS_SERVERWRITTENBYTES, resp->count);

#if defined(CONFIG_NFS_V3) || defined(CONFIG_NFS_V4)
	if (resp->verf->committed < argp->stable && task->tk_status >= 0) {
		/* We tried a write call, but the server did not
		 * commit data to stable storage even though we
		 * requested it.
		 * Note: There is a known bug in Tru64 < 5.0 in which
		 *	 the server reports NFS_DATA_SYNC, but performs
		 *	 NFS_FILE_SYNC. We therefore implement this checking
		 *	 as a dprintk() in order to avoid filling syslog.
		 */
		static unsigned long complain;

		if (time_before(complain, jiffies)) {
			dprintk("NFS: faulty NFS server %s:"
				" (committed = %d) != (stable = %d)\n",
				NFS_SERVER(data->inode)->nfs_client->cl_hostname,
				resp->verf->committed, argp->stable);
			complain = jiffies + 300 * HZ;
		}
	}
#endif
	/* Is this a short write? */
	if (task->tk_status >= 0 && resp->count < argp->count) {
		static unsigned long complain;

		nfs_inc_stats(data->inode, NFSIOS_SHORTWRITE);

		/* Has the server at least made some progress? */
		if (resp->count != 0) {
			/* Was this an NFSv2 write or an NFSv3 stable write? */
			if (resp->verf->committed != NFS_UNSTABLE) {
				/* Resend from where the server left off */
				argp->offset += resp->count;
				argp->pgbase += resp->count;
				argp->count -= resp->count;
			} else {
				/* Resend as a stable write in order to avoid
				 * headaches in the case of a server crash.
				 */
				argp->stable = NFS_FILE_SYNC;
			}
			rpc_restart_call(task);
			return -EAGAIN;
		}
		if (time_before(complain, jiffies)) {
			printk(KERN_WARNING
			       "NFS: Server wrote zero bytes, expected %u.\n",
					argp->count);
			complain = jiffies + 300 * HZ;
		}
		/* Can't do anything about it except throw an error. */
		task->tk_status = -EIO;
	}
	return 0;
}
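
/*
 * Worked example for the short-write handling above (illustrative numbers,
 * not from the original source): if a stable WRITE asked for
 * argp->count == 8192 bytes at argp->offset == 0 and the server accepted
 * only resp->count == 4096, the request is adjusted to offset 4096, pgbase
 * advanced by 4096 and count reduced to 4096, then restarted with
 * rpc_restart_call(); an unstable short write is instead retried in full
 * with argp->stable forced to NFS_FILE_SYNC.
 */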
#if defined(CONFIG_NFS_V3) || defined(CONFIG_NFS_V4)
void nfs_commit_release(void *wdata)
{
	nfs_commit_free(wdata);
}

/*
 * Set up the argument/result storage required for the RPC call.
 */
static void nfs_commit_rpcsetup(struct list_head *head,
		struct nfs_write_data *data,
		int how)
{
	struct nfs_page *first;
	struct inode *inode;
	int flags;

	/* Set up the RPC argument and reply structs
	 * NB: take care not to mess about with data->commit et al. */

	list_splice_init(head, &data->pages);
	first = nfs_list_entry(data->pages.next);
	inode = first->wb_context->path.dentry->d_inode;

	data->inode	  = inode;
	data->cred	  = first->wb_context->cred;

	data->args.fh     = NFS_FH(data->inode);
	/* Note: we always request a commit of the entire inode */
	data->args.offset = 0;
	data->args.count  = 0;
	data->res.count   = 0;
	data->res.fattr   = &data->fattr;
	data->res.verf    = &data->verf;
	nfs_fattr_init(&data->fattr);

	/* Set up the initial task struct. */
	flags = (how & FLUSH_SYNC) ? 0 : RPC_TASK_ASYNC;
	rpc_init_task(&data->task, NFS_CLIENT(inode), flags, &nfs_commit_ops, data);
	NFS_PROTO(inode)->commit_setup(data, how);

	data->task.tk_priority = flush_task_priority(how);
	data->task.tk_cookie = (unsigned long)inode;

	dprintk("NFS: %5u initiated commit call\n", data->task.tk_pid);
}
/*
 * Commit dirty pages
 */
static int
nfs_commit_list(struct inode *inode, struct list_head *head, int how)
{
	struct nfs_write_data *data;
	struct nfs_page *req;

	data = nfs_commit_alloc();

	if (!data)
		goto out_bad;

	/* Set up the argument struct */
	nfs_commit_rpcsetup(head, data, how);

	nfs_execute_write(data);
	return 0;
out_bad:
	while (!list_empty(head)) {
		req = nfs_list_entry(head->next);
		nfs_list_remove_request(req);
		nfs_mark_request_commit(req);
		dec_zone_page_state(req->wb_page, NR_UNSTABLE_NFS);
		nfs_clear_page_tag_locked(req);
	}
	return -ENOMEM;
}
/*
 * COMMIT call returned
 */
static void nfs_commit_done(struct rpc_task *task, void *calldata)
{
	struct nfs_write_data *data = calldata;
	struct nfs_page *req;

	dprintk("NFS: %5u nfs_commit_done (status %d)\n",
				task->tk_pid, task->tk_status);

	/* Call the NFS version-specific code */
	if (NFS_PROTO(data->inode)->commit_done(task, data) != 0)
		return;

	while (!list_empty(&data->pages)) {
		req = nfs_list_entry(data->pages.next);
		nfs_list_remove_request(req);
		clear_bit(PG_NEED_COMMIT, &(req)->wb_flags);
		dec_zone_page_state(req->wb_page, NR_UNSTABLE_NFS);

		dprintk("NFS: commit (%s/%Ld %d@%Ld)",
			req->wb_context->path.dentry->d_inode->i_sb->s_id,
			(long long)NFS_FILEID(req->wb_context->path.dentry->d_inode),
			req->wb_bytes,
			(long long)req_offset(req));
		if (task->tk_status < 0) {
			req->wb_context->error = task->tk_status;
			nfs_inode_remove_request(req);
			dprintk(", error = %d\n", task->tk_status);
			goto next;
		}

		/* Okay, COMMIT succeeded, apparently. Check the verifier
		 * returned by the server against all stored verfs. */
		if (!memcmp(req->wb_verf.verifier, data->verf.verifier, sizeof(data->verf.verifier))) {
			/* We have a match */
			/* Set the PG_uptodate flag */
			nfs_mark_uptodate(req->wb_page, req->wb_pgbase,
					req->wb_bytes);
			nfs_inode_remove_request(req);
			dprintk(" OK\n");
			goto next;
		}
		/* We have a mismatch. Write the page again */
		dprintk(" mismatch\n");
		nfs_redirty_request(req);
	next:
		nfs_clear_page_tag_locked(req);
	}
}

static const struct rpc_call_ops nfs_commit_ops = {
	.rpc_call_done = nfs_commit_done,
	.rpc_release = nfs_commit_release,
};
int nfs_commit_inode(struct inode *inode, int how)
{
	LIST_HEAD(head);
	int res;

	spin_lock(&inode->i_lock);
	res = nfs_scan_commit(inode, &head, 0, 0);
	spin_unlock(&inode->i_lock);
	if (res) {
		int error = nfs_commit_list(inode, &head, how);
		if (error < 0)
			return error;
	}
	return res;
}
#else
static inline int nfs_commit_list(struct inode *inode, struct list_head *head, int how)
{
	return 0;
}
#endif
long nfs_sync_mapping_wait(struct address_space *mapping, struct writeback_control *wbc, int how)
{
	struct inode *inode = mapping->host;
	pgoff_t idx_start, idx_end;
	unsigned int npages = 0;
	LIST_HEAD(head);
	int nocommit = how & FLUSH_NOCOMMIT;
	long pages, ret;

	if (wbc->range_cyclic)
		idx_start = 0;
	else {
		idx_start = wbc->range_start >> PAGE_CACHE_SHIFT;
		idx_end = wbc->range_end >> PAGE_CACHE_SHIFT;
		if (idx_end > idx_start) {
			pgoff_t l_npages = 1 + idx_end - idx_start;
			npages = l_npages;
			if (sizeof(npages) != sizeof(l_npages) &&
					(pgoff_t)npages != l_npages)
				npages = 0;
		}
	}
	how &= ~FLUSH_NOCOMMIT;
	spin_lock(&inode->i_lock);
	do {
		ret = nfs_wait_on_requests_locked(inode, idx_start, npages);
		if (ret != 0)
			continue;
		if (nocommit)
			break;
		pages = nfs_scan_commit(inode, &head, idx_start, npages);
		if (pages == 0)
			break;
		if (how & FLUSH_INVALIDATE) {
			spin_unlock(&inode->i_lock);
			nfs_cancel_commit_list(&head);
			ret = pages;
			spin_lock(&inode->i_lock);
			continue;
		}
		pages += nfs_scan_commit(inode, &head, 0, 0);
		spin_unlock(&inode->i_lock);
		ret = nfs_commit_list(inode, &head, how);
		spin_lock(&inode->i_lock);
	} while (ret >= 0);
	spin_unlock(&inode->i_lock);
	return ret;
}
/*
 * flush the inode to disk.
 */
int nfs_wb_all(struct inode *inode)
{
	struct address_space *mapping = inode->i_mapping;
	struct writeback_control wbc = {
		.bdi = mapping->backing_dev_info,
		.sync_mode = WB_SYNC_ALL,
		.nr_to_write = LONG_MAX,
		.for_writepages = 1,
		.range_cyclic = 1,
	};
	int ret;

	ret = nfs_writepages(mapping, &wbc);
	if (ret < 0)
		goto out;
	ret = nfs_sync_mapping_wait(mapping, &wbc, 0);
	if (ret >= 0)
		return 0;
out:
	__mark_inode_dirty(mapping->host, I_DIRTY_PAGES);
	return ret;
}
int nfs_sync_mapping_range(struct address_space *mapping, loff_t range_start, loff_t range_end, int how)
{
	struct writeback_control wbc = {
		.bdi = mapping->backing_dev_info,
		.sync_mode = WB_SYNC_ALL,
		.nr_to_write = LONG_MAX,
		.range_start = range_start,
		.range_end = range_end,
		.for_writepages = 1,
	};
	int ret;

	ret = nfs_writepages(mapping, &wbc);
	if (ret < 0)
		goto out;
	ret = nfs_sync_mapping_wait(mapping, &wbc, how);
	if (ret >= 0)
		return 0;
out:
	__mark_inode_dirty(mapping->host, I_DIRTY_PAGES);
	return ret;
}
int nfs_wb_page_cancel(struct inode *inode, struct page *page)
{
	struct nfs_page *req;
	loff_t range_start = page_offset(page);
	loff_t range_end = range_start + (loff_t)(PAGE_CACHE_SIZE - 1);
	struct writeback_control wbc = {
		.bdi = page->mapping->backing_dev_info,
		.sync_mode = WB_SYNC_ALL,
		.nr_to_write = LONG_MAX,
		.range_start = range_start,
		.range_end = range_end,
	};
	int ret = 0;

	BUG_ON(!PageLocked(page));
	for (;;) {
		req = nfs_page_find_request(page);
		if (req == NULL)
			goto out;
		if (test_bit(PG_NEED_COMMIT, &req->wb_flags)) {
			nfs_release_request(req);
			break;
		}
		if (nfs_lock_request_dontget(req)) {
			nfs_inode_remove_request(req);
			/*
			 * In case nfs_inode_remove_request has marked the
			 * page as being dirty
			 */
			cancel_dirty_page(page, PAGE_CACHE_SIZE);
			nfs_unlock_request(req);
			break;
		}
		ret = nfs_wait_on_request(req);
		if (ret < 0)
			goto out;
	}
	if (!PagePrivate(page))
		return 0;
	ret = nfs_sync_mapping_wait(page->mapping, &wbc, FLUSH_INVALIDATE);
out:
	return ret;
}
int nfs_wb_page_priority(struct inode *inode, struct page *page, int how)
{
	loff_t range_start = page_offset(page);
	loff_t range_end = range_start + (loff_t)(PAGE_CACHE_SIZE - 1);
	struct writeback_control wbc = {
		.bdi = page->mapping->backing_dev_info,
		.sync_mode = WB_SYNC_ALL,
		.nr_to_write = LONG_MAX,
		.range_start = range_start,
		.range_end = range_end,
	};
	int ret;

	BUG_ON(!PageLocked(page));
	if (clear_page_dirty_for_io(page)) {
		ret = nfs_writepage_locked(page, &wbc);
		if (ret < 0)
			goto out;
	}
	if (!PagePrivate(page))
		return 0;
	ret = nfs_sync_mapping_wait(page->mapping, &wbc, how);
	if (ret >= 0)
		return 0;
out:
	__mark_inode_dirty(inode, I_DIRTY_PAGES);
	return ret;
}

/*
 * Write back all requests on one page - we do this before reading it.
 */
int nfs_wb_page(struct inode *inode, struct page* page)
{
	return nfs_wb_page_priority(inode, page, FLUSH_STABLE);
}
int nfs_set_page_dirty(struct page *page)
{
	struct address_space *mapping = page->mapping;
	struct inode *inode;
	struct nfs_page *req;
	int ret;

	if (!mapping)
		goto out_raced;
	inode = mapping->host;
	if (!inode)
		goto out_raced;
	spin_lock(&inode->i_lock);
	req = nfs_page_find_request_locked(page);
	if (req != NULL) {
		/* Mark any existing write requests for flushing */
		ret = !test_and_set_bit(PG_NEED_FLUSH, &req->wb_flags);
		spin_unlock(&inode->i_lock);
		nfs_release_request(req);
		return ret;
	}
	ret = __set_page_dirty_nobuffers(page);
	spin_unlock(&inode->i_lock);
	return ret;
out_raced:
	return !TestSetPageDirty(page);
}
int __init nfs_init_writepagecache(void)
{
	nfs_wdata_cachep = kmem_cache_create("nfs_write_data",
					     sizeof(struct nfs_write_data),
					     0, SLAB_HWCACHE_ALIGN,
					     NULL);
	if (nfs_wdata_cachep == NULL)
		return -ENOMEM;

	nfs_wdata_mempool = mempool_create_slab_pool(MIN_POOL_WRITE,
						     nfs_wdata_cachep);
	if (nfs_wdata_mempool == NULL)
		return -ENOMEM;

	nfs_commit_mempool = mempool_create_slab_pool(MIN_POOL_COMMIT,
						      nfs_wdata_cachep);
	if (nfs_commit_mempool == NULL)
		return -ENOMEM;

	/*
	 * NFS congestion size, scale with available memory.
	 *
	 * This allows larger machines to have larger/more transfers.
	 * Limit the default to 256M
	 */
	nfs_congestion_kb = (16*int_sqrt(totalram_pages)) << (PAGE_SHIFT-10);
	if (nfs_congestion_kb > 256*1024)
		nfs_congestion_kb = 256*1024;

	return 0;
}
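
/*
 * Worked example for the scaling above (illustrative numbers, not from the
 * original source): on a machine with 1 GiB of RAM and 4 KiB pages,
 * totalram_pages is roughly 262144, int_sqrt(262144) == 512, so
 * nfs_congestion_kb becomes (16 * 512) << 2 = 32768 kB (32 MiB); the
 * 256*1024 cap only matters on machines with very large memory.
 */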
void nfs_destroy_writepagecache(void)
{
	mempool_destroy(nfs_commit_mempool);
	mempool_destroy(nfs_wdata_mempool);
	kmem_cache_destroy(nfs_wdata_cachep);
}