/*
 * linux/fs/nfs/write.c
 *
 * Write file data over NFS.
 *
 * Copyright (C) 1996, 1997, Olaf Kirch <okir@monad.swb.de>
 */

#include <linux/types.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/file.h>
#include <linux/writeback.h>
#include <linux/swap.h>

#include <linux/sunrpc/clnt.h>
#include <linux/nfs_fs.h>
#include <linux/nfs_mount.h>
#include <linux/nfs_page.h>
#include <linux/backing-dev.h>

#include <asm/uaccess.h>
#include <linux/smp_lock.h>

#include "delegation.h"
#include "internal.h"
#include "iostat.h"

#define NFSDBG_FACILITY		NFSDBG_PAGECACHE

#define MIN_POOL_WRITE		(32)
#define MIN_POOL_COMMIT		(4)
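/*
 * Write and commit state is allocated from mempools: under memory
 * pressure, mempool_alloc(..., GFP_NOFS) can always fall back on the
 * preallocated reserve, so at least MIN_POOL_WRITE write RPCs and
 * MIN_POOL_COMMIT commit RPCs can make progress.  That matters because
 * writing pages out may be the only way to free memory, and an
 * allocation failure here could otherwise stall reclaim indefinitely.
 */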
/*
 * Local function declarations
 */
static struct nfs_page * nfs_update_request(struct nfs_open_context*,
					    struct page *,
					    unsigned int, unsigned int);
static long nfs_flush_mapping(struct address_space *mapping, struct writeback_control *wbc, int how);
static const struct rpc_call_ops nfs_write_partial_ops;
static const struct rpc_call_ops nfs_write_full_ops;
static const struct rpc_call_ops nfs_commit_ops;

static struct kmem_cache *nfs_wdata_cachep;
static mempool_t *nfs_wdata_mempool;
static mempool_t *nfs_commit_mempool;
struct nfs_write_data *nfs_commit_alloc(void)
{
	struct nfs_write_data *p = mempool_alloc(nfs_commit_mempool, GFP_NOFS);

	if (p) {
		memset(p, 0, sizeof(*p));
		INIT_LIST_HEAD(&p->pages);
	}
	return p;
}

void nfs_commit_rcu_free(struct rcu_head *head)
{
	struct nfs_write_data *p = container_of(head, struct nfs_write_data, task.u.tk_rcu);
	if (p && (p->pagevec != &p->page_array[0]))
		kfree(p->pagevec);
	mempool_free(p, nfs_commit_mempool);
}

void nfs_commit_free(struct nfs_write_data *wdata)
{
	call_rcu_bh(&wdata->task.u.tk_rcu, nfs_commit_rcu_free);
}
struct nfs_write_data *nfs_writedata_alloc(size_t len)
{
	unsigned int pagecount = (len + PAGE_SIZE - 1) >> PAGE_SHIFT;
	struct nfs_write_data *p = mempool_alloc(nfs_wdata_mempool, GFP_NOFS);

	if (p) {
		memset(p, 0, sizeof(*p));
		INIT_LIST_HEAD(&p->pages);
		p->npages = pagecount;
		if (pagecount <= ARRAY_SIZE(p->page_array))
			p->pagevec = p->page_array;
		else {
			p->pagevec = kcalloc(pagecount, sizeof(struct page *), GFP_NOFS);
			if (!p->pagevec) {
				mempool_free(p, nfs_wdata_mempool);
				p = NULL;
			}
		}
	}
	return p;
}
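/*
 * Note on nfs_writedata_alloc(): for writes of up to
 * ARRAY_SIZE(p->page_array) pages the page vector is embedded in the
 * nfs_write_data itself, so the common small write costs no second
 * allocation.  Only larger requests fall back to kcalloc(), and if
 * that fallback fails the nfs_write_data goes straight back to its
 * mempool.
 */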
static void nfs_writedata_rcu_free(struct rcu_head *head)
{
	struct nfs_write_data *p = container_of(head, struct nfs_write_data, task.u.tk_rcu);
	if (p && (p->pagevec != &p->page_array[0]))
		kfree(p->pagevec);
	mempool_free(p, nfs_wdata_mempool);
}

static void nfs_writedata_free(struct nfs_write_data *wdata)
{
	call_rcu_bh(&wdata->task.u.tk_rcu, nfs_writedata_rcu_free);
}

void nfs_writedata_release(void *wdata)
{
	nfs_writedata_free(wdata);
}
static struct nfs_page *nfs_page_find_request_locked(struct page *page)
{
	struct nfs_page *req = NULL;

	if (PagePrivate(page)) {
		req = (struct nfs_page *)page_private(page);
		if (req != NULL)
			atomic_inc(&req->wb_count);
	}
	return req;
}

static struct nfs_page *nfs_page_find_request(struct page *page)
{
	struct nfs_page *req = NULL;
	spinlock_t *req_lock = &NFS_I(page->mapping->host)->req_lock;

	spin_lock(req_lock);
	req = nfs_page_find_request_locked(page);
	spin_unlock(req_lock);
	return req;
}
/* Adjust the file length if we're writing beyond the end */
static void nfs_grow_file(struct page *page, unsigned int offset, unsigned int count)
{
	struct inode *inode = page->mapping->host;
	loff_t end, i_size = i_size_read(inode);
	unsigned long end_index = (i_size - 1) >> PAGE_CACHE_SHIFT;

	if (i_size > 0 && page->index < end_index)
		return;
	end = ((loff_t)page->index << PAGE_CACHE_SHIFT) + ((loff_t)offset+count);
	if (i_size >= end)
		return;
	nfs_inc_stats(inode, NFSIOS_EXTENDWRITE);
	i_size_write(inode, end);
}
/* A writeback failed: mark the page as bad, and invalidate the page cache */
static void nfs_set_pageerror(struct page *page)
{
	SetPageError(page);
	nfs_zap_mapping(page->mapping->host, page->mapping);
}
/* We can set the PG_uptodate flag if we see that a write request
 * covers the full page.
 */
static void nfs_mark_uptodate(struct page *page, unsigned int base, unsigned int count)
{
	if (PageUptodate(page))
		return;
	if (base != 0)
		return;
	if (count != nfs_page_length(page))
		return;
	if (count != PAGE_CACHE_SIZE)
		memclear_highpage_flush(page, count, PAGE_CACHE_SIZE - count);
	SetPageUptodate(page);
}
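/*
 * In other words: a request qualifies only if it starts at offset 0
 * and covers all of the valid data in the page.  When that valid data
 * stops short of a full page, the tail beyond 'count' is explicitly
 * zeroed by memclear_highpage_flush() above, so flagging the entire
 * page PG_uptodate is safe.
 */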
static int nfs_writepage_setup(struct nfs_open_context *ctx, struct page *page,
		unsigned int offset, unsigned int count)
{
	struct nfs_page	*req;
	int ret;

	for (;;) {
		req = nfs_update_request(ctx, page, offset, count);
		if (!IS_ERR(req))
			break;
		ret = PTR_ERR(req);
		if (ret != -EBUSY)
			return ret;
		ret = nfs_wb_page(page->mapping->host, page);
		if (ret != 0)
			return ret;
	}
	/* Update file length */
	nfs_grow_file(page, offset, count);
	/* Set the PG_uptodate flag? */
	nfs_mark_uptodate(page, offset, count);
	nfs_unlock_request(req);
	return 0;
}
static int wb_priority(struct writeback_control *wbc)
{
	if (wbc->for_reclaim)
		return FLUSH_HIGHPRI;
	if (wbc->for_kupdate)
		return FLUSH_LOWPRI;
	return 0;
}
/*
 * NFS congestion control
 */

int nfs_congestion_kb;

#define NFS_CONGESTION_ON_THRESH	(nfs_congestion_kb >> (PAGE_SHIFT-10))
#define NFS_CONGESTION_OFF_THRESH	\
	(NFS_CONGESTION_ON_THRESH - (NFS_CONGESTION_ON_THRESH >> 2))
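/*
 * The two thresholds form a hysteresis band: a server's bdi is marked
 * congested once the number of pages in flight exceeds
 * NFS_CONGESTION_ON_THRESH, and is not cleared until it drops below
 * 75% of that value.  Worked example (illustrative, 4K pages, so
 * PAGE_SHIFT-10 == 2): nfs_congestion_kb = 65536 gives an on-threshold
 * of 16384 pages and an off-threshold of 12288 pages.
 */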
static int nfs_set_page_writeback(struct page *page)
{
	int ret = test_set_page_writeback(page);

	if (!ret) {
		struct inode *inode = page->mapping->host;
		struct nfs_server *nfss = NFS_SERVER(inode);

		if (atomic_inc_return(&nfss->writeback) >
				NFS_CONGESTION_ON_THRESH)
			set_bdi_congested(&nfss->backing_dev_info, WRITE);
	}
	return ret;
}

static void nfs_end_page_writeback(struct page *page)
{
	struct inode *inode = page->mapping->host;
	struct nfs_server *nfss = NFS_SERVER(inode);

	end_page_writeback(page);
	if (atomic_dec_return(&nfss->writeback) < NFS_CONGESTION_OFF_THRESH) {
		clear_bdi_congested(&nfss->backing_dev_info, WRITE);
		congestion_end(WRITE);
	}
}
/*
 * Find an associated nfs write request, and prepare to flush it out
 * Returns 1 if there was no write request, or if the request was
 * already tagged by nfs_set_page_dirty. Returns 0 if the request
 * was not tagged.
 * May also return an error if the user signalled nfs_wait_on_request().
 */
static int nfs_page_mark_flush(struct page *page)
{
	struct nfs_page *req;
	struct nfs_inode *nfsi = NFS_I(page->mapping->host);
	spinlock_t *req_lock = &nfsi->req_lock;
	int ret;

	spin_lock(req_lock);
	for (;;) {
		req = nfs_page_find_request_locked(page);
		if (req == NULL) {
			spin_unlock(req_lock);
			return 1;
		}
		if (nfs_lock_request_dontget(req))
			break;
		/* Note: If we hold the page lock, as is the case in nfs_writepage,
		 *	 then the call to nfs_lock_request_dontget() will always
		 *	 succeed provided that someone hasn't already marked the
		 *	 request as dirty (in which case we don't care).
		 */
		spin_unlock(req_lock);
		ret = nfs_wait_on_request(req);
		nfs_release_request(req);
		if (ret != 0)
			return ret;
		spin_lock(req_lock);
	}
	if (test_bit(PG_NEED_COMMIT, &req->wb_flags)) {
		/* This request is marked for commit */
		spin_unlock(req_lock);
		nfs_unlock_request(req);
		return 1;
	}
	if (nfs_set_page_writeback(page) == 0) {
		nfs_list_remove_request(req);
		/* add the request to the inode's dirty list. */
		radix_tree_tag_set(&nfsi->nfs_page_tree,
				req->wb_index, NFS_PAGE_TAG_DIRTY);
		nfs_list_add_request(req, &nfsi->dirty);
		nfsi->ndirty++;
		spin_unlock(req_lock);
		__mark_inode_dirty(page->mapping->host, I_DIRTY_PAGES);
	} else
		spin_unlock(req_lock);
	ret = test_bit(PG_NEED_FLUSH, &req->wb_flags);
	nfs_unlock_request(req);
	return ret;
}
/*
 * Write an mmapped page to the server.
 */
static int nfs_writepage_locked(struct page *page, struct writeback_control *wbc)
{
	struct nfs_open_context *ctx;
	struct inode *inode = page->mapping->host;
	unsigned offset;
	int err;

	nfs_inc_stats(inode, NFSIOS_VFSWRITEPAGE);
	nfs_add_stats(inode, NFSIOS_WRITEPAGES, 1);

	err = nfs_page_mark_flush(page);
	if (err <= 0)
		goto out;
	err = 0;
	offset = nfs_page_length(page);
	if (!offset)
		goto out;

	ctx = nfs_find_open_context(inode, NULL, FMODE_WRITE);
	if (ctx == NULL) {
		err = -EBADF;
		goto out;
	}
	err = nfs_writepage_setup(ctx, page, 0, offset);
	put_nfs_open_context(ctx);
	if (err != 0)
		goto out;
	err = nfs_page_mark_flush(page);
	if (err > 0)
		err = 0;
out:
	if (!wbc->for_writepages)
		nfs_flush_mapping(page->mapping, wbc, FLUSH_STABLE|wb_priority(wbc));
	return err;
}

int nfs_writepage(struct page *page, struct writeback_control *wbc)
{
	int err;

	err = nfs_writepage_locked(page, wbc);
	unlock_page(page);
	return err;
}
int nfs_writepages(struct address_space *mapping, struct writeback_control *wbc)
{
	struct inode *inode = mapping->host;
	int err;

	nfs_inc_stats(inode, NFSIOS_VFSWRITEPAGES);

	err = generic_writepages(mapping, wbc);
	if (err)
		return err;
	err = nfs_flush_mapping(mapping, wbc, wb_priority(wbc));
	if (err < 0)
		goto out;
	nfs_add_stats(inode, NFSIOS_WRITEPAGES, err);
	err = 0;
out:
	return err;
}
/*
 * Insert a write request into an inode
 */
static int nfs_inode_add_request(struct inode *inode, struct nfs_page *req)
{
	struct nfs_inode *nfsi = NFS_I(inode);
	int error;

	error = radix_tree_insert(&nfsi->nfs_page_tree, req->wb_index, req);
	BUG_ON(error == -EEXIST);
	if (error)
		return error;
	if (!nfsi->npages) {
		igrab(inode);
		nfs_begin_data_update(inode);
		if (nfs_have_delegation(inode, FMODE_WRITE))
			nfsi->change_attr++;
	}
	SetPagePrivate(req->wb_page);
	set_page_private(req->wb_page, (unsigned long)req);
	if (PageDirty(req->wb_page))
		set_bit(PG_NEED_FLUSH, &req->wb_flags);
	nfsi->npages++;
	atomic_inc(&req->wb_count);
	return 0;
}
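/*
 * The PG_private flag and page_private(page) pointer set above are the
 * only link from a struct page back to its struct nfs_page: this is
 * what nfs_page_find_request_locked() dereferences, and the matching
 * teardown happens in nfs_inode_remove_request() below.
 */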
/*
 * Remove a write request from an inode
 */
static void nfs_inode_remove_request(struct nfs_page *req)
{
	struct inode *inode = req->wb_context->dentry->d_inode;
	struct nfs_inode *nfsi = NFS_I(inode);

	BUG_ON (!NFS_WBACK_BUSY(req));

	spin_lock(&nfsi->req_lock);
	set_page_private(req->wb_page, 0);
	ClearPagePrivate(req->wb_page);
	radix_tree_delete(&nfsi->nfs_page_tree, req->wb_index);
	if (test_and_clear_bit(PG_NEED_FLUSH, &req->wb_flags))
		__set_page_dirty_nobuffers(req->wb_page);
	nfsi->npages--;
	if (!nfsi->npages) {
		spin_unlock(&nfsi->req_lock);
		nfs_end_data_update(inode);
		iput(inode);
	} else
		spin_unlock(&nfsi->req_lock);
	nfs_clear_request(req);
	nfs_release_request(req);
}
static void
nfs_redirty_request(struct nfs_page *req)
{
	__set_page_dirty_nobuffers(req->wb_page);
}

/*
 * Check if a request is dirty
 */
static inline int
nfs_dirty_request(struct nfs_page *req)
{
	struct page *page = req->wb_page;

	if (page == NULL || test_bit(PG_NEED_COMMIT, &req->wb_flags))
		return 0;
	return !PageWriteback(req->wb_page);
}
#if defined(CONFIG_NFS_V3) || defined(CONFIG_NFS_V4)
/*
 * Add a request to the inode's commit list.
 */
static void
nfs_mark_request_commit(struct nfs_page *req)
{
	struct inode *inode = req->wb_context->dentry->d_inode;
	struct nfs_inode *nfsi = NFS_I(inode);

	spin_lock(&nfsi->req_lock);
	nfs_list_add_request(req, &nfsi->commit);
	nfsi->ncommit++;
	set_bit(PG_NEED_COMMIT, &(req)->wb_flags);
	spin_unlock(&nfsi->req_lock);
	inc_zone_page_state(req->wb_page, NR_UNSTABLE_NFS);
	__mark_inode_dirty(inode, I_DIRTY_DATASYNC);
}

static inline
int nfs_write_need_commit(struct nfs_write_data *data)
{
	return data->verf.committed != NFS_FILE_SYNC;
}

static inline
int nfs_reschedule_unstable_write(struct nfs_page *req)
{
	if (test_bit(PG_NEED_COMMIT, &req->wb_flags)) {
		nfs_mark_request_commit(req);
		return 1;
	}
	if (test_and_clear_bit(PG_NEED_RESCHED, &req->wb_flags)) {
		nfs_redirty_request(req);
		return 1;
	}
	return 0;
}
#else
static inline void
nfs_mark_request_commit(struct nfs_page *req)
{
}

static inline
int nfs_write_need_commit(struct nfs_write_data *data)
{
	return 0;
}

static inline
int nfs_reschedule_unstable_write(struct nfs_page *req)
{
	return 0;
}
#endif
/*
 * Wait for a request to complete.
 *
 * Interruptible by signals only if mounted with intr flag.
 */
static int nfs_wait_on_requests_locked(struct inode *inode, unsigned long idx_start, unsigned int npages)
{
	struct nfs_inode *nfsi = NFS_I(inode);
	struct nfs_page *req;
	unsigned long		idx_end, next;
	unsigned int		res = 0;
	int			error;

	if (npages == 0)
		idx_end = ~0;
	else
		idx_end = idx_start + npages - 1;

	next = idx_start;
	while (radix_tree_gang_lookup_tag(&nfsi->nfs_page_tree, (void **)&req, next, 1, NFS_PAGE_TAG_WRITEBACK)) {
		if (req->wb_index > idx_end)
			break;

		next = req->wb_index + 1;
		BUG_ON(!NFS_WBACK_BUSY(req));

		atomic_inc(&req->wb_count);
		spin_unlock(&nfsi->req_lock);
		error = nfs_wait_on_request(req);
		nfs_release_request(req);
		spin_lock(&nfsi->req_lock);
		if (error < 0)
			return error;
		res++;
	}
	return res;
}
static void nfs_cancel_dirty_list(struct list_head *head)
{
	struct nfs_page *req;
	while(!list_empty(head)) {
		req = nfs_list_entry(head->next);
		nfs_list_remove_request(req);
		nfs_end_page_writeback(req->wb_page);
		nfs_inode_remove_request(req);
		nfs_clear_page_writeback(req);
	}
}

static void nfs_cancel_commit_list(struct list_head *head)
{
	struct nfs_page *req;

	while(!list_empty(head)) {
		req = nfs_list_entry(head->next);
		dec_zone_page_state(req->wb_page, NR_UNSTABLE_NFS);
		nfs_list_remove_request(req);
		clear_bit(PG_NEED_COMMIT, &(req)->wb_flags);
		nfs_inode_remove_request(req);
		nfs_unlock_request(req);
	}
}
#if defined(CONFIG_NFS_V3) || defined(CONFIG_NFS_V4)
/*
 * nfs_scan_commit - Scan an inode for commit requests
 * @inode: NFS inode to scan
 * @dst: destination list
 * @idx_start: lower bound of page->index to scan.
 * @npages: idx_start + npages sets the upper bound to scan.
 *
 * Moves requests from the inode's 'commit' request list.
 * The requests are *not* checked to ensure that they form a contiguous set.
 */
static int
nfs_scan_commit(struct inode *inode, struct list_head *dst, unsigned long idx_start, unsigned int npages)
{
	struct nfs_inode *nfsi = NFS_I(inode);
	int res = 0;

	if (nfsi->ncommit != 0) {
		res = nfs_scan_list(nfsi, &nfsi->commit, dst, idx_start, npages);
		nfsi->ncommit -= res;
		if ((nfsi->ncommit == 0) != list_empty(&nfsi->commit))
			printk(KERN_ERR "NFS: desynchronized value of nfs_i.ncommit.\n");
	}
	return res;
}
#else
static inline int nfs_scan_commit(struct inode *inode, struct list_head *dst, unsigned long idx_start, unsigned int npages)
{
	return 0;
}
#endif
static int nfs_wait_on_write_congestion(struct address_space *mapping)
{
	struct inode *inode = mapping->host;
	struct backing_dev_info *bdi = mapping->backing_dev_info;
	int ret = 0;

	might_sleep();

	if (!bdi_write_congested(bdi))
		return 0;

	nfs_inc_stats(inode, NFSIOS_CONGESTIONWAIT);

	do {
		struct rpc_clnt *clnt = NFS_CLIENT(inode);
		sigset_t oldset;

		rpc_clnt_sigmask(clnt, &oldset);
		ret = congestion_wait_interruptible(WRITE, HZ/10);
		rpc_clnt_sigunmask(clnt, &oldset);
		if (ret == -ERESTARTSYS)
			break;
		ret = 0;
	} while (bdi_write_congested(bdi));

	return ret;
}
/*
 * Try to update any existing write request, or create one if there is none.
 * In order to match, the request's credentials must match those of
 * the calling process.
 *
 * Note: Should always be called with the Page Lock held!
 */
static struct nfs_page * nfs_update_request(struct nfs_open_context* ctx,
		struct page *page, unsigned int offset, unsigned int bytes)
{
	struct address_space *mapping = page->mapping;
	struct inode *inode = mapping->host;
	struct nfs_inode *nfsi = NFS_I(inode);
	struct nfs_page		*req, *new = NULL;
	unsigned long		rqend, end;

	end = offset + bytes;

	if (nfs_wait_on_write_congestion(mapping))
		return ERR_PTR(-ERESTARTSYS);
	for (;;) {
		/* Loop over all inode entries and see if we find
		 * A request for the page we wish to update
		 */
		spin_lock(&nfsi->req_lock);
		req = nfs_page_find_request_locked(page);
		if (req) {
			if (!nfs_lock_request_dontget(req)) {
				int error;

				spin_unlock(&nfsi->req_lock);
				error = nfs_wait_on_request(req);
				nfs_release_request(req);
				if (error < 0) {
					if (new)
						nfs_release_request(new);
					return ERR_PTR(error);
				}
				continue;
			}
			spin_unlock(&nfsi->req_lock);
			if (new)
				nfs_release_request(new);
			break;
		}

		if (new) {
			int error;
			nfs_lock_request_dontget(new);
			error = nfs_inode_add_request(inode, new);
			if (error) {
				spin_unlock(&nfsi->req_lock);
				nfs_unlock_request(new);
				return ERR_PTR(error);
			}
			spin_unlock(&nfsi->req_lock);
			return new;
		}
		spin_unlock(&nfsi->req_lock);

		new = nfs_create_request(ctx, inode, page, offset, bytes);
		if (IS_ERR(new))
			return new;
	}

	/* We have a request for our page.
	 * If the creds don't match, or the
	 * page addresses don't match,
	 * tell the caller to wait on the conflicting
	 * request.
	 */
	rqend = req->wb_offset + req->wb_bytes;
	if (req->wb_context != ctx
	    || req->wb_page != page
	    || !nfs_dirty_request(req)
	    || offset > rqend || end < req->wb_offset) {
		nfs_unlock_request(req);
		return ERR_PTR(-EBUSY);
	}

	/* Okay, the request matches. Update the region */
	if (offset < req->wb_offset) {
		req->wb_offset = offset;
		req->wb_pgbase = offset;
		req->wb_bytes = rqend - req->wb_offset;
	}

	if (end > rqend)
		req->wb_bytes = end - req->wb_offset;

	return req;
}
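/*
 * Three outcomes are possible above: an existing compatible request is
 * extended in place, a missing request is created and inserted, or an
 * overlapping-but-incompatible request yields ERR_PTR(-EBUSY), which
 * causes nfs_writepage_setup() to flush the page via nfs_wb_page() and
 * retry.
 */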
int nfs_flush_incompatible(struct file *file, struct page *page)
{
	struct nfs_open_context *ctx = (struct nfs_open_context *)file->private_data;
	struct nfs_page	*req;
	int do_flush, status;
	/*
	 * Look for a request corresponding to this page. If there
	 * is one, and it belongs to another file, we flush it out
	 * before we try to copy anything into the page. Do this
	 * due to the lack of an ACCESS-type call in NFSv2.
	 * Also do the same if we find a request from an existing
	 * dropped page.
	 */
	do {
		req = nfs_page_find_request(page);
		if (req == NULL)
			return 0;
		do_flush = req->wb_page != page || req->wb_context != ctx
			|| !nfs_dirty_request(req);
		nfs_release_request(req);
		if (!do_flush)
			return 0;
		status = nfs_wb_page(page->mapping->host, page);
	} while (status == 0);
	return status;
}
/*
 * Update and possibly write a cached page of an NFS file.
 *
 * XXX: Keep an eye on generic_file_read to make sure it doesn't do bad
 * things with a page scheduled for an RPC call (e.g. invalidate it).
 */
int nfs_updatepage(struct file *file, struct page *page,
		unsigned int offset, unsigned int count)
{
	struct nfs_open_context *ctx = (struct nfs_open_context *)file->private_data;
	struct inode	*inode = page->mapping->host;
	int		status = 0;

	nfs_inc_stats(inode, NFSIOS_VFSUPDATEPAGE);

	dprintk("NFS:      nfs_updatepage(%s/%s %d@%Ld)\n",
		file->f_path.dentry->d_parent->d_name.name,
		file->f_path.dentry->d_name.name, count,
		(long long)(page_offset(page) +offset));

	/* If we're not using byte range locks, and we know the page
	 * is entirely in cache, it may be more efficient to avoid
	 * fragmenting write requests.
	 */
	if (PageUptodate(page) && inode->i_flock == NULL && !(file->f_mode & O_SYNC)) {
		count = max(count + offset, nfs_page_length(page));
		offset = 0;
	}

	status = nfs_writepage_setup(ctx, page, offset, count);
	__set_page_dirty_nobuffers(page);

	dprintk("NFS:      nfs_updatepage returns %d (isize %Ld)\n",
			status, (long long)i_size_read(inode));
	if (status < 0)
		nfs_set_pageerror(page);
	return status;
}
static void nfs_writepage_release(struct nfs_page *req)
{

	if (PageError(req->wb_page) || !nfs_reschedule_unstable_write(req)) {
		nfs_end_page_writeback(req->wb_page);
		nfs_inode_remove_request(req);
	} else
		nfs_end_page_writeback(req->wb_page);
	nfs_clear_page_writeback(req);
}
static inline int flush_task_priority(int how)
{
	switch (how & (FLUSH_HIGHPRI|FLUSH_LOWPRI)) {
		case FLUSH_HIGHPRI:
			return RPC_PRIORITY_HIGH;
		case FLUSH_LOWPRI:
			return RPC_PRIORITY_LOW;
	}
	return RPC_PRIORITY_NORMAL;
}
/*
 * Set up the argument/result storage required for the RPC call.
 */
static void nfs_write_rpcsetup(struct nfs_page *req,
		struct nfs_write_data *data,
		const struct rpc_call_ops *call_ops,
		unsigned int count, unsigned int offset,
		int how)
{
	struct inode		*inode;
	int flags;

	/* Set up the RPC argument and reply structs
	 * NB: take care not to mess about with data->commit et al. */

	data->req = req;
	data->inode = inode = req->wb_context->dentry->d_inode;
	data->cred = req->wb_context->cred;

	data->args.fh     = NFS_FH(inode);
	data->args.offset = req_offset(req) + offset;
	data->args.pgbase = req->wb_pgbase + offset;
	data->args.pages  = data->pagevec;
	data->args.count  = count;
	data->args.context = req->wb_context;

	data->res.fattr   = &data->fattr;
	data->res.count   = count;
	data->res.verf    = &data->verf;
	nfs_fattr_init(&data->fattr);

	/* Set up the initial task struct.  */
	flags = (how & FLUSH_SYNC) ? 0 : RPC_TASK_ASYNC;
	rpc_init_task(&data->task, NFS_CLIENT(inode), flags, call_ops, data);
	NFS_PROTO(inode)->write_setup(data, how);

	data->task.tk_priority = flush_task_priority(how);
	data->task.tk_cookie = (unsigned long)inode;

	dprintk("NFS: %5u initiated write call "
		"(req %s/%Ld, %u bytes @ offset %Lu)\n",
		data->task.tk_pid,
		inode->i_sb->s_id,
		(long long)NFS_FILEID(inode),
		count,
		(unsigned long long)data->args.offset);
}

static void nfs_execute_write(struct nfs_write_data *data)
{
	struct rpc_clnt *clnt = NFS_CLIENT(data->inode);
	sigset_t oldset;

	rpc_clnt_sigmask(clnt, &oldset);
	rpc_execute(&data->task);
	rpc_clnt_sigunmask(clnt, &oldset);
}
/*
 * Generate multiple small requests to write out a single
 * contiguous dirty area on one page.
 */
static int nfs_flush_multi(struct inode *inode, struct list_head *head, int how)
{
	struct nfs_page *req = nfs_list_entry(head->next);
	struct page *page = req->wb_page;
	struct nfs_write_data *data;
	size_t wsize = NFS_SERVER(inode)->wsize, nbytes;
	unsigned int offset;
	int requests = 0;
	LIST_HEAD(list);

	nfs_list_remove_request(req);

	nbytes = req->wb_bytes;
	do {
		size_t len = min(nbytes, wsize);

		data = nfs_writedata_alloc(len);
		if (!data)
			goto out_bad;
		list_add(&data->pages, &list);
		requests++;
		nbytes -= len;
	} while (nbytes != 0);
	atomic_set(&req->wb_complete, requests);

	ClearPageError(page);
	offset = 0;
	nbytes = req->wb_bytes;
	do {
		data = list_entry(list.next, struct nfs_write_data, pages);
		list_del_init(&data->pages);

		data->pagevec[0] = page;

		if (nbytes > wsize) {
			nfs_write_rpcsetup(req, data, &nfs_write_partial_ops,
					wsize, offset, how);
			offset += wsize;
			nbytes -= wsize;
		} else {
			nfs_write_rpcsetup(req, data, &nfs_write_partial_ops,
					nbytes, offset, how);
			nbytes = 0;
		}
		nfs_execute_write(data);
	} while (nbytes != 0);

	return 0;

out_bad:
	while (!list_empty(&list)) {
		data = list_entry(list.next, struct nfs_write_data, pages);
		list_del(&data->pages);
		nfs_writedata_release(data);
	}
	nfs_redirty_request(req);
	nfs_end_page_writeback(req->wb_page);
	nfs_clear_page_writeback(req);
	return -ENOMEM;
}
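/*
 * Example (illustrative): with a 64K PAGE_CACHE_SIZE and wsize = 8192,
 * a fully dirty page is carved into eight wsize-sized WRITE RPCs; the
 * first seven take the 'nbytes > wsize' branch and the last one sends
 * the remainder.  req->wb_complete is preset to that request count so
 * that nfs_writepage_release() runs only after the final reply, via
 * the atomic_dec_and_test() in nfs_writeback_done_partial().
 */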
/*
 * Create an RPC task for the given write request and kick it.
 * The page must have been locked by the caller.
 *
 * It may happen that the page we're passed is not marked dirty.
 * This is the case if nfs_updatepage detects a conflicting request
 * that has been written but not committed.
 */
static int nfs_flush_one(struct inode *inode, struct list_head *head, int how)
{
	struct nfs_page		*req;
	struct page		**pages;
	struct nfs_write_data	*data;
	unsigned int		count;

	data = nfs_writedata_alloc(NFS_SERVER(inode)->wsize);
	if (!data)
		goto out_bad;

	pages = data->pagevec;
	count = 0;
	while (!list_empty(head)) {
		req = nfs_list_entry(head->next);
		nfs_list_remove_request(req);
		nfs_list_add_request(req, &data->pages);
		ClearPageError(req->wb_page);
		*pages++ = req->wb_page;
		count += req->wb_bytes;
	}
	req = nfs_list_entry(data->pages.next);

	/* Set up the argument struct */
	nfs_write_rpcsetup(req, data, &nfs_write_full_ops, count, 0, how);

	nfs_execute_write(data);
	return 0;
 out_bad:
	while (!list_empty(head)) {
		struct nfs_page *req = nfs_list_entry(head->next);
		nfs_list_remove_request(req);
		nfs_redirty_request(req);
		nfs_end_page_writeback(req->wb_page);
		nfs_clear_page_writeback(req);
	}
	return -ENOMEM;
}
static int nfs_flush_list(struct inode *inode, struct list_head *head, int npages, int how)
{
	LIST_HEAD(one_request);
	int (*flush_one)(struct inode *, struct list_head *, int);
	struct nfs_page	*req;
	int wpages = NFS_SERVER(inode)->wpages;
	int wsize = NFS_SERVER(inode)->wsize;
	int error;

	flush_one = nfs_flush_one;
	if (wsize < PAGE_CACHE_SIZE)
		flush_one = nfs_flush_multi;
	/* For single writes, FLUSH_STABLE is more efficient */
	if (npages <= wpages && npages == NFS_I(inode)->npages
			&& nfs_list_entry(head->next)->wb_bytes <= wsize)
		how |= FLUSH_STABLE;

	do {
		nfs_coalesce_requests(head, &one_request, wpages);
		req = nfs_list_entry(one_request.next);
		error = flush_one(inode, &one_request, how);
		if (error < 0)
			goto out_err;
	} while (!list_empty(head));
	return 0;
out_err:
	while (!list_empty(head)) {
		req = nfs_list_entry(head->next);
		nfs_list_remove_request(req);
		nfs_redirty_request(req);
		nfs_end_page_writeback(req->wb_page);
		nfs_clear_page_writeback(req);
	}
	return error;
}
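/*
 * The FLUSH_STABLE special case above is a round-trip optimization:
 * when everything being flushed fits in a single WRITE RPC, asking the
 * server to commit it to stable storage immediately is cheaper than an
 * unstable write followed by a separate COMMIT call.
 */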
/*
 * Handle a write reply that flushed part of a page.
 */
static void nfs_writeback_done_partial(struct rpc_task *task, void *calldata)
{
	struct nfs_write_data	*data = calldata;
	struct nfs_page		*req = data->req;
	struct page		*page = req->wb_page;

	dprintk("NFS: write (%s/%Ld %d@%Ld)",
		req->wb_context->dentry->d_inode->i_sb->s_id,
		(long long)NFS_FILEID(req->wb_context->dentry->d_inode),
		req->wb_bytes,
		(long long)req_offset(req));

	if (nfs_writeback_done(task, data) != 0)
		return;

	if (task->tk_status < 0) {
		nfs_set_pageerror(page);
		req->wb_context->error = task->tk_status;
		dprintk(", error = %d\n", task->tk_status);
		goto out;
	}

	if (nfs_write_need_commit(data)) {
		spinlock_t *req_lock = &NFS_I(page->mapping->host)->req_lock;

		spin_lock(req_lock);
		if (test_bit(PG_NEED_RESCHED, &req->wb_flags)) {
			/* Do nothing we need to resend the writes */
		} else if (!test_and_set_bit(PG_NEED_COMMIT, &req->wb_flags)) {
			memcpy(&req->wb_verf, &data->verf, sizeof(req->wb_verf));
			dprintk(" defer commit\n");
		} else if (memcmp(&req->wb_verf, &data->verf, sizeof(req->wb_verf))) {
			set_bit(PG_NEED_RESCHED, &req->wb_flags);
			clear_bit(PG_NEED_COMMIT, &req->wb_flags);
			dprintk(" server reboot detected\n");
		}
		spin_unlock(req_lock);
	} else
		dprintk(" OK\n");

out:
	if (atomic_dec_and_test(&req->wb_complete))
		nfs_writepage_release(req);
}

static const struct rpc_call_ops nfs_write_partial_ops = {
	.rpc_call_done = nfs_writeback_done_partial,
	.rpc_release = nfs_writedata_release,
};
/*
 * Handle a write reply that flushes a whole page.
 *
 * FIXME: There is an inherent race with invalidate_inode_pages and
 *	  writebacks since the page->count is kept > 1 for as long
 *	  as the page has a write request pending.
 */
static void nfs_writeback_done_full(struct rpc_task *task, void *calldata)
{
	struct nfs_write_data	*data = calldata;
	struct nfs_page		*req;
	struct page		*page;

	if (nfs_writeback_done(task, data) != 0)
		return;

	/* Update attributes as result of writeback. */
	while (!list_empty(&data->pages)) {
		req = nfs_list_entry(data->pages.next);
		nfs_list_remove_request(req);
		page = req->wb_page;

		dprintk("NFS: write (%s/%Ld %d@%Ld)",
			req->wb_context->dentry->d_inode->i_sb->s_id,
			(long long)NFS_FILEID(req->wb_context->dentry->d_inode),
			req->wb_bytes,
			(long long)req_offset(req));

		if (task->tk_status < 0) {
			nfs_set_pageerror(page);
			req->wb_context->error = task->tk_status;
			dprintk(", error = %d\n", task->tk_status);
			goto remove_request;
		}

		if (nfs_write_need_commit(data)) {
			memcpy(&req->wb_verf, &data->verf, sizeof(req->wb_verf));
			nfs_mark_request_commit(req);
			nfs_end_page_writeback(page);
			dprintk(" marked for commit\n");
			goto next;
		}
		dprintk(" OK\n");
remove_request:
		nfs_end_page_writeback(page);
		nfs_inode_remove_request(req);
	next:
		nfs_clear_page_writeback(req);
	}
}

static const struct rpc_call_ops nfs_write_full_ops = {
	.rpc_call_done = nfs_writeback_done_full,
	.rpc_release = nfs_writedata_release,
};
/*
 * This function is called when the WRITE call is complete.
 */
int nfs_writeback_done(struct rpc_task *task, struct nfs_write_data *data)
{
	struct nfs_writeargs	*argp = &data->args;
	struct nfs_writeres	*resp = &data->res;
	int status;

	dprintk("NFS: %5u nfs_writeback_done (status %d)\n",
		task->tk_pid, task->tk_status);

	/*
	 * ->write_done will attempt to use post-op attributes to detect
	 * conflicting writes by other clients.  A strict interpretation
	 * of close-to-open would allow us to continue caching even if
	 * another writer had changed the file, but some applications
	 * depend on tighter cache coherency when writing.
	 */
	status = NFS_PROTO(data->inode)->write_done(task, data);
	if (status != 0)
		return status;
	nfs_add_stats(data->inode, NFSIOS_SERVERWRITTENBYTES, resp->count);

#if defined(CONFIG_NFS_V3) || defined(CONFIG_NFS_V4)
	if (resp->verf->committed < argp->stable && task->tk_status >= 0) {
		/* We tried a write call, but the server did not
		 * commit data to stable storage even though we
		 * requested it.
		 * Note: There is a known bug in Tru64 < 5.0 in which
		 *	 the server reports NFS_DATA_SYNC, but performs
		 *	 NFS_FILE_SYNC. We therefore implement this checking
		 *	 as a dprintk() in order to avoid filling syslog.
		 */
		static unsigned long    complain;

		if (time_before(complain, jiffies)) {
			dprintk("NFS: faulty NFS server %s:"
				" (committed = %d) != (stable = %d)\n",
				NFS_SERVER(data->inode)->nfs_client->cl_hostname,
				resp->verf->committed, argp->stable);
			complain = jiffies + 300 * HZ;
		}
	}
#endif
	/* Is this a short write? */
	if (task->tk_status >= 0 && resp->count < argp->count) {
		static unsigned long    complain;

		nfs_inc_stats(data->inode, NFSIOS_SHORTWRITE);

		/* Has the server at least made some progress? */
		if (resp->count != 0) {
			/* Was this an NFSv2 write or an NFSv3 stable write? */
			if (resp->verf->committed != NFS_UNSTABLE) {
				/* Resend from where the server left off */
				argp->offset += resp->count;
				argp->pgbase += resp->count;
				argp->count -= resp->count;
			} else {
				/* Resend as a stable write in order to avoid
				 * headaches in the case of a server crash.
				 */
				argp->stable = NFS_FILE_SYNC;
			}
			rpc_restart_call(task);
			return -EAGAIN;
		}
		if (time_before(complain, jiffies)) {
			printk(KERN_WARNING
			       "NFS: Server wrote zero bytes, expected %u.\n",
					argp->count);
			complain = jiffies + 300 * HZ;
		}
		/* Can't do anything about it except throw an error. */
		task->tk_status = -EIO;
	}
	return 0;
}
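/*
 * Short-write arithmetic, as an example: if argp->count was 16384 and
 * the server only wrote resp->count = 4096 bytes of a stable write,
 * the restarted call resumes with offset and pgbase advanced by 4096
 * and count reduced to 12288.  Only a short *unstable* write is
 * upgraded to NFS_FILE_SYNC before the retry.
 */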
#if defined(CONFIG_NFS_V3) || defined(CONFIG_NFS_V4)
void nfs_commit_release(void *wdata)
{
	nfs_commit_free(wdata);
}

/*
 * Set up the argument/result storage required for the RPC call.
 */
static void nfs_commit_rpcsetup(struct list_head *head,
		struct nfs_write_data *data,
		int how)
{
	struct nfs_page		*first;
	struct inode		*inode;
	int flags;

	/* Set up the RPC argument and reply structs
	 * NB: take care not to mess about with data->commit et al. */

	list_splice_init(head, &data->pages);
	first = nfs_list_entry(data->pages.next);
	inode = first->wb_context->dentry->d_inode;

	data->inode	  = inode;
	data->cred	  = first->wb_context->cred;

	data->args.fh     = NFS_FH(data->inode);
	/* Note: we always request a commit of the entire inode */
	data->args.offset = 0;
	data->args.count  = 0;
	data->res.count   = 0;
	data->res.fattr   = &data->fattr;
	data->res.verf    = &data->verf;
	nfs_fattr_init(&data->fattr);

	/* Set up the initial task struct.  */
	flags = (how & FLUSH_SYNC) ? 0 : RPC_TASK_ASYNC;
	rpc_init_task(&data->task, NFS_CLIENT(inode), flags, &nfs_commit_ops, data);
	NFS_PROTO(inode)->commit_setup(data, how);

	data->task.tk_priority = flush_task_priority(how);
	data->task.tk_cookie = (unsigned long)inode;

	dprintk("NFS: %5u initiated commit call\n", data->task.tk_pid);
}
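/*
 * Note: offset = 0 together with count = 0 in a COMMIT request means
 * "commit from the start of the file through its end" in the NFSv3/v4
 * protocols, which is what the comment above about always committing
 * the entire inode refers to.
 */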
/*
 * Commit dirty pages
 */
static int
nfs_commit_list(struct inode *inode, struct list_head *head, int how)
{
	struct nfs_write_data	*data;
	struct nfs_page         *req;

	data = nfs_commit_alloc();
	if (!data)
		goto out_bad;

	/* Set up the argument struct */
	nfs_commit_rpcsetup(head, data, how);

	nfs_execute_write(data);
	return 0;
 out_bad:
	while (!list_empty(head)) {
		req = nfs_list_entry(head->next);
		nfs_list_remove_request(req);
		nfs_mark_request_commit(req);
		dec_zone_page_state(req->wb_page, NR_UNSTABLE_NFS);
		nfs_clear_page_writeback(req);
	}
	return -ENOMEM;
}
/*
 * COMMIT call returned
 */
static void nfs_commit_done(struct rpc_task *task, void *calldata)
{
	struct nfs_write_data	*data = calldata;
	struct nfs_page		*req;

	dprintk("NFS: %5u nfs_commit_done (status %d)\n",
				task->tk_pid, task->tk_status);

	/* Call the NFS version-specific code */
	if (NFS_PROTO(data->inode)->commit_done(task, data) != 0)
		return;

	while (!list_empty(&data->pages)) {
		req = nfs_list_entry(data->pages.next);
		nfs_list_remove_request(req);
		clear_bit(PG_NEED_COMMIT, &(req)->wb_flags);
		dec_zone_page_state(req->wb_page, NR_UNSTABLE_NFS);

		dprintk("NFS: commit (%s/%Ld %d@%Ld)",
			req->wb_context->dentry->d_inode->i_sb->s_id,
			(long long)NFS_FILEID(req->wb_context->dentry->d_inode),
			req->wb_bytes,
			(long long)req_offset(req));
		if (task->tk_status < 0) {
			req->wb_context->error = task->tk_status;
			nfs_inode_remove_request(req);
			dprintk(", error = %d\n", task->tk_status);
			goto next;
		}

		/* Okay, COMMIT succeeded, apparently. Check the verifier
		 * returned by the server against all stored verfs. */
		if (!memcmp(req->wb_verf.verifier, data->verf.verifier, sizeof(data->verf.verifier))) {
			/* We have a match */
			nfs_inode_remove_request(req);
			dprintk(" OK\n");
			goto next;
		}
		/* We have a mismatch. Write the page again */
		dprintk(" mismatch\n");
		nfs_redirty_request(req);
	next:
		nfs_clear_page_writeback(req);
	}
}

static const struct rpc_call_ops nfs_commit_ops = {
	.rpc_call_done = nfs_commit_done,
	.rpc_release = nfs_commit_release,
};
#else
static inline int nfs_commit_list(struct inode *inode, struct list_head *head, int how)
{
	return 0;
}
#endif
static long nfs_flush_mapping(struct address_space *mapping, struct writeback_control *wbc, int how)
{
	struct nfs_inode *nfsi = NFS_I(mapping->host);
	LIST_HEAD(head);
	long res;

	spin_lock(&nfsi->req_lock);
	res = nfs_scan_dirty(mapping, wbc, &head);
	spin_unlock(&nfsi->req_lock);
	if (res) {
		int error = nfs_flush_list(mapping->host, &head, res, how);
		if (error < 0)
			return error;
	}
	return res;
}
#if defined(CONFIG_NFS_V3) || defined(CONFIG_NFS_V4)
int nfs_commit_inode(struct inode *inode, int how)
{
	struct nfs_inode *nfsi = NFS_I(inode);
	LIST_HEAD(head);
	int res;

	spin_lock(&nfsi->req_lock);
	res = nfs_scan_commit(inode, &head, 0, 0);
	spin_unlock(&nfsi->req_lock);
	if (res) {
		int error = nfs_commit_list(inode, &head, how);
		if (error < 0)
			return error;
	}
	return res;
}
#endif
long nfs_sync_mapping_wait(struct address_space *mapping, struct writeback_control *wbc, int how)
{
	struct inode *inode = mapping->host;
	struct nfs_inode *nfsi = NFS_I(inode);
	unsigned long idx_start, idx_end;
	unsigned int npages = 0;
	LIST_HEAD(head);
	int nocommit = how & FLUSH_NOCOMMIT;
	long pages, ret;

	/* FIXME */
	if (wbc->range_cyclic)
		idx_start = 0;
	else {
		idx_start = wbc->range_start >> PAGE_CACHE_SHIFT;
		idx_end = wbc->range_end >> PAGE_CACHE_SHIFT;
		if (idx_end > idx_start) {
			unsigned long l_npages = 1 + idx_end - idx_start;
			npages = l_npages;
			if (sizeof(npages) != sizeof(l_npages) &&
					(unsigned long)npages != l_npages)
				npages = 0;
		}
	}
	how &= ~FLUSH_NOCOMMIT;
	spin_lock(&nfsi->req_lock);
	do {
		wbc->pages_skipped = 0;
		ret = nfs_wait_on_requests_locked(inode, idx_start, npages);
		if (ret != 0)
			continue;
		pages = nfs_scan_dirty(mapping, wbc, &head);
		if (pages != 0) {
			spin_unlock(&nfsi->req_lock);
			if (how & FLUSH_INVALIDATE) {
				nfs_cancel_dirty_list(&head);
				ret = pages;
			} else
				ret = nfs_flush_list(inode, &head, pages, how);
			spin_lock(&nfsi->req_lock);
			continue;
		}
		if (wbc->pages_skipped != 0)
			continue;
		if (nocommit)
			break;
		pages = nfs_scan_commit(inode, &head, idx_start, npages);
		if (pages == 0) {
			if (wbc->pages_skipped != 0)
				continue;
			break;
		}
		if (how & FLUSH_INVALIDATE) {
			spin_unlock(&nfsi->req_lock);
			nfs_cancel_commit_list(&head);
			ret = pages;
			spin_lock(&nfsi->req_lock);
			continue;
		}
		pages += nfs_scan_commit(inode, &head, 0, 0);
		spin_unlock(&nfsi->req_lock);
		ret = nfs_commit_list(inode, &head, how);
		spin_lock(&nfsi->req_lock);
	} while (ret >= 0);
	spin_unlock(&nfsi->req_lock);
	return ret;
}
/*
 * flush the inode to disk.
 */
int nfs_wb_all(struct inode *inode)
{
	struct address_space *mapping = inode->i_mapping;
	struct writeback_control wbc = {
		.bdi = mapping->backing_dev_info,
		.sync_mode = WB_SYNC_ALL,
		.nr_to_write = LONG_MAX,
		.for_writepages = 1,
		.range_cyclic = 1,
	};
	int ret;

	ret = generic_writepages(mapping, &wbc);
	if (ret < 0)
		goto out;
	ret = nfs_sync_mapping_wait(mapping, &wbc, 0);
	if (ret >= 0)
		return 0;
out:
	__mark_inode_dirty(mapping->host, I_DIRTY_PAGES);
	return ret;
}
int nfs_sync_mapping_range(struct address_space *mapping, loff_t range_start, loff_t range_end, int how)
{
	struct writeback_control wbc = {
		.bdi = mapping->backing_dev_info,
		.sync_mode = WB_SYNC_ALL,
		.nr_to_write = LONG_MAX,
		.range_start = range_start,
		.range_end = range_end,
		.for_writepages = 1,
	};
	int ret;

	if (!(how & FLUSH_NOWRITEPAGE)) {
		ret = generic_writepages(mapping, &wbc);
		if (ret < 0)
			goto out;
	}
	ret = nfs_sync_mapping_wait(mapping, &wbc, how);
	if (ret >= 0)
		return 0;
out:
	__mark_inode_dirty(mapping->host, I_DIRTY_PAGES);
	return ret;
}
int nfs_wb_page_priority(struct inode *inode, struct page *page, int how)
{
	loff_t range_start = page_offset(page);
	loff_t range_end = range_start + (loff_t)(PAGE_CACHE_SIZE - 1);
	struct writeback_control wbc = {
		.bdi = page->mapping->backing_dev_info,
		.sync_mode = WB_SYNC_ALL,
		.nr_to_write = LONG_MAX,
		.range_start = range_start,
		.range_end = range_end,
	};
	int ret;

	BUG_ON(!PageLocked(page));
	if (!(how & FLUSH_NOWRITEPAGE) && clear_page_dirty_for_io(page)) {
		ret = nfs_writepage_locked(page, &wbc);
		if (ret < 0)
			goto out;
	}
	if (!PagePrivate(page))
		return 0;
	ret = nfs_sync_mapping_wait(page->mapping, &wbc, how);
	if (ret >= 0)
		return 0;
out:
	__mark_inode_dirty(inode, I_DIRTY_PAGES);
	return ret;
}

/*
 * Write back all requests on one page - we do this before reading it.
 */
int nfs_wb_page(struct inode *inode, struct page* page)
{
	return nfs_wb_page_priority(inode, page, FLUSH_STABLE);
}
int nfs_set_page_dirty(struct page *page)
{
	spinlock_t *req_lock = &NFS_I(page->mapping->host)->req_lock;
	struct nfs_page *req;
	int ret;

	spin_lock(req_lock);
	req = nfs_page_find_request_locked(page);
	if (req != NULL) {
		/* Mark any existing write requests for flushing */
		ret = !test_and_set_bit(PG_NEED_FLUSH, &req->wb_flags);
		spin_unlock(req_lock);
		nfs_release_request(req);
		return ret;
	}
	ret = __set_page_dirty_nobuffers(page);
	spin_unlock(req_lock);
	return ret;
}
int __init nfs_init_writepagecache(void)
{
	nfs_wdata_cachep = kmem_cache_create("nfs_write_data",
					     sizeof(struct nfs_write_data),
					     0, SLAB_HWCACHE_ALIGN,
					     NULL, NULL);
	if (nfs_wdata_cachep == NULL)
		return -ENOMEM;

	nfs_wdata_mempool = mempool_create_slab_pool(MIN_POOL_WRITE,
						     nfs_wdata_cachep);
	if (nfs_wdata_mempool == NULL)
		return -ENOMEM;

	nfs_commit_mempool = mempool_create_slab_pool(MIN_POOL_COMMIT,
						      nfs_wdata_cachep);
	if (nfs_commit_mempool == NULL)
		return -ENOMEM;

	/*
	 * NFS congestion size, scale with available memory.
	 *
	 * This allows larger machines to have larger/more transfers.
	 * Limit the default to 256M
	 */
	nfs_congestion_kb = (16*int_sqrt(totalram_pages)) << (PAGE_SHIFT-10);
	if (nfs_congestion_kb > 256*1024)
		nfs_congestion_kb = 256*1024;

	return 0;
}
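/*
 * Example of the scaling above (illustrative, 4K pages): a machine
 * with 4GB of RAM has totalram_pages = 2^20, int_sqrt() of that is
 * 1024, and 16 * 1024 << (PAGE_SHIFT-10) = 65536, i.e. a 64MB
 * congestion limit; the 256*1024 clamp caps very large machines at
 * 256MB.
 */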
void nfs_destroy_writepagecache(void)
{
	mempool_destroy(nfs_commit_mempool);
	mempool_destroy(nfs_wdata_mempool);
	kmem_cache_destroy(nfs_wdata_cachep);
}