 * Client-side procedure declarations for NFSv4.
 *
 * Copyright (c) 2002 The Regents of the University of Michigan.
 *
 * Kendrick Smith <kmsmith@umich.edu>
 * Andy Adamson <andros@umich.edu>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
 * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
 * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/ratelimit.h>
#include <linux/printk.h>
#include <linux/slab.h>
#include <linux/sunrpc/clnt.h>
#include <linux/sunrpc/gss_api.h>
#include <linux/nfs.h>
#include <linux/nfs4.h>
#include <linux/nfs_fs.h>
#include <linux/nfs_page.h>
#include <linux/nfs_mount.h>
#include <linux/namei.h>
#include <linux/mount.h>
#include <linux/module.h>
#include <linux/nfs_idmap.h>
#include <linux/sunrpc/bc_xprt.h>
#include <linux/xattr.h>
#include <linux/utsname.h>
#include <linux/freezer.h>

#include "delegation.h"

#define NFSDBG_FACILITY		NFSDBG_PROC

#define NFS4_POLL_RETRY_MIN	(HZ/10)
#define NFS4_POLL_RETRY_MAX	(15*HZ)

#define NFS4_MAX_LOOP_ON_RECOVER (10)

static unsigned short max_session_slots = NFS4_DEF_SLOT_TABLE_SIZE;
static int _nfs4_proc_open(struct nfs4_opendata *data);
static int _nfs4_recover_proc_open(struct nfs4_opendata *data);
static int nfs4_do_fsinfo(struct nfs_server *, struct nfs_fh *, struct nfs_fsinfo *);
static int nfs4_async_handle_error(struct rpc_task *, const struct nfs_server *, struct nfs4_state *);
static void nfs_fixup_referral_attributes(struct nfs_fattr *fattr);
static int nfs4_proc_getattr(struct nfs_server *, struct nfs_fh *, struct nfs_fattr *);
static int _nfs4_proc_getattr(struct nfs_server *server, struct nfs_fh *fhandle, struct nfs_fattr *fattr);
static int nfs4_do_setattr(struct inode *inode, struct rpc_cred *cred,
			    struct nfs_fattr *fattr, struct iattr *sattr,
			    struct nfs4_state *state);
#ifdef CONFIG_NFS_V4_1
static int nfs41_test_stateid(struct nfs_server *, nfs4_stateid *);
static int nfs41_free_stateid(struct nfs_server *, nfs4_stateid *);

/* Prevent leaks of NFSv4 errors into userland */
static int nfs4_map_errors(int err)
	case -NFS4ERR_RESOURCE:
	case -NFS4ERR_WRONGSEC:
	case -NFS4ERR_BADOWNER:
	case -NFS4ERR_BADNAME:
	case -NFS4ERR_SHARE_DENIED:
		dprintk("%s could not handle NFSv4 error %d\n",
 * This is our standard bitmap for GETATTR requests.
const u32 nfs4_fattr_bitmap[2] = {
	| FATTR4_WORD0_CHANGE
	| FATTR4_WORD0_FILEID,
	| FATTR4_WORD1_NUMLINKS
	| FATTR4_WORD1_OWNER_GROUP
	| FATTR4_WORD1_RAWDEV
	| FATTR4_WORD1_SPACE_USED
	| FATTR4_WORD1_TIME_ACCESS
	| FATTR4_WORD1_TIME_METADATA
	| FATTR4_WORD1_TIME_MODIFY

const u32 nfs4_statfs_bitmap[2] = {
	FATTR4_WORD0_FILES_AVAIL
	| FATTR4_WORD0_FILES_FREE
	| FATTR4_WORD0_FILES_TOTAL,
	FATTR4_WORD1_SPACE_AVAIL
	| FATTR4_WORD1_SPACE_FREE
	| FATTR4_WORD1_SPACE_TOTAL

const u32 nfs4_pathconf_bitmap[2] = {
	| FATTR4_WORD0_MAXNAME,

const u32 nfs4_fsinfo_bitmap[3] = { FATTR4_WORD0_MAXFILESIZE
			| FATTR4_WORD0_MAXREAD
			| FATTR4_WORD0_MAXWRITE
			| FATTR4_WORD0_LEASE_TIME,
			FATTR4_WORD1_TIME_DELTA
			| FATTR4_WORD1_FS_LAYOUT_TYPES,
			FATTR4_WORD2_LAYOUT_BLKSIZE

const u32 nfs4_fs_locations_bitmap[2] = {
	| FATTR4_WORD0_CHANGE
	| FATTR4_WORD0_FILEID
	| FATTR4_WORD0_FS_LOCATIONS,
	| FATTR4_WORD1_NUMLINKS
	| FATTR4_WORD1_OWNER_GROUP
	| FATTR4_WORD1_RAWDEV
	| FATTR4_WORD1_SPACE_USED
	| FATTR4_WORD1_TIME_ACCESS
	| FATTR4_WORD1_TIME_METADATA
	| FATTR4_WORD1_TIME_MODIFY
	| FATTR4_WORD1_MOUNTED_ON_FILEID
static void nfs4_setup_readdir(u64 cookie, __be32 *verifier, struct dentry *dentry,
		struct nfs4_readdir_arg *readdir)
	BUG_ON(readdir->count < 80);
		readdir->cookie = cookie;
		memcpy(&readdir->verifier, verifier, sizeof(readdir->verifier));
	memset(&readdir->verifier, 0, sizeof(readdir->verifier));
	/*
	 * NFSv4 servers do not return entries for '.' and '..'
	 * Therefore, we fake these entries here.  We let '.'
	 * have cookie 0 and '..' have cookie 1.  Note that
	 * when talking to the server, we always send cookie 0
	 */
	start = p = kmap_atomic(*readdir->pages);
	*p++ = xdr_one;				/* next */
	*p++ = xdr_zero;			/* cookie, first word */
	*p++ = xdr_one;				/* cookie, second word */
	*p++ = xdr_one;				/* entry len */
	memcpy(p, ".\0\0\0", 4);		/* entry */
	*p++ = xdr_one;				/* bitmap length */
	*p++ = htonl(FATTR4_WORD0_FILEID);	/* bitmap */
	*p++ = htonl(8);			/* attribute buffer length */
	p = xdr_encode_hyper(p, NFS_FILEID(dentry->d_inode));
	*p++ = xdr_one;				/* next */
	*p++ = xdr_zero;			/* cookie, first word */
	*p++ = xdr_two;				/* cookie, second word */
	*p++ = xdr_two;				/* entry len */
	memcpy(p, "..\0\0", 4);			/* entry */
	*p++ = xdr_one;				/* bitmap length */
	*p++ = htonl(FATTR4_WORD0_FILEID);	/* bitmap */
	*p++ = htonl(8);			/* attribute buffer length */
	p = xdr_encode_hyper(p, NFS_FILEID(dentry->d_parent->d_inode));
	readdir->pgbase = (char *)p - (char *)start;
	readdir->count -= readdir->pgbase;
	kunmap_atomic(start);
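
/*
 * Wait, killably, for any running state manager recovery to finish
 * (i.e. for NFS4CLNT_MANAGER_RUNNING to be cleared on the client).
 */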
static int nfs4_wait_clnt_recover(struct nfs_client *clp)
	res = wait_on_bit(&clp->cl_state, NFS4CLNT_MANAGER_RUNNING,
			nfs_wait_bit_killable, TASK_KILLABLE);
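
/*
 * Sleep before retrying an NFSv4 request; the delay is clamped to the
 * NFS4_POLL_RETRY_MIN..NFS4_POLL_RETRY_MAX range, is freezer-friendly,
 * and is cut short by a fatal signal.
 */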
static int nfs4_delay(struct rpc_clnt *clnt, long *timeout)
		*timeout = NFS4_POLL_RETRY_MIN;
	if (*timeout > NFS4_POLL_RETRY_MAX)
		*timeout = NFS4_POLL_RETRY_MAX;
	freezable_schedule_timeout_killable(*timeout);
	if (fatal_signal_pending(current))
/* This is the error handling routine for processes that are allowed
static int nfs4_handle_exception(struct nfs_server *server, int errorcode, struct nfs4_exception *exception)
	struct nfs_client *clp = server->nfs_client;
	struct nfs4_state *state = exception->state;
	struct inode *inode = exception->inode;

	exception->retry = 0;
	case -NFS4ERR_OPENMODE:
		if (inode && nfs_have_delegation(inode, FMODE_READ)) {
			nfs_inode_return_delegation(inode);
			exception->retry = 1;
		nfs4_schedule_stateid_recovery(server, state);
		goto wait_on_recovery;
	case -NFS4ERR_DELEG_REVOKED:
	case -NFS4ERR_ADMIN_REVOKED:
	case -NFS4ERR_BAD_STATEID:
		nfs_remove_bad_delegation(state->inode);
		nfs4_schedule_stateid_recovery(server, state);
		goto wait_on_recovery;
	case -NFS4ERR_EXPIRED:
		nfs4_schedule_stateid_recovery(server, state);
	case -NFS4ERR_STALE_STATEID:
	case -NFS4ERR_STALE_CLIENTID:
		nfs4_schedule_lease_recovery(clp);
		goto wait_on_recovery;
#if defined(CONFIG_NFS_V4_1)
	case -NFS4ERR_BADSESSION:
	case -NFS4ERR_BADSLOT:
	case -NFS4ERR_BAD_HIGH_SLOT:
	case -NFS4ERR_CONN_NOT_BOUND_TO_SESSION:
	case -NFS4ERR_DEADSESSION:
	case -NFS4ERR_SEQ_FALSE_RETRY:
	case -NFS4ERR_SEQ_MISORDERED:
		dprintk("%s ERROR: %d Reset session\n", __func__,
		nfs4_schedule_session_recovery(clp->cl_session, errorcode);
		exception->retry = 1;
#endif /* defined(CONFIG_NFS_V4_1) */
	case -NFS4ERR_FILE_OPEN:
		if (exception->timeout > HZ) {
			/* We have retried a decent amount, time to
		ret = nfs4_delay(server->client, &exception->timeout);
	case -NFS4ERR_RETRY_UNCACHED_REP:
	case -NFS4ERR_OLD_STATEID:
		exception->retry = 1;
	case -NFS4ERR_BADOWNER:
		/* The following works around a Linux server bug! */
	case -NFS4ERR_BADNAME:
		if (server->caps & NFS_CAP_UIDGID_NOMAP) {
			server->caps &= ~NFS_CAP_UIDGID_NOMAP;
			exception->retry = 1;
			printk(KERN_WARNING "NFS: v4 server %s "
					"does not accept raw "
					"Reenabling the idmapper.\n",
					server->nfs_client->cl_hostname);
	/* We failed to handle the error */
	return nfs4_map_errors(ret);
	ret = nfs4_wait_clnt_recover(clp);
		exception->retry = 1;
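
/*
 * Record the timestamp of the most recent lease-renewing operation so
 * the state manager knows when the client's lease was last refreshed.
 */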
static void do_renew_lease(struct nfs_client *clp, unsigned long timestamp)
	spin_lock(&clp->cl_lock);
	if (time_before(clp->cl_last_renewal, timestamp))
		clp->cl_last_renewal = timestamp;
	spin_unlock(&clp->cl_lock);

static void renew_lease(const struct nfs_server *server, unsigned long timestamp)
	do_renew_lease(server->nfs_client, timestamp);
#if defined(CONFIG_NFS_V4_1)

 * nfs4_free_slot - free a slot and efficiently update slot table.
 * freeing a slot is trivially done by clearing its respective bit
 * If the freed slotid equals highest_used_slotid we want to update it
 * so that the server would be able to size down the slot table if needed,
 * otherwise we know that the highest_used_slotid is still in use.
 * When updating highest_used_slotid there may be "holes" in the bitmap
 * so we need to scan down from highest_used_slotid to 0 looking for the now
 * highest slotid in use.
 * If none found, highest_used_slotid is set to NFS4_NO_SLOT.
 * Must be called while holding tbl->slot_tbl_lock
nfs4_free_slot(struct nfs4_slot_table *tbl, u32 slotid)
	BUG_ON(slotid >= NFS4_MAX_SLOT_TABLE);
	/* clear used bit in bitmap */
	__clear_bit(slotid, tbl->used_slots);

	/* update highest_used_slotid when it is freed */
	if (slotid == tbl->highest_used_slotid) {
		slotid = find_last_bit(tbl->used_slots, tbl->max_slots);
		if (slotid < tbl->max_slots)
			tbl->highest_used_slotid = slotid;
			tbl->highest_used_slotid = NFS4_NO_SLOT;
	dprintk("%s: slotid %u highest_used_slotid %d\n", __func__,
		slotid, tbl->highest_used_slotid);
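
/*
 * rpc_wake_up_first() callback: promote the woken task to the
 * privileged priority so it may run while the session is draining.
 */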
bool nfs4_set_task_privileged(struct rpc_task *task, void *dummy)
	rpc_task_set_priority(task, RPC_PRIORITY_PRIVILEGED);
 * Signal state manager thread if session fore channel is drained
static void nfs4_check_drain_fc_complete(struct nfs4_session *ses)
	if (!test_bit(NFS4_SESSION_DRAINING, &ses->session_state)) {
		rpc_wake_up_first(&ses->fc_slot_table.slot_tbl_waitq,
				nfs4_set_task_privileged, NULL);
	if (ses->fc_slot_table.highest_used_slotid != NFS4_NO_SLOT)
	dprintk("%s COMPLETE: Session Fore Channel Drained\n", __func__);
	complete(&ses->fc_slot_table.complete);

 * Signal state manager thread if session back channel is drained
void nfs4_check_drain_bc_complete(struct nfs4_session *ses)
	if (!test_bit(NFS4_SESSION_DRAINING, &ses->session_state) ||
	    ses->bc_slot_table.highest_used_slotid != NFS4_NO_SLOT)
	dprintk("%s COMPLETE: Session Back Channel Drained\n", __func__);
	complete(&ses->bc_slot_table.complete);
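
/*
 * Release the session slot held by a completed request (if any) and,
 * under slot_tbl_lock, re-check whether the fore channel has drained.
 */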
static void nfs41_sequence_free_slot(struct nfs4_sequence_res *res)
	struct nfs4_slot_table *tbl;

	tbl = &res->sr_session->fc_slot_table;
		/* just wake up the next guy waiting since
		 * we may have not consumed a slot after all */
		dprintk("%s: No slot\n", __func__);
	spin_lock(&tbl->slot_tbl_lock);
	nfs4_free_slot(tbl, res->sr_slot - tbl->slots);
	nfs4_check_drain_fc_complete(res->sr_session);
	spin_unlock(&tbl->slot_tbl_lock);
static int nfs41_sequence_done(struct rpc_task *task, struct nfs4_sequence_res *res)
	unsigned long timestamp;
	struct nfs_client *clp;

	 * sr_status remains 1 if an RPC level error occurred. The server
	 * may or may not have processed the sequence operation..
	 * Proceed as if the server received and processed the sequence
	if (res->sr_status == 1)
		res->sr_status = NFS_OK;

	/* don't increment the sequence number if the task wasn't sent */
	if (!RPC_WAS_SENT(task))

	/* Check the SEQUENCE operation status */
	switch (res->sr_status) {
		/* Update the slot's sequence and clientid lease timer */
		++res->sr_slot->seq_nr;
		timestamp = res->sr_renewal_time;
		clp = res->sr_session->clp;
		do_renew_lease(clp, timestamp);
		/* Check sequence flags */
		if (res->sr_status_flags != 0)
			nfs4_schedule_lease_recovery(clp);
		/* The server detected a resend of the RPC call and
		 * returned NFS4ERR_DELAY as per Section 2.10.6.2
		dprintk("%s: slot=%td seq=%d: Operation in progress\n",
			res->sr_slot - res->sr_session->fc_slot_table.slots,
			res->sr_slot->seq_nr);
		/* Just update the slot sequence no. */
		++res->sr_slot->seq_nr;
		/* The session may be reset by one of the error handlers. */
		dprintk("%s: Error %d free the slot \n", __func__, res->sr_status);
		nfs41_sequence_free_slot(res);
	if (!rpc_restart_call(task))
		rpc_delay(task, NFS4_POLL_RETRY_MAX);

static int nfs4_sequence_done(struct rpc_task *task,
			       struct nfs4_sequence_res *res)
	if (res->sr_session == NULL)
	return nfs41_sequence_done(task, res);
 * nfs4_find_slot - efficiently look for a free slot
 * nfs4_find_slot looks for an unset bit in the used_slots bitmap.
 * If found, we mark the slot as used, update the highest_used_slotid,
 * and respectively set up the sequence operation args.
 * The slot number is returned if found, or NFS4_NO_SLOT otherwise.
 * Note: must be called under the slot_tbl_lock.
nfs4_find_slot(struct nfs4_slot_table *tbl)
	u32 ret_id = NFS4_NO_SLOT;

	dprintk("--> %s used_slots=%04lx highest_used=%u max_slots=%u\n",
		__func__, tbl->used_slots[0], tbl->highest_used_slotid,
	slotid = find_first_zero_bit(tbl->used_slots, tbl->max_slots);
	if (slotid >= tbl->max_slots)
	__set_bit(slotid, tbl->used_slots);
	if (slotid > tbl->highest_used_slotid ||
	    tbl->highest_used_slotid == NFS4_NO_SLOT)
		tbl->highest_used_slotid = slotid;
	dprintk("<-- %s used_slots=%04lx highest_used=%d slotid=%d \n",
		__func__, tbl->used_slots[0], tbl->highest_used_slotid, ret_id);
static void nfs41_init_sequence(struct nfs4_sequence_args *args,
		struct nfs4_sequence_res *res, int cache_reply)
	args->sa_session = NULL;
	args->sa_cache_this = 0;
		args->sa_cache_this = 1;
	res->sr_session = NULL;
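
/*
 * Reserve a fore channel slot for this RPC. The task is put to sleep on
 * the slot table waitqueue if the session is draining, if other tasks
 * are already queued (to preserve FIFO order), or if no slot is free.
 */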
int nfs41_setup_sequence(struct nfs4_session *session,
			 struct nfs4_sequence_args *args,
			 struct nfs4_sequence_res *res,
			 struct rpc_task *task)
	struct nfs4_slot *slot;
	struct nfs4_slot_table *tbl;

	dprintk("--> %s\n", __func__);
	/* slot already allocated? */
	if (res->sr_slot != NULL)
	tbl = &session->fc_slot_table;

	spin_lock(&tbl->slot_tbl_lock);
	if (test_bit(NFS4_SESSION_DRAINING, &session->session_state) &&
	    !rpc_task_has_priority(task, RPC_PRIORITY_PRIVILEGED)) {
		/* The state manager will wait until the slot table is empty */
		rpc_sleep_on(&tbl->slot_tbl_waitq, task, NULL);
		spin_unlock(&tbl->slot_tbl_lock);
		dprintk("%s session is draining\n", __func__);
	if (!rpc_queue_empty(&tbl->slot_tbl_waitq) &&
	    !rpc_task_has_priority(task, RPC_PRIORITY_PRIVILEGED)) {
		rpc_sleep_on(&tbl->slot_tbl_waitq, task, NULL);
		spin_unlock(&tbl->slot_tbl_lock);
		dprintk("%s enforce FIFO order\n", __func__);
	slotid = nfs4_find_slot(tbl);
	if (slotid == NFS4_NO_SLOT) {
		rpc_sleep_on(&tbl->slot_tbl_waitq, task, NULL);
		spin_unlock(&tbl->slot_tbl_lock);
		dprintk("<-- %s: no free slots\n", __func__);
	spin_unlock(&tbl->slot_tbl_lock);

	rpc_task_set_priority(task, RPC_PRIORITY_NORMAL);
	slot = tbl->slots + slotid;
	args->sa_session = session;
	args->sa_slotid = slotid;

	dprintk("<-- %s slotid=%d seqid=%d\n", __func__, slotid, slot->seq_nr);

	res->sr_session = session;
	res->sr_renewal_time = jiffies;
	res->sr_status_flags = 0;
	 * sr_status is only set in decode_sequence, and so will remain
	 * set to 1 if an rpc level failure occurs.
EXPORT_SYMBOL_GPL(nfs41_setup_sequence);
int nfs4_setup_sequence(const struct nfs_server *server,
			struct nfs4_sequence_args *args,
			struct nfs4_sequence_res *res,
			struct rpc_task *task)
	struct nfs4_session *session = nfs4_get_session(server);

	dprintk("--> %s clp %p session %p sr_slot %td\n",
		__func__, session->clp, session, res->sr_slot ?
		res->sr_slot - session->fc_slot_table.slots : -1);
	ret = nfs41_setup_sequence(session, args, res, task);
	dprintk("<-- %s status=%d\n", __func__, ret);
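
/* Book-keeping handed to the rpc_call_ops used for synchronous NFSv4.1 calls. */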
struct nfs41_call_sync_data {
	const struct nfs_server *seq_server;
	struct nfs4_sequence_args *seq_args;
	struct nfs4_sequence_res *seq_res;

static void nfs41_call_sync_prepare(struct rpc_task *task, void *calldata)
	struct nfs41_call_sync_data *data = calldata;

	dprintk("--> %s data->seq_server %p\n", __func__, data->seq_server);
	if (nfs4_setup_sequence(data->seq_server, data->seq_args,
				data->seq_res, task))
	rpc_call_start(task);

static void nfs41_call_priv_sync_prepare(struct rpc_task *task, void *calldata)
	rpc_task_set_priority(task, RPC_PRIORITY_PRIVILEGED);
	nfs41_call_sync_prepare(task, calldata);

static void nfs41_call_sync_done(struct rpc_task *task, void *calldata)
	struct nfs41_call_sync_data *data = calldata;

	nfs41_sequence_done(task, data->seq_res);

static const struct rpc_call_ops nfs41_call_sync_ops = {
	.rpc_call_prepare = nfs41_call_sync_prepare,
	.rpc_call_done = nfs41_call_sync_done,

static const struct rpc_call_ops nfs41_call_priv_sync_ops = {
	.rpc_call_prepare = nfs41_call_priv_sync_prepare,
	.rpc_call_done = nfs41_call_sync_done,
static int nfs4_call_sync_sequence(struct rpc_clnt *clnt,
				   struct nfs_server *server,
				   struct rpc_message *msg,
				   struct nfs4_sequence_args *args,
				   struct nfs4_sequence_res *res,
	struct rpc_task *task;
	struct nfs41_call_sync_data data = {
		.seq_server = server,
	struct rpc_task_setup task_setup = {
		.callback_ops = &nfs41_call_sync_ops,
		.callback_data = &data
		task_setup.callback_ops = &nfs41_call_priv_sync_ops;
	task = rpc_run_task(&task_setup);
	ret = task->tk_status;

int _nfs4_call_sync_session(struct rpc_clnt *clnt,
			    struct nfs_server *server,
			    struct rpc_message *msg,
			    struct nfs4_sequence_args *args,
			    struct nfs4_sequence_res *res,
	nfs41_init_sequence(args, res, cache_reply);
	return nfs4_call_sync_sequence(clnt, server, msg, args, res, 0);
void nfs41_init_sequence(struct nfs4_sequence_args *args,
		struct nfs4_sequence_res *res, int cache_reply)

static int nfs4_sequence_done(struct rpc_task *task,
			       struct nfs4_sequence_res *res)
#endif /* CONFIG_NFS_V4_1 */

int _nfs4_call_sync(struct rpc_clnt *clnt,
		    struct nfs_server *server,
		    struct rpc_message *msg,
		    struct nfs4_sequence_args *args,
		    struct nfs4_sequence_res *res,
	nfs41_init_sequence(args, res, cache_reply);
	return rpc_call_sync(clnt, msg, 0);

int nfs4_call_sync(struct rpc_clnt *clnt,
		   struct nfs_server *server,
		   struct rpc_message *msg,
		   struct nfs4_sequence_args *args,
		   struct nfs4_sequence_res *res,
	return server->nfs_client->cl_mvops->call_sync(clnt, server, msg,
						       args, res, cache_reply);
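
/*
 * Apply the change_info4 returned by a directory-modifying operation to
 * the parent directory's cached attributes and change attribute.
 */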
static void update_changeattr(struct inode *dir, struct nfs4_change_info *cinfo)
	struct nfs_inode *nfsi = NFS_I(dir);

	spin_lock(&dir->i_lock);
	nfsi->cache_validity |= NFS_INO_INVALID_ATTR|NFS_INO_INVALID_DATA;
	if (!cinfo->atomic || cinfo->before != dir->i_version)
		nfs_force_lookup_revalidate(dir);
	dir->i_version = cinfo->after;
	spin_unlock(&dir->i_lock);
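
/* State carried across the compounds that make up an NFSv4 OPEN. */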
struct nfs4_opendata {
	struct nfs_openargs o_arg;
	struct nfs_openres o_res;
	struct nfs_open_confirmargs c_arg;
	struct nfs_open_confirmres c_res;
	struct nfs4_string owner_name;
	struct nfs4_string group_name;
	struct nfs_fattr f_attr;
	struct dentry *dentry;
	struct nfs4_state_owner *owner;
	struct nfs4_state *state;
	unsigned long timestamp;
	unsigned int rpc_done : 1;

static void nfs4_init_opendata_res(struct nfs4_opendata *p)
	p->o_res.f_attr = &p->f_attr;
	p->o_res.seqid = p->o_arg.seqid;
	p->c_res.seqid = p->c_arg.seqid;
	p->o_res.server = p->o_arg.server;
	nfs_fattr_init(&p->f_attr);
	nfs_fattr_init_names(&p->f_attr, &p->owner_name, &p->group_name);
static struct nfs4_opendata *nfs4_opendata_alloc(struct dentry *dentry,
		struct nfs4_state_owner *sp, fmode_t fmode, int flags,
		const struct iattr *attrs,
	struct dentry *parent = dget_parent(dentry);
	struct inode *dir = parent->d_inode;
	struct nfs_server *server = NFS_SERVER(dir);
	struct nfs4_opendata *p;

	p = kzalloc(sizeof(*p), gfp_mask);
	p->o_arg.seqid = nfs_alloc_seqid(&sp->so_seqid, gfp_mask);
	if (p->o_arg.seqid == NULL)
	nfs_sb_active(dentry->d_sb);
	p->dentry = dget(dentry);
	atomic_inc(&sp->so_count);
	p->o_arg.fh = NFS_FH(dir);
	p->o_arg.open_flags = flags;
	p->o_arg.fmode = fmode & (FMODE_READ|FMODE_WRITE);
	p->o_arg.clientid = server->nfs_client->cl_clientid;
	p->o_arg.id.create_time = ktime_to_ns(sp->so_seqid.create_time);
	p->o_arg.id.uniquifier = sp->so_seqid.owner_id;
	p->o_arg.name = &dentry->d_name;
	p->o_arg.server = server;
	p->o_arg.bitmask = server->attr_bitmask;
	p->o_arg.claim = NFS4_OPEN_CLAIM_NULL;
	if (attrs != NULL && attrs->ia_valid != 0) {
		p->o_arg.u.attrs = &p->attrs;
		memcpy(&p->attrs, attrs, sizeof(p->attrs));
		verf[1] = current->pid;
		memcpy(p->o_arg.u.verifier.data, verf,
				sizeof(p->o_arg.u.verifier.data));
	p->c_arg.fh = &p->o_res.fh;
	p->c_arg.stateid = &p->o_res.stateid;
	p->c_arg.seqid = p->o_arg.seqid;
	nfs4_init_opendata_res(p);

static void nfs4_opendata_free(struct kref *kref)
	struct nfs4_opendata *p = container_of(kref,
			struct nfs4_opendata, kref);
	struct super_block *sb = p->dentry->d_sb;

	nfs_free_seqid(p->o_arg.seqid);
	if (p->state != NULL)
		nfs4_put_open_state(p->state);
	nfs4_put_state_owner(p->owner);
	nfs_fattr_free_names(&p->f_attr);

static void nfs4_opendata_put(struct nfs4_opendata *p)
	kref_put(&p->kref, nfs4_opendata_free);
static int nfs4_wait_for_completion_rpc_task(struct rpc_task *task)
	ret = rpc_wait_for_completion_task(task);
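
/*
 * Can this open be satisfied from state we already hold, without another
 * OPEN on the wire? Only if it isn't O_EXCL/O_TRUNC and we already have
 * an open stateid covering the requested share mode.
 */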
static int can_open_cached(struct nfs4_state *state, fmode_t mode, int open_mode)
	if (open_mode & (O_EXCL|O_TRUNC))
	switch (mode & (FMODE_READ|FMODE_WRITE)) {
		ret |= test_bit(NFS_O_RDONLY_STATE, &state->flags) != 0
			&& state->n_rdonly != 0;
		ret |= test_bit(NFS_O_WRONLY_STATE, &state->flags) != 0
			&& state->n_wronly != 0;
	case FMODE_READ|FMODE_WRITE:
		ret |= test_bit(NFS_O_RDWR_STATE, &state->flags) != 0
			&& state->n_rdwr != 0;

static int can_open_delegated(struct nfs_delegation *delegation, fmode_t fmode)
	if (delegation == NULL)
	if ((delegation->type & fmode) != fmode)
	if (test_bit(NFS_DELEGATION_NEED_RECLAIM, &delegation->flags))
	nfs_mark_delegation_referenced(delegation);
static void update_open_stateflags(struct nfs4_state *state, fmode_t fmode)
	case FMODE_READ|FMODE_WRITE:
	nfs4_state_set_mode_locked(state, state->state | fmode);

static void nfs_set_open_stateid_locked(struct nfs4_state *state, nfs4_stateid *stateid, fmode_t fmode)
	if (test_bit(NFS_DELEGATED_STATE, &state->flags) == 0)
		nfs4_stateid_copy(&state->stateid, stateid);
	nfs4_stateid_copy(&state->open_stateid, stateid);
		set_bit(NFS_O_RDONLY_STATE, &state->flags);
		set_bit(NFS_O_WRONLY_STATE, &state->flags);
	case FMODE_READ|FMODE_WRITE:
		set_bit(NFS_O_RDWR_STATE, &state->flags);

static void nfs_set_open_stateid(struct nfs4_state *state, nfs4_stateid *stateid, fmode_t fmode)
	write_seqlock(&state->seqlock);
	nfs_set_open_stateid_locked(state, stateid, fmode);
	write_sequnlock(&state->seqlock);
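
/*
 * Update the stateids cached in the nfs4_state after an OPEN or a
 * delegation grant; a delegation stateid, when present, takes precedence.
 */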
static void __update_open_stateid(struct nfs4_state *state, nfs4_stateid *open_stateid, const nfs4_stateid *deleg_stateid, fmode_t fmode)
	 * Protect the call to nfs4_state_set_mode_locked and
	 * serialise the stateid update
	write_seqlock(&state->seqlock);
	if (deleg_stateid != NULL) {
		nfs4_stateid_copy(&state->stateid, deleg_stateid);
		set_bit(NFS_DELEGATED_STATE, &state->flags);
	if (open_stateid != NULL)
		nfs_set_open_stateid_locked(state, open_stateid, fmode);
	write_sequnlock(&state->seqlock);
	spin_lock(&state->owner->so_lock);
	update_open_stateflags(state, fmode);
	spin_unlock(&state->owner->so_lock);

static int update_open_stateid(struct nfs4_state *state, nfs4_stateid *open_stateid, nfs4_stateid *delegation, fmode_t fmode)
	struct nfs_inode *nfsi = NFS_I(state->inode);
	struct nfs_delegation *deleg_cur;

	fmode &= (FMODE_READ|FMODE_WRITE);

	deleg_cur = rcu_dereference(nfsi->delegation);
	if (deleg_cur == NULL)

	spin_lock(&deleg_cur->lock);
	if (nfsi->delegation != deleg_cur ||
	   (deleg_cur->type & fmode) != fmode)
		goto no_delegation_unlock;

	if (delegation == NULL)
		delegation = &deleg_cur->stateid;
	else if (!nfs4_stateid_match(&deleg_cur->stateid, delegation))
		goto no_delegation_unlock;

	nfs_mark_delegation_referenced(deleg_cur);
	__update_open_stateid(state, open_stateid, &deleg_cur->stateid, fmode);
no_delegation_unlock:
	spin_unlock(&deleg_cur->lock);

	if (!ret && open_stateid != NULL) {
		__update_open_stateid(state, open_stateid, NULL, fmode);
static void nfs4_return_incompatible_delegation(struct inode *inode, fmode_t fmode)
	struct nfs_delegation *delegation;

	delegation = rcu_dereference(NFS_I(inode)->delegation);
	if (delegation == NULL || (delegation->type & fmode) == fmode) {
	nfs_inode_return_delegation(inode);

static struct nfs4_state *nfs4_try_open_cached(struct nfs4_opendata *opendata)
	struct nfs4_state *state = opendata->state;
	struct nfs_inode *nfsi = NFS_I(state->inode);
	struct nfs_delegation *delegation;
	int open_mode = opendata->o_arg.open_flags & (O_EXCL|O_TRUNC);
	fmode_t fmode = opendata->o_arg.fmode;
	nfs4_stateid stateid;

	if (can_open_cached(state, fmode, open_mode)) {
		spin_lock(&state->owner->so_lock);
		if (can_open_cached(state, fmode, open_mode)) {
			update_open_stateflags(state, fmode);
			spin_unlock(&state->owner->so_lock);
			goto out_return_state;
		spin_unlock(&state->owner->so_lock);
	delegation = rcu_dereference(nfsi->delegation);
	if (!can_open_delegated(delegation, fmode)) {
	/* Save the delegation */
	nfs4_stateid_copy(&stateid, &delegation->stateid);
	ret = nfs_may_open(state->inode, state->owner->so_cred, open_mode);
	/* Try to update the stateid using the delegation */
	if (update_open_stateid(state, NULL, &stateid, fmode))
		goto out_return_state;
	return ERR_PTR(ret);
	atomic_inc(&state->count);
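
/*
 * Turn a completed (or cached) OPEN into a referenced nfs4_state,
 * instantiating the inode and recording any delegation the server returned.
 */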
static struct nfs4_state *nfs4_opendata_to_nfs4_state(struct nfs4_opendata *data)
	struct inode *inode;
	struct nfs4_state *state = NULL;
	struct nfs_delegation *delegation;

	if (!data->rpc_done) {
		state = nfs4_try_open_cached(data);
	if (!(data->f_attr.valid & NFS_ATTR_FATTR))
	inode = nfs_fhget(data->dir->d_sb, &data->o_res.fh, &data->f_attr);
	ret = PTR_ERR(inode);
	state = nfs4_get_open_state(inode, data->owner);
	if (data->o_res.delegation_type != 0) {
		struct nfs_client *clp = NFS_SERVER(inode)->nfs_client;
		int delegation_flags = 0;

		delegation = rcu_dereference(NFS_I(inode)->delegation);
			delegation_flags = delegation->flags;
		if (data->o_arg.claim == NFS4_OPEN_CLAIM_DELEGATE_CUR) {
			pr_err_ratelimited("NFS: Broken NFSv4 server %s is "
					"returning a delegation for "
					"OPEN(CLAIM_DELEGATE_CUR)\n",
		} else if ((delegation_flags & 1UL<<NFS_DELEGATION_NEED_RECLAIM) == 0)
			nfs_inode_set_delegation(state->inode,
					data->owner->so_cred,
			nfs_inode_reclaim_delegation(state->inode,
					data->owner->so_cred,
	update_open_stateid(state, &data->o_res.stateid, NULL,
	return ERR_PTR(ret);
static struct nfs_open_context *nfs4_state_find_open_context(struct nfs4_state *state)
	struct nfs_inode *nfsi = NFS_I(state->inode);
	struct nfs_open_context *ctx;

	spin_lock(&state->inode->i_lock);
	list_for_each_entry(ctx, &nfsi->open_files, list) {
		if (ctx->state != state)
		get_nfs_open_context(ctx);
		spin_unlock(&state->inode->i_lock);
	spin_unlock(&state->inode->i_lock);
	return ERR_PTR(-ENOENT);

static struct nfs4_opendata *nfs4_open_recoverdata_alloc(struct nfs_open_context *ctx, struct nfs4_state *state)
	struct nfs4_opendata *opendata;

	opendata = nfs4_opendata_alloc(ctx->dentry, state->owner, 0, 0, NULL, GFP_NOFS);
	if (opendata == NULL)
		return ERR_PTR(-ENOMEM);
	opendata->state = state;
	atomic_inc(&state->count);
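
/*
 * Helpers for re-establishing open state after recovery: an OPEN is
 * re-sent for each share mode (read, write, read/write) that is still
 * in use on the nfs4_state.
 */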
static int nfs4_open_recover_helper(struct nfs4_opendata *opendata, fmode_t fmode, struct nfs4_state **res)
	struct nfs4_state *newstate;

	opendata->o_arg.open_flags = 0;
	opendata->o_arg.fmode = fmode;
	memset(&opendata->o_res, 0, sizeof(opendata->o_res));
	memset(&opendata->c_res, 0, sizeof(opendata->c_res));
	nfs4_init_opendata_res(opendata);
	ret = _nfs4_recover_proc_open(opendata);
	newstate = nfs4_opendata_to_nfs4_state(opendata);
	if (IS_ERR(newstate))
		return PTR_ERR(newstate);
	nfs4_close_state(newstate, fmode);

static int nfs4_open_recover(struct nfs4_opendata *opendata, struct nfs4_state *state)
	struct nfs4_state *newstate;

	/* memory barrier prior to reading state->n_* */
	clear_bit(NFS_DELEGATED_STATE, &state->flags);
	if (state->n_rdwr != 0) {
		clear_bit(NFS_O_RDWR_STATE, &state->flags);
		ret = nfs4_open_recover_helper(opendata, FMODE_READ|FMODE_WRITE, &newstate);
		if (newstate != state)
	if (state->n_wronly != 0) {
		clear_bit(NFS_O_WRONLY_STATE, &state->flags);
		ret = nfs4_open_recover_helper(opendata, FMODE_WRITE, &newstate);
		if (newstate != state)
	if (state->n_rdonly != 0) {
		clear_bit(NFS_O_RDONLY_STATE, &state->flags);
		ret = nfs4_open_recover_helper(opendata, FMODE_READ, &newstate);
		if (newstate != state)
	 * We may have performed cached opens for all three recoveries.
	 * Check if we need to update the current stateid.
	if (test_bit(NFS_DELEGATED_STATE, &state->flags) == 0 &&
	    !nfs4_stateid_match(&state->stateid, &state->open_stateid)) {
		write_seqlock(&state->seqlock);
		if (test_bit(NFS_DELEGATED_STATE, &state->flags) == 0)
			nfs4_stateid_copy(&state->stateid, &state->open_stateid);
		write_sequnlock(&state->seqlock);
 * reclaim state on the server after a reboot.
static int _nfs4_do_open_reclaim(struct nfs_open_context *ctx, struct nfs4_state *state)
	struct nfs_delegation *delegation;
	struct nfs4_opendata *opendata;
	fmode_t delegation_type = 0;

	opendata = nfs4_open_recoverdata_alloc(ctx, state);
	if (IS_ERR(opendata))
		return PTR_ERR(opendata);
	opendata->o_arg.claim = NFS4_OPEN_CLAIM_PREVIOUS;
	opendata->o_arg.fh = NFS_FH(state->inode);
	delegation = rcu_dereference(NFS_I(state->inode)->delegation);
	if (delegation != NULL && test_bit(NFS_DELEGATION_NEED_RECLAIM, &delegation->flags) != 0)
		delegation_type = delegation->type;
	opendata->o_arg.u.delegation_type = delegation_type;
	status = nfs4_open_recover(opendata, state);
	nfs4_opendata_put(opendata);

static int nfs4_do_open_reclaim(struct nfs_open_context *ctx, struct nfs4_state *state)
	struct nfs_server *server = NFS_SERVER(state->inode);
	struct nfs4_exception exception = { };

		err = _nfs4_do_open_reclaim(ctx, state);
		if (err != -NFS4ERR_DELAY)
		nfs4_handle_exception(server, err, &exception);
	} while (exception.retry);

static int nfs4_open_reclaim(struct nfs4_state_owner *sp, struct nfs4_state *state)
	struct nfs_open_context *ctx;

	ctx = nfs4_state_find_open_context(state);
		return PTR_ERR(ctx);
	ret = nfs4_do_open_reclaim(ctx, state);
	put_nfs_open_context(ctx);
static int _nfs4_open_delegation_recall(struct nfs_open_context *ctx, struct nfs4_state *state, const nfs4_stateid *stateid)
	struct nfs4_opendata *opendata;

	opendata = nfs4_open_recoverdata_alloc(ctx, state);
	if (IS_ERR(opendata))
		return PTR_ERR(opendata);
	opendata->o_arg.claim = NFS4_OPEN_CLAIM_DELEGATE_CUR;
	nfs4_stateid_copy(&opendata->o_arg.u.delegation, stateid);
	ret = nfs4_open_recover(opendata, state);
	nfs4_opendata_put(opendata);

int nfs4_open_delegation_recall(struct nfs_open_context *ctx, struct nfs4_state *state, const nfs4_stateid *stateid)
	struct nfs4_exception exception = { };
	struct nfs_server *server = NFS_SERVER(state->inode);

		err = _nfs4_open_delegation_recall(ctx, state, stateid);
			case -NFS4ERR_BADSESSION:
			case -NFS4ERR_BADSLOT:
			case -NFS4ERR_BAD_HIGH_SLOT:
			case -NFS4ERR_CONN_NOT_BOUND_TO_SESSION:
			case -NFS4ERR_DEADSESSION:
				nfs4_schedule_session_recovery(server->nfs_client->cl_session, err);
			case -NFS4ERR_STALE_CLIENTID:
			case -NFS4ERR_STALE_STATEID:
			case -NFS4ERR_EXPIRED:
				/* Don't recall a delegation if it was lost */
				nfs4_schedule_lease_recovery(server->nfs_client);
			 * The show must go on: exit, but mark the
			 * stateid as needing recovery.
			case -NFS4ERR_DELEG_REVOKED:
			case -NFS4ERR_ADMIN_REVOKED:
			case -NFS4ERR_BAD_STATEID:
				nfs_inode_find_state_and_recover(state->inode,
				nfs4_schedule_stateid_recovery(server, state);
			 * User RPCSEC_GSS context has expired.
			 * We cannot recover this stateid now, so
			 * skip it and allow recovery thread to
		err = nfs4_handle_exception(server, err, &exception);
	} while (exception.retry);
static void nfs4_open_confirm_done(struct rpc_task *task, void *calldata)
	struct nfs4_opendata *data = calldata;

	data->rpc_status = task->tk_status;
	if (data->rpc_status == 0) {
		nfs4_stateid_copy(&data->o_res.stateid, &data->c_res.stateid);
		nfs_confirm_seqid(&data->owner->so_seqid, 0);
		renew_lease(data->o_res.server, data->timestamp);

static void nfs4_open_confirm_release(void *calldata)
	struct nfs4_opendata *data = calldata;
	struct nfs4_state *state = NULL;

	/* If this request hasn't been cancelled, do nothing */
	if (data->cancelled == 0)
	/* In case of error, no cleanup! */
	if (!data->rpc_done)
	state = nfs4_opendata_to_nfs4_state(data);
		nfs4_close_state(state, data->o_arg.fmode);
	nfs4_opendata_put(data);

static const struct rpc_call_ops nfs4_open_confirm_ops = {
	.rpc_call_done = nfs4_open_confirm_done,
	.rpc_release = nfs4_open_confirm_release,
 * Note: On error, nfs4_proc_open_confirm will free the struct nfs4_opendata
static int _nfs4_proc_open_confirm(struct nfs4_opendata *data)
	struct nfs_server *server = NFS_SERVER(data->dir->d_inode);
	struct rpc_task *task;
	struct rpc_message msg = {
		.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_OPEN_CONFIRM],
		.rpc_argp = &data->c_arg,
		.rpc_resp = &data->c_res,
		.rpc_cred = data->owner->so_cred,
	struct rpc_task_setup task_setup_data = {
		.rpc_client = server->client,
		.rpc_message = &msg,
		.callback_ops = &nfs4_open_confirm_ops,
		.callback_data = data,
		.workqueue = nfsiod_workqueue,
		.flags = RPC_TASK_ASYNC,

	kref_get(&data->kref);
	data->rpc_status = 0;
	data->timestamp = jiffies;
	task = rpc_run_task(&task_setup_data);
		return PTR_ERR(task);
	status = nfs4_wait_for_completion_rpc_task(task);
		data->cancelled = 1;
	status = data->rpc_status;
static void nfs4_open_prepare(struct rpc_task *task, void *calldata)
	struct nfs4_opendata *data = calldata;
	struct nfs4_state_owner *sp = data->owner;

	if (nfs_wait_on_sequence(data->o_arg.seqid, task) != 0)
	 * Check if we still need to send an OPEN call, or if we can use
	 * a delegation instead.
	if (data->state != NULL) {
		struct nfs_delegation *delegation;

		if (can_open_cached(data->state, data->o_arg.fmode, data->o_arg.open_flags))
		delegation = rcu_dereference(NFS_I(data->state->inode)->delegation);
		if (data->o_arg.claim != NFS4_OPEN_CLAIM_DELEGATE_CUR &&
		    can_open_delegated(delegation, data->o_arg.fmode))
			goto unlock_no_action;
	/* Update client id. */
	data->o_arg.clientid = sp->so_server->nfs_client->cl_clientid;
	if (data->o_arg.claim == NFS4_OPEN_CLAIM_PREVIOUS) {
		task->tk_msg.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_OPEN_NOATTR];
		nfs_copy_fh(&data->o_res.fh, data->o_arg.fh);
	data->timestamp = jiffies;
	if (nfs4_setup_sequence(data->o_arg.server,
				&data->o_arg.seq_args,
				&data->o_res.seq_res, task))
	rpc_call_start(task);
	task->tk_action = NULL;

static void nfs4_recover_open_prepare(struct rpc_task *task, void *calldata)
	rpc_task_set_priority(task, RPC_PRIORITY_PRIVILEGED);
	nfs4_open_prepare(task, calldata);
static void nfs4_open_done(struct rpc_task *task, void *calldata)
	struct nfs4_opendata *data = calldata;

	data->rpc_status = task->tk_status;
	if (!nfs4_sequence_done(task, &data->o_res.seq_res))
	if (task->tk_status == 0) {
		switch (data->o_res.f_attr->mode & S_IFMT) {
			data->rpc_status = -ELOOP;
			data->rpc_status = -EISDIR;
			data->rpc_status = -ENOTDIR;
		renew_lease(data->o_res.server, data->timestamp);
		if (!(data->o_res.rflags & NFS4_OPEN_RESULT_CONFIRM))
			nfs_confirm_seqid(&data->owner->so_seqid, 0);

static void nfs4_open_release(void *calldata)
	struct nfs4_opendata *data = calldata;
	struct nfs4_state *state = NULL;

	/* If this request hasn't been cancelled, do nothing */
	if (data->cancelled == 0)
	/* In case of error, no cleanup! */
	if (data->rpc_status != 0 || !data->rpc_done)
	/* In case we need an open_confirm, no cleanup! */
	if (data->o_res.rflags & NFS4_OPEN_RESULT_CONFIRM)
	state = nfs4_opendata_to_nfs4_state(data);
		nfs4_close_state(state, data->o_arg.fmode);
	nfs4_opendata_put(data);

static const struct rpc_call_ops nfs4_open_ops = {
	.rpc_call_prepare = nfs4_open_prepare,
	.rpc_call_done = nfs4_open_done,
	.rpc_release = nfs4_open_release,

static const struct rpc_call_ops nfs4_recover_open_ops = {
	.rpc_call_prepare = nfs4_recover_open_prepare,
	.rpc_call_done = nfs4_open_done,
	.rpc_release = nfs4_open_release,
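
/*
 * Issue the OPEN compound as an asynchronous RPC and wait for it to
 * complete; the recovery variant runs with privileged callbacks.
 */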
static int nfs4_run_open_task(struct nfs4_opendata *data, int isrecover)
	struct inode *dir = data->dir->d_inode;
	struct nfs_server *server = NFS_SERVER(dir);
	struct nfs_openargs *o_arg = &data->o_arg;
	struct nfs_openres *o_res = &data->o_res;
	struct rpc_task *task;
	struct rpc_message msg = {
		.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_OPEN],
		.rpc_cred = data->owner->so_cred,
	struct rpc_task_setup task_setup_data = {
		.rpc_client = server->client,
		.rpc_message = &msg,
		.callback_ops = &nfs4_open_ops,
		.callback_data = data,
		.workqueue = nfsiod_workqueue,
		.flags = RPC_TASK_ASYNC,

	nfs41_init_sequence(&o_arg->seq_args, &o_res->seq_res, 1);
	kref_get(&data->kref);
	data->rpc_status = 0;
	data->cancelled = 0;
		task_setup_data.callback_ops = &nfs4_recover_open_ops;
	task = rpc_run_task(&task_setup_data);
		return PTR_ERR(task);
	status = nfs4_wait_for_completion_rpc_task(task);
		data->cancelled = 1;
	status = data->rpc_status;
static int _nfs4_recover_proc_open(struct nfs4_opendata *data)
	struct inode *dir = data->dir->d_inode;
	struct nfs_openres *o_res = &data->o_res;

	status = nfs4_run_open_task(data, 1);
	if (status != 0 || !data->rpc_done)
	nfs_fattr_map_and_free_names(NFS_SERVER(dir), &data->f_attr);
	if (o_res->rflags & NFS4_OPEN_RESULT_CONFIRM) {
		status = _nfs4_proc_open_confirm(data);

 * Note: On error, nfs4_proc_open will free the struct nfs4_opendata
static int _nfs4_proc_open(struct nfs4_opendata *data)
	struct inode *dir = data->dir->d_inode;
	struct nfs_server *server = NFS_SERVER(dir);
	struct nfs_openargs *o_arg = &data->o_arg;
	struct nfs_openres *o_res = &data->o_res;

	status = nfs4_run_open_task(data, 0);
	if (!data->rpc_done)
	if (status == -NFS4ERR_BADNAME &&
	    !(o_arg->open_flags & O_CREAT))
	nfs_fattr_map_and_free_names(server, &data->f_attr);
	if (o_arg->open_flags & O_CREAT)
		update_changeattr(dir, &o_res->cinfo);
	if ((o_res->rflags & NFS4_OPEN_RESULT_LOCKTYPE_POSIX) == 0)
		server->caps &= ~NFS_CAP_POSIX_LOCK;
	if (o_res->rflags & NFS4_OPEN_RESULT_CONFIRM) {
		status = _nfs4_proc_open_confirm(data);
	if (!(o_res->f_attr->valid & NFS_ATTR_FATTR))
		_nfs4_proc_getattr(server, &o_res->fh, o_res->f_attr);
static int nfs4_client_recover_expired_lease(struct nfs_client *clp)
	for (loop = NFS4_MAX_LOOP_ON_RECOVER; loop != 0; loop--) {
		ret = nfs4_wait_clnt_recover(clp);
		if (!test_bit(NFS4CLNT_LEASE_EXPIRED, &clp->cl_state) &&
		    !test_bit(NFS4CLNT_CHECK_LEASE, &clp->cl_state))
		nfs4_schedule_state_manager(clp);

static int nfs4_recover_expired_lease(struct nfs_server *server)
	return nfs4_client_recover_expired_lease(server->nfs_client);

 * reclaim state on the server after a network partition.
 * Assumes caller holds the appropriate lock
static int _nfs4_open_expired(struct nfs_open_context *ctx, struct nfs4_state *state)
	struct nfs4_opendata *opendata;

	opendata = nfs4_open_recoverdata_alloc(ctx, state);
	if (IS_ERR(opendata))
		return PTR_ERR(opendata);
	ret = nfs4_open_recover(opendata, state);
		d_drop(ctx->dentry);
	nfs4_opendata_put(opendata);

static int nfs4_do_open_expired(struct nfs_open_context *ctx, struct nfs4_state *state)
	struct nfs_server *server = NFS_SERVER(state->inode);
	struct nfs4_exception exception = { };

		err = _nfs4_open_expired(ctx, state);
		case -NFS4ERR_GRACE:
		case -NFS4ERR_DELAY:
			nfs4_handle_exception(server, err, &exception);
	} while (exception.retry);

static int nfs4_open_expired(struct nfs4_state_owner *sp, struct nfs4_state *state)
	struct nfs_open_context *ctx;

	ctx = nfs4_state_find_open_context(state);
		return PTR_ERR(ctx);
	ret = nfs4_do_open_expired(ctx, state);
	put_nfs_open_context(ctx);
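
/*
 * NFSv4.1: use TEST_STATEID/FREE_STATEID to decide whether the open and
 * delegation stateids survived a suspected lease expiry.
 */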
#if defined(CONFIG_NFS_V4_1)
static int nfs41_check_expired_stateid(struct nfs4_state *state, nfs4_stateid *stateid, unsigned int flags)
	int status = NFS_OK;
	struct nfs_server *server = NFS_SERVER(state->inode);

	if (state->flags & flags) {
		status = nfs41_test_stateid(server, stateid);
		if (status != NFS_OK) {
			nfs41_free_stateid(server, stateid);
			state->flags &= ~flags;

static int nfs41_open_expired(struct nfs4_state_owner *sp, struct nfs4_state *state)
	int deleg_status, open_status;
	int deleg_flags = 1 << NFS_DELEGATED_STATE;
	int open_flags = (1 << NFS_O_RDONLY_STATE) | (1 << NFS_O_WRONLY_STATE) | (1 << NFS_O_RDWR_STATE);

	deleg_status = nfs41_check_expired_stateid(state, &state->stateid, deleg_flags);
	open_status = nfs41_check_expired_stateid(state, &state->open_stateid, open_flags);

	if ((deleg_status == NFS_OK) && (open_status == NFS_OK))
	return nfs4_open_expired(sp, state);
 * on an EXCLUSIVE create, the server should send back a bitmask with FATTR4_*
 * fields corresponding to attributes that were used to store the verifier.
 * Make sure we clobber those fields in the later setattr call
static inline void nfs4_exclusive_attrset(struct nfs4_opendata *opendata, struct iattr *sattr)
	if ((opendata->o_res.attrset[1] & FATTR4_WORD1_TIME_ACCESS) &&
	    !(sattr->ia_valid & ATTR_ATIME_SET))
		sattr->ia_valid |= ATTR_ATIME;

	if ((opendata->o_res.attrset[1] & FATTR4_WORD1_TIME_MODIFY) &&
	    !(sattr->ia_valid & ATTR_MTIME_SET))
		sattr->ia_valid |= ATTR_MTIME;
 * Returns a referenced nfs4_state
static int _nfs4_do_open(struct inode *dir,
			struct dentry *dentry,
			struct iattr *sattr,
			struct rpc_cred *cred,
			struct nfs4_state **res,
			struct nfs4_threshold **ctx_th)
	struct nfs4_state_owner *sp;
	struct nfs4_state *state = NULL;
	struct nfs_server *server = NFS_SERVER(dir);
	struct nfs4_opendata *opendata;

	/* Protect against reboot recovery conflicts */
	sp = nfs4_get_state_owner(server, cred, GFP_KERNEL);
		dprintk("nfs4_do_open: nfs4_get_state_owner failed!\n");
	status = nfs4_recover_expired_lease(server);
		goto err_put_state_owner;
	if (dentry->d_inode != NULL)
		nfs4_return_incompatible_delegation(dentry->d_inode, fmode);
	opendata = nfs4_opendata_alloc(dentry, sp, fmode, flags, sattr, GFP_KERNEL);
	if (opendata == NULL)
		goto err_put_state_owner;

	if (ctx_th && server->attr_bitmask[2] & FATTR4_WORD2_MDSTHRESHOLD) {
		opendata->f_attr.mdsthreshold = pnfs_mdsthreshold_alloc();
		if (!opendata->f_attr.mdsthreshold)
			goto err_opendata_put;
	if (dentry->d_inode != NULL)
		opendata->state = nfs4_get_open_state(dentry->d_inode, sp);

	status = _nfs4_proc_open(opendata);
		goto err_opendata_put;

	state = nfs4_opendata_to_nfs4_state(opendata);
	status = PTR_ERR(state);
		goto err_opendata_put;
	if (server->caps & NFS_CAP_POSIX_LOCK)
		set_bit(NFS_STATE_POSIX_LOCKS, &state->flags);

	if (opendata->o_arg.open_flags & O_EXCL) {
		nfs4_exclusive_attrset(opendata, sattr);

		nfs_fattr_init(opendata->o_res.f_attr);
		status = nfs4_do_setattr(state->inode, cred,
				opendata->o_res.f_attr, sattr,
			nfs_setattr_update_inode(state->inode, sattr);
			nfs_post_op_update_inode(state->inode, opendata->o_res.f_attr);

	if (pnfs_use_threshold(ctx_th, opendata->f_attr.mdsthreshold, server))
		*ctx_th = opendata->f_attr.mdsthreshold;
		kfree(opendata->f_attr.mdsthreshold);
	opendata->f_attr.mdsthreshold = NULL;

	nfs4_opendata_put(opendata);
	nfs4_put_state_owner(sp);
	kfree(opendata->f_attr.mdsthreshold);
	nfs4_opendata_put(opendata);
err_put_state_owner:
	nfs4_put_state_owner(sp);
static struct nfs4_state *nfs4_do_open(struct inode *dir,
			struct dentry *dentry,
			struct iattr *sattr,
			struct rpc_cred *cred,
			struct nfs4_threshold **ctx_th)
	struct nfs4_exception exception = { };
	struct nfs4_state *res;

		status = _nfs4_do_open(dir, dentry, fmode, flags, sattr, cred,
		/* NOTE: BAD_SEQID means the server and client disagree about the
		 * book-keeping w.r.t. state-changing operations
		 * (OPEN/CLOSE/LOCK/LOCKU...)
		 * It is actually a sign of a bug on the client or on the server.
		 *
		 * If we receive a BAD_SEQID error in the particular case of
		 * doing an OPEN, we assume that nfs_increment_open_seqid() will
		 * have unhashed the old state_owner for us, and that we can
		 * therefore safely retry using a new one. We should still warn
		 * the user though...
		if (status == -NFS4ERR_BAD_SEQID) {
			pr_warn_ratelimited("NFS: v4 server %s "
					" returned a bad sequence-id error!\n",
					NFS_SERVER(dir)->nfs_client->cl_hostname);
			exception.retry = 1;
		 * BAD_STATEID on OPEN means that the server cancelled our
		 * state before it received the OPEN_CONFIRM.
		 * Recover by retrying the request as per the discussion
		 * on Page 181 of RFC3530.
		if (status == -NFS4ERR_BAD_STATEID) {
			exception.retry = 1;
		if (status == -EAGAIN) {
			/* We must have found a delegation */
			exception.retry = 1;
		res = ERR_PTR(nfs4_handle_exception(NFS_SERVER(dir),
				status, &exception));
	} while (exception.retry);
static int _nfs4_do_setattr(struct inode *inode, struct rpc_cred *cred,
			    struct nfs_fattr *fattr, struct iattr *sattr,
			    struct nfs4_state *state)
	struct nfs_server *server = NFS_SERVER(inode);
	struct nfs_setattrargs arg = {
		.fh = NFS_FH(inode),
		.bitmask = server->attr_bitmask,
	struct nfs_setattrres res = {
	struct rpc_message msg = {
		.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SETATTR],
	unsigned long timestamp = jiffies;

	nfs_fattr_init(fattr);
	if (state != NULL) {
		nfs4_select_rw_stateid(&arg.stateid, state, FMODE_WRITE,
				current->files, current->tgid);
	} else if (nfs4_copy_delegation_stateid(&arg.stateid, inode,
		/* Use that stateid */
		nfs4_stateid_copy(&arg.stateid, &zero_stateid);

	status = nfs4_call_sync(server->client, server, &msg, &arg.seq_args, &res.seq_res, 1);
	if (status == 0 && state != NULL)
		renew_lease(server, timestamp);

static int nfs4_do_setattr(struct inode *inode, struct rpc_cred *cred,
			   struct nfs_fattr *fattr, struct iattr *sattr,
			   struct nfs4_state *state)
	struct nfs_server *server = NFS_SERVER(inode);
	struct nfs4_exception exception = {

		err = _nfs4_do_setattr(inode, cred, fattr, sattr, state);
		case -NFS4ERR_OPENMODE:
			if (state && !(state->state & FMODE_WRITE)) {
				if (sattr->ia_valid & ATTR_OPEN)
		err = nfs4_handle_exception(server, err, &exception);
	} while (exception.retry);
struct nfs4_closedata {
	struct inode *inode;
	struct nfs4_state *state;
	struct nfs_closeargs arg;
	struct nfs_closeres res;
	struct nfs_fattr fattr;
	unsigned long timestamp;

static void nfs4_free_closedata(void *data)
	struct nfs4_closedata *calldata = data;
	struct nfs4_state_owner *sp = calldata->state->owner;
	struct super_block *sb = calldata->state->inode->i_sb;

		pnfs_roc_release(calldata->state->inode);
	nfs4_put_open_state(calldata->state);
	nfs_free_seqid(calldata->arg.seqid);
	nfs4_put_state_owner(sp);
	nfs_sb_deactive(sb);

static void nfs4_close_clear_stateid_flags(struct nfs4_state *state,
	spin_lock(&state->owner->so_lock);
	if (!(fmode & FMODE_READ))
		clear_bit(NFS_O_RDONLY_STATE, &state->flags);
	if (!(fmode & FMODE_WRITE))
		clear_bit(NFS_O_WRONLY_STATE, &state->flags);
	clear_bit(NFS_O_RDWR_STATE, &state->flags);
	spin_unlock(&state->owner->so_lock);
static void nfs4_close_done(struct rpc_task *task, void *data)
	struct nfs4_closedata *calldata = data;
	struct nfs4_state *state = calldata->state;
	struct nfs_server *server = NFS_SERVER(calldata->inode);

	dprintk("%s: begin!\n", __func__);
	if (!nfs4_sequence_done(task, &calldata->res.seq_res))
	/* hmm. we are done with the inode, and in the process of freeing
	 * the state_owner. we keep this around to process errors
	switch (task->tk_status) {
			pnfs_roc_set_barrier(state->inode,
					calldata->roc_barrier);
		nfs_set_open_stateid(state, &calldata->res.stateid, 0);
		renew_lease(server, calldata->timestamp);
		nfs4_close_clear_stateid_flags(state,
				calldata->arg.fmode);
	case -NFS4ERR_STALE_STATEID:
	case -NFS4ERR_OLD_STATEID:
	case -NFS4ERR_BAD_STATEID:
	case -NFS4ERR_EXPIRED:
		if (calldata->arg.fmode == 0)
		if (nfs4_async_handle_error(task, server, state) == -EAGAIN)
			rpc_restart_call_prepare(task);
	nfs_release_seqid(calldata->arg.seqid);
	nfs_refresh_inode(calldata->inode, calldata->res.fattr);
	dprintk("%s: done, ret = %d!\n", __func__, task->tk_status);
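
/*
 * Decide whether this request should go out as a full CLOSE or as an
 * OPEN_DOWNGRADE, based on the reader/writer counts still held on the state.
 */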
static void nfs4_close_prepare(struct rpc_task *task, void *data)
	struct nfs4_closedata *calldata = data;
	struct nfs4_state *state = calldata->state;

	dprintk("%s: begin!\n", __func__);
	if (nfs_wait_on_sequence(calldata->arg.seqid, task) != 0)

	task->tk_msg.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_OPEN_DOWNGRADE];
	calldata->arg.fmode = FMODE_READ|FMODE_WRITE;
	spin_lock(&state->owner->so_lock);
	/* Calculate the change in open mode */
	if (state->n_rdwr == 0) {
		if (state->n_rdonly == 0) {
			call_close |= test_bit(NFS_O_RDONLY_STATE, &state->flags);
			call_close |= test_bit(NFS_O_RDWR_STATE, &state->flags);
			calldata->arg.fmode &= ~FMODE_READ;
		if (state->n_wronly == 0) {
			call_close |= test_bit(NFS_O_WRONLY_STATE, &state->flags);
			call_close |= test_bit(NFS_O_RDWR_STATE, &state->flags);
			calldata->arg.fmode &= ~FMODE_WRITE;
	spin_unlock(&state->owner->so_lock);

		/* Note: exit _without_ calling nfs4_close_done */
		task->tk_action = NULL;

	if (calldata->arg.fmode == 0) {
		task->tk_msg.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_CLOSE];
		if (calldata->roc &&
		    pnfs_roc_drain(calldata->inode, &calldata->roc_barrier)) {
			rpc_sleep_on(&NFS_SERVER(calldata->inode)->roc_rpcwaitq,

	nfs_fattr_init(calldata->res.fattr);
	calldata->timestamp = jiffies;
	if (nfs4_setup_sequence(NFS_SERVER(calldata->inode),
				&calldata->arg.seq_args,
				&calldata->res.seq_res,
	rpc_call_start(task);
	dprintk("%s: done!\n", __func__);

static const struct rpc_call_ops nfs4_close_ops = {
	.rpc_call_prepare = nfs4_close_prepare,
	.rpc_call_done = nfs4_close_done,
	.rpc_release = nfs4_free_closedata,
2133 * It is possible for data to be read/written from a mem-mapped file
2134 * after the sys_close call (which hits the vfs layer as a flush).
2135 * This means that we can't safely call nfsv4 close on a file until
2136 * the inode is cleared. This in turn means that we are not good
2137 * NFSv4 citizens - we do not indicate to the server to update the file's
2138 * share state even when we are done with one of the three share
2139 * stateid's in the inode.
2141 * NOTE: Caller must be holding the sp->so_owner semaphore!
int nfs4_do_close(struct nfs4_state *state, gfp_t gfp_mask, int wait, bool roc)
{
	struct nfs_server *server = NFS_SERVER(state->inode);
	struct nfs4_closedata *calldata;
	struct nfs4_state_owner *sp = state->owner;
	struct rpc_task *task;
	struct rpc_message msg = {
		.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_CLOSE],
		.rpc_cred = state->owner->so_cred,
	};
	struct rpc_task_setup task_setup_data = {
		.rpc_client = server->client,
		.rpc_message = &msg,
		.callback_ops = &nfs4_close_ops,
		.workqueue = nfsiod_workqueue,
		.flags = RPC_TASK_ASYNC,
	};
	int status = -ENOMEM;

	calldata = kzalloc(sizeof(*calldata), gfp_mask);
	if (calldata == NULL)
		goto out;
	nfs41_init_sequence(&calldata->arg.seq_args, &calldata->res.seq_res, 1);
	calldata->inode = state->inode;
	calldata->state = state;
	calldata->arg.fh = NFS_FH(state->inode);
	calldata->arg.stateid = &state->open_stateid;
	/* Serialization for the sequence id */
	calldata->arg.seqid = nfs_alloc_seqid(&state->owner->so_seqid, gfp_mask);
	if (calldata->arg.seqid == NULL)
		goto out_free_calldata;
	calldata->arg.fmode = 0;
	calldata->arg.bitmask = server->cache_consistency_bitmask;
	calldata->res.fattr = &calldata->fattr;
	calldata->res.seqid = calldata->arg.seqid;
	calldata->res.server = server;
	calldata->roc = roc;
	nfs_sb_active(calldata->inode->i_sb);

	msg.rpc_argp = &calldata->arg;
	msg.rpc_resp = &calldata->res;
	task_setup_data.callback_data = calldata;
	task = rpc_run_task(&task_setup_data);
	if (IS_ERR(task))
		return PTR_ERR(task);
	status = 0;
	if (wait)
		status = rpc_wait_for_completion_task(task);
	rpc_put_task(task);
	return status;
out_free_calldata:
	kfree(calldata);
out:
	if (roc)
		pnfs_roc_release(state->inode);
	nfs4_put_open_state(state);
	nfs4_put_state_owner(sp);
	return status;
}
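/*
 * nfs4_do_close() always runs the CLOSE as an asynchronous rpc_task; the
 * "wait" argument only decides whether the caller blocks for completion.
 * The nfs4_closedata pins the superblock via nfs_sb_active(), so the RPC
 * can complete safely even after the last userspace reference to the file
 * has gone away.
 */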
static struct inode *
nfs4_atomic_open(struct inode *dir, struct nfs_open_context *ctx, int open_flags, struct iattr *attr)
{
	struct nfs4_state *state;

	/* Protect against concurrent sillydeletes */
	state = nfs4_do_open(dir, ctx->dentry, ctx->mode, open_flags, attr,
			     ctx->cred, &ctx->mdsthreshold);
	if (IS_ERR(state))
		return ERR_CAST(state);
	ctx->state = state;
	return igrab(state->inode);
}

static void nfs4_close_context(struct nfs_open_context *ctx, int is_sync)
{
	if (ctx->state == NULL)
		return;
	if (is_sync)
		nfs4_close_sync(ctx->state, ctx->mode);
	else
		nfs4_close_state(ctx->state, ctx->mode);
}
static int _nfs4_server_capabilities(struct nfs_server *server, struct nfs_fh *fhandle)
{
	struct nfs4_server_caps_arg args = {
		.fhandle = fhandle,
	};
	struct nfs4_server_caps_res res = {};
	struct rpc_message msg = {
		.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SERVER_CAPS],
		.rpc_argp = &args,
		.rpc_resp = &res,
	};
	int status;

	status = nfs4_call_sync(server->client, server, &msg, &args.seq_args, &res.seq_res, 0);
	if (status == 0) {
		memcpy(server->attr_bitmask, res.attr_bitmask, sizeof(server->attr_bitmask));
		server->caps &= ~(NFS_CAP_ACLS|NFS_CAP_HARDLINKS|
				NFS_CAP_SYMLINKS|NFS_CAP_FILEID|
				NFS_CAP_MODE|NFS_CAP_NLINK|NFS_CAP_OWNER|
				NFS_CAP_OWNER_GROUP|NFS_CAP_ATIME|
				NFS_CAP_CTIME|NFS_CAP_MTIME);
		if (res.attr_bitmask[0] & FATTR4_WORD0_ACL)
			server->caps |= NFS_CAP_ACLS;
		if (res.has_links != 0)
			server->caps |= NFS_CAP_HARDLINKS;
		if (res.has_symlinks != 0)
			server->caps |= NFS_CAP_SYMLINKS;
		if (res.attr_bitmask[0] & FATTR4_WORD0_FILEID)
			server->caps |= NFS_CAP_FILEID;
		if (res.attr_bitmask[1] & FATTR4_WORD1_MODE)
			server->caps |= NFS_CAP_MODE;
		if (res.attr_bitmask[1] & FATTR4_WORD1_NUMLINKS)
			server->caps |= NFS_CAP_NLINK;
		if (res.attr_bitmask[1] & FATTR4_WORD1_OWNER)
			server->caps |= NFS_CAP_OWNER;
		if (res.attr_bitmask[1] & FATTR4_WORD1_OWNER_GROUP)
			server->caps |= NFS_CAP_OWNER_GROUP;
		if (res.attr_bitmask[1] & FATTR4_WORD1_TIME_ACCESS)
			server->caps |= NFS_CAP_ATIME;
		if (res.attr_bitmask[1] & FATTR4_WORD1_TIME_METADATA)
			server->caps |= NFS_CAP_CTIME;
		if (res.attr_bitmask[1] & FATTR4_WORD1_TIME_MODIFY)
			server->caps |= NFS_CAP_MTIME;

		memcpy(server->cache_consistency_bitmask, res.attr_bitmask, sizeof(server->cache_consistency_bitmask));
		server->cache_consistency_bitmask[0] &= FATTR4_WORD0_CHANGE|FATTR4_WORD0_SIZE;
		server->cache_consistency_bitmask[1] &= FATTR4_WORD1_TIME_METADATA|FATTR4_WORD1_TIME_MODIFY;
		server->acl_bitmask = res.acl_bitmask;
		server->fh_expire_type = res.fh_expire_type;
	}

	return status;
}

int nfs4_server_capabilities(struct nfs_server *server, struct nfs_fh *fhandle)
{
	struct nfs4_exception exception = { };
	int err;
	do {
		err = nfs4_handle_exception(server,
				_nfs4_server_capabilities(server, fhandle),
				&exception);
	} while (exception.retry);
	return err;
}
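/*
 * In effect, _nfs4_server_capabilities() translates the server's supported
 * attribute mask into NFS_CAP_* feature bits: a capability is advertised
 * only if the matching FATTR4_WORD*_* bit came back in res.attr_bitmask.
 * The cache consistency bitmask is then trimmed down to just change, size,
 * ctime and mtime, which is all the post-op attribute data the client needs
 * for cache revalidation.
 */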
static int _nfs4_lookup_root(struct nfs_server *server, struct nfs_fh *fhandle,
		struct nfs_fsinfo *info)
{
	struct nfs4_lookup_root_arg args = {
		.bitmask = nfs4_fattr_bitmap,
	};
	struct nfs4_lookup_res res = {
		.server = server,
		.fattr = info->fattr,
		.fh = fhandle,
	};
	struct rpc_message msg = {
		.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LOOKUP_ROOT],
		.rpc_argp = &args,
		.rpc_resp = &res,
	};

	nfs_fattr_init(info->fattr);
	return nfs4_call_sync(server->client, server, &msg, &args.seq_args, &res.seq_res, 0);
}

static int nfs4_lookup_root(struct nfs_server *server, struct nfs_fh *fhandle,
		struct nfs_fsinfo *info)
{
	struct nfs4_exception exception = { };
	int err;
	do {
		err = _nfs4_lookup_root(server, fhandle, info);
		switch (err) {
		case 0:
		case -NFS4ERR_WRONGSEC:
			goto out;
		default:
			err = nfs4_handle_exception(server, err, &exception);
		}
	} while (exception.retry);
out:
	return err;
}

static int nfs4_lookup_root_sec(struct nfs_server *server, struct nfs_fh *fhandle,
				struct nfs_fsinfo *info, rpc_authflavor_t flavor)
{
	struct rpc_auth *auth;
	int ret;

	auth = rpcauth_create(flavor, server->client);
	if (IS_ERR(auth)) {
		ret = -EIO;
		goto out;
	}
	ret = nfs4_lookup_root(server, fhandle, info);
out:
	return ret;
}

static int nfs4_find_root_sec(struct nfs_server *server, struct nfs_fh *fhandle,
			      struct nfs_fsinfo *info)
{
	int i, len, status = 0;
	rpc_authflavor_t flav_array[NFS_MAX_SECFLAVORS];

	len = gss_mech_list_pseudoflavors(&flav_array[0]);
	flav_array[len] = RPC_AUTH_NULL;
	len += 1;

	for (i = 0; i < len; i++) {
		status = nfs4_lookup_root_sec(server, fhandle, info, flav_array[i]);
		if (status == -NFS4ERR_WRONGSEC || status == -EACCES)
			continue;
		break;
	}
	/*
	 * -EACCES could mean that the user doesn't have correct permissions
	 * to access the mount.  It could also mean that we tried to mount
	 * with a gss auth flavor, but rpc.gssd isn't running.  Either way,
	 * existing mount programs don't handle -EACCES very well so it should
	 * be mapped to -EPERM instead.
	 */
	if (status == -EACCES)
		status = -EPERM;
	return status;
}
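/*
 * nfs4_find_root_sec() is the fallback used when the mount did not pin a
 * security flavor: it walks every GSS pseudoflavor the kernel knows about,
 * followed by AUTH_NULL, and keeps the first flavor for which LOOKUP of the
 * root stops failing with NFS4ERR_WRONGSEC or EACCES.
 */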
/*
 * get the file handle for the "/" directory on the server
 */
int nfs4_proc_get_rootfh(struct nfs_server *server, struct nfs_fh *fhandle,
			 struct nfs_fsinfo *info)
{
	int minor_version = server->nfs_client->cl_minorversion;
	int status = nfs4_lookup_root(server, fhandle, info);
	if ((status == -NFS4ERR_WRONGSEC) && !(server->flags & NFS_MOUNT_SECFLAVOUR))
		/*
		 * A status of -NFS4ERR_WRONGSEC will be mapped to -EPERM
		 * by nfs4_map_errors() as this function exits.
		 */
		status = nfs_v4_minor_ops[minor_version]->find_root_sec(server, fhandle, info);
	if (status == 0)
		status = nfs4_server_capabilities(server, fhandle);
	if (status == 0)
		status = nfs4_do_fsinfo(server, fhandle, info);
	return nfs4_map_errors(status);
}
static int nfs4_proc_get_root(struct nfs_server *server, struct nfs_fh *mntfh,
			      struct nfs_fsinfo *info)
{
	int error;
	struct nfs_fattr *fattr = info->fattr;

	error = nfs4_server_capabilities(server, mntfh);
	if (error < 0) {
		dprintk("nfs4_get_root: getcaps error = %d\n", -error);
		return error;
	}

	error = nfs4_proc_getattr(server, mntfh, fattr);
	if (error < 0) {
		dprintk("nfs4_get_root: getattr error = %d\n", -error);
		return error;
	}

	if (fattr->valid & NFS_ATTR_FATTR_FSID &&
	    !nfs_fsid_equal(&server->fsid, &fattr->fsid))
		memcpy(&server->fsid, &fattr->fsid, sizeof(server->fsid));

	return error;
}
/*
 * Get locations and (maybe) other attributes of a referral.
 * Note that we'll actually follow the referral later when
 * we detect fsid mismatch in inode revalidation
 */
static int nfs4_get_referral(struct rpc_clnt *client, struct inode *dir,
			     const struct qstr *name, struct nfs_fattr *fattr,
			     struct nfs_fh *fhandle)
{
	int status = -ENOMEM;
	struct page *page = NULL;
	struct nfs4_fs_locations *locations = NULL;

	page = alloc_page(GFP_KERNEL);
	if (page == NULL)
		goto out;
	locations = kmalloc(sizeof(struct nfs4_fs_locations), GFP_KERNEL);
	if (locations == NULL)
		goto out;

	status = nfs4_proc_fs_locations(client, dir, name, locations, page);
	if (status != 0)
		goto out;
	/* Make sure server returned a different fsid for the referral */
	if (nfs_fsid_equal(&NFS_SERVER(dir)->fsid, &locations->fattr.fsid)) {
		dprintk("%s: server did not return a different fsid for"
			" a referral at %s\n", __func__, name->name);
		status = -EIO;
		goto out;
	}
	/* Fixup attributes for the nfs_lookup() call to nfs_fhget() */
	nfs_fixup_referral_attributes(&locations->fattr);

	/* replace the lookup nfs_fattr with the locations nfs_fattr */
	memcpy(fattr, &locations->fattr, sizeof(struct nfs_fattr));
	memset(fhandle, 0, sizeof(struct nfs_fh));
out:
	if (page)
		__free_page(page);
	kfree(locations);
	return status;
}
static int _nfs4_proc_getattr(struct nfs_server *server, struct nfs_fh *fhandle, struct nfs_fattr *fattr)
{
	struct nfs4_getattr_arg args = {
		.fh = fhandle,
		.bitmask = server->attr_bitmask,
	};
	struct nfs4_getattr_res res = {
		.fattr = fattr,
		.server = server,
	};
	struct rpc_message msg = {
		.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_GETATTR],
		.rpc_argp = &args,
		.rpc_resp = &res,
	};

	nfs_fattr_init(fattr);
	return nfs4_call_sync(server->client, server, &msg, &args.seq_args, &res.seq_res, 0);
}

static int nfs4_proc_getattr(struct nfs_server *server, struct nfs_fh *fhandle, struct nfs_fattr *fattr)
{
	struct nfs4_exception exception = { };
	int err;
	do {
		err = nfs4_handle_exception(server,
				_nfs4_proc_getattr(server, fhandle, fattr),
				&exception);
	} while (exception.retry);
	return err;
}
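/*
 * The _nfs4_proc_getattr()/nfs4_proc_getattr() split above is the pattern
 * used throughout this file: the leading-underscore helper issues exactly
 * one compound, and the public wrapper loops it through
 * nfs4_handle_exception(), which absorbs state-recovery and NFS4ERR_DELAY
 * style errors and sets exception.retry when the call should be replayed.
 */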
/* 
 * The file is not closed if it is opened due to a request to change
 * the size of the file. The open call will not be needed once the
 * VFS layer lookup-intents are implemented.
 *
 * Close is called when the inode is destroyed.
 * If we haven't opened the file for O_WRONLY, we
 * need to in the size_change case to obtain a stateid.
 *
 * Because OPEN is always done by name in nfsv4, it is
 * possible that we opened a different file by the same
 * name.  We can recognize this race condition, but we
 * can't do anything about it besides returning an error.
 *
 * This will be fixed with VFS changes (lookup-intent).
 */
static int
nfs4_proc_setattr(struct dentry *dentry, struct nfs_fattr *fattr,
		  struct iattr *sattr)
{
	struct inode *inode = dentry->d_inode;
	struct rpc_cred *cred = NULL;
	struct nfs4_state *state = NULL;
	int status;

	if (pnfs_ld_layoutret_on_setattr(inode))
		pnfs_return_layout(inode);

	nfs_fattr_init(fattr);

	/* Search for an existing open(O_WRITE) file */
	if (sattr->ia_valid & ATTR_FILE) {
		struct nfs_open_context *ctx;

		ctx = nfs_file_open_context(sattr->ia_file);
		if (ctx) {
			cred = ctx->cred;
			state = ctx->state;
		}
	}

	/* Deal with open(O_TRUNC) */
	if (sattr->ia_valid & ATTR_OPEN)
		sattr->ia_valid &= ~(ATTR_MTIME|ATTR_CTIME|ATTR_OPEN);

	status = nfs4_do_setattr(inode, cred, fattr, sattr, state);
	if (status == 0)
		nfs_setattr_update_inode(inode, sattr);
	return status;
}
static int _nfs4_proc_lookup(struct rpc_clnt *clnt, struct inode *dir,
		const struct qstr *name, struct nfs_fh *fhandle,
		struct nfs_fattr *fattr)
{
	struct nfs_server *server = NFS_SERVER(dir);
	int status;
	struct nfs4_lookup_arg args = {
		.bitmask = server->attr_bitmask,
		.dir_fh = NFS_FH(dir),
		.name = name,
	};
	struct nfs4_lookup_res res = {
		.server = server,
		.fattr = fattr,
		.fh = fhandle,
	};
	struct rpc_message msg = {
		.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LOOKUP],
		.rpc_argp = &args,
		.rpc_resp = &res,
	};

	nfs_fattr_init(fattr);

	dprintk("NFS call lookup %s\n", name->name);
	status = nfs4_call_sync(clnt, server, &msg, &args.seq_args, &res.seq_res, 0);
	dprintk("NFS reply lookup: %d\n", status);
	return status;
}

static void nfs_fixup_secinfo_attributes(struct nfs_fattr *fattr)
{
	fattr->valid |= NFS_ATTR_FATTR_TYPE | NFS_ATTR_FATTR_MODE |
		NFS_ATTR_FATTR_NLINK | NFS_ATTR_FATTR_MOUNTPOINT;
	fattr->mode = S_IFDIR | S_IRUGO | S_IXUGO;
	fattr->nlink = 2;
}
static int nfs4_proc_lookup_common(struct rpc_clnt **clnt, struct inode *dir,
				   struct qstr *name, struct nfs_fh *fhandle,
				   struct nfs_fattr *fattr)
{
	struct nfs4_exception exception = { };
	struct rpc_clnt *client = *clnt;
	int err;
	do {
		err = _nfs4_proc_lookup(client, dir, name, fhandle, fattr);
		switch (err) {
		case -NFS4ERR_BADNAME:
			err = -ENOENT;
			goto out;
		case -NFS4ERR_MOVED:
			err = nfs4_get_referral(client, dir, name, fattr, fhandle);
			goto out;
		case -NFS4ERR_WRONGSEC:
			err = -EPERM;
			if (client != *clnt)
				goto out;

			client = nfs4_create_sec_client(client, dir, name);
			if (IS_ERR(client))
				return PTR_ERR(client);

			exception.retry = 1;
			break;
		default:
			err = nfs4_handle_exception(NFS_SERVER(dir), err, &exception);
		}
	} while (exception.retry);

out:
	if (err == 0)
		*clnt = client;
	else if (client != *clnt)
		rpc_shutdown_client(client);

	return err;
}

static int nfs4_proc_lookup(struct inode *dir, struct qstr *name,
			    struct nfs_fh *fhandle, struct nfs_fattr *fattr)
{
	int status;
	struct rpc_clnt *client = NFS_CLIENT(dir);

	status = nfs4_proc_lookup_common(&client, dir, name, fhandle, fattr);
	if (client != NFS_CLIENT(dir)) {
		rpc_shutdown_client(client);
		nfs_fixup_secinfo_attributes(fattr);
	}
	return status;
}

struct rpc_clnt *
nfs4_proc_lookup_mountpoint(struct inode *dir, struct qstr *name,
			    struct nfs_fh *fhandle, struct nfs_fattr *fattr)
{
	int status;
	struct rpc_clnt *client = rpc_clone_client(NFS_CLIENT(dir));

	status = nfs4_proc_lookup_common(&client, dir, name, fhandle, fattr);
	if (status < 0) {
		rpc_shutdown_client(client);
		return ERR_PTR(status);
	}
	return client;
}
static int _nfs4_proc_access(struct inode *inode, struct nfs_access_entry *entry)
{
	struct nfs_server *server = NFS_SERVER(inode);
	struct nfs4_accessargs args = {
		.fh = NFS_FH(inode),
		.bitmask = server->cache_consistency_bitmask,
	};
	struct nfs4_accessres res = {
		.server = server,
	};
	struct rpc_message msg = {
		.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_ACCESS],
		.rpc_argp = &args,
		.rpc_resp = &res,
		.rpc_cred = entry->cred,
	};
	int mode = entry->mask;
	int status;

	/*
	 * Determine which access bits we want to ask for...
	 */
	if (mode & MAY_READ)
		args.access |= NFS4_ACCESS_READ;
	if (S_ISDIR(inode->i_mode)) {
		if (mode & MAY_WRITE)
			args.access |= NFS4_ACCESS_MODIFY | NFS4_ACCESS_EXTEND | NFS4_ACCESS_DELETE;
		if (mode & MAY_EXEC)
			args.access |= NFS4_ACCESS_LOOKUP;
	} else {
		if (mode & MAY_WRITE)
			args.access |= NFS4_ACCESS_MODIFY | NFS4_ACCESS_EXTEND;
		if (mode & MAY_EXEC)
			args.access |= NFS4_ACCESS_EXECUTE;
	}

	res.fattr = nfs_alloc_fattr();
	if (res.fattr == NULL)
		return -ENOMEM;

	status = nfs4_call_sync(server->client, server, &msg, &args.seq_args, &res.seq_res, 0);
	if (!status) {
		if (res.access & NFS4_ACCESS_READ)
			entry->mask |= MAY_READ;
		if (res.access & (NFS4_ACCESS_MODIFY | NFS4_ACCESS_EXTEND | NFS4_ACCESS_DELETE))
			entry->mask |= MAY_WRITE;
		if (res.access & (NFS4_ACCESS_LOOKUP|NFS4_ACCESS_EXECUTE))
			entry->mask |= MAY_EXEC;
		nfs_refresh_inode(inode, res.fattr);
	}
	nfs_free_fattr(res.fattr);
	return status;
}

static int nfs4_proc_access(struct inode *inode, struct nfs_access_entry *entry)
{
	struct nfs4_exception exception = { };
	int err;
	do {
		err = nfs4_handle_exception(NFS_SERVER(inode),
				_nfs4_proc_access(inode, entry),
				&exception);
	} while (exception.retry);
	return err;
}
/*
 * TODO: For the time being, we don't try to get any attributes
 * along with any of the zero-copy operations READ, READDIR,
 * READLINK, WRITE.
 *
 * In the case of the first three, we want to put the GETATTR
 * after the read-type operation -- this is because it is hard
 * to predict the length of a GETATTR response in v4, and thus
 * align the READ data correctly.  This means that the GETATTR
 * may end up partially falling into the page cache, and we should
 * shift it into the 'tail' of the xdr_buf before processing.
 * To do this efficiently, we need to know the total length
 * of data received, which doesn't seem to be available outside
 * of the RPC layer.
 *
 * In the case of WRITE, we also want to put the GETATTR after
 * the operation -- in this case because we want to make sure
 * we get the post-operation mtime and size.  This means that
 * we can't use xdr_encode_pages() as written: we need a variant
 * of it which would leave room in the 'tail' iovec.
 *
 * Both of these changes to the XDR layer would in fact be quite
 * minor, but I decided to leave them for a subsequent patch.
 */
static int _nfs4_proc_readlink(struct inode *inode, struct page *page,
		unsigned int pgbase, unsigned int pglen)
{
	struct nfs4_readlink args = {
		.fh       = NFS_FH(inode),
		.pgbase	  = pgbase,
		.pglen    = pglen,
		.pages    = &page,
	};
	struct nfs4_readlink_res res;
	struct rpc_message msg = {
		.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_READLINK],
		.rpc_argp = &args,
		.rpc_resp = &res,
	};

	return nfs4_call_sync(NFS_SERVER(inode)->client, NFS_SERVER(inode), &msg, &args.seq_args, &res.seq_res, 0);
}

static int nfs4_proc_readlink(struct inode *inode, struct page *page,
		unsigned int pgbase, unsigned int pglen)
{
	struct nfs4_exception exception = { };
	int err;
	do {
		err = nfs4_handle_exception(NFS_SERVER(inode),
				_nfs4_proc_readlink(inode, page, pgbase, pglen),
				&exception);
	} while (exception.retry);
	return err;
}
/*
 * We will need to arrange for the VFS layer to provide an atomic open.
 * Until then, this create/open method is prone to inefficiency and race
 * conditions due to the lookup, create, and open VFS calls from sys_open()
 * placed on the wire.
 *
 * Given the above sorry state of affairs, I'm simply sending an OPEN.
 * The file will be opened again in the subsequent VFS open call
 * (nfs4_proc_file_open).
 *
 * The open for read will just hang around to be used by any process that
 * opens the file O_RDONLY. This will all be resolved with the VFS changes.
 */
static int
nfs4_proc_create(struct inode *dir, struct dentry *dentry, struct iattr *sattr,
		 int flags, struct nfs_open_context *ctx)
{
	struct dentry *de = dentry;
	struct nfs4_state *state;
	struct rpc_cred *cred = NULL;
	fmode_t fmode = 0;
	int status = 0;

	if (ctx != NULL) {
		cred = ctx->cred;
		de = ctx->dentry;
		fmode = ctx->mode;
	}
	sattr->ia_mode &= ~current_umask();
	state = nfs4_do_open(dir, de, fmode, flags, sattr, cred, NULL);
	d_drop(dentry);
	if (IS_ERR(state)) {
		status = PTR_ERR(state);
		goto out;
	}
	d_add(dentry, igrab(state->inode));
	nfs_set_verifier(dentry, nfs_save_change_attribute(dir));
	if (ctx != NULL)
		ctx->state = state;
	else
		nfs4_close_sync(state, fmode);
out:
	return status;
}
static int _nfs4_proc_remove(struct inode *dir, struct qstr *name)
{
	struct nfs_server *server = NFS_SERVER(dir);
	struct nfs_removeargs args = {
		.fh = NFS_FH(dir),
		.name = *name,
	};
	struct nfs_removeres res = {
		.server = server,
	};
	struct rpc_message msg = {
		.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_REMOVE],
		.rpc_argp = &args,
		.rpc_resp = &res,
	};
	int status;

	status = nfs4_call_sync(server->client, server, &msg, &args.seq_args, &res.seq_res, 1);
	if (status == 0)
		update_changeattr(dir, &res.cinfo);
	return status;
}

static int nfs4_proc_remove(struct inode *dir, struct qstr *name)
{
	struct nfs4_exception exception = { };
	int err;
	do {
		err = nfs4_handle_exception(NFS_SERVER(dir),
				_nfs4_proc_remove(dir, name),
				&exception);
	} while (exception.retry);
	return err;
}
static void nfs4_proc_unlink_setup(struct rpc_message *msg, struct inode *dir)
{
	struct nfs_server *server = NFS_SERVER(dir);
	struct nfs_removeargs *args = msg->rpc_argp;
	struct nfs_removeres *res = msg->rpc_resp;

	res->server = server;
	msg->rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_REMOVE];
	nfs41_init_sequence(&args->seq_args, &res->seq_res, 1);
}

static void nfs4_proc_unlink_rpc_prepare(struct rpc_task *task, struct nfs_unlinkdata *data)
{
	if (nfs4_setup_sequence(NFS_SERVER(data->dir),
				&data->args.seq_args,
				&data->res.seq_res,
				task))
		return;
	rpc_call_start(task);
}

static int nfs4_proc_unlink_done(struct rpc_task *task, struct inode *dir)
{
	struct nfs_removeres *res = task->tk_msg.rpc_resp;

	if (!nfs4_sequence_done(task, &res->seq_res))
		return 0;
	if (nfs4_async_handle_error(task, res->server, NULL) == -EAGAIN)
		return 0;
	update_changeattr(dir, &res->cinfo);
	return 1;
}
static void nfs4_proc_rename_setup(struct rpc_message *msg, struct inode *dir)
{
	struct nfs_server *server = NFS_SERVER(dir);
	struct nfs_renameargs *arg = msg->rpc_argp;
	struct nfs_renameres *res = msg->rpc_resp;

	msg->rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_RENAME];
	res->server = server;
	nfs41_init_sequence(&arg->seq_args, &res->seq_res, 1);
}

static void nfs4_proc_rename_rpc_prepare(struct rpc_task *task, struct nfs_renamedata *data)
{
	if (nfs4_setup_sequence(NFS_SERVER(data->old_dir),
				&data->args.seq_args,
				&data->res.seq_res,
				task))
		return;
	rpc_call_start(task);
}

static int nfs4_proc_rename_done(struct rpc_task *task, struct inode *old_dir,
				 struct inode *new_dir)
{
	struct nfs_renameres *res = task->tk_msg.rpc_resp;

	if (!nfs4_sequence_done(task, &res->seq_res))
		return 0;
	if (nfs4_async_handle_error(task, res->server, NULL) == -EAGAIN)
		return 0;

	update_changeattr(old_dir, &res->old_cinfo);
	update_changeattr(new_dir, &res->new_cinfo);
	return 1;
}
static int _nfs4_proc_rename(struct inode *old_dir, struct qstr *old_name,
			     struct inode *new_dir, struct qstr *new_name)
{
	struct nfs_server *server = NFS_SERVER(old_dir);
	struct nfs_renameargs arg = {
		.old_dir = NFS_FH(old_dir),
		.new_dir = NFS_FH(new_dir),
		.old_name = old_name,
		.new_name = new_name,
	};
	struct nfs_renameres res = {
		.server = server,
	};
	struct rpc_message msg = {
		.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_RENAME],
		.rpc_argp = &arg,
		.rpc_resp = &res,
	};
	int status = -ENOMEM;

	status = nfs4_call_sync(server->client, server, &msg, &arg.seq_args, &res.seq_res, 1);
	if (!status) {
		update_changeattr(old_dir, &res.old_cinfo);
		update_changeattr(new_dir, &res.new_cinfo);
	}
	return status;
}

static int nfs4_proc_rename(struct inode *old_dir, struct qstr *old_name,
			    struct inode *new_dir, struct qstr *new_name)
{
	struct nfs4_exception exception = { };
	int err;
	do {
		err = nfs4_handle_exception(NFS_SERVER(old_dir),
				_nfs4_proc_rename(old_dir, old_name,
						  new_dir, new_name),
				&exception);
	} while (exception.retry);
	return err;
}
static int _nfs4_proc_link(struct inode *inode, struct inode *dir, struct qstr *name)
{
	struct nfs_server *server = NFS_SERVER(inode);
	struct nfs4_link_arg arg = {
		.fh     = NFS_FH(inode),
		.dir_fh = NFS_FH(dir),
		.name   = name,
		.bitmask = server->attr_bitmask,
	};
	struct nfs4_link_res res = {
		.server = server,
	};
	struct rpc_message msg = {
		.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LINK],
		.rpc_argp = &arg,
		.rpc_resp = &res,
	};
	int status = -ENOMEM;

	res.fattr = nfs_alloc_fattr();
	if (res.fattr == NULL)
		goto out;

	status = nfs4_call_sync(server->client, server, &msg, &arg.seq_args, &res.seq_res, 1);
	if (!status) {
		update_changeattr(dir, &res.cinfo);
		nfs_post_op_update_inode(inode, res.fattr);
	}
out:
	nfs_free_fattr(res.fattr);
	return status;
}

static int nfs4_proc_link(struct inode *inode, struct inode *dir, struct qstr *name)
{
	struct nfs4_exception exception = { };
	int err;
	do {
		err = nfs4_handle_exception(NFS_SERVER(inode),
				_nfs4_proc_link(inode, dir, name),
				&exception);
	} while (exception.retry);
	return err;
}
struct nfs4_createdata {
	struct rpc_message msg;
	struct nfs4_create_arg arg;
	struct nfs4_create_res res;
	struct nfs_fh fh;
	struct nfs_fattr fattr;
};

static struct nfs4_createdata *nfs4_alloc_createdata(struct inode *dir,
		struct qstr *name, struct iattr *sattr, u32 ftype)
{
	struct nfs4_createdata *data;

	data = kzalloc(sizeof(*data), GFP_KERNEL);
	if (data != NULL) {
		struct nfs_server *server = NFS_SERVER(dir);

		data->msg.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_CREATE];
		data->msg.rpc_argp = &data->arg;
		data->msg.rpc_resp = &data->res;
		data->arg.dir_fh = NFS_FH(dir);
		data->arg.server = server;
		data->arg.name = name;
		data->arg.attrs = sattr;
		data->arg.ftype = ftype;
		data->arg.bitmask = server->attr_bitmask;
		data->res.server = server;
		data->res.fh = &data->fh;
		data->res.fattr = &data->fattr;
		nfs_fattr_init(data->res.fattr);
	}
	return data;
}

static int nfs4_do_create(struct inode *dir, struct dentry *dentry, struct nfs4_createdata *data)
{
	int status = nfs4_call_sync(NFS_SERVER(dir)->client, NFS_SERVER(dir), &data->msg,
				    &data->arg.seq_args, &data->res.seq_res, 1);
	if (status == 0) {
		update_changeattr(dir, &data->res.dir_cinfo);
		status = nfs_instantiate(dentry, data->res.fh, data->res.fattr);
	}
	return status;
}

static void nfs4_free_createdata(struct nfs4_createdata *data)
{
	kfree(data);
}
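/*
 * nfs4_alloc_createdata()/nfs4_do_create()/nfs4_free_createdata() factor
 * out the common CREATE plumbing; the symlink, mkdir and mknod paths below
 * only differ in the ftype and in the union members (symlink target or
 * device numbers) they fill in before calling nfs4_do_create().
 */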
static int _nfs4_proc_symlink(struct inode *dir, struct dentry *dentry,
		struct page *page, unsigned int len, struct iattr *sattr)
{
	struct nfs4_createdata *data;
	int status = -ENAMETOOLONG;

	if (len > NFS4_MAXPATHLEN)
		goto out;

	status = -ENOMEM;
	data = nfs4_alloc_createdata(dir, &dentry->d_name, sattr, NF4LNK);
	if (data == NULL)
		goto out;

	data->msg.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SYMLINK];
	data->arg.u.symlink.pages = &page;
	data->arg.u.symlink.len = len;

	status = nfs4_do_create(dir, dentry, data);

	nfs4_free_createdata(data);
out:
	return status;
}

static int nfs4_proc_symlink(struct inode *dir, struct dentry *dentry,
		struct page *page, unsigned int len, struct iattr *sattr)
{
	struct nfs4_exception exception = { };
	int err;
	do {
		err = nfs4_handle_exception(NFS_SERVER(dir),
				_nfs4_proc_symlink(dir, dentry, page,
							len, sattr),
				&exception);
	} while (exception.retry);
	return err;
}

static int _nfs4_proc_mkdir(struct inode *dir, struct dentry *dentry,
		struct iattr *sattr)
{
	struct nfs4_createdata *data;
	int status = -ENOMEM;

	data = nfs4_alloc_createdata(dir, &dentry->d_name, sattr, NF4DIR);
	if (data == NULL)
		goto out;

	status = nfs4_do_create(dir, dentry, data);

	nfs4_free_createdata(data);
out:
	return status;
}

static int nfs4_proc_mkdir(struct inode *dir, struct dentry *dentry,
		struct iattr *sattr)
{
	struct nfs4_exception exception = { };
	int err;

	sattr->ia_mode &= ~current_umask();
	do {
		err = nfs4_handle_exception(NFS_SERVER(dir),
				_nfs4_proc_mkdir(dir, dentry, sattr),
				&exception);
	} while (exception.retry);
	return err;
}
static int _nfs4_proc_readdir(struct dentry *dentry, struct rpc_cred *cred,
		u64 cookie, struct page **pages, unsigned int count, int plus)
{
	struct inode *dir = dentry->d_inode;
	struct nfs4_readdir_arg args = {
		.fh = NFS_FH(dir),
		.pages = pages,
		.pgbase = 0,
		.count = count,
		.bitmask = NFS_SERVER(dentry->d_inode)->attr_bitmask,
		.plus = plus,
	};
	struct nfs4_readdir_res res;
	struct rpc_message msg = {
		.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_READDIR],
		.rpc_argp = &args,
		.rpc_resp = &res,
		.rpc_cred = cred,
	};
	int status;

	dprintk("%s: dentry = %s/%s, cookie = %Lu\n", __func__,
			dentry->d_parent->d_name.name,
			dentry->d_name.name,
			(unsigned long long)cookie);
	nfs4_setup_readdir(cookie, NFS_COOKIEVERF(dir), dentry, &args);
	res.pgbase = args.pgbase;
	status = nfs4_call_sync(NFS_SERVER(dir)->client, NFS_SERVER(dir), &msg, &args.seq_args, &res.seq_res, 0);
	if (status >= 0) {
		memcpy(NFS_COOKIEVERF(dir), res.verifier.data, NFS4_VERIFIER_SIZE);
		status += args.pgbase;
	}

	nfs_invalidate_atime(dir);

	dprintk("%s: returns %d\n", __func__, status);
	return status;
}

static int nfs4_proc_readdir(struct dentry *dentry, struct rpc_cred *cred,
		u64 cookie, struct page **pages, unsigned int count, int plus)
{
	struct nfs4_exception exception = { };
	int err;
	do {
		err = nfs4_handle_exception(NFS_SERVER(dentry->d_inode),
				_nfs4_proc_readdir(dentry, cred, cookie,
					pages, count, plus),
				&exception);
	} while (exception.retry);
	return err;
}
static int _nfs4_proc_mknod(struct inode *dir, struct dentry *dentry,
		struct iattr *sattr, dev_t rdev)
{
	struct nfs4_createdata *data;
	int mode = sattr->ia_mode;
	int status = -ENOMEM;

	BUG_ON(!(sattr->ia_valid & ATTR_MODE));
	BUG_ON(!S_ISFIFO(mode) && !S_ISBLK(mode) && !S_ISCHR(mode) && !S_ISSOCK(mode));

	data = nfs4_alloc_createdata(dir, &dentry->d_name, sattr, NF4SOCK);
	if (data == NULL)
		goto out;

	if (S_ISFIFO(mode))
		data->arg.ftype = NF4FIFO;
	else if (S_ISBLK(mode)) {
		data->arg.ftype = NF4BLK;
		data->arg.u.device.specdata1 = MAJOR(rdev);
		data->arg.u.device.specdata2 = MINOR(rdev);
	}
	else if (S_ISCHR(mode)) {
		data->arg.ftype = NF4CHR;
		data->arg.u.device.specdata1 = MAJOR(rdev);
		data->arg.u.device.specdata2 = MINOR(rdev);
	}

	status = nfs4_do_create(dir, dentry, data);

	nfs4_free_createdata(data);
out:
	return status;
}

static int nfs4_proc_mknod(struct inode *dir, struct dentry *dentry,
		struct iattr *sattr, dev_t rdev)
{
	struct nfs4_exception exception = { };
	int err;

	sattr->ia_mode &= ~current_umask();
	do {
		err = nfs4_handle_exception(NFS_SERVER(dir),
				_nfs4_proc_mknod(dir, dentry, sattr, rdev),
				&exception);
	} while (exception.retry);
	return err;
}
static int _nfs4_proc_statfs(struct nfs_server *server, struct nfs_fh *fhandle,
		struct nfs_fsstat *fsstat)
{
	struct nfs4_statfs_arg args = {
		.fh = fhandle,
		.bitmask = server->attr_bitmask,
	};
	struct nfs4_statfs_res res = {
		.fsstat = fsstat,
	};
	struct rpc_message msg = {
		.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_STATFS],
		.rpc_argp = &args,
		.rpc_resp = &res,
	};

	nfs_fattr_init(fsstat->fattr);
	return nfs4_call_sync(server->client, server, &msg, &args.seq_args, &res.seq_res, 0);
}

static int nfs4_proc_statfs(struct nfs_server *server, struct nfs_fh *fhandle, struct nfs_fsstat *fsstat)
{
	struct nfs4_exception exception = { };
	int err;
	do {
		err = nfs4_handle_exception(server,
				_nfs4_proc_statfs(server, fhandle, fsstat),
				&exception);
	} while (exception.retry);
	return err;
}

static int _nfs4_do_fsinfo(struct nfs_server *server, struct nfs_fh *fhandle,
		struct nfs_fsinfo *fsinfo)
{
	struct nfs4_fsinfo_arg args = {
		.fh = fhandle,
		.bitmask = server->attr_bitmask,
	};
	struct nfs4_fsinfo_res res = {
		.fsinfo = fsinfo,
	};
	struct rpc_message msg = {
		.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_FSINFO],
		.rpc_argp = &args,
		.rpc_resp = &res,
	};

	return nfs4_call_sync(server->client, server, &msg, &args.seq_args, &res.seq_res, 0);
}

static int nfs4_do_fsinfo(struct nfs_server *server, struct nfs_fh *fhandle, struct nfs_fsinfo *fsinfo)
{
	struct nfs4_exception exception = { };
	int err;

	do {
		err = nfs4_handle_exception(server,
				_nfs4_do_fsinfo(server, fhandle, fsinfo),
				&exception);
	} while (exception.retry);
	return err;
}

static int nfs4_proc_fsinfo(struct nfs_server *server, struct nfs_fh *fhandle, struct nfs_fsinfo *fsinfo)
{
	nfs_fattr_init(fsinfo->fattr);
	return nfs4_do_fsinfo(server, fhandle, fsinfo);
}
static int _nfs4_proc_pathconf(struct nfs_server *server, struct nfs_fh *fhandle,
		struct nfs_pathconf *pathconf)
{
	struct nfs4_pathconf_arg args = {
		.fh = fhandle,
		.bitmask = server->attr_bitmask,
	};
	struct nfs4_pathconf_res res = {
		.pathconf = pathconf,
	};
	struct rpc_message msg = {
		.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_PATHCONF],
		.rpc_argp = &args,
		.rpc_resp = &res,
	};

	/* None of the pathconf attributes are mandatory to implement */
	if ((args.bitmask[0] & nfs4_pathconf_bitmap[0]) == 0) {
		memset(pathconf, 0, sizeof(*pathconf));
		return 0;
	}

	nfs_fattr_init(pathconf->fattr);
	return nfs4_call_sync(server->client, server, &msg, &args.seq_args, &res.seq_res, 0);
}

static int nfs4_proc_pathconf(struct nfs_server *server, struct nfs_fh *fhandle,
		struct nfs_pathconf *pathconf)
{
	struct nfs4_exception exception = { };
	int err;

	do {
		err = nfs4_handle_exception(server,
				_nfs4_proc_pathconf(server, fhandle, pathconf),
				&exception);
	} while (exception.retry);
	return err;
}
void __nfs4_read_done_cb(struct nfs_read_data *data)
{
	nfs_invalidate_atime(data->header->inode);
}

static int nfs4_read_done_cb(struct rpc_task *task, struct nfs_read_data *data)
{
	struct nfs_server *server = NFS_SERVER(data->header->inode);

	if (nfs4_async_handle_error(task, server, data->args.context->state) == -EAGAIN) {
		rpc_restart_call_prepare(task);
		return -EAGAIN;
	}

	__nfs4_read_done_cb(data);
	if (task->tk_status > 0)
		renew_lease(server, data->timestamp);
	return 0;
}

static int nfs4_read_done(struct rpc_task *task, struct nfs_read_data *data)
{

	dprintk("--> %s\n", __func__);

	if (!nfs4_sequence_done(task, &data->res.seq_res))
		return -EAGAIN;

	return data->read_done_cb ? data->read_done_cb(task, data) :
				    nfs4_read_done_cb(task, data);
}

static void nfs4_proc_read_setup(struct nfs_read_data *data, struct rpc_message *msg)
{
	data->timestamp   = jiffies;
	data->read_done_cb = nfs4_read_done_cb;
	msg->rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_READ];
	nfs41_init_sequence(&data->args.seq_args, &data->res.seq_res, 0);
}

static void nfs4_proc_read_rpc_prepare(struct rpc_task *task, struct nfs_read_data *data)
{
	if (nfs4_setup_sequence(NFS_SERVER(data->header->inode),
				&data->args.seq_args,
				&data->res.seq_res,
				task))
		return;
	rpc_call_start(task);
}
static int nfs4_write_done_cb(struct rpc_task *task, struct nfs_write_data *data)
{
	struct inode *inode = data->header->inode;

	if (nfs4_async_handle_error(task, NFS_SERVER(inode), data->args.context->state) == -EAGAIN) {
		rpc_restart_call_prepare(task);
		return -EAGAIN;
	}
	if (task->tk_status >= 0) {
		renew_lease(NFS_SERVER(inode), data->timestamp);
		nfs_post_op_update_inode_force_wcc(inode, &data->fattr);
	}
	return 0;
}

static int nfs4_write_done(struct rpc_task *task, struct nfs_write_data *data)
{
	if (!nfs4_sequence_done(task, &data->res.seq_res))
		return -EAGAIN;
	return data->write_done_cb ? data->write_done_cb(task, data) :
		nfs4_write_done_cb(task, data);
}

static
bool nfs4_write_need_cache_consistency_data(const struct nfs_write_data *data)
{
	const struct nfs_pgio_header *hdr = data->header;

	/* Don't request attributes for pNFS or O_DIRECT writes */
	if (data->ds_clp != NULL || hdr->dreq != NULL)
		return false;
	/* Otherwise, request attributes if and only if we don't hold
	 * a delegation
	 */
	return nfs_have_delegation(hdr->inode, FMODE_READ) == 0;
}

static void nfs4_proc_write_setup(struct nfs_write_data *data, struct rpc_message *msg)
{
	struct nfs_server *server = NFS_SERVER(data->header->inode);

	if (!nfs4_write_need_cache_consistency_data(data)) {
		data->args.bitmask = NULL;
		data->res.fattr = NULL;
	} else
		data->args.bitmask = server->cache_consistency_bitmask;

	if (!data->write_done_cb)
		data->write_done_cb = nfs4_write_done_cb;
	data->res.server = server;
	data->timestamp   = jiffies;

	msg->rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_WRITE];
	nfs41_init_sequence(&data->args.seq_args, &data->res.seq_res, 1);
}

static void nfs4_proc_write_rpc_prepare(struct rpc_task *task, struct nfs_write_data *data)
{
	if (nfs4_setup_sequence(NFS_SERVER(data->header->inode),
				&data->args.seq_args,
				&data->res.seq_res,
				task))
		return;
	rpc_call_start(task);
}
static void nfs4_proc_commit_rpc_prepare(struct rpc_task *task, struct nfs_commit_data *data)
{
	if (nfs4_setup_sequence(NFS_SERVER(data->inode),
				&data->args.seq_args,
				&data->res.seq_res,
				task))
		return;
	rpc_call_start(task);
}

static int nfs4_commit_done_cb(struct rpc_task *task, struct nfs_commit_data *data)
{
	struct inode *inode = data->inode;

	if (nfs4_async_handle_error(task, NFS_SERVER(inode), NULL) == -EAGAIN) {
		rpc_restart_call_prepare(task);
		return -EAGAIN;
	}
	return 0;
}

static int nfs4_commit_done(struct rpc_task *task, struct nfs_commit_data *data)
{
	if (!nfs4_sequence_done(task, &data->res.seq_res))
		return -EAGAIN;
	return data->commit_done_cb(task, data);
}

static void nfs4_proc_commit_setup(struct nfs_commit_data *data, struct rpc_message *msg)
{
	struct nfs_server *server = NFS_SERVER(data->inode);

	if (data->commit_done_cb == NULL)
		data->commit_done_cb = nfs4_commit_done_cb;
	data->res.server = server;
	msg->rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_COMMIT];
	nfs41_init_sequence(&data->args.seq_args, &data->res.seq_res, 1);
}
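/*
 * The read/write/commit setup, rpc_prepare and done callbacks above are
 * what the generic NFS I/O paths invoke for the v4 case: "setup" picks the
 * v4 procedure and initializes the session sequence arguments,
 * "rpc_prepare" claims a session slot (or queues the task), and "done"
 * handles lease renewal plus the usual asynchronous error recovery.
 */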
struct nfs4_renewdata {
	struct nfs_client	*client;
	unsigned long		timestamp;
};

/*
 * nfs4_proc_async_renew(): This is not one of the nfs_rpc_ops; it is a special
 * standalone procedure for queueing an asynchronous RENEW.
 */
static void nfs4_renew_release(void *calldata)
{
	struct nfs4_renewdata *data = calldata;
	struct nfs_client *clp = data->client;

	if (atomic_read(&clp->cl_count) > 1)
		nfs4_schedule_state_renewal(clp);
	nfs_put_client(clp);
	kfree(data);
}

static void nfs4_renew_done(struct rpc_task *task, void *calldata)
{
	struct nfs4_renewdata *data = calldata;
	struct nfs_client *clp = data->client;
	unsigned long timestamp = data->timestamp;

	if (task->tk_status < 0) {
		/* Unless we're shutting down, schedule state recovery! */
		if (test_bit(NFS_CS_RENEWD, &clp->cl_res_state) == 0)
			return;
		if (task->tk_status != NFS4ERR_CB_PATH_DOWN) {
			nfs4_schedule_lease_recovery(clp);
			return;
		}
		nfs4_schedule_path_down_recovery(clp);
	}
	do_renew_lease(clp, timestamp);
}

static const struct rpc_call_ops nfs4_renew_ops = {
	.rpc_call_done = nfs4_renew_done,
	.rpc_release = nfs4_renew_release,
};

static int nfs4_proc_async_renew(struct nfs_client *clp, struct rpc_cred *cred, unsigned renew_flags)
{
	struct rpc_message msg = {
		.rpc_proc	= &nfs4_procedures[NFSPROC4_CLNT_RENEW],
		.rpc_argp	= clp,
		.rpc_cred	= cred,
	};
	struct nfs4_renewdata *data;

	if (renew_flags == 0)
		return 0;
	if (!atomic_inc_not_zero(&clp->cl_count))
		return -EIO;
	data = kmalloc(sizeof(*data), GFP_NOFS);
	if (data == NULL)
		return -ENOMEM;
	data->client = clp;
	data->timestamp = jiffies;
	return rpc_call_async(clp->cl_rpcclient, &msg, RPC_TASK_SOFT,
			&nfs4_renew_ops, data);
}

static int nfs4_proc_renew(struct nfs_client *clp, struct rpc_cred *cred)
{
	struct rpc_message msg = {
		.rpc_proc	= &nfs4_procedures[NFSPROC4_CLNT_RENEW],
		.rpc_argp	= clp,
		.rpc_cred	= cred,
	};
	unsigned long now = jiffies;
	int status;

	status = rpc_call_sync(clp->cl_rpcclient, &msg, 0);
	if (status < 0)
		return status;
	do_renew_lease(clp, now);
	return 0;
}
static inline int nfs4_server_supports_acls(struct nfs_server *server)
{
	return (server->caps & NFS_CAP_ACLS)
		&& (server->acl_bitmask & ACL4_SUPPORT_ALLOW_ACL)
		&& (server->acl_bitmask & ACL4_SUPPORT_DENY_ACL);
}

/* Assuming that XATTR_SIZE_MAX is a multiple of PAGE_CACHE_SIZE, and that
 * it's OK to put sizeof(void) * (XATTR_SIZE_MAX/PAGE_CACHE_SIZE) bytes on
 * the stack.
 */
#define NFS4ACL_MAXPAGES (XATTR_SIZE_MAX >> PAGE_CACHE_SHIFT)

static int buf_to_pages_noslab(const void *buf, size_t buflen,
		struct page **pages, unsigned int *pgbase)
{
	struct page *newpage, **spages;
	int rc = 0;
	size_t len;
	spages = pages;

	do {
		len = min_t(size_t, PAGE_CACHE_SIZE, buflen);
		newpage = alloc_page(GFP_KERNEL);

		if (newpage == NULL)
			goto unwind;
		memcpy(page_address(newpage), buf, len);
		buf += len;
		buflen -= len;
		*pages++ = newpage;
		rc++;
	} while (buflen != 0);

	return rc;

unwind:
	for(; rc > 0; rc--)
		__free_page(spages[rc-1]);
	return -ENOMEM;
}
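/*
 * buf_to_pages_noslab() copies the caller's ACL buffer into freshly
 * allocated pages rather than mapping it in place, since the buffer may
 * live in slab memory that cannot be handed to the RPC transmit path
 * directly.  It returns the number of pages filled, or -ENOMEM after
 * unwinding any partial allocation.
 */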
struct nfs4_cached_acl {
	int cached;
	size_t len;
	char data[0];
};

static void nfs4_set_cached_acl(struct inode *inode, struct nfs4_cached_acl *acl)
{
	struct nfs_inode *nfsi = NFS_I(inode);

	spin_lock(&inode->i_lock);
	kfree(nfsi->nfs4_acl);
	nfsi->nfs4_acl = acl;
	spin_unlock(&inode->i_lock);
}

static void nfs4_zap_acl_attr(struct inode *inode)
{
	nfs4_set_cached_acl(inode, NULL);
}

static inline ssize_t nfs4_read_cached_acl(struct inode *inode, char *buf, size_t buflen)
{
	struct nfs_inode *nfsi = NFS_I(inode);
	struct nfs4_cached_acl *acl;
	int ret = -ENOENT;

	spin_lock(&inode->i_lock);
	acl = nfsi->nfs4_acl;
	if (acl == NULL)
		goto out;
	if (buf == NULL) /* user is just asking for length */
		goto out_len;
	if (acl->cached == 0)
		goto out;
	ret = -ERANGE; /* see getxattr(2) man page */
	if (acl->len > buflen)
		goto out;
	memcpy(buf, acl->data, acl->len);
out_len:
	ret = acl->len;
out:
	spin_unlock(&inode->i_lock);
	return ret;
}

static void nfs4_write_cached_acl(struct inode *inode, struct page **pages, size_t pgbase, size_t acl_len)
{
	struct nfs4_cached_acl *acl;

	if (pages && acl_len <= PAGE_SIZE) {
		acl = kmalloc(sizeof(*acl) + acl_len, GFP_KERNEL);
		if (acl == NULL)
			return;
		acl->cached = 1;
		_copy_from_pages(acl->data, pages, pgbase, acl_len);
	} else {
		acl = kmalloc(sizeof(*acl), GFP_KERNEL);
		if (acl == NULL)
			return;
		acl->cached = 0;
	}
	acl->len = acl_len;
	nfs4_set_cached_acl(inode, acl);
}
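/*
 * nfs4_write_cached_acl() either caches the full ACL body (when it fits in
 * a single page) or records only its length; in the latter case the entry
 * is marked as not holding cached data, so a later getxattr with a real
 * buffer triggers another GETACL round trip of the right size.
 */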
/*
 * The getxattr API returns the required buffer length when called with a
 * NULL buf. The NFSv4 acl tool then calls getxattr again after allocating
 * the required buf.  On a NULL buf, we send a page of data to the server
 * guessing that the ACL request can be serviced by a page. If so, we cache
 * up to the page of ACL data, and the 2nd call to getxattr is serviced by
 * the cache. If not so, we throw away the page, and cache the required
 * length. The next getxattr call will then produce another round trip to
 * the server, this time with the input buf of the required size.
 */
static ssize_t __nfs4_get_acl_uncached(struct inode *inode, void *buf, size_t buflen)
{
	struct page *pages[NFS4ACL_MAXPAGES] = {NULL, };
	struct nfs_getaclargs args = {
		.fh = NFS_FH(inode),
		.acl_pages = pages,
		.acl_len = buflen,
	};
	struct nfs_getaclres res = {
		.acl_len = buflen,
	};
	struct rpc_message msg = {
		.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_GETACL],
		.rpc_argp = &args,
		.rpc_resp = &res,
	};
	int ret = -ENOMEM, npages, i, acl_len = 0;

	npages = (buflen + PAGE_SIZE - 1) >> PAGE_SHIFT;
	/* As long as we're doing a round trip to the server anyway,
	 * let's be prepared for a page of acl data. */
	if (npages == 0)
		npages = 1;

	/* Add an extra page to handle the bitmap returned */
	npages++;

	for (i = 0; i < npages; i++) {
		pages[i] = alloc_page(GFP_KERNEL);
		if (!pages[i])
			goto out_free;
	}

	/* for decoding across pages */
	res.acl_scratch = alloc_page(GFP_KERNEL);
	if (!res.acl_scratch)
		goto out_free;

	args.acl_len = npages * PAGE_SIZE;
	args.acl_pgbase = 0;

	/* Let decode_getfacl know not to fail if the ACL data is larger than
	 * the page we send as a guess */
	if (buflen == 0)
		res.acl_flags |= NFS4_ACL_LEN_REQUEST;

	dprintk("%s  buf %p buflen %zu npages %d args.acl_len %zu\n",
		__func__, buf, buflen, npages, args.acl_len);
	ret = nfs4_call_sync(NFS_SERVER(inode)->client, NFS_SERVER(inode),
			     &msg, &args.seq_args, &res.seq_res, 0);
	if (ret)
		goto out_free;

	acl_len = res.acl_len - res.acl_data_offset;
	if (acl_len > args.acl_len)
		nfs4_write_cached_acl(inode, NULL, 0, acl_len);
	else
		nfs4_write_cached_acl(inode, pages, res.acl_data_offset,
				      acl_len);
	if (buf) {
		ret = -ERANGE;
		if (acl_len > buflen)
			goto out_free;
		_copy_from_pages(buf, pages, res.acl_data_offset,
				 acl_len);
	}
	ret = acl_len;
out_free:
	for (i = 0; i < npages; i++)
		if (pages[i])
			__free_page(pages[i]);
	if (res.acl_scratch)
		__free_page(res.acl_scratch);
	return ret;
}

static ssize_t nfs4_get_acl_uncached(struct inode *inode, void *buf, size_t buflen)
{
	struct nfs4_exception exception = { };
	ssize_t ret;
	do {
		ret = __nfs4_get_acl_uncached(inode, buf, buflen);
		if (ret >= 0)
			break;
		ret = nfs4_handle_exception(NFS_SERVER(inode), ret, &exception);
	} while (exception.retry);
	return ret;
}
static ssize_t nfs4_proc_get_acl(struct inode *inode, void *buf, size_t buflen)
{
	struct nfs_server *server = NFS_SERVER(inode);
	int ret;

	if (!nfs4_server_supports_acls(server))
		return -EOPNOTSUPP;
	ret = nfs_revalidate_inode(server, inode);
	if (ret < 0)
		return ret;
	if (NFS_I(inode)->cache_validity & NFS_INO_INVALID_ACL)
		nfs_zap_acl_cache(inode);
	ret = nfs4_read_cached_acl(inode, buf, buflen);
	if (ret != -ENOENT)
		/* -ENOENT is returned if there is no ACL or if there is an ACL
		 * but no cached acl data, just the acl length */
		return ret;
	return nfs4_get_acl_uncached(inode, buf, buflen);
}

static int __nfs4_proc_set_acl(struct inode *inode, const void *buf, size_t buflen)
{
	struct nfs_server *server = NFS_SERVER(inode);
	struct page *pages[NFS4ACL_MAXPAGES];
	struct nfs_setaclargs arg = {
		.fh		= NFS_FH(inode),
		.acl_pages	= pages,
		.acl_len	= buflen,
	};
	struct nfs_setaclres res;
	struct rpc_message msg = {
		.rpc_proc	= &nfs4_procedures[NFSPROC4_CLNT_SETACL],
		.rpc_argp	= &arg,
		.rpc_resp	= &res,
	};
	int ret, i;

	if (!nfs4_server_supports_acls(server))
		return -EOPNOTSUPP;
	i = buf_to_pages_noslab(buf, buflen, arg.acl_pages, &arg.acl_pgbase);
	if (i < 0)
		return i;
	nfs_inode_return_delegation(inode);
	ret = nfs4_call_sync(server->client, server, &msg, &arg.seq_args, &res.seq_res, 1);

	/*
	 * Free each page after tx, so the only ref left is
	 * held by the network stack
	 */
	for (; i > 0; i--)
		put_page(pages[i-1]);

	/*
	 * Acl update can result in inode attribute update.
	 * so mark the attribute cache invalid.
	 */
	spin_lock(&inode->i_lock);
	NFS_I(inode)->cache_validity |= NFS_INO_INVALID_ATTR;
	spin_unlock(&inode->i_lock);
	nfs_access_zap_cache(inode);
	nfs_zap_acl_cache(inode);
	return ret;
}

static int nfs4_proc_set_acl(struct inode *inode, const void *buf, size_t buflen)
{
	struct nfs4_exception exception = { };
	int err;
	do {
		err = nfs4_handle_exception(NFS_SERVER(inode),
				__nfs4_proc_set_acl(inode, buf, buflen),
				&exception);
	} while (exception.retry);
	return err;
}
static int
nfs4_async_handle_error(struct rpc_task *task, const struct nfs_server *server, struct nfs4_state *state)
{
	struct nfs_client *clp = server->nfs_client;

	if (task->tk_status >= 0)
		return 0;
	switch(task->tk_status) {
		case -NFS4ERR_DELEG_REVOKED:
		case -NFS4ERR_ADMIN_REVOKED:
		case -NFS4ERR_BAD_STATEID:
			if (state == NULL)
				break;
			nfs_remove_bad_delegation(state->inode);
		case -NFS4ERR_OPENMODE:
			if (state == NULL)
				break;
			nfs4_schedule_stateid_recovery(server, state);
			goto wait_on_recovery;
		case -NFS4ERR_EXPIRED:
			if (state != NULL)
				nfs4_schedule_stateid_recovery(server, state);
		case -NFS4ERR_STALE_STATEID:
		case -NFS4ERR_STALE_CLIENTID:
			nfs4_schedule_lease_recovery(clp);
			goto wait_on_recovery;
#if defined(CONFIG_NFS_V4_1)
		case -NFS4ERR_BADSESSION:
		case -NFS4ERR_BADSLOT:
		case -NFS4ERR_BAD_HIGH_SLOT:
		case -NFS4ERR_DEADSESSION:
		case -NFS4ERR_CONN_NOT_BOUND_TO_SESSION:
		case -NFS4ERR_SEQ_FALSE_RETRY:
		case -NFS4ERR_SEQ_MISORDERED:
			dprintk("%s ERROR %d, Reset session\n", __func__,
				task->tk_status);
			nfs4_schedule_session_recovery(clp->cl_session, task->tk_status);
			task->tk_status = 0;
			return -EAGAIN;
#endif /* CONFIG_NFS_V4_1 */
		case -NFS4ERR_DELAY:
			nfs_inc_server_stats(server, NFSIOS_DELAY);
		case -NFS4ERR_GRACE:
			rpc_delay(task, NFS4_POLL_RETRY_MAX);
			task->tk_status = 0;
			return -EAGAIN;
		case -NFS4ERR_RETRY_UNCACHED_REP:
		case -NFS4ERR_OLD_STATEID:
			task->tk_status = 0;
			return -EAGAIN;
	}
	task->tk_status = nfs4_map_errors(task->tk_status);
	return 0;
wait_on_recovery:
	rpc_sleep_on(&clp->cl_rpcwaitq, task, NULL);
	if (test_bit(NFS4CLNT_MANAGER_RUNNING, &clp->cl_state) == 0)
		rpc_wake_up_queued_task(&clp->cl_rpcwaitq, task);
	task->tk_status = 0;
	return -EAGAIN;
}
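/*
 * nfs4_async_handle_error() is the asynchronous counterpart of
 * nfs4_handle_exception(): stateid and lease errors kick off state
 * recovery and park the task on the client's wait queue, session errors
 * schedule a session reset, and NFS4ERR_DELAY/GRACE simply delay and
 * retry.  A return value of -EAGAIN tells the caller to restart the RPC;
 * anything else means the (mapped) error should be reported as final.
 */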
static void nfs4_init_boot_verifier(const struct nfs_client *clp,
				    nfs4_verifier *bootverf)
{
	__be32 verf[2];

	if (test_bit(NFS4CLNT_PURGE_STATE, &clp->cl_state)) {
		/* An impossible timestamp guarantees this value
		 * will never match a generated boot time. */
		verf[0] = 0;
		verf[1] = (__be32)(NSEC_PER_SEC + 1);
	} else {
		struct nfs_net *nn = net_generic(clp->cl_net, nfs_net_id);
		verf[0] = (__be32)nn->boot_time.tv_sec;
		verf[1] = (__be32)nn->boot_time.tv_nsec;
	}
	memcpy(bootverf->data, verf, sizeof(bootverf->data));
}
int nfs4_proc_setclientid(struct nfs_client *clp, u32 program,
		unsigned short port, struct rpc_cred *cred,
		struct nfs4_setclientid_res *res)
{
	nfs4_verifier sc_verifier;
	struct nfs4_setclientid setclientid = {
		.sc_verifier = &sc_verifier,
		.sc_prog = program,
		.sc_cb_ident = clp->cl_cb_ident,
	};
	struct rpc_message msg = {
		.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SETCLIENTID],
		.rpc_argp = &setclientid,
		.rpc_resp = res,
		.rpc_cred = cred,
	};
	int loop = 0;
	int status;

	nfs4_init_boot_verifier(clp, &sc_verifier);

	rcu_read_lock();
	setclientid.sc_name_len = scnprintf(setclientid.sc_name,
			sizeof(setclientid.sc_name), "%s/%s %s %s %u",
			clp->cl_ipaddr,
			rpc_peeraddr2str(clp->cl_rpcclient,
						RPC_DISPLAY_ADDR),
			rpc_peeraddr2str(clp->cl_rpcclient,
						RPC_DISPLAY_PROTO),
			clp->cl_rpcclient->cl_auth->au_ops->au_name,
			clp->cl_id_uniquifier);
	setclientid.sc_netid_len = scnprintf(setclientid.sc_netid,
			sizeof(setclientid.sc_netid),
			rpc_peeraddr2str(clp->cl_rpcclient,
						RPC_DISPLAY_NETID));
	setclientid.sc_uaddr_len = scnprintf(setclientid.sc_uaddr,
			sizeof(setclientid.sc_uaddr), "%s.%u.%u",
			clp->cl_ipaddr, port >> 8, port & 255);
	rcu_read_unlock();

	for(;;) {
		status = rpc_call_sync(clp->cl_rpcclient, &msg, RPC_TASK_TIMEOUT);
		if (status != -NFS4ERR_CLID_INUSE)
			break;
		if (loop != 0) {
			++clp->cl_id_uniquifier;
			break;
		}
		++loop;
		ssleep(clp->cl_lease_time / HZ + 1);
	}
	return status;
}

int nfs4_proc_setclientid_confirm(struct nfs_client *clp,
		struct nfs4_setclientid_res *arg,
		struct rpc_cred *cred)
{
	struct nfs_fsinfo fsinfo;
	struct rpc_message msg = {
		.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SETCLIENTID_CONFIRM],
		.rpc_argp = arg,
		.rpc_resp = &fsinfo,
		.rpc_cred = cred,
	};
	unsigned long now;
	int status;

	now = jiffies;
	status = rpc_call_sync(clp->cl_rpcclient, &msg, RPC_TASK_TIMEOUT);
	if (status == 0) {
		spin_lock(&clp->cl_lock);
		clp->cl_lease_time = fsinfo.lease_time * HZ;
		clp->cl_last_renewal = now;
		spin_unlock(&clp->cl_lock);
	}
	return status;
}
struct nfs4_delegreturndata {
	struct nfs4_delegreturnargs args;
	struct nfs4_delegreturnres res;
	struct nfs_fh fh;
	nfs4_stateid stateid;
	unsigned long timestamp;
	struct nfs_fattr fattr;
	int rpc_status;
};

static void nfs4_delegreturn_done(struct rpc_task *task, void *calldata)
{
	struct nfs4_delegreturndata *data = calldata;

	if (!nfs4_sequence_done(task, &data->res.seq_res))
		return;

	switch (task->tk_status) {
	case -NFS4ERR_STALE_STATEID:
	case -NFS4ERR_EXPIRED:
	case 0:
		renew_lease(data->res.server, data->timestamp);
		break;
	default:
		if (nfs4_async_handle_error(task, data->res.server, NULL) ==
				-EAGAIN) {
			rpc_restart_call_prepare(task);
			return;
		}
	}
	data->rpc_status = task->tk_status;
}

static void nfs4_delegreturn_release(void *calldata)
{
	kfree(calldata);
}

#if defined(CONFIG_NFS_V4_1)
static void nfs4_delegreturn_prepare(struct rpc_task *task, void *data)
{
	struct nfs4_delegreturndata *d_data;

	d_data = (struct nfs4_delegreturndata *)data;

	if (nfs4_setup_sequence(d_data->res.server,
				&d_data->args.seq_args,
				&d_data->res.seq_res, task))
		return;
	rpc_call_start(task);
}
#endif /* CONFIG_NFS_V4_1 */

static const struct rpc_call_ops nfs4_delegreturn_ops = {
#if defined(CONFIG_NFS_V4_1)
	.rpc_call_prepare = nfs4_delegreturn_prepare,
#endif /* CONFIG_NFS_V4_1 */
	.rpc_call_done = nfs4_delegreturn_done,
	.rpc_release = nfs4_delegreturn_release,
};
static int _nfs4_proc_delegreturn(struct inode *inode, struct rpc_cred *cred, const nfs4_stateid *stateid, int issync)
{
	struct nfs4_delegreturndata *data;
	struct nfs_server *server = NFS_SERVER(inode);
	struct rpc_task *task;
	struct rpc_message msg = {
		.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_DELEGRETURN],
		.rpc_cred = cred,
	};
	struct rpc_task_setup task_setup_data = {
		.rpc_client = server->client,
		.rpc_message = &msg,
		.callback_ops = &nfs4_delegreturn_ops,
		.flags = RPC_TASK_ASYNC,
	};
	int status = 0;

	data = kzalloc(sizeof(*data), GFP_NOFS);
	if (data == NULL)
		return -ENOMEM;
	nfs41_init_sequence(&data->args.seq_args, &data->res.seq_res, 1);
	data->args.fhandle = &data->fh;
	data->args.stateid = &data->stateid;
	data->args.bitmask = server->cache_consistency_bitmask;
	nfs_copy_fh(&data->fh, NFS_FH(inode));
	nfs4_stateid_copy(&data->stateid, stateid);
	data->res.fattr = &data->fattr;
	data->res.server = server;
	nfs_fattr_init(data->res.fattr);
	data->timestamp = jiffies;
	data->rpc_status = 0;

	task_setup_data.callback_data = data;
	msg.rpc_argp = &data->args;
	msg.rpc_resp = &data->res;
	task = rpc_run_task(&task_setup_data);
	if (IS_ERR(task))
		return PTR_ERR(task);
	if (!issync)
		goto out;
	status = nfs4_wait_for_completion_rpc_task(task);
	if (status != 0)
		goto out;
	status = data->rpc_status;
	if (status == 0)
		nfs_post_op_update_inode_force_wcc(inode, &data->fattr);
	else
		nfs_refresh_inode(inode, &data->fattr);
out:
	rpc_put_task(task);
	return status;
}

int nfs4_proc_delegreturn(struct inode *inode, struct rpc_cred *cred, const nfs4_stateid *stateid, int issync)
{
	struct nfs_server *server = NFS_SERVER(inode);
	struct nfs4_exception exception = { };
	int err;
	do {
		err = _nfs4_proc_delegreturn(inode, cred, stateid, issync);
		switch (err) {
			case -NFS4ERR_STALE_STATEID:
			case -NFS4ERR_EXPIRED:
			case 0:
				return 0;
		}
		err = nfs4_handle_exception(server, err, &exception);
	} while (exception.retry);
	return err;
}
#define NFS4_LOCK_MINTIMEOUT (1 * HZ)
#define NFS4_LOCK_MAXTIMEOUT (30 * HZ)

/*
 * sleep, with exponential backoff, and retry the LOCK operation.
 */
static unsigned long
nfs4_set_lock_task_retry(unsigned long timeout)
{
	freezable_schedule_timeout_killable(timeout);
	timeout <<= 1;
	if (timeout > NFS4_LOCK_MAXTIMEOUT)
		return NFS4_LOCK_MAXTIMEOUT;
	return timeout;
}
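/*
 * Each unsuccessful, blocking LOCK attempt sleeps for the current timeout
 * and then roughly doubles it, so a contended lock is retried after about
 * 1s, 2s, 4s, ... starting from NFS4_LOCK_MINTIMEOUT and capped at
 * NFS4_LOCK_MAXTIMEOUT (30s).
 */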
static int _nfs4_proc_getlk(struct nfs4_state *state, int cmd, struct file_lock *request)
{
	struct inode *inode = state->inode;
	struct nfs_server *server = NFS_SERVER(inode);
	struct nfs_client *clp = server->nfs_client;
	struct nfs_lockt_args arg = {
		.fh = NFS_FH(inode),
		.fl = request,
	};
	struct nfs_lockt_res res = {
		.denied = request,
	};
	struct rpc_message msg = {
		.rpc_proc	= &nfs4_procedures[NFSPROC4_CLNT_LOCKT],
		.rpc_argp	= &arg,
		.rpc_resp	= &res,
		.rpc_cred	= state->owner->so_cred,
	};
	struct nfs4_lock_state *lsp;
	int status;

	arg.lock_owner.clientid = clp->cl_clientid;
	status = nfs4_set_lock_state(state, request);
	if (status != 0)
		goto out;
	lsp = request->fl_u.nfs4_fl.owner;
	arg.lock_owner.id = lsp->ls_seqid.owner_id;
	arg.lock_owner.s_dev = server->s_dev;
	status = nfs4_call_sync(server->client, server, &msg, &arg.seq_args, &res.seq_res, 1);
	switch (status) {
		case 0:
			request->fl_type = F_UNLCK;
			break;
		case -NFS4ERR_DENIED:
			status = 0;
	}
	request->fl_ops->fl_release_private(request);
out:
	return status;
}

static int nfs4_proc_getlk(struct nfs4_state *state, int cmd, struct file_lock *request)
{
	struct nfs4_exception exception = { };
	int err;

	do {
		err = nfs4_handle_exception(NFS_SERVER(state->inode),
				_nfs4_proc_getlk(state, cmd, request),
				&exception);
	} while (exception.retry);
	return err;
}
static int do_vfs_lock(struct file *file, struct file_lock *fl)
{
	int res = 0;
	switch (fl->fl_flags & (FL_POSIX|FL_FLOCK)) {
		case FL_POSIX:
			res = posix_lock_file_wait(file, fl);
			break;
		case FL_FLOCK:
			res = flock_lock_file_wait(file, fl);
			break;
		default:
			BUG();
	}
	return res;
}

struct nfs4_unlockdata {
	struct nfs_locku_args arg;
	struct nfs_locku_res res;
	struct nfs4_lock_state *lsp;
	struct nfs_open_context *ctx;
	struct file_lock fl;
	const struct nfs_server *server;
	unsigned long timestamp;
};
static struct nfs4_unlockdata *nfs4_alloc_unlockdata(struct file_lock *fl,
		struct nfs_open_context *ctx,
		struct nfs4_lock_state *lsp,
		struct nfs_seqid *seqid)
{
	struct nfs4_unlockdata *p;
	struct inode *inode = lsp->ls_state->inode;

	p = kzalloc(sizeof(*p), GFP_NOFS);
	if (p == NULL)
		return NULL;
	p->arg.fh = NFS_FH(inode);
	p->arg.fl = &p->fl;
	p->arg.seqid = seqid;
	p->res.seqid = seqid;
	p->arg.stateid = &lsp->ls_stateid;
	p->lsp = lsp;
	atomic_inc(&lsp->ls_count);
	/* Ensure we don't close file until we're done freeing locks! */
	p->ctx = get_nfs_open_context(ctx);
	memcpy(&p->fl, fl, sizeof(p->fl));
	p->server = NFS_SERVER(inode);
	return p;
}

static void nfs4_locku_release_calldata(void *data)
{
	struct nfs4_unlockdata *calldata = data;
	nfs_free_seqid(calldata->arg.seqid);
	nfs4_put_lock_state(calldata->lsp);
	put_nfs_open_context(calldata->ctx);
	kfree(calldata);
}

static void nfs4_locku_done(struct rpc_task *task, void *data)
{
	struct nfs4_unlockdata *calldata = data;

	if (!nfs4_sequence_done(task, &calldata->res.seq_res))
		return;
	switch (task->tk_status) {
		case 0:
			nfs4_stateid_copy(&calldata->lsp->ls_stateid,
					&calldata->res.stateid);
			renew_lease(calldata->server, calldata->timestamp);
			break;
		case -NFS4ERR_BAD_STATEID:
		case -NFS4ERR_OLD_STATEID:
		case -NFS4ERR_STALE_STATEID:
		case -NFS4ERR_EXPIRED:
			break;
		default:
			if (nfs4_async_handle_error(task, calldata->server, NULL) == -EAGAIN)
				rpc_restart_call_prepare(task);
	}
}

static void nfs4_locku_prepare(struct rpc_task *task, void *data)
{
	struct nfs4_unlockdata *calldata = data;

	if (nfs_wait_on_sequence(calldata->arg.seqid, task) != 0)
		return;
	if ((calldata->lsp->ls_flags & NFS_LOCK_INITIALIZED) == 0) {
		/* Note: exit _without_ running nfs4_locku_done */
		task->tk_action = NULL;
		return;
	}
	calldata->timestamp = jiffies;
	if (nfs4_setup_sequence(calldata->server,
				&calldata->arg.seq_args,
				&calldata->res.seq_res, task))
		return;
	rpc_call_start(task);
}

static const struct rpc_call_ops nfs4_locku_ops = {
	.rpc_call_prepare = nfs4_locku_prepare,
	.rpc_call_done = nfs4_locku_done,
	.rpc_release = nfs4_locku_release_calldata,
};
static struct rpc_task *nfs4_do_unlck(struct file_lock *fl,
		struct nfs_open_context *ctx,
		struct nfs4_lock_state *lsp,
		struct nfs_seqid *seqid)
{
	struct nfs4_unlockdata *data;
	struct rpc_message msg = {
		.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LOCKU],
		.rpc_cred = ctx->cred,
	};
	struct rpc_task_setup task_setup_data = {
		.rpc_client = NFS_CLIENT(lsp->ls_state->inode),
		.rpc_message = &msg,
		.callback_ops = &nfs4_locku_ops,
		.workqueue = nfsiod_workqueue,
		.flags = RPC_TASK_ASYNC,
	};

	/* Ensure this is an unlock - when canceling a lock, the
	 * canceled lock is passed in, and it won't be an unlock.
	 */
	fl->fl_type = F_UNLCK;

	data = nfs4_alloc_unlockdata(fl, ctx, lsp, seqid);
	if (data == NULL) {
		nfs_free_seqid(seqid);
		return ERR_PTR(-ENOMEM);
	}

	nfs41_init_sequence(&data->arg.seq_args, &data->res.seq_res, 1);
	msg.rpc_argp = &data->arg;
	msg.rpc_resp = &data->res;
	task_setup_data.callback_data = data;
	return rpc_run_task(&task_setup_data);
}
static int nfs4_proc_unlck(struct nfs4_state *state, int cmd, struct file_lock *request)
{
	struct nfs_inode *nfsi = NFS_I(state->inode);
	struct nfs_seqid *seqid;
	struct nfs4_lock_state *lsp;
	struct rpc_task *task;
	int status = 0;
	unsigned char fl_flags = request->fl_flags;

	status = nfs4_set_lock_state(state, request);
	/* Unlock _before_ we do the RPC call */
	request->fl_flags |= FL_EXISTS;
	down_read(&nfsi->rwsem);
	if (do_vfs_lock(request->fl_file, request) == -ENOENT) {
		up_read(&nfsi->rwsem);
		goto out;
	}
	up_read(&nfsi->rwsem);
	if (status != 0)
		goto out;
	/* Is this a delegated lock? */
	if (test_bit(NFS_DELEGATED_STATE, &state->flags))
		goto out;
	lsp = request->fl_u.nfs4_fl.owner;
	seqid = nfs_alloc_seqid(&lsp->ls_seqid, GFP_KERNEL);
	status = -ENOMEM;
	if (seqid == NULL)
		goto out;
	task = nfs4_do_unlck(request, nfs_file_open_context(request->fl_file), lsp, seqid);
	status = PTR_ERR(task);
	if (IS_ERR(task))
		goto out;
	status = nfs4_wait_for_completion_rpc_task(task);
	rpc_put_task(task);
out:
	request->fl_flags = fl_flags;
	return status;
}
struct nfs4_lockdata {
	struct nfs_lock_args arg;
	struct nfs_lock_res res;
	struct nfs4_lock_state *lsp;
	struct nfs_open_context *ctx;
	struct file_lock fl;
	unsigned long timestamp;
	int rpc_status;
	int cancelled;
	struct nfs_server *server;
};
4430 static struct nfs4_lockdata
*nfs4_alloc_lockdata(struct file_lock
*fl
,
4431 struct nfs_open_context
*ctx
, struct nfs4_lock_state
*lsp
,
4434 struct nfs4_lockdata
*p
;
4435 struct inode
*inode
= lsp
->ls_state
->inode
;
4436 struct nfs_server
*server
= NFS_SERVER(inode
);
4438 p
= kzalloc(sizeof(*p
), gfp_mask
);
4442 p
->arg
.fh
= NFS_FH(inode
);
4444 p
->arg
.open_seqid
= nfs_alloc_seqid(&lsp
->ls_state
->owner
->so_seqid
, gfp_mask
);
4445 if (p
->arg
.open_seqid
== NULL
)
4447 p
->arg
.lock_seqid
= nfs_alloc_seqid(&lsp
->ls_seqid
, gfp_mask
);
4448 if (p
->arg
.lock_seqid
== NULL
)
4449 goto out_free_seqid
;
4450 p
->arg
.lock_stateid
= &lsp
->ls_stateid
;
4451 p
->arg
.lock_owner
.clientid
= server
->nfs_client
->cl_clientid
;
4452 p
->arg
.lock_owner
.id
= lsp
->ls_seqid
.owner_id
;
4453 p
->arg
.lock_owner
.s_dev
= server
->s_dev
;
4454 p
->res
.lock_seqid
= p
->arg
.lock_seqid
;
4457 atomic_inc(&lsp
->ls_count
);
4458 p
->ctx
= get_nfs_open_context(ctx
);
4459 memcpy(&p
->fl
, fl
, sizeof(p
->fl
));
4462 nfs_free_seqid(p
->arg
.open_seqid
);
4468 static void nfs4_lock_prepare(struct rpc_task
*task
, void *calldata
)
4470 struct nfs4_lockdata
*data
= calldata
;
4471 struct nfs4_state
*state
= data
->lsp
->ls_state
;
4473 dprintk("%s: begin!\n", __func__
);
4474 if (nfs_wait_on_sequence(data
->arg
.lock_seqid
, task
) != 0)
4476 /* Do we need to do an open_to_lock_owner? */
4477 if (!(data
->arg
.lock_seqid
->sequence
->flags
& NFS_SEQID_CONFIRMED
)) {
4478 if (nfs_wait_on_sequence(data
->arg
.open_seqid
, task
) != 0)
4480 data
->arg
.open_stateid
= &state
->stateid
;
4481 data
->arg
.new_lock_owner
= 1;
4482 data
->res
.open_seqid
= data
->arg
.open_seqid
;
4484 data
->arg
.new_lock_owner
= 0;
4485 data
->timestamp
= jiffies
;
4486 if (nfs4_setup_sequence(data
->server
,
4487 &data
->arg
.seq_args
,
4488 &data
->res
.seq_res
, task
))
4490 rpc_call_start(task
);
4491 dprintk("%s: done!, ret = %d\n", __func__
, data
->rpc_status
);
4494 static void nfs4_recover_lock_prepare(struct rpc_task
*task
, void *calldata
)
4496 rpc_task_set_priority(task
, RPC_PRIORITY_PRIVILEGED
);
4497 nfs4_lock_prepare(task
, calldata
);
4500 static void nfs4_lock_done(struct rpc_task
*task
, void *calldata
)
4502 struct nfs4_lockdata
*data
= calldata
;
4504 dprintk("%s: begin!\n", __func__
);
4506 if (!nfs4_sequence_done(task
, &data
->res
.seq_res
))
4509 data
->rpc_status
= task
->tk_status
;
4510 if (data
->arg
.new_lock_owner
!= 0) {
4511 if (data
->rpc_status
== 0)
4512 nfs_confirm_seqid(&data
->lsp
->ls_seqid
, 0);
4516 if (data
->rpc_status
== 0) {
4517 nfs4_stateid_copy(&data
->lsp
->ls_stateid
, &data
->res
.stateid
);
4518 data
->lsp
->ls_flags
|= NFS_LOCK_INITIALIZED
;
4519 renew_lease(NFS_SERVER(data
->ctx
->dentry
->d_inode
), data
->timestamp
);
4522 dprintk("%s: done, ret = %d!\n", __func__
, data
->rpc_status
);
4525 static void nfs4_lock_release(void *calldata
)
4527 struct nfs4_lockdata
*data
= calldata
;
4529 dprintk("%s: begin!\n", __func__
);
4530 nfs_free_seqid(data
->arg
.open_seqid
);
4531 if (data
->cancelled
!= 0) {
4532 struct rpc_task
*task
;
4533 task
= nfs4_do_unlck(&data
->fl
, data
->ctx
, data
->lsp
,
4534 data
->arg
.lock_seqid
);
4536 rpc_put_task_async(task
);
4537 dprintk("%s: cancelling lock!\n", __func__
);
4539 nfs_free_seqid(data
->arg
.lock_seqid
);
4540 nfs4_put_lock_state(data
->lsp
);
4541 put_nfs_open_context(data
->ctx
);
4543 dprintk("%s: done!\n", __func__
);
static const struct rpc_call_ops nfs4_lock_ops = {
	.rpc_call_prepare = nfs4_lock_prepare,
	.rpc_call_done = nfs4_lock_done,
	.rpc_release = nfs4_lock_release,
};

static const struct rpc_call_ops nfs4_recover_lock_ops = {
	.rpc_call_prepare = nfs4_recover_lock_prepare,
	.rpc_call_done = nfs4_lock_done,
	.rpc_release = nfs4_lock_release,
};
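/*
 * Illustrative note (not part of the original source): the only difference
 * between the two call-ops tables above is the ->rpc_call_prepare hook, which
 * bumps the task to RPC_PRIORITY_PRIVILEGED during state recovery.  A caller
 * selects the recovery variant by overriding callback_ops before starting the
 * task, roughly:
 *
 *	struct rpc_task_setup task_setup_data = {
 *		.callback_ops = &nfs4_lock_ops,
 *		.flags = RPC_TASK_ASYNC,
 *	};
 *	if (recovery_type > NFS_LOCK_NEW)
 *		task_setup_data.callback_ops = &nfs4_recover_lock_ops;
 *
 * which is what _nfs4_do_setlk() below does.
 */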
4558 static void nfs4_handle_setlk_error(struct nfs_server
*server
, struct nfs4_lock_state
*lsp
, int new_lock_owner
, int error
)
4561 case -NFS4ERR_ADMIN_REVOKED
:
4562 case -NFS4ERR_BAD_STATEID
:
4563 lsp
->ls_seqid
.flags
&= ~NFS_SEQID_CONFIRMED
;
4564 if (new_lock_owner
!= 0 ||
4565 (lsp
->ls_flags
& NFS_LOCK_INITIALIZED
) != 0)
4566 nfs4_schedule_stateid_recovery(server
, lsp
->ls_state
);
4568 case -NFS4ERR_STALE_STATEID
:
4569 lsp
->ls_seqid
.flags
&= ~NFS_SEQID_CONFIRMED
;
4570 case -NFS4ERR_EXPIRED
:
4571 nfs4_schedule_lease_recovery(server
->nfs_client
);
4575 static int _nfs4_do_setlk(struct nfs4_state
*state
, int cmd
, struct file_lock
*fl
, int recovery_type
)
4577 struct nfs4_lockdata
*data
;
4578 struct rpc_task
*task
;
4579 struct rpc_message msg
= {
4580 .rpc_proc
= &nfs4_procedures
[NFSPROC4_CLNT_LOCK
],
4581 .rpc_cred
= state
->owner
->so_cred
,
4583 struct rpc_task_setup task_setup_data
= {
4584 .rpc_client
= NFS_CLIENT(state
->inode
),
4585 .rpc_message
= &msg
,
4586 .callback_ops
= &nfs4_lock_ops
,
4587 .workqueue
= nfsiod_workqueue
,
4588 .flags
= RPC_TASK_ASYNC
,
4592 dprintk("%s: begin!\n", __func__
);
4593 data
= nfs4_alloc_lockdata(fl
, nfs_file_open_context(fl
->fl_file
),
4594 fl
->fl_u
.nfs4_fl
.owner
,
4595 recovery_type
== NFS_LOCK_NEW
? GFP_KERNEL
: GFP_NOFS
);
4599 data
->arg
.block
= 1;
4600 if (recovery_type
> NFS_LOCK_NEW
) {
4601 if (recovery_type
== NFS_LOCK_RECLAIM
)
4602 data
->arg
.reclaim
= NFS_LOCK_RECLAIM
;
4603 task_setup_data
.callback_ops
= &nfs4_recover_lock_ops
;
4605 nfs41_init_sequence(&data
->arg
.seq_args
, &data
->res
.seq_res
, 1);
4606 msg
.rpc_argp
= &data
->arg
;
4607 msg
.rpc_resp
= &data
->res
;
4608 task_setup_data
.callback_data
= data
;
4609 task
= rpc_run_task(&task_setup_data
);
4611 return PTR_ERR(task
);
4612 ret
= nfs4_wait_for_completion_rpc_task(task
);
4614 ret
= data
->rpc_status
;
4616 nfs4_handle_setlk_error(data
->server
, data
->lsp
,
4617 data
->arg
.new_lock_owner
, ret
);
4619 data
->cancelled
= 1;
4621 dprintk("%s: done, ret = %d!\n", __func__
, ret
);
4625 static int nfs4_lock_reclaim(struct nfs4_state
*state
, struct file_lock
*request
)
4627 struct nfs_server
*server
= NFS_SERVER(state
->inode
);
4628 struct nfs4_exception exception
= {
4629 .inode
= state
->inode
,
4634 /* Cache the lock if possible... */
4635 if (test_bit(NFS_DELEGATED_STATE
, &state
->flags
) != 0)
4637 err
= _nfs4_do_setlk(state
, F_SETLK
, request
, NFS_LOCK_RECLAIM
);
4638 if (err
!= -NFS4ERR_DELAY
)
4640 nfs4_handle_exception(server
, err
, &exception
);
4641 } while (exception
.retry
);
4645 static int nfs4_lock_expired(struct nfs4_state
*state
, struct file_lock
*request
)
4647 struct nfs_server
*server
= NFS_SERVER(state
->inode
);
4648 struct nfs4_exception exception
= {
4649 .inode
= state
->inode
,
4653 err
= nfs4_set_lock_state(state
, request
);
4657 if (test_bit(NFS_DELEGATED_STATE
, &state
->flags
) != 0)
4659 err
= _nfs4_do_setlk(state
, F_SETLK
, request
, NFS_LOCK_EXPIRED
);
4663 case -NFS4ERR_GRACE
:
4664 case -NFS4ERR_DELAY
:
4665 nfs4_handle_exception(server
, err
, &exception
);
4668 } while (exception
.retry
);
#if defined(CONFIG_NFS_V4_1)
static int nfs41_check_expired_locks(struct nfs4_state *state)
{
	int status, ret = NFS_OK;
	struct nfs4_lock_state *lsp;
	struct nfs_server *server = NFS_SERVER(state->inode);

	list_for_each_entry(lsp, &state->lock_states, ls_locks) {
		if (lsp->ls_flags & NFS_LOCK_INITIALIZED) {
			status = nfs41_test_stateid(server, &lsp->ls_stateid);
			if (status != NFS_OK) {
				nfs41_free_stateid(server, &lsp->ls_stateid);
				lsp->ls_flags &= ~NFS_LOCK_INITIALIZED;
				ret = status;
			}
		}
	}

	return ret;
}

static int nfs41_lock_expired(struct nfs4_state *state, struct file_lock *request)
{
	int status = NFS_OK;

	if (test_bit(LK_STATE_IN_USE, &state->flags))
		status = nfs41_check_expired_locks(state);
	if (status == NFS_OK)
		return status;
	return nfs4_lock_expired(state, request);
}
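/*
 * Illustrative sketch only: the TEST_STATEID/FREE_STATEID pattern used in
 * nfs41_check_expired_locks() above applies to any stateid the client still
 * holds after a possible server-side expiry.  Checking a single stateid looks
 * roughly like:
 *
 *	status = nfs41_test_stateid(server, stateid);
 *	if (status != NFS_OK) {
 *		// the server no longer recognises it: release it and recover
 *		nfs41_free_stateid(server, stateid);
 *		...schedule recovery for the associated open/lock state...
 *	}
 */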
4706 static int _nfs4_proc_setlk(struct nfs4_state
*state
, int cmd
, struct file_lock
*request
)
4708 struct nfs_inode
*nfsi
= NFS_I(state
->inode
);
4709 unsigned char fl_flags
= request
->fl_flags
;
4710 int status
= -ENOLCK
;
4712 if ((fl_flags
& FL_POSIX
) &&
4713 !test_bit(NFS_STATE_POSIX_LOCKS
, &state
->flags
))
4715 /* Is this a delegated open? */
4716 status
= nfs4_set_lock_state(state
, request
);
4719 request
->fl_flags
|= FL_ACCESS
;
4720 status
= do_vfs_lock(request
->fl_file
, request
);
4723 down_read(&nfsi
->rwsem
);
4724 if (test_bit(NFS_DELEGATED_STATE
, &state
->flags
)) {
4725 /* Yes: cache locks! */
4726 /* ...but avoid races with delegation recall... */
4727 request
->fl_flags
= fl_flags
& ~FL_SLEEP
;
4728 status
= do_vfs_lock(request
->fl_file
, request
);
4731 status
= _nfs4_do_setlk(state
, cmd
, request
, NFS_LOCK_NEW
);
4734 /* Note: we always want to sleep here! */
4735 request
->fl_flags
= fl_flags
| FL_SLEEP
;
4736 if (do_vfs_lock(request
->fl_file
, request
) < 0)
4737 printk(KERN_WARNING
"NFS: %s: VFS is out of sync with lock "
4738 "manager!\n", __func__
);
4740 up_read(&nfsi
->rwsem
);
4742 request
->fl_flags
= fl_flags
;
4746 static int nfs4_proc_setlk(struct nfs4_state
*state
, int cmd
, struct file_lock
*request
)
4748 struct nfs4_exception exception
= {
4750 .inode
= state
->inode
,
4755 err
= _nfs4_proc_setlk(state
, cmd
, request
);
4756 if (err
== -NFS4ERR_DENIED
)
4758 err
= nfs4_handle_exception(NFS_SERVER(state
->inode
),
4760 } while (exception
.retry
);
4765 nfs4_proc_lock(struct file
*filp
, int cmd
, struct file_lock
*request
)
4767 struct nfs_open_context
*ctx
;
4768 struct nfs4_state
*state
;
4769 unsigned long timeout
= NFS4_LOCK_MINTIMEOUT
;
4772 /* verify open state */
4773 ctx
= nfs_file_open_context(filp
);
4776 if (request
->fl_start
< 0 || request
->fl_end
< 0)
4779 if (IS_GETLK(cmd
)) {
4781 return nfs4_proc_getlk(state
, F_GETLK
, request
);
4785 if (!(IS_SETLK(cmd
) || IS_SETLKW(cmd
)))
4788 if (request
->fl_type
== F_UNLCK
) {
4790 return nfs4_proc_unlck(state
, cmd
, request
);
4797 * Don't rely on the VFS having checked the file open mode,
4798 * since it won't do this for flock() locks.
4800 switch (request
->fl_type
& (F_RDLCK
|F_WRLCK
|F_UNLCK
)) {
4802 if (!(filp
->f_mode
& FMODE_READ
))
4806 if (!(filp
->f_mode
& FMODE_WRITE
))
4811 status
= nfs4_proc_setlk(state
, cmd
, request
);
4812 if ((status
!= -EAGAIN
) || IS_SETLK(cmd
))
4814 timeout
= nfs4_set_lock_task_retry(timeout
);
4815 status
= -ERESTARTSYS
;
4818 } while(status
< 0);
4822 int nfs4_lock_delegation_recall(struct nfs4_state
*state
, struct file_lock
*fl
)
4824 struct nfs_server
*server
= NFS_SERVER(state
->inode
);
4825 struct nfs4_exception exception
= { };
4828 err
= nfs4_set_lock_state(state
, fl
);
4832 err
= _nfs4_do_setlk(state
, F_SETLK
, fl
, NFS_LOCK_NEW
);
4835 printk(KERN_ERR
"NFS: %s: unhandled error "
4836 "%d.\n", __func__
, err
);
4840 case -NFS4ERR_EXPIRED
:
4841 nfs4_schedule_stateid_recovery(server
, state
);
4842 case -NFS4ERR_STALE_CLIENTID
:
4843 case -NFS4ERR_STALE_STATEID
:
4844 nfs4_schedule_lease_recovery(server
->nfs_client
);
4846 case -NFS4ERR_BADSESSION
:
4847 case -NFS4ERR_BADSLOT
:
4848 case -NFS4ERR_BAD_HIGH_SLOT
:
4849 case -NFS4ERR_CONN_NOT_BOUND_TO_SESSION
:
4850 case -NFS4ERR_DEADSESSION
:
4851 nfs4_schedule_session_recovery(server
->nfs_client
->cl_session
, err
);
4855 * The show must go on: exit, but mark the
4856 * stateid as needing recovery.
4858 case -NFS4ERR_DELEG_REVOKED
:
4859 case -NFS4ERR_ADMIN_REVOKED
:
4860 case -NFS4ERR_BAD_STATEID
:
4861 case -NFS4ERR_OPENMODE
:
4862 nfs4_schedule_stateid_recovery(server
, state
);
4867 * User RPCSEC_GSS context has expired.
4868 * We cannot recover this stateid now, so
4869 * skip it and allow recovery thread to
4875 case -NFS4ERR_DENIED
:
4876 /* kill_proc(fl->fl_pid, SIGLOST, 1); */
4879 case -NFS4ERR_DELAY
:
4882 err
= nfs4_handle_exception(server
, err
, &exception
);
4883 } while (exception
.retry
);
4888 struct nfs_release_lockowner_data
{
4889 struct nfs4_lock_state
*lsp
;
4890 struct nfs_server
*server
;
4891 struct nfs_release_lockowner_args args
;
4894 static void nfs4_release_lockowner_release(void *calldata
)
4896 struct nfs_release_lockowner_data
*data
= calldata
;
4897 nfs4_free_lock_state(data
->server
, data
->lsp
);
4901 static const struct rpc_call_ops nfs4_release_lockowner_ops
= {
4902 .rpc_release
= nfs4_release_lockowner_release
,
4905 int nfs4_release_lockowner(struct nfs4_lock_state
*lsp
)
4907 struct nfs_server
*server
= lsp
->ls_state
->owner
->so_server
;
4908 struct nfs_release_lockowner_data
*data
;
4909 struct rpc_message msg
= {
4910 .rpc_proc
= &nfs4_procedures
[NFSPROC4_CLNT_RELEASE_LOCKOWNER
],
4913 if (server
->nfs_client
->cl_mvops
->minor_version
!= 0)
4915 data
= kmalloc(sizeof(*data
), GFP_NOFS
);
4919 data
->server
= server
;
4920 data
->args
.lock_owner
.clientid
= server
->nfs_client
->cl_clientid
;
4921 data
->args
.lock_owner
.id
= lsp
->ls_seqid
.owner_id
;
4922 data
->args
.lock_owner
.s_dev
= server
->s_dev
;
4923 msg
.rpc_argp
= &data
->args
;
4924 rpc_call_async(server
->client
, &msg
, 0, &nfs4_release_lockowner_ops
, data
);
#define XATTR_NAME_NFSV4_ACL "system.nfs4_acl"

static int nfs4_xattr_set_nfs4_acl(struct dentry *dentry, const char *key,
				   const void *buf, size_t buflen,
				   int flags, int type)
{
	if (strcmp(key, "") != 0)
		return -EINVAL;

	return nfs4_proc_set_acl(dentry->d_inode, buf, buflen);
}

static int nfs4_xattr_get_nfs4_acl(struct dentry *dentry, const char *key,
				   void *buf, size_t buflen, int type)
{
	if (strcmp(key, "") != 0)
		return -EINVAL;

	return nfs4_proc_get_acl(dentry->d_inode, buf, buflen);
}

static size_t nfs4_xattr_list_nfs4_acl(struct dentry *dentry, char *list,
				       size_t list_len, const char *name,
				       size_t name_len, int type)
{
	size_t len = sizeof(XATTR_NAME_NFSV4_ACL);

	if (!nfs4_server_supports_acls(NFS_SERVER(dentry->d_inode)))
		return 0;

	if (list && len <= list_len)
		memcpy(list, XATTR_NAME_NFSV4_ACL, len);
	return len;
}
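/*
 * Minimal userspace sketch (illustration only, not kernel code): the ACL
 * exposed by the handlers above can be read from an NFSv4 mount with
 * getxattr(2), e.g.:
 *
 *	char buf[4096];
 *	ssize_t len = getxattr("/mnt/nfs/file", "system.nfs4_acl",
 *			       buf, sizeof(buf));
 *	if (len < 0)
 *		perror("getxattr");
 *
 * The returned blob is the raw XDR-encoded NFSv4 ACL; userspace tools such
 * as nfs4_getfacl decode it.
 */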
4964 * nfs_fhget will use either the mounted_on_fileid or the fileid
4966 static void nfs_fixup_referral_attributes(struct nfs_fattr
*fattr
)
4968 if (!(((fattr
->valid
& NFS_ATTR_FATTR_MOUNTED_ON_FILEID
) ||
4969 (fattr
->valid
& NFS_ATTR_FATTR_FILEID
)) &&
4970 (fattr
->valid
& NFS_ATTR_FATTR_FSID
) &&
4971 (fattr
->valid
& NFS_ATTR_FATTR_V4_LOCATIONS
)))
4974 fattr
->valid
|= NFS_ATTR_FATTR_TYPE
| NFS_ATTR_FATTR_MODE
|
4975 NFS_ATTR_FATTR_NLINK
| NFS_ATTR_FATTR_V4_REFERRAL
;
4976 fattr
->mode
= S_IFDIR
| S_IRUGO
| S_IXUGO
;
4980 static int _nfs4_proc_fs_locations(struct rpc_clnt
*client
, struct inode
*dir
,
4981 const struct qstr
*name
,
4982 struct nfs4_fs_locations
*fs_locations
,
4985 struct nfs_server
*server
= NFS_SERVER(dir
);
4987 [0] = FATTR4_WORD0_FSID
| FATTR4_WORD0_FS_LOCATIONS
,
4989 struct nfs4_fs_locations_arg args
= {
4990 .dir_fh
= NFS_FH(dir
),
4995 struct nfs4_fs_locations_res res
= {
4996 .fs_locations
= fs_locations
,
4998 struct rpc_message msg
= {
4999 .rpc_proc
= &nfs4_procedures
[NFSPROC4_CLNT_FS_LOCATIONS
],
5005 dprintk("%s: start\n", __func__
);
5007 /* Ask for the fileid of the absent filesystem if mounted_on_fileid
5008 * is not supported */
5009 if (NFS_SERVER(dir
)->attr_bitmask
[1] & FATTR4_WORD1_MOUNTED_ON_FILEID
)
5010 bitmask
[1] |= FATTR4_WORD1_MOUNTED_ON_FILEID
;
5012 bitmask
[0] |= FATTR4_WORD0_FILEID
;
5014 nfs_fattr_init(&fs_locations
->fattr
);
5015 fs_locations
->server
= server
;
5016 fs_locations
->nlocations
= 0;
5017 status
= nfs4_call_sync(client
, server
, &msg
, &args
.seq_args
, &res
.seq_res
, 0);
5018 dprintk("%s: returned status = %d\n", __func__
, status
);
5022 int nfs4_proc_fs_locations(struct rpc_clnt
*client
, struct inode
*dir
,
5023 const struct qstr
*name
,
5024 struct nfs4_fs_locations
*fs_locations
,
5027 struct nfs4_exception exception
= { };
5030 err
= nfs4_handle_exception(NFS_SERVER(dir
),
5031 _nfs4_proc_fs_locations(client
, dir
, name
, fs_locations
, page
),
5033 } while (exception
.retry
);
5037 static int _nfs4_proc_secinfo(struct inode
*dir
, const struct qstr
*name
, struct nfs4_secinfo_flavors
*flavors
)
5040 struct nfs4_secinfo_arg args
= {
5041 .dir_fh
= NFS_FH(dir
),
5044 struct nfs4_secinfo_res res
= {
5047 struct rpc_message msg
= {
5048 .rpc_proc
= &nfs4_procedures
[NFSPROC4_CLNT_SECINFO
],
5053 dprintk("NFS call secinfo %s\n", name
->name
);
5054 status
= nfs4_call_sync(NFS_SERVER(dir
)->client
, NFS_SERVER(dir
), &msg
, &args
.seq_args
, &res
.seq_res
, 0);
5055 dprintk("NFS reply secinfo: %d\n", status
);
5059 int nfs4_proc_secinfo(struct inode
*dir
, const struct qstr
*name
,
5060 struct nfs4_secinfo_flavors
*flavors
)
5062 struct nfs4_exception exception
= { };
5065 err
= nfs4_handle_exception(NFS_SERVER(dir
),
5066 _nfs4_proc_secinfo(dir
, name
, flavors
),
5068 } while (exception
.retry
);
#ifdef CONFIG_NFS_V4_1
/*
 * Check the exchange flags returned by the server for invalid flags, having
 * both PNFS and NON_PNFS flags set, and not having one of NON_PNFS, PNFS, or
 * DS flags set.
 */
static int nfs4_check_cl_exchange_flags(u32 flags)
{
	if (flags & ~EXCHGID4_FLAG_MASK_R)
		goto out_inval;
	if ((flags & EXCHGID4_FLAG_USE_PNFS_MDS) &&
	    (flags & EXCHGID4_FLAG_USE_NON_PNFS))
		goto out_inval;
	if (!(flags & (EXCHGID4_FLAG_MASK_PNFS)))
		goto out_inval;
	return NFS_OK;
out_inval:
	return -NFS4ERR_INVAL;
}
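/*
 * Worked example (illustration derived from the checks above, assuming the
 * usual EXCHGID4_FLAG_* mask definitions):
 *
 *	EXCHGID4_FLAG_USE_PNFS_MDS                        -> NFS_OK
 *	EXCHGID4_FLAG_USE_NON_PNFS                        -> NFS_OK
 *	USE_PNFS_MDS | USE_NON_PNFS (contradictory roles) -> -NFS4ERR_INVAL
 *	0 (no pNFS role bit at all)                       -> -NFS4ERR_INVAL
 */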
static bool
nfs41_same_server_scope(struct nfs41_server_scope *a,
			struct nfs41_server_scope *b)
{
	if (a->server_scope_sz == b->server_scope_sz &&
	    memcmp(a->server_scope, b->server_scope, a->server_scope_sz) == 0)
		return true;

	return false;
}
5104 * nfs4_proc_bind_conn_to_session()
5106 * The 4.1 client currently uses the same TCP connection for the
5107 * fore and backchannel.
5109 int nfs4_proc_bind_conn_to_session(struct nfs_client
*clp
, struct rpc_cred
*cred
)
5112 struct nfs41_bind_conn_to_session_res res
;
5113 struct rpc_message msg
= {
5115 &nfs4_procedures
[NFSPROC4_CLNT_BIND_CONN_TO_SESSION
],
5121 dprintk("--> %s\n", __func__
);
5122 BUG_ON(clp
== NULL
);
5124 res
.session
= kzalloc(sizeof(struct nfs4_session
), GFP_NOFS
);
5125 if (unlikely(res
.session
== NULL
)) {
5130 status
= rpc_call_sync(clp
->cl_rpcclient
, &msg
, RPC_TASK_TIMEOUT
);
5132 if (memcmp(res
.session
->sess_id
.data
,
5133 clp
->cl_session
->sess_id
.data
, NFS4_MAX_SESSIONID_LEN
)) {
5134 dprintk("NFS: %s: Session ID mismatch\n", __func__
);
5138 if (res
.dir
!= NFS4_CDFS4_BOTH
) {
5139 dprintk("NFS: %s: Unexpected direction from server\n",
5144 if (res
.use_conn_in_rdma_mode
) {
5145 dprintk("NFS: %s: Server returned RDMA mode = true\n",
5154 dprintk("<-- %s status= %d\n", __func__
, status
);
5159 * nfs4_proc_exchange_id()
5161 * Since the clientid has expired, all compounds using sessions
5162 * associated with the stale clientid will be returning
5163 * NFS4ERR_BADSESSION in the sequence operation, and will therefore
5164 * be in some phase of session reset.
5166 int nfs4_proc_exchange_id(struct nfs_client
*clp
, struct rpc_cred
*cred
)
5168 nfs4_verifier verifier
;
5169 struct nfs41_exchange_id_args args
= {
5170 .verifier
= &verifier
,
5172 .flags
= EXCHGID4_FLAG_SUPP_MOVED_REFER
,
5174 struct nfs41_exchange_id_res res
= {
5178 struct rpc_message msg
= {
5179 .rpc_proc
= &nfs4_procedures
[NFSPROC4_CLNT_EXCHANGE_ID
],
5185 dprintk("--> %s\n", __func__
);
5186 BUG_ON(clp
== NULL
);
5188 nfs4_init_boot_verifier(clp
, &verifier
);
5190 args
.id_len
= scnprintf(args
.id
, sizeof(args
.id
),
5193 clp
->cl_rpcclient
->cl_nodename
,
5194 clp
->cl_rpcclient
->cl_auth
->au_flavor
);
5196 res
.server_owner
= kzalloc(sizeof(struct nfs41_server_owner
),
5198 if (unlikely(res
.server_owner
== NULL
)) {
5203 res
.server_scope
= kzalloc(sizeof(struct nfs41_server_scope
),
5205 if (unlikely(res
.server_scope
== NULL
)) {
5207 goto out_server_owner
;
5210 res
.impl_id
= kzalloc(sizeof(struct nfs41_impl_id
), GFP_NOFS
);
5211 if (unlikely(res
.impl_id
== NULL
)) {
5213 goto out_server_scope
;
5216 status
= rpc_call_sync(clp
->cl_rpcclient
, &msg
, RPC_TASK_TIMEOUT
);
5218 status
= nfs4_check_cl_exchange_flags(res
.flags
);
5221 clp
->cl_clientid
= res
.clientid
;
5222 clp
->cl_exchange_flags
= (res
.flags
& ~EXCHGID4_FLAG_CONFIRMED_R
);
5223 if (!(res
.flags
& EXCHGID4_FLAG_CONFIRMED_R
))
5224 clp
->cl_seqid
= res
.seqid
;
5226 kfree(clp
->cl_serverowner
);
5227 clp
->cl_serverowner
= res
.server_owner
;
5228 res
.server_owner
= NULL
;
5230 /* use the most recent implementation id */
5231 kfree(clp
->cl_implid
);
5232 clp
->cl_implid
= res
.impl_id
;
5234 if (clp
->cl_serverscope
!= NULL
&&
5235 !nfs41_same_server_scope(clp
->cl_serverscope
,
5236 res
.server_scope
)) {
5237 dprintk("%s: server_scope mismatch detected\n",
5239 set_bit(NFS4CLNT_SERVER_SCOPE_MISMATCH
, &clp
->cl_state
);
5240 kfree(clp
->cl_serverscope
);
5241 clp
->cl_serverscope
= NULL
;
5244 if (clp
->cl_serverscope
== NULL
) {
5245 clp
->cl_serverscope
= res
.server_scope
;
5252 kfree(res
.server_owner
);
5254 kfree(res
.server_scope
);
5256 if (clp
->cl_implid
!= NULL
)
5257 dprintk("%s: Server Implementation ID: "
5258 "domain: %s, name: %s, date: %llu,%u\n",
5259 __func__
, clp
->cl_implid
->domain
, clp
->cl_implid
->name
,
5260 clp
->cl_implid
->date
.seconds
,
5261 clp
->cl_implid
->date
.nseconds
);
5262 dprintk("<-- %s status= %d\n", __func__
, status
);
5266 static int _nfs4_proc_destroy_clientid(struct nfs_client
*clp
,
5267 struct rpc_cred
*cred
)
5269 struct rpc_message msg
= {
5270 .rpc_proc
= &nfs4_procedures
[NFSPROC4_CLNT_DESTROY_CLIENTID
],
5276 status
= rpc_call_sync(clp
->cl_rpcclient
, &msg
, RPC_TASK_TIMEOUT
);
5278 pr_warn("NFS: Got error %d from the server %s on "
5279 "DESTROY_CLIENTID.", status
, clp
->cl_hostname
);
5283 static int nfs4_proc_destroy_clientid(struct nfs_client
*clp
,
5284 struct rpc_cred
*cred
)
5289 for (loop
= NFS4_MAX_LOOP_ON_RECOVER
; loop
!= 0; loop
--) {
5290 ret
= _nfs4_proc_destroy_clientid(clp
, cred
);
5292 case -NFS4ERR_DELAY
:
5293 case -NFS4ERR_CLIENTID_BUSY
:
5303 int nfs4_destroy_clientid(struct nfs_client
*clp
)
5305 struct rpc_cred
*cred
;
5308 if (clp
->cl_mvops
->minor_version
< 1)
5310 if (clp
->cl_exchange_flags
== 0)
5312 cred
= nfs4_get_exchange_id_cred(clp
);
5313 ret
= nfs4_proc_destroy_clientid(clp
, cred
);
5318 case -NFS4ERR_STALE_CLIENTID
:
5319 clp
->cl_exchange_flags
= 0;
5325 struct nfs4_get_lease_time_data
{
5326 struct nfs4_get_lease_time_args
*args
;
5327 struct nfs4_get_lease_time_res
*res
;
5328 struct nfs_client
*clp
;
5331 static void nfs4_get_lease_time_prepare(struct rpc_task
*task
,
5335 struct nfs4_get_lease_time_data
*data
=
5336 (struct nfs4_get_lease_time_data
*)calldata
;
5338 dprintk("--> %s\n", __func__
);
5339 rpc_task_set_priority(task
, RPC_PRIORITY_PRIVILEGED
);
5340 /* just setup sequence, do not trigger session recovery
5341 since we're invoked within one */
5342 ret
= nfs41_setup_sequence(data
->clp
->cl_session
,
5343 &data
->args
->la_seq_args
,
5344 &data
->res
->lr_seq_res
, task
);
5346 BUG_ON(ret
== -EAGAIN
);
5347 rpc_call_start(task
);
5348 dprintk("<-- %s\n", __func__
);
5352 * Called from nfs4_state_manager thread for session setup, so don't recover
5353 * from sequence operation or clientid errors.
5355 static void nfs4_get_lease_time_done(struct rpc_task
*task
, void *calldata
)
5357 struct nfs4_get_lease_time_data
*data
=
5358 (struct nfs4_get_lease_time_data
*)calldata
;
5360 dprintk("--> %s\n", __func__
);
5361 if (!nfs41_sequence_done(task
, &data
->res
->lr_seq_res
))
5363 switch (task
->tk_status
) {
5364 case -NFS4ERR_DELAY
:
5365 case -NFS4ERR_GRACE
:
5366 dprintk("%s Retry: tk_status %d\n", __func__
, task
->tk_status
);
5367 rpc_delay(task
, NFS4_POLL_RETRY_MIN
);
5368 task
->tk_status
= 0;
5370 case -NFS4ERR_RETRY_UNCACHED_REP
:
5371 rpc_restart_call_prepare(task
);
5374 dprintk("<-- %s\n", __func__
);
5377 static const struct rpc_call_ops nfs4_get_lease_time_ops
= {
5378 .rpc_call_prepare
= nfs4_get_lease_time_prepare
,
5379 .rpc_call_done
= nfs4_get_lease_time_done
,
5382 int nfs4_proc_get_lease_time(struct nfs_client
*clp
, struct nfs_fsinfo
*fsinfo
)
5384 struct rpc_task
*task
;
5385 struct nfs4_get_lease_time_args args
;
5386 struct nfs4_get_lease_time_res res
= {
5387 .lr_fsinfo
= fsinfo
,
5389 struct nfs4_get_lease_time_data data
= {
5394 struct rpc_message msg
= {
5395 .rpc_proc
= &nfs4_procedures
[NFSPROC4_CLNT_GET_LEASE_TIME
],
5399 struct rpc_task_setup task_setup
= {
5400 .rpc_client
= clp
->cl_rpcclient
,
5401 .rpc_message
= &msg
,
5402 .callback_ops
= &nfs4_get_lease_time_ops
,
5403 .callback_data
= &data
,
5404 .flags
= RPC_TASK_TIMEOUT
,
5408 nfs41_init_sequence(&args
.la_seq_args
, &res
.lr_seq_res
, 0);
5409 dprintk("--> %s\n", __func__
);
5410 task
= rpc_run_task(&task_setup
);
5413 status
= PTR_ERR(task
);
5415 status
= task
->tk_status
;
5418 dprintk("<-- %s return %d\n", __func__
, status
);
5423 static struct nfs4_slot
*nfs4_alloc_slots(u32 max_slots
, gfp_t gfp_flags
)
5425 return kcalloc(max_slots
, sizeof(struct nfs4_slot
), gfp_flags
);
5428 static void nfs4_add_and_init_slots(struct nfs4_slot_table
*tbl
,
5429 struct nfs4_slot
*new,
5433 struct nfs4_slot
*old
= NULL
;
5436 spin_lock(&tbl
->slot_tbl_lock
);
5440 tbl
->max_slots
= max_slots
;
5442 tbl
->highest_used_slotid
= -1; /* no slot is currently used */
5443 for (i
= 0; i
< tbl
->max_slots
; i
++)
5444 tbl
->slots
[i
].seq_nr
= ivalue
;
5445 spin_unlock(&tbl
->slot_tbl_lock
);
5450 * (re)Initialise a slot table
5452 static int nfs4_realloc_slot_table(struct nfs4_slot_table
*tbl
, u32 max_reqs
,
5455 struct nfs4_slot
*new = NULL
;
5458 dprintk("--> %s: max_reqs=%u, tbl->max_slots %d\n", __func__
,
5459 max_reqs
, tbl
->max_slots
);
5461 /* Does the newly negotiated max_reqs match the existing slot table? */
5462 if (max_reqs
!= tbl
->max_slots
) {
5463 new = nfs4_alloc_slots(max_reqs
, GFP_NOFS
);
5469 nfs4_add_and_init_slots(tbl
, new, max_reqs
, ivalue
);
5470 dprintk("%s: tbl=%p slots=%p max_slots=%d\n", __func__
,
5471 tbl
, tbl
->slots
, tbl
->max_slots
);
5473 dprintk("<-- %s: return %d\n", __func__
, ret
);
5477 /* Destroy the slot table */
5478 static void nfs4_destroy_slot_tables(struct nfs4_session
*session
)
5480 if (session
->fc_slot_table
.slots
!= NULL
) {
5481 kfree(session
->fc_slot_table
.slots
);
5482 session
->fc_slot_table
.slots
= NULL
;
5484 if (session
->bc_slot_table
.slots
!= NULL
) {
5485 kfree(session
->bc_slot_table
.slots
);
5486 session
->bc_slot_table
.slots
= NULL
;
5492 * Initialize or reset the forechannel and backchannel tables
5494 static int nfs4_setup_session_slot_tables(struct nfs4_session
*ses
)
5496 struct nfs4_slot_table
*tbl
;
5499 dprintk("--> %s\n", __func__
);
5501 tbl
= &ses
->fc_slot_table
;
5502 status
= nfs4_realloc_slot_table(tbl
, ses
->fc_attrs
.max_reqs
, 1);
5503 if (status
) /* -ENOMEM */
5506 tbl
= &ses
->bc_slot_table
;
5507 status
= nfs4_realloc_slot_table(tbl
, ses
->bc_attrs
.max_reqs
, 0);
5508 if (status
&& tbl
->slots
== NULL
)
5509 /* Fore and back channel share a connection so get
5510 * both slot tables or neither */
5511 nfs4_destroy_slot_tables(ses
);
5515 struct nfs4_session
*nfs4_alloc_session(struct nfs_client
*clp
)
5517 struct nfs4_session
*session
;
5518 struct nfs4_slot_table
*tbl
;
5520 session
= kzalloc(sizeof(struct nfs4_session
), GFP_NOFS
);
5524 tbl
= &session
->fc_slot_table
;
5525 tbl
->highest_used_slotid
= NFS4_NO_SLOT
;
5526 spin_lock_init(&tbl
->slot_tbl_lock
);
5527 rpc_init_priority_wait_queue(&tbl
->slot_tbl_waitq
, "ForeChannel Slot table");
5528 init_completion(&tbl
->complete
);
5530 tbl
= &session
->bc_slot_table
;
5531 tbl
->highest_used_slotid
= NFS4_NO_SLOT
;
5532 spin_lock_init(&tbl
->slot_tbl_lock
);
5533 rpc_init_wait_queue(&tbl
->slot_tbl_waitq
, "BackChannel Slot table");
5534 init_completion(&tbl
->complete
);
5536 session
->session_state
= 1<<NFS4_SESSION_INITING
;
5542 void nfs4_destroy_session(struct nfs4_session
*session
)
5544 struct rpc_xprt
*xprt
;
5545 struct rpc_cred
*cred
;
5547 cred
= nfs4_get_exchange_id_cred(session
->clp
);
5548 nfs4_proc_destroy_session(session
, cred
);
5553 xprt
= rcu_dereference(session
->clp
->cl_rpcclient
->cl_xprt
);
5555 dprintk("%s Destroy backchannel for xprt %p\n",
5557 xprt_destroy_backchannel(xprt
, NFS41_BC_MIN_CALLBACKS
);
5558 nfs4_destroy_slot_tables(session
);
5563 * Initialize the values to be used by the client in CREATE_SESSION
5564 * If nfs4_init_session set the fore channel request and response sizes,
5567 * Set the back channel max_resp_sz_cached to zero to force the client to
5568 * always set csa_cachethis to FALSE because the current implementation
5569 * of the back channel DRC only supports caching the CB_SEQUENCE operation.
5571 static void nfs4_init_channel_attrs(struct nfs41_create_session_args
*args
)
5573 struct nfs4_session
*session
= args
->client
->cl_session
;
5574 unsigned int mxrqst_sz
= session
->fc_attrs
.max_rqst_sz
,
5575 mxresp_sz
= session
->fc_attrs
.max_resp_sz
;
5578 mxrqst_sz
= NFS_MAX_FILE_IO_SIZE
;
5580 mxresp_sz
= NFS_MAX_FILE_IO_SIZE
;
5581 /* Fore channel attributes */
5582 args
->fc_attrs
.max_rqst_sz
= mxrqst_sz
;
5583 args
->fc_attrs
.max_resp_sz
= mxresp_sz
;
5584 args
->fc_attrs
.max_ops
= NFS4_MAX_OPS
;
5585 args
->fc_attrs
.max_reqs
= max_session_slots
;
5587 dprintk("%s: Fore Channel : max_rqst_sz=%u max_resp_sz=%u "
5588 "max_ops=%u max_reqs=%u\n",
5590 args
->fc_attrs
.max_rqst_sz
, args
->fc_attrs
.max_resp_sz
,
5591 args
->fc_attrs
.max_ops
, args
->fc_attrs
.max_reqs
);
5593 /* Back channel attributes */
5594 args
->bc_attrs
.max_rqst_sz
= PAGE_SIZE
;
5595 args
->bc_attrs
.max_resp_sz
= PAGE_SIZE
;
5596 args
->bc_attrs
.max_resp_sz_cached
= 0;
5597 args
->bc_attrs
.max_ops
= NFS4_MAX_BACK_CHANNEL_OPS
;
5598 args
->bc_attrs
.max_reqs
= 1;
5600 dprintk("%s: Back Channel : max_rqst_sz=%u max_resp_sz=%u "
5601 "max_resp_sz_cached=%u max_ops=%u max_reqs=%u\n",
5603 args
->bc_attrs
.max_rqst_sz
, args
->bc_attrs
.max_resp_sz
,
5604 args
->bc_attrs
.max_resp_sz_cached
, args
->bc_attrs
.max_ops
,
5605 args
->bc_attrs
.max_reqs
);
5608 static int nfs4_verify_fore_channel_attrs(struct nfs41_create_session_args
*args
, struct nfs4_session
*session
)
5610 struct nfs4_channel_attrs
*sent
= &args
->fc_attrs
;
5611 struct nfs4_channel_attrs
*rcvd
= &session
->fc_attrs
;
5613 if (rcvd
->max_resp_sz
> sent
->max_resp_sz
)
5616 * Our requested max_ops is the minimum we need; we're not
5617 * prepared to break up compounds into smaller pieces than that.
5618 * So, no point even trying to continue if the server won't
5621 if (rcvd
->max_ops
< sent
->max_ops
)
5623 if (rcvd
->max_reqs
== 0)
5625 if (rcvd
->max_reqs
> NFS4_MAX_SLOT_TABLE
)
5626 rcvd
->max_reqs
= NFS4_MAX_SLOT_TABLE
;
5630 static int nfs4_verify_back_channel_attrs(struct nfs41_create_session_args
*args
, struct nfs4_session
*session
)
5632 struct nfs4_channel_attrs
*sent
= &args
->bc_attrs
;
5633 struct nfs4_channel_attrs
*rcvd
= &session
->bc_attrs
;
5635 if (rcvd
->max_rqst_sz
> sent
->max_rqst_sz
)
5637 if (rcvd
->max_resp_sz
< sent
->max_resp_sz
)
5639 if (rcvd
->max_resp_sz_cached
> sent
->max_resp_sz_cached
)
5641 /* These would render the backchannel useless: */
5642 if (rcvd
->max_ops
!= sent
->max_ops
)
5644 if (rcvd
->max_reqs
!= sent
->max_reqs
)
5649 static int nfs4_verify_channel_attrs(struct nfs41_create_session_args
*args
,
5650 struct nfs4_session
*session
)
5654 ret
= nfs4_verify_fore_channel_attrs(args
, session
);
5657 return nfs4_verify_back_channel_attrs(args
, session
);
5660 static int _nfs4_proc_create_session(struct nfs_client
*clp
,
5661 struct rpc_cred
*cred
)
5663 struct nfs4_session
*session
= clp
->cl_session
;
5664 struct nfs41_create_session_args args
= {
5666 .cb_program
= NFS4_CALLBACK
,
5668 struct nfs41_create_session_res res
= {
5671 struct rpc_message msg
= {
5672 .rpc_proc
= &nfs4_procedures
[NFSPROC4_CLNT_CREATE_SESSION
],
5679 nfs4_init_channel_attrs(&args
);
5680 args
.flags
= (SESSION4_PERSIST
| SESSION4_BACK_CHAN
);
5682 status
= rpc_call_sync(session
->clp
->cl_rpcclient
, &msg
, RPC_TASK_TIMEOUT
);
5685 /* Verify the session's negotiated channel_attrs values */
5686 status
= nfs4_verify_channel_attrs(&args
, session
);
5688 /* Increment the clientid slot sequence id */
5696 * Issues a CREATE_SESSION operation to the server.
5697 * It is the responsibility of the caller to verify the session is
5698 * expired before calling this routine.
5700 int nfs4_proc_create_session(struct nfs_client
*clp
, struct rpc_cred
*cred
)
5704 struct nfs4_session
*session
= clp
->cl_session
;
5706 dprintk("--> %s clp=%p session=%p\n", __func__
, clp
, session
);
5708 status
= _nfs4_proc_create_session(clp
, cred
);
5712 /* Init or reset the session slot tables */
5713 status
= nfs4_setup_session_slot_tables(session
);
5714 dprintk("slot table setup returned %d\n", status
);
5718 ptr
= (unsigned *)&session
->sess_id
.data
[0];
5719 dprintk("%s client>seqid %d sessionid %u:%u:%u:%u\n", __func__
,
5720 clp
->cl_seqid
, ptr
[0], ptr
[1], ptr
[2], ptr
[3]);
5722 dprintk("<-- %s\n", __func__
);
5727 * Issue the over-the-wire RPC DESTROY_SESSION.
5728 * The caller must serialize access to this routine.
5730 int nfs4_proc_destroy_session(struct nfs4_session
*session
,
5731 struct rpc_cred
*cred
)
5733 struct rpc_message msg
= {
5734 .rpc_proc
= &nfs4_procedures
[NFSPROC4_CLNT_DESTROY_SESSION
],
5735 .rpc_argp
= session
,
5740 dprintk("--> nfs4_proc_destroy_session\n");
5742 /* session is still being setup */
5743 if (session
->clp
->cl_cons_state
!= NFS_CS_READY
)
5746 status
= rpc_call_sync(session
->clp
->cl_rpcclient
, &msg
, RPC_TASK_TIMEOUT
);
5750 "NFS: Got error %d from the server on DESTROY_SESSION. "
5751 "Session has been destroyed regardless...\n", status
);
5753 dprintk("<-- nfs4_proc_destroy_session\n");
5758 * With sessions, the client is not marked ready until after a
5759 * successful EXCHANGE_ID and CREATE_SESSION.
5761 * Map errors cl_cons_state errors to EPROTONOSUPPORT to indicate
5762 * other versions of NFS can be tried.
5764 static int nfs41_check_session_ready(struct nfs_client
*clp
)
5768 if (clp
->cl_cons_state
== NFS_CS_SESSION_INITING
) {
5769 ret
= nfs4_client_recover_expired_lease(clp
);
5773 if (clp
->cl_cons_state
< NFS_CS_READY
)
5774 return -EPROTONOSUPPORT
;
5779 int nfs4_init_session(struct nfs_server
*server
)
5781 struct nfs_client
*clp
= server
->nfs_client
;
5782 struct nfs4_session
*session
;
5783 unsigned int rsize
, wsize
;
5785 if (!nfs4_has_session(clp
))
5788 session
= clp
->cl_session
;
5789 spin_lock(&clp
->cl_lock
);
5790 if (test_and_clear_bit(NFS4_SESSION_INITING
, &session
->session_state
)) {
5792 rsize
= server
->rsize
;
5794 rsize
= NFS_MAX_FILE_IO_SIZE
;
5795 wsize
= server
->wsize
;
5797 wsize
= NFS_MAX_FILE_IO_SIZE
;
5799 session
->fc_attrs
.max_rqst_sz
= wsize
+ nfs41_maxwrite_overhead
;
5800 session
->fc_attrs
.max_resp_sz
= rsize
+ nfs41_maxread_overhead
;
5802 spin_unlock(&clp
->cl_lock
);
5804 return nfs41_check_session_ready(clp
);
5807 int nfs4_init_ds_session(struct nfs_client
*clp
, unsigned long lease_time
)
5809 struct nfs4_session
*session
= clp
->cl_session
;
5812 spin_lock(&clp
->cl_lock
);
5813 if (test_and_clear_bit(NFS4_SESSION_INITING
, &session
->session_state
)) {
5815 * Do not set NFS_CS_CHECK_LEASE_TIME instead set the
5816 * DS lease to be equal to the MDS lease.
5818 clp
->cl_lease_time
= lease_time
;
5819 clp
->cl_last_renewal
= jiffies
;
5821 spin_unlock(&clp
->cl_lock
);
5823 ret
= nfs41_check_session_ready(clp
);
5826 /* Test for the DS role */
5827 if (!is_ds_client(clp
))
5831 EXPORT_SYMBOL_GPL(nfs4_init_ds_session
);
5835 * Renew the cl_session lease.
5837 struct nfs4_sequence_data
{
5838 struct nfs_client
*clp
;
5839 struct nfs4_sequence_args args
;
5840 struct nfs4_sequence_res res
;
5843 static void nfs41_sequence_release(void *data
)
5845 struct nfs4_sequence_data
*calldata
= data
;
5846 struct nfs_client
*clp
= calldata
->clp
;
5848 if (atomic_read(&clp
->cl_count
) > 1)
5849 nfs4_schedule_state_renewal(clp
);
5850 nfs_put_client(clp
);
5854 static int nfs41_sequence_handle_errors(struct rpc_task
*task
, struct nfs_client
*clp
)
5856 switch(task
->tk_status
) {
5857 case -NFS4ERR_DELAY
:
5858 rpc_delay(task
, NFS4_POLL_RETRY_MAX
);
5861 nfs4_schedule_lease_recovery(clp
);
5866 static void nfs41_sequence_call_done(struct rpc_task
*task
, void *data
)
5868 struct nfs4_sequence_data
*calldata
= data
;
5869 struct nfs_client
*clp
= calldata
->clp
;
5871 if (!nfs41_sequence_done(task
, task
->tk_msg
.rpc_resp
))
5874 if (task
->tk_status
< 0) {
5875 dprintk("%s ERROR %d\n", __func__
, task
->tk_status
);
5876 if (atomic_read(&clp
->cl_count
) == 1)
5879 if (nfs41_sequence_handle_errors(task
, clp
) == -EAGAIN
) {
5880 rpc_restart_call_prepare(task
);
5884 dprintk("%s rpc_cred %p\n", __func__
, task
->tk_msg
.rpc_cred
);
5886 dprintk("<-- %s\n", __func__
);
5889 static void nfs41_sequence_prepare(struct rpc_task
*task
, void *data
)
5891 struct nfs4_sequence_data
*calldata
= data
;
5892 struct nfs_client
*clp
= calldata
->clp
;
5893 struct nfs4_sequence_args
*args
;
5894 struct nfs4_sequence_res
*res
;
5896 args
= task
->tk_msg
.rpc_argp
;
5897 res
= task
->tk_msg
.rpc_resp
;
5899 if (nfs41_setup_sequence(clp
->cl_session
, args
, res
, task
))
5901 rpc_call_start(task
);
5904 static const struct rpc_call_ops nfs41_sequence_ops
= {
5905 .rpc_call_done
= nfs41_sequence_call_done
,
5906 .rpc_call_prepare
= nfs41_sequence_prepare
,
5907 .rpc_release
= nfs41_sequence_release
,
5910 static struct rpc_task
*_nfs41_proc_sequence(struct nfs_client
*clp
, struct rpc_cred
*cred
)
5912 struct nfs4_sequence_data
*calldata
;
5913 struct rpc_message msg
= {
5914 .rpc_proc
= &nfs4_procedures
[NFSPROC4_CLNT_SEQUENCE
],
5917 struct rpc_task_setup task_setup_data
= {
5918 .rpc_client
= clp
->cl_rpcclient
,
5919 .rpc_message
= &msg
,
5920 .callback_ops
= &nfs41_sequence_ops
,
5921 .flags
= RPC_TASK_ASYNC
| RPC_TASK_SOFT
,
5924 if (!atomic_inc_not_zero(&clp
->cl_count
))
5925 return ERR_PTR(-EIO
);
5926 calldata
= kzalloc(sizeof(*calldata
), GFP_NOFS
);
5927 if (calldata
== NULL
) {
5928 nfs_put_client(clp
);
5929 return ERR_PTR(-ENOMEM
);
5931 nfs41_init_sequence(&calldata
->args
, &calldata
->res
, 0);
5932 msg
.rpc_argp
= &calldata
->args
;
5933 msg
.rpc_resp
= &calldata
->res
;
5934 calldata
->clp
= clp
;
5935 task_setup_data
.callback_data
= calldata
;
5937 return rpc_run_task(&task_setup_data
);
5940 static int nfs41_proc_async_sequence(struct nfs_client
*clp
, struct rpc_cred
*cred
, unsigned renew_flags
)
5942 struct rpc_task
*task
;
5945 if ((renew_flags
& NFS4_RENEW_TIMEOUT
) == 0)
5947 task
= _nfs41_proc_sequence(clp
, cred
);
5949 ret
= PTR_ERR(task
);
5951 rpc_put_task_async(task
);
5952 dprintk("<-- %s status=%d\n", __func__
, ret
);
5956 static int nfs4_proc_sequence(struct nfs_client
*clp
, struct rpc_cred
*cred
)
5958 struct rpc_task
*task
;
5961 task
= _nfs41_proc_sequence(clp
, cred
);
5963 ret
= PTR_ERR(task
);
5966 ret
= rpc_wait_for_completion_task(task
);
5968 struct nfs4_sequence_res
*res
= task
->tk_msg
.rpc_resp
;
5970 if (task
->tk_status
== 0)
5971 nfs41_handle_sequence_flag_errors(clp
, res
->sr_status_flags
);
5972 ret
= task
->tk_status
;
5976 dprintk("<-- %s status=%d\n", __func__
, ret
);
5980 struct nfs4_reclaim_complete_data
{
5981 struct nfs_client
*clp
;
5982 struct nfs41_reclaim_complete_args arg
;
5983 struct nfs41_reclaim_complete_res res
;
5986 static void nfs4_reclaim_complete_prepare(struct rpc_task
*task
, void *data
)
5988 struct nfs4_reclaim_complete_data
*calldata
= data
;
5990 rpc_task_set_priority(task
, RPC_PRIORITY_PRIVILEGED
);
5991 if (nfs41_setup_sequence(calldata
->clp
->cl_session
,
5992 &calldata
->arg
.seq_args
,
5993 &calldata
->res
.seq_res
, task
))
5996 rpc_call_start(task
);
5999 static int nfs41_reclaim_complete_handle_errors(struct rpc_task
*task
, struct nfs_client
*clp
)
6001 switch(task
->tk_status
) {
6003 case -NFS4ERR_COMPLETE_ALREADY
:
6004 case -NFS4ERR_WRONG_CRED
: /* What to do here? */
6006 case -NFS4ERR_DELAY
:
6007 rpc_delay(task
, NFS4_POLL_RETRY_MAX
);
6009 case -NFS4ERR_RETRY_UNCACHED_REP
:
6012 nfs4_schedule_lease_recovery(clp
);
6017 static void nfs4_reclaim_complete_done(struct rpc_task
*task
, void *data
)
6019 struct nfs4_reclaim_complete_data
*calldata
= data
;
6020 struct nfs_client
*clp
= calldata
->clp
;
6021 struct nfs4_sequence_res
*res
= &calldata
->res
.seq_res
;
6023 dprintk("--> %s\n", __func__
);
6024 if (!nfs41_sequence_done(task
, res
))
6027 if (nfs41_reclaim_complete_handle_errors(task
, clp
) == -EAGAIN
) {
6028 rpc_restart_call_prepare(task
);
6031 dprintk("<-- %s\n", __func__
);
6034 static void nfs4_free_reclaim_complete_data(void *data
)
6036 struct nfs4_reclaim_complete_data
*calldata
= data
;
6041 static const struct rpc_call_ops nfs4_reclaim_complete_call_ops
= {
6042 .rpc_call_prepare
= nfs4_reclaim_complete_prepare
,
6043 .rpc_call_done
= nfs4_reclaim_complete_done
,
6044 .rpc_release
= nfs4_free_reclaim_complete_data
,
6048 * Issue a global reclaim complete.
6050 static int nfs41_proc_reclaim_complete(struct nfs_client
*clp
)
6052 struct nfs4_reclaim_complete_data
*calldata
;
6053 struct rpc_task
*task
;
6054 struct rpc_message msg
= {
6055 .rpc_proc
= &nfs4_procedures
[NFSPROC4_CLNT_RECLAIM_COMPLETE
],
6057 struct rpc_task_setup task_setup_data
= {
6058 .rpc_client
= clp
->cl_rpcclient
,
6059 .rpc_message
= &msg
,
6060 .callback_ops
= &nfs4_reclaim_complete_call_ops
,
6061 .flags
= RPC_TASK_ASYNC
,
6063 int status
= -ENOMEM
;
6065 dprintk("--> %s\n", __func__
);
6066 calldata
= kzalloc(sizeof(*calldata
), GFP_NOFS
);
6067 if (calldata
== NULL
)
6069 calldata
->clp
= clp
;
6070 calldata
->arg
.one_fs
= 0;
6072 nfs41_init_sequence(&calldata
->arg
.seq_args
, &calldata
->res
.seq_res
, 0);
6073 msg
.rpc_argp
= &calldata
->arg
;
6074 msg
.rpc_resp
= &calldata
->res
;
6075 task_setup_data
.callback_data
= calldata
;
6076 task
= rpc_run_task(&task_setup_data
);
6078 status
= PTR_ERR(task
);
6081 status
= nfs4_wait_for_completion_rpc_task(task
);
6083 status
= task
->tk_status
;
6087 dprintk("<-- %s status=%d\n", __func__
, status
);
6092 nfs4_layoutget_prepare(struct rpc_task
*task
, void *calldata
)
6094 struct nfs4_layoutget
*lgp
= calldata
;
6095 struct nfs_server
*server
= NFS_SERVER(lgp
->args
.inode
);
6097 dprintk("--> %s\n", __func__
);
	/* Note that there is a race here, where a CB_LAYOUTRECALL can come in
6099 * right now covering the LAYOUTGET we are about to send.
6100 * However, that is not so catastrophic, and there seems
6101 * to be no way to prevent it completely.
6103 if (nfs4_setup_sequence(server
, &lgp
->args
.seq_args
,
6104 &lgp
->res
.seq_res
, task
))
6106 if (pnfs_choose_layoutget_stateid(&lgp
->args
.stateid
,
6107 NFS_I(lgp
->args
.inode
)->layout
,
6108 lgp
->args
.ctx
->state
)) {
6109 rpc_exit(task
, NFS4_OK
);
6112 rpc_call_start(task
);
6115 static void nfs4_layoutget_done(struct rpc_task
*task
, void *calldata
)
6117 struct nfs4_layoutget
*lgp
= calldata
;
6118 struct nfs_server
*server
= NFS_SERVER(lgp
->args
.inode
);
6120 dprintk("--> %s\n", __func__
);
6122 if (!nfs4_sequence_done(task
, &lgp
->res
.seq_res
))
6125 switch (task
->tk_status
) {
6128 case -NFS4ERR_LAYOUTTRYLATER
:
6129 case -NFS4ERR_RECALLCONFLICT
:
6130 task
->tk_status
= -NFS4ERR_DELAY
;
6133 if (nfs4_async_handle_error(task
, server
, NULL
) == -EAGAIN
) {
6134 rpc_restart_call_prepare(task
);
6138 dprintk("<-- %s\n", __func__
);
6141 static void nfs4_layoutget_release(void *calldata
)
6143 struct nfs4_layoutget
*lgp
= calldata
;
6145 dprintk("--> %s\n", __func__
);
6146 put_nfs_open_context(lgp
->args
.ctx
);
6148 dprintk("<-- %s\n", __func__
);
6151 static const struct rpc_call_ops nfs4_layoutget_call_ops
= {
6152 .rpc_call_prepare
= nfs4_layoutget_prepare
,
6153 .rpc_call_done
= nfs4_layoutget_done
,
6154 .rpc_release
= nfs4_layoutget_release
,
6157 int nfs4_proc_layoutget(struct nfs4_layoutget
*lgp
)
6159 struct nfs_server
*server
= NFS_SERVER(lgp
->args
.inode
);
6160 struct rpc_task
*task
;
6161 struct rpc_message msg
= {
6162 .rpc_proc
= &nfs4_procedures
[NFSPROC4_CLNT_LAYOUTGET
],
6163 .rpc_argp
= &lgp
->args
,
6164 .rpc_resp
= &lgp
->res
,
6166 struct rpc_task_setup task_setup_data
= {
6167 .rpc_client
= server
->client
,
6168 .rpc_message
= &msg
,
6169 .callback_ops
= &nfs4_layoutget_call_ops
,
6170 .callback_data
= lgp
,
6171 .flags
= RPC_TASK_ASYNC
,
6175 dprintk("--> %s\n", __func__
);
6177 lgp
->res
.layoutp
= &lgp
->args
.layout
;
6178 lgp
->res
.seq_res
.sr_slot
= NULL
;
6179 nfs41_init_sequence(&lgp
->args
.seq_args
, &lgp
->res
.seq_res
, 0);
6180 task
= rpc_run_task(&task_setup_data
);
6182 return PTR_ERR(task
);
6183 status
= nfs4_wait_for_completion_rpc_task(task
);
6185 status
= task
->tk_status
;
6187 status
= pnfs_layout_process(lgp
);
6189 dprintk("<-- %s status=%d\n", __func__
, status
);
6194 nfs4_layoutreturn_prepare(struct rpc_task
*task
, void *calldata
)
6196 struct nfs4_layoutreturn
*lrp
= calldata
;
6198 dprintk("--> %s\n", __func__
);
6199 if (nfs41_setup_sequence(lrp
->clp
->cl_session
, &lrp
->args
.seq_args
,
6200 &lrp
->res
.seq_res
, task
))
6202 rpc_call_start(task
);
6205 static void nfs4_layoutreturn_done(struct rpc_task
*task
, void *calldata
)
6207 struct nfs4_layoutreturn
*lrp
= calldata
;
6208 struct nfs_server
*server
;
6209 struct pnfs_layout_hdr
*lo
= lrp
->args
.layout
;
6211 dprintk("--> %s\n", __func__
);
6213 if (!nfs4_sequence_done(task
, &lrp
->res
.seq_res
))
6216 server
= NFS_SERVER(lrp
->args
.inode
);
6217 if (nfs4_async_handle_error(task
, server
, NULL
) == -EAGAIN
) {
6218 rpc_restart_call_prepare(task
);
6221 spin_lock(&lo
->plh_inode
->i_lock
);
6222 if (task
->tk_status
== 0) {
6223 if (lrp
->res
.lrs_present
) {
6224 pnfs_set_layout_stateid(lo
, &lrp
->res
.stateid
, true);
6226 BUG_ON(!list_empty(&lo
->plh_segs
));
6228 lo
->plh_block_lgets
--;
6229 spin_unlock(&lo
->plh_inode
->i_lock
);
6230 dprintk("<-- %s\n", __func__
);
6233 static void nfs4_layoutreturn_release(void *calldata
)
6235 struct nfs4_layoutreturn
*lrp
= calldata
;
6237 dprintk("--> %s\n", __func__
);
6238 put_layout_hdr(lrp
->args
.layout
);
6240 dprintk("<-- %s\n", __func__
);
6243 static const struct rpc_call_ops nfs4_layoutreturn_call_ops
= {
6244 .rpc_call_prepare
= nfs4_layoutreturn_prepare
,
6245 .rpc_call_done
= nfs4_layoutreturn_done
,
6246 .rpc_release
= nfs4_layoutreturn_release
,
6249 int nfs4_proc_layoutreturn(struct nfs4_layoutreturn
*lrp
)
6251 struct rpc_task
*task
;
6252 struct rpc_message msg
= {
6253 .rpc_proc
= &nfs4_procedures
[NFSPROC4_CLNT_LAYOUTRETURN
],
6254 .rpc_argp
= &lrp
->args
,
6255 .rpc_resp
= &lrp
->res
,
6257 struct rpc_task_setup task_setup_data
= {
6258 .rpc_client
= lrp
->clp
->cl_rpcclient
,
6259 .rpc_message
= &msg
,
6260 .callback_ops
= &nfs4_layoutreturn_call_ops
,
6261 .callback_data
= lrp
,
6265 dprintk("--> %s\n", __func__
);
6266 nfs41_init_sequence(&lrp
->args
.seq_args
, &lrp
->res
.seq_res
, 1);
6267 task
= rpc_run_task(&task_setup_data
);
6269 return PTR_ERR(task
);
6270 status
= task
->tk_status
;
6271 dprintk("<-- %s status=%d\n", __func__
, status
);
6277 * Retrieve the list of Data Server devices from the MDS.
6279 static int _nfs4_getdevicelist(struct nfs_server
*server
,
6280 const struct nfs_fh
*fh
,
6281 struct pnfs_devicelist
*devlist
)
6283 struct nfs4_getdevicelist_args args
= {
6285 .layoutclass
= server
->pnfs_curr_ld
->id
,
6287 struct nfs4_getdevicelist_res res
= {
6290 struct rpc_message msg
= {
6291 .rpc_proc
= &nfs4_procedures
[NFSPROC4_CLNT_GETDEVICELIST
],
6297 dprintk("--> %s\n", __func__
);
6298 status
= nfs4_call_sync(server
->client
, server
, &msg
, &args
.seq_args
,
6300 dprintk("<-- %s status=%d\n", __func__
, status
);
6304 int nfs4_proc_getdevicelist(struct nfs_server
*server
,
6305 const struct nfs_fh
*fh
,
6306 struct pnfs_devicelist
*devlist
)
6308 struct nfs4_exception exception
= { };
6312 err
= nfs4_handle_exception(server
,
6313 _nfs4_getdevicelist(server
, fh
, devlist
),
6315 } while (exception
.retry
);
6317 dprintk("%s: err=%d, num_devs=%u\n", __func__
,
6318 err
, devlist
->num_devs
);
6322 EXPORT_SYMBOL_GPL(nfs4_proc_getdevicelist
);
6325 _nfs4_proc_getdeviceinfo(struct nfs_server
*server
, struct pnfs_device
*pdev
)
6327 struct nfs4_getdeviceinfo_args args
= {
6330 struct nfs4_getdeviceinfo_res res
= {
6333 struct rpc_message msg
= {
6334 .rpc_proc
= &nfs4_procedures
[NFSPROC4_CLNT_GETDEVICEINFO
],
6340 dprintk("--> %s\n", __func__
);
6341 status
= nfs4_call_sync(server
->client
, server
, &msg
, &args
.seq_args
, &res
.seq_res
, 0);
6342 dprintk("<-- %s status=%d\n", __func__
, status
);
6347 int nfs4_proc_getdeviceinfo(struct nfs_server
*server
, struct pnfs_device
*pdev
)
6349 struct nfs4_exception exception
= { };
6353 err
= nfs4_handle_exception(server
,
6354 _nfs4_proc_getdeviceinfo(server
, pdev
),
6356 } while (exception
.retry
);
6359 EXPORT_SYMBOL_GPL(nfs4_proc_getdeviceinfo
);
6361 static void nfs4_layoutcommit_prepare(struct rpc_task
*task
, void *calldata
)
6363 struct nfs4_layoutcommit_data
*data
= calldata
;
6364 struct nfs_server
*server
= NFS_SERVER(data
->args
.inode
);
6366 if (nfs4_setup_sequence(server
, &data
->args
.seq_args
,
6367 &data
->res
.seq_res
, task
))
6369 rpc_call_start(task
);
6373 nfs4_layoutcommit_done(struct rpc_task
*task
, void *calldata
)
6375 struct nfs4_layoutcommit_data
*data
= calldata
;
6376 struct nfs_server
*server
= NFS_SERVER(data
->args
.inode
);
6378 if (!nfs4_sequence_done(task
, &data
->res
.seq_res
))
6381 switch (task
->tk_status
) { /* Just ignore these failures */
6382 case -NFS4ERR_DELEG_REVOKED
: /* layout was recalled */
6383 case -NFS4ERR_BADIOMODE
: /* no IOMODE_RW layout for range */
6384 case -NFS4ERR_BADLAYOUT
: /* no layout */
6385 case -NFS4ERR_GRACE
:	/* loca_reclaim always false */
6386 task
->tk_status
= 0;
6389 nfs_post_op_update_inode_force_wcc(data
->args
.inode
,
6393 if (nfs4_async_handle_error(task
, server
, NULL
) == -EAGAIN
) {
6394 rpc_restart_call_prepare(task
);
6400 static void nfs4_layoutcommit_release(void *calldata
)
6402 struct nfs4_layoutcommit_data
*data
= calldata
;
6403 struct pnfs_layout_segment
*lseg
, *tmp
;
6404 unsigned long *bitlock
= &NFS_I(data
->args
.inode
)->flags
;
6406 pnfs_cleanup_layoutcommit(data
);
6407 /* Matched by references in pnfs_set_layoutcommit */
6408 list_for_each_entry_safe(lseg
, tmp
, &data
->lseg_list
, pls_lc_list
) {
6409 list_del_init(&lseg
->pls_lc_list
);
6410 if (test_and_clear_bit(NFS_LSEG_LAYOUTCOMMIT
,
6415 clear_bit_unlock(NFS_INO_LAYOUTCOMMITTING
, bitlock
);
6416 smp_mb__after_clear_bit();
6417 wake_up_bit(bitlock
, NFS_INO_LAYOUTCOMMITTING
);
6419 put_rpccred(data
->cred
);
6423 static const struct rpc_call_ops nfs4_layoutcommit_ops
= {
6424 .rpc_call_prepare
= nfs4_layoutcommit_prepare
,
6425 .rpc_call_done
= nfs4_layoutcommit_done
,
6426 .rpc_release
= nfs4_layoutcommit_release
,
6430 nfs4_proc_layoutcommit(struct nfs4_layoutcommit_data
*data
, bool sync
)
6432 struct rpc_message msg
= {
6433 .rpc_proc
= &nfs4_procedures
[NFSPROC4_CLNT_LAYOUTCOMMIT
],
6434 .rpc_argp
= &data
->args
,
6435 .rpc_resp
= &data
->res
,
6436 .rpc_cred
= data
->cred
,
6438 struct rpc_task_setup task_setup_data
= {
6439 .task
= &data
->task
,
6440 .rpc_client
= NFS_CLIENT(data
->args
.inode
),
6441 .rpc_message
= &msg
,
6442 .callback_ops
= &nfs4_layoutcommit_ops
,
6443 .callback_data
= data
,
6444 .flags
= RPC_TASK_ASYNC
,
6446 struct rpc_task
*task
;
6449 dprintk("NFS: %4d initiating layoutcommit call. sync %d "
6450 "lbw: %llu inode %lu\n",
6451 data
->task
.tk_pid
, sync
,
6452 data
->args
.lastbytewritten
,
6453 data
->args
.inode
->i_ino
);
6455 nfs41_init_sequence(&data
->args
.seq_args
, &data
->res
.seq_res
, 1);
6456 task
= rpc_run_task(&task_setup_data
);
6458 return PTR_ERR(task
);
6461 status
= nfs4_wait_for_completion_rpc_task(task
);
6464 status
= task
->tk_status
;
6466 dprintk("%s: status %d\n", __func__
, status
);
6472 _nfs41_proc_secinfo_no_name(struct nfs_server
*server
, struct nfs_fh
*fhandle
,
6473 struct nfs_fsinfo
*info
, struct nfs4_secinfo_flavors
*flavors
)
6475 struct nfs41_secinfo_no_name_args args
= {
6476 .style
= SECINFO_STYLE_CURRENT_FH
,
6478 struct nfs4_secinfo_res res
= {
6481 struct rpc_message msg
= {
6482 .rpc_proc
= &nfs4_procedures
[NFSPROC4_CLNT_SECINFO_NO_NAME
],
6486 return nfs4_call_sync(server
->client
, server
, &msg
, &args
.seq_args
, &res
.seq_res
, 0);
6490 nfs41_proc_secinfo_no_name(struct nfs_server
*server
, struct nfs_fh
*fhandle
,
6491 struct nfs_fsinfo
*info
, struct nfs4_secinfo_flavors
*flavors
)
6493 struct nfs4_exception exception
= { };
6496 err
= _nfs41_proc_secinfo_no_name(server
, fhandle
, info
, flavors
);
6499 case -NFS4ERR_WRONGSEC
:
6500 case -NFS4ERR_NOTSUPP
:
6503 err
= nfs4_handle_exception(server
, err
, &exception
);
6505 } while (exception
.retry
);
6511 nfs41_find_root_sec(struct nfs_server
*server
, struct nfs_fh
*fhandle
,
6512 struct nfs_fsinfo
*info
)
6516 rpc_authflavor_t flavor
;
6517 struct nfs4_secinfo_flavors
*flavors
;
6519 page
= alloc_page(GFP_KERNEL
);
6525 flavors
= page_address(page
);
6526 err
= nfs41_proc_secinfo_no_name(server
, fhandle
, info
, flavors
);
6529 * Fall back on "guess and check" method if
6530 * the server doesn't support SECINFO_NO_NAME
6532 if (err
== -NFS4ERR_WRONGSEC
|| err
== -NFS4ERR_NOTSUPP
) {
6533 err
= nfs4_find_root_sec(server
, fhandle
, info
);
6539 flavor
= nfs_find_best_sec(flavors
);
6541 err
= nfs4_lookup_root_sec(server
, fhandle
, info
, flavor
);
6551 static int _nfs41_test_stateid(struct nfs_server
*server
, nfs4_stateid
*stateid
)
6554 struct nfs41_test_stateid_args args
= {
6557 struct nfs41_test_stateid_res res
;
6558 struct rpc_message msg
= {
6559 .rpc_proc
= &nfs4_procedures
[NFSPROC4_CLNT_TEST_STATEID
],
6564 nfs41_init_sequence(&args
.seq_args
, &res
.seq_res
, 0);
6565 status
= nfs4_call_sync_sequence(server
->client
, server
, &msg
, &args
.seq_args
, &res
.seq_res
, 1);
6567 if (status
== NFS_OK
)
6572 static int nfs41_test_stateid(struct nfs_server
*server
, nfs4_stateid
*stateid
)
6574 struct nfs4_exception exception
= { };
6577 err
= nfs4_handle_exception(server
,
6578 _nfs41_test_stateid(server
, stateid
),
6580 } while (exception
.retry
);
6584 static int _nfs4_free_stateid(struct nfs_server
*server
, nfs4_stateid
*stateid
)
6586 struct nfs41_free_stateid_args args
= {
6589 struct nfs41_free_stateid_res res
;
6590 struct rpc_message msg
= {
6591 .rpc_proc
= &nfs4_procedures
[NFSPROC4_CLNT_FREE_STATEID
],
6596 nfs41_init_sequence(&args
.seq_args
, &res
.seq_res
, 0);
6597 return nfs4_call_sync_sequence(server
->client
, server
, &msg
, &args
.seq_args
, &res
.seq_res
, 1);
6600 static int nfs41_free_stateid(struct nfs_server
*server
, nfs4_stateid
*stateid
)
6602 struct nfs4_exception exception
= { };
6605 err
= nfs4_handle_exception(server
,
6606 _nfs4_free_stateid(server
, stateid
),
6608 } while (exception
.retry
);
6612 static bool nfs41_match_stateid(const nfs4_stateid
*s1
,
6613 const nfs4_stateid
*s2
)
6615 if (memcmp(s1
->other
, s2
->other
, sizeof(s1
->other
)) != 0)
6618 if (s1
->seqid
== s2
->seqid
)
6620 if (s1
->seqid
== 0 || s2
->seqid
== 0)
6626 #endif /* CONFIG_NFS_V4_1 */
6628 static bool nfs4_match_stateid(const nfs4_stateid
*s1
,
6629 const nfs4_stateid
*s2
)
6631 return nfs4_stateid_match(s1
, s2
);
static const struct nfs4_state_recovery_ops nfs40_reboot_recovery_ops = {
	.owner_flag_bit = NFS_OWNER_RECLAIM_REBOOT,
	.state_flag_bit = NFS_STATE_RECLAIM_REBOOT,
	.recover_open = nfs4_open_reclaim,
	.recover_lock = nfs4_lock_reclaim,
	.establish_clid = nfs4_init_clientid,
	.get_clid_cred = nfs4_get_setclientid_cred,
};
#if defined(CONFIG_NFS_V4_1)
static const struct nfs4_state_recovery_ops nfs41_reboot_recovery_ops = {
	.owner_flag_bit = NFS_OWNER_RECLAIM_REBOOT,
	.state_flag_bit = NFS_STATE_RECLAIM_REBOOT,
	.recover_open = nfs4_open_reclaim,
	.recover_lock = nfs4_lock_reclaim,
	.establish_clid = nfs41_init_clientid,
	.get_clid_cred = nfs4_get_exchange_id_cred,
	.reclaim_complete = nfs41_proc_reclaim_complete,
};
#endif /* CONFIG_NFS_V4_1 */
static const struct nfs4_state_recovery_ops nfs40_nograce_recovery_ops = {
	.owner_flag_bit = NFS_OWNER_RECLAIM_NOGRACE,
	.state_flag_bit = NFS_STATE_RECLAIM_NOGRACE,
	.recover_open = nfs4_open_expired,
	.recover_lock = nfs4_lock_expired,
	.establish_clid = nfs4_init_clientid,
	.get_clid_cred = nfs4_get_setclientid_cred,
};
#if defined(CONFIG_NFS_V4_1)
static const struct nfs4_state_recovery_ops nfs41_nograce_recovery_ops = {
	.owner_flag_bit = NFS_OWNER_RECLAIM_NOGRACE,
	.state_flag_bit = NFS_STATE_RECLAIM_NOGRACE,
	.recover_open = nfs41_open_expired,
	.recover_lock = nfs41_lock_expired,
	.establish_clid = nfs41_init_clientid,
	.get_clid_cred = nfs4_get_exchange_id_cred,
};
#endif /* CONFIG_NFS_V4_1 */
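
/*
 * Lease maintenance: NFSv4.0 renews the lease with explicit RENEW
 * calls, whereas NFSv4.1 relies on SEQUENCE traffic on the session.
 */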
static const struct nfs4_state_maintenance_ops nfs40_state_renewal_ops = {
	.sched_state_renewal = nfs4_proc_async_renew,
	.get_state_renewal_cred_locked = nfs4_get_renew_cred_locked,
	.renew_lease = nfs4_proc_renew,
};
#if defined(CONFIG_NFS_V4_1)
static const struct nfs4_state_maintenance_ops nfs41_state_renewal_ops = {
	.sched_state_renewal = nfs41_proc_async_sequence,
	.get_state_renewal_cred_locked = nfs4_get_machine_cred_locked,
	.renew_lease = nfs4_proc_sequence,
};
#endif /* CONFIG_NFS_V4_1 */
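
/*
 * Per-minor-version dispatch tables: the sync call path, stateid
 * comparison, root security probing and recovery/renewal behaviour
 * are all selected through the entry matching the mounted minor
 * version.
 */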
static const struct nfs4_minor_version_ops nfs_v4_0_minor_ops = {
	.minor_version = 0,
	.call_sync = _nfs4_call_sync,
	.match_stateid = nfs4_match_stateid,
	.find_root_sec = nfs4_find_root_sec,
	.reboot_recovery_ops = &nfs40_reboot_recovery_ops,
	.nograce_recovery_ops = &nfs40_nograce_recovery_ops,
	.state_renewal_ops = &nfs40_state_renewal_ops,
};
#if defined(CONFIG_NFS_V4_1)
static const struct nfs4_minor_version_ops nfs_v4_1_minor_ops = {
	.minor_version = 1,
	.call_sync = _nfs4_call_sync_session,
	.match_stateid = nfs41_match_stateid,
	.find_root_sec = nfs41_find_root_sec,
	.reboot_recovery_ops = &nfs41_reboot_recovery_ops,
	.nograce_recovery_ops = &nfs41_nograce_recovery_ops,
	.state_renewal_ops = &nfs41_state_renewal_ops,
};
#endif /* CONFIG_NFS_V4_1 */
const struct nfs4_minor_version_ops *nfs_v4_minor_ops[] = {
	[0] = &nfs_v4_0_minor_ops,
#if defined(CONFIG_NFS_V4_1)
	[1] = &nfs_v4_1_minor_ops,
#endif
};
static const struct inode_operations nfs4_file_inode_operations = {
	.permission = nfs_permission,
	.getattr = nfs_getattr,
	.setattr = nfs_setattr,
	.getxattr = generic_getxattr,
	.setxattr = generic_setxattr,
	.listxattr = generic_listxattr,
	.removexattr = generic_removexattr,
};
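
/*
 * nfs_v4_clientops is the nfs_rpc_ops vector handed to the generic
 * NFS client; every VFS-facing operation on an NFSv4 mount is routed
 * through the NFSv4 implementations listed here.
 */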
const struct nfs_rpc_ops nfs_v4_clientops = {
	.version	= 4,			/* protocol version */
	.dentry_ops	= &nfs4_dentry_operations,
	.dir_inode_ops	= &nfs4_dir_inode_operations,
	.file_inode_ops	= &nfs4_file_inode_operations,
	.file_ops	= &nfs4_file_operations,
	.getroot	= nfs4_proc_get_root,
	.submount	= nfs4_submount,
	.getattr	= nfs4_proc_getattr,
	.setattr	= nfs4_proc_setattr,
	.lookup		= nfs4_proc_lookup,
	.access		= nfs4_proc_access,
	.readlink	= nfs4_proc_readlink,
	.create		= nfs4_proc_create,
	.remove		= nfs4_proc_remove,
	.unlink_setup	= nfs4_proc_unlink_setup,
	.unlink_rpc_prepare = nfs4_proc_unlink_rpc_prepare,
	.unlink_done	= nfs4_proc_unlink_done,
	.rename		= nfs4_proc_rename,
	.rename_setup	= nfs4_proc_rename_setup,
	.rename_rpc_prepare = nfs4_proc_rename_rpc_prepare,
	.rename_done	= nfs4_proc_rename_done,
	.link		= nfs4_proc_link,
	.symlink	= nfs4_proc_symlink,
	.mkdir		= nfs4_proc_mkdir,
	.rmdir		= nfs4_proc_remove,
	.readdir	= nfs4_proc_readdir,
	.mknod		= nfs4_proc_mknod,
	.statfs		= nfs4_proc_statfs,
	.fsinfo		= nfs4_proc_fsinfo,
	.pathconf	= nfs4_proc_pathconf,
	.set_capabilities = nfs4_server_capabilities,
	.decode_dirent	= nfs4_decode_dirent,
	.read_setup	= nfs4_proc_read_setup,
	.read_rpc_prepare = nfs4_proc_read_rpc_prepare,
	.read_done	= nfs4_read_done,
	.write_setup	= nfs4_proc_write_setup,
	.write_rpc_prepare = nfs4_proc_write_rpc_prepare,
	.write_done	= nfs4_write_done,
	.commit_setup	= nfs4_proc_commit_setup,
	.commit_rpc_prepare = nfs4_proc_commit_rpc_prepare,
	.commit_done	= nfs4_commit_done,
	.lock		= nfs4_proc_lock,
	.clear_acl_cache = nfs4_zap_acl_attr,
	.close_context	= nfs4_close_context,
	.open_context	= nfs4_atomic_open,
	.init_client	= nfs4_init_client,
};
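
/*
 * The NFSv4 ACL is exposed to userspace as an extended attribute
 * (XATTR_NAME_NFSV4_ACL) through the generic xattr interface.
 */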
static const struct xattr_handler nfs4_xattr_nfs4_acl_handler = {
	.prefix	= XATTR_NAME_NFSV4_ACL,
	.list	= nfs4_xattr_list_nfs4_acl,
	.get	= nfs4_xattr_get_nfs4_acl,
	.set	= nfs4_xattr_set_nfs4_acl,
};
const struct xattr_handler *nfs4_xattr_handlers[] = {
	&nfs4_xattr_nfs4_acl_handler,
	NULL
};
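
/*
 * max_session_slots caps the number of NFSv4.1 session slots (and so
 * the number of concurrent requests) the client will negotiate; as a
 * 0644 module parameter it can also be read and tuned via sysfs.
 */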
module_param(max_session_slots, ushort, 0644);
MODULE_PARM_DESC(max_session_slots, "Maximum number of outstanding NFSv4.1 "
		"requests the client will negotiate");