/*
 *  Copyright (c) 2001 The Regents of the University of Michigan.
 *  All rights reserved.
 *
 *  Kendrick Smith <kmsmith@umich.edu>
 *  Andy Adamson <kandros@umich.edu>
 *
 *  Redistribution and use in source and binary forms, with or without
 *  modification, are permitted provided that the following conditions
 *  are met:
 *
 *  1. Redistributions of source code must retain the above copyright
 *     notice, this list of conditions and the following disclaimer.
 *  2. Redistributions in binary form must reproduce the above copyright
 *     notice, this list of conditions and the following disclaimer in the
 *     documentation and/or other materials provided with the distribution.
 *  3. Neither the name of the University nor the names of its
 *     contributors may be used to endorse or promote products derived
 *     from this software without specific prior written permission.
 *
 *  THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
 *  WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
 *  MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 *  DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 *  FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 *  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 *  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
 *  BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
 *  LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
 *  NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
 *  SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/slab.h>
#include <linux/namei.h>
#include <linux/swap.h>
#include <linux/pagemap.h>
#include <linux/ratelimit.h>
#include <linux/sunrpc/svcauth_gss.h>
#include <linux/sunrpc/addr.h>
#include <linux/hash.h>
#include "xdr4.h"
#include "xdr4cb.h"
#include "vfs.h"
#include "current_stateid.h"

#include "netns.h"

#define NFSDDBG_FACILITY                NFSDDBG_PROC
#define all_ones {{~0,~0},~0}
static const stateid_t one_stateid = {
        .si_generation = ~0,
        .si_opaque = all_ones,
};
static const stateid_t zero_stateid = {
        /* all fields zero */
};
static const stateid_t currentstateid = {
        .si_generation = 1,
};

static u64 current_sessionid = 1;

#define ZERO_STATEID(stateid) (!memcmp((stateid), &zero_stateid, sizeof(stateid_t)))
#define ONE_STATEID(stateid)  (!memcmp((stateid), &one_stateid, sizeof(stateid_t)))
#define CURRENT_STATEID(stateid) (!memcmp((stateid), &currentstateid, sizeof(stateid_t)))
/* forward declarations */
static bool check_for_locks(struct nfs4_file *fp, struct nfs4_lockowner *lowner);
static void nfs4_free_ol_stateid(struct nfs4_stid *stid);
/*
 * Currently used for the del_recall_lru and file hash table.  In an
 * effort to decrease the scope of the client_mutex, this spinlock may
 * eventually cover more:
 */
static DEFINE_SPINLOCK(state_lock);
/*
 * A waitqueue for all in-progress 4.0 CLOSE operations that are waiting for
 * the refcount on the open stateid to drop.
 */
static DECLARE_WAIT_QUEUE_HEAD(close_wq);
static struct kmem_cache *openowner_slab;
static struct kmem_cache *lockowner_slab;
static struct kmem_cache *file_slab;
static struct kmem_cache *stateid_slab;
static struct kmem_cache *deleg_slab;

static void free_session(struct nfsd4_session *);
static bool is_session_dead(struct nfsd4_session *ses)
{
        return ses->se_flags & NFS4_SESSION_DEAD;
}
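
/*
 * Mark a session dead, failing with nfserr_jukebox if anyone other than
 * the caller still holds a reference to it; the session is then freed
 * once the last reference is dropped.
 */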
static __be32 mark_session_dead_locked(struct nfsd4_session *ses, int ref_held_by_me)
{
        if (atomic_read(&ses->se_ref) > ref_held_by_me)
                return nfserr_jukebox;
        ses->se_flags |= NFS4_SESSION_DEAD;
        return nfs_ok;
}
static bool is_client_expired(struct nfs4_client *clp)
{
        return clp->cl_time == 0;
}
static __be32 get_client_locked(struct nfs4_client *clp)
{
        struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);

        lockdep_assert_held(&nn->client_lock);

        if (is_client_expired(clp))
                return nfserr_expired;
        atomic_inc(&clp->cl_refcount);
        return nfs_ok;
}
/* must be called under the client_lock */
static inline void
renew_client_locked(struct nfs4_client *clp)
{
        struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);

        if (is_client_expired(clp)) {
                WARN_ON(1);
                printk("%s: client (clientid %08x/%08x) already expired\n",
                        __func__,
                        clp->cl_clientid.cl_boot,
                        clp->cl_clientid.cl_id);
                return;
        }

        dprintk("renewing client (clientid %08x/%08x)\n",
                        clp->cl_clientid.cl_boot,
                        clp->cl_clientid.cl_id);
        list_move_tail(&clp->cl_lru, &nn->client_lru);
        clp->cl_time = get_seconds();
}
static void
renew_client(struct nfs4_client *clp)
{
        struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);

        spin_lock(&nn->client_lock);
        renew_client_locked(clp);
        spin_unlock(&nn->client_lock);
}
static void put_client_renew_locked(struct nfs4_client *clp)
{
        struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);

        lockdep_assert_held(&nn->client_lock);

        if (!atomic_dec_and_test(&clp->cl_refcount))
                return;
        if (!is_client_expired(clp))
                renew_client_locked(clp);
}
static void put_client_renew(struct nfs4_client *clp)
{
        struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);

        if (!atomic_dec_and_lock(&clp->cl_refcount, &nn->client_lock))
                return;
        if (!is_client_expired(clp))
                renew_client_locked(clp);
        spin_unlock(&nn->client_lock);
}
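
/*
 * Take a reference on a session (and on its client) for use under the
 * client_lock.  Fails if the session has already been marked dead.
 */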
static __be32 nfsd4_get_session_locked(struct nfsd4_session *ses)
{
        __be32 status;

        if (is_session_dead(ses))
                return nfserr_badsession;
        status = get_client_locked(ses->se_client);
        if (status)
                return status;
        atomic_inc(&ses->se_ref);
        return nfs_ok;
}
static void nfsd4_put_session_locked(struct nfsd4_session *ses)
{
        struct nfs4_client *clp = ses->se_client;
        struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);

        lockdep_assert_held(&nn->client_lock);

        if (atomic_dec_and_test(&ses->se_ref) && is_session_dead(ses))
                free_session(ses);
        put_client_renew_locked(clp);
}
static void nfsd4_put_session(struct nfsd4_session *ses)
{
        struct nfs4_client *clp = ses->se_client;
        struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);

        spin_lock(&nn->client_lock);
        nfsd4_put_session_locked(ses);
        spin_unlock(&nn->client_lock);
}
static bool
same_owner_str(struct nfs4_stateowner *sop, struct xdr_netobj *owner)
{
        return (sop->so_owner.len == owner->len) &&
                0 == memcmp(sop->so_owner.data, owner->data, owner->len);
}
static struct nfs4_openowner *
find_openstateowner_str_locked(unsigned int hashval, struct nfsd4_open *open,
                        struct nfs4_client *clp)
{
        struct nfs4_stateowner *so;

        lockdep_assert_held(&clp->cl_lock);

        list_for_each_entry(so, &clp->cl_ownerstr_hashtbl[hashval],
                            so_strhash) {
                if (!so->so_is_open_owner)
                        continue;
                if (same_owner_str(so, &open->op_owner)) {
                        atomic_inc(&so->so_count);
                        return openowner(so);
                }
        }
        return NULL;
}
static struct nfs4_openowner *
find_openstateowner_str(unsigned int hashval, struct nfsd4_open *open,
                        struct nfs4_client *clp)
{
        struct nfs4_openowner *oo;

        spin_lock(&clp->cl_lock);
        oo = find_openstateowner_str_locked(hashval, open, clp);
        spin_unlock(&clp->cl_lock);
        return oo;
}
static inline u32
opaque_hashval(const void *ptr, int nbytes)
{
        unsigned char *cptr = (unsigned char *) ptr;
        u32 x = 0;

        while (nbytes--) {
                x *= 37;
                x += *cptr++;
        }
        return x;
}
static void nfsd4_free_file(struct nfs4_file *f)
{
        kmem_cache_free(file_slab, f);
}
static void
put_nfs4_file(struct nfs4_file *fi)
{
        might_lock(&state_lock);

        if (atomic_dec_and_lock(&fi->fi_ref, &state_lock)) {
                hlist_del(&fi->fi_hash);
                spin_unlock(&state_lock);
                nfsd4_free_file(fi);
        }
}
static inline void
get_nfs4_file(struct nfs4_file *fi)
{
        atomic_inc(&fi->fi_ref);
}
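
/*
 * Return a fresh reference to the struct file cached for @oflag, or NULL
 * if none is cached.  Callers hold fp->fi_lock so that the fi_fds[] slot
 * cannot change underneath them.
 */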
static struct file *
__nfs4_get_fd(struct nfs4_file *f, int oflag)
{
        if (f->fi_fds[oflag])
                return get_file(f->fi_fds[oflag]);
        return NULL;
}
static struct file *
find_writeable_file_locked(struct nfs4_file *f)
{
        struct file *ret;

        lockdep_assert_held(&f->fi_lock);

        ret = __nfs4_get_fd(f, O_WRONLY);
        if (!ret)
                ret = __nfs4_get_fd(f, O_RDWR);
        return ret;
}
static struct file *
find_writeable_file(struct nfs4_file *f)
{
        struct file *ret;

        spin_lock(&f->fi_lock);
        ret = find_writeable_file_locked(f);
        spin_unlock(&f->fi_lock);
        return ret;
}
static struct file *find_readable_file_locked(struct nfs4_file *f)
{
        struct file *ret;

        lockdep_assert_held(&f->fi_lock);

        ret = __nfs4_get_fd(f, O_RDONLY);
        if (!ret)
                ret = __nfs4_get_fd(f, O_RDWR);
        return ret;
}
static struct file *
find_readable_file(struct nfs4_file *f)
{
        struct file *ret;

        spin_lock(&f->fi_lock);
        ret = find_readable_file_locked(f);
        spin_unlock(&f->fi_lock);
        return ret;
}
static struct file *
find_any_file(struct nfs4_file *f)
{
        struct file *ret;

        spin_lock(&f->fi_lock);
        ret = __nfs4_get_fd(f, O_RDWR);
        if (!ret) {
                ret = __nfs4_get_fd(f, O_WRONLY);
                if (!ret)
                        ret = __nfs4_get_fd(f, O_RDONLY);
        }
        spin_unlock(&f->fi_lock);
        return ret;
}
static atomic_long_t num_delegations;
unsigned long max_delegations;
/*
 * Open owner state (share locks)
 */

/* hash tables for lock and open owners */
#define OWNER_HASH_BITS              8
#define OWNER_HASH_SIZE             (1 << OWNER_HASH_BITS)
#define OWNER_HASH_MASK             (OWNER_HASH_SIZE - 1)
static unsigned int ownerstr_hashval(struct xdr_netobj *ownername)
{
        unsigned int ret;

        ret = opaque_hashval(ownername->data, ownername->len);
        return ret & OWNER_HASH_MASK;
}
/* hash table for nfs4_file */
#define FILE_HASH_BITS                   8
#define FILE_HASH_SIZE                  (1 << FILE_HASH_BITS)

static unsigned int nfsd_fh_hashval(struct knfsd_fh *fh)
{
        return jhash2(fh->fh_base.fh_pad, XDR_QUADLEN(fh->fh_size), 0);
}

static unsigned int file_hashval(struct knfsd_fh *fh)
{
        return nfsd_fh_hashval(fh) & (FILE_HASH_SIZE - 1);
}
static bool nfsd_fh_match(struct knfsd_fh *fh1, struct knfsd_fh *fh2)
{
        return fh1->fh_size == fh2->fh_size &&
                !memcmp(fh1->fh_base.fh_pad,
                        fh2->fh_base.fh_pad,
                        fh1->fh_size);
}
static struct hlist_head file_hashtbl[FILE_HASH_SIZE];
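
/*
 * Bump the per-open-mode access refcounts on the file.  The caller has
 * already verified, under fp->fi_lock, that the access mode is valid and
 * does not conflict with an existing deny mode.
 */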
static void
__nfs4_file_get_access(struct nfs4_file *fp, u32 access)
{
        lockdep_assert_held(&fp->fi_lock);

        if (access & NFS4_SHARE_ACCESS_WRITE)
                atomic_inc(&fp->fi_access[O_WRONLY]);
        if (access & NFS4_SHARE_ACCESS_READ)
                atomic_inc(&fp->fi_access[O_RDONLY]);
}
static __be32
nfs4_file_get_access(struct nfs4_file *fp, u32 access)
{
        lockdep_assert_held(&fp->fi_lock);

        /* Does this access mode make sense? */
        if (access & ~NFS4_SHARE_ACCESS_BOTH)
                return nfserr_inval;

        /* Does it conflict with a deny mode already set? */
        if ((access & fp->fi_share_deny) != 0)
                return nfserr_share_denied;

        __nfs4_file_get_access(fp, access);
        return nfs_ok;
}
static __be32 nfs4_file_check_deny(struct nfs4_file *fp, u32 deny)
{
        /* Common case is that there is no deny mode. */
        if (deny) {
                /* Does this deny mode make sense? */
                if (deny & ~NFS4_SHARE_DENY_BOTH)
                        return nfserr_inval;

                if ((deny & NFS4_SHARE_DENY_READ) &&
                    atomic_read(&fp->fi_access[O_RDONLY]))
                        return nfserr_share_denied;

                if ((deny & NFS4_SHARE_DENY_WRITE) &&
                    atomic_read(&fp->fi_access[O_WRONLY]))
                        return nfserr_share_denied;
        }
        return nfs_ok;
}
static void __nfs4_file_put_access(struct nfs4_file *fp, int oflag)
{
        might_lock(&fp->fi_lock);

        if (atomic_dec_and_lock(&fp->fi_access[oflag], &fp->fi_lock)) {
                struct file *f1 = NULL;
                struct file *f2 = NULL;

                swap(f1, fp->fi_fds[oflag]);
                if (atomic_read(&fp->fi_access[1 - oflag]) == 0)
                        swap(f2, fp->fi_fds[O_RDWR]);
                spin_unlock(&fp->fi_lock);
                if (f1)
                        fput(f1);
                if (f2)
                        fput(f2);
        }
}
static void nfs4_file_put_access(struct nfs4_file *fp, u32 access)
{
        WARN_ON_ONCE(access & ~NFS4_SHARE_ACCESS_BOTH);

        if (access & NFS4_SHARE_ACCESS_WRITE)
                __nfs4_file_put_access(fp, O_WRONLY);
        if (access & NFS4_SHARE_ACCESS_READ)
                __nfs4_file_put_access(fp, O_RDONLY);
}
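
/*
 * Allocate a new stateid of the given slab type and install it in the
 * client's stateid idr under a cyclically increasing id (see the comment
 * below on reuse of opaque stateid values).
 */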
static struct nfs4_stid *nfs4_alloc_stid(struct nfs4_client *cl,
                                         struct kmem_cache *slab)
{
        struct nfs4_stid *stid;
        int new_id;

        stid = kmem_cache_zalloc(slab, GFP_KERNEL);
        if (!stid)
                return NULL;

        idr_preload(GFP_KERNEL);
        spin_lock(&cl->cl_lock);
        new_id = idr_alloc_cyclic(&cl->cl_stateids, stid, 0, 0, GFP_NOWAIT);
        spin_unlock(&cl->cl_lock);
        idr_preload_end();
        if (new_id < 0)
                goto out_free;
        stid->sc_client = cl;
        stid->sc_stateid.si_opaque.so_id = new_id;
        stid->sc_stateid.si_opaque.so_clid = cl->cl_clientid;
        /* Will be incremented before return to client: */
        atomic_set(&stid->sc_count, 1);

        /*
         * It shouldn't be a problem to reuse an opaque stateid value.
         * I don't think it is for 4.1.  But with 4.0 I worry that, for
         * example, a stray write retransmission could be accepted by
         * the server when it should have been rejected.  Therefore,
         * adopt a trick from the sctp code to attempt to maximize the
         * amount of time until an id is reused, by ensuring they always
         * "increase" (mod INT_MAX):
         */
        return stid;
out_free:
        kmem_cache_free(slab, stid);
        return NULL;
}
static struct nfs4_ol_stateid * nfs4_alloc_open_stateid(struct nfs4_client *clp)
{
        struct nfs4_stid *stid;
        struct nfs4_ol_stateid *stp;

        stid = nfs4_alloc_stid(clp, stateid_slab);
        if (!stid)
                return NULL;
        stp = openlockstateid(stid);
        stp->st_stid.sc_free = nfs4_free_ol_stateid;
        return stp;
}
static void nfs4_free_deleg(struct nfs4_stid *stid)
{
        kmem_cache_free(deleg_slab, stid);
        atomic_long_dec(&num_delegations);
}
/*
 * When we recall a delegation, we should be careful not to hand it
 * out again straight away.
 * To ensure this we keep a pair of bloom filters ('new' and 'old')
 * in which the filehandles of recalled delegations are "stored".
 * If a filehandle appears in either filter, a delegation is blocked.
 * When a delegation is recalled, the filehandle is stored in the "new"
 * filter.
 * Every 30 seconds we swap the filters and clear the "new" one,
 * unless both are empty of course.
 *
 * Each filter is 256 bits.  We hash the filehandle to 32bit and use the
 * low 3 bytes as hash-table indices.
 *
 * 'blocked_delegations_lock', which is always taken in block_delegations(),
 * is used to manage concurrent access.  Testing does not need the lock
 * except when swapping the two filters.
 */
static DEFINE_SPINLOCK(blocked_delegations_lock);
static struct bloom_pair {
        int     entries, old_entries;
        time_t  swap_time;
        int     new;    /* index into 'set' */
        DECLARE_BITMAP(set[2], 256);
} blocked_delegations;
static int delegation_blocked(struct knfsd_fh *fh)
{
        u32 hash;
        struct bloom_pair *bd = &blocked_delegations;

        if (bd->entries == 0)
                return 0;
        if (seconds_since_boot() - bd->swap_time > 30) {
                spin_lock(&blocked_delegations_lock);
                if (seconds_since_boot() - bd->swap_time > 30) {
                        bd->entries -= bd->old_entries;
                        bd->old_entries = bd->entries;
                        memset(bd->set[bd->new], 0,
                               sizeof(bd->set[0]));
                        bd->new = 1-bd->new;
                        bd->swap_time = seconds_since_boot();
                }
                spin_unlock(&blocked_delegations_lock);
        }
        hash = arch_fast_hash(&fh->fh_base, fh->fh_size, 0);
        if (test_bit(hash&255, bd->set[0]) &&
            test_bit((hash>>8)&255, bd->set[0]) &&
            test_bit((hash>>16)&255, bd->set[0]))
                return 1;

        if (test_bit(hash&255, bd->set[1]) &&
            test_bit((hash>>8)&255, bd->set[1]) &&
            test_bit((hash>>16)&255, bd->set[1]))
                return 1;

        return 0;
}
static void block_delegations(struct knfsd_fh *fh)
{
        u32 hash;
        struct bloom_pair *bd = &blocked_delegations;

        hash = arch_fast_hash(&fh->fh_base, fh->fh_size, 0);

        spin_lock(&blocked_delegations_lock);
        __set_bit(hash&255, bd->set[bd->new]);
        __set_bit((hash>>8)&255, bd->set[bd->new]);
        __set_bit((hash>>16)&255, bd->set[bd->new]);
        if (bd->entries == 0)
                bd->swap_time = seconds_since_boot();
        bd->entries += 1;
        spin_unlock(&blocked_delegations_lock);
}
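
/*
 * Allocate a delegation stateid, subject to both the global
 * max_delegations limit and the recalled-delegation bloom filter above.
 */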
static struct nfs4_delegation *
alloc_init_deleg(struct nfs4_client *clp, struct svc_fh *current_fh)
{
        struct nfs4_delegation *dp;
        long n;

        dprintk("NFSD alloc_init_deleg\n");
        n = atomic_long_inc_return(&num_delegations);
        if (n < 0 || n > max_delegations)
                goto out_dec;
        if (delegation_blocked(&current_fh->fh_handle))
                goto out_dec;
        dp = delegstateid(nfs4_alloc_stid(clp, deleg_slab));
        if (dp == NULL)
                goto out_dec;

        dp->dl_stid.sc_free = nfs4_free_deleg;
        /*
         * delegation seqid's are never incremented.  The 4.1 special
         * meaning of seqid 0 isn't meaningful, really, but let's avoid
         * 0 anyway just for consistency and use 1:
         */
        dp->dl_stid.sc_stateid.si_generation = 1;
        INIT_LIST_HEAD(&dp->dl_perfile);
        INIT_LIST_HEAD(&dp->dl_perclnt);
        INIT_LIST_HEAD(&dp->dl_recall_lru);
        dp->dl_type = NFS4_OPEN_DELEGATE_READ;
        INIT_WORK(&dp->dl_recall.cb_work, nfsd4_run_cb_recall);
        return dp;
out_dec:
        atomic_long_dec(&num_delegations);
        return NULL;
}
void
nfs4_put_stid(struct nfs4_stid *s)
{
        struct nfs4_file *fp = s->sc_file;
        struct nfs4_client *clp = s->sc_client;

        might_lock(&clp->cl_lock);

        if (!atomic_dec_and_lock(&s->sc_count, &clp->cl_lock)) {
                wake_up_all(&close_wq);
                return;
        }
        idr_remove(&clp->cl_stateids, s->sc_stateid.si_opaque.so_id);
        spin_unlock(&clp->cl_lock);
        s->sc_free(s);
        if (fp)
                put_nfs4_file(fp);
}
static void nfs4_put_deleg_lease(struct nfs4_file *fp)
{
        lockdep_assert_held(&state_lock);

        if (!fp->fi_lease)
                return;
        if (atomic_dec_and_test(&fp->fi_delegees)) {
                vfs_setlease(fp->fi_deleg_file, F_UNLCK, &fp->fi_lease);
                fp->fi_lease = NULL;
                fput(fp->fi_deleg_file);
                fp->fi_deleg_file = NULL;
        }
}
static void unhash_stid(struct nfs4_stid *s)
{
        s->sc_type = 0;
}
static void
hash_delegation_locked(struct nfs4_delegation *dp, struct nfs4_file *fp)
{
        lockdep_assert_held(&state_lock);
        lockdep_assert_held(&fp->fi_lock);

        atomic_inc(&dp->dl_stid.sc_count);
        dp->dl_stid.sc_type = NFS4_DELEG_STID;
        list_add(&dp->dl_perfile, &fp->fi_delegations);
        list_add(&dp->dl_perclnt, &dp->dl_stid.sc_client->cl_delegations);
}
static void
unhash_delegation_locked(struct nfs4_delegation *dp)
{
        struct nfs4_file *fp = dp->dl_stid.sc_file;

        lockdep_assert_held(&state_lock);

        dp->dl_stid.sc_type = NFS4_CLOSED_DELEG_STID;
        /* Ensure that deleg break won't try to requeue it */
        ++dp->dl_time;
        spin_lock(&fp->fi_lock);
        list_del_init(&dp->dl_perclnt);
        list_del_init(&dp->dl_recall_lru);
        list_del_init(&dp->dl_perfile);
        spin_unlock(&fp->fi_lock);
        if (fp)
                nfs4_put_deleg_lease(fp);
}
static void destroy_delegation(struct nfs4_delegation *dp)
{
        spin_lock(&state_lock);
        unhash_delegation_locked(dp);
        spin_unlock(&state_lock);
        nfs4_put_stid(&dp->dl_stid);
}
static void revoke_delegation(struct nfs4_delegation *dp)
{
        struct nfs4_client *clp = dp->dl_stid.sc_client;

        WARN_ON(!list_empty(&dp->dl_recall_lru));

        if (clp->cl_minorversion == 0)
                nfs4_put_stid(&dp->dl_stid);
        else {
                dp->dl_stid.sc_type = NFS4_REVOKED_DELEG_STID;
                spin_lock(&clp->cl_lock);
                list_add(&dp->dl_recall_lru, &clp->cl_revoked);
                spin_unlock(&clp->cl_lock);
        }
}
static unsigned int clientid_hashval(u32 id)
{
        return id & CLIENT_HASH_MASK;
}

static unsigned int clientstr_hashval(const char *name)
{
        return opaque_hashval(name, 8) & CLIENT_HASH_MASK;
}
/*
 * We store the NONE, READ, WRITE, and BOTH bits separately in the
 * st_{access,deny}_bmap field of the stateid, in order to track not
 * only what share bits are currently in force, but also what
 * combinations of share bits previous opens have used.  This allows us
 * to enforce the recommendation of rfc 3530 14.2.19 that the server
 * return an error if the client attempts to downgrade to a combination
 * of share bits not explicable by closing some of its previous opens.
 *
 * XXX: This enforcement is actually incomplete, since we don't keep
 * track of access/deny bit combinations; so, e.g., we allow:
 *
 *      OPEN allow read, deny write
 *      OPEN allow both, deny none
 *      DOWNGRADE allow read, deny none
 *
 * which we should reject.
 */
static unsigned int
bmap_to_share_mode(unsigned long bmap) {
        int i;
        unsigned int access = 0;

        for (i = 1; i < 4; i++) {
                if (test_bit(i, &bmap))
                        access |= i;
        }
        return access;
}
/* set share access for a given stateid */
static inline void
set_access(u32 access, struct nfs4_ol_stateid *stp)
{
        unsigned char mask = 1 << access;

        WARN_ON_ONCE(access > NFS4_SHARE_ACCESS_BOTH);
        stp->st_access_bmap |= mask;
}

/* clear share access for a given stateid */
static inline void
clear_access(u32 access, struct nfs4_ol_stateid *stp)
{
        unsigned char mask = 1 << access;

        WARN_ON_ONCE(access > NFS4_SHARE_ACCESS_BOTH);
        stp->st_access_bmap &= ~mask;
}

/* test whether a given stateid has access */
static inline bool
test_access(u32 access, struct nfs4_ol_stateid *stp)
{
        unsigned char mask = 1 << access;

        return (bool)(stp->st_access_bmap & mask);
}

/* set share deny for a given stateid */
static inline void
set_deny(u32 deny, struct nfs4_ol_stateid *stp)
{
        unsigned char mask = 1 << deny;

        WARN_ON_ONCE(deny > NFS4_SHARE_DENY_BOTH);
        stp->st_deny_bmap |= mask;
}

/* clear share deny for a given stateid */
static inline void
clear_deny(u32 deny, struct nfs4_ol_stateid *stp)
{
        unsigned char mask = 1 << deny;

        WARN_ON_ONCE(deny > NFS4_SHARE_DENY_BOTH);
        stp->st_deny_bmap &= ~mask;
}

/* test whether a given stateid is denying specific access */
static inline bool
test_deny(u32 deny, struct nfs4_ol_stateid *stp)
{
        unsigned char mask = 1 << deny;

        return (bool)(stp->st_deny_bmap & mask);
}
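
/* Map an NFSv4 share access mode onto the equivalent open(2) O_* mode. */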
static int nfs4_access_to_omode(u32 access)
{
        switch (access & NFS4_SHARE_ACCESS_BOTH) {
        case NFS4_SHARE_ACCESS_READ:
                return O_RDONLY;
        case NFS4_SHARE_ACCESS_WRITE:
                return O_WRONLY;
        case NFS4_SHARE_ACCESS_BOTH:
                return O_RDWR;
        }
        WARN_ON_ONCE(1);
        return O_RDONLY;
}
/*
 * A stateid that had a deny mode associated with it is being released
 * or downgraded. Recalculate the deny mode on the file.
 */
static void
recalculate_deny_mode(struct nfs4_file *fp)
{
        struct nfs4_ol_stateid *stp;

        spin_lock(&fp->fi_lock);
        fp->fi_share_deny = 0;
        list_for_each_entry(stp, &fp->fi_stateids, st_perfile)
                fp->fi_share_deny |= bmap_to_share_mode(stp->st_deny_bmap);
        spin_unlock(&fp->fi_lock);
}
static void
reset_union_bmap_deny(u32 deny, struct nfs4_ol_stateid *stp)
{
        int i;
        bool change = false;

        for (i = 1; i < 4; i++) {
                if ((i & deny) != i) {
                        change = true;
                        clear_deny(i, stp);
                }
        }

        /* Recalculate per-file deny mode if there was a change */
        if (change)
                recalculate_deny_mode(stp->st_stid.sc_file);
}
/* release all access and file references for a given stateid */
static void
release_all_access(struct nfs4_ol_stateid *stp)
{
        int i;
        struct nfs4_file *fp = stp->st_stid.sc_file;

        if (fp && stp->st_deny_bmap != 0)
                recalculate_deny_mode(fp);

        for (i = 1; i < 4; i++) {
                if (test_access(i, stp))
                        nfs4_file_put_access(stp->st_stid.sc_file, i);
                clear_access(i, stp);
        }
}
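
/*
 * Release a reference on a stateowner; on the last put the owner is
 * unhashed under cl_lock and then freed via its so_ops callbacks.
 */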
static void nfs4_put_stateowner(struct nfs4_stateowner *sop)
{
        struct nfs4_client *clp = sop->so_client;

        might_lock(&clp->cl_lock);

        if (!atomic_dec_and_lock(&sop->so_count, &clp->cl_lock))
                return;
        sop->so_ops->so_unhash(sop);
        spin_unlock(&clp->cl_lock);
        kfree(sop->so_owner.data);
        sop->so_ops->so_free(sop);
}
static void unhash_ol_stateid(struct nfs4_ol_stateid *stp)
{
        struct nfs4_file *fp = stp->st_stid.sc_file;

        lockdep_assert_held(&stp->st_stateowner->so_client->cl_lock);

        spin_lock(&fp->fi_lock);
        list_del(&stp->st_perfile);
        spin_unlock(&fp->fi_lock);
        list_del(&stp->st_perstateowner);
}
static void nfs4_free_ol_stateid(struct nfs4_stid *stid)
{
        struct nfs4_ol_stateid *stp = openlockstateid(stid);

        release_all_access(stp);
        if (stp->st_stateowner)
                nfs4_put_stateowner(stp->st_stateowner);
        kmem_cache_free(stateid_slab, stid);
}
static void nfs4_free_lock_stateid(struct nfs4_stid *stid)
{
        struct nfs4_ol_stateid *stp = openlockstateid(stid);
        struct nfs4_lockowner *lo = lockowner(stp->st_stateowner);
        struct file *file;

        file = find_any_file(stp->st_stid.sc_file);
        if (file)
                filp_close(file, (fl_owner_t)lo);
        nfs4_free_ol_stateid(stid);
}
/*
 * Put the persistent reference to an already unhashed generic stateid, while
 * holding the cl_lock. If it's the last reference, then put it onto the
 * reaplist for later destruction.
 */
static void put_ol_stateid_locked(struct nfs4_ol_stateid *stp,
                                       struct list_head *reaplist)
{
        struct nfs4_stid *s = &stp->st_stid;
        struct nfs4_client *clp = s->sc_client;

        lockdep_assert_held(&clp->cl_lock);

        WARN_ON_ONCE(!list_empty(&stp->st_locks));

        if (!atomic_dec_and_test(&s->sc_count)) {
                wake_up_all(&close_wq);
                return;
        }
        idr_remove(&clp->cl_stateids, s->sc_stateid.si_opaque.so_id);
        list_add(&stp->st_locks, reaplist);
}
static void unhash_lock_stateid(struct nfs4_ol_stateid *stp)
{
        struct nfs4_openowner *oo = openowner(stp->st_openstp->st_stateowner);

        lockdep_assert_held(&oo->oo_owner.so_client->cl_lock);

        list_del_init(&stp->st_locks);
        unhash_ol_stateid(stp);
        unhash_stid(&stp->st_stid);
}
static void release_lock_stateid(struct nfs4_ol_stateid *stp)
{
        struct nfs4_openowner *oo = openowner(stp->st_openstp->st_stateowner);

        spin_lock(&oo->oo_owner.so_client->cl_lock);
        unhash_lock_stateid(stp);
        spin_unlock(&oo->oo_owner.so_client->cl_lock);
        nfs4_put_stid(&stp->st_stid);
}
static void unhash_lockowner_locked(struct nfs4_lockowner *lo)
{
        struct nfs4_client *clp = lo->lo_owner.so_client;

        lockdep_assert_held(&clp->cl_lock);

        list_del_init(&lo->lo_owner.so_strhash);
}
/*
 * Free a list of generic stateids that were collected earlier after being
 * unhashed.
 */
static void
free_ol_stateid_reaplist(struct list_head *reaplist)
{
        struct nfs4_ol_stateid *stp;
        struct nfs4_file *fp;

        might_sleep();

        while (!list_empty(reaplist)) {
                stp = list_first_entry(reaplist, struct nfs4_ol_stateid,
                                       st_locks);
                list_del(&stp->st_locks);
                fp = stp->st_stid.sc_file;
                stp->st_stid.sc_free(&stp->st_stid);
                if (fp)
                        put_nfs4_file(fp);
        }
}
static void release_lockowner(struct nfs4_lockowner *lo)
{
        struct nfs4_client *clp = lo->lo_owner.so_client;
        struct nfs4_ol_stateid *stp;
        struct list_head reaplist;

        INIT_LIST_HEAD(&reaplist);

        spin_lock(&clp->cl_lock);
        unhash_lockowner_locked(lo);
        while (!list_empty(&lo->lo_owner.so_stateids)) {
                stp = list_first_entry(&lo->lo_owner.so_stateids,
                                struct nfs4_ol_stateid, st_perstateowner);
                unhash_lock_stateid(stp);
                put_ol_stateid_locked(stp, &reaplist);
        }
        spin_unlock(&clp->cl_lock);
        free_ol_stateid_reaplist(&reaplist);
        nfs4_put_stateowner(&lo->lo_owner);
}
static void release_open_stateid_locks(struct nfs4_ol_stateid *open_stp,
                                       struct list_head *reaplist)
{
        struct nfs4_ol_stateid *stp;

        while (!list_empty(&open_stp->st_locks)) {
                stp = list_entry(open_stp->st_locks.next,
                                struct nfs4_ol_stateid, st_locks);
                unhash_lock_stateid(stp);
                put_ol_stateid_locked(stp, reaplist);
        }
}
static void unhash_open_stateid(struct nfs4_ol_stateid *stp,
                                struct list_head *reaplist)
{
        lockdep_assert_held(&stp->st_stid.sc_client->cl_lock);

        unhash_ol_stateid(stp);
        release_open_stateid_locks(stp, reaplist);
}
static void release_open_stateid(struct nfs4_ol_stateid *stp)
{
        LIST_HEAD(reaplist);

        spin_lock(&stp->st_stid.sc_client->cl_lock);
        unhash_open_stateid(stp, &reaplist);
        put_ol_stateid_locked(stp, &reaplist);
        spin_unlock(&stp->st_stid.sc_client->cl_lock);
        free_ol_stateid_reaplist(&reaplist);
}
static void unhash_openowner_locked(struct nfs4_openowner *oo)
{
        struct nfs4_client *clp = oo->oo_owner.so_client;

        lockdep_assert_held(&clp->cl_lock);

        list_del_init(&oo->oo_owner.so_strhash);
        list_del_init(&oo->oo_perclient);
}
static void release_last_closed_stateid(struct nfs4_openowner *oo)
{
        struct nfsd_net *nn = net_generic(oo->oo_owner.so_client->net,
                                          nfsd_net_id);
        struct nfs4_ol_stateid *s;

        spin_lock(&nn->client_lock);
        s = oo->oo_last_closed_stid;
        if (s) {
                list_del_init(&oo->oo_close_lru);
                oo->oo_last_closed_stid = NULL;
        }
        spin_unlock(&nn->client_lock);
        if (s)
                nfs4_put_stid(&s->st_stid);
}
static void release_openowner(struct nfs4_openowner *oo)
{
        struct nfs4_ol_stateid *stp;
        struct nfs4_client *clp = oo->oo_owner.so_client;
        struct list_head reaplist;

        INIT_LIST_HEAD(&reaplist);

        spin_lock(&clp->cl_lock);
        unhash_openowner_locked(oo);
        while (!list_empty(&oo->oo_owner.so_stateids)) {
                stp = list_first_entry(&oo->oo_owner.so_stateids,
                                struct nfs4_ol_stateid, st_perstateowner);
                unhash_open_stateid(stp, &reaplist);
                put_ol_stateid_locked(stp, &reaplist);
        }
        spin_unlock(&clp->cl_lock);
        free_ol_stateid_reaplist(&reaplist);
        release_last_closed_stateid(oo);
        nfs4_put_stateowner(&oo->oo_owner);
}
static inline int
hash_sessionid(struct nfs4_sessionid *sessionid)
{
        struct nfsd4_sessionid *sid = (struct nfsd4_sessionid *)sessionid;

        return sid->sequence % SESSION_HASH_SIZE;
}
#ifdef NFSD_DEBUG
static inline void
dump_sessionid(const char *fn, struct nfs4_sessionid *sessionid)
{
        u32 *ptr = (u32 *)(&sessionid->data[0]);
        dprintk("%s: %u:%u:%u:%u\n", fn, ptr[0], ptr[1], ptr[2], ptr[3]);
}
#else
static inline void
dump_sessionid(const char *fn, struct nfs4_sessionid *sessionid)
{
}
#endif
/*
 * Bump the seqid on cstate->replay_owner, and clear replay_owner if it
 * won't be used for replay.
 */
void nfsd4_bump_seqid(struct nfsd4_compound_state *cstate, __be32 nfserr)
{
        struct nfs4_stateowner *so = cstate->replay_owner;

        if (nfserr == nfserr_replay_me)
                return;

        if (!seqid_mutating_err(ntohl(nfserr))) {
                nfsd4_cstate_clear_replay(cstate);
                return;
        }
        if (!so)
                return;
        if (so->so_is_open_owner)
                release_last_closed_stateid(openowner(so));
        so->so_seqid++;
}
static void
gen_sessionid(struct nfsd4_session *ses)
{
        struct nfs4_client *clp = ses->se_client;
        struct nfsd4_sessionid *sid;

        sid = (struct nfsd4_sessionid *)ses->se_sessionid.data;
        sid->clientid = clp->cl_clientid;
        sid->sequence = current_sessionid++;
        sid->reserved = 0;
}
/*
 * The protocol defines ca_maxresponsesize_cached to include the size of
 * the rpc header, but all we need to cache is the data starting after
 * the end of the initial SEQUENCE operation--the rest we regenerate
 * each time.  Therefore we can advertise a ca_maxresponsesize_cached
 * value that is the number of bytes in our cache plus a few additional
 * bytes.  In order to stay on the safe side, and not promise more than
 * we can cache, those additional bytes must be the minimum possible: 24
 * bytes of rpc header (xid through accept state, with AUTH_NULL
 * verifier), 12 for the compound header (with zero-length tag), and 44
 * for the SEQUENCE op response:
 */
#define NFSD_MIN_HDR_SEQ_SZ  (24 + 12 + 44)
static void
free_session_slots(struct nfsd4_session *ses)
{
        int i;

        for (i = 0; i < ses->se_fchannel.maxreqs; i++)
                kfree(ses->se_slots[i]);
}
/*
 * We don't actually need to cache the rpc and session headers, so we
 * can allocate a little less for each slot:
 */
static inline u32 slot_bytes(struct nfsd4_channel_attrs *ca)
{
        u32 size;

        if (ca->maxresp_cached < NFSD_MIN_HDR_SEQ_SZ)
                size = 0;
        else
                size = ca->maxresp_cached - NFSD_MIN_HDR_SEQ_SZ;
        return size + sizeof(struct nfsd4_slot);
}
/*
 * XXX: If we run out of reserved DRC memory we could (up to a point)
 * re-negotiate active sessions and reduce their slot usage to make
 * room for new connections. For now we just fail the create session.
 */
static u32 nfsd4_get_drc_mem(struct nfsd4_channel_attrs *ca)
{
        u32 slotsize = slot_bytes(ca);
        u32 num = ca->maxreqs;
        int avail;

        spin_lock(&nfsd_drc_lock);
        avail = min((unsigned long)NFSD_MAX_MEM_PER_SESSION,
                    nfsd_drc_max_mem - nfsd_drc_mem_used);
        num = min_t(int, num, avail / slotsize);
        nfsd_drc_mem_used += num * slotsize;
        spin_unlock(&nfsd_drc_lock);

        return num;
}
static void nfsd4_put_drc_mem(struct nfsd4_channel_attrs *ca)
{
        int slotsize = slot_bytes(ca);

        spin_lock(&nfsd_drc_lock);
        nfsd_drc_mem_used -= slotsize * ca->maxreqs;
        spin_unlock(&nfsd_drc_lock);
}
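
/*
 * A session and its slot pointer array share one allocation (bounded to
 * a page by the BUILD_BUG_ON below); each slot, together with its reply
 * cache, is then allocated separately.
 */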
static struct nfsd4_session *alloc_session(struct nfsd4_channel_attrs *fattrs,
                                           struct nfsd4_channel_attrs *battrs)
{
        int numslots = fattrs->maxreqs;
        int slotsize = slot_bytes(fattrs);
        struct nfsd4_session *new;
        int mem, i;

        BUILD_BUG_ON(NFSD_MAX_SLOTS_PER_SESSION * sizeof(struct nfsd4_slot *)
                        + sizeof(struct nfsd4_session) > PAGE_SIZE);
        mem = numslots * sizeof(struct nfsd4_slot *);

        new = kzalloc(sizeof(*new) + mem, GFP_KERNEL);
        if (!new)
                return NULL;
        /* allocate each struct nfsd4_slot and data cache in one piece */
        for (i = 0; i < numslots; i++) {
                new->se_slots[i] = kzalloc(slotsize, GFP_KERNEL);
                if (!new->se_slots[i])
                        goto out_free;
        }

        memcpy(&new->se_fchannel, fattrs, sizeof(struct nfsd4_channel_attrs));
        memcpy(&new->se_bchannel, battrs, sizeof(struct nfsd4_channel_attrs));

        return new;
out_free:
        while (i--)
                kfree(new->se_slots[i]);
        kfree(new);
        return NULL;
}
static void free_conn(struct nfsd4_conn *c)
{
        svc_xprt_put(c->cn_xprt);
        kfree(c);
}
static void nfsd4_conn_lost(struct svc_xpt_user *u)
{
        struct nfsd4_conn *c = container_of(u, struct nfsd4_conn, cn_xpt_user);
        struct nfs4_client *clp = c->cn_session->se_client;

        spin_lock(&clp->cl_lock);
        if (!list_empty(&c->cn_persession)) {
                list_del(&c->cn_persession);
                free_conn(c);
        }
        nfsd4_probe_callback(clp);
        spin_unlock(&clp->cl_lock);
}
static struct nfsd4_conn *alloc_conn(struct svc_rqst *rqstp, u32 flags)
{
        struct nfsd4_conn *conn;

        conn = kmalloc(sizeof(struct nfsd4_conn), GFP_KERNEL);
        if (!conn)
                return NULL;
        svc_xprt_get(rqstp->rq_xprt);
        conn->cn_xprt = rqstp->rq_xprt;
        conn->cn_flags = flags;
        INIT_LIST_HEAD(&conn->cn_xpt_user.list);
        return conn;
}
static void __nfsd4_hash_conn(struct nfsd4_conn *conn, struct nfsd4_session *ses)
{
        conn->cn_session = ses;
        list_add(&conn->cn_persession, &ses->se_conns);
}
static void nfsd4_hash_conn(struct nfsd4_conn *conn, struct nfsd4_session *ses)
{
        struct nfs4_client *clp = ses->se_client;

        spin_lock(&clp->cl_lock);
        __nfsd4_hash_conn(conn, ses);
        spin_unlock(&clp->cl_lock);
}
static int nfsd4_register_conn(struct nfsd4_conn *conn)
{
        conn->cn_xpt_user.callback = nfsd4_conn_lost;
        return register_xpt_user(conn->cn_xprt, &conn->cn_xpt_user);
}
static void nfsd4_init_conn(struct svc_rqst *rqstp, struct nfsd4_conn *conn, struct nfsd4_session *ses)
{
        int ret;

        nfsd4_hash_conn(conn, ses);
        ret = nfsd4_register_conn(conn);
        if (ret)
                /* oops; xprt is already down: */
                nfsd4_conn_lost(&conn->cn_xpt_user);
        /* We may have gained or lost a callback channel: */
        nfsd4_probe_callback_sync(ses->se_client);
}
static struct nfsd4_conn *alloc_conn_from_crses(struct svc_rqst *rqstp, struct nfsd4_create_session *cses)
{
        u32 dir = NFS4_CDFC4_FORE;

        if (cses->flags & SESSION4_BACK_CHAN)
                dir |= NFS4_CDFC4_BACK;
        return alloc_conn(rqstp, dir);
}
/* must be called under client_lock */
static void nfsd4_del_conns(struct nfsd4_session *s)
{
        struct nfs4_client *clp = s->se_client;
        struct nfsd4_conn *c;

        spin_lock(&clp->cl_lock);
        while (!list_empty(&s->se_conns)) {
                c = list_first_entry(&s->se_conns, struct nfsd4_conn, cn_persession);
                list_del_init(&c->cn_persession);
                spin_unlock(&clp->cl_lock);

                unregister_xpt_user(c->cn_xprt, &c->cn_xpt_user);
                free_conn(c);

                spin_lock(&clp->cl_lock);
        }
        spin_unlock(&clp->cl_lock);
}
static void __free_session(struct nfsd4_session *ses)
{
        free_session_slots(ses);
        kfree(ses);
}
static void free_session(struct nfsd4_session *ses)
{
        nfsd4_del_conns(ses);
        nfsd4_put_drc_mem(&ses->se_fchannel);
        __free_session(ses);
}
static void init_session(struct svc_rqst *rqstp, struct nfsd4_session *new, struct nfs4_client *clp, struct nfsd4_create_session *cses)
{
        int idx;
        struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);

        new->se_client = clp;
        gen_sessionid(new);

        INIT_LIST_HEAD(&new->se_conns);

        new->se_cb_seq_nr = 1;
        new->se_flags = cses->flags;
        new->se_cb_prog = cses->callback_prog;
        new->se_cb_sec = cses->cb_sec;
        atomic_set(&new->se_ref, 0);
        idx = hash_sessionid(&new->se_sessionid);
        list_add(&new->se_hash, &nn->sessionid_hashtbl[idx]);
        spin_lock(&clp->cl_lock);
        list_add(&new->se_perclnt, &clp->cl_sessions);
        spin_unlock(&clp->cl_lock);

        if (cses->flags & SESSION4_BACK_CHAN) {
                struct sockaddr *sa = svc_addr(rqstp);
                /*
                 * This is a little silly; with sessions there's no real
                 * use for the callback address.  Use the peer address
                 * as a reasonable default for now, but consider fixing
                 * the rpc client not to require an address in the
                 * future:
                 */
                rpc_copy_addr((struct sockaddr *)&clp->cl_cb_conn.cb_addr, sa);
                clp->cl_cb_conn.cb_addrlen = svc_addr_len(sa);
        }
}
/* caller must hold client_lock */
static struct nfsd4_session *
__find_in_sessionid_hashtbl(struct nfs4_sessionid *sessionid, struct net *net)
{
        struct nfsd4_session *elem;
        int idx;
        struct nfsd_net *nn = net_generic(net, nfsd_net_id);

        lockdep_assert_held(&nn->client_lock);

        dump_sessionid(__func__, sessionid);
        idx = hash_sessionid(sessionid);
        /* Search in the appropriate list */
        list_for_each_entry(elem, &nn->sessionid_hashtbl[idx], se_hash) {
                if (!memcmp(elem->se_sessionid.data, sessionid->data,
                            NFS4_MAX_SESSIONID_LEN)) {
                        return elem;
                }
        }

        dprintk("%s: session not found\n", __func__);
        return NULL;
}
static struct nfsd4_session *
find_in_sessionid_hashtbl(struct nfs4_sessionid *sessionid, struct net *net,
                __be32 *ret)
{
        struct nfsd4_session *session;
        __be32 status = nfserr_badsession;

        session = __find_in_sessionid_hashtbl(sessionid, net);
        if (!session)
                goto out;
        status = nfsd4_get_session_locked(session);
        if (status)
                session = NULL;
out:
        *ret = status;
        return session;
}
/* caller must hold client_lock */
static void
unhash_session(struct nfsd4_session *ses)
{
        struct nfs4_client *clp = ses->se_client;
        struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);

        lockdep_assert_held(&nn->client_lock);

        list_del(&ses->se_hash);
        spin_lock(&ses->se_client->cl_lock);
        list_del(&ses->se_perclnt);
        spin_unlock(&ses->se_client->cl_lock);
}
/* SETCLIENTID and SETCLIENTID_CONFIRM Helper functions */
static int
STALE_CLIENTID(clientid_t *clid, struct nfsd_net *nn)
{
        if (clid->cl_boot == nn->boot_time)
                return 0;
        dprintk("NFSD stale clientid (%08x/%08x) boot_time %08lx\n",
                clid->cl_boot, clid->cl_id, nn->boot_time);
        return 1;
}
/*
 * XXX Should we use a slab cache ?
 * This type of memory management is somewhat inefficient, but we use it
 * anyway since SETCLIENTID is not a common operation.
 */
static struct nfs4_client *alloc_client(struct xdr_netobj name)
{
        struct nfs4_client *clp;
        int i;

        clp = kzalloc(sizeof(struct nfs4_client), GFP_KERNEL);
        if (clp == NULL)
                return NULL;
        clp->cl_name.data = kmemdup(name.data, name.len, GFP_KERNEL);
        if (clp->cl_name.data == NULL)
                goto err_no_name;
        clp->cl_ownerstr_hashtbl = kmalloc(sizeof(struct list_head) *
                        OWNER_HASH_SIZE, GFP_KERNEL);
        if (!clp->cl_ownerstr_hashtbl)
                goto err_no_hashtbl;
        for (i = 0; i < OWNER_HASH_SIZE; i++)
                INIT_LIST_HEAD(&clp->cl_ownerstr_hashtbl[i]);
        clp->cl_name.len = name.len;
        INIT_LIST_HEAD(&clp->cl_sessions);
        idr_init(&clp->cl_stateids);
        atomic_set(&clp->cl_refcount, 0);
        clp->cl_cb_state = NFSD4_CB_UNKNOWN;
        INIT_LIST_HEAD(&clp->cl_idhash);
        INIT_LIST_HEAD(&clp->cl_openowners);
        INIT_LIST_HEAD(&clp->cl_delegations);
        INIT_LIST_HEAD(&clp->cl_lru);
        INIT_LIST_HEAD(&clp->cl_callbacks);
        INIT_LIST_HEAD(&clp->cl_revoked);
        spin_lock_init(&clp->cl_lock);
        rpc_init_wait_queue(&clp->cl_cb_waitq, "Backchannel slot table");
        return clp;
err_no_hashtbl:
        kfree(clp->cl_name.data);
err_no_name:
        kfree(clp);
        return NULL;
}
static void
free_client(struct nfs4_client *clp)
{
        while (!list_empty(&clp->cl_sessions)) {
                struct nfsd4_session *ses;
                ses = list_entry(clp->cl_sessions.next, struct nfsd4_session,
                                se_perclnt);
                list_del(&ses->se_perclnt);
                WARN_ON_ONCE(atomic_read(&ses->se_ref));
                free_session(ses);
        }
        rpc_destroy_wait_queue(&clp->cl_cb_waitq);
        free_svc_cred(&clp->cl_cred);
        kfree(clp->cl_ownerstr_hashtbl);
        kfree(clp->cl_name.data);
        idr_destroy(&clp->cl_stateids);
        kfree(clp);
}
/* must be called under the client_lock */
static void
unhash_client_locked(struct nfs4_client *clp)
{
        struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);
        struct nfsd4_session *ses;

        lockdep_assert_held(&nn->client_lock);

        /* Mark the client as expired! */
        clp->cl_time = 0;
        /* Make it invisible */
        if (!list_empty(&clp->cl_idhash)) {
                list_del_init(&clp->cl_idhash);
                if (test_bit(NFSD4_CLIENT_CONFIRMED, &clp->cl_flags))
                        rb_erase(&clp->cl_namenode, &nn->conf_name_tree);
                else
                        rb_erase(&clp->cl_namenode, &nn->unconf_name_tree);
        }
        list_del_init(&clp->cl_lru);
        spin_lock(&clp->cl_lock);
        list_for_each_entry(ses, &clp->cl_sessions, se_perclnt)
                list_del_init(&ses->se_hash);
        spin_unlock(&clp->cl_lock);
}
static void
unhash_client(struct nfs4_client *clp)
{
        struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);

        spin_lock(&nn->client_lock);
        unhash_client_locked(clp);
        spin_unlock(&nn->client_lock);
}
static __be32 mark_client_expired_locked(struct nfs4_client *clp)
{
        if (atomic_read(&clp->cl_refcount))
                return nfserr_jukebox;
        unhash_client_locked(clp);
        return nfs_ok;
}
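
/*
 * Tear down all state held by a client: delegations (live and revoked),
 * openowners with their stateids, the callback machinery, and finally
 * the client structure itself.
 */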
static void
__destroy_client(struct nfs4_client *clp)
{
        struct nfs4_openowner *oo;
        struct nfs4_delegation *dp;
        struct list_head reaplist;

        INIT_LIST_HEAD(&reaplist);
        spin_lock(&state_lock);
        while (!list_empty(&clp->cl_delegations)) {
                dp = list_entry(clp->cl_delegations.next, struct nfs4_delegation, dl_perclnt);
                unhash_delegation_locked(dp);
                list_add(&dp->dl_recall_lru, &reaplist);
        }
        spin_unlock(&state_lock);
        while (!list_empty(&reaplist)) {
                dp = list_entry(reaplist.next, struct nfs4_delegation, dl_recall_lru);
                list_del_init(&dp->dl_recall_lru);
                nfs4_put_stid(&dp->dl_stid);
        }
        while (!list_empty(&clp->cl_revoked)) {
                dp = list_entry(clp->cl_revoked.next, struct nfs4_delegation, dl_recall_lru);
                list_del_init(&dp->dl_recall_lru);
                nfs4_put_stid(&dp->dl_stid);
        }
        while (!list_empty(&clp->cl_openowners)) {
                oo = list_entry(clp->cl_openowners.next, struct nfs4_openowner, oo_perclient);
                atomic_inc(&oo->oo_owner.so_count);
                release_openowner(oo);
        }
        nfsd4_shutdown_callback(clp);
        if (clp->cl_cb_conn.cb_xprt)
                svc_xprt_put(clp->cl_cb_conn.cb_xprt);
        free_client(clp);
}
static void
destroy_client(struct nfs4_client *clp)
{
        unhash_client(clp);
        __destroy_client(clp);
}
static void expire_client(struct nfs4_client *clp)
{
        unhash_client(clp);
        nfsd4_client_record_remove(clp);
        __destroy_client(clp);
}
static void copy_verf(struct nfs4_client *target, nfs4_verifier *source)
{
        memcpy(target->cl_verifier.data, source->data,
                        sizeof(target->cl_verifier.data));
}
static void copy_clid(struct nfs4_client *target, struct nfs4_client *source)
{
        target->cl_clientid.cl_boot = source->cl_clientid.cl_boot;
        target->cl_clientid.cl_id = source->cl_clientid.cl_id;
}
static int copy_cred(struct svc_cred *target, struct svc_cred *source)
{
        if (source->cr_principal) {
                target->cr_principal =
                                kstrdup(source->cr_principal, GFP_KERNEL);
                if (target->cr_principal == NULL)
                        return -ENOMEM;
        } else
                target->cr_principal = NULL;
        target->cr_flavor = source->cr_flavor;
        target->cr_uid = source->cr_uid;
        target->cr_gid = source->cr_gid;
        target->cr_group_info = source->cr_group_info;
        get_group_info(target->cr_group_info);
        target->cr_gss_mech = source->cr_gss_mech;
        if (source->cr_gss_mech)
                gss_mech_get(source->cr_gss_mech);
        return 0;
}
static long long
compare_blob(const struct xdr_netobj *o1, const struct xdr_netobj *o2)
{
        long long res;

        res = o1->len - o2->len;
        if (res)
                return res;
        return (long long)memcmp(o1->data, o2->data, o1->len);
}
static int same_name(const char *n1, const char *n2)
{
        return 0 == memcmp(n1, n2, HEXDIR_LEN);
}
static int
same_verf(nfs4_verifier *v1, nfs4_verifier *v2)
{
        return 0 == memcmp(v1->data, v2->data, sizeof(v1->data));
}
static int
same_clid(clientid_t *cl1, clientid_t *cl2)
{
        return (cl1->cl_boot == cl2->cl_boot) && (cl1->cl_id == cl2->cl_id);
}
static bool groups_equal(struct group_info *g1, struct group_info *g2)
{
        int i;

        if (g1->ngroups != g2->ngroups)
                return false;
        for (i=0; i<g1->ngroups; i++)
                if (!gid_eq(GROUP_AT(g1, i), GROUP_AT(g2, i)))
                        return false;
        return true;
}
/*
 * RFC 3530 language requires clid_inuse be returned when the
 * "principal" associated with a request differs from that previously
 * used.  We use uid, gid's, and gss principal string as our best
 * approximation.  We also don't want to allow non-gss use of a client
 * established using gss: in theory cr_principal should catch that
 * change, but in practice cr_principal can be null even in the gss case
 * since gssd doesn't always pass down a principal string.
 */
static bool is_gss_cred(struct svc_cred *cr)
{
        /* Is cr_flavor one of the gss "pseudoflavors"?: */
        return (cr->cr_flavor > RPC_AUTH_MAXFLAVOR);
}
static bool
same_creds(struct svc_cred *cr1, struct svc_cred *cr2)
{
        if ((is_gss_cred(cr1) != is_gss_cred(cr2))
                || (!uid_eq(cr1->cr_uid, cr2->cr_uid))
                || (!gid_eq(cr1->cr_gid, cr2->cr_gid))
                || !groups_equal(cr1->cr_group_info, cr2->cr_group_info))
                return false;
        if (cr1->cr_principal == cr2->cr_principal)
                return true;
        if (!cr1->cr_principal || !cr2->cr_principal)
                return false;
        return 0 == strcmp(cr1->cr_principal, cr2->cr_principal);
}
static bool svc_rqst_integrity_protected(struct svc_rqst *rqstp)
{
        struct svc_cred *cr = &rqstp->rq_cred;
        u32 service;

        if (!cr->cr_gss_mech)
                return false;
        service = gss_pseudoflavor_to_service(cr->cr_gss_mech, cr->cr_flavor);
        return service == RPC_GSS_SVC_INTEGRITY ||
               service == RPC_GSS_SVC_PRIVACY;
}
static bool mach_creds_match(struct nfs4_client *cl, struct svc_rqst *rqstp)
{
        struct svc_cred *cr = &rqstp->rq_cred;

        if (!cl->cl_mach_cred)
                return true;
        if (cl->cl_cred.cr_gss_mech != cr->cr_gss_mech)
                return false;
        if (!svc_rqst_integrity_protected(rqstp))
                return false;
        if (!cr->cr_principal)
                return false;
        return 0 == strcmp(cl->cl_cred.cr_principal, cr->cr_principal);
}
static void gen_confirm(struct nfs4_client *clp, struct nfsd_net *nn)
{
        __be32 verf[2];

        /*
         * This is opaque to client, so no need to byte-swap. Use
         * __force to keep sparse happy
         */
        verf[0] = (__force __be32)get_seconds();
        verf[1] = (__force __be32)nn->clientid_counter;
        memcpy(clp->cl_confirm.data, verf, sizeof(clp->cl_confirm.data));
}
static void gen_clid(struct nfs4_client *clp, struct nfsd_net *nn)
{
        clp->cl_clientid.cl_boot = nn->boot_time;
        clp->cl_clientid.cl_id = nn->clientid_counter++;
        gen_confirm(clp, nn);
}
static struct nfs4_stid *
find_stateid_locked(struct nfs4_client *cl, stateid_t *t)
{
        struct nfs4_stid *ret;

        ret = idr_find(&cl->cl_stateids, t->si_opaque.so_id);
        if (!ret || !ret->sc_type)
                return NULL;
        return ret;
}
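
/*
 * Look up a stateid and take a reference on it, but only if its type
 * matches @typemask; otherwise return NULL.
 */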
static struct nfs4_stid *
find_stateid_by_type(struct nfs4_client *cl, stateid_t *t, char typemask)
{
        struct nfs4_stid *s;

        spin_lock(&cl->cl_lock);
        s = find_stateid_locked(cl, t);
        if (s != NULL) {
                if (typemask & s->sc_type)
                        atomic_inc(&s->sc_count);
                else
                        s = NULL;
        }
        spin_unlock(&cl->cl_lock);
        return s;
}
static struct nfs4_client *create_client(struct xdr_netobj name,
                struct svc_rqst *rqstp, nfs4_verifier *verf)
{
        struct nfs4_client *clp;
        struct sockaddr *sa = svc_addr(rqstp);
        int ret;
        struct net *net = SVC_NET(rqstp);

        clp = alloc_client(name);
        if (clp == NULL)
                return NULL;

        ret = copy_cred(&clp->cl_cred, &rqstp->rq_cred);
        if (ret) {
                free_client(clp);
                return NULL;
        }
        INIT_WORK(&clp->cl_cb_null.cb_work, nfsd4_run_cb_null);
        clp->cl_time = get_seconds();
        clear_bit(0, &clp->cl_cb_slot_busy);
        copy_verf(clp, verf);
        rpc_copy_addr((struct sockaddr *) &clp->cl_addr, sa);
        clp->cl_cb_session = NULL;
        clp->net = net;
        return clp;
}
static void
add_clp_to_name_tree(struct nfs4_client *new_clp, struct rb_root *root)
{
        struct rb_node **new = &(root->rb_node), *parent = NULL;
        struct nfs4_client *clp;

        while (*new) {
                clp = rb_entry(*new, struct nfs4_client, cl_namenode);
                parent = *new;

                if (compare_blob(&clp->cl_name, &new_clp->cl_name) > 0)
                        new = &((*new)->rb_left);
                else
                        new = &((*new)->rb_right);
        }

        rb_link_node(&new_clp->cl_namenode, parent, new);
        rb_insert_color(&new_clp->cl_namenode, root);
}
static struct nfs4_client *
find_clp_in_name_tree(struct xdr_netobj *name, struct rb_root *root)
{
        long long cmp;
        struct rb_node *node = root->rb_node;
        struct nfs4_client *clp;

        while (node) {
                clp = rb_entry(node, struct nfs4_client, cl_namenode);
                cmp = compare_blob(&clp->cl_name, name);
                if (cmp > 0)
                        node = node->rb_left;
                else if (cmp < 0)
                        node = node->rb_right;
                else
                        return clp;
        }
        return NULL;
}
static void
add_to_unconfirmed(struct nfs4_client *clp)
{
        unsigned int idhashval;
        struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);

        lockdep_assert_held(&nn->client_lock);

        clear_bit(NFSD4_CLIENT_CONFIRMED, &clp->cl_flags);
        add_clp_to_name_tree(clp, &nn->unconf_name_tree);
        idhashval = clientid_hashval(clp->cl_clientid.cl_id);
        list_add(&clp->cl_idhash, &nn->unconf_id_hashtbl[idhashval]);
        renew_client_locked(clp);
}
static void
move_to_confirmed(struct nfs4_client *clp)
{
        unsigned int idhashval = clientid_hashval(clp->cl_clientid.cl_id);
        struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);

        lockdep_assert_held(&nn->client_lock);

        dprintk("NFSD: move_to_confirm nfs4_client %p\n", clp);
        list_move(&clp->cl_idhash, &nn->conf_id_hashtbl[idhashval]);
        rb_erase(&clp->cl_namenode, &nn->unconf_name_tree);
        add_clp_to_name_tree(clp, &nn->conf_name_tree);
        set_bit(NFSD4_CLIENT_CONFIRMED, &clp->cl_flags);
        renew_client_locked(clp);
}
static struct nfs4_client *
find_client_in_id_table(struct list_head *tbl, clientid_t *clid, bool sessions)
{
        struct nfs4_client *clp;
        unsigned int idhashval = clientid_hashval(clid->cl_id);

        list_for_each_entry(clp, &tbl[idhashval], cl_idhash) {
                if (same_clid(&clp->cl_clientid, clid)) {
                        if ((bool)clp->cl_minorversion != sessions)
                                return NULL;
                        renew_client_locked(clp);
                        return clp;
                }
        }
        return NULL;
}
static struct nfs4_client *
find_confirmed_client(clientid_t *clid, bool sessions, struct nfsd_net *nn)
{
        struct list_head *tbl = nn->conf_id_hashtbl;

        lockdep_assert_held(&nn->client_lock);
        return find_client_in_id_table(tbl, clid, sessions);
}
static struct nfs4_client *
find_unconfirmed_client(clientid_t *clid, bool sessions, struct nfsd_net *nn)
{
        struct list_head *tbl = nn->unconf_id_hashtbl;

        lockdep_assert_held(&nn->client_lock);
        return find_client_in_id_table(tbl, clid, sessions);
}
static bool clp_used_exchangeid(struct nfs4_client *clp)
{
        return clp->cl_exchange_flags != 0;
}
static struct nfs4_client *
find_confirmed_client_by_name(struct xdr_netobj *name, struct nfsd_net *nn)
{
        lockdep_assert_held(&nn->client_lock);
        return find_clp_in_name_tree(name, &nn->conf_name_tree);
}
static struct nfs4_client *
find_unconfirmed_client_by_name(struct xdr_netobj *name, struct nfsd_net *nn)
{
        lockdep_assert_held(&nn->client_lock);
        return find_clp_in_name_tree(name, &nn->unconf_name_tree);
}
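
/*
 * Record the callback address supplied by SETCLIENTID so the server can
 * later establish the backchannel; on any parse failure the client
 * simply won't receive delegations.
 */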
static void
gen_callback(struct nfs4_client *clp, struct nfsd4_setclientid *se, struct svc_rqst *rqstp)
{
        struct nfs4_cb_conn *conn = &clp->cl_cb_conn;
        struct sockaddr *sa = svc_addr(rqstp);
        u32 scopeid = rpc_get_scope_id(sa);
        unsigned short expected_family;

        /* Currently, we only support tcp and tcp6 for the callback channel */
        if (se->se_callback_netid_len == 3 &&
            !memcmp(se->se_callback_netid_val, "tcp", 3))
                expected_family = AF_INET;
        else if (se->se_callback_netid_len == 4 &&
                 !memcmp(se->se_callback_netid_val, "tcp6", 4))
                expected_family = AF_INET6;
        else
                goto out_err;

        conn->cb_addrlen = rpc_uaddr2sockaddr(clp->net, se->se_callback_addr_val,
                                            se->se_callback_addr_len,
                                            (struct sockaddr *)&conn->cb_addr,
                                            sizeof(conn->cb_addr));

        if (!conn->cb_addrlen || conn->cb_addr.ss_family != expected_family)
                goto out_err;

        if (conn->cb_addr.ss_family == AF_INET6)
                ((struct sockaddr_in6 *)&conn->cb_addr)->sin6_scope_id = scopeid;

        conn->cb_prog = se->se_callback_prog;
        conn->cb_ident = se->se_callback_ident;
        memcpy(&conn->cb_saddr, &rqstp->rq_daddr, rqstp->rq_daddrlen);
        return;
out_err:
        conn->cb_addr.ss_family = AF_UNSPEC;
        conn->cb_addrlen = 0;
        dprintk(KERN_INFO "NFSD: this client (clientid %08x/%08x) "
                "will not receive delegations\n",
                clp->cl_clientid.cl_boot, clp->cl_clientid.cl_id);

        return;
}
/*
 * Cache a reply. nfsd4_check_resp_size() has bounded the cache size.
 */
static void
nfsd4_store_cache_entry(struct nfsd4_compoundres *resp)
{
        struct xdr_buf *buf = resp->xdr.buf;
        struct nfsd4_slot *slot = resp->cstate.slot;
        unsigned int base;

        dprintk("--> %s slot %p\n", __func__, slot);

        slot->sl_opcnt = resp->opcnt;
        slot->sl_status = resp->cstate.status;

        slot->sl_flags |= NFSD4_SLOT_INITIALIZED;
        if (nfsd4_not_cached(resp)) {
                slot->sl_datalen = 0;
                return;
        }
        base = resp->cstate.data_offset;
        slot->sl_datalen = buf->len - base;
        if (read_bytes_from_xdr_buf(buf, base, slot->sl_data, slot->sl_datalen))
                WARN("%s: sessions DRC could not cache compound\n", __func__);
        return;
}
/*
 * Encode the replay sequence operation from the slot values.
 * If cachethis is FALSE encode the uncached rep error on the next
 * operation which sets resp->p and increments resp->opcnt for
 * nfs4svc_encode_compoundres.
 */
static __be32
nfsd4_enc_sequence_replay(struct nfsd4_compoundargs *args,
                          struct nfsd4_compoundres *resp)
{
        struct nfsd4_op *op;
        struct nfsd4_slot *slot = resp->cstate.slot;

        /* Encode the replayed sequence operation */
        op = &args->ops[resp->opcnt - 1];
        nfsd4_encode_operation(resp, op);

        /* Return nfserr_retry_uncached_rep in next operation. */
        if (args->opcnt > 1 && !(slot->sl_flags & NFSD4_SLOT_CACHETHIS)) {
                op = &args->ops[resp->opcnt++];
                op->status = nfserr_retry_uncached_rep;
                nfsd4_encode_operation(resp, op);
        }
        return op->status;
}
/*
 * The sequence operation is not cached because we can use the slot and
 * session values.
 */
static __be32
nfsd4_replay_cache_entry(struct nfsd4_compoundres *resp,
                         struct nfsd4_sequence *seq)
{
        struct nfsd4_slot *slot = resp->cstate.slot;
        struct xdr_stream *xdr = &resp->xdr;
        __be32 *p;
        __be32 status;

        dprintk("--> %s slot %p\n", __func__, slot);

        status = nfsd4_enc_sequence_replay(resp->rqstp->rq_argp, resp);
        if (status)
                return status;

        p = xdr_reserve_space(xdr, slot->sl_datalen);
        if (!p) {
                WARN_ON_ONCE(1);
                return nfserr_serverfault;
        }
        xdr_encode_opaque_fixed(p, slot->sl_data, slot->sl_datalen);
        xdr_commit_encode(xdr);

        resp->opcnt = slot->sl_opcnt;
        return slot->sl_status;
}
/*
 * Set the exchange_id flags returned by the server.
 */
static void
nfsd4_set_ex_flags(struct nfs4_client *new, struct nfsd4_exchange_id *clid)
{
        /* pNFS is not supported */
        new->cl_exchange_flags |= EXCHGID4_FLAG_USE_NON_PNFS;

        /* Referrals are supported, Migration is not. */
        new->cl_exchange_flags |= EXCHGID4_FLAG_SUPP_MOVED_REFER;

        /* set the wire flags to return to client. */
        clid->flags = new->cl_exchange_flags;
}
static bool client_has_state(struct nfs4_client *clp)
{
        /*
         * Note clp->cl_openowners check isn't quite right: there's no
         * need to count owners without stateid's.
         *
         * Also note we should probably be using this in 4.0 case too.
         */
        return !list_empty(&clp->cl_openowners)
                || !list_empty(&clp->cl_delegations)
                || !list_empty(&clp->cl_sessions);
}
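
/*
 * EXCHANGE_ID processing.  The confirmed/unconfirmed client cases below
 * follow the case numbering of rfc 5661 section 18.35.4.
 */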
__be32
nfsd4_exchange_id(struct svc_rqst *rqstp,
		  struct nfsd4_compound_state *cstate,
		  struct nfsd4_exchange_id *exid)
{
	struct nfs4_client *conf, *new;
	struct nfs4_client *unconf = NULL;
	__be32 status;
	char addr_str[INET6_ADDRSTRLEN];
	nfs4_verifier verf = exid->verifier;
	struct sockaddr *sa = svc_addr(rqstp);
	bool update = exid->flags & EXCHGID4_FLAG_UPD_CONFIRMED_REC_A;
	struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);

	rpc_ntop(sa, addr_str, sizeof(addr_str));
	dprintk("%s rqstp=%p exid=%p clname.len=%u clname.data=%p "
		"ip_addr=%s flags %x, spa_how %d\n",
		__func__, rqstp, exid, exid->clname.len, exid->clname.data,
		addr_str, exid->flags, exid->spa_how);

	if (exid->flags & ~EXCHGID4_FLAG_MASK_A)
		return nfserr_inval;

	switch (exid->spa_how) {
	case SP4_MACH_CRED:
		if (!svc_rqst_integrity_protected(rqstp))
			return nfserr_inval;
	case SP4_NONE:
		break;
	default:				/* checked by xdr code */
		WARN_ON_ONCE(1);
	case SP4_SSV:
		return nfserr_encr_alg_unsupp;
	}

	new = create_client(exid->clname, rqstp, &verf);
	if (new == NULL)
		return nfserr_jukebox;

	/* Cases below refer to rfc 5661 section 18.35.4: */
	spin_lock(&nn->client_lock);
	conf = find_confirmed_client_by_name(&exid->clname, nn);
	if (conf) {
		bool creds_match = same_creds(&conf->cl_cred, &rqstp->rq_cred);
		bool verfs_match = same_verf(&verf, &conf->cl_verifier);

		if (update) {
			if (!clp_used_exchangeid(conf)) { /* buggy client */
				status = nfserr_inval;
				goto out;
			}
			if (!mach_creds_match(conf, rqstp)) {
				status = nfserr_wrong_cred;
				goto out;
			}
			if (!creds_match) { /* case 9 */
				status = nfserr_perm;
				goto out;
			}
			if (!verfs_match) { /* case 8 */
				status = nfserr_not_same;
				goto out;
			}
			/* case 6 */
			exid->flags |= EXCHGID4_FLAG_CONFIRMED_R;
			goto out_copy;
		}
		if (!creds_match) { /* case 3 */
			if (client_has_state(conf)) {
				status = nfserr_clid_inuse;
				goto out;
			}
			goto out_new;
		}
		if (verfs_match) { /* case 2 */
			conf->cl_exchange_flags |= EXCHGID4_FLAG_CONFIRMED_R;
			goto out_copy;
		}
		/* case 5, client reboot */
		conf = NULL;
		goto out_new;
	}

	if (update) { /* case 7 */
		status = nfserr_noent;
		goto out;
	}

	unconf = find_unconfirmed_client_by_name(&exid->clname, nn);
	if (unconf) /* case 4, possible retry or client restart */
		unhash_client_locked(unconf);

	/* case 1 (normal case) */
out_new:
	if (conf) {
		status = mark_client_expired_locked(conf);
		if (status)
			goto out;
	}
	new->cl_minorversion = cstate->minorversion;
	new->cl_mach_cred = (exid->spa_how == SP4_MACH_CRED);

	gen_clid(new, nn);
	add_to_unconfirmed(new);
	swap(new, conf);
out_copy:
	exid->clientid.cl_boot = conf->cl_clientid.cl_boot;
	exid->clientid.cl_id = conf->cl_clientid.cl_id;

	exid->seqid = conf->cl_cs_slot.sl_seqid + 1;
	nfsd4_set_ex_flags(conf, exid);

	dprintk("nfsd4_exchange_id seqid %d flags %x\n",
		conf->cl_cs_slot.sl_seqid, conf->cl_exchange_flags);
	status = nfs_ok;

out:
	spin_unlock(&nn->client_lock);
	if (new)
		expire_client(new);
	if (unconf)
		expire_client(unconf);
	return status;
}
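
/*
 * Rough map of the RFC 5661 section 18.35.4 cases handled above (a
 * summary only; the RFC has the authoritative table): a confirmed
 * client with matching creds and verifier is case 2, or case 6 when
 * UPD_CONFIRMED_REC_A is set; mismatched creds are case 3 without
 * update, case 9 with it; a verifier mismatch is case 5 (client
 * reboot) without update, case 8 with it. With no confirmed record,
 * update fails as case 7, an existing unconfirmed entry is replaced
 * as case 4, and a brand new client is case 1.
 */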
static __be32
check_slot_seqid(u32 seqid, u32 slot_seqid, int slot_inuse)
{
	dprintk("%s enter. seqid %d slot_seqid %d\n", __func__, seqid,
		slot_seqid);

	/* The slot is in use, and no response has been sent. */
	if (slot_inuse) {
		if (seqid == slot_seqid)
			return nfserr_jukebox;
		else
			return nfserr_seq_misordered;
	}
	/* Note unsigned 32-bit arithmetic handles wraparound: */
	if (likely(seqid == slot_seqid + 1))
		return nfs_ok;
	if (seqid == slot_seqid)
		return nfserr_replay_cache;
	return nfserr_seq_misordered;
}
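
/*
 * Illustration of the wraparound note above (example values only):
 * because the comparison is done in unsigned 32-bit arithmetic, a
 * slot whose sequence id has reached the maximum still accepts the
 * next request:
 *
 *	u32 slot_seqid = 0xffffffffU;
 *	u32 seqid = 0;			// client wrapped past the maximum
 *	// seqid == slot_seqid + 1 is true: 0x100000000 mod 2^32 == 0
 *
 * so the wrap from 0xffffffff to 0 is treated as a normal increment
 * rather than a misordered request.
 */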
/*
 * Cache the create session result into the create session single DRC
 * slot cache by saving the xdr structure. sl_seqid has been set.
 * Do this for solo or embedded create session operations.
 */
static void
nfsd4_cache_create_session(struct nfsd4_create_session *cr_ses,
			   struct nfsd4_clid_slot *slot, __be32 nfserr)
{
	slot->sl_status = nfserr;
	memcpy(&slot->sl_cr_ses, cr_ses, sizeof(*cr_ses));
}

static __be32
nfsd4_replay_create_session(struct nfsd4_create_session *cr_ses,
			    struct nfsd4_clid_slot *slot)
{
	memcpy(cr_ses, &slot->sl_cr_ses, sizeof(*cr_ses));
	return slot->sl_status;
}
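
/*
 * Illustrative sequence for this single create_session slot (a sketch,
 * not a real trace): if a client sends CREATE_SESSION with seqid N and
 * the reply is lost, its retransmission carries the same seqid N;
 * check_slot_seqid() then returns nfserr_replay_cache and the saved
 * results are replayed from sl_cr_ses by nfsd4_replay_create_session()
 * instead of creating a second session.
 */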
#define NFSD_MIN_REQ_HDR_SEQ_SZ	((\
	2 * 2 +	/* credential,verifier: AUTH_NULL, length 0 */ \
	1 +	/* MIN tag is length with zero, only length */ \
	3 +	/* version, opcount, opcode */ \
	XDR_QUADLEN(NFS4_MAX_SESSIONID_LEN) + \
	/* seqid, slotID, slotID, cache */ \
	4 ) * sizeof(__be32))

#define NFSD_MIN_RESP_HDR_SEQ_SZ ((\
	2 +	/* verifier: AUTH_NULL, length 0 */\
	1 +	/* status */ \
	1 +	/* MIN tag is length with zero, only length */ \
	3 +	/* opcount, opcode, opstatus*/ \
	XDR_QUADLEN(NFS4_MAX_SESSIONID_LEN) + \
	/* seqid, slotID, slotID, slotID, status */ \
	5 ) * sizeof(__be32))
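
/*
 * Worked example of the arithmetic above, assuming
 * NFS4_MAX_SESSIONID_LEN is 16 (so XDR_QUADLEN(16) == 4) and
 * sizeof(__be32) == 4:
 *
 *	NFSD_MIN_REQ_HDR_SEQ_SZ  = (4 + 1 + 3 + 4 + 4) * 4 = 64 bytes
 *	NFSD_MIN_RESP_HDR_SEQ_SZ = (2 + 1 + 1 + 3 + 4 + 5) * 4 = 64 bytes
 *
 * i.e. a client must offer at least these maxreq_sz/maxresp_sz values
 * or CREATE_SESSION fails with nfserr_toosmall below.
 */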
static __be32 check_forechannel_attrs(struct nfsd4_channel_attrs *ca, struct nfsd_net *nn)
{
	u32 maxrpc = nn->nfsd_serv->sv_max_mesg;

	if (ca->maxreq_sz < NFSD_MIN_REQ_HDR_SEQ_SZ)
		return nfserr_toosmall;
	if (ca->maxresp_sz < NFSD_MIN_RESP_HDR_SEQ_SZ)
		return nfserr_toosmall;
	ca->headerpadsz = 0;
	ca->maxreq_sz = min_t(u32, ca->maxreq_sz, maxrpc);
	ca->maxresp_sz = min_t(u32, ca->maxresp_sz, maxrpc);
	ca->maxops = min_t(u32, ca->maxops, NFSD_MAX_OPS_PER_COMPOUND);
	ca->maxresp_cached = min_t(u32, ca->maxresp_cached,
			NFSD_SLOT_CACHE_SIZE + NFSD_MIN_HDR_SEQ_SZ);
	ca->maxreqs = min_t(u32, ca->maxreqs, NFSD_MAX_SLOTS_PER_SESSION);
	/*
	 * Note decreasing slot size below client's request may make it
	 * difficult for client to function correctly, whereas
	 * decreasing the number of slots will (just?) affect
	 * performance.  When short on memory we therefore prefer to
	 * decrease number of slots instead of their size.  Clients that
	 * request larger slots than they need will get poor results:
	 */
	ca->maxreqs = nfsd4_get_drc_mem(ca);
	if (!ca->maxreqs)
		return nfserr_jukebox;

	return nfs_ok;
}
#define NFSD_CB_MAX_REQ_SZ	((NFS4_enc_cb_recall_sz + \
				 RPC_MAX_HEADER_WITH_AUTH) * sizeof(__be32))
#define NFSD_CB_MAX_RESP_SZ	((NFS4_dec_cb_recall_sz + \
				 RPC_MAX_REPHEADER_WITH_AUTH) * sizeof(__be32))

static __be32 check_backchannel_attrs(struct nfsd4_channel_attrs *ca)
{
	ca->headerpadsz = 0;

	/*
	 * These RPC_MAX_HEADER macros are overkill, especially since we
	 * don't even do gss on the backchannel yet.  But this is still
	 * less than 1k.  Tighten up this estimate in the unlikely event
	 * it turns out to be a problem for some client:
	 */
	if (ca->maxreq_sz < NFSD_CB_MAX_REQ_SZ)
		return nfserr_toosmall;
	if (ca->maxresp_sz < NFSD_CB_MAX_RESP_SZ)
		return nfserr_toosmall;
	ca->maxresp_cached = 0;
	if (ca->maxops < 2)
		return nfserr_toosmall;

	return nfs_ok;
}

static __be32 nfsd4_check_cb_sec(struct nfsd4_cb_sec *cbs)
{
	switch (cbs->flavor) {
	case RPC_AUTH_NULL:
	case RPC_AUTH_UNIX:
		return nfs_ok;
	default:
		/*
		 * GSS case: the spec doesn't allow us to return this
		 * error.  But it also doesn't allow us not to support
		 * GSS.
		 * I'd rather this fail hard than return some error the
		 * client might think it can already handle:
		 */
		return nfserr_encr_alg_unsupp;
	}
}
__be32
nfsd4_create_session(struct svc_rqst *rqstp,
		     struct nfsd4_compound_state *cstate,
		     struct nfsd4_create_session *cr_ses)
{
	struct sockaddr *sa = svc_addr(rqstp);
	struct nfs4_client *conf, *unconf;
	struct nfs4_client *old = NULL;
	struct nfsd4_session *new;
	struct nfsd4_conn *conn;
	struct nfsd4_clid_slot *cs_slot = NULL;
	__be32 status;
	struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);

	if (cr_ses->flags & ~SESSION4_FLAG_MASK_A)
		return nfserr_inval;
	status = nfsd4_check_cb_sec(&cr_ses->cb_sec);
	if (status)
		return status;
	status = check_forechannel_attrs(&cr_ses->fore_channel, nn);
	if (status)
		return status;
	status = check_backchannel_attrs(&cr_ses->back_channel);
	if (status)
		goto out_release_drc_mem;
	status = nfserr_jukebox;
	new = alloc_session(&cr_ses->fore_channel, &cr_ses->back_channel);
	if (!new)
		goto out_release_drc_mem;
	conn = alloc_conn_from_crses(rqstp, cr_ses);
	if (!conn)
		goto out_free_session;

	spin_lock(&nn->client_lock);
	unconf = find_unconfirmed_client(&cr_ses->clientid, true, nn);
	conf = find_confirmed_client(&cr_ses->clientid, true, nn);
	WARN_ON_ONCE(conf && unconf);

	if (conf) {
		status = nfserr_wrong_cred;
		if (!mach_creds_match(conf, rqstp))
			goto out_free_conn;
		cs_slot = &conf->cl_cs_slot;
		status = check_slot_seqid(cr_ses->seqid, cs_slot->sl_seqid, 0);
		if (status == nfserr_replay_cache) {
			status = nfsd4_replay_create_session(cr_ses, cs_slot);
			goto out_free_conn;
		} else if (cr_ses->seqid != cs_slot->sl_seqid + 1) {
			status = nfserr_seq_misordered;
			goto out_free_conn;
		}
	} else if (unconf) {
		if (!same_creds(&unconf->cl_cred, &rqstp->rq_cred) ||
		    !rpc_cmp_addr(sa, (struct sockaddr *) &unconf->cl_addr)) {
			status = nfserr_clid_inuse;
			goto out_free_conn;
		}
		status = nfserr_wrong_cred;
		if (!mach_creds_match(unconf, rqstp))
			goto out_free_conn;
		cs_slot = &unconf->cl_cs_slot;
		status = check_slot_seqid(cr_ses->seqid, cs_slot->sl_seqid, 0);
		if (status) {
			/* an unconfirmed replay returns misordered */
			status = nfserr_seq_misordered;
			goto out_free_conn;
		}
		old = find_confirmed_client_by_name(&unconf->cl_name, nn);
		if (old) {
			status = mark_client_expired_locked(old);
			if (status) {
				old = NULL;
				goto out_free_conn;
			}
		}
		move_to_confirmed(unconf);
		conf = unconf;
	} else {
		status = nfserr_stale_clientid;
		goto out_free_conn;
	}
	status = nfs_ok;
	/*
	 * We do not support RDMA or persistent sessions
	 */
	cr_ses->flags &= ~SESSION4_PERSIST;
	cr_ses->flags &= ~SESSION4_RDMA;

	init_session(rqstp, new, conf, cr_ses);
	nfsd4_get_session_locked(new);

	memcpy(cr_ses->sessionid.data, new->se_sessionid.data,
	       NFS4_MAX_SESSIONID_LEN);
	cs_slot->sl_seqid++;
	cr_ses->seqid = cs_slot->sl_seqid;

	/* cache solo and embedded create sessions under the client_lock */
	nfsd4_cache_create_session(cr_ses, cs_slot, status);
	spin_unlock(&nn->client_lock);
	/* init connection and backchannel */
	nfsd4_init_conn(rqstp, conn, new);
	nfsd4_put_session(new);
	if (old)
		expire_client(old);
	return status;
out_free_conn:
	spin_unlock(&nn->client_lock);
	free_conn(conn);
	if (old)
		expire_client(old);
out_free_session:
	__free_session(new);
out_release_drc_mem:
	nfsd4_put_drc_mem(&cr_ses->fore_channel);
	return status;
}
static __be32 nfsd4_map_bcts_dir(u32 *dir)
{
	switch (*dir) {
	case NFS4_CDFC4_FORE:
	case NFS4_CDFC4_BACK:
		return nfs_ok;
	case NFS4_CDFC4_FORE_OR_BOTH:
	case NFS4_CDFC4_BACK_OR_BOTH:
		*dir = NFS4_CDFC4_BOTH;
		return nfs_ok;
	};
	return nfserr_inval;
}

__be32 nfsd4_backchannel_ctl(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate, struct nfsd4_backchannel_ctl *bc)
{
	struct nfsd4_session *session = cstate->session;
	struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
	__be32 status;

	status = nfsd4_check_cb_sec(&bc->bc_cb_sec);
	if (status)
		return status;
	spin_lock(&nn->client_lock);
	session->se_cb_prog = bc->bc_cb_program;
	session->se_cb_sec = bc->bc_cb_sec;
	spin_unlock(&nn->client_lock);

	nfsd4_probe_callback(session->se_client);

	return nfs_ok;
}
__be32 nfsd4_bind_conn_to_session(struct svc_rqst *rqstp,
		     struct nfsd4_compound_state *cstate,
		     struct nfsd4_bind_conn_to_session *bcts)
{
	__be32 status;
	struct nfsd4_conn *conn;
	struct nfsd4_session *session;
	struct net *net = SVC_NET(rqstp);
	struct nfsd_net *nn = net_generic(net, nfsd_net_id);

	if (!nfsd4_last_compound_op(rqstp))
		return nfserr_not_only_op;
	spin_lock(&nn->client_lock);
	session = find_in_sessionid_hashtbl(&bcts->sessionid, net, &status);
	spin_unlock(&nn->client_lock);
	if (!session)
		goto out_no_session;
	status = nfserr_wrong_cred;
	if (!mach_creds_match(session->se_client, rqstp))
		goto out;
	status = nfsd4_map_bcts_dir(&bcts->dir);
	if (status)
		goto out;
	conn = alloc_conn(rqstp, bcts->dir);
	status = nfserr_jukebox;
	if (!conn)
		goto out;
	nfsd4_init_conn(rqstp, conn, session);
	status = nfs_ok;
out:
	nfsd4_put_session(session);
out_no_session:
	return status;
}
static bool nfsd4_compound_in_session(struct nfsd4_session *session, struct nfs4_sessionid *sid)
{
	if (!session)
		return false;
	return !memcmp(sid, &session->se_sessionid, sizeof(*sid));
}
__be32
nfsd4_destroy_session(struct svc_rqst *r,
		      struct nfsd4_compound_state *cstate,
		      struct nfsd4_destroy_session *sessionid)
{
	struct nfsd4_session *ses;
	__be32 status;
	int ref_held_by_me = 0;
	struct net *net = SVC_NET(r);
	struct nfsd_net *nn = net_generic(net, nfsd_net_id);

	status = nfserr_not_only_op;
	if (nfsd4_compound_in_session(cstate->session, &sessionid->sessionid)) {
		if (!nfsd4_last_compound_op(r))
			goto out;
		ref_held_by_me++;
	}
	dump_sessionid(__func__, &sessionid->sessionid);
	spin_lock(&nn->client_lock);
	ses = find_in_sessionid_hashtbl(&sessionid->sessionid, net, &status);
	if (!ses)
		goto out_client_lock;
	status = nfserr_wrong_cred;
	if (!mach_creds_match(ses->se_client, r))
		goto out_put_session;
	status = mark_session_dead_locked(ses, 1 + ref_held_by_me);
	if (status)
		goto out_put_session;
	unhash_session(ses);
	spin_unlock(&nn->client_lock);

	nfsd4_probe_callback_sync(ses->se_client);

	spin_lock(&nn->client_lock);
	status = nfs_ok;
out_put_session:
	nfsd4_put_session_locked(ses);
out_client_lock:
	spin_unlock(&nn->client_lock);
out:
	return status;
}
static struct nfsd4_conn *__nfsd4_find_conn(struct svc_xprt *xpt, struct nfsd4_session *s)
{
	struct nfsd4_conn *c;

	list_for_each_entry(c, &s->se_conns, cn_persession) {
		if (c->cn_xprt == xpt) {
			return c;
		}
	}
	return NULL;
}

static __be32 nfsd4_sequence_check_conn(struct nfsd4_conn *new, struct nfsd4_session *ses)
{
	struct nfs4_client *clp = ses->se_client;
	struct nfsd4_conn *c;
	__be32 status = nfs_ok;
	int ret;

	spin_lock(&clp->cl_lock);
	c = __nfsd4_find_conn(new->cn_xprt, ses);
	if (c)
		goto out_free;
	status = nfserr_conn_not_bound_to_session;
	if (clp->cl_mach_cred)
		goto out_free;
	__nfsd4_hash_conn(new, ses);
	spin_unlock(&clp->cl_lock);
	ret = nfsd4_register_conn(new);
	if (ret)
		/* oops; xprt is already down: */
		nfsd4_conn_lost(&new->cn_xpt_user);
	return nfs_ok;
out_free:
	spin_unlock(&clp->cl_lock);
	free_conn(new);
	return status;
}

static bool nfsd4_session_too_many_ops(struct svc_rqst *rqstp, struct nfsd4_session *session)
{
	struct nfsd4_compoundargs *args = rqstp->rq_argp;

	return args->opcnt > session->se_fchannel.maxops;
}

static bool nfsd4_request_too_big(struct svc_rqst *rqstp,
				  struct nfsd4_session *session)
{
	struct xdr_buf *xb = &rqstp->rq_arg;

	return xb->len > session->se_fchannel.maxreq_sz;
}
__be32
nfsd4_sequence(struct svc_rqst *rqstp,
	       struct nfsd4_compound_state *cstate,
	       struct nfsd4_sequence *seq)
{
	struct nfsd4_compoundres *resp = rqstp->rq_resp;
	struct xdr_stream *xdr = &resp->xdr;
	struct nfsd4_session *session;
	struct nfs4_client *clp;
	struct nfsd4_slot *slot;
	struct nfsd4_conn *conn;
	__be32 status;
	int buflen;
	struct net *net = SVC_NET(rqstp);
	struct nfsd_net *nn = net_generic(net, nfsd_net_id);

	if (resp->opcnt != 1)
		return nfserr_sequence_pos;

	/*
	 * Will be either used or freed by nfsd4_sequence_check_conn
	 * below.
	 */
	conn = alloc_conn(rqstp, NFS4_CDFC4_FORE);
	if (!conn)
		return nfserr_jukebox;

	spin_lock(&nn->client_lock);
	session = find_in_sessionid_hashtbl(&seq->sessionid, net, &status);
	if (!session)
		goto out_no_session;
	clp = session->se_client;

	status = nfserr_too_many_ops;
	if (nfsd4_session_too_many_ops(rqstp, session))
		goto out_put_session;

	status = nfserr_req_too_big;
	if (nfsd4_request_too_big(rqstp, session))
		goto out_put_session;

	status = nfserr_badslot;
	if (seq->slotid >= session->se_fchannel.maxreqs)
		goto out_put_session;

	slot = session->se_slots[seq->slotid];
	dprintk("%s: slotid %d\n", __func__, seq->slotid);

	/* We do not negotiate the number of slots yet, so set the
	 * maxslots to the session maxreqs which is used to encode
	 * sr_highest_slotid and the sr_target_slot id to maxslots */
	seq->maxslots = session->se_fchannel.maxreqs;

	status = check_slot_seqid(seq->seqid, slot->sl_seqid,
					slot->sl_flags & NFSD4_SLOT_INUSE);
	if (status == nfserr_replay_cache) {
		status = nfserr_seq_misordered;
		if (!(slot->sl_flags & NFSD4_SLOT_INITIALIZED))
			goto out_put_session;
		cstate->slot = slot;
		cstate->session = session;
		/* Return the cached reply status and set cstate->status
		 * for nfsd4_proc_compound processing */
		status = nfsd4_replay_cache_entry(resp, seq);
		cstate->status = nfserr_replay_cache;
		goto out;
	}
	if (status)
		goto out_put_session;

	status = nfsd4_sequence_check_conn(conn, session);
	conn = NULL;
	if (status)
		goto out_put_session;

	buflen = (seq->cachethis) ?
			session->se_fchannel.maxresp_cached :
			session->se_fchannel.maxresp_sz;
	status = (seq->cachethis) ? nfserr_rep_too_big_to_cache :
				    nfserr_rep_too_big;
	if (xdr_restrict_buflen(xdr, buflen - rqstp->rq_auth_slack))
		goto out_put_session;
	svc_reserve(rqstp, buflen);

	status = nfs_ok;
	/* Success! bump slot seqid */
	slot->sl_seqid = seq->seqid;
	slot->sl_flags |= NFSD4_SLOT_INUSE;
	if (seq->cachethis)
		slot->sl_flags |= NFSD4_SLOT_CACHETHIS;
	else
		slot->sl_flags &= ~NFSD4_SLOT_CACHETHIS;

	cstate->slot = slot;
	cstate->session = session;

out:
	switch (clp->cl_cb_state) {
	case NFSD4_CB_DOWN:
		seq->status_flags = SEQ4_STATUS_CB_PATH_DOWN;
		break;
	case NFSD4_CB_FAULT:
		seq->status_flags = SEQ4_STATUS_BACKCHANNEL_FAULT;
		break;
	default:
		seq->status_flags = 0;
	}
	if (!list_empty(&clp->cl_revoked))
		seq->status_flags |= SEQ4_STATUS_RECALLABLE_STATE_REVOKED;
out_no_session:
	if (conn)
		free_conn(conn);
	spin_unlock(&nn->client_lock);
	return status;
out_put_session:
	nfsd4_put_session_locked(session);
	goto out_no_session;
}
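
/*
 * To summarize the slot checks above: for a slot with sl_seqid N, an
 * arriving SEQUENCE with seqid N+1 is a new request and bumps the
 * slot; seqid N is a retransmission and is answered from the reply
 * cache (nfserr_replay_cache -> nfsd4_replay_cache_entry()); anything
 * else is nfserr_seq_misordered. A request arriving while the slot is
 * still marked NFSD4_SLOT_INUSE with the same seqid gets
 * nfserr_jukebox, so the client simply retries later.
 */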
void
nfsd4_sequence_done(struct nfsd4_compoundres *resp)
{
	struct nfsd4_compound_state *cs = &resp->cstate;

	if (nfsd4_has_session(cs)) {
		if (cs->status != nfserr_replay_cache) {
			nfsd4_store_cache_entry(resp);
			cs->slot->sl_flags &= ~NFSD4_SLOT_INUSE;
		}
		/* Drop session reference that was taken in nfsd4_sequence() */
		nfsd4_put_session(cs->session);
	} else if (cs->clp)
		put_client_renew(cs->clp);
}

__be32
nfsd4_destroy_clientid(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate, struct nfsd4_destroy_clientid *dc)
{
	struct nfs4_client *conf, *unconf;
	struct nfs4_client *clp = NULL;
	__be32 status = 0;
	struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);

	spin_lock(&nn->client_lock);
	unconf = find_unconfirmed_client(&dc->clientid, true, nn);
	conf = find_confirmed_client(&dc->clientid, true, nn);
	WARN_ON_ONCE(conf && unconf);

	if (conf) {
		if (client_has_state(conf)) {
			status = nfserr_clientid_busy;
			goto out;
		}
		status = mark_client_expired_locked(conf);
		if (status)
			goto out;
		clp = conf;
	} else if (unconf)
		clp = unconf;
	else {
		status = nfserr_stale_clientid;
		goto out;
	}
	if (!mach_creds_match(clp, rqstp)) {
		clp = NULL;
		status = nfserr_wrong_cred;
		goto out;
	}
	unhash_client_locked(clp);
out:
	spin_unlock(&nn->client_lock);
	if (clp)
		expire_client(clp);
	return status;
}
__be32
nfsd4_reclaim_complete(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate, struct nfsd4_reclaim_complete *rc)
{
	__be32 status = 0;

	if (rc->rca_one_fs) {
		if (!cstate->current_fh.fh_dentry)
			return nfserr_nofilehandle;
		/*
		 * We don't take advantage of the rca_one_fs case.
		 * That's OK, it's optional, we can safely ignore it.
		 */
		return nfs_ok;
	}

	status = nfserr_complete_already;
	if (test_and_set_bit(NFSD4_CLIENT_RECLAIM_COMPLETE,
			     &cstate->session->se_client->cl_flags))
		goto out;

	status = nfserr_stale_clientid;
	if (is_client_expired(cstate->session->se_client))
		/*
		 * The following error isn't really legal.
		 * But we only get here if the client just explicitly
		 * destroyed its own clientid. Surely it no longer cares
		 * what error it gets back on an operation for the dead
		 * client.
		 */
		goto out;

	status = nfs_ok;
	nfsd4_client_record_create(cstate->session->se_client);
out:
	return status;
}
__be32
nfsd4_setclientid(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
		  struct nfsd4_setclientid *setclid)
{
	struct xdr_netobj	clname = setclid->se_name;
	nfs4_verifier		clverifier = setclid->se_verf;
	struct nfs4_client	*conf, *new;
	struct nfs4_client	*unconf = NULL;
	__be32			status;
	struct nfsd_net		*nn = net_generic(SVC_NET(rqstp), nfsd_net_id);

	new = create_client(clname, rqstp, &clverifier);
	if (new == NULL)
		return nfserr_jukebox;
	/* Cases below refer to rfc 3530 section 14.2.33: */
	spin_lock(&nn->client_lock);
	conf = find_confirmed_client_by_name(&clname, nn);
	if (conf) {
		/* case 0: */
		status = nfserr_clid_inuse;
		if (clp_used_exchangeid(conf))
			goto out;
		if (!same_creds(&conf->cl_cred, &rqstp->rq_cred)) {
			char addr_str[INET6_ADDRSTRLEN];
			rpc_ntop((struct sockaddr *) &conf->cl_addr, addr_str,
				 sizeof(addr_str));
			dprintk("NFSD: setclientid: string in use by client "
				"at %s\n", addr_str);
			goto out;
		}
	}
	unconf = find_unconfirmed_client_by_name(&clname, nn);
	if (unconf)
		unhash_client_locked(unconf);
	if (conf && same_verf(&conf->cl_verifier, &clverifier))
		/* case 1: probable callback update */
		copy_clid(new, conf);
	else /* case 4 (new client) or cases 2, 3 (client reboot): */
		gen_clid(new, nn);
	new->cl_minorversion = 0;
	gen_callback(new, setclid, rqstp);
	add_to_unconfirmed(new);
	setclid->se_clientid.cl_boot = new->cl_clientid.cl_boot;
	setclid->se_clientid.cl_id = new->cl_clientid.cl_id;
	memcpy(setclid->se_confirm.data, new->cl_confirm.data, sizeof(setclid->se_confirm.data));
	new = NULL;
	status = nfs_ok;
out:
	spin_unlock(&nn->client_lock);
	if (new)
		free_client(new);
	if (unconf)
		expire_client(unconf);
	return status;
}
__be32
nfsd4_setclientid_confirm(struct svc_rqst *rqstp,
			  struct nfsd4_compound_state *cstate,
			  struct nfsd4_setclientid_confirm *setclientid_confirm)
{
	struct nfs4_client *conf, *unconf;
	struct nfs4_client *old = NULL;
	nfs4_verifier confirm = setclientid_confirm->sc_confirm;
	clientid_t * clid = &setclientid_confirm->sc_clientid;
	__be32 status;
	struct nfsd_net	*nn = net_generic(SVC_NET(rqstp), nfsd_net_id);

	if (STALE_CLIENTID(clid, nn))
		return nfserr_stale_clientid;

	spin_lock(&nn->client_lock);
	conf = find_confirmed_client(clid, false, nn);
	unconf = find_unconfirmed_client(clid, false, nn);
	/*
	 * We try hard to give out unique clientid's, so if we get an
	 * attempt to confirm the same clientid with a different cred,
	 * there's a bug somewhere.  Let's charitably assume it's our
	 * bug.
	 */
	status = nfserr_serverfault;
	if (unconf && !same_creds(&unconf->cl_cred, &rqstp->rq_cred))
		goto out;
	if (conf && !same_creds(&conf->cl_cred, &rqstp->rq_cred))
		goto out;
	/* cases below refer to rfc 3530 section 14.2.34: */
	if (!unconf || !same_verf(&confirm, &unconf->cl_confirm)) {
		if (conf && !unconf) /* case 2: probable retransmit */
			status = nfs_ok;
		else /* case 4: client hasn't noticed we rebooted yet? */
			status = nfserr_stale_clientid;
		goto out;
	}
	status = nfs_ok;
	if (conf) { /* case 1: callback update */
		old = unconf;
		unhash_client_locked(old);
		nfsd4_change_callback(conf, &unconf->cl_cb_conn);
	} else { /* case 3: normal case; new or rebooted client */
		old = find_confirmed_client_by_name(&unconf->cl_name, nn);
		if (old) {
			status = mark_client_expired_locked(old);
			if (status) {
				old = NULL;
				goto out;
			}
		}
		move_to_confirmed(unconf);
		conf = unconf;
	}
	get_client_locked(conf);
	spin_unlock(&nn->client_lock);
	nfsd4_probe_callback(conf);
	spin_lock(&nn->client_lock);
	put_client_renew_locked(conf);
out:
	spin_unlock(&nn->client_lock);
	if (old)
		expire_client(old);
	return status;
}
static struct nfs4_file *nfsd4_alloc_file(void)
{
	return kmem_cache_alloc(file_slab, GFP_KERNEL);
}

/* OPEN Share state helper functions */
static void nfsd4_init_file(struct nfs4_file *fp, struct knfsd_fh *fh)
{
	unsigned int hashval = file_hashval(fh);

	lockdep_assert_held(&state_lock);

	atomic_set(&fp->fi_ref, 1);
	spin_lock_init(&fp->fi_lock);
	INIT_LIST_HEAD(&fp->fi_stateids);
	INIT_LIST_HEAD(&fp->fi_delegations);
	fh_copy_shallow(&fp->fi_fhandle, fh);
	fp->fi_had_conflict = false;
	fp->fi_lease = NULL;
	fp->fi_share_deny = 0;
	memset(fp->fi_fds, 0, sizeof(fp->fi_fds));
	memset(fp->fi_access, 0, sizeof(fp->fi_access));
	hlist_add_head(&fp->fi_hash, &file_hashtbl[hashval]);
}
void
nfsd4_free_slabs(void)
{
	kmem_cache_destroy(openowner_slab);
	kmem_cache_destroy(lockowner_slab);
	kmem_cache_destroy(file_slab);
	kmem_cache_destroy(stateid_slab);
	kmem_cache_destroy(deleg_slab);
}

int
nfsd4_init_slabs(void)
{
	openowner_slab = kmem_cache_create("nfsd4_openowners",
			sizeof(struct nfs4_openowner), 0, 0, NULL);
	if (openowner_slab == NULL)
		goto out;
	lockowner_slab = kmem_cache_create("nfsd4_lockowners",
			sizeof(struct nfs4_lockowner), 0, 0, NULL);
	if (lockowner_slab == NULL)
		goto out_free_openowner_slab;
	file_slab = kmem_cache_create("nfsd4_files",
			sizeof(struct nfs4_file), 0, 0, NULL);
	if (file_slab == NULL)
		goto out_free_lockowner_slab;
	stateid_slab = kmem_cache_create("nfsd4_stateids",
			sizeof(struct nfs4_ol_stateid), 0, 0, NULL);
	if (stateid_slab == NULL)
		goto out_free_file_slab;
	deleg_slab = kmem_cache_create("nfsd4_delegations",
			sizeof(struct nfs4_delegation), 0, 0, NULL);
	if (deleg_slab == NULL)
		goto out_free_stateid_slab;
	return 0;

out_free_stateid_slab:
	kmem_cache_destroy(stateid_slab);
out_free_file_slab:
	kmem_cache_destroy(file_slab);
out_free_lockowner_slab:
	kmem_cache_destroy(lockowner_slab);
out_free_openowner_slab:
	kmem_cache_destroy(openowner_slab);
out:
	dprintk("nfsd4: out of memory while initializing nfsv4\n");
	return -ENOMEM;
}
static void init_nfs4_replay(struct nfs4_replay *rp)
{
	rp->rp_status = nfserr_serverfault;
	rp->rp_buflen = 0;
	rp->rp_buf = rp->rp_ibuf;
	mutex_init(&rp->rp_mutex);
}

static void nfsd4_cstate_assign_replay(struct nfsd4_compound_state *cstate,
		struct nfs4_stateowner *so)
{
	if (!nfsd4_has_session(cstate)) {
		mutex_lock(&so->so_replay.rp_mutex);
		cstate->replay_owner = so;
		atomic_inc(&so->so_count);
	}
}

void nfsd4_cstate_clear_replay(struct nfsd4_compound_state *cstate)
{
	struct nfs4_stateowner *so = cstate->replay_owner;

	if (so != NULL) {
		cstate->replay_owner = NULL;
		mutex_unlock(&so->so_replay.rp_mutex);
		nfs4_put_stateowner(so);
	}
}

static inline void *alloc_stateowner(struct kmem_cache *slab, struct xdr_netobj *owner, struct nfs4_client *clp)
{
	struct nfs4_stateowner *sop;

	sop = kmem_cache_alloc(slab, GFP_KERNEL);
	if (!sop)
		return NULL;

	sop->so_owner.data = kmemdup(owner->data, owner->len, GFP_KERNEL);
	if (!sop->so_owner.data) {
		kmem_cache_free(slab, sop);
		return NULL;
	}
	sop->so_owner.len = owner->len;

	INIT_LIST_HEAD(&sop->so_stateids);
	sop->so_client = clp;
	init_nfs4_replay(&sop->so_replay);
	atomic_set(&sop->so_count, 1);
	return sop;
}

static void hash_openowner(struct nfs4_openowner *oo, struct nfs4_client *clp, unsigned int strhashval)
{
	lockdep_assert_held(&clp->cl_lock);

	list_add(&oo->oo_owner.so_strhash,
		 &clp->cl_ownerstr_hashtbl[strhashval]);
	list_add(&oo->oo_perclient, &clp->cl_openowners);
}

static void nfs4_unhash_openowner(struct nfs4_stateowner *so)
{
	unhash_openowner_locked(openowner(so));
}

static void nfs4_free_openowner(struct nfs4_stateowner *so)
{
	struct nfs4_openowner *oo = openowner(so);

	kmem_cache_free(openowner_slab, oo);
}

static const struct nfs4_stateowner_operations openowner_ops = {
	.so_unhash =	nfs4_unhash_openowner,
	.so_free =	nfs4_free_openowner,
};
static struct nfs4_openowner *
alloc_init_open_stateowner(unsigned int strhashval, struct nfsd4_open *open,
			   struct nfsd4_compound_state *cstate)
{
	struct nfs4_client *clp = cstate->clp;
	struct nfs4_openowner *oo, *ret;

	oo = alloc_stateowner(openowner_slab, &open->op_owner, clp);
	if (!oo)
		return NULL;
	oo->oo_owner.so_ops = &openowner_ops;
	oo->oo_owner.so_is_open_owner = 1;
	oo->oo_owner.so_seqid = open->op_seqid;
	oo->oo_flags = 0;
	if (nfsd4_has_session(cstate))
		oo->oo_flags |= NFS4_OO_CONFIRMED;
	oo->oo_time = 0;
	oo->oo_last_closed_stid = NULL;
	INIT_LIST_HEAD(&oo->oo_close_lru);
	spin_lock(&clp->cl_lock);
	ret = find_openstateowner_str_locked(strhashval, open, clp);
	if (ret == NULL) {
		hash_openowner(oo, clp, strhashval);
		ret = oo;
	} else
		nfs4_free_openowner(&oo->oo_owner);
	spin_unlock(&clp->cl_lock);
	return ret;
}
static void init_open_stateid(struct nfs4_ol_stateid *stp, struct nfs4_file *fp, struct nfsd4_open *open) {
	struct nfs4_openowner *oo = open->op_openowner;

	atomic_inc(&stp->st_stid.sc_count);
	stp->st_stid.sc_type = NFS4_OPEN_STID;
	INIT_LIST_HEAD(&stp->st_locks);
	stp->st_stateowner = &oo->oo_owner;
	atomic_inc(&stp->st_stateowner->so_count);
	get_nfs4_file(fp);
	stp->st_stid.sc_file = fp;
	stp->st_access_bmap = 0;
	stp->st_deny_bmap = 0;
	stp->st_openstp = NULL;
	spin_lock(&oo->oo_owner.so_client->cl_lock);
	list_add(&stp->st_perstateowner, &oo->oo_owner.so_stateids);
	spin_lock(&fp->fi_lock);
	list_add(&stp->st_perfile, &fp->fi_stateids);
	spin_unlock(&fp->fi_lock);
	spin_unlock(&oo->oo_owner.so_client->cl_lock);
}
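
/*
 * Note the lock ordering used above: the per-client cl_lock is taken
 * before the per-file fi_lock, never the reverse. Any new code that
 * needs to walk both the per-owner and per-file stateid lists should
 * follow the same order to avoid ABBA deadlocks.
 */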
/*
 * In the 4.0 case we need to keep the owners around a little while to handle
 * CLOSE replay. We still do need to release any file access that is held by
 * them before returning however.
 */
static void
move_to_close_lru(struct nfs4_ol_stateid *s, struct net *net)
{
	struct nfs4_ol_stateid *last;
	struct nfs4_openowner *oo = openowner(s->st_stateowner);
	struct nfsd_net *nn = net_generic(s->st_stid.sc_client->net,
						nfsd_net_id);

	dprintk("NFSD: move_to_close_lru nfs4_openowner %p\n", oo);

	/*
	 * We know that we hold one reference via nfsd4_close, and another
	 * "persistent" reference for the client. If the refcount is higher
	 * than 2, then there are still calls in progress that are using this
	 * stateid. We can't put the sc_file reference until they are finished.
	 * Wait for the refcount to drop to 2. Since it has been unhashed,
	 * there should be no danger of the refcount going back up again at
	 * this point.
	 */
	wait_event(close_wq, atomic_read(&s->st_stid.sc_count) == 2);

	release_all_access(s);
	if (s->st_stid.sc_file) {
		put_nfs4_file(s->st_stid.sc_file);
		s->st_stid.sc_file = NULL;
	}

	spin_lock(&nn->client_lock);
	last = oo->oo_last_closed_stid;
	oo->oo_last_closed_stid = s;
	list_move_tail(&oo->oo_close_lru, &nn->close_lru);
	oo->oo_time = get_seconds();
	spin_unlock(&nn->client_lock);
	if (last)
		nfs4_put_stid(&last->st_stid);
}
/* search file_hashtbl[] for file */
static struct nfs4_file *
find_file_locked(struct knfsd_fh *fh)
{
	unsigned int hashval = file_hashval(fh);
	struct nfs4_file *fp;

	lockdep_assert_held(&state_lock);

	hlist_for_each_entry(fp, &file_hashtbl[hashval], fi_hash) {
		if (nfsd_fh_match(&fp->fi_fhandle, fh)) {
			get_nfs4_file(fp);
			return fp;
		}
	}
	return NULL;
}

static struct nfs4_file *
find_file(struct knfsd_fh *fh)
{
	struct nfs4_file *fp;

	spin_lock(&state_lock);
	fp = find_file_locked(fh);
	spin_unlock(&state_lock);
	return fp;
}

static struct nfs4_file *
find_or_add_file(struct nfs4_file *new, struct knfsd_fh *fh)
{
	struct nfs4_file *fp;

	spin_lock(&state_lock);
	fp = find_file_locked(fh);
	if (fp == NULL) {
		nfsd4_init_file(new, fh);
		fp = new;
	}
	spin_unlock(&state_lock);

	return fp;
}
/*
 * Called to check deny when READ with all zero stateid or
 * WRITE with all zero or all one stateid
 */
static __be32
nfs4_share_conflict(struct svc_fh *current_fh, unsigned int deny_type)
{
	struct nfs4_file *fp;
	__be32 ret = nfs_ok;

	fp = find_file(&current_fh->fh_handle);
	if (!fp)
		return ret;
	/* Check for conflicting share reservations */
	spin_lock(&fp->fi_lock);
	if (fp->fi_share_deny & deny_type)
		ret = nfserr_locked;
	spin_unlock(&fp->fi_lock);
	put_nfs4_file(fp);
	return ret;
}

void nfsd4_prepare_cb_recall(struct nfs4_delegation *dp)
{
	struct nfsd_net *nn = net_generic(dp->dl_stid.sc_client->net,
					  nfsd_net_id);

	block_delegations(&dp->dl_stid.sc_file->fi_fhandle);

	/*
	 * We can't do this in nfsd_break_deleg_cb because it is
	 * already holding inode->i_lock.
	 *
	 * If the dl_time != 0, then we know that it has already been
	 * queued for a lease break. Don't queue it again.
	 */
	spin_lock(&state_lock);
	if (dp->dl_time == 0) {
		dp->dl_time = get_seconds();
		list_add_tail(&dp->dl_recall_lru, &nn->del_recall_lru);
	}
	spin_unlock(&state_lock);
}

static void nfsd_break_one_deleg(struct nfs4_delegation *dp)
{
	/*
	 * We're assuming the state code never drops its reference
	 * without first removing the lease.  Since we're in this lease
	 * callback (and since the lease code is serialized by the kernel
	 * lock) we know the server hasn't removed the lease yet, we know
	 * it's safe to take a reference.
	 */
	atomic_inc(&dp->dl_stid.sc_count);
	nfsd4_cb_recall(dp);
}
/* Called from break_lease() with i_lock held. */
static void nfsd_break_deleg_cb(struct file_lock *fl)
{
	struct nfs4_file *fp = (struct nfs4_file *)fl->fl_owner;
	struct nfs4_delegation *dp;

	if (!fp) {
		WARN(1, "(%p)->fl_owner NULL\n", fl);
		return;
	}
	if (fp->fi_had_conflict) {
		WARN(1, "duplicate break on %p\n", fp);
		return;
	}
	/*
	 * We don't want the locks code to timeout the lease for us;
	 * we'll remove it ourself if a delegation isn't returned
	 * in time:
	 */
	fl->fl_break_time = 0;

	spin_lock(&fp->fi_lock);
	fp->fi_had_conflict = true;
	/*
	 * If there are no delegations on the list, then we can't count on this
	 * lease ever being cleaned up. Set the fl_break_time to jiffies so that
	 * time_out_leases will do it ASAP. The fact that fi_had_conflict is now
	 * true should keep any new delegations from being hashed.
	 */
	if (list_empty(&fp->fi_delegations))
		fl->fl_break_time = jiffies;
	else
		list_for_each_entry(dp, &fp->fi_delegations, dl_perfile)
			nfsd_break_one_deleg(dp);
	spin_unlock(&fp->fi_lock);
}

static
int nfsd_change_deleg_cb(struct file_lock **onlist, int arg)
{
	if (arg & F_UNLCK)
		return lease_modify(onlist, arg);
	else
		return -EAGAIN;
}

static const struct lock_manager_operations nfsd_lease_mng_ops = {
	.lm_break = nfsd_break_deleg_cb,
	.lm_change = nfsd_change_deleg_cb,
};

static __be32 nfsd4_check_seqid(struct nfsd4_compound_state *cstate, struct nfs4_stateowner *so, u32 seqid)
{
	if (nfsd4_has_session(cstate))
		return nfs_ok;
	if (seqid == so->so_seqid - 1)
		return nfserr_replay_me;
	if (seqid == so->so_seqid)
		return nfs_ok;
	return nfserr_bad_seqid;
}

static __be32 lookup_clientid(clientid_t *clid,
		struct nfsd4_compound_state *cstate,
		struct nfsd_net *nn)
{
	struct nfs4_client *found;

	if (cstate->clp) {
		found = cstate->clp;
		if (!same_clid(&found->cl_clientid, clid))
			return nfserr_stale_clientid;
		return nfs_ok;
	}

	if (STALE_CLIENTID(clid, nn))
		return nfserr_stale_clientid;

	/*
	 * For v4.1+ we get the client in the SEQUENCE op. If we don't have one
	 * cached already then we know this is for v4.0 and "sessions"
	 * will be false.
	 */
	WARN_ON_ONCE(cstate->session);
	spin_lock(&nn->client_lock);
	found = find_confirmed_client(clid, false, nn);
	if (!found) {
		spin_unlock(&nn->client_lock);
		return nfserr_expired;
	}
	atomic_inc(&found->cl_refcount);
	spin_unlock(&nn->client_lock);

	/* Cache the nfs4_client in cstate! */
	cstate->clp = found;
	return nfs_ok;
}
__be32
nfsd4_process_open1(struct nfsd4_compound_state *cstate,
		    struct nfsd4_open *open, struct nfsd_net *nn)
{
	clientid_t *clientid = &open->op_clientid;
	struct nfs4_client *clp = NULL;
	unsigned int strhashval;
	struct nfs4_openowner *oo = NULL;
	__be32 status;

	if (STALE_CLIENTID(&open->op_clientid, nn))
		return nfserr_stale_clientid;
	/*
	 * In case we need it later, after we've already created the
	 * file and don't want to risk a further failure:
	 */
	open->op_file = nfsd4_alloc_file();
	if (open->op_file == NULL)
		return nfserr_jukebox;

	status = lookup_clientid(clientid, cstate, nn);
	if (status)
		return status;
	clp = cstate->clp;

	strhashval = ownerstr_hashval(&open->op_owner);
	oo = find_openstateowner_str(strhashval, open, clp);
	open->op_openowner = oo;
	if (!oo)
		goto new_owner;
	if (!(oo->oo_flags & NFS4_OO_CONFIRMED)) {
		/* Replace unconfirmed owners without checking for replay. */
		release_openowner(oo);
		open->op_openowner = NULL;
		goto new_owner;
	}
	status = nfsd4_check_seqid(cstate, &oo->oo_owner, open->op_seqid);
	if (status)
		return status;
	goto alloc_stateid;
new_owner:
	oo = alloc_init_open_stateowner(strhashval, open, cstate);
	if (oo == NULL)
		return nfserr_jukebox;
	open->op_openowner = oo;
alloc_stateid:
	open->op_stp = nfs4_alloc_open_stateid(clp);
	if (!open->op_stp)
		return nfserr_jukebox;
	return nfs_ok;
}
static inline __be32
nfs4_check_delegmode(struct nfs4_delegation *dp, int flags)
{
	if ((flags & WR_STATE) && (dp->dl_type == NFS4_OPEN_DELEGATE_READ))
		return nfserr_openmode;
	else
		return nfs_ok;
}

static int share_access_to_flags(u32 share_access)
{
	return share_access == NFS4_SHARE_ACCESS_READ ? RD_STATE : WR_STATE;
}

static struct nfs4_delegation *find_deleg_stateid(struct nfs4_client *cl, stateid_t *s)
{
	struct nfs4_stid *ret;

	ret = find_stateid_by_type(cl, s, NFS4_DELEG_STID);
	if (!ret)
		return NULL;
	return delegstateid(ret);
}

static bool nfsd4_is_deleg_cur(struct nfsd4_open *open)
{
	return open->op_claim_type == NFS4_OPEN_CLAIM_DELEGATE_CUR ||
	       open->op_claim_type == NFS4_OPEN_CLAIM_DELEG_CUR_FH;
}

static __be32
nfs4_check_deleg(struct nfs4_client *cl, struct nfsd4_open *open,
		struct nfs4_delegation **dp)
{
	int flags;
	__be32 status = nfserr_bad_stateid;
	struct nfs4_delegation *deleg;

	deleg = find_deleg_stateid(cl, &open->op_delegate_stateid);
	if (deleg == NULL)
		goto out;
	flags = share_access_to_flags(open->op_share_access);
	status = nfs4_check_delegmode(deleg, flags);
	if (status) {
		nfs4_put_stid(&deleg->dl_stid);
		goto out;
	}
	*dp = deleg;
out:
	if (!nfsd4_is_deleg_cur(open))
		return nfs_ok;
	if (status)
		return status;
	open->op_openowner->oo_flags |= NFS4_OO_CONFIRMED;
	return nfs_ok;
}
static struct nfs4_ol_stateid *
nfsd4_find_existing_open(struct nfs4_file *fp, struct nfsd4_open *open)
{
	struct nfs4_ol_stateid *local, *ret = NULL;
	struct nfs4_openowner *oo = open->op_openowner;

	spin_lock(&fp->fi_lock);
	list_for_each_entry(local, &fp->fi_stateids, st_perfile) {
		/* ignore lock owners */
		if (local->st_stateowner->so_is_open_owner == 0)
			continue;
		if (local->st_stateowner == &oo->oo_owner) {
			ret = local;
			atomic_inc(&ret->st_stid.sc_count);
			break;
		}
	}
	spin_unlock(&fp->fi_lock);
	return ret;
}

static inline int nfs4_access_to_access(u32 nfs4_access)
{
	int flags = 0;

	if (nfs4_access & NFS4_SHARE_ACCESS_READ)
		flags |= NFSD_MAY_READ;
	if (nfs4_access & NFS4_SHARE_ACCESS_WRITE)
		flags |= NFSD_MAY_WRITE;
	return flags;
}

static inline __be32
nfsd4_truncate(struct svc_rqst *rqstp, struct svc_fh *fh,
		struct nfsd4_open *open)
{
	struct iattr iattr = {
		.ia_valid = ATTR_SIZE,
		.ia_size = 0,
	};
	if (!open->op_truncate)
		return 0;
	if (!(open->op_share_access & NFS4_SHARE_ACCESS_WRITE))
		return nfserr_inval;
	return nfsd_setattr(rqstp, fh, &iattr, 0, (time_t)0);
}
static __be32 nfs4_get_vfs_file(struct svc_rqst *rqstp, struct nfs4_file *fp,
		struct svc_fh *cur_fh, struct nfs4_ol_stateid *stp,
		struct nfsd4_open *open)
{
	struct file *filp = NULL;
	__be32 status;
	int oflag = nfs4_access_to_omode(open->op_share_access);
	int access = nfs4_access_to_access(open->op_share_access);
	unsigned char old_access_bmap, old_deny_bmap;

	spin_lock(&fp->fi_lock);

	/*
	 * Are we trying to set a deny mode that would conflict with
	 * current access?
	 */
	status = nfs4_file_check_deny(fp, open->op_share_deny);
	if (status != nfs_ok) {
		spin_unlock(&fp->fi_lock);
		goto out;
	}

	/* set access to the file */
	status = nfs4_file_get_access(fp, open->op_share_access);
	if (status != nfs_ok) {
		spin_unlock(&fp->fi_lock);
		goto out;
	}

	/* Set access bits in stateid */
	old_access_bmap = stp->st_access_bmap;
	set_access(open->op_share_access, stp);

	/* Set new deny mask */
	old_deny_bmap = stp->st_deny_bmap;
	set_deny(open->op_share_deny, stp);
	fp->fi_share_deny |= (open->op_share_deny & NFS4_SHARE_DENY_BOTH);

	if (!fp->fi_fds[oflag]) {
		spin_unlock(&fp->fi_lock);
		status = nfsd_open(rqstp, cur_fh, S_IFREG, access, &filp);
		if (status)
			goto out_put_access;
		spin_lock(&fp->fi_lock);
		if (!fp->fi_fds[oflag]) {
			fp->fi_fds[oflag] = filp;
			filp = NULL;
		}
	}
	spin_unlock(&fp->fi_lock);
	if (filp)
		fput(filp);

	status = nfsd4_truncate(rqstp, cur_fh, open);
	if (status)
		goto out_put_access;
out:
	return status;
out_put_access:
	stp->st_access_bmap = old_access_bmap;
	nfs4_file_put_access(fp, open->op_share_access);
	reset_union_bmap_deny(bmap_to_share_mode(old_deny_bmap), stp);
	goto out;
}
static __be32
nfs4_upgrade_open(struct svc_rqst *rqstp, struct nfs4_file *fp, struct svc_fh *cur_fh, struct nfs4_ol_stateid *stp, struct nfsd4_open *open)
{
	__be32 status;
	unsigned char old_deny_bmap;

	if (!test_access(open->op_share_access, stp))
		return nfs4_get_vfs_file(rqstp, fp, cur_fh, stp, open);

	/* test and set deny mode */
	spin_lock(&fp->fi_lock);
	status = nfs4_file_check_deny(fp, open->op_share_deny);
	if (status == nfs_ok) {
		old_deny_bmap = stp->st_deny_bmap;
		set_deny(open->op_share_deny, stp);
		fp->fi_share_deny |=
				(open->op_share_deny & NFS4_SHARE_DENY_BOTH);
	}
	spin_unlock(&fp->fi_lock);

	if (status != nfs_ok)
		return status;

	status = nfsd4_truncate(rqstp, cur_fh, open);
	if (status != nfs_ok)
		reset_union_bmap_deny(old_deny_bmap, stp);
	return status;
}
static void
nfs4_set_claim_prev(struct nfsd4_open *open, bool has_session)
{
	open->op_openowner->oo_flags |= NFS4_OO_CONFIRMED;
}

/* Should we give out recallable state?: */
static bool nfsd4_cb_channel_good(struct nfs4_client *clp)
{
	if (clp->cl_cb_state == NFSD4_CB_UP)
		return true;
	/*
	 * In the sessions case, since we don't have to establish a
	 * separate connection for callbacks, we assume it's OK
	 * until we hear otherwise:
	 */
	return clp->cl_minorversion && clp->cl_cb_state == NFSD4_CB_UNKNOWN;
}

static struct file_lock *nfs4_alloc_init_lease(struct nfs4_file *fp, int flag)
{
	struct file_lock *fl;

	fl = locks_alloc_lock();
	if (!fl)
		return NULL;
	locks_init_lock(fl);
	fl->fl_lmops = &nfsd_lease_mng_ops;
	fl->fl_flags = FL_DELEG;
	fl->fl_type = flag == NFS4_OPEN_DELEGATE_READ ? F_RDLCK : F_WRLCK;
	fl->fl_end = OFFSET_MAX;
	fl->fl_owner = (fl_owner_t)fp;
	fl->fl_pid = current->tgid;
	return fl;
}
static int nfs4_setlease(struct nfs4_delegation *dp)
{
	struct nfs4_file *fp = dp->dl_stid.sc_file;
	struct file_lock *fl;
	struct file *filp;
	int status;

	fl = nfs4_alloc_init_lease(fp, NFS4_OPEN_DELEGATE_READ);
	if (!fl)
		return -ENOMEM;
	filp = find_readable_file(fp);
	if (!filp) {
		/* We should always have a readable file here */
		WARN_ON_ONCE(1);
		return -EBADF;
	}
	fl->fl_file = filp;
	status = vfs_setlease(filp, fl->fl_type, &fl);
	if (status) {
		locks_free_lock(fl);
		goto out_fput;
	}
	spin_lock(&state_lock);
	spin_lock(&fp->fi_lock);
	/* Did the lease get broken before we took the lock? */
	status = -EAGAIN;
	if (fp->fi_had_conflict)
		goto out_unlock;
	/* Race breaker */
	if (fp->fi_lease) {
		status = 0;
		atomic_inc(&fp->fi_delegees);
		hash_delegation_locked(dp, fp);
		goto out_unlock;
	}
	fp->fi_lease = fl;
	fp->fi_deleg_file = filp;
	atomic_set(&fp->fi_delegees, 1);
	hash_delegation_locked(dp, fp);
	spin_unlock(&fp->fi_lock);
	spin_unlock(&state_lock);
	return 0;
out_unlock:
	spin_unlock(&fp->fi_lock);
	spin_unlock(&state_lock);
out_fput:
	fput(filp);
	return status;
}
static struct nfs4_delegation *
nfs4_set_delegation(struct nfs4_client *clp, struct svc_fh *fh,
		    struct nfs4_file *fp)
{
	int status;
	struct nfs4_delegation *dp;

	if (fp->fi_had_conflict)
		return ERR_PTR(-EAGAIN);

	dp = alloc_init_deleg(clp, fh);
	if (!dp)
		return ERR_PTR(-ENOMEM);

	get_nfs4_file(fp);
	spin_lock(&state_lock);
	spin_lock(&fp->fi_lock);
	dp->dl_stid.sc_file = fp;
	if (!fp->fi_lease) {
		spin_unlock(&fp->fi_lock);
		spin_unlock(&state_lock);
		status = nfs4_setlease(dp);
		goto out;
	}
	atomic_inc(&fp->fi_delegees);
	if (fp->fi_had_conflict) {
		status = -EAGAIN;
		goto out_unlock;
	}
	hash_delegation_locked(dp, fp);
	status = 0;
out_unlock:
	spin_unlock(&fp->fi_lock);
	spin_unlock(&state_lock);
out:
	if (status) {
		nfs4_put_stid(&dp->dl_stid);
		return ERR_PTR(status);
	}
	return dp;
}
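
/*
 * The flow above is an optimistic check / lock / re-check pattern:
 * fi_had_conflict is tested before any locks are taken, the lease is
 * established outside the spinlocks (vfs_setlease may block), and the
 * flag is tested again under fi_lock before the delegation is hashed,
 * so a lease break racing with setup cannot result in a delegation
 * being handed out for an already-conflicted file.
 */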
static void nfsd4_open_deleg_none_ext(struct nfsd4_open *open, int status)
{
	open->op_delegate_type = NFS4_OPEN_DELEGATE_NONE_EXT;
	if (status == -EAGAIN)
		open->op_why_no_deleg = WND4_CONTENTION;
	else {
		open->op_why_no_deleg = WND4_RESOURCE;
		switch (open->op_deleg_want) {
		case NFS4_SHARE_WANT_READ_DELEG:
		case NFS4_SHARE_WANT_WRITE_DELEG:
		case NFS4_SHARE_WANT_ANY_DELEG:
			break;
		case NFS4_SHARE_WANT_CANCEL:
			open->op_why_no_deleg = WND4_CANCELLED;
			break;
		case NFS4_SHARE_WANT_NO_DELEG:
			WARN_ON_ONCE(1);
		}
	}
}
/*
 * Attempt to hand out a delegation.
 *
 * Note we don't support write delegations, and won't until the vfs has
 * proper support for them.
 */
static void
nfs4_open_delegation(struct svc_fh *fh, struct nfsd4_open *open,
			struct nfs4_ol_stateid *stp)
{
	struct nfs4_delegation *dp;
	struct nfs4_openowner *oo = openowner(stp->st_stateowner);
	struct nfs4_client *clp = stp->st_stid.sc_client;
	int cb_up;
	int status = 0;

	cb_up = nfsd4_cb_channel_good(oo->oo_owner.so_client);
	open->op_recall = 0;
	switch (open->op_claim_type) {
	case NFS4_OPEN_CLAIM_PREVIOUS:
		if (!cb_up)
			open->op_recall = 1;
		if (open->op_delegate_type != NFS4_OPEN_DELEGATE_READ)
			goto out_no_deleg;
		break;
	case NFS4_OPEN_CLAIM_NULL:
	case NFS4_OPEN_CLAIM_FH:
		/*
		 * Let's not give out any delegations till everyone's
		 * had the chance to reclaim theirs....
		 */
		if (locks_in_grace(clp->net))
			goto out_no_deleg;
		if (!cb_up || !(oo->oo_flags & NFS4_OO_CONFIRMED))
			goto out_no_deleg;
		/*
		 * Also, if the file was opened for write or
		 * create, there's a good chance the client's
		 * about to write to it, resulting in an
		 * immediate recall (since we don't support
		 * write delegations):
		 */
		if (open->op_share_access & NFS4_SHARE_ACCESS_WRITE)
			goto out_no_deleg;
		if (open->op_create == NFS4_OPEN_CREATE)
			goto out_no_deleg;
		break;
	default:
		goto out_no_deleg;
	}
	dp = nfs4_set_delegation(clp, fh, stp->st_stid.sc_file);
	if (IS_ERR(dp))
		goto out_no_deleg;

	memcpy(&open->op_delegate_stateid, &dp->dl_stid.sc_stateid, sizeof(dp->dl_stid.sc_stateid));

	dprintk("NFSD: delegation stateid=" STATEID_FMT "\n",
		STATEID_VAL(&dp->dl_stid.sc_stateid));
	open->op_delegate_type = NFS4_OPEN_DELEGATE_READ;
	nfs4_put_stid(&dp->dl_stid);
	return;
out_no_deleg:
	open->op_delegate_type = NFS4_OPEN_DELEGATE_NONE;
	if (open->op_claim_type == NFS4_OPEN_CLAIM_PREVIOUS &&
	    open->op_delegate_type != NFS4_OPEN_DELEGATE_NONE) {
		dprintk("NFSD: WARNING: refusing delegation reclaim\n");
		open->op_recall = 1;
	}

	/* 4.1 client asking for a delegation? */
	if (open->op_deleg_want)
		nfsd4_open_deleg_none_ext(open, status);
	return;
}
static void nfsd4_deleg_xgrade_none_ext(struct nfsd4_open *open,
					struct nfs4_delegation *dp)
{
	if (open->op_deleg_want == NFS4_SHARE_WANT_READ_DELEG &&
	    dp->dl_type == NFS4_OPEN_DELEGATE_WRITE) {
		open->op_delegate_type = NFS4_OPEN_DELEGATE_NONE_EXT;
		open->op_why_no_deleg = WND4_NOT_SUPP_DOWNGRADE;
	} else if (open->op_deleg_want == NFS4_SHARE_WANT_WRITE_DELEG &&
		   dp->dl_type == NFS4_OPEN_DELEGATE_WRITE) {
		open->op_delegate_type = NFS4_OPEN_DELEGATE_NONE_EXT;
		open->op_why_no_deleg = WND4_NOT_SUPP_UPGRADE;
	}
	/* Otherwise the client must be confused wanting a delegation
	 * it already has, therefore we don't return
	 * NFS4_OPEN_DELEGATE_NONE_EXT and reason.
	 */
}
__be32
nfsd4_process_open2(struct svc_rqst *rqstp, struct svc_fh *current_fh, struct nfsd4_open *open)
{
	struct nfsd4_compoundres *resp = rqstp->rq_resp;
	struct nfs4_client *cl = open->op_openowner->oo_owner.so_client;
	struct nfs4_file *fp = NULL;
	struct nfs4_ol_stateid *stp = NULL;
	struct nfs4_delegation *dp = NULL;
	__be32 status;

	/*
	 * Lookup file; if found, lookup stateid and check open request,
	 * and check for delegations in the process of being recalled.
	 * If not found, create the nfs4_file struct
	 */
	fp = find_or_add_file(open->op_file, &current_fh->fh_handle);
	if (fp != open->op_file) {
		status = nfs4_check_deleg(cl, open, &dp);
		if (status)
			goto out;
		stp = nfsd4_find_existing_open(fp, open);
	} else {
		open->op_file = NULL;
		status = nfserr_bad_stateid;
		if (nfsd4_is_deleg_cur(open))
			goto out;
		status = nfserr_jukebox;
	}

	/*
	 * OPEN the file, or upgrade an existing OPEN.
	 * If truncate fails, the OPEN fails.
	 */
	if (stp) {
		/* Stateid was found, this is an OPEN upgrade */
		status = nfs4_upgrade_open(rqstp, fp, current_fh, stp, open);
		if (status)
			goto out;
	} else {
		stp = open->op_stp;
		open->op_stp = NULL;
		init_open_stateid(stp, fp, open);
		status = nfs4_get_vfs_file(rqstp, fp, current_fh, stp, open);
		if (status) {
			release_open_stateid(stp);
			goto out;
		}
	}
	update_stateid(&stp->st_stid.sc_stateid);
	memcpy(&open->op_stateid, &stp->st_stid.sc_stateid, sizeof(stateid_t));

	if (nfsd4_has_session(&resp->cstate)) {
		if (open->op_deleg_want & NFS4_SHARE_WANT_NO_DELEG) {
			open->op_delegate_type = NFS4_OPEN_DELEGATE_NONE_EXT;
			open->op_why_no_deleg = WND4_NOT_WANTED;
			goto nodeleg;
		}
	}

	/*
	 * Attempt to hand out a delegation. No error return, because the
	 * OPEN succeeds even if we fail.
	 */
	nfs4_open_delegation(current_fh, open, stp);
nodeleg:
	status = nfs_ok;

	dprintk("%s: stateid=" STATEID_FMT "\n", __func__,
		STATEID_VAL(&stp->st_stid.sc_stateid));
out:
	/* 4.1 client trying to upgrade/downgrade delegation? */
	if (open->op_delegate_type == NFS4_OPEN_DELEGATE_NONE && dp &&
	    open->op_deleg_want)
		nfsd4_deleg_xgrade_none_ext(open, dp);

	if (fp)
		put_nfs4_file(fp);
	if (status == 0 && open->op_claim_type == NFS4_OPEN_CLAIM_PREVIOUS)
		nfs4_set_claim_prev(open, nfsd4_has_session(&resp->cstate));
	/*
	 * To finish the open response, we just need to set the rflags.
	 */
	open->op_rflags = NFS4_OPEN_RESULT_LOCKTYPE_POSIX;
	if (!(open->op_openowner->oo_flags & NFS4_OO_CONFIRMED) &&
	    !nfsd4_has_session(&resp->cstate))
		open->op_rflags |= NFS4_OPEN_RESULT_CONFIRM;
	if (dp)
		nfs4_put_stid(&dp->dl_stid);
	if (stp)
		nfs4_put_stid(&stp->st_stid);

	return status;
}
void nfsd4_cleanup_open_state(struct nfsd4_compound_state *cstate,
			      struct nfsd4_open *open, __be32 status)
{
	if (open->op_openowner) {
		struct nfs4_stateowner *so = &open->op_openowner->oo_owner;

		nfsd4_cstate_assign_replay(cstate, so);
		nfs4_put_stateowner(so);
	}
	if (open->op_file)
		nfsd4_free_file(open->op_file);
	if (open->op_stp)
		nfs4_put_stid(&open->op_stp->st_stid);
}

__be32
nfsd4_renew(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
	    clientid_t *clid)
{
	struct nfs4_client *clp;
	__be32 status;
	struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);

	dprintk("process_renew(%08x/%08x): starting\n",
			clid->cl_boot, clid->cl_id);
	status = lookup_clientid(clid, cstate, nn);
	if (status)
		goto out;
	clp = cstate->clp;
	status = nfserr_cb_path_down;
	if (!list_empty(&clp->cl_delegations)
			&& clp->cl_cb_state != NFSD4_CB_UP)
		goto out;
	status = nfs_ok;
out:
	return status;
}

static void
nfsd4_end_grace(struct nfsd_net *nn)
{
	/* do nothing if grace period already ended */
	if (nn->grace_ended)
		return;

	dprintk("NFSD: end of grace period\n");
	nn->grace_ended = true;
	nfsd4_record_grace_done(nn, nn->boot_time);
	locks_end_grace(&nn->nfsd4_manager);
	/*
	 * Now that every NFSv4 client has had the chance to recover and
	 * to see the (possibly new, possibly shorter) lease time, we
	 * can safely set the next grace time to the current lease time:
	 */
	nn->nfsd4_grace = nn->nfsd4_lease;
}
static time_t
nfs4_laundromat(struct nfsd_net *nn)
{
	struct nfs4_client *clp;
	struct nfs4_openowner *oo;
	struct nfs4_delegation *dp;
	struct nfs4_ol_stateid *stp;
	struct list_head *pos, *next, reaplist;
	time_t cutoff = get_seconds() - nn->nfsd4_lease;
	time_t t, new_timeo = nn->nfsd4_lease;

	dprintk("NFSD: laundromat service - starting\n");
	nfsd4_end_grace(nn);
	INIT_LIST_HEAD(&reaplist);
	spin_lock(&nn->client_lock);
	list_for_each_safe(pos, next, &nn->client_lru) {
		clp = list_entry(pos, struct nfs4_client, cl_lru);
		if (time_after((unsigned long)clp->cl_time, (unsigned long)cutoff)) {
			t = clp->cl_time - cutoff;
			new_timeo = min(new_timeo, t);
			break;
		}
		if (mark_client_expired_locked(clp)) {
			dprintk("NFSD: client in use (clientid %08x)\n",
				clp->cl_clientid.cl_id);
			continue;
		}
		list_add(&clp->cl_lru, &reaplist);
	}
	spin_unlock(&nn->client_lock);
	list_for_each_safe(pos, next, &reaplist) {
		clp = list_entry(pos, struct nfs4_client, cl_lru);
		dprintk("NFSD: purging unused client (clientid %08x)\n",
			clp->cl_clientid.cl_id);
		list_del_init(&clp->cl_lru);
		expire_client(clp);
	}
	spin_lock(&state_lock);
	list_for_each_safe(pos, next, &nn->del_recall_lru) {
		dp = list_entry(pos, struct nfs4_delegation, dl_recall_lru);
		if (net_generic(dp->dl_stid.sc_client->net, nfsd_net_id) != nn)
			continue;
		if (time_after((unsigned long)dp->dl_time, (unsigned long)cutoff)) {
			t = dp->dl_time - cutoff;
			new_timeo = min(new_timeo, t);
			break;
		}
		unhash_delegation_locked(dp);
		list_add(&dp->dl_recall_lru, &reaplist);
	}
	spin_unlock(&state_lock);
	while (!list_empty(&reaplist)) {
		dp = list_first_entry(&reaplist, struct nfs4_delegation,
					dl_recall_lru);
		list_del_init(&dp->dl_recall_lru);
		revoke_delegation(dp);
	}

	spin_lock(&nn->client_lock);
	while (!list_empty(&nn->close_lru)) {
		oo = list_first_entry(&nn->close_lru, struct nfs4_openowner,
					oo_close_lru);
		if (time_after((unsigned long)oo->oo_time,
			       (unsigned long)cutoff)) {
			t = oo->oo_time - cutoff;
			new_timeo = min(new_timeo, t);
			break;
		}
		list_del_init(&oo->oo_close_lru);
		stp = oo->oo_last_closed_stid;
		oo->oo_last_closed_stid = NULL;
		spin_unlock(&nn->client_lock);
		nfs4_put_stid(&stp->st_stid);
		spin_lock(&nn->client_lock);
	}
	spin_unlock(&nn->client_lock);

	new_timeo = max_t(time_t, new_timeo, NFSD_LAUNDROMAT_MINTIMEOUT);
	return new_timeo;
}
static struct workqueue_struct *laundry_wq;
static void laundromat_main(struct work_struct *);

static void
laundromat_main(struct work_struct *laundry)
{
	time_t t;
	struct delayed_work *dwork = container_of(laundry, struct delayed_work,
						  work);
	struct nfsd_net *nn = container_of(dwork, struct nfsd_net,
					   laundromat_work);

	t = nfs4_laundromat(nn);
	dprintk("NFSD: laundromat_main - sleeping for %ld seconds\n", t);
	queue_delayed_work(laundry_wq, &nn->laundromat_work, t*HZ);
}

static inline __be32 nfs4_check_fh(struct svc_fh *fhp, struct nfs4_ol_stateid *stp)
{
	if (!nfsd_fh_match(&fhp->fh_handle, &stp->st_stid.sc_file->fi_fhandle))
		return nfserr_bad_stateid;
	return nfs_ok;
}

static inline int
access_permit_read(struct nfs4_ol_stateid *stp)
{
	return test_access(NFS4_SHARE_ACCESS_READ, stp) ||
		test_access(NFS4_SHARE_ACCESS_BOTH, stp) ||
		test_access(NFS4_SHARE_ACCESS_WRITE, stp);
}

static inline int
access_permit_write(struct nfs4_ol_stateid *stp)
{
	return test_access(NFS4_SHARE_ACCESS_WRITE, stp) ||
		test_access(NFS4_SHARE_ACCESS_BOTH, stp);
}

static
__be32 nfs4_check_openmode(struct nfs4_ol_stateid *stp, int flags)
{
	__be32 status = nfserr_openmode;

	/* For lock stateid's, we test the parent open, not the lock: */
	if (stp->st_openstp)
		stp = stp->st_openstp;
	if ((flags & WR_STATE) && !access_permit_write(stp))
		goto out;
	if ((flags & RD_STATE) && !access_permit_read(stp))
		goto out;
	status = nfs_ok;
out:
	return status;
}
static inline __be32
check_special_stateids(struct net *net, svc_fh *current_fh, stateid_t *stateid, int flags)
{
	if (ONE_STATEID(stateid) && (flags & RD_STATE))
		return nfs_ok;
	else if (locks_in_grace(net)) {
		/* Answer in remaining cases depends on existence of
		 * conflicting state; so we must wait out the grace period. */
		return nfserr_grace;
	} else if (flags & WR_STATE)
		return nfs4_share_conflict(current_fh,
				NFS4_SHARE_DENY_WRITE);
	else /* (flags & RD_STATE) && ZERO_STATEID(stateid) */
		return nfs4_share_conflict(current_fh,
				NFS4_SHARE_DENY_READ);
}

/*
 * Allow READ/WRITE during grace period on recovered state only for files
 * that are not able to provide mandatory locking.
 */
static inline int
grace_disallows_io(struct net *net, struct inode *inode)
{
	return locks_in_grace(net) && mandatory_lock(inode);
}
/* Returns true iff a is later than b: */
static bool stateid_generation_after(stateid_t *a, stateid_t *b)
{
	return (s32)(a->si_generation - b->si_generation) > 0;
}
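
/*
 * Example of why the comparison is done through a signed cast
 * (illustrative values only): with a->si_generation == 2 and
 * b->si_generation == 0xffffffff,
 *
 *	(s32)(2 - 0xffffffff) == (s32)3 > 0
 *
 * so 'a' is correctly seen as later even though the generation
 * counter has wrapped around zero.
 */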
static __be32 check_stateid_generation(stateid_t *in, stateid_t *ref, bool has_session)
{
	/*
	 * When sessions are used the stateid generation number is ignored
	 * when it is zero.
	 */
	if (has_session && in->si_generation == 0)
		return nfs_ok;

	if (in->si_generation == ref->si_generation)
		return nfs_ok;

	/* If the client sends us a stateid from the future, it's buggy: */
	if (stateid_generation_after(in, ref))
		return nfserr_bad_stateid;
	/*
	 * However, we could see a stateid from the past, even from a
	 * non-buggy client.  For example, if the client sends a lock
	 * while some IO is outstanding, the lock may bump si_generation
	 * while the IO is still in flight.  The client could avoid that
	 * situation by waiting for responses on all the IO requests,
	 * but better performance may result in retrying IO that
	 * receives an old_stateid error if requests are rarely
	 * reordered in flight:
	 */
	return nfserr_old_stateid;
}

static __be32 nfsd4_validate_stateid(struct nfs4_client *cl, stateid_t *stateid)
{
	struct nfs4_stid *s;
	struct nfs4_ol_stateid *ols;
	__be32 status = nfserr_bad_stateid;

	if (ZERO_STATEID(stateid) || ONE_STATEID(stateid))
		return status;
	/* Client debugging aid. */
	if (!same_clid(&stateid->si_opaque.so_clid, &cl->cl_clientid)) {
		char addr_str[INET6_ADDRSTRLEN];
		rpc_ntop((struct sockaddr *)&cl->cl_addr, addr_str,
				sizeof(addr_str));
		pr_warn_ratelimited("NFSD: client %s testing state ID "
					"with incorrect client ID\n", addr_str);
		return status;
	}
	spin_lock(&cl->cl_lock);
	s = find_stateid_locked(cl, stateid);
	if (!s)
		goto out_unlock;
	status = check_stateid_generation(stateid, &s->sc_stateid, 1);
	if (status)
		goto out_unlock;
	switch (s->sc_type) {
	case NFS4_DELEG_STID:
		status = nfs_ok;
		break;
	case NFS4_REVOKED_DELEG_STID:
		status = nfserr_deleg_revoked;
		break;
	case NFS4_OPEN_STID:
	case NFS4_LOCK_STID:
		ols = openlockstateid(s);
		if (ols->st_stateowner->so_is_open_owner
				&& !(openowner(ols->st_stateowner)->oo_flags
						& NFS4_OO_CONFIRMED))
			status = nfserr_bad_stateid;
		else
			status = nfs_ok;
		break;
	default:
		printk("unknown stateid type %x\n", s->sc_type);
		/* Fallthrough */
	case NFS4_CLOSED_STID:
	case NFS4_CLOSED_DELEG_STID:
		status = nfserr_bad_stateid;
	}
out_unlock:
	spin_unlock(&cl->cl_lock);
	return status;
}

static __be32
nfsd4_lookup_stateid(struct nfsd4_compound_state *cstate,
		     stateid_t *stateid, unsigned char typemask,
		     struct nfs4_stid **s, struct nfsd_net *nn)
{
	__be32 status;

	if (ZERO_STATEID(stateid) || ONE_STATEID(stateid))
		return nfserr_bad_stateid;
	status = lookup_clientid(&stateid->si_opaque.so_clid, cstate, nn);
	if (status == nfserr_stale_clientid) {
		if (cstate->session)
			return nfserr_bad_stateid;
		return nfserr_stale_stateid;
	}
	if (status)
		return status;
	*s = find_stateid_by_type(cstate->clp, stateid, typemask);
	if (!*s)
		return nfserr_bad_stateid;
	return nfs_ok;
}
/*
 * Checks for stateid operations
 */
__be32
nfs4_preprocess_stateid_op(struct net *net, struct nfsd4_compound_state *cstate,
			   stateid_t *stateid, int flags, struct file **filpp)
{
	struct nfs4_stid *s;
	struct nfs4_ol_stateid *stp = NULL;
	struct nfs4_delegation *dp = NULL;
	struct svc_fh *current_fh = &cstate->current_fh;
	struct inode *ino = current_fh->fh_dentry->d_inode;
	struct nfsd_net *nn = net_generic(net, nfsd_net_id);
	struct file *file = NULL;
	__be32 status;

	if (filpp)
		*filpp = NULL;

	if (grace_disallows_io(net, ino))
		return nfserr_grace;

	if (ZERO_STATEID(stateid) || ONE_STATEID(stateid))
		return check_special_stateids(net, current_fh, stateid, flags);

	status = nfsd4_lookup_stateid(cstate, stateid,
				NFS4_DELEG_STID|NFS4_OPEN_STID|NFS4_LOCK_STID,
				&s, nn);
	if (status)
		return status;
	status = check_stateid_generation(stateid, &s->sc_stateid, nfsd4_has_session(cstate));
	if (status)
		goto out;
	switch (s->sc_type) {
	case NFS4_DELEG_STID:
		dp = delegstateid(s);
		status = nfs4_check_delegmode(dp, flags);
		if (status)
			goto out;
		if (filpp) {
			file = dp->dl_stid.sc_file->fi_deleg_file;
			if (!file) {
				WARN_ON_ONCE(1);
				status = nfserr_serverfault;
				goto out;
			}
			get_file(file);
		}
		break;
	case NFS4_OPEN_STID:
	case NFS4_LOCK_STID:
		stp = openlockstateid(s);
		status = nfs4_check_fh(current_fh, stp);
		if (status)
			goto out;
		if (stp->st_stateowner->so_is_open_owner
		    && !(openowner(stp->st_stateowner)->oo_flags
						& NFS4_OO_CONFIRMED))
			goto out;
		status = nfs4_check_openmode(stp, flags);
		if (status)
			goto out;
		if (filpp) {
			struct nfs4_file *fp = stp->st_stid.sc_file;

			if (flags & RD_STATE)
				file = find_readable_file(fp);
			else
				file = find_writeable_file(fp);
		}
		break;
	default:
		status = nfserr_bad_stateid;
		goto out;
	}
	status = nfs_ok;
	if (file)
		*filpp = file;
out:
	nfs4_put_stid(s);
	return status;
}
/*
 * Test if the stateid is valid
 */
__be32
nfsd4_test_stateid(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
		   struct nfsd4_test_stateid *test_stateid)
{
	struct nfsd4_test_stateid_id *stateid;
	struct nfs4_client *cl = cstate->session->se_client;

	list_for_each_entry(stateid, &test_stateid->ts_stateid_list, ts_id_list)
		stateid->ts_id_status =
			nfsd4_validate_stateid(cl, &stateid->ts_id_stateid);

	return nfs_ok;
}
__be32
nfsd4_free_stateid(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
		   struct nfsd4_free_stateid *free_stateid)
{
	stateid_t *stateid = &free_stateid->fr_stateid;
	struct nfs4_stid *s;
	struct nfs4_delegation *dp;
	struct nfs4_ol_stateid *stp;
	struct nfs4_client *cl = cstate->session->se_client;
	__be32 ret = nfserr_bad_stateid;

	spin_lock(&cl->cl_lock);
	s = find_stateid_locked(cl, stateid);
	if (!s)
		goto out_unlock;
	switch (s->sc_type) {
	case NFS4_DELEG_STID:
		ret = nfserr_locks_held;
		break;
	case NFS4_OPEN_STID:
		ret = check_stateid_generation(stateid, &s->sc_stateid, 1);
		if (ret)
			break;
		ret = nfserr_locks_held;
		break;
	case NFS4_LOCK_STID:
		ret = check_stateid_generation(stateid, &s->sc_stateid, 1);
		if (ret)
			break;
		stp = openlockstateid(s);
		ret = nfserr_locks_held;
		if (check_for_locks(stp->st_stid.sc_file,
				    lockowner(stp->st_stateowner)))
			break;
		unhash_lock_stateid(stp);
		spin_unlock(&cl->cl_lock);
		nfs4_put_stid(s);
		ret = nfs_ok;
		goto out;
	case NFS4_REVOKED_DELEG_STID:
		dp = delegstateid(s);
		list_del_init(&dp->dl_recall_lru);
		spin_unlock(&cl->cl_lock);
		nfs4_put_stid(s);
		ret = nfs_ok;
		goto out;
	/* Default falls through and returns nfserr_bad_stateid */
	}
out_unlock:
	spin_unlock(&cl->cl_lock);
out:
	return ret;
}
static inline int
setlkflg(int type)
{
	return (type == NFS4_READW_LT || type == NFS4_READ_LT) ?
		RD_STATE : WR_STATE;
}
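/*
 * Illustrative note (assumption, not original source): setlkflg()
 * collapses the four NFSv4 lock types onto the two open-mode flags
 * checked by nfs4_check_openmode():
 *
 *	NFS4_READ_LT,  NFS4_READW_LT   ->  RD_STATE
 *	NFS4_WRITE_LT, NFS4_WRITEW_LT  ->  WR_STATE
 */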
static __be32
nfs4_seqid_op_checks(struct nfsd4_compound_state *cstate, stateid_t *stateid,
		     u32 seqid, struct nfs4_ol_stateid *stp)
{
	struct svc_fh *current_fh = &cstate->current_fh;
	struct nfs4_stateowner *sop = stp->st_stateowner;
	__be32 status;

	status = nfsd4_check_seqid(cstate, sop, seqid);
	if (status)
		return status;
	if (stp->st_stid.sc_type == NFS4_CLOSED_STID
		|| stp->st_stid.sc_type == NFS4_REVOKED_DELEG_STID)
		/*
		 * "Closed" stateid's exist *only* to return
		 * nfserr_replay_me from the previous step, and
		 * revoked delegations are kept only for free_stateid.
		 */
		return nfserr_bad_stateid;
	status = check_stateid_generation(stateid, &stp->st_stid.sc_stateid,
					  nfsd4_has_session(cstate));
	if (status)
		return status;
	return nfs4_check_fh(current_fh, stp);
}
/*
 * Checks for sequence id mutating operations.
 */
static __be32
nfs4_preprocess_seqid_op(struct nfsd4_compound_state *cstate, u32 seqid,
			 stateid_t *stateid, char typemask,
			 struct nfs4_ol_stateid **stpp,
			 struct nfsd_net *nn)
{
	__be32 status;
	struct nfs4_stid *s;
	struct nfs4_ol_stateid *stp = NULL;

	dprintk("NFSD: %s: seqid=%d stateid = " STATEID_FMT "\n", __func__,
		seqid, STATEID_VAL(stateid));

	*stpp = NULL;
	status = nfsd4_lookup_stateid(cstate, stateid, typemask, &s, nn);
	if (status)
		return status;
	stp = openlockstateid(s);
	nfsd4_cstate_assign_replay(cstate, stp->st_stateowner);

	status = nfs4_seqid_op_checks(cstate, stateid, seqid, stp);
	if (!status)
		*stpp = stp;
	else
		nfs4_put_stid(&stp->st_stid);
	return status;
}
static __be32
nfs4_preprocess_confirmed_seqid_op(struct nfsd4_compound_state *cstate, u32 seqid,
			stateid_t *stateid, struct nfs4_ol_stateid **stpp,
			struct nfsd_net *nn)
{
	__be32 status;
	struct nfs4_openowner *oo;
	struct nfs4_ol_stateid *stp;

	status = nfs4_preprocess_seqid_op(cstate, seqid, stateid,
						NFS4_OPEN_STID, &stp, nn);
	if (status)
		return status;
	oo = openowner(stp->st_stateowner);
	if (!(oo->oo_flags & NFS4_OO_CONFIRMED)) {
		nfs4_put_stid(&stp->st_stid);
		return nfserr_bad_stateid;
	}
	*stpp = stp;
	return nfs_ok;
}
__be32
nfsd4_open_confirm(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
		   struct nfsd4_open_confirm *oc)
{
	__be32 status;
	struct nfs4_openowner *oo;
	struct nfs4_ol_stateid *stp;
	struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);

	dprintk("NFSD: nfsd4_open_confirm on file %pd\n",
			cstate->current_fh.fh_dentry);

	status = fh_verify(rqstp, &cstate->current_fh, S_IFREG, 0);
	if (status)
		return status;

	status = nfs4_preprocess_seqid_op(cstate,
					oc->oc_seqid, &oc->oc_req_stateid,
					NFS4_OPEN_STID, &stp, nn);
	if (status)
		goto out;
	oo = openowner(stp->st_stateowner);
	status = nfserr_bad_stateid;
	if (oo->oo_flags & NFS4_OO_CONFIRMED)
		goto put_stateid;
	oo->oo_flags |= NFS4_OO_CONFIRMED;
	update_stateid(&stp->st_stid.sc_stateid);
	memcpy(&oc->oc_resp_stateid, &stp->st_stid.sc_stateid, sizeof(stateid_t));
	dprintk("NFSD: %s: success, seqid=%d stateid=" STATEID_FMT "\n",
		__func__, oc->oc_seqid, STATEID_VAL(&stp->st_stid.sc_stateid));

	nfsd4_client_record_create(oo->oo_owner.so_client);
	status = nfs_ok;
put_stateid:
	nfs4_put_stid(&stp->st_stid);
out:
	nfsd4_bump_seqid(cstate, status);
	return status;
}
static inline void nfs4_stateid_downgrade_bit(struct nfs4_ol_stateid *stp, u32 access)
{
	if (!test_access(access, stp))
		return;
	nfs4_file_put_access(stp->st_stid.sc_file, access);
	clear_access(access, stp);
}

static inline void nfs4_stateid_downgrade(struct nfs4_ol_stateid *stp, u32 to_access)
{
	switch (to_access) {
	case NFS4_SHARE_ACCESS_READ:
		nfs4_stateid_downgrade_bit(stp, NFS4_SHARE_ACCESS_WRITE);
		nfs4_stateid_downgrade_bit(stp, NFS4_SHARE_ACCESS_BOTH);
		break;
	case NFS4_SHARE_ACCESS_WRITE:
		nfs4_stateid_downgrade_bit(stp, NFS4_SHARE_ACCESS_READ);
		nfs4_stateid_downgrade_bit(stp, NFS4_SHARE_ACCESS_BOTH);
		break;
	case NFS4_SHARE_ACCESS_BOTH:
		break;
	default:
		WARN_ON_ONCE(1);
	}
}
__be32
nfsd4_open_downgrade(struct svc_rqst *rqstp,
		     struct nfsd4_compound_state *cstate,
		     struct nfsd4_open_downgrade *od)
{
	__be32 status;
	struct nfs4_ol_stateid *stp;
	struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);

	dprintk("NFSD: nfsd4_open_downgrade on file %pd\n",
			cstate->current_fh.fh_dentry);

	/* We don't yet support WANT bits: */
	if (od->od_deleg_want)
		dprintk("NFSD: %s: od_deleg_want=0x%x ignored\n", __func__,
			od->od_deleg_want);

	status = nfs4_preprocess_confirmed_seqid_op(cstate, od->od_seqid,
					&od->od_stateid, &stp, nn);
	if (status)
		goto out;
	status = nfserr_inval;
	if (!test_access(od->od_share_access, stp)) {
		dprintk("NFSD: access not a subset of current bitmap: 0x%hhx, input access=%08x\n",
			stp->st_access_bmap, od->od_share_access);
		goto put_stateid;
	}
	if (!test_deny(od->od_share_deny, stp)) {
		dprintk("NFSD: deny not a subset of current bitmap: 0x%hhx, input deny=%08x\n",
			stp->st_deny_bmap, od->od_share_deny);
		goto put_stateid;
	}
	nfs4_stateid_downgrade(stp, od->od_share_access);

	reset_union_bmap_deny(od->od_share_deny, stp);

	update_stateid(&stp->st_stid.sc_stateid);
	memcpy(&od->od_stateid, &stp->st_stid.sc_stateid, sizeof(stateid_t));
	status = nfs_ok;
put_stateid:
	nfs4_put_stid(&stp->st_stid);
out:
	nfsd4_bump_seqid(cstate, status);
	return status;
}
static void nfsd4_close_open_stateid(struct nfs4_ol_stateid *s)
{
	struct nfs4_client *clp = s->st_stid.sc_client;
	LIST_HEAD(reaplist);

	s->st_stid.sc_type = NFS4_CLOSED_STID;
	spin_lock(&clp->cl_lock);
	unhash_open_stateid(s, &reaplist);

	if (clp->cl_minorversion) {
		put_ol_stateid_locked(s, &reaplist);
		spin_unlock(&clp->cl_lock);
		free_ol_stateid_reaplist(&reaplist);
	} else {
		spin_unlock(&clp->cl_lock);
		free_ol_stateid_reaplist(&reaplist);
		move_to_close_lru(s, clp->net);
	}
}
/*
 * nfs4_unlock_state() called after encode
 */
__be32
nfsd4_close(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
	    struct nfsd4_close *close)
{
	__be32 status;
	struct nfs4_ol_stateid *stp;
	struct net *net = SVC_NET(rqstp);
	struct nfsd_net *nn = net_generic(net, nfsd_net_id);

	dprintk("NFSD: nfsd4_close on file %pd\n",
			cstate->current_fh.fh_dentry);

	status = nfs4_preprocess_seqid_op(cstate, close->cl_seqid,
					&close->cl_stateid,
					NFS4_OPEN_STID|NFS4_CLOSED_STID,
					&stp, nn);
	nfsd4_bump_seqid(cstate, status);
	if (status)
		goto out;
	update_stateid(&stp->st_stid.sc_stateid);
	memcpy(&close->cl_stateid, &stp->st_stid.sc_stateid, sizeof(stateid_t));

	nfsd4_close_open_stateid(stp);

	/* put reference from nfs4_preprocess_seqid_op */
	nfs4_put_stid(&stp->st_stid);
out:
	return status;
}
__be32
nfsd4_delegreturn(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
		  struct nfsd4_delegreturn *dr)
{
	struct nfs4_delegation *dp;
	stateid_t *stateid = &dr->dr_stateid;
	struct nfs4_stid *s;
	__be32 status;
	struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);

	if ((status = fh_verify(rqstp, &cstate->current_fh, S_IFREG, 0)))
		return status;

	status = nfsd4_lookup_stateid(cstate, stateid, NFS4_DELEG_STID, &s, nn);
	if (status)
		goto out;
	dp = delegstateid(s);
	status = check_stateid_generation(stateid, &dp->dl_stid.sc_stateid,
					  nfsd4_has_session(cstate));
	if (status)
		goto put_stateid;

	destroy_delegation(dp);
put_stateid:
	nfs4_put_stid(&dp->dl_stid);
out:
	return status;
}
#define LOFF_OVERFLOW(start, len)	((u64)(len) > ~(u64)(start))

static inline u64
end_offset(u64 start, u64 len)
{
	u64 end;

	end = start + len;
	return end >= start ? end : NFS4_MAX_UINT64;
}

/* last octet in a range */
static inline u64
last_byte_offset(u64 start, u64 len)
{
	u64 end;

	end = start + len;
	return end > start ? end - 1: NFS4_MAX_UINT64;
}
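/*
 * Illustrative sketch (assumption, not original source): both helpers
 * clamp ranges that would wrap past 2^64 - 1 to NFS4_MAX_UINT64, which
 * NFSv4 uses to mean "to the end of the file":
 *
 *	end_offset(100, 10)          == 110
 *	end_offset(~0ULL - 4, 10)    == NFS4_MAX_UINT64	(wrapped)
 *	last_byte_offset(100, 10)    == 109
 *	last_byte_offset(~0ULL, 1)   == NFS4_MAX_UINT64	(wrapped)
 */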
/*
 * TODO: Linux file offsets are _signed_ 64-bit quantities, which means that
 * we can't properly handle lock requests that go beyond the (2^63 - 1)-th
 * byte, because of sign extension problems.  Since NFSv4 calls for 64-bit
 * locking, this prevents us from being completely protocol-compliant.  The
 * real solution to this problem is to start using unsigned file offsets in
 * the VFS, but this is a very deep change!
 */
static inline void
nfs4_transform_lock_offset(struct file_lock *lock)
{
	if (lock->fl_start < 0)
		lock->fl_start = OFFSET_MAX;
	if (lock->fl_end < 0)
		lock->fl_end = OFFSET_MAX;
}
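/*
 * Illustrative sketch (assumption, not original source): an NFSv4 range
 * whose start or end falls at or beyond 2^63 becomes negative once it
 * is stored in the signed loff_t fields of struct file_lock, so both
 * ends are clamped to OFFSET_MAX:
 *
 *	fl.fl_start = (loff_t)(1ULL << 63);	// negative after the cast
 *	fl.fl_end   = (loff_t)~0ULL;		// reads as -1
 *	nfs4_transform_lock_offset(&fl);	// both become OFFSET_MAX
 */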
/* Hack!: For now, we're defining this just so we can use a pointer to it
 * as a unique cookie to identify our (NFSv4's) posix locks. */
static const struct lock_manager_operations nfsd_posix_mng_ops  = {
};

static inline void
nfs4_set_lock_denied(struct file_lock *fl, struct nfsd4_lock_denied *deny)
{
	struct nfs4_lockowner *lo;

	if (fl->fl_lmops == &nfsd_posix_mng_ops) {
		lo = (struct nfs4_lockowner *) fl->fl_owner;
		deny->ld_owner.data = kmemdup(lo->lo_owner.so_owner.data,
					lo->lo_owner.so_owner.len, GFP_KERNEL);
		if (!deny->ld_owner.data)
			/* We just don't care that much */
			goto nevermind;
		deny->ld_owner.len = lo->lo_owner.so_owner.len;
		deny->ld_clientid = lo->lo_owner.so_client->cl_clientid;
	} else {
nevermind:
		deny->ld_owner.len = 0;
		deny->ld_owner.data = NULL;
		deny->ld_clientid.cl_boot = 0;
		deny->ld_clientid.cl_id = 0;
	}
	deny->ld_start = fl->fl_start;
	deny->ld_length = NFS4_MAX_UINT64;
	if (fl->fl_end != NFS4_MAX_UINT64)
		deny->ld_length = fl->fl_end - fl->fl_start + 1;
	deny->ld_type = NFS4_READ_LT;
	if (fl->fl_type != F_RDLCK)
		deny->ld_type = NFS4_WRITE_LT;
}
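/*
 * Illustrative sketch (assumption, not original source) of the range
 * conversion above: a conflicting POSIX lock on bytes [fl_start, fl_end]
 * is reported back to the client as an NFSv4 (offset, length) pair:
 *
 *	fl_start = 0, fl_end = 99  ->  ld_start = 0, ld_length = 100
 *
 * The sentinel NFS4_MAX_UINT64 in fl_end is passed through unchanged as
 * ld_length, which NFSv4 reads as "to the end of the file".
 */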
static struct nfs4_lockowner *
find_lockowner_str_locked(clientid_t *clid, struct xdr_netobj *owner,
			  struct nfs4_client *clp)
{
	unsigned int strhashval = ownerstr_hashval(owner);
	struct nfs4_stateowner *so;

	lockdep_assert_held(&clp->cl_lock);

	list_for_each_entry(so, &clp->cl_ownerstr_hashtbl[strhashval],
			    so_strhash) {
		if (so->so_is_open_owner)
			continue;
		if (!same_owner_str(so, owner))
			continue;
		atomic_inc(&so->so_count);
		return lockowner(so);
	}
	return NULL;
}
static struct nfs4_lockowner *
find_lockowner_str(clientid_t *clid, struct xdr_netobj *owner,
		   struct nfs4_client *clp)
{
	struct nfs4_lockowner *lo;

	spin_lock(&clp->cl_lock);
	lo = find_lockowner_str_locked(clid, owner, clp);
	spin_unlock(&clp->cl_lock);
	return lo;
}

static void nfs4_unhash_lockowner(struct nfs4_stateowner *sop)
{
	unhash_lockowner_locked(lockowner(sop));
}

static void nfs4_free_lockowner(struct nfs4_stateowner *sop)
{
	struct nfs4_lockowner *lo = lockowner(sop);

	kmem_cache_free(lockowner_slab, lo);
}

static const struct nfs4_stateowner_operations lockowner_ops = {
	.so_unhash =	nfs4_unhash_lockowner,
	.so_free =	nfs4_free_lockowner,
};
/*
 * Alloc a lock owner structure.
 * Called in nfsd4_lock - therefore, OPEN and OPEN_CONFIRM (if needed) has
 * occurred.
 *
 * strhashval = ownerstr_hashval
 */
static struct nfs4_lockowner *
alloc_init_lock_stateowner(unsigned int strhashval, struct nfs4_client *clp,
			   struct nfs4_ol_stateid *open_stp,
			   struct nfsd4_lock *lock)
{
	struct nfs4_lockowner *lo, *ret;

	lo = alloc_stateowner(lockowner_slab, &lock->lk_new_owner, clp);
	if (!lo)
		return NULL;
	INIT_LIST_HEAD(&lo->lo_owner.so_stateids);
	lo->lo_owner.so_is_open_owner = 0;
	lo->lo_owner.so_seqid = lock->lk_new_lock_seqid;
	lo->lo_owner.so_ops = &lockowner_ops;
	spin_lock(&clp->cl_lock);
	ret = find_lockowner_str_locked(&clp->cl_clientid,
			&lock->lk_new_owner, clp);
	if (ret == NULL) {
		list_add(&lo->lo_owner.so_strhash,
			 &clp->cl_ownerstr_hashtbl[strhashval]);
		ret = lo;
	} else
		nfs4_free_lockowner(&lo->lo_owner);
	spin_unlock(&clp->cl_lock);
	return ret;
}
static void
init_lock_stateid(struct nfs4_ol_stateid *stp, struct nfs4_lockowner *lo,
		  struct nfs4_file *fp, struct inode *inode,
		  struct nfs4_ol_stateid *open_stp)
{
	struct nfs4_client *clp = lo->lo_owner.so_client;

	lockdep_assert_held(&clp->cl_lock);

	atomic_inc(&stp->st_stid.sc_count);
	stp->st_stid.sc_type = NFS4_LOCK_STID;
	stp->st_stateowner = &lo->lo_owner;
	atomic_inc(&lo->lo_owner.so_count);
	get_nfs4_file(fp);
	stp->st_stid.sc_file = fp;
	stp->st_stid.sc_free = nfs4_free_lock_stateid;
	stp->st_access_bmap = 0;
	stp->st_deny_bmap = open_stp->st_deny_bmap;
	stp->st_openstp = open_stp;
	list_add(&stp->st_locks, &open_stp->st_locks);
	list_add(&stp->st_perstateowner, &lo->lo_owner.so_stateids);
	spin_lock(&fp->fi_lock);
	list_add(&stp->st_perfile, &fp->fi_stateids);
	spin_unlock(&fp->fi_lock);
}
static struct nfs4_ol_stateid *
find_lock_stateid(struct nfs4_lockowner *lo, struct nfs4_file *fp)
{
	struct nfs4_ol_stateid *lst;
	struct nfs4_client *clp = lo->lo_owner.so_client;

	lockdep_assert_held(&clp->cl_lock);

	list_for_each_entry(lst, &lo->lo_owner.so_stateids, st_perstateowner) {
		if (lst->st_stid.sc_file == fp) {
			atomic_inc(&lst->st_stid.sc_count);
			return lst;
		}
	}
	return NULL;
}
static struct nfs4_ol_stateid *
find_or_create_lock_stateid(struct nfs4_lockowner *lo, struct nfs4_file *fi,
			    struct inode *inode, struct nfs4_ol_stateid *ost,
			    bool *new)
{
	struct nfs4_stid *ns = NULL;
	struct nfs4_ol_stateid *lst;
	struct nfs4_openowner *oo = openowner(ost->st_stateowner);
	struct nfs4_client *clp = oo->oo_owner.so_client;

	spin_lock(&clp->cl_lock);
	lst = find_lock_stateid(lo, fi);
	if (lst == NULL) {
		spin_unlock(&clp->cl_lock);
		ns = nfs4_alloc_stid(clp, stateid_slab);
		if (ns == NULL)
			return NULL;

		spin_lock(&clp->cl_lock);
		lst = find_lock_stateid(lo, fi);
		if (likely(!lst)) {
			lst = openlockstateid(ns);
			init_lock_stateid(lst, lo, fi, inode, ost);
			ns = NULL;
			*new = true;
		}
	}
	spin_unlock(&clp->cl_lock);
	if (ns)
		nfs4_put_stid(ns);
	return lst;
}
static int
check_lock_length(u64 offset, u64 length)
{
	return ((length == 0) || ((length != NFS4_MAX_UINT64) &&
		LOFF_OVERFLOW(offset, length)));
}
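/*
 * Illustrative sketch (assumption, not original source): a lock range is
 * invalid when it is empty or wraps past the last byte; a length of
 * NFS4_MAX_UINT64 is the "to end of file" sentinel and is always valid:
 *
 *	check_lock_length(0, 0)                  -> invalid (zero length)
 *	check_lock_length(~0ULL - 1, 10)         -> invalid (overflow)
 *	check_lock_length(100, NFS4_MAX_UINT64)  -> valid   (whole file)
 */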
static void get_lock_access(struct nfs4_ol_stateid *lock_stp, u32 access)
{
	struct nfs4_file *fp = lock_stp->st_stid.sc_file;

	lockdep_assert_held(&fp->fi_lock);

	if (test_access(access, lock_stp))
		return;
	__nfs4_file_get_access(fp, access);
	set_access(access, lock_stp);
}
static __be32
lookup_or_create_lock_state(struct nfsd4_compound_state *cstate,
			    struct nfs4_ol_stateid *ost,
			    struct nfsd4_lock *lock,
			    struct nfs4_ol_stateid **lst, bool *new)
{
	__be32 status;
	struct nfs4_file *fi = ost->st_stid.sc_file;
	struct nfs4_openowner *oo = openowner(ost->st_stateowner);
	struct nfs4_client *cl = oo->oo_owner.so_client;
	struct inode *inode = cstate->current_fh.fh_dentry->d_inode;
	struct nfs4_lockowner *lo;
	unsigned int strhashval;

	lo = find_lockowner_str(&cl->cl_clientid, &lock->v.new.owner, cl);
	if (!lo) {
		strhashval = ownerstr_hashval(&lock->v.new.owner);
		lo = alloc_init_lock_stateowner(strhashval, cl, ost, lock);
		if (lo == NULL)
			return nfserr_jukebox;
	} else {
		/* with an existing lockowner, seqids must be the same */
		status = nfserr_bad_seqid;
		if (!cstate->minorversion &&
		    lock->lk_new_lock_seqid != lo->lo_owner.so_seqid)
			goto out;
	}

	*lst = find_or_create_lock_stateid(lo, fi, inode, ost, new);
	if (*lst == NULL) {
		status = nfserr_jukebox;
		goto out;
	}
	status = nfs_ok;
out:
	nfs4_put_stateowner(&lo->lo_owner);
	return status;
}
/*
 *  LOCK operation
 */
__be32
nfsd4_lock(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
	   struct nfsd4_lock *lock)
{
	struct nfs4_openowner *open_sop = NULL;
	struct nfs4_lockowner *lock_sop = NULL;
	struct nfs4_ol_stateid *lock_stp = NULL;
	struct nfs4_ol_stateid *open_stp = NULL;
	struct nfs4_file *fp;
	struct file *filp = NULL;
	struct file_lock *file_lock = NULL;
	struct file_lock *conflock = NULL;
	__be32 status = 0;
	int lkflg;
	int err;
	bool new = false;
	struct net *net = SVC_NET(rqstp);
	struct nfsd_net *nn = net_generic(net, nfsd_net_id);

	dprintk("NFSD: nfsd4_lock: start=%Ld length=%Ld\n",
		(long long) lock->lk_offset,
		(long long) lock->lk_length);

	if (check_lock_length(lock->lk_offset, lock->lk_length))
		 return nfserr_inval;

	if ((status = fh_verify(rqstp, &cstate->current_fh,
				S_IFREG, NFSD_MAY_LOCK))) {
		dprintk("NFSD: nfsd4_lock: permission denied!\n");
		return status;
	}

	if (lock->lk_is_new) {
		if (nfsd4_has_session(cstate))
			/* See rfc 5661 18.10.3: given clientid is ignored: */
			memcpy(&lock->v.new.clientid,
				&cstate->session->se_client->cl_clientid,
				sizeof(clientid_t));

		status = nfserr_stale_clientid;
		if (STALE_CLIENTID(&lock->lk_new_clientid, nn))
			goto out;

		/* validate and update open stateid and open seqid */
		status = nfs4_preprocess_confirmed_seqid_op(cstate,
				        lock->lk_new_open_seqid,
		                        &lock->lk_new_open_stateid,
					&open_stp, nn);
		if (status)
			goto out;
		open_sop = openowner(open_stp->st_stateowner);
		status = nfserr_bad_stateid;
		if (!same_clid(&open_sop->oo_owner.so_client->cl_clientid,
						&lock->v.new.clientid))
			goto out;
		status = lookup_or_create_lock_state(cstate, open_stp, lock,
							&lock_stp, &new);
	} else {
		status = nfs4_preprocess_seqid_op(cstate,
				       lock->lk_old_lock_seqid,
				       &lock->lk_old_lock_stateid,
				       NFS4_LOCK_STID, &lock_stp, nn);
	}
	if (status)
		goto out;
	lock_sop = lockowner(lock_stp->st_stateowner);

	lkflg = setlkflg(lock->lk_type);
	status = nfs4_check_openmode(lock_stp, lkflg);
	if (status)
		goto out;

	status = nfserr_grace;
	if (locks_in_grace(net) && !lock->lk_reclaim)
		goto out;
	status = nfserr_no_grace;
	if (!locks_in_grace(net) && lock->lk_reclaim)
		goto out;

	file_lock = locks_alloc_lock();
	if (!file_lock) {
		dprintk("NFSD: %s: unable to allocate lock!\n", __func__);
		status = nfserr_jukebox;
		goto out;
	}

	fp = lock_stp->st_stid.sc_file;
	locks_init_lock(file_lock);
	switch (lock->lk_type) {
		case NFS4_READ_LT:
		case NFS4_READW_LT:
			spin_lock(&fp->fi_lock);
			filp = find_readable_file_locked(fp);
			if (filp)
				get_lock_access(lock_stp, NFS4_SHARE_ACCESS_READ);
			spin_unlock(&fp->fi_lock);
			file_lock->fl_type = F_RDLCK;
			break;
		case NFS4_WRITE_LT:
		case NFS4_WRITEW_LT:
			spin_lock(&fp->fi_lock);
			filp = find_writeable_file_locked(fp);
			if (filp)
				get_lock_access(lock_stp, NFS4_SHARE_ACCESS_WRITE);
			spin_unlock(&fp->fi_lock);
			file_lock->fl_type = F_WRLCK;
			break;
		default:
			status = nfserr_inval;
			goto out;
	}
	if (!filp) {
		status = nfserr_openmode;
		goto out;
	}
	file_lock->fl_owner = (fl_owner_t)lock_sop;
	file_lock->fl_pid = current->tgid;
	file_lock->fl_file = filp;
	file_lock->fl_flags = FL_POSIX;
	file_lock->fl_lmops = &nfsd_posix_mng_ops;
	file_lock->fl_start = lock->lk_offset;
	file_lock->fl_end = last_byte_offset(lock->lk_offset, lock->lk_length);
	nfs4_transform_lock_offset(file_lock);

	conflock = locks_alloc_lock();
	if (!conflock) {
		dprintk("NFSD: %s: unable to allocate lock!\n", __func__);
		status = nfserr_jukebox;
		goto out;
	}

	err = vfs_lock_file(filp, F_SETLK, file_lock, conflock);
	switch (-err) {
	case 0: /* success! */
		update_stateid(&lock_stp->st_stid.sc_stateid);
		memcpy(&lock->lk_resp_stateid, &lock_stp->st_stid.sc_stateid,
				sizeof(stateid_t));
		status = 0;
		break;
	case (EAGAIN):		/* conflock holds conflicting lock */
		status = nfserr_denied;
		dprintk("NFSD: nfsd4_lock: conflicting lock found!\n");
		nfs4_set_lock_denied(conflock, &lock->lk_denied);
		break;
	case (EDEADLK):
		status = nfserr_deadlock;
		break;
	default:
		dprintk("NFSD: nfsd4_lock: vfs_lock_file() failed! status %d\n",err);
		status = nfserrno(err);
		break;
	}
out:
	if (filp)
		fput(filp);
	if (lock_stp) {
		/* Bump seqid manually if the 4.0 replay owner is openowner */
		if (cstate->replay_owner &&
		    cstate->replay_owner != &lock_sop->lo_owner &&
		    seqid_mutating_err(ntohl(status)))
			lock_sop->lo_owner.so_seqid++;

		/*
		 * If this is a new, never-before-used stateid, and we are
		 * returning an error, then just go ahead and release it.
		 */
		if (status && new)
			release_lock_stateid(lock_stp);

		nfs4_put_stid(&lock_stp->st_stid);
	}
	if (open_stp)
		nfs4_put_stid(&open_stp->st_stid);
	nfsd4_bump_seqid(cstate, status);
	if (file_lock)
		locks_free_lock(file_lock);
	if (conflock)
		locks_free_lock(conflock);
	return status;
}
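/*
 * Illustrative note (assumption, not original source): the two grace
 * checks in nfsd4_lock() are complementary. During the grace period only
 * reclaims are allowed; once it ends, reclaims are no longer allowed:
 *
 *	locks_in_grace() && !lk_reclaim   ->  nfserr_grace    (retry later)
 *	!locks_in_grace() && lk_reclaim   ->  nfserr_no_grace (too late)
 */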
/*
 * The NFSv4 spec allows a client to do a LOCKT without holding an OPEN,
 * so we do a temporary open here just to get an open file to pass to
 * vfs_test_lock.  (Arguably perhaps test_lock should be done with an
 * inode operation.)
 */
static __be32 nfsd_test_lock(struct svc_rqst *rqstp, struct svc_fh *fhp, struct file_lock *lock)
{
	struct file *file;
	__be32 err = nfsd_open(rqstp, fhp, S_IFREG, NFSD_MAY_READ, &file);
	if (!err) {
		err = nfserrno(vfs_test_lock(file, lock));
		nfsd_close(file);
	}
	return err;
}
/*
 * LOCKT operation
 */
__be32
nfsd4_lockt(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
	    struct nfsd4_lockt *lockt)
{
	struct file_lock *file_lock = NULL;
	struct nfs4_lockowner *lo = NULL;
	__be32 status;
	struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);

	if (locks_in_grace(SVC_NET(rqstp)))
		return nfserr_grace;

	if (check_lock_length(lockt->lt_offset, lockt->lt_length))
		 return nfserr_inval;

	if (!nfsd4_has_session(cstate)) {
		status = lookup_clientid(&lockt->lt_clientid, cstate, nn);
		if (status)
			goto out;
	}

	if ((status = fh_verify(rqstp, &cstate->current_fh, S_IFREG, 0)))
		goto out;

	file_lock = locks_alloc_lock();
	if (!file_lock) {
		dprintk("NFSD: %s: unable to allocate lock!\n", __func__);
		status = nfserr_jukebox;
		goto out;
	}

	locks_init_lock(file_lock);
	switch (lockt->lt_type) {
		case NFS4_READ_LT:
		case NFS4_READW_LT:
			file_lock->fl_type = F_RDLCK;
			break;
		case NFS4_WRITE_LT:
		case NFS4_WRITEW_LT:
			file_lock->fl_type = F_WRLCK;
			break;
		default:
			dprintk("NFSD: nfs4_lockt: bad lock type!\n");
			status = nfserr_inval;
			goto out;
	}

	lo = find_lockowner_str(&lockt->lt_clientid, &lockt->lt_owner,
				cstate->clp);
	if (lo)
		file_lock->fl_owner = (fl_owner_t)lo;
	file_lock->fl_pid = current->tgid;
	file_lock->fl_flags = FL_POSIX;

	file_lock->fl_start = lockt->lt_offset;
	file_lock->fl_end = last_byte_offset(lockt->lt_offset, lockt->lt_length);

	nfs4_transform_lock_offset(file_lock);

	status = nfsd_test_lock(rqstp, &cstate->current_fh, file_lock);
	if (status)
		goto out;

	if (file_lock->fl_type != F_UNLCK) {
		status = nfserr_denied;
		nfs4_set_lock_denied(file_lock, &lockt->lt_denied);
	}
out:
	if (lo)
		nfs4_put_stateowner(&lo->lo_owner);
	if (file_lock)
		locks_free_lock(file_lock);
	return status;
}
__be32
nfsd4_locku(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
	    struct nfsd4_locku *locku)
{
	struct nfs4_ol_stateid *stp;
	struct file *filp = NULL;
	struct file_lock *file_lock = NULL;
	__be32 status;
	int err;
	struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);

	dprintk("NFSD: nfsd4_locku: start=%Ld length=%Ld\n",
		(long long) locku->lu_offset,
		(long long) locku->lu_length);

	if (check_lock_length(locku->lu_offset, locku->lu_length))
		 return nfserr_inval;

	status = nfs4_preprocess_seqid_op(cstate, locku->lu_seqid,
					&locku->lu_stateid, NFS4_LOCK_STID,
					&stp, nn);
	if (status)
		goto out;
	filp = find_any_file(stp->st_stid.sc_file);
	if (!filp) {
		status = nfserr_lock_range;
		goto put_stateid;
	}
	file_lock = locks_alloc_lock();
	if (!file_lock) {
		dprintk("NFSD: %s: unable to allocate lock!\n", __func__);
		status = nfserr_jukebox;
		goto fput;
	}

	locks_init_lock(file_lock);
	file_lock->fl_type = F_UNLCK;
	file_lock->fl_owner = (fl_owner_t)lockowner(stp->st_stateowner);
	file_lock->fl_pid = current->tgid;
	file_lock->fl_file = filp;
	file_lock->fl_flags = FL_POSIX;
	file_lock->fl_lmops = &nfsd_posix_mng_ops;
	file_lock->fl_start = locku->lu_offset;

	file_lock->fl_end = last_byte_offset(locku->lu_offset,
						locku->lu_length);
	nfs4_transform_lock_offset(file_lock);

	err = vfs_lock_file(filp, F_SETLK, file_lock, NULL);
	if (err) {
		dprintk("NFSD: nfs4_locku: vfs_lock_file failed!\n");
		goto out_nfserr;
	}
	update_stateid(&stp->st_stid.sc_stateid);
	memcpy(&locku->lu_stateid, &stp->st_stid.sc_stateid, sizeof(stateid_t));
fput:
	fput(filp);
put_stateid:
	nfs4_put_stid(&stp->st_stid);
out:
	nfsd4_bump_seqid(cstate, status);
	if (file_lock)
		locks_free_lock(file_lock);
	return status;

out_nfserr:
	status = nfserrno(err);
	goto fput;
}
/*
 * returns
 *	true:  locks held by lockowner
 *	false: no locks held by lockowner
 */
static bool
check_for_locks(struct nfs4_file *fp, struct nfs4_lockowner *lowner)
{
	struct file_lock **flpp;
	int status = false;
	struct file *filp = find_any_file(fp);
	struct inode *inode;

	if (!filp) {
		/* Any valid lock stateid should have some sort of access */
		WARN_ON_ONCE(1);
		return status;
	}

	inode = file_inode(filp);

	spin_lock(&inode->i_lock);
	for (flpp = &inode->i_flock; *flpp != NULL; flpp = &(*flpp)->fl_next) {
		if ((*flpp)->fl_owner == (fl_owner_t)lowner) {
			status = true;
			break;
		}
	}
	spin_unlock(&inode->i_lock);
	fput(filp);
	return status;
}
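/*
 * Illustrative note (assumption, not original source): nfsd stores the
 * nfs4_lockowner pointer itself in fl_owner when it sets a lock (see
 * nfsd4_lock() above), so the simple pointer comparison in
 * check_for_locks() is enough to decide whether an owner still holds
 * locks on this inode, without consulting the lock manager operations.
 */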
__be32
nfsd4_release_lockowner(struct svc_rqst *rqstp,
			struct nfsd4_compound_state *cstate,
			struct nfsd4_release_lockowner *rlockowner)
{
	clientid_t *clid = &rlockowner->rl_clientid;
	struct nfs4_stateowner *sop;
	struct nfs4_lockowner *lo = NULL;
	struct nfs4_ol_stateid *stp;
	struct xdr_netobj *owner = &rlockowner->rl_owner;
	unsigned int hashval = ownerstr_hashval(owner);
	__be32 status;
	struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
	struct nfs4_client *clp;

	dprintk("nfsd4_release_lockowner clientid: (%08x/%08x):\n",
		clid->cl_boot, clid->cl_id);

	status = lookup_clientid(clid, cstate, nn);
	if (status)
		return status;

	clp = cstate->clp;
	/* Find the matching lock stateowner */
	spin_lock(&clp->cl_lock);
	list_for_each_entry(sop, &clp->cl_ownerstr_hashtbl[hashval],
			    so_strhash) {

		if (sop->so_is_open_owner || !same_owner_str(sop, owner))
			continue;

		/* see if there are still any locks associated with it */
		lo = lockowner(sop);
		list_for_each_entry(stp, &sop->so_stateids, st_perstateowner) {
			if (check_for_locks(stp->st_stid.sc_file, lo)) {
				status = nfserr_locks_held;
				spin_unlock(&clp->cl_lock);
				return status;
			}
		}

		atomic_inc(&sop->so_count);
		break;
	}
	spin_unlock(&clp->cl_lock);
	if (lo)
		release_lockowner(lo);
	return status;
}
static inline struct nfs4_client_reclaim *
alloc_reclaim(void)
{
	return kmalloc(sizeof(struct nfs4_client_reclaim), GFP_KERNEL);
}

bool
nfs4_has_reclaimed_state(const char *name, struct nfsd_net *nn)
{
	struct nfs4_client_reclaim *crp;

	crp = nfsd4_find_reclaim_client(name, nn);
	return (crp && crp->cr_clp);
}
/*
 * failure => all reset bets are off, nfserr_no_grace...
 */
struct nfs4_client_reclaim *
nfs4_client_to_reclaim(const char *name, struct nfsd_net *nn)
{
	unsigned int strhashval;
	struct nfs4_client_reclaim *crp;

	dprintk("NFSD nfs4_client_to_reclaim NAME: %.*s\n", HEXDIR_LEN, name);
	crp = alloc_reclaim();
	if (crp) {
		strhashval = clientstr_hashval(name);
		INIT_LIST_HEAD(&crp->cr_strhash);
		list_add(&crp->cr_strhash, &nn->reclaim_str_hashtbl[strhashval]);
		memcpy(crp->cr_recdir, name, HEXDIR_LEN);
		crp->cr_clp = NULL;
		nn->reclaim_str_hashtbl_size++;
	}
	return crp;
}

void
nfs4_remove_reclaim_record(struct nfs4_client_reclaim *crp, struct nfsd_net *nn)
{
	list_del(&crp->cr_strhash);
	kfree(crp);
	nn->reclaim_str_hashtbl_size--;
}

void
nfs4_release_reclaim(struct nfsd_net *nn)
{
	struct nfs4_client_reclaim *crp = NULL;
	int i;

	for (i = 0; i < CLIENT_HASH_SIZE; i++) {
		while (!list_empty(&nn->reclaim_str_hashtbl[i])) {
			crp = list_entry(nn->reclaim_str_hashtbl[i].next,
					struct nfs4_client_reclaim, cr_strhash);
			nfs4_remove_reclaim_record(crp, nn);
		}
	}
	WARN_ON_ONCE(nn->reclaim_str_hashtbl_size);
}
/*
 * called from OPEN, CLAIM_PREVIOUS with a new clientid. */
struct nfs4_client_reclaim *
nfsd4_find_reclaim_client(const char *recdir, struct nfsd_net *nn)
{
	unsigned int strhashval;
	struct nfs4_client_reclaim *crp = NULL;

	dprintk("NFSD: nfs4_find_reclaim_client for recdir %s\n", recdir);

	strhashval = clientstr_hashval(recdir);
	list_for_each_entry(crp, &nn->reclaim_str_hashtbl[strhashval], cr_strhash) {
		if (same_name(crp->cr_recdir, recdir)) {
			return crp;
		}
	}
	return NULL;
}

/*
 * Called from OPEN. Look for clientid in reclaim list.
 */
__be32
nfs4_check_open_reclaim(clientid_t *clid,
		struct nfsd4_compound_state *cstate,
		struct nfsd_net *nn)
{
	__be32 status;

	/* find clientid in conf_id_hashtbl */
	status = lookup_clientid(clid, cstate, nn);
	if (status)
		return nfserr_reclaim_bad;

	if (nfsd4_client_record_check(cstate->clp))
		return nfserr_reclaim_bad;

	return nfs_ok;
}
#ifdef CONFIG_NFSD_FAULT_INJECTION
static inline void
put_client(struct nfs4_client *clp)
{
	atomic_dec(&clp->cl_refcount);
}

static struct nfs4_client *
nfsd_find_client(struct sockaddr_storage *addr, size_t addr_size)
{
	struct nfs4_client *clp;
	struct nfsd_net *nn = net_generic(current->nsproxy->net_ns,
					  nfsd_net_id);

	if (!nfsd_netns_ready(nn))
		return NULL;

	list_for_each_entry(clp, &nn->client_lru, cl_lru) {
		if (memcmp(&clp->cl_addr, addr, addr_size) == 0)
			return clp;
	}
	return NULL;
}
u64
nfsd_inject_print_clients(void)
{
	struct nfs4_client *clp;
	u64 count = 0;
	struct nfsd_net *nn = net_generic(current->nsproxy->net_ns,
					  nfsd_net_id);
	char buf[INET6_ADDRSTRLEN];

	if (!nfsd_netns_ready(nn))
		return 0;

	spin_lock(&nn->client_lock);
	list_for_each_entry(clp, &nn->client_lru, cl_lru) {
		rpc_ntop((struct sockaddr *)&clp->cl_addr, buf, sizeof(buf));
		pr_info("NFS Client: %s\n", buf);
		++count;
	}
	spin_unlock(&nn->client_lock);

	return count;
}

u64
nfsd_inject_forget_client(struct sockaddr_storage *addr, size_t addr_size)
{
	u64 count = 0;
	struct nfs4_client *clp;
	struct nfsd_net *nn = net_generic(current->nsproxy->net_ns,
					  nfsd_net_id);

	if (!nfsd_netns_ready(nn))
		return count;

	spin_lock(&nn->client_lock);
	clp = nfsd_find_client(addr, addr_size);
	if (clp) {
		if (mark_client_expired_locked(clp) == nfs_ok)
			++count;
		else
			clp = NULL;
	}
	spin_unlock(&nn->client_lock);

	if (clp)
		expire_client(clp);

	return count;
}

u64
nfsd_inject_forget_clients(u64 max)
{
	u64 count = 0;
	struct nfs4_client *clp, *next;
	struct nfsd_net *nn = net_generic(current->nsproxy->net_ns,
						nfsd_net_id);
	LIST_HEAD(reaplist);

	if (!nfsd_netns_ready(nn))
		return count;

	spin_lock(&nn->client_lock);
	list_for_each_entry_safe(clp, next, &nn->client_lru, cl_lru) {
		if (mark_client_expired_locked(clp) == nfs_ok) {
			list_add(&clp->cl_lru, &reaplist);
			if (max != 0 && ++count >= max)
				break;
		}
	}
	spin_unlock(&nn->client_lock);

	list_for_each_entry_safe(clp, next, &reaplist, cl_lru)
		expire_client(clp);

	return count;
}
static void nfsd_print_count(struct nfs4_client *clp, unsigned int count,
			     const char *type)
{
	char buf[INET6_ADDRSTRLEN];
	rpc_ntop((struct sockaddr *)&clp->cl_addr, buf, sizeof(buf));
	printk(KERN_INFO "NFS Client: %s has %u %s\n", buf, count, type);
}

static void
nfsd_inject_add_lock_to_list(struct nfs4_ol_stateid *lst,
			     struct list_head *collect)
{
	struct nfs4_client *clp = lst->st_stid.sc_client;
	struct nfsd_net *nn = net_generic(current->nsproxy->net_ns,
					  nfsd_net_id);

	if (!collect)
		return;

	lockdep_assert_held(&nn->client_lock);
	atomic_inc(&clp->cl_refcount);
	list_add(&lst->st_locks, collect);
}
static u64 nfsd_foreach_client_lock(struct nfs4_client *clp, u64 max,
				    struct list_head *collect,
				    void (*func)(struct nfs4_ol_stateid *))
{
	struct nfs4_openowner *oop;
	struct nfs4_ol_stateid *stp, *st_next;
	struct nfs4_ol_stateid *lst, *lst_next;
	u64 count = 0;

	spin_lock(&clp->cl_lock);
	list_for_each_entry(oop, &clp->cl_openowners, oo_perclient) {
		list_for_each_entry_safe(stp, st_next,
				&oop->oo_owner.so_stateids, st_perstateowner) {
			list_for_each_entry_safe(lst, lst_next,
					&stp->st_locks, st_locks) {
				if (func) {
					func(lst);
					nfsd_inject_add_lock_to_list(lst,
								collect);
				}
				++count;
				/*
				 * Despite the fact that these functions deal
				 * with 64-bit integers for "count", we must
				 * ensure that it doesn't blow up the
				 * clp->cl_refcount. Throw a warning if we
				 * start to approach INT_MAX here.
				 */
				WARN_ON_ONCE(count == (INT_MAX / 2));
				if (count == max)
					goto out;
			}
		}
	}
out:
	spin_unlock(&clp->cl_lock);

	return count;
}
static u64
nfsd_collect_client_locks(struct nfs4_client *clp, struct list_head *collect,
			  u64 max)
{
	return nfsd_foreach_client_lock(clp, max, collect, unhash_lock_stateid);
}

static u64
nfsd_print_client_locks(struct nfs4_client *clp)
{
	u64 count = nfsd_foreach_client_lock(clp, 0, NULL, NULL);
	nfsd_print_count(clp, count, "locked files");
	return count;
}

u64
nfsd_inject_print_locks(void)
{
	struct nfs4_client *clp;
	u64 count = 0;
	struct nfsd_net *nn = net_generic(current->nsproxy->net_ns,
						nfsd_net_id);

	if (!nfsd_netns_ready(nn))
		return 0;

	spin_lock(&nn->client_lock);
	list_for_each_entry(clp, &nn->client_lru, cl_lru)
		count += nfsd_print_client_locks(clp);
	spin_unlock(&nn->client_lock);

	return count;
}
static void
nfsd_reap_locks(struct list_head *reaplist)
{
	struct nfs4_client *clp;
	struct nfs4_ol_stateid *stp, *next;

	list_for_each_entry_safe(stp, next, reaplist, st_locks) {
		list_del_init(&stp->st_locks);
		clp = stp->st_stid.sc_client;
		nfs4_put_stid(&stp->st_stid);
		put_client(clp);
	}
}

u64
nfsd_inject_forget_client_locks(struct sockaddr_storage *addr, size_t addr_size)
{
	unsigned int count = 0;
	struct nfs4_client *clp;
	struct nfsd_net *nn = net_generic(current->nsproxy->net_ns,
						nfsd_net_id);
	LIST_HEAD(reaplist);

	if (!nfsd_netns_ready(nn))
		return count;

	spin_lock(&nn->client_lock);
	clp = nfsd_find_client(addr, addr_size);
	if (clp)
		count = nfsd_collect_client_locks(clp, &reaplist, 0);
	spin_unlock(&nn->client_lock);
	nfsd_reap_locks(&reaplist);
	return count;
}

u64
nfsd_inject_forget_locks(u64 max)
{
	u64 count = 0;
	struct nfs4_client *clp;
	struct nfsd_net *nn = net_generic(current->nsproxy->net_ns,
						nfsd_net_id);
	LIST_HEAD(reaplist);

	if (!nfsd_netns_ready(nn))
		return count;

	spin_lock(&nn->client_lock);
	list_for_each_entry(clp, &nn->client_lru, cl_lru) {
		count += nfsd_collect_client_locks(clp, &reaplist, max - count);
		if (max != 0 && count >= max)
			break;
	}
	spin_unlock(&nn->client_lock);
	nfsd_reap_locks(&reaplist);
	return count;
}
static u64
nfsd_foreach_client_openowner(struct nfs4_client *clp, u64 max,
			      struct list_head *collect,
			      void (*func)(struct nfs4_openowner *))
{
	struct nfs4_openowner *oop, *next;
	struct nfsd_net *nn = net_generic(current->nsproxy->net_ns,
						nfsd_net_id);
	u64 count = 0;

	lockdep_assert_held(&nn->client_lock);

	spin_lock(&clp->cl_lock);
	list_for_each_entry_safe(oop, next, &clp->cl_openowners, oo_perclient) {
		if (func) {
			func(oop);
			if (collect) {
				atomic_inc(&clp->cl_refcount);
				list_add(&oop->oo_perclient, collect);
			}
		}
		++count;
		/*
		 * Despite the fact that these functions deal with
		 * 64-bit integers for "count", we must ensure that
		 * it doesn't blow up the clp->cl_refcount. Throw a
		 * warning if we start to approach INT_MAX here.
		 */
		WARN_ON_ONCE(count == (INT_MAX / 2));
		if (count == max)
			break;
	}
	spin_unlock(&clp->cl_lock);

	return count;
}

static u64
nfsd_print_client_openowners(struct nfs4_client *clp)
{
	u64 count = nfsd_foreach_client_openowner(clp, 0, NULL, NULL);

	nfsd_print_count(clp, count, "openowners");
	return count;
}

static u64
nfsd_collect_client_openowners(struct nfs4_client *clp,
			       struct list_head *collect, u64 max)
{
	return nfsd_foreach_client_openowner(clp, max, collect,
						unhash_openowner_locked);
}

u64
nfsd_inject_print_openowners(void)
{
	struct nfs4_client *clp;
	u64 count = 0;
	struct nfsd_net *nn = net_generic(current->nsproxy->net_ns,
						nfsd_net_id);

	if (!nfsd_netns_ready(nn))
		return 0;

	spin_lock(&nn->client_lock);
	list_for_each_entry(clp, &nn->client_lru, cl_lru)
		count += nfsd_print_client_openowners(clp);
	spin_unlock(&nn->client_lock);

	return count;
}
static void
nfsd_reap_openowners(struct list_head *reaplist)
{
	struct nfs4_client *clp;
	struct nfs4_openowner *oop, *next;

	list_for_each_entry_safe(oop, next, reaplist, oo_perclient) {
		list_del_init(&oop->oo_perclient);
		clp = oop->oo_owner.so_client;
		release_openowner(oop);
		put_client(clp);
	}
}

u64
nfsd_inject_forget_client_openowners(struct sockaddr_storage *addr,
				     size_t addr_size)
{
	unsigned int count = 0;
	struct nfs4_client *clp;
	struct nfsd_net *nn = net_generic(current->nsproxy->net_ns,
						nfsd_net_id);
	LIST_HEAD(reaplist);

	if (!nfsd_netns_ready(nn))
		return count;

	spin_lock(&nn->client_lock);
	clp = nfsd_find_client(addr, addr_size);
	if (clp)
		count = nfsd_collect_client_openowners(clp, &reaplist, 0);
	spin_unlock(&nn->client_lock);
	nfsd_reap_openowners(&reaplist);
	return count;
}

u64
nfsd_inject_forget_openowners(u64 max)
{
	u64 count = 0;
	struct nfs4_client *clp;
	struct nfsd_net *nn = net_generic(current->nsproxy->net_ns,
						nfsd_net_id);
	LIST_HEAD(reaplist);

	if (!nfsd_netns_ready(nn))
		return count;

	spin_lock(&nn->client_lock);
	list_for_each_entry(clp, &nn->client_lru, cl_lru) {
		count += nfsd_collect_client_openowners(clp, &reaplist,
							max - count);
		if (max != 0 && count >= max)
			break;
	}
	spin_unlock(&nn->client_lock);
	nfsd_reap_openowners(&reaplist);
	return count;
}
static u64 nfsd_find_all_delegations(struct nfs4_client *clp, u64 max,
				     struct list_head *victims)
{
	struct nfs4_delegation *dp, *next;
	struct nfsd_net *nn = net_generic(current->nsproxy->net_ns,
						nfsd_net_id);
	u64 count = 0;

	lockdep_assert_held(&nn->client_lock);

	spin_lock(&state_lock);
	list_for_each_entry_safe(dp, next, &clp->cl_delegations, dl_perclnt) {
		if (victims) {
			/*
			 * It's not safe to mess with delegations that have a
			 * non-zero dl_time. They might have already been broken
			 * and could be processed by the laundromat outside of
			 * the state_lock. Just leave them be.
			 */
			if (dp->dl_time != 0)
				continue;

			atomic_inc(&clp->cl_refcount);
			unhash_delegation_locked(dp);
			list_add(&dp->dl_recall_lru, victims);
		}
		++count;
		/*
		 * Despite the fact that these functions deal with
		 * 64-bit integers for "count", we must ensure that
		 * it doesn't blow up the clp->cl_refcount. Throw a
		 * warning if we start to approach INT_MAX here.
		 */
		WARN_ON_ONCE(count == (INT_MAX / 2));
		if (count == max)
			break;
	}
	spin_unlock(&state_lock);
	return count;
}
static u64
nfsd_print_client_delegations(struct nfs4_client *clp)
{
	u64 count = nfsd_find_all_delegations(clp, 0, NULL);

	nfsd_print_count(clp, count, "delegations");
	return count;
}

u64
nfsd_inject_print_delegations(void)
{
	struct nfs4_client *clp;
	u64 count = 0;
	struct nfsd_net *nn = net_generic(current->nsproxy->net_ns,
						nfsd_net_id);

	if (!nfsd_netns_ready(nn))
		return 0;

	spin_lock(&nn->client_lock);
	list_for_each_entry(clp, &nn->client_lru, cl_lru)
		count += nfsd_print_client_delegations(clp);
	spin_unlock(&nn->client_lock);

	return count;
}

static void
nfsd_forget_delegations(struct list_head *reaplist)
{
	struct nfs4_client *clp;
	struct nfs4_delegation *dp, *next;

	list_for_each_entry_safe(dp, next, reaplist, dl_recall_lru) {
		list_del_init(&dp->dl_recall_lru);
		clp = dp->dl_stid.sc_client;
		revoke_delegation(dp);
		put_client(clp);
	}
}
u64
nfsd_inject_forget_client_delegations(struct sockaddr_storage *addr,
				      size_t addr_size)
{
	u64 count = 0;
	struct nfs4_client *clp;
	struct nfsd_net *nn = net_generic(current->nsproxy->net_ns,
						nfsd_net_id);
	LIST_HEAD(reaplist);

	if (!nfsd_netns_ready(nn))
		return count;

	spin_lock(&nn->client_lock);
	clp = nfsd_find_client(addr, addr_size);
	if (clp)
		count = nfsd_find_all_delegations(clp, 0, &reaplist);
	spin_unlock(&nn->client_lock);

	nfsd_forget_delegations(&reaplist);
	return count;
}

u64
nfsd_inject_forget_delegations(u64 max)
{
	u64 count = 0;
	struct nfs4_client *clp;
	struct nfsd_net *nn = net_generic(current->nsproxy->net_ns,
						nfsd_net_id);
	LIST_HEAD(reaplist);

	if (!nfsd_netns_ready(nn))
		return count;

	spin_lock(&nn->client_lock);
	list_for_each_entry(clp, &nn->client_lru, cl_lru) {
		count += nfsd_find_all_delegations(clp, max - count, &reaplist);
		if (max != 0 && count >= max)
			break;
	}
	spin_unlock(&nn->client_lock);
	nfsd_forget_delegations(&reaplist);
	return count;
}
static void
nfsd_recall_delegations(struct list_head *reaplist)
{
	struct nfs4_client *clp;
	struct nfs4_delegation *dp, *next;

	list_for_each_entry_safe(dp, next, reaplist, dl_recall_lru) {
		list_del_init(&dp->dl_recall_lru);
		clp = dp->dl_stid.sc_client;
		/*
		 * We skipped all entries that had a zero dl_time before,
		 * so we can now reset the dl_time back to 0. If a delegation
		 * break comes in now, then it won't make any difference since
		 * we're recalling it either way.
		 */
		spin_lock(&state_lock);
		dp->dl_time = 0;
		spin_unlock(&state_lock);
		nfsd_break_one_deleg(dp);
		put_client(clp);
	}
}

u64
nfsd_inject_recall_client_delegations(struct sockaddr_storage *addr,
				      size_t addr_size)
{
	u64 count = 0;
	struct nfs4_client *clp;
	struct nfsd_net *nn = net_generic(current->nsproxy->net_ns,
						nfsd_net_id);
	LIST_HEAD(reaplist);

	if (!nfsd_netns_ready(nn))
		return count;

	spin_lock(&nn->client_lock);
	clp = nfsd_find_client(addr, addr_size);
	if (clp)
		count = nfsd_find_all_delegations(clp, 0, &reaplist);
	spin_unlock(&nn->client_lock);

	nfsd_recall_delegations(&reaplist);
	return count;
}

u64
nfsd_inject_recall_delegations(u64 max)
{
	u64 count = 0;
	struct nfs4_client *clp, *next;
	struct nfsd_net *nn = net_generic(current->nsproxy->net_ns,
						nfsd_net_id);
	LIST_HEAD(reaplist);

	if (!nfsd_netns_ready(nn))
		return count;

	spin_lock(&nn->client_lock);
	list_for_each_entry_safe(clp, next, &nn->client_lru, cl_lru) {
		count += nfsd_find_all_delegations(clp, max - count, &reaplist);
		if (max != 0 && ++count >= max)
			break;
	}
	spin_unlock(&nn->client_lock);
	nfsd_recall_delegations(&reaplist);
	return count;
}
6245 * Since the lifetime of a delegation isn't limited to that of an open, a
6246 * client may quite reasonably hang on to a delegation as long as it has
6247 * the inode cached. This becomes an obvious problem the first time a
6248 * client's inode cache approaches the size of the server's total memory.
6250 * For now we avoid this problem by imposing a hard limit on the number
6251 * of delegations, which varies according to the server's memory size.
6254 set_max_delegations(void)
6257 * Allow at most 4 delegations per megabyte of RAM. Quick
6258 * estimates suggest that in the worst case (where every delegation
6259 * is for a different inode), a delegation could take about 1.5K,
6260 * giving a worst case usage of about 6% of memory.
6262 max_delegations
= nr_free_buffer_pages() >> (20 - 2 - PAGE_SHIFT
);
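/*
 * Illustrative sketch (assumption, not original source) of the shift
 * arithmetic above: a page is 2^PAGE_SHIFT bytes, so "pages >> (20 -
 * PAGE_SHIFT)" is megabytes of RAM, and the extra "- 2" multiplies the
 * result by 4. With 4 KiB pages (PAGE_SHIFT == 12) and ~1 GiB of free
 * buffer pages:
 *
 *	pages           = 262144		(1 GiB / 4 KiB)
 *	shift           = 20 - 2 - 12 = 6
 *	max_delegations = 262144 >> 6 = 4096	(4 per MiB, as advertised)
 */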
static int nfs4_state_create_net(struct net *net)
{
	struct nfsd_net *nn = net_generic(net, nfsd_net_id);
	int i;

	nn->conf_id_hashtbl = kmalloc(sizeof(struct list_head) *
			CLIENT_HASH_SIZE, GFP_KERNEL);
	if (!nn->conf_id_hashtbl)
		goto err;
	nn->unconf_id_hashtbl = kmalloc(sizeof(struct list_head) *
			CLIENT_HASH_SIZE, GFP_KERNEL);
	if (!nn->unconf_id_hashtbl)
		goto err_unconf_id;
	nn->sessionid_hashtbl = kmalloc(sizeof(struct list_head) *
			SESSION_HASH_SIZE, GFP_KERNEL);
	if (!nn->sessionid_hashtbl)
		goto err_sessionid;

	for (i = 0; i < CLIENT_HASH_SIZE; i++) {
		INIT_LIST_HEAD(&nn->conf_id_hashtbl[i]);
		INIT_LIST_HEAD(&nn->unconf_id_hashtbl[i]);
	}
	for (i = 0; i < SESSION_HASH_SIZE; i++)
		INIT_LIST_HEAD(&nn->sessionid_hashtbl[i]);
	nn->conf_name_tree = RB_ROOT;
	nn->unconf_name_tree = RB_ROOT;
	INIT_LIST_HEAD(&nn->client_lru);
	INIT_LIST_HEAD(&nn->close_lru);
	INIT_LIST_HEAD(&nn->del_recall_lru);
	spin_lock_init(&nn->client_lock);

	INIT_DELAYED_WORK(&nn->laundromat_work, laundromat_main);

	return 0;

err_sessionid:
	kfree(nn->unconf_id_hashtbl);
err_unconf_id:
	kfree(nn->conf_id_hashtbl);
err:
	return -ENOMEM;
}
static void
nfs4_state_destroy_net(struct net *net)
{
	int i;
	struct nfs4_client *clp = NULL;
	struct nfsd_net *nn = net_generic(net, nfsd_net_id);

	for (i = 0; i < CLIENT_HASH_SIZE; i++) {
		while (!list_empty(&nn->conf_id_hashtbl[i])) {
			clp = list_entry(nn->conf_id_hashtbl[i].next, struct nfs4_client, cl_idhash);
			destroy_client(clp);
		}
	}

	for (i = 0; i < CLIENT_HASH_SIZE; i++) {
		while (!list_empty(&nn->unconf_id_hashtbl[i])) {
			clp = list_entry(nn->unconf_id_hashtbl[i].next, struct nfs4_client, cl_idhash);
			destroy_client(clp);
		}
	}

	kfree(nn->sessionid_hashtbl);
	kfree(nn->unconf_id_hashtbl);
	kfree(nn->conf_id_hashtbl);
}
int
nfs4_state_start_net(struct net *net)
{
	struct nfsd_net *nn = net_generic(net, nfsd_net_id);
	int ret;

	ret = nfs4_state_create_net(net);
	if (ret)
		return ret;
	nfsd4_client_tracking_init(net);
	nn->boot_time = get_seconds();
	locks_start_grace(net, &nn->nfsd4_manager);
	nn->grace_ended = false;
	printk(KERN_INFO "NFSD: starting %ld-second grace period (net %p)\n",
	       nn->nfsd4_grace, net);
	queue_delayed_work(laundry_wq, &nn->laundromat_work, nn->nfsd4_grace * HZ);
	return 0;
}
/* initialization to perform when the nfsd service is started: */

int
nfs4_state_start(void)
{
	int ret;

	ret = set_callback_cred();
	if (ret)
		return -ENOMEM;
	laundry_wq = create_singlethread_workqueue("nfsd4");
	if (laundry_wq == NULL) {
		ret = -ENOMEM;
		goto out;
	}
	ret = nfsd4_create_callback_queue();
	if (ret)
		goto out_free_laundry;

	set_max_delegations();

	return 0;

out_free_laundry:
	destroy_workqueue(laundry_wq);
out:
	return ret;
}
void
nfs4_state_shutdown_net(struct net *net)
{
	struct nfs4_delegation *dp = NULL;
	struct list_head *pos, *next, reaplist;
	struct nfsd_net *nn = net_generic(net, nfsd_net_id);

	cancel_delayed_work_sync(&nn->laundromat_work);
	locks_end_grace(&nn->nfsd4_manager);

	INIT_LIST_HEAD(&reaplist);
	spin_lock(&state_lock);
	list_for_each_safe(pos, next, &nn->del_recall_lru) {
		dp = list_entry (pos, struct nfs4_delegation, dl_recall_lru);
		unhash_delegation_locked(dp);
		list_add(&dp->dl_recall_lru, &reaplist);
	}
	spin_unlock(&state_lock);
	list_for_each_safe(pos, next, &reaplist) {
		dp = list_entry (pos, struct nfs4_delegation, dl_recall_lru);
		list_del_init(&dp->dl_recall_lru);
		nfs4_put_stid(&dp->dl_stid);
	}

	nfsd4_client_tracking_exit(net);
	nfs4_state_destroy_net(net);
}
void
nfs4_state_shutdown(void)
{
	destroy_workqueue(laundry_wq);
	nfsd4_destroy_callback_queue();
}
static void
get_stateid(struct nfsd4_compound_state *cstate, stateid_t *stateid)
{
	if (HAS_STATE_ID(cstate, CURRENT_STATE_ID_FLAG) && CURRENT_STATEID(stateid))
		memcpy(stateid, &cstate->current_stateid, sizeof(stateid_t));
}
static void
put_stateid(struct nfsd4_compound_state *cstate, stateid_t *stateid)
{
	if (cstate->minorversion) {
		memcpy(&cstate->current_stateid, stateid, sizeof(stateid_t));
		SET_STATE_ID(cstate, CURRENT_STATE_ID_FLAG);
	}
}
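/*
 * Illustrative note (assumption, not original source): together these
 * helpers implement the NFSv4.1 "current stateid". put_stateid() saves
 * the stateid an operation just produced; if a later operation in the
 * same compound carries the special current-stateid value, get_stateid()
 * substitutes the saved one. For example, in the compound OPEN -> WRITE,
 * nfsd4_set_openstateid() records the new open stateid and
 * nfsd4_get_writestateid() replaces the placeholder in the WRITE.
 */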
void
clear_current_stateid(struct nfsd4_compound_state *cstate)
{
	CLEAR_STATE_ID(cstate, CURRENT_STATE_ID_FLAG);
}
/*
 * functions to set current state id
 */
void
nfsd4_set_opendowngradestateid(struct nfsd4_compound_state *cstate, struct nfsd4_open_downgrade *odp)
{
	put_stateid(cstate, &odp->od_stateid);
}

void
nfsd4_set_openstateid(struct nfsd4_compound_state *cstate, struct nfsd4_open *open)
{
	put_stateid(cstate, &open->op_stateid);
}

void
nfsd4_set_closestateid(struct nfsd4_compound_state *cstate, struct nfsd4_close *close)
{
	put_stateid(cstate, &close->cl_stateid);
}

void
nfsd4_set_lockstateid(struct nfsd4_compound_state *cstate, struct nfsd4_lock *lock)
{
	put_stateid(cstate, &lock->lk_resp_stateid);
}
/*
 * functions to consume current state id
 */

void
nfsd4_get_opendowngradestateid(struct nfsd4_compound_state *cstate, struct nfsd4_open_downgrade *odp)
{
	get_stateid(cstate, &odp->od_stateid);
}

void
nfsd4_get_delegreturnstateid(struct nfsd4_compound_state *cstate, struct nfsd4_delegreturn *drp)
{
	get_stateid(cstate, &drp->dr_stateid);
}

void
nfsd4_get_freestateid(struct nfsd4_compound_state *cstate, struct nfsd4_free_stateid *fsp)
{
	get_stateid(cstate, &fsp->fr_stateid);
}

void
nfsd4_get_setattrstateid(struct nfsd4_compound_state *cstate, struct nfsd4_setattr *setattr)
{
	get_stateid(cstate, &setattr->sa_stateid);
}

void
nfsd4_get_closestateid(struct nfsd4_compound_state *cstate, struct nfsd4_close *close)
{
	get_stateid(cstate, &close->cl_stateid);
}

void
nfsd4_get_lockustateid(struct nfsd4_compound_state *cstate, struct nfsd4_locku *locku)
{
	get_stateid(cstate, &locku->lu_stateid);
}

void
nfsd4_get_readstateid(struct nfsd4_compound_state *cstate, struct nfsd4_read *read)
{
	get_stateid(cstate, &read->rd_stateid);
}

void
nfsd4_get_writestateid(struct nfsd4_compound_state *cstate, struct nfsd4_write *write)
{
	get_stateid(cstate, &write->wr_stateid);
}