/*
 * Client-side XDR for NFSv4.
 *
 * Copyright (c) 2002 The Regents of the University of Michigan.
 * All rights reserved.
 *
 * Kendrick Smith <kmsmith@umich.edu>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
 * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
 * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Implementation of the NFSv4 state model.  For the time being,
 * this is minimal, but will be made much more complex in a
 * subsequent patch.
 */
#include <linux/config.h>
#include <linux/slab.h>
#include <linux/smp_lock.h>
#include <linux/nfs_fs.h>
#include <linux/nfs_idmap.h>
#include <linux/workqueue.h>
#include <linux/bitops.h>

#include "delegation.h"

#define OPENOWNER_POOL_SIZE	8

const nfs4_stateid zero_stateid;

static DEFINE_SPINLOCK(state_spinlock);
static LIST_HEAD(nfs4_clientid_list);

static void nfs4_recover_state(void *);
void
init_nfsv4_state(struct nfs_server *server)
{
	server->nfs4_state = NULL;
	INIT_LIST_HEAD(&server->nfs4_siblings);
}
void
destroy_nfsv4_state(struct nfs_server *server)
{
	kfree(server->mnt_path);
	server->mnt_path = NULL;
	if (server->nfs4_state) {
		nfs4_put_client(server->nfs4_state);
		server->nfs4_state = NULL;
	}
}
/*
 * nfs4_get_client(): returns an empty client structure
 * nfs4_put_client(): drops reference to client structure
 *
 * Since these are allocated/deallocated very rarely, we don't
 * bother putting them in a slab cache...
 */
static struct nfs4_client *
nfs4_alloc_client(struct in_addr *addr)
{
	struct nfs4_client *clp;

	if (nfs_callback_up() < 0)
		return NULL;
	if ((clp = kmalloc(sizeof(*clp), GFP_KERNEL)) == NULL) {
		nfs_callback_down();
		return NULL;
	}
	memset(clp, 0, sizeof(*clp));
	memcpy(&clp->cl_addr, addr, sizeof(clp->cl_addr));
	init_rwsem(&clp->cl_sem);
	INIT_LIST_HEAD(&clp->cl_delegations);
	INIT_LIST_HEAD(&clp->cl_state_owners);
	INIT_LIST_HEAD(&clp->cl_unused);
	spin_lock_init(&clp->cl_lock);
	atomic_set(&clp->cl_count, 1);
	INIT_WORK(&clp->cl_recoverd, nfs4_recover_state, clp);
	INIT_WORK(&clp->cl_renewd, nfs4_renew_state, clp);
	INIT_LIST_HEAD(&clp->cl_superblocks);
	init_waitqueue_head(&clp->cl_waitq);
	rpc_init_wait_queue(&clp->cl_rpcwaitq, "NFS4 client");
	clp->cl_rpcclient = ERR_PTR(-EINVAL);
	clp->cl_boot_time = CURRENT_TIME;
	clp->cl_state = 1 << NFS4CLNT_OK;
	return clp;
}
static void
nfs4_free_client(struct nfs4_client *clp)
{
	struct nfs4_state_owner *sp;

	while (!list_empty(&clp->cl_unused)) {
		sp = list_entry(clp->cl_unused.next,
				struct nfs4_state_owner,
				so_list);
		list_del(&sp->so_list);
		kfree(sp);
	}
	BUG_ON(!list_empty(&clp->cl_state_owners));
	if (clp->cl_cred)
		put_rpccred(clp->cl_cred);
	nfs_idmap_delete(clp);
	if (!IS_ERR(clp->cl_rpcclient))
		rpc_shutdown_client(clp->cl_rpcclient);
	kfree(clp);
	nfs_callback_down();
}
static struct nfs4_client *__nfs4_find_client(struct in_addr *addr)
{
	struct nfs4_client *clp;

	list_for_each_entry(clp, &nfs4_clientid_list, cl_servers) {
		if (memcmp(&clp->cl_addr, addr, sizeof(clp->cl_addr)) == 0) {
			atomic_inc(&clp->cl_count);
			return clp;
		}
	}
	return NULL;
}
struct nfs4_client *nfs4_find_client(struct in_addr *addr)
{
	struct nfs4_client *clp;

	spin_lock(&state_spinlock);
	clp = __nfs4_find_client(addr);
	spin_unlock(&state_spinlock);
	return clp;
}
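
/*
 * nfs4_get_client() below looks up the nfs4_client for @addr under
 * state_spinlock.  If none is found, the spinlock is dropped while a
 * fresh structure is allocated, then retaken so the list can be
 * re-checked before the new client is inserted; a duplicate that lost
 * the race is released again through nfs4_free_client().
 */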
struct nfs4_client *
nfs4_get_client(struct in_addr *addr)
{
	struct nfs4_client *clp, *new = NULL;

	spin_lock(&state_spinlock);
	for (;;) {
		clp = __nfs4_find_client(addr);
		if (clp != NULL)
			break;
		clp = new;
		if (clp != NULL) {
			list_add(&clp->cl_servers, &nfs4_clientid_list);
			new = NULL;
			break;
		}
		spin_unlock(&state_spinlock);
		new = nfs4_alloc_client(addr);
		spin_lock(&state_spinlock);
		if (new == NULL)
			break;
	}
	spin_unlock(&state_spinlock);
	if (new != NULL)
		nfs4_free_client(new);
	return clp;
}
void
nfs4_put_client(struct nfs4_client *clp)
{
	if (!atomic_dec_and_lock(&clp->cl_count, &state_spinlock))
		return;
	list_del(&clp->cl_servers);
	spin_unlock(&state_spinlock);
	BUG_ON(!list_empty(&clp->cl_superblocks));
	wake_up_all(&clp->cl_waitq);
	rpc_wake_up(&clp->cl_rpcwaitq);
	nfs4_kill_renewd(clp);
	nfs4_free_client(clp);
}
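
/*
 * Establish our lease on the server: SETCLIENTID advertises the
 * callback channel, SETCLIENTID_CONFIRM commits the clientid, and
 * nfs4_schedule_state_renewal() then starts the periodic RENEW work.
 */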
static int __nfs4_init_client(struct nfs4_client *clp)
{
	int status = nfs4_proc_setclientid(clp, NFS4_CALLBACK, nfs_callback_tcpport);
	if (status == 0)
		status = nfs4_proc_setclientid_confirm(clp);
	if (status == 0)
		nfs4_schedule_state_renewal(clp);
	return status;
}

int nfs4_init_client(struct nfs4_client *clp)
{
	return nfs4_map_errors(__nfs4_init_client(clp));
}
static u32
nfs4_alloc_lockowner_id(struct nfs4_client *clp)
{
	return clp->cl_lockowner_id++;
}
static struct nfs4_state_owner *
nfs4_client_grab_unused(struct nfs4_client *clp, struct rpc_cred *cred)
{
	struct nfs4_state_owner *sp = NULL;

	if (!list_empty(&clp->cl_unused)) {
		sp = list_entry(clp->cl_unused.next, struct nfs4_state_owner, so_list);
		atomic_inc(&sp->so_count);
		sp->so_cred = cred;
		list_move(&sp->so_list, &clp->cl_state_owners);
		clp->cl_nunused--;
	}
	return sp;
}
static struct nfs4_state_owner *
nfs4_find_state_owner(struct nfs4_client *clp, struct rpc_cred *cred)
{
	struct nfs4_state_owner *sp, *res = NULL;

	list_for_each_entry(sp, &clp->cl_state_owners, so_list) {
		if (sp->so_cred != cred)
			continue;
		atomic_inc(&sp->so_count);
		/* Move to the head of the list */
		list_move(&sp->so_list, &clp->cl_state_owners);
		res = sp;
		break;
	}
	return res;
}
/*
 * nfs4_alloc_state_owner(): this is called on the OPEN or CREATE path to
 * create a new state_owner.
 */
static struct nfs4_state_owner *
nfs4_alloc_state_owner(void)
{
	struct nfs4_state_owner *sp;

	sp = kzalloc(sizeof(*sp), GFP_KERNEL);
	if (!sp)
		return NULL;
	spin_lock_init(&sp->so_lock);
	INIT_LIST_HEAD(&sp->so_states);
	INIT_LIST_HEAD(&sp->so_delegations);
	rpc_init_wait_queue(&sp->so_sequence.wait, "Seqid_waitqueue");
	sp->so_seqid.sequence = &sp->so_sequence;
	spin_lock_init(&sp->so_sequence.lock);
	INIT_LIST_HEAD(&sp->so_sequence.list);
	atomic_set(&sp->so_count, 1);
	return sp;
}
static void
nfs4_drop_state_owner(struct nfs4_state_owner *sp)
{
	struct nfs4_client *clp = sp->so_client;

	spin_lock(&clp->cl_lock);
	list_del_init(&sp->so_list);
	spin_unlock(&clp->cl_lock);
}
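
/*
 * nfs4_get_state_owner() below picks the open_owner to use for @cred:
 * an owner already on cl_state_owners with a matching credential is
 * reused, otherwise one is recycled from the cl_unused pool, and only
 * as a last resort is the freshly allocated owner linked in and given
 * a new id via nfs4_alloc_lockowner_id().
 */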
/*
 * Note: must be called with clp->cl_sem held in order to prevent races
 * with reboot recovery!
 */
struct nfs4_state_owner *nfs4_get_state_owner(struct nfs_server *server, struct rpc_cred *cred)
{
	struct nfs4_client *clp = server->nfs4_state;
	struct nfs4_state_owner *sp, *new;

	new = nfs4_alloc_state_owner();
	spin_lock(&clp->cl_lock);
	sp = nfs4_find_state_owner(clp, cred);
	if (sp == NULL)
		sp = nfs4_client_grab_unused(clp, cred);
	if (sp == NULL && new != NULL) {
		list_add(&new->so_list, &clp->cl_state_owners);
		new->so_client = clp;
		new->so_id = nfs4_alloc_lockowner_id(clp);
		new->so_cred = cred;
		sp = new;
		new = NULL;
	}
	spin_unlock(&clp->cl_lock);
/*
 * Must be called with clp->cl_sem held in order to avoid races
 * with state recovery...
 */
void nfs4_put_state_owner(struct nfs4_state_owner *sp)
{
	struct nfs4_client *clp = sp->so_client;
	struct rpc_cred *cred = sp->so_cred;

	if (!atomic_dec_and_lock(&sp->so_count, &clp->cl_lock))
		return;
	if (clp->cl_nunused >= OPENOWNER_POOL_SIZE)
		goto out_free;
	if (list_empty(&sp->so_list))
		goto out_free;
	list_move(&sp->so_list, &clp->cl_unused);
	clp->cl_nunused++;
	spin_unlock(&clp->cl_lock);
	put_rpccred(cred);
	return;
out_free:
	list_del(&sp->so_list);
	spin_unlock(&clp->cl_lock);
	put_rpccred(cred);
	kfree(sp);
}
static struct nfs4_state *
nfs4_alloc_open_state(void)
{
	struct nfs4_state *state;

	state = kmalloc(sizeof(*state), GFP_KERNEL);
	if (!state)
		return NULL;
	state->state = 0;
	state->nreaders = 0;
	state->nwriters = 0;
	state->flags = 0;
	memset(state->stateid.data, 0, sizeof(state->stateid.data));
	atomic_set(&state->count, 1);
	INIT_LIST_HEAD(&state->lock_states);
	spin_lock_init(&state->state_lock);
	return state;
}
static void
nfs4_state_set_mode_locked(struct nfs4_state *state, mode_t mode)
{
	if (state->state == mode)
		return;
	/* NB! List reordering - see the reclaim code for why. */
	if ((mode & FMODE_WRITE) != (state->state & FMODE_WRITE)) {
		if (mode & FMODE_WRITE)
			list_move(&state->open_states, &state->owner->so_states);
		else
			list_move_tail(&state->open_states, &state->owner->so_states);
	}
	if (mode == 0)
		list_del_init(&state->inode_states);
	state->state = mode;
}
static struct nfs4_state *
__nfs4_find_state_byowner(struct inode *inode, struct nfs4_state_owner *owner)
{
	struct nfs_inode *nfsi = NFS_I(inode);
	struct nfs4_state *state;

	list_for_each_entry(state, &nfsi->open_states, inode_states) {
		/* Is this in the process of being freed? */
		if (state->state == 0)
			continue;
		if (state->owner == owner) {
			atomic_inc(&state->count);
			return state;
		}
	}
	return NULL;
}
static void
nfs4_free_open_state(struct nfs4_state *state)
{
	kfree(state);
}
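
/*
 * nfs4_get_open_state() returns the nfs4_state shared by @owner and
 * @inode, creating it if necessary.  The candidate is allocated
 * outside the locks; the open_states lists are then re-checked under
 * owner->so_lock and inode->i_lock before the new state is linked
 * into both the inode and the owner, so a racing lookup cannot see a
 * half-initialized entry.
 */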
struct nfs4_state *
nfs4_get_open_state(struct inode *inode, struct nfs4_state_owner *owner)
{
	struct nfs4_state *state, *new;
	struct nfs_inode *nfsi = NFS_I(inode);

	spin_lock(&inode->i_lock);
	state = __nfs4_find_state_byowner(inode, owner);
	spin_unlock(&inode->i_lock);
	new = nfs4_alloc_open_state();
	spin_lock(&owner->so_lock);
	spin_lock(&inode->i_lock);
	state = __nfs4_find_state_byowner(inode, owner);
	if (state == NULL && new != NULL) {
		state = new;
		state->owner = owner;
		atomic_inc(&owner->so_count);
		list_add(&state->inode_states, &nfsi->open_states);
		state->inode = igrab(inode);
		spin_unlock(&inode->i_lock);
		/* Note: The reclaim code dictates that we add stateless
		 * and read-only stateids to the end of the list */
		list_add_tail(&state->open_states, &owner->so_states);
		spin_unlock(&owner->so_lock);
	} else {
		spin_unlock(&inode->i_lock);
		spin_unlock(&owner->so_lock);
		if (new)
			nfs4_free_open_state(new);
	}
	return state;
}
/*
 * Beware! Caller must be holding exactly one
 * reference to clp->cl_sem!
 */
void nfs4_put_open_state(struct nfs4_state *state)
{
	struct inode *inode = state->inode;
	struct nfs4_state_owner *owner = state->owner;

	if (!atomic_dec_and_lock(&state->count, &owner->so_lock))
		return;
	spin_lock(&inode->i_lock);
	if (!list_empty(&state->inode_states))
		list_del(&state->inode_states);
	list_del(&state->open_states);
	spin_unlock(&inode->i_lock);
	spin_unlock(&owner->so_lock);
	iput(inode);
	nfs4_free_open_state(state);
	nfs4_put_state_owner(owner);
}
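
/*
 * Closing a file drops the reader and/or writer counts for @mode and
 * recomputes the share access the state still needs.  Only when the
 * mode really changed, and no delegation covers the file, is
 * nfs4_do_close() asked to update the server; otherwise the change is
 * recorded locally via nfs4_state_set_mode_locked().
 */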
/*
 * Close the current file.
 */
void nfs4_close_state(struct nfs4_state *state, mode_t mode)
{
	struct inode *inode = state->inode;
	struct nfs4_state_owner *owner = state->owner;
	int oldstate, newstate = 0;

	atomic_inc(&owner->so_count);
	/* Protect against nfs4_find_state() */
	spin_lock(&owner->so_lock);
	spin_lock(&inode->i_lock);
	if (mode & FMODE_READ)
		state->nreaders--;
	if (mode & FMODE_WRITE)
		state->nwriters--;
	oldstate = newstate = state->state;
	if (state->nreaders == 0)
		newstate &= ~FMODE_READ;
	if (state->nwriters == 0)
		newstate &= ~FMODE_WRITE;
	if (test_bit(NFS_DELEGATED_STATE, &state->flags)) {
		nfs4_state_set_mode_locked(state, newstate);
		oldstate = newstate;
	}
	spin_unlock(&inode->i_lock);
	spin_unlock(&owner->so_lock);

	if (oldstate != newstate && nfs4_do_close(inode, state) == 0)
		return;
	nfs4_put_open_state(state);
	nfs4_put_state_owner(owner);
}
/*
 * Search the state->lock_states for an existing lock_owner
 * that is compatible with current->files
 */
static struct nfs4_lock_state *
__nfs4_find_lock_state(struct nfs4_state *state, fl_owner_t fl_owner)
{
	struct nfs4_lock_state *pos;
	list_for_each_entry(pos, &state->lock_states, ls_locks) {
		if (pos->ls_owner != fl_owner)
			continue;
		atomic_inc(&pos->ls_count);
		return pos;
	}
	return NULL;
}
/*
 * Return a compatible lock_state. If no initialized lock_state structure
 * exists, return an uninitialized one.
 *
 */
static struct nfs4_lock_state *nfs4_alloc_lock_state(struct nfs4_state *state, fl_owner_t fl_owner)
{
	struct nfs4_lock_state *lsp;
	struct nfs4_client *clp = state->owner->so_client;

	lsp = kzalloc(sizeof(*lsp), GFP_KERNEL);
	if (lsp == NULL)
		return NULL;
	lsp->ls_seqid.sequence = &state->owner->so_sequence;
	atomic_set(&lsp->ls_count, 1);
	lsp->ls_owner = fl_owner;
	spin_lock(&clp->cl_lock);
	lsp->ls_id = nfs4_alloc_lockowner_id(clp);
	spin_unlock(&clp->cl_lock);
	INIT_LIST_HEAD(&lsp->ls_locks);
	return lsp;
}
/*
 * Return a compatible lock_state. If no initialized lock_state structure
 * exists, return an uninitialized one.
 *
 * The caller must be holding clp->cl_sem
 */
static struct nfs4_lock_state *nfs4_get_lock_state(struct nfs4_state *state, fl_owner_t owner)
{
	struct nfs4_lock_state *lsp, *new = NULL;

	for (;;) {
		spin_lock(&state->state_lock);
		lsp = __nfs4_find_lock_state(state, owner);
		if (lsp != NULL)
			break;
		if (new != NULL) {
			new->ls_state = state;
			list_add(&new->ls_locks, &state->lock_states);
			set_bit(LK_STATE_IN_USE, &state->flags);
			lsp = new;
			new = NULL;
			break;
		}
		spin_unlock(&state->state_lock);
		new = nfs4_alloc_lock_state(state, owner);
		if (new == NULL)
			return NULL;
	}
	spin_unlock(&state->state_lock);
	kfree(new);
	return lsp;
}
/*
 * Release reference to lock_state, and free it if we see that
 * it is no longer in use
 */
void nfs4_put_lock_state(struct nfs4_lock_state *lsp)
{
	struct nfs4_state *state;

	if (lsp == NULL)
		return;
	state = lsp->ls_state;
	if (!atomic_dec_and_lock(&lsp->ls_count, &state->state_lock))
		return;
	list_del(&lsp->ls_locks);
	if (list_empty(&state->lock_states))
		clear_bit(LK_STATE_IN_USE, &state->flags);
	spin_unlock(&state->state_lock);
	kfree(lsp);
}
static void nfs4_fl_copy_lock(struct file_lock *dst, struct file_lock *src)
{
	struct nfs4_lock_state *lsp = src->fl_u.nfs4_fl.owner;

	dst->fl_u.nfs4_fl.owner = lsp;
	atomic_inc(&lsp->ls_count);
}
static void nfs4_fl_release_lock(struct file_lock *fl)
{
	nfs4_put_lock_state(fl->fl_u.nfs4_fl.owner);
}
static struct file_lock_operations nfs4_fl_lock_ops = {
	.fl_copy_lock = nfs4_fl_copy_lock,
	.fl_release_private = nfs4_fl_release_lock,
};
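
/*
 * The file_lock_operations above keep the nfs4_lock_state refcount in
 * step with the VFS: fl_copy_lock takes an extra reference whenever a
 * lock is copied or split, and fl_release_private drops it again when
 * the file_lock is freed.
 */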
int nfs4_set_lock_state(struct nfs4_state *state, struct file_lock *fl)
{
	struct nfs4_lock_state *lsp;

	if (fl->fl_ops != NULL)
		return 0;
	lsp = nfs4_get_lock_state(state, fl->fl_owner);
	if (lsp == NULL)
		return -ENOMEM;
	fl->fl_u.nfs4_fl.owner = lsp;
	fl->fl_ops = &nfs4_fl_lock_ops;
	return 0;
}
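
/*
 * Reads and writes normally carry the open stateid.  Once a byte-range
 * lock has been confirmed for this lock owner (NFS_LOCK_INITIALIZED),
 * the helper below substitutes the lock stateid instead.
 */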
/*
 * Byte-range lock aware utility to initialize the stateid of read/write
 * requests.
 */
void nfs4_copy_stateid(nfs4_stateid *dst, struct nfs4_state *state, fl_owner_t fl_owner)
{
	struct nfs4_lock_state *lsp;

	memcpy(dst, &state->stateid, sizeof(*dst));
	if (test_bit(LK_STATE_IN_USE, &state->flags) == 0)
		return;

	spin_lock(&state->state_lock);
	lsp = __nfs4_find_lock_state(state, fl_owner);
	if (lsp != NULL && (lsp->ls_flags & NFS_LOCK_INITIALIZED) != 0)
		memcpy(dst, &lsp->ls_stateid, sizeof(*dst));
	spin_unlock(&state->state_lock);
	nfs4_put_lock_state(lsp);
}
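
/*
 * Seqid-mutating operations (OPEN, CLOSE, LOCK, ...) for a given owner
 * must be issued one at a time.  Each such RPC allocates an nfs_seqid
 * and queues it on the owner's rpc_sequence list; only the entry at
 * the head of that list may proceed (see nfs_wait_on_sequence()), and
 * nfs_free_seqid() wakes up the next waiter when an entry goes away.
 */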
struct nfs_seqid *nfs_alloc_seqid(struct nfs_seqid_counter *counter)
{
	struct rpc_sequence *sequence = counter->sequence;
	struct nfs_seqid *new;

	new = kmalloc(sizeof(*new), GFP_KERNEL);
	if (new != NULL) {
		new->sequence = counter;
		spin_lock(&sequence->lock);
		list_add_tail(&new->list, &sequence->list);
		spin_unlock(&sequence->lock);
	}
	return new;
}
void nfs_free_seqid(struct nfs_seqid *seqid)
{
	struct rpc_sequence *sequence = seqid->sequence->sequence;

	spin_lock(&sequence->lock);
	list_del(&seqid->list);
	spin_unlock(&sequence->lock);
	rpc_wake_up(&sequence->wait);
	kfree(seqid);
}
/*
 * Increment the seqid if the OPEN/OPEN_DOWNGRADE/CLOSE succeeded, or
 * failed with a seqid incrementing error -
 * see comments nfs_fs.h:seqid_mutating_error()
 */
static inline void nfs_increment_seqid(int status, struct nfs_seqid *seqid)
{
	switch (status) {
		case 0:
			break;
		case -NFS4ERR_BAD_SEQID:
		case -NFS4ERR_STALE_CLIENTID:
		case -NFS4ERR_STALE_STATEID:
		case -NFS4ERR_BAD_STATEID:
		case -NFS4ERR_BADXDR:
		case -NFS4ERR_RESOURCE:
		case -NFS4ERR_NOFILEHANDLE:
			/* Non-seqid mutating errors */
			return;
	}
	/*
	 * Note: no locking needed as we are guaranteed to be first
	 * on the sequence list
	 */
	seqid->sequence->counter++;
}
void nfs_increment_open_seqid(int status, struct nfs_seqid *seqid)
{
	if (status == -NFS4ERR_BAD_SEQID) {
		struct nfs4_state_owner *sp = container_of(seqid->sequence,
				struct nfs4_state_owner, so_seqid);
		nfs4_drop_state_owner(sp);
	}
	return nfs_increment_seqid(status, seqid);
}
/*
 * Increment the seqid if the LOCK/LOCKU succeeded, or
 * failed with a seqid incrementing error -
 * see comments nfs_fs.h:seqid_mutating_error()
 */
void nfs_increment_lock_seqid(int status, struct nfs_seqid *seqid)
{
	return nfs_increment_seqid(status, seqid);
}
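
/*
 * Called from the RPC state machine: if this seqid has not yet reached
 * the head of the sequence list, put the task to sleep on the sequence
 * wait queue; it is woken again from nfs_free_seqid().  The unlocked
 * head check is only an optimisation; the result is confirmed under
 * sequence->lock.
 */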
int nfs_wait_on_sequence(struct nfs_seqid *seqid, struct rpc_task *task)
{
	struct rpc_sequence *sequence = seqid->sequence->sequence;
	int status = 0;

	if (sequence->list.next == &seqid->list)
		goto out;
	spin_lock(&sequence->lock);
	if (sequence->list.next != &seqid->list) {
		rpc_sleep_on(&sequence->wait, task, NULL, NULL);
		status = -EAGAIN;
	}
	spin_unlock(&sequence->lock);
out:
	return status;
}
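
/*
 * State recovery.  nfs4_recover_state() runs from the cl_recoverd work
 * queue entry: it forks the reclaimer() thread below and waits for the
 * completion to be signalled, so that the recovery proper can sleep
 * and issue synchronous RPC rather than blocking the work queue.
 */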
static int reclaimer(void *);
struct reclaimer_args {
	struct nfs4_client *clp;
	struct completion complete;
};
/*
 * State recovery routine
 */
static void
nfs4_recover_state(void *data)
{
	struct nfs4_client *clp = (struct nfs4_client *)data;
	struct reclaimer_args args = {
		.clp = clp,
	};

	init_completion(&args.complete);

	if (kernel_thread(reclaimer, &args, CLONE_KERNEL) < 0)
		goto out_failed_clear;
	wait_for_completion(&args.complete);
	return;
out_failed_clear:
	set_bit(NFS4CLNT_OK, &clp->cl_state);
	wake_up_all(&clp->cl_waitq);
	rpc_wake_up(&clp->cl_rpcwaitq);
}
/*
 * Schedule a state recovery attempt
 */
void
nfs4_schedule_state_recovery(struct nfs4_client *clp)
{
	if (!clp)
		return;
	if (test_and_clear_bit(NFS4CLNT_OK, &clp->cl_state))
		schedule_work(&clp->cl_recoverd);
}
static int nfs4_reclaim_locks(struct nfs4_state_recovery_ops *ops, struct nfs4_state *state)
{
	struct inode *inode = state->inode;
	struct file_lock *fl;
	int status = 0;

	for (fl = inode->i_flock; fl != 0; fl = fl->fl_next) {
		if (!(fl->fl_flags & (FL_POSIX|FL_FLOCK)))
			continue;
		if (((struct nfs_open_context *)fl->fl_file->private_data)->state != state)
			continue;
		status = ops->recover_lock(state, fl);
		if (status >= 0)
			continue;
		switch (status) {
			default:
				printk(KERN_ERR "%s: unhandled error %d. Zeroing state\n",
						__FUNCTION__, status);
			case -NFS4ERR_EXPIRED:
			case -NFS4ERR_NO_GRACE:
			case -NFS4ERR_RECLAIM_BAD:
			case -NFS4ERR_RECLAIM_CONFLICT:
				/* kill_proc(fl->fl_pid, SIGLOST, 1); */
				break;
			case -NFS4ERR_STALE_CLIENTID:
static int nfs4_reclaim_open_state(struct nfs4_state_recovery_ops *ops, struct nfs4_state_owner *sp)
{
	struct nfs4_state *state;
	struct nfs4_lock_state *lock;
	int status = 0;

	/* Note: we rely on the sp->so_states list being ordered
	 * so that we always reclaim open(O_RDWR) and/or open(O_WRITE)
	 * states first.
	 * This is needed to ensure that the server won't give us any
	 * read delegations that we have to return if, say, we are
	 * recovering after a network partition or a reboot from a
	 * server that doesn't support a grace period.
	 */
	list_for_each_entry(state, &sp->so_states, open_states) {
		if (state->state == 0)
			continue;
		status = ops->recover_open(sp, state);
		status = nfs4_reclaim_locks(ops, state);
		list_for_each_entry(lock, &state->lock_states, ls_locks) {
			if (!(lock->ls_flags & NFS_LOCK_INITIALIZED))
				printk("%s: Lock reclaim failed!\n",
						__FUNCTION__);
		}
		switch (status) {
			default:
				printk(KERN_ERR "%s: unhandled error %d. Zeroing state\n",
						__FUNCTION__, status);
			case -NFS4ERR_RECLAIM_BAD:
			case -NFS4ERR_RECLAIM_CONFLICT:
				/*
				 * Open state on this file cannot be recovered
				 * All we can do is revert to using the zero stateid.
				 */
				memset(state->stateid.data, 0,
					sizeof(state->stateid.data));
				/* Mark the file as being 'closed' */
				state->state = 0;
				break;
			case -NFS4ERR_EXPIRED:
			case -NFS4ERR_NO_GRACE:
			case -NFS4ERR_STALE_CLIENTID:
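
/*
 * Before reclaiming, every open-owner and lock-owner sequence id is
 * reset to zero and NFS_LOCK_INITIALIZED is cleared on the lock
 * states: the new clientid starts a fresh seqid space on the server,
 * and each lock has to be re-established before its stateid can be
 * trusted again.
 */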
static void nfs4_state_mark_reclaim(struct nfs4_client *clp)
{
	struct nfs4_state_owner *sp;
	struct nfs4_state *state;
	struct nfs4_lock_state *lock;

	/* Reset all sequence ids to zero */
	list_for_each_entry(sp, &clp->cl_state_owners, so_list) {
		sp->so_seqid.counter = 0;
		sp->so_seqid.flags = 0;
		spin_lock(&sp->so_lock);
		list_for_each_entry(state, &sp->so_states, open_states) {
			list_for_each_entry(lock, &state->lock_states, ls_locks) {
				lock->ls_seqid.counter = 0;
				lock->ls_seqid.flags = 0;
				lock->ls_flags &= ~NFS_LOCK_INITIALIZED;
			}
		}
		spin_unlock(&sp->so_lock);
	}
}
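
/*
 * The reclaimer thread: pin the client, signal whoever spawned us, and
 * take cl_sem for write so nothing else touches NFSv4 state while we
 * work.  A RENEW probe decides between reboot recovery
 * (STALE_CLIENTID / LEASE_MOVED) and network-partition recovery; the
 * clientid is then re-established via __nfs4_init_client() and all
 * delegations, opens and locks are reclaimed before the waiters are
 * woken again.
 */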
static int reclaimer(void *ptr)
{
	struct reclaimer_args *args = (struct reclaimer_args *)ptr;
	struct nfs4_client *clp = args->clp;
	struct nfs4_state_owner *sp;
	struct nfs4_state_recovery_ops *ops;
	int status = 0;

	daemonize("%u.%u.%u.%u-reclaim", NIPQUAD(clp->cl_addr));
	allow_signal(SIGKILL);

	atomic_inc(&clp->cl_count);
	complete(&args->complete);

	/* Ensure exclusive access to NFSv4 state */
	lock_kernel();
	down_write(&clp->cl_sem);
	/* Are there any NFS mounts out there? */
	if (list_empty(&clp->cl_superblocks))
		goto out;
restart_loop:
	status = nfs4_proc_renew(clp);
	switch (status) {
		case 0:
		case -NFS4ERR_CB_PATH_DOWN:
			goto out;
		case -NFS4ERR_STALE_CLIENTID:
		case -NFS4ERR_LEASE_MOVED:
			ops = &nfs4_reboot_recovery_ops;
			break;
		default:
			ops = &nfs4_network_partition_recovery_ops;
	}
	nfs4_state_mark_reclaim(clp);
	status = __nfs4_init_client(clp);
	if (status)
		goto out_error;
	/* Mark all delegations for reclaim */
	nfs_delegation_mark_reclaim(clp);
	/* Note: list is protected by exclusive lock on cl->cl_sem */
	list_for_each_entry(sp, &clp->cl_state_owners, so_list) {
		status = nfs4_reclaim_open_state(ops, sp);
		if (status < 0) {
			if (status == -NFS4ERR_NO_GRACE) {
				ops = &nfs4_network_partition_recovery_ops;
				status = nfs4_reclaim_open_state(ops, sp);
			}
			if (status == -NFS4ERR_STALE_CLIENTID)
				goto restart_loop;
			if (status == -NFS4ERR_EXPIRED)
				goto restart_loop;
		}
	}
	nfs_delegation_reap_unclaimed(clp);
out:
	set_bit(NFS4CLNT_OK, &clp->cl_state);
	up_write(&clp->cl_sem);
	unlock_kernel();
	wake_up_all(&clp->cl_waitq);
	rpc_wake_up(&clp->cl_rpcwaitq);
	if (status == -NFS4ERR_CB_PATH_DOWN)
		nfs_handle_cb_pathdown(clp);
	nfs4_put_client(clp);
	return 0;
out_error:
	printk(KERN_WARNING "Error: state recovery failed on NFSv4 server %u.%u.%u.%u with error %d\n",
			NIPQUAD(clp->cl_addr.s_addr), -status);
	goto out;
}