/*
 * Client-side XDR for NFSv4.
 *
 * Copyright (c) 2002 The Regents of the University of Michigan.
 *
 * Kendrick Smith <kmsmith@umich.edu>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
 * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
 * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Implementation of the NFSv4 state model.  For the time being,
 * this is minimal, but will be made much more complex in a
 * subsequent patch.
 */
#include <linux/config.h>
#include <linux/slab.h>
#include <linux/smp_lock.h>
#include <linux/nfs_fs.h>
#include <linux/nfs_idmap.h>
#include <linux/workqueue.h>
#include <linux/bitops.h>

#include "delegation.h"

#define OPENOWNER_POOL_SIZE	8
const nfs4_stateid zero_stateid;

static DEFINE_SPINLOCK(state_spinlock);
static LIST_HEAD(nfs4_clientid_list);

static void nfs4_recover_state(void *);
void
init_nfsv4_state(struct nfs_server *server)
{
	server->nfs4_state = NULL;
	INIT_LIST_HEAD(&server->nfs4_siblings);
}
void
destroy_nfsv4_state(struct nfs_server *server)
{
	if (server->mnt_path) {
		kfree(server->mnt_path);
		server->mnt_path = NULL;
	}
	if (server->nfs4_state) {
		nfs4_put_client(server->nfs4_state);
		server->nfs4_state = NULL;
	}
}
/*
 * nfs4_get_client(): returns an empty client structure
 * nfs4_put_client(): drops reference to client structure
 *
 * Since these are allocated/deallocated very rarely, we don't
 * bother putting them in a slab cache...
 */
static struct nfs4_client *
nfs4_alloc_client(struct in_addr *addr)
{
	struct nfs4_client *clp;

	if (nfs_callback_up() < 0)
		return NULL;
	if ((clp = kmalloc(sizeof(*clp), GFP_KERNEL)) == NULL) {
		nfs_callback_down();
		return NULL;
	}
	memset(clp, 0, sizeof(*clp));
	memcpy(&clp->cl_addr, addr, sizeof(clp->cl_addr));
	init_rwsem(&clp->cl_sem);
	INIT_LIST_HEAD(&clp->cl_delegations);
	INIT_LIST_HEAD(&clp->cl_state_owners);
	INIT_LIST_HEAD(&clp->cl_unused);
	spin_lock_init(&clp->cl_lock);
	atomic_set(&clp->cl_count, 1);
	INIT_WORK(&clp->cl_recoverd, nfs4_recover_state, clp);
	INIT_WORK(&clp->cl_renewd, nfs4_renew_state, clp);
	INIT_LIST_HEAD(&clp->cl_superblocks);
	init_waitqueue_head(&clp->cl_waitq);
	rpc_init_wait_queue(&clp->cl_rpcwaitq, "NFS4 client");
	clp->cl_rpcclient = ERR_PTR(-EINVAL);
	clp->cl_boot_time = CURRENT_TIME;
	clp->cl_state = 1 << NFS4CLNT_OK;
	return clp;
}
static void
nfs4_free_client(struct nfs4_client *clp)
{
	struct nfs4_state_owner *sp;

	while (!list_empty(&clp->cl_unused)) {
		sp = list_entry(clp->cl_unused.next,
				struct nfs4_state_owner,
				so_list);
		list_del(&sp->so_list);
		kfree(sp);
	}
	BUG_ON(!list_empty(&clp->cl_state_owners));
	if (clp->cl_cred)
		put_rpccred(clp->cl_cred);
	nfs_idmap_delete(clp);
	if (!IS_ERR(clp->cl_rpcclient))
		rpc_shutdown_client(clp->cl_rpcclient);
	kfree(clp);
	nfs_callback_down();
}
static struct nfs4_client *__nfs4_find_client(struct in_addr *addr)
{
	struct nfs4_client *clp;

	list_for_each_entry(clp, &nfs4_clientid_list, cl_servers) {
		if (memcmp(&clp->cl_addr, addr, sizeof(clp->cl_addr)) == 0) {
			atomic_inc(&clp->cl_count);
			return clp;
		}
	}
	return NULL;
}
struct nfs4_client *nfs4_find_client(struct in_addr *addr)
{
	struct nfs4_client *clp;

	spin_lock(&state_spinlock);
	clp = __nfs4_find_client(addr);
	spin_unlock(&state_spinlock);
	return clp;
}
struct nfs4_client *
nfs4_get_client(struct in_addr *addr)
{
	struct nfs4_client *clp, *new = NULL;

	spin_lock(&state_spinlock);
	for (;;) {
		clp = __nfs4_find_client(addr);
		if (clp != NULL)
			break;
		clp = new;
		if (clp != NULL) {
			list_add(&clp->cl_servers, &nfs4_clientid_list);
			new = NULL;
			break;
		}
		spin_unlock(&state_spinlock);
		new = nfs4_alloc_client(addr);
		spin_lock(&state_spinlock);
		if (new == NULL)
			break;
	}
	spin_unlock(&state_spinlock);
	if (new)
		nfs4_free_client(new);
	return clp;
}
void
nfs4_put_client(struct nfs4_client *clp)
{
	if (!atomic_dec_and_lock(&clp->cl_count, &state_spinlock))
		return;
	list_del(&clp->cl_servers);
	spin_unlock(&state_spinlock);
	BUG_ON(!list_empty(&clp->cl_superblocks));
	wake_up_all(&clp->cl_waitq);
	rpc_wake_up(&clp->cl_rpcwaitq);
	nfs4_kill_renewd(clp);
	nfs4_free_client(clp);
}
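/*
 * Illustrative sketch (not part of the original code): the reference
 * counting contract of nfs4_get_client()/nfs4_put_client() as seen by a
 * hypothetical mount-time caller.  The name "example_server_init" is an
 * assumption made up for this example only.
 *
 *	static int example_server_init(struct nfs_server *server,
 *				       struct in_addr *addr)
 *	{
 *		struct nfs4_client *clp;
 *
 *		clp = nfs4_get_client(addr);	// find or allocate; takes a ref
 *		if (clp == NULL)
 *			return -ENOMEM;
 *		server->nfs4_state = clp;	// reference now owned by server
 *		return 0;
 *	}
 *
 * destroy_nfsv4_state() above later drops that reference through
 * nfs4_put_client(), which unhashes and frees the client once cl_count
 * reaches zero.
 */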
static int __nfs4_init_client(struct nfs4_client *clp)
{
	int status = nfs4_proc_setclientid(clp, NFS4_CALLBACK, nfs_callback_tcpport);
	if (status == 0)
		status = nfs4_proc_setclientid_confirm(clp);
	if (status == 0)
		nfs4_schedule_state_renewal(clp);
	return status;
}
int nfs4_init_client(struct nfs4_client *clp)
{
	return nfs4_map_errors(__nfs4_init_client(clp));
}
u32
nfs4_alloc_lockowner_id(struct nfs4_client *clp)
{
	return clp->cl_lockowner_id++;
}
static struct nfs4_state_owner *
nfs4_client_grab_unused(struct nfs4_client *clp, struct rpc_cred *cred)
{
	struct nfs4_state_owner *sp = NULL;

	if (!list_empty(&clp->cl_unused)) {
		sp = list_entry(clp->cl_unused.next, struct nfs4_state_owner, so_list);
		atomic_inc(&sp->so_count);
		sp->so_cred = cred;
		list_move(&sp->so_list, &clp->cl_state_owners);
		clp->cl_nunused--;
	}
	return sp;
}
static struct nfs4_state_owner *
nfs4_find_state_owner(struct nfs4_client *clp, struct rpc_cred *cred)
{
	struct nfs4_state_owner *sp, *res = NULL;

	list_for_each_entry(sp, &clp->cl_state_owners, so_list) {
		if (sp->so_cred != cred)
			continue;
		atomic_inc(&sp->so_count);
		/* Move to the head of the list */
		list_move(&sp->so_list, &clp->cl_state_owners);
		res = sp;
		break;
	}
	return res;
}
/*
 * nfs4_alloc_state_owner(): this is called on the OPEN or CREATE path to
 * create a new state_owner.
 */
static struct nfs4_state_owner *
nfs4_alloc_state_owner(void)
{
	struct nfs4_state_owner *sp;

	sp = kmalloc(sizeof(*sp), GFP_KERNEL);
	if (!sp)
		return NULL;
	init_MUTEX(&sp->so_sema);
	sp->so_seqid = 0;		/* arbitrary */
	INIT_LIST_HEAD(&sp->so_states);
	INIT_LIST_HEAD(&sp->so_delegations);
	atomic_set(&sp->so_count, 1);
	return sp;
}
static void
nfs4_drop_state_owner(struct nfs4_state_owner *sp)
{
	struct nfs4_client *clp = sp->so_client;

	spin_lock(&clp->cl_lock);
	list_del_init(&sp->so_list);
	spin_unlock(&clp->cl_lock);
}
/*
 * Note: must be called with clp->cl_sem held in order to prevent races
 *       with reboot recovery!
 */
struct nfs4_state_owner *nfs4_get_state_owner(struct nfs_server *server, struct rpc_cred *cred)
{
	struct nfs4_client *clp = server->nfs4_state;
	struct nfs4_state_owner *sp, *new;

	get_rpccred(cred);
	new = nfs4_alloc_state_owner();
	spin_lock(&clp->cl_lock);
	sp = nfs4_find_state_owner(clp, cred);
	if (sp == NULL)
		sp = nfs4_client_grab_unused(clp, cred);
	if (sp == NULL && new != NULL) {
		list_add(&new->so_list, &clp->cl_state_owners);
		new->so_client = clp;
		new->so_id = nfs4_alloc_lockowner_id(clp);
		new->so_cred = cred;
		sp = new;
		new = NULL;
	}
	spin_unlock(&clp->cl_lock);
	if (new)
		kfree(new);
	if (sp != NULL)
		return sp;
	put_rpccred(cred);
	return NULL;
}
/*
 * Must be called with clp->cl_sem held in order to avoid races
 * with state recovery...
 */
void nfs4_put_state_owner(struct nfs4_state_owner *sp)
{
	struct nfs4_client *clp = sp->so_client;
	struct rpc_cred *cred = sp->so_cred;

	if (!atomic_dec_and_lock(&sp->so_count, &clp->cl_lock))
		return;
	if (clp->cl_nunused >= OPENOWNER_POOL_SIZE)
		goto out_free;
	if (list_empty(&sp->so_list))
		goto out_free;
	list_move(&sp->so_list, &clp->cl_unused);
	clp->cl_nunused++;
	spin_unlock(&clp->cl_lock);
	put_rpccred(cred);
	return;
out_free:
	list_del(&sp->so_list);
	spin_unlock(&clp->cl_lock);
	put_rpccred(cred);
	kfree(sp);
}
static struct nfs4_state *
nfs4_alloc_open_state(void)
{
	struct nfs4_state *state;

	state = kmalloc(sizeof(*state), GFP_KERNEL);
	if (!state)
		return NULL;
	state->state = 0;
	state->nreaders = 0;
	state->nwriters = 0;
	state->flags = 0;
	memset(state->stateid.data, 0, sizeof(state->stateid.data));
	atomic_set(&state->count, 1);
	INIT_LIST_HEAD(&state->lock_states);
	init_MUTEX(&state->lock_sema);
	spin_lock_init(&state->state_lock);
	return state;
}
static struct nfs4_state *
__nfs4_find_state(struct inode *inode, struct rpc_cred *cred, mode_t mode)
{
	struct nfs_inode *nfsi = NFS_I(inode);
	struct nfs4_state *state;

	mode &= (FMODE_READ|FMODE_WRITE);
	list_for_each_entry(state, &nfsi->open_states, inode_states) {
		if (state->owner->so_cred != cred)
			continue;
		if ((mode & FMODE_READ) != 0 && state->nreaders == 0)
			continue;
		if ((mode & FMODE_WRITE) != 0 && state->nwriters == 0)
			continue;
		if ((state->state & mode) != mode)
			continue;
		atomic_inc(&state->count);
		if (mode & FMODE_READ)
			state->nreaders++;
		if (mode & FMODE_WRITE)
			state->nwriters++;
		return state;
	}
	return NULL;
}
static struct nfs4_state *
__nfs4_find_state_byowner(struct inode *inode, struct nfs4_state_owner *owner)
{
	struct nfs_inode *nfsi = NFS_I(inode);
	struct nfs4_state *state;

	list_for_each_entry(state, &nfsi->open_states, inode_states) {
		/* Is this in the process of being freed? */
		if (state->nreaders == 0 && state->nwriters == 0)
			continue;
		if (state->owner == owner) {
			atomic_inc(&state->count);
			return state;
		}
	}
	return NULL;
}
struct nfs4_state *
nfs4_find_state(struct inode *inode, struct rpc_cred *cred, mode_t mode)
{
	struct nfs4_state *state;

	spin_lock(&inode->i_lock);
	state = __nfs4_find_state(inode, cred, mode);
	spin_unlock(&inode->i_lock);
	return state;
}
static void
nfs4_free_open_state(struct nfs4_state *state)
{
	kfree(state);
}
struct nfs4_state *
nfs4_get_open_state(struct inode *inode, struct nfs4_state_owner *owner)
{
	struct nfs4_state *state, *new;
	struct nfs_inode *nfsi = NFS_I(inode);

	spin_lock(&inode->i_lock);
	state = __nfs4_find_state_byowner(inode, owner);
	spin_unlock(&inode->i_lock);
	if (state)
		goto out;
	new = nfs4_alloc_open_state();
	spin_lock(&inode->i_lock);
	state = __nfs4_find_state_byowner(inode, owner);
	if (state == NULL && new != NULL) {
		state = new;
		/* Caller *must* be holding owner->so_sem */
		/* Note: The reclaim code dictates that we add stateless
		 * and read-only stateids to the end of the list */
		list_add_tail(&state->open_states, &owner->so_states);
		state->owner = owner;
		atomic_inc(&owner->so_count);
		list_add(&state->inode_states, &nfsi->open_states);
		state->inode = igrab(inode);
		spin_unlock(&inode->i_lock);
	} else {
		spin_unlock(&inode->i_lock);
		if (new)
			nfs4_free_open_state(new);
	}
out:
	return state;
}
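/*
 * Illustrative sketch (not part of the original code): how an OPEN path
 * is expected to combine nfs4_get_state_owner() and nfs4_get_open_state()
 * under the locking rules noted above.  Error handling is omitted.
 *
 *	down_read(&clp->cl_sem);
 *	sp = nfs4_get_state_owner(server, cred);
 *	down(&sp->so_sema);
 *	state = nfs4_get_open_state(inode, sp);	// caller holds sp->so_sema
 *	... send the OPEN and record the returned stateid in state ...
 *	up(&sp->so_sema);
 *	nfs4_put_state_owner(sp);	// state keeps its own so_count reference
 *	up_read(&clp->cl_sem);
 */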
/*
 * Beware! Caller must be holding exactly one
 * reference to clp->cl_sem and owner->so_sema!
 */
void nfs4_put_open_state(struct nfs4_state *state)
{
	struct inode *inode = state->inode;
	struct nfs4_state_owner *owner = state->owner;

	if (!atomic_dec_and_lock(&state->count, &inode->i_lock))
		return;
	if (!list_empty(&state->inode_states))
		list_del(&state->inode_states);
	spin_unlock(&inode->i_lock);
	list_del(&state->open_states);
	iput(inode);
	BUG_ON(state->state != 0);
	nfs4_free_open_state(state);
	nfs4_put_state_owner(owner);
}
/*
 * Beware! Caller must be holding no references to clp->cl_sem!
 */
void nfs4_close_state(struct nfs4_state *state, mode_t mode)
{
	struct inode *inode = state->inode;
	struct nfs4_state_owner *owner = state->owner;
	struct nfs4_client *clp = owner->so_client;
	int newstate = 0;

	atomic_inc(&owner->so_count);
	down_read(&clp->cl_sem);
	down(&owner->so_sema);
	/* Protect against nfs4_find_state() */
	spin_lock(&inode->i_lock);
	if (mode & FMODE_READ)
		state->nreaders--;
	if (mode & FMODE_WRITE)
		state->nwriters--;
	if (state->nwriters == 0) {
		if (state->nreaders == 0)
			list_del_init(&state->inode_states);
		/* See reclaim code */
		list_move_tail(&state->open_states, &owner->so_states);
	}
	spin_unlock(&inode->i_lock);
	if (state->state != 0) {
		if (state->nreaders)
			newstate |= FMODE_READ;
		if (state->nwriters)
			newstate |= FMODE_WRITE;
		if (state->state == newstate)
			goto out;
		if (nfs4_do_close(inode, state, newstate) == -EINPROGRESS)
			return;
	}
out:
	nfs4_put_open_state(state);
	up(&owner->so_sema);
	nfs4_put_state_owner(owner);
	up_read(&clp->cl_sem);
}
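/*
 * Illustrative sketch (not part of the original code): the release side of
 * the open state life cycle.  A hypothetical caller that had the file open
 * for reading simply does
 *
 *	nfs4_close_state(state, FMODE_READ);
 *
 * with no locks held, as the "Beware!" note above requires:
 * nfs4_close_state() takes clp->cl_sem and owner->so_sema itself, issues a
 * CLOSE or OPEN_DOWNGRADE through nfs4_do_close() when the remaining share
 * modes change, and drops the state and state_owner references via
 * nfs4_put_open_state() and nfs4_put_state_owner().
 */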
/*
 * Search the state->lock_states for an existing lock_owner
 * that is compatible with current->files
 */
static struct nfs4_lock_state *
__nfs4_find_lock_state(struct nfs4_state *state, fl_owner_t fl_owner)
{
	struct nfs4_lock_state *pos;

	list_for_each_entry(pos, &state->lock_states, ls_locks) {
		if (pos->ls_owner != fl_owner)
			continue;
		atomic_inc(&pos->ls_count);
		return pos;
	}
	return NULL;
}
/*
 * Return a compatible lock_state. If no initialized lock_state structure
 * exists, return an uninitialized one.
 *
 * The caller must be holding state->lock_sema
 */
static struct nfs4_lock_state *nfs4_alloc_lock_state(struct nfs4_state *state, fl_owner_t fl_owner)
{
	struct nfs4_lock_state *lsp;
	struct nfs4_client *clp = state->owner->so_client;

	lsp = kmalloc(sizeof(*lsp), GFP_KERNEL);
	if (lsp == NULL)
		return NULL;
	lsp->ls_flags = 0;
	lsp->ls_seqid = 0;	/* arbitrary */
	memset(lsp->ls_stateid.data, 0, sizeof(lsp->ls_stateid.data));
	atomic_set(&lsp->ls_count, 1);
	lsp->ls_owner = fl_owner;
	spin_lock(&clp->cl_lock);
	lsp->ls_id = nfs4_alloc_lockowner_id(clp);
	spin_unlock(&clp->cl_lock);
	INIT_LIST_HEAD(&lsp->ls_locks);
	return lsp;
}
/*
 * Return a compatible lock_state. If no initialized lock_state structure
 * exists, return an uninitialized one.
 *
 * The caller must be holding state->lock_sema and clp->cl_sem
 */
static struct nfs4_lock_state *nfs4_get_lock_state(struct nfs4_state *state, fl_owner_t owner)
{
	struct nfs4_lock_state *lsp, *new = NULL;

	for (;;) {
		spin_lock(&state->state_lock);
		lsp = __nfs4_find_lock_state(state, owner);
		if (lsp != NULL)
			break;
		if (new != NULL) {
			new->ls_state = state;
			list_add(&new->ls_locks, &state->lock_states);
			set_bit(LK_STATE_IN_USE, &state->flags);
			lsp = new;
			new = NULL;
			break;
		}
		spin_unlock(&state->state_lock);
		new = nfs4_alloc_lock_state(state, owner);
		if (new == NULL)
			return NULL;
	}
	spin_unlock(&state->state_lock);
	kfree(new);
	return lsp;
}
/*
 * Release reference to lock_state, and free it if we see that
 * it is no longer in use
 */
static void nfs4_put_lock_state(struct nfs4_lock_state *lsp)
{
	struct nfs4_state *state;

	if (lsp == NULL)
		return;
	state = lsp->ls_state;
	if (!atomic_dec_and_lock(&lsp->ls_count, &state->state_lock))
		return;
	list_del(&lsp->ls_locks);
	if (list_empty(&state->lock_states))
		clear_bit(LK_STATE_IN_USE, &state->flags);
	spin_unlock(&state->state_lock);
	kfree(lsp);
}
static void nfs4_fl_copy_lock(struct file_lock *dst, struct file_lock *src)
{
	struct nfs4_lock_state *lsp = src->fl_u.nfs4_fl.owner;

	dst->fl_u.nfs4_fl.owner = lsp;
	atomic_inc(&lsp->ls_count);
}
static void nfs4_fl_release_lock(struct file_lock *fl)
{
	nfs4_put_lock_state(fl->fl_u.nfs4_fl.owner);
}
static struct file_lock_operations nfs4_fl_lock_ops = {
	.fl_copy_lock = nfs4_fl_copy_lock,
	.fl_release_private = nfs4_fl_release_lock,
};
int nfs4_set_lock_state(struct nfs4_state *state, struct file_lock *fl)
{
	struct nfs4_lock_state *lsp;

	if (fl->fl_ops != NULL)
		return 0;
	lsp = nfs4_get_lock_state(state, fl->fl_owner);
	if (lsp == NULL)
		return -ENOMEM;
	fl->fl_u.nfs4_fl.owner = lsp;
	fl->fl_ops = &nfs4_fl_lock_ops;
	return 0;
}
/*
 * Byte-range lock aware utility to initialize the stateid of read/write
 * requests.
 */
void nfs4_copy_stateid(nfs4_stateid *dst, struct nfs4_state *state, fl_owner_t fl_owner)
{
	struct nfs4_lock_state *lsp;

	memcpy(dst, &state->stateid, sizeof(*dst));
	if (test_bit(LK_STATE_IN_USE, &state->flags) == 0)
		return;

	spin_lock(&state->state_lock);
	lsp = __nfs4_find_lock_state(state, fl_owner);
	if (lsp != NULL && (lsp->ls_flags & NFS_LOCK_INITIALIZED) != 0)
		memcpy(dst, &lsp->ls_stateid, sizeof(*dst));
	spin_unlock(&state->state_lock);
	nfs4_put_lock_state(lsp);
}
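/*
 * Illustrative sketch (not part of the original code): how a read or
 * write path is expected to select the stateid for a request.  The
 * fl_owner_t argument is the lock owner, conventionally current->files
 * for POSIX locks; "ctx" stands for the caller's struct nfs_open_context
 * and is only an assumption for this example.
 *
 *	nfs4_stateid stateid;
 *
 *	nfs4_copy_stateid(&stateid, ctx->state, current->files);
 *	// stateid is now the lock stateid if this owner holds initialized
 *	// byte-range lock state on the file, otherwise the open stateid.
 */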
/*
 * Called with state->lock_sema and clp->cl_sem held.
 */
void nfs4_increment_lock_seqid(int status, struct nfs4_lock_state *lsp)
{
	if (status == NFS_OK || seqid_mutating_err(-status))
		lsp->ls_seqid++;
}
/*
 * Called with sp->so_sema and clp->cl_sem held.
 *
 * Increment the seqid if the OPEN/OPEN_DOWNGRADE/CLOSE succeeded, or
 * failed with a seqid incrementing error -
 * see comments nfs_fs.h:seqid_mutating_error()
 */
void nfs4_increment_seqid(int status, struct nfs4_state_owner *sp)
{
	if (status == NFS_OK || seqid_mutating_err(-status))
		sp->so_seqid++;
	/* If the server returns BAD_SEQID, unhash state_owner here */
	if (status == -NFS4ERR_BAD_SEQID)
		nfs4_drop_state_owner(sp);
}
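/*
 * Illustrative sketch (not part of the original code): a caller updating
 * the open owner's seqid after an OPEN/OPEN_DOWNGRADE/CLOSE reply, per
 * the rule documented above (bump on success and on seqid-mutating
 * errors).
 *
 *	status = task->tk_status;		// result of the OPEN call
 *	nfs4_increment_seqid(status, sp);	// bumps sp->so_seqid when appropriate;
 *						// also unhashes sp on -NFS4ERR_BAD_SEQID
 */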
static int reclaimer(void *);
struct reclaimer_args {
	struct nfs4_client *clp;
	struct completion complete;
};
/*
 * State recovery routine
 */
static void
nfs4_recover_state(void *data)
{
	struct nfs4_client *clp = (struct nfs4_client *)data;
	struct reclaimer_args args = {
		.clp = clp,
	};

	init_completion(&args.complete);

	if (kernel_thread(reclaimer, &args, CLONE_KERNEL) < 0)
		goto out_failed_clear;
	wait_for_completion(&args.complete);
	return;
out_failed_clear:
	set_bit(NFS4CLNT_OK, &clp->cl_state);
	wake_up_all(&clp->cl_waitq);
	rpc_wake_up(&clp->cl_rpcwaitq);
}
/*
 * Schedule a state recovery attempt
 */
void
nfs4_schedule_state_recovery(struct nfs4_client *clp)
{
	if (!clp)
		return;
	if (test_and_clear_bit(NFS4CLNT_OK, &clp->cl_state))
		schedule_work(&clp->cl_recoverd);
}
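/*
 * Illustrative sketch (not part of the original code): an RPC error
 * handler kicking off recovery when the server indicates that client
 * state has been lost.
 *
 *	switch (task->tk_status) {
 *	case -NFS4ERR_STALE_CLIENTID:
 *	case -NFS4ERR_STALE_STATEID:
 *	case -NFS4ERR_EXPIRED:
 *		nfs4_schedule_state_recovery(clp);	// queues nfs4_recover_state()
 *		break;
 *	}
 *
 * The test_and_clear_bit() on NFS4CLNT_OK above ensures that only one
 * recovery pass is queued at a time; the bit is set again by the
 * reclaimer (or by the failure path in nfs4_recover_state()) when
 * recovery finishes.
 */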
static int nfs4_reclaim_locks(struct nfs4_state_recovery_ops *ops, struct nfs4_state *state)
{
	struct inode *inode = state->inode;
	struct file_lock *fl;
	int status = 0;

	for (fl = inode->i_flock; fl != NULL; fl = fl->fl_next) {
		if (!(fl->fl_flags & FL_POSIX))
			continue;
		if (((struct nfs_open_context *)fl->fl_file->private_data)->state != state)
			continue;
		status = ops->recover_lock(state, fl);
		if (status >= 0)
			continue;
		switch (status) {
			default:
				printk(KERN_ERR "%s: unhandled error %d. Zeroing state\n",
						__FUNCTION__, status);
			case -NFS4ERR_EXPIRED:
			case -NFS4ERR_NO_GRACE:
			case -NFS4ERR_RECLAIM_BAD:
			case -NFS4ERR_RECLAIM_CONFLICT:
				/* kill_proc(fl->fl_owner, SIGLOST, 1); */
				break;
			case -NFS4ERR_STALE_CLIENTID:
				goto out_err;
		}
	}
	return 0;
out_err:
	return status;
}
static int nfs4_reclaim_open_state(struct nfs4_state_recovery_ops *ops, struct nfs4_state_owner *sp)
{
	struct nfs4_state *state;
	struct nfs4_lock_state *lock;
	int status = 0;

	/* Note: we rely on the sp->so_states list being ordered
	 * so that we always reclaim open(O_RDWR) and/or open(O_WRITE)
	 * states first.
	 * This is needed to ensure that the server won't give us any
	 * read delegations that we have to return if, say, we are
	 * recovering after a network partition or a reboot from a
	 * server that doesn't support a grace period.
	 */
	list_for_each_entry(state, &sp->so_states, open_states) {
		if (state->state == 0)
			continue;
		status = ops->recover_open(sp, state);
		list_for_each_entry(lock, &state->lock_states, ls_locks)
			lock->ls_flags &= ~NFS_LOCK_INITIALIZED;
		if (status >= 0) {
			status = nfs4_reclaim_locks(ops, state);
			if (status < 0)
				goto out_err;
			list_for_each_entry(lock, &state->lock_states, ls_locks) {
				if (!(lock->ls_flags & NFS_LOCK_INITIALIZED))
					printk("%s: Lock reclaim failed!\n",
							__FUNCTION__);
			}
			continue;
		}
		switch (status) {
			default:
				printk(KERN_ERR "%s: unhandled error %d. Zeroing state\n",
						__FUNCTION__, status);
			case -NFS4ERR_RECLAIM_BAD:
			case -NFS4ERR_RECLAIM_CONFLICT:
				/*
				 * Open state on this file cannot be recovered
				 * All we can do is revert to using the zero stateid.
				 */
				memset(state->stateid.data, 0,
					sizeof(state->stateid.data));
				/* Mark the file as being 'closed' */
				state->state = 0;
				break;
			case -NFS4ERR_EXPIRED:
			case -NFS4ERR_NO_GRACE:
			case -NFS4ERR_STALE_CLIENTID:
				goto out_err;
		}
	}
	return 0;
out_err:
	return status;
}
static int reclaimer(void *ptr)
{
	struct reclaimer_args *args = (struct reclaimer_args *)ptr;
	struct nfs4_client *clp = args->clp;
	struct nfs4_state_owner *sp;
	struct nfs4_state_recovery_ops *ops;
	int status = 0;

	daemonize("%u.%u.%u.%u-reclaim", NIPQUAD(clp->cl_addr));
	allow_signal(SIGKILL);

	atomic_inc(&clp->cl_count);
	complete(&args->complete);

	/* Ensure exclusive access to NFSv4 state */
	down_write(&clp->cl_sem);
	/* Are there any NFS mounts out there? */
	if (list_empty(&clp->cl_superblocks))
		goto out;
restart_loop:
	status = nfs4_proc_renew(clp);
	switch (status) {
		case 0:
		case -NFS4ERR_CB_PATH_DOWN:
			goto out;
		case -NFS4ERR_STALE_CLIENTID:
		case -NFS4ERR_LEASE_MOVED:
			ops = &nfs4_reboot_recovery_ops;
			break;
		default:
			ops = &nfs4_network_partition_recovery_ops;
	}
	status = __nfs4_init_client(clp);
	if (status)
		goto out_error;
	/* Mark all delegations for reclaim */
	nfs_delegation_mark_reclaim(clp);
	/* Note: list is protected by exclusive lock on cl->cl_sem */
	list_for_each_entry(sp, &clp->cl_state_owners, so_list) {
		status = nfs4_reclaim_open_state(ops, sp);
		if (status < 0) {
			if (status == -NFS4ERR_NO_GRACE) {
				ops = &nfs4_network_partition_recovery_ops;
				status = nfs4_reclaim_open_state(ops, sp);
			}
			if (status == -NFS4ERR_STALE_CLIENTID)
				goto restart_loop;
			if (status == -NFS4ERR_EXPIRED)
				goto restart_loop;
		}
	}
	nfs_delegation_reap_unclaimed(clp);
out:
	set_bit(NFS4CLNT_OK, &clp->cl_state);
	up_write(&clp->cl_sem);
	wake_up_all(&clp->cl_waitq);
	rpc_wake_up(&clp->cl_rpcwaitq);
	if (status == -NFS4ERR_CB_PATH_DOWN)
		nfs_handle_cb_pathdown(clp);
	nfs4_put_client(clp);
	return 0;
out_error:
	printk(KERN_WARNING "Error: state recovery failed on NFSv4 server %u.%u.%u.%u with error %d\n",
			NIPQUAD(clp->cl_addr.s_addr), -status);
	goto out;
}