/*
 * linux/fs/lockd/clntproc.c
 *
 * RPC procedures for the client side NLM implementation
 *
 * Copyright (C) 1996, Olaf Kirch <okir@monad.swb.de>
 */
#include <linux/module.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/fs.h>
#include <linux/nfs_fs.h>
#include <linux/utsname.h>
#include <linux/freezer.h>
#include <linux/sunrpc/clnt.h>
#include <linux/sunrpc/svc.h>
#include <linux/lockd/lockd.h>
#include <linux/lockd/sm_inter.h>
#define NLMDBG_FACILITY		NLMDBG_CLIENT
#define NLMCLNT_GRACE_WAIT	(5*HZ)
#define NLMCLNT_POLL_TIMEOUT	(30*HZ)
#define NLMCLNT_MAX_RETRIES	3
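/*
 * NLMCLNT_GRACE_WAIT is the back-off used while the server is in its
 * reboot grace period; NLMCLNT_POLL_TIMEOUT bounds how long we wait for
 * a GRANTED callback before re-polling a blocked lock (see nlmclnt_lock
 * below); NLMCLNT_MAX_RETRIES caps CANCEL retries in
 * nlmclnt_cancel_callback.
 */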
static int	nlmclnt_test(struct nlm_rqst *, struct file_lock *);
static int	nlmclnt_lock(struct nlm_rqst *, struct file_lock *);
static int	nlmclnt_unlock(struct nlm_rqst *, struct file_lock *);
static int	nlm_stat_to_errno(__be32 stat);
static void	nlmclnt_locks_init_private(struct file_lock *fl, struct nlm_host *host);
static int	nlmclnt_cancel(struct nlm_host *, int, struct file_lock *);

static const struct rpc_call_ops nlmclnt_unlock_ops;
static const struct rpc_call_ops nlmclnt_cancel_ops;
/*
 * Cookie counter for NLM requests
 */
static atomic_t	nlm_cookie = ATOMIC_INIT(0x1234);

void nlmclnt_next_cookie(struct nlm_cookie *c)
{
	u32	cookie = atomic_inc_return(&nlm_cookie);

	memcpy(c->data, &cookie, 4);
	c->len = 4;
}
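/*
 * Lock owner bookkeeping: each (host, fl_owner_t) pair maps to a
 * reference-counted nlm_lockowner whose 32-bit pid identifies that owner
 * in the NLM requests sent to this host.
 */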
static struct nlm_lockowner *nlm_get_lockowner(struct nlm_lockowner *lockowner)
{
	atomic_inc(&lockowner->count);
	return lockowner;
}
static void nlm_put_lockowner(struct nlm_lockowner *lockowner)
{
	if (!atomic_dec_and_lock(&lockowner->count, &lockowner->host->h_lock))
		return;
	list_del(&lockowner->list);
	spin_unlock(&lockowner->host->h_lock);
	nlm_release_host(lockowner->host);
	kfree(lockowner);
}
static inline int nlm_pidbusy(struct nlm_host *host, uint32_t pid)
{
	struct nlm_lockowner *lockowner;

	list_for_each_entry(lockowner, &host->h_lockowners, list) {
		if (lockowner->pid == pid)
			return -EBUSY;
	}
	return 0;
}
static inline uint32_t __nlm_alloc_pid(struct nlm_host *host)
{
	uint32_t res;

	do {
		res = host->h_pidcount++;
	} while (nlm_pidbusy(host, res) < 0);
	return res;
}
static struct nlm_lockowner *__nlm_find_lockowner(struct nlm_host *host, fl_owner_t owner)
{
	struct nlm_lockowner *lockowner;

	list_for_each_entry(lockowner, &host->h_lockowners, list) {
		if (lockowner->owner != owner)
			continue;
		return nlm_get_lockowner(lockowner);
	}
	return NULL;
}
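/*
 * Look up (or create) the lockowner for this fl_owner_t. The allocation
 * is done with h_lock dropped, so the list is searched again after the
 * lock is re-taken to cope with a concurrent insertion.
 */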
static struct nlm_lockowner *nlm_find_lockowner(struct nlm_host *host, fl_owner_t owner)
{
	struct nlm_lockowner *res, *new = NULL;

	spin_lock(&host->h_lock);
	res = __nlm_find_lockowner(host, owner);
	if (res == NULL) {
		spin_unlock(&host->h_lock);
		new = kmalloc(sizeof(*new), GFP_KERNEL);
		spin_lock(&host->h_lock);
		res = __nlm_find_lockowner(host, owner);
		if (res == NULL && new != NULL) {
			res = new;
			atomic_set(&new->count, 1);
			new->owner = owner;
			new->pid = __nlm_alloc_pid(host);
			new->host = nlm_get_host(host);
			list_add(&new->list, &host->h_lockowners);
			new = NULL;
		}
	}
	spin_unlock(&host->h_lock);
	kfree(new);
	return res;
}
/*
 * Initialize arguments for TEST/LOCK/UNLOCK/CANCEL calls
 */
static void nlmclnt_setlockargs(struct nlm_rqst *req, struct file_lock *fl)
{
	struct nlm_args	*argp = &req->a_args;
	struct nlm_lock	*lock = &argp->lock;

	nlmclnt_next_cookie(&argp->cookie);
	argp->state = nsm_local_state;
	memcpy(&lock->fh, NFS_FH(fl->fl_file->f_path.dentry->d_inode), sizeof(struct nfs_fh));
	lock->caller  = utsname()->nodename;
	lock->oh.data = req->a_owner;
	lock->oh.len  = snprintf(req->a_owner, sizeof(req->a_owner), "%u@%s",
				(unsigned int)fl->fl_u.nfs_fl.owner->pid,
				utsname()->nodename);
	lock->svid = fl->fl_u.nfs_fl.owner->pid;
	lock->fl.fl_start = fl->fl_start;
	lock->fl.fl_end = fl->fl_end;
	lock->fl.fl_type = fl->fl_type;
}
static void nlmclnt_release_lockargs(struct nlm_rqst *req)
{
	BUG_ON(req->a_args.lock.fl.fl_ops != NULL);
}
/*
 * This is the main entry point for the NLM client.
 */
int
nlmclnt_proc(struct inode *inode, int cmd, struct file_lock *fl)
{
	struct rpc_clnt		*client = NFS_CLIENT(inode);
	struct sockaddr_in	addr;
	struct nfs_server	*nfssrv = NFS_SERVER(inode);
	struct nlm_host		*host;
	struct nlm_rqst		*call;
	sigset_t		oldset;
	unsigned long		flags;
	int			status, vers;

	vers = (NFS_PROTO(inode)->version == 3) ? 4 : 1;
	if (NFS_PROTO(inode)->version > 3) {
		printk(KERN_NOTICE "NFSv4 file locking not implemented!\n");
		return -ENOLCK;
	}

	rpc_peeraddr(client, (struct sockaddr *) &addr, sizeof(addr));
	host = nlmclnt_lookup_host(&addr, client->cl_xprt->prot, vers,
				   nfssrv->nfs_client->cl_hostname,
				   strlen(nfssrv->nfs_client->cl_hostname));
	if (host == NULL)
		return -ENOLCK;

	call = nlm_alloc_call(host);
	if (call == NULL)
		return -ENOMEM;

	nlmclnt_locks_init_private(fl, host);
	/* Set up the argument struct */
	nlmclnt_setlockargs(call, fl);

	/* Keep the old signal mask */
	spin_lock_irqsave(&current->sighand->siglock, flags);
	oldset = current->blocked;

	/* If we're cleaning up locks because the process is exiting,
	 * perform the RPC call asynchronously. */
	if ((IS_SETLK(cmd) || IS_SETLKW(cmd))
	    && fl->fl_type == F_UNLCK
	    && (current->flags & PF_EXITING)) {
		sigfillset(&current->blocked);	/* Mask all signals */
		recalc_sigpending();

		call->a_flags = RPC_TASK_ASYNC;
	}
	spin_unlock_irqrestore(&current->sighand->siglock, flags);

	if (IS_SETLK(cmd) || IS_SETLKW(cmd)) {
		if (fl->fl_type != F_UNLCK) {
			call->a_args.block = IS_SETLKW(cmd) ? 1 : 0;
			status = nlmclnt_lock(call, fl);
		} else
			status = nlmclnt_unlock(call, fl);
	} else if (IS_GETLK(cmd))
		status = nlmclnt_test(call, fl);
	else
		status = -EINVAL;

	fl->fl_ops->fl_release_private(fl);
	fl->fl_ops = NULL;

	spin_lock_irqsave(&current->sighand->siglock, flags);
	current->blocked = oldset;
	recalc_sigpending();
	spin_unlock_irqrestore(&current->sighand->siglock, flags);

	dprintk("lockd: clnt proc returns %d\n", status);
	return status;
}
EXPORT_SYMBOL(nlmclnt_proc);
/*
 * Allocate an NLM RPC call struct
 *
 * Note: the caller must hold a reference to host. In case of failure,
 * this reference will be released.
 */
struct nlm_rqst *nlm_alloc_call(struct nlm_host *host)
{
	struct nlm_rqst	*call;

	for(;;) {
		call = kzalloc(sizeof(*call), GFP_KERNEL);
		if (call != NULL) {
			locks_init_lock(&call->a_args.lock.fl);
			locks_init_lock(&call->a_res.lock.fl);
			call->a_host = host;
			return call;
		}
		if (signalled())
			break;
		printk("nlm_alloc_call: failed, waiting for memory\n");
		schedule_timeout_interruptible(5*HZ);
	}
	nlm_release_host(host);
	return NULL;
}
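/*
 * Typical caller pattern (a sketch, mirroring nlmclnt_cancel() below):
 *
 *	req = nlm_alloc_call(nlm_get_host(host));
 *	if (!req)
 *		return -ENOMEM;
 *	req->a_flags = RPC_TASK_ASYNC;
 *	nlmclnt_setlockargs(req, fl);
 *	status = nlm_async_call(req, NLMPROC_CANCEL, &nlmclnt_cancel_ops);
 *
 * Synchronous callers release the request with nlm_release_call(); on
 * the async path the rpc_release callback (nlmclnt_rpc_release) does it.
 */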
void nlm_release_call(struct nlm_rqst *call)
{
	nlm_release_host(call->a_host);
	nlmclnt_release_lockargs(call);
	kfree(call);
}
static void nlmclnt_rpc_release(void *data)
{
	return nlm_release_call(data);
}
static int nlm_wait_on_grace(wait_queue_head_t *queue)
{
	DEFINE_WAIT(wait);
	int status = -EINTR;

	prepare_to_wait(queue, &wait, TASK_INTERRUPTIBLE);
	if (!signalled ()) {
		schedule_timeout(NLMCLNT_GRACE_WAIT);
		try_to_freeze();
		if (!signalled ())
			status = 0;
	}
	finish_wait(queue, &wait);
	return status;
}
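/*
 * nlm_wait_on_grace() sleeps for up to NLMCLNT_GRACE_WAIT (or until a
 * signal arrives) and returns 0 if the caller may retry the request,
 * -EINTR if it was interrupted.
 */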
/*
 * Generic NLM call
 */
static int
nlmclnt_call(struct nlm_rqst *req, u32 proc)
{
	struct nlm_host	*host = req->a_host;
	struct rpc_clnt	*clnt;
	struct nlm_args	*argp = &req->a_args;
	struct nlm_res	*resp = &req->a_res;
	struct rpc_message msg = {
		.rpc_argp	= argp,
		.rpc_resp	= resp,
	};
	int		status;

	dprintk("lockd: call procedure %d on %s\n",
			(int)proc, host->h_name);

	do {
		if (host->h_reclaiming && !argp->reclaim)
			goto in_grace_period;

		/* If we have no RPC client yet, create one. */
		if ((clnt = nlm_bind_host(host)) == NULL)
			return -ENOLCK;
		msg.rpc_proc = &clnt->cl_procinfo[proc];

		/* Perform the RPC call. If an error occurs, try again */
		if ((status = rpc_call_sync(clnt, &msg, 0)) < 0) {
			dprintk("lockd: rpc_call returned error %d\n", -status);
			switch (status) {
			case -EPROTONOSUPPORT:
				status = -EINVAL;
				break;
			case -ECONNREFUSED:
			case -ETIMEDOUT:
			case -ENOTCONN:
				nlm_rebind_host(host);
				status = -EAGAIN;
				break;
			case -ERESTARTSYS:
				return signalled () ? -EINTR : status;
			default:
				break;
			}
			break;
		} else
		if (resp->status == nlm_lck_denied_grace_period) {
			dprintk("lockd: server in grace period\n");
			if (argp->reclaim) {
				printk(KERN_WARNING
				     "lockd: spurious grace period reject?!\n");
				return -ENOLCK;
			}
		} else {
			if (!argp->reclaim) {
				/* We appear to be out of the grace period */
				wake_up_all(&host->h_gracewait);
			}
			dprintk("lockd: server returns status %d\n", resp->status);
			return 0;	/* Okay, call complete */
		}

in_grace_period:
		/*
		 * The server has rebooted and appears to be in the grace
		 * period during which locks are only allowed to be
		 * reclaimed.
		 * We can only back off and try again later.
		 */
		status = nlm_wait_on_grace(&host->h_gracewait);
	} while (status == 0);

	return status;
}
/*
 * Generic NLM call, async version.
 */
static int __nlm_async_call(struct nlm_rqst *req, u32 proc, struct rpc_message *msg, const struct rpc_call_ops *tk_ops)
{
	struct nlm_host	*host = req->a_host;
	struct rpc_clnt	*clnt;

	dprintk("lockd: call procedure %d on %s (async)\n",
			(int)proc, host->h_name);

	/* If we have no RPC client yet, create one. */
	clnt = nlm_bind_host(host);
	if (clnt == NULL)
		goto out_err;
	msg->rpc_proc = &clnt->cl_procinfo[proc];

	/* bootstrap and kick off the async RPC call */
	return rpc_call_async(clnt, msg, RPC_TASK_ASYNC, tk_ops, req);

out_err:
	tk_ops->rpc_release(req);
	return -ENOLCK;
}
int nlm_async_call(struct nlm_rqst *req, u32 proc, const struct rpc_call_ops *tk_ops)
{
	struct rpc_message msg = {
		.rpc_argp	= &req->a_args,
		.rpc_resp	= &req->a_res,
	};
	return __nlm_async_call(req, proc, &msg, tk_ops);
}
int nlm_async_reply(struct nlm_rqst *req, u32 proc, const struct rpc_call_ops *tk_ops)
{
	struct rpc_message msg = {
		.rpc_argp	= &req->a_res,
	};
	return __nlm_async_call(req, proc, &msg, tk_ops);
}
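/*
 * Both helpers wrap __nlm_async_call(): nlm_async_call() passes the
 * request's argument and result buffers, while nlm_async_reply() sends
 * only the result structure as the RPC arguments.
 */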
/*
 * TEST for the presence of a conflicting lock
 */
static int
nlmclnt_test(struct nlm_rqst *req, struct file_lock *fl)
{
	int	status;

	status = nlmclnt_call(req, NLMPROC_TEST);
	if (status < 0)
		goto out;

	switch (req->a_res.status) {
		case nlm_granted:
			fl->fl_type = F_UNLCK;
			break;
		case nlm_lck_denied:
			/*
			 * Report the conflicting lock back to the application.
			 */
			fl->fl_start = req->a_res.lock.fl.fl_start;
			fl->fl_end = req->a_res.lock.fl.fl_end;
			fl->fl_type = req->a_res.lock.fl.fl_type;
			break;
		default:
			status = nlm_stat_to_errno(req->a_res.status);
	}
out:
	nlm_release_call(req);
	return status;
}
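/*
 * file_lock_operations for NLM client locks: copies keep a reference to
 * the nlm_lockowner and sit on the host's h_granted list, which the
 * reclaimer walks after a server reboot.
 */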
static void nlmclnt_locks_copy_lock(struct file_lock *new, struct file_lock *fl)
{
	new->fl_u.nfs_fl.state = fl->fl_u.nfs_fl.state;
	new->fl_u.nfs_fl.owner = nlm_get_lockowner(fl->fl_u.nfs_fl.owner);
	list_add_tail(&new->fl_u.nfs_fl.list, &fl->fl_u.nfs_fl.owner->host->h_granted);
}
static void nlmclnt_locks_release_private(struct file_lock *fl)
{
	list_del(&fl->fl_u.nfs_fl.list);
	nlm_put_lockowner(fl->fl_u.nfs_fl.owner);
}
static struct file_lock_operations nlmclnt_lock_ops = {
	.fl_copy_lock = nlmclnt_locks_copy_lock,
	.fl_release_private = nlmclnt_locks_release_private,
};
static void nlmclnt_locks_init_private(struct file_lock *fl, struct nlm_host *host)
{
	BUG_ON(fl->fl_ops != NULL);
	fl->fl_u.nfs_fl.state = 0;
	fl->fl_u.nfs_fl.owner = nlm_find_lockowner(host, fl->fl_owner);
	INIT_LIST_HEAD(&fl->fl_u.nfs_fl.list);
	fl->fl_ops = &nlmclnt_lock_ops;
}
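/*
 * Apply the lock locally through the VFS once the server has granted it,
 * using the POSIX or flock variant depending on fl_flags.
 */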
static int do_vfs_lock(struct file_lock *fl)
{
	int res = 0;

	switch (fl->fl_flags & (FL_POSIX|FL_FLOCK)) {
		case FL_POSIX:
			res = posix_lock_file_wait(fl->fl_file, fl);
			break;
		case FL_FLOCK:
			res = flock_lock_file_wait(fl->fl_file, fl);
			break;
		default:
			BUG();
	}
	return res;
}
/*
 * LOCK: Try to create a lock
 *
 *			Programmer Harassment Alert
 *
 * When given a blocking lock request in a sync RPC call, the HPUX lockd
 * will faithfully return LCK_BLOCKED but never cares to notify us when
 * the lock could be granted. This way, our local process could hang
 * around forever waiting for the callback.
 *
 *  Solution A:	Implement busy-waiting
 *  Solution B: Use the async version of the call (NLM_LOCK_{MSG,RES})
 *
 * For now I am implementing solution A, because I hate the idea of
 * re-implementing lockd for a third time in two months. The async
 * calls shouldn't be too hard to do, however.
 *
 * This is one of the lovely things about standards in the NFS area:
 * they're so soft and squishy you can't really blame HP for doing this.
 */
static int
nlmclnt_lock(struct nlm_rqst *req, struct file_lock *fl)
{
	struct nlm_host	*host = req->a_host;
	struct nlm_res	*resp = &req->a_res;
	struct nlm_wait *block = NULL;
	unsigned char fl_flags = fl->fl_flags;
	int status = -ENOLCK;

	if (nsm_monitor(host) < 0) {
		printk(KERN_NOTICE "lockd: failed to monitor %s\n",
					host->h_name);
		goto out;
	}
	fl->fl_flags |= FL_ACCESS;
	status = do_vfs_lock(fl);
	if (status < 0)
		goto out;

	block = nlmclnt_prepare_block(host, fl);
again:
	for(;;) {
		/* Reboot protection */
		fl->fl_u.nfs_fl.state = host->h_state;
		status = nlmclnt_call(req, NLMPROC_LOCK);
		if (status < 0)
			goto out_unblock;
		if (!req->a_args.block)
			break;
		/* Did a reclaimer thread notify us of a server reboot? */
		if (resp->status == nlm_lck_denied_grace_period)
			continue;
		if (resp->status != nlm_lck_blocked)
			break;
		/* Wait on an NLM blocking lock */
		status = nlmclnt_block(block, req, NLMCLNT_POLL_TIMEOUT);
		/* if we were interrupted. Send a CANCEL request to the server
		 * and exit
		 */
		if (status < 0)
			goto out_unblock;
		if (resp->status != nlm_lck_blocked)
			break;
	}

	if (resp->status == nlm_granted) {
		down_read(&host->h_rwsem);
		/* Check whether or not the server has rebooted */
		if (fl->fl_u.nfs_fl.state != host->h_state) {
			up_read(&host->h_rwsem);
			goto again;
		}
		/* Ensure the resulting lock will get added to granted list */
		fl->fl_flags = fl_flags | FL_SLEEP;
		if (do_vfs_lock(fl) < 0)
			printk(KERN_WARNING "%s: VFS is out of sync with lock manager!\n", __FUNCTION__);
		up_read(&host->h_rwsem);
	}
	status = nlm_stat_to_errno(resp->status);
out_unblock:
	nlmclnt_finish_block(block);
	/* Cancel the blocked request if it is still pending */
	if (resp->status == nlm_lck_blocked)
		nlmclnt_cancel(host, req->a_args.block, fl);
out:
	nlm_release_call(req);
	fl->fl_flags = fl_flags;
	return status;
}
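/*
 * Called by the reclaimer thread after a server reboot: the LOCK request
 * is re-sent with a_args.reclaim set so that the server accepts it
 * during its grace period.
 */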
/*
 * RECLAIM: Try to reclaim a lock
 */
int
nlmclnt_reclaim(struct nlm_host *host, struct file_lock *fl)
{
	struct nlm_rqst reqst, *req;
	int		status;

	req = &reqst;
	memset(req, 0, sizeof(*req));
	locks_init_lock(&req->a_args.lock.fl);
	locks_init_lock(&req->a_res.lock.fl);
	req->a_host  = host;
	req->a_flags = 0;

	/* Set up the argument struct */
	nlmclnt_setlockargs(req, fl);
	req->a_args.reclaim = 1;

	if ((status = nlmclnt_call(req, NLMPROC_LOCK)) >= 0
	 && req->a_res.status == nlm_granted)
		return 0;

	printk(KERN_WARNING "lockd: failed to reclaim lock for pid %d "
				"(errno %d, status %d)\n", fl->fl_pid,
				status, ntohl(req->a_res.status));
	/*
	 * FIXME: This is a serious failure. We can
	 *
	 *  a.	Ignore the problem
	 *  b.	Send the owning process some signal (Linux doesn't have
	 *	SIGLOST, though...)
	 *  c.	Retry the operation
	 *
	 * Until someone comes up with a simple implementation
	 * for b or c, I'll choose option a.
	 */
	return -ENOLCK;
}
/*
 * UNLOCK: remove an existing lock
 */
static int
nlmclnt_unlock(struct nlm_rqst *req, struct file_lock *fl)
{
	struct nlm_host	*host = req->a_host;
	struct nlm_res	*resp = &req->a_res;
	int status = 0;

	/*
	 * Note: the server is supposed to either grant us the unlock
	 * request, or to deny it with NLM_LCK_DENIED_GRACE_PERIOD. In either
	 * case, we want to unlock.
	 */
	fl->fl_flags |= FL_EXISTS;
	down_read(&host->h_rwsem);
	if (do_vfs_lock(fl) == -ENOENT) {
		up_read(&host->h_rwsem);
		goto out;
	}
	up_read(&host->h_rwsem);

	if (req->a_flags & RPC_TASK_ASYNC)
		return nlm_async_call(req, NLMPROC_UNLOCK, &nlmclnt_unlock_ops);

	status = nlmclnt_call(req, NLMPROC_UNLOCK);
	if (status < 0)
		goto out;

	if (resp->status == nlm_granted)
		goto out;

	if (resp->status != nlm_lck_denied_nolocks)
		printk("lockd: unexpected unlock status: %d\n", resp->status);
	/* What to do now? I'm out of my depth... */
	status = -ENOLCK;
out:
	nlm_release_call(req);
	return status;
}
static void nlmclnt_unlock_callback(struct rpc_task *task, void *data)
{
	struct nlm_rqst	*req = data;
	u32 status = ntohl(req->a_res.status);

	if (RPC_ASSASSINATED(task))
		goto die;

	if (task->tk_status < 0) {
		dprintk("lockd: unlock failed (err = %d)\n", -task->tk_status);
		goto retry_rebind;
	}
	if (status == NLM_LCK_DENIED_GRACE_PERIOD) {
		rpc_delay(task, NLMCLNT_GRACE_WAIT);
		goto retry_unlock;
	}
	if (status != NLM_LCK_GRANTED)
		printk(KERN_WARNING "lockd: unexpected unlock status: %d\n", status);
die:
	return;
retry_rebind:
	nlm_rebind_host(req->a_host);
retry_unlock:
	rpc_restart_call(task);
}
static const struct rpc_call_ops nlmclnt_unlock_ops = {
	.rpc_call_done = nlmclnt_unlock_callback,
	.rpc_release = nlmclnt_rpc_release,
};
/*
 * Cancel a blocked lock request.
 * We always use an async RPC call for this in order not to hang a
 * process that has been Ctrl-C'ed.
 */
static int nlmclnt_cancel(struct nlm_host *host, int block, struct file_lock *fl)
{
	struct nlm_rqst	*req;
	unsigned long	flags;
	sigset_t	oldset;
	int		status;

	/* Block all signals while setting up call */
	spin_lock_irqsave(&current->sighand->siglock, flags);
	oldset = current->blocked;
	sigfillset(&current->blocked);
	recalc_sigpending();
	spin_unlock_irqrestore(&current->sighand->siglock, flags);

	req = nlm_alloc_call(nlm_get_host(host));
	if (!req)
		return -ENOMEM;
	req->a_flags = RPC_TASK_ASYNC;

	nlmclnt_setlockargs(req, fl);
	req->a_args.block = block;

	status = nlm_async_call(req, NLMPROC_CANCEL, &nlmclnt_cancel_ops);

	spin_lock_irqsave(&current->sighand->siglock, flags);
	current->blocked = oldset;
	recalc_sigpending();
	spin_unlock_irqrestore(&current->sighand->siglock, flags);

	return status;
}
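/*
 * Completion handler for the async CANCEL: transport errors and
 * "no locks" replies are retried (after rebinding the host) up to
 * NLMCLNT_MAX_RETRIES times.
 */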
static void nlmclnt_cancel_callback(struct rpc_task *task, void *data)
{
	struct nlm_rqst	*req = data;
	u32 status = ntohl(req->a_res.status);

	if (RPC_ASSASSINATED(task))
		goto die;

	if (task->tk_status < 0) {
		dprintk("lockd: CANCEL call error %d, retrying.\n",
					task->tk_status);
		goto retry_cancel;
	}

	dprintk("lockd: cancel status %u (task %u)\n",
			status, task->tk_pid);

	switch (status) {
	case NLM_LCK_GRANTED:
	case NLM_LCK_DENIED_GRACE_PERIOD:
	case NLM_LCK_DENIED:
		/* Everything's good */
		break;
	case NLM_LCK_DENIED_NOLOCKS:
		dprintk("lockd: CANCEL failed (server has no locks)\n");
		goto retry_cancel;
	default:
		printk(KERN_NOTICE "lockd: weird return %d for CANCEL call\n",
			status);
	}

die:
	return;

retry_cancel:
	/* Don't ever retry more than 3 times */
	if (req->a_retries++ >= NLMCLNT_MAX_RETRIES)
		goto die;
	nlm_rebind_host(req->a_host);
	rpc_restart_call(task);
	rpc_delay(task, 30 * HZ);
}
static const struct rpc_call_ops nlmclnt_cancel_ops = {
	.rpc_call_done = nlmclnt_cancel_callback,
	.rpc_release = nlmclnt_rpc_release,
};
/*
 * Convert an NLM status code to a generic kernel errno
 */
static int
nlm_stat_to_errno(__be32 status)
{
	switch(ntohl(status)) {
	case NLM_LCK_GRANTED:
		return 0;
	case NLM_LCK_DENIED:
		return -EAGAIN;
	case NLM_LCK_DENIED_NOLOCKS:
	case NLM_LCK_DENIED_GRACE_PERIOD:
		return -ENOLCK;
	case NLM_LCK_BLOCKED:
		printk(KERN_NOTICE "lockd: unexpected status NLM_BLOCKED\n");
		return -ENOLCK;
#ifdef CONFIG_LOCKD_V4
	case NLM_DEADLCK:
		return -EDEADLK;
	case NLM_ROFS:
		return -EROFS;
	case NLM_STALE_FH:
		return -ESTALE;
	case NLM_FBIG:
		return -EOVERFLOW;
	case NLM_FAILED:
		return -ENOLCK;
#endif
	}
	printk(KERN_NOTICE "lockd: unexpected server status %d\n", status);
	return -ENOLCK;
}