/*
 * Copyright (c) 2008 Isilon Inc http://www.isilon.com/
 * Authors: Doug Rabson <dfr@rabson.org>
 * Developed with Red Inc: Alfred Perlstein <alfred@freebsd.org>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * Copyright 2011 Nexenta Systems, Inc.  All rights reserved.
 * Copyright (c) 2012 by Delphix. All rights reserved.
 */

/*
 * Client-side support for (NFS) VOP_FRLOCK, VOP_SHRLOCK.
 * (called via klmops.c: lm_frlock, lm4_frlock)
 *
 * Source code derived from FreeBSD nlm_advlock.c
 */

#include <sys/param.h>
#include <sys/fcntl.h>
#include <sys/lock.h>
#include <sys/flock.h>
#include <sys/mount.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/share.h>
#include <sys/syslog.h>
#include <sys/systm.h>
#include <sys/unistd.h>
#include <sys/vnode.h>
#include <sys/queue.h>
#include <sys/sdt.h>
#include <netinet/in.h>

#include <fs/fs_subr.h>
#include <rpcsvc/nlm_prot.h>

#include <nfs/nfs.h>
#include <nfs/nfs_clnt.h>
#include <nfs/export.h>
#include <nfs/rnode.h>
#include <nfs/lm.h>

#include "nlm_impl.h"

/* Extra flags for nlm_call_lock() - xflags */
#define	NLM_X_RECLAIM	1
#define	NLM_X_BLOCKING	2

/*
 * Max. number of retries nlm_call_cancel() does
 * when the NLM server is in its grace period or doesn't
 * respond correctly.
 */
#define	NLM_CANCEL_NRETRS	5

/*
 * Determines whether the given lock "flp" is safe.
 * The lock is considered to be safe when it
 * covers the whole file (i.e. its start
 * and len are zeroes).
 */
#define	NLM_FLOCK_IS_SAFE(flp) \
	((flp)->l_start == 0 && (flp)->l_len == 0)

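/*
 * Source of the "cookie" values carried in the NLM requests built
 * below.  Each call bumps this counter atomically and sends the
 * result as the request cookie; the server echoes it in its reply.
 */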
static volatile uint32_t nlm_xid = 1;

static int nlm_init_fh_by_vp(vnode_t *, struct netobj *, rpcvers_t *);
static int nlm_map_status(nlm4_stats);
static int nlm_map_clnt_stat(enum clnt_stat);
static void nlm_send_siglost(pid_t);

static int nlm_frlock_getlk(struct nlm_host *, vnode_t *,
	struct flock64 *, int, u_offset_t, struct netobj *, int);

static int nlm_frlock_setlk(struct nlm_host *, vnode_t *,
	struct flock64 *, int, u_offset_t, struct netobj *,
	struct flk_callback *, int, bool_t);

static int nlm_reclaim_lock(struct nlm_host *, vnode_t *,
	struct flock64 *, int32_t);

static void nlm_init_lock(struct nlm4_lock *,
	const struct flock64 *, struct netobj *,
	struct nlm_owner_handle *);

static int nlm_call_lock(vnode_t *, struct flock64 *,
	struct nlm_host *, struct netobj *,
	struct flk_callback *, int, int);
static int nlm_call_unlock(struct flock64 *, struct nlm_host *,
	struct netobj *, int);
static int nlm_call_test(struct flock64 *, struct nlm_host *,
	struct netobj *, int);
static int nlm_call_cancel(struct nlm4_lockargs *,
	struct nlm_host *, int);

static int nlm_local_getlk(vnode_t *, struct flock64 *, int);
static int nlm_local_setlk(vnode_t *, struct flock64 *, int);
static void nlm_local_cancelk(vnode_t *, struct flock64 *);

static void nlm_init_share(struct nlm4_share *,
	const struct shrlock *, struct netobj *);

static int nlm_call_share(struct shrlock *, struct nlm_host *,
	struct netobj *, int, int);
static int nlm_call_unshare(struct shrlock *, struct nlm_host *,
	struct netobj *, int);
static int nlm_reclaim_share(struct nlm_host *, vnode_t *,
	struct shrlock *, uint32_t);
static int nlm_local_shrlock(vnode_t *, struct shrlock *, int, int);
static void nlm_local_shrcancel(vnode_t *, struct shrlock *);

/*
 * Reclaim locks/shares acquired by the client side
 * on the given server represented by hostp.
 * The function is called from a dedicated thread
 * when the server reports that it has entered its
 * grace period.
 */
void
nlm_reclaim_client(struct nlm_globals *g, struct nlm_host *hostp)
{
	int32_t state;
	int error, sysid;
	struct locklist *llp_head, *llp;
	struct nlm_shres *nsp_head, *nsp;
	bool_t restart;

	sysid = hostp->nh_sysid | LM_SYSID_CLIENT;
	do {
		error = 0;
		restart = FALSE;
		state = nlm_host_get_state(hostp);

		DTRACE_PROBE3(reclaim__iter, struct nlm_globals *, g,
		    struct nlm_host *, hostp, int, state);

		/*
		 * We cancel all sleeping locks that were
		 * done by the host, because we don't allow
		 * reclamation of sleeping locks. The reason
		 * we do this is that allowing reclamation of
		 * sleeping locks can potentially break the
		 * locks recovery order.
		 *
		 * Imagine that we have two client machines A and B
		 * and an NLM server machine. A adds a non-sleeping
		 * lock to the file F and acquires this file. Machine
		 * B in its turn adds a sleeping lock to the file
		 * F and blocks because F is already acquired by
		 * machine A. Then the server crashes and after the
		 * reboot it notifies its clients about the crash.
		 * If we allowed reclamation of sleeping locks,
		 * it would be possible for machine B to recover
		 * its lock faster than machine A (for some reason).
		 * B would then acquire the file F after the server
		 * crash, and machine A (which for some reason recovers
		 * more slowly) would fail to recover its non-sleeping
		 * lock. Thus the original locks order becomes broken.
		 */
		nlm_host_cancel_slocks(g, hostp);

		/*
		 * Try to reclaim all active locks we have
		 */
		llp_head = llp = flk_get_active_locks(sysid, NOPID);
		while (llp != NULL) {
			error = nlm_reclaim_lock(hostp, llp->ll_vp,
			    &llp->ll_flock, state);

			if (error == 0) {
				llp = llp->ll_next;
				continue;
			} else if (error == ERESTART) {
				restart = TRUE;
				break;
			} else {
				/*
				 * Critical error occurred, the lock
				 * can not be recovered, just take it away.
				 */
				nlm_local_cancelk(llp->ll_vp, &llp->ll_flock);
			}

			llp = llp->ll_next;
		}

		flk_free_locklist(llp_head);
		if (restart) {
			/*
			 * The lock reclamation function reported that
			 * the server state changed (again), so
			 * try to repeat the whole reclamation process.
			 */
			continue;
		}

		nsp_head = nsp = nlm_get_active_shres(hostp);
		while (nsp != NULL) {
			error = nlm_reclaim_share(hostp, nsp->ns_vp,
			    nsp->ns_shr, state);

			if (error == 0) {
				nsp = nsp->ns_next;
				continue;
			} else if (error == ERESTART) {
				break;
			} else {
				/* Failed to reclaim share */
				nlm_shres_untrack(hostp, nsp->ns_vp,
				    nsp->ns_shr);
				nlm_local_shrcancel(nsp->ns_vp,
				    nsp->ns_shr);
			}

			nsp = nsp->ns_next;
		}

		nlm_free_shrlist(nsp_head);
	} while (state != nlm_host_get_state(hostp));
}

/*
 * nlm_frlock --
 *	NFS advisory byte-range locks.
 *	Called in klmops.c
 *
 * Note that the local locking code (os/flock.c) is used to
 * keep track of remote locks granted by some server, so we
 * can reclaim those locks after a server restarts.  We can
 * also sometimes use this as a cache of lock information.
 *
 * Was: nlm_advlock()
 */
/* ARGSUSED */
int
nlm_frlock(struct vnode *vp, int cmd, struct flock64 *flkp,
    int flags, u_offset_t offset, struct cred *crp,
    struct netobj *fhp, struct flk_callback *flcb, int vers)
{
	mntinfo_t *mi;
	servinfo_t *sv;
	const char *netid;
	struct nlm_host *hostp;
	int error;
	struct nlm_globals *g;

	mi = VTOMI(vp);
	sv = mi->mi_curr_serv;

	netid = nlm_knc_to_netid(sv->sv_knconf);
	if (netid == NULL) {
		NLM_ERR("nlm_frlock: unknown NFS netid\n");
		return (ENOSYS);
	}

	g = zone_getspecific(nlm_zone_key, curzone);
	hostp = nlm_host_findcreate(g, sv->sv_hostname, netid, &sv->sv_addr);
	if (hostp == NULL)
		return (ENOSYS);

	/*
	 * Purge cached attributes in order to make sure that
	 * future calls of convoff()/VOP_GETATTR() will get the
	 * latest data.
	 */
	if (flkp->l_whence == SEEK_END)
		PURGE_ATTRCACHE(vp);

	switch (cmd) {
	case F_GETLK:
		error = nlm_frlock_getlk(hostp, vp, flkp, flags,
		    offset, fhp, vers);
		break;

	case F_SETLK:
	case F_SETLKW:
		error = nlm_frlock_setlk(hostp, vp, flkp, flags,
		    offset, fhp, flcb, vers, (cmd == F_SETLKW));
		if (error == 0)
			nlm_host_monitor(g, hostp, 0);
		break;

	default:
		error = EINVAL;
		break;
	}

	nlm_host_release(g, hostp);
	return (error);
}

static int
nlm_frlock_getlk(struct nlm_host *hostp, vnode_t *vp,
    struct flock64 *flkp, int flags, u_offset_t offset,
    struct netobj *fhp, int vers)
{
	struct flock64 flk0;
	int error;

	/*
	 * Check local (cached) locks first.
	 * If we find one, no need for RPC.
	 */
	flk0 = *flkp;
	flk0.l_pid = curproc->p_pid;
	error = nlm_local_getlk(vp, &flk0, flags);
	if (error != 0)
		return (error);
	if (flk0.l_type != F_UNLCK) {
		*flkp = flk0;
		return (0);
	}

	/* Not found locally.  Try remote. */
	flk0 = *flkp;
	flk0.l_pid = curproc->p_pid;
	error = convoff(vp, &flk0, 0, (offset_t)offset);
	if (error != 0)
		return (error);

	error = nlm_call_test(&flk0, hostp, fhp, vers);
	if (error != 0)
		return (error);

	if (flk0.l_type == F_UNLCK) {
		/*
		 * Update the caller's *flkp with information
		 * on the conflicting lock (or lack thereof).
		 */
		flkp->l_type = F_UNLCK;
	} else {
		/*
		 * Found a conflicting lock.  Set the
		 * caller's *flkp with the info, first
		 * converting to the caller's whence.
		 */
		(void) convoff(vp, &flk0, flkp->l_whence, (offset_t)offset);
		*flkp = flk0;
	}

	return (0);
}

static int
nlm_frlock_setlk(struct nlm_host *hostp, vnode_t *vp,
    struct flock64 *flkp, int flags, u_offset_t offset,
    struct netobj *fhp, struct flk_callback *flcb,
    int vers, bool_t do_block)
{
	int error, xflags;

	error = convoff(vp, flkp, 0, (offset_t)offset);
	if (error != 0)
		return (error);

	/*
	 * NFS v2 clients should not request locks where any part
	 * of the lock range is beyond 0xffffffff.  The NFS code
	 * checks that (see nfs_frlock, flk_check_lock_data), but
	 * as that's outside this module, let's check here too.
	 * This check ensures that we will be able to convert this
	 * lock request into 32-bit form without change, and that
	 * (more importantly) when the granted call back arrives,
	 * it's unchanged when converted back into 64-bit form.
	 * If this lock range were to change in any way during
	 * either of those conversions, the "granted" call back
	 * from the NLM server would not find our sleeping lock.
	 */
	if (vers < NLM4_VERS) {
		if (flkp->l_start > MAX_UOFF32 ||
		    flkp->l_start + flkp->l_len > MAX_UOFF32 + 1)
			return (EINVAL);
	}

	/*
	 * Fill in l_sysid for the local locking calls.
	 * Also, let's not trust the caller's l_pid.
	 */
	flkp->l_sysid = hostp->nh_sysid | LM_SYSID_CLIENT;
	flkp->l_pid = curproc->p_pid;

	if (flkp->l_type == F_UNLCK) {
		/*
		 * Purge local (cached) lock information first,
		 * then clear the remote lock.
		 */
		(void) nlm_local_setlk(vp, flkp, flags);
		error = nlm_call_unlock(flkp, hostp, fhp, vers);

		return (error);
	}

	if (!do_block) {
		/*
		 * This is a non-blocking "set" request,
		 * so we can check locally first, and
		 * sometimes avoid an RPC call.
		 */
		struct flock64 flk0;

		flk0 = *flkp;
		error = nlm_local_getlk(vp, &flk0, flags);
		if (error == 0 && flk0.l_type != F_UNLCK) {
			/* Found a conflicting lock. */
			return (EAGAIN);
		}

		xflags = 0;
	} else {
		xflags = NLM_X_BLOCKING;
	}

	nfs_add_locking_id(vp, curproc->p_pid, RLMPL_PID,
	    (char *)&curproc->p_pid, sizeof (pid_t));

	error = nlm_call_lock(vp, flkp, hostp, fhp, flcb, vers, xflags);
	if (error != 0)
		return (error);

	/*
	 * Save the lock locally.  This should not fail,
	 * because the server is authoritative about locks
	 * and it just told us we have the lock!
	 */
	error = nlm_local_setlk(vp, flkp, flags);
	if (error != 0) {
		/*
		 * That's an unexpected situation.  Just ignore the error.
		 */
		NLM_WARN("nlm_frlock_setlk: Failed to set local lock. "
		    "[err=%d]\n", error);
		error = 0;
	}

	return (error);
}

/*
 * Cancel all client-side remote locks/shares on the
 * given host.  Notify the processes that own the
 * cancelled locks, by sending them SIGLOST, that their
 * locks have been removed by force.
 */
void
nlm_client_cancel_all(struct nlm_globals *g, struct nlm_host *hostp)
{
	struct locklist *llp_head, *llp;
	struct nlm_shres *nsp_head, *nsp;
	struct netobj lm_fh;
	rpcvers_t vers;
	int error, sysid;

	sysid = hostp->nh_sysid | LM_SYSID_CLIENT;
	nlm_host_cancel_slocks(g, hostp);

	/*
	 * Destroy all active locks
	 */
	llp_head = llp = flk_get_active_locks(sysid, NOPID);
	while (llp != NULL) {
		llp->ll_flock.l_type = F_UNLCK;

		error = nlm_init_fh_by_vp(llp->ll_vp, &lm_fh, &vers);
		if (error == 0)
			(void) nlm_call_unlock(&llp->ll_flock, hostp,
			    &lm_fh, vers);

		nlm_local_cancelk(llp->ll_vp, &llp->ll_flock);
		llp = llp->ll_next;
	}

	flk_free_locklist(llp_head);

	/*
	 * Destroy all active share reservations
	 */
	nsp_head = nsp = nlm_get_active_shres(hostp);
	while (nsp != NULL) {
		error = nlm_init_fh_by_vp(nsp->ns_vp, &lm_fh, &vers);
		if (error == 0)
			(void) nlm_call_unshare(nsp->ns_shr, hostp,
			    &lm_fh, vers);

		nlm_local_shrcancel(nsp->ns_vp, nsp->ns_shr);
		nlm_shres_untrack(hostp, nsp->ns_vp, nsp->ns_shr);
		nsp = nsp->ns_next;
	}

	nlm_free_shrlist(nsp_head);
}

/*
 * The function determines whether the lock "fl" can
 * be safely applied to the file the vnode "vp" corresponds to.
 * The lock can be "safely" applied if all of the following
 * conditions hold:
 *  - It's not a mandatory lock
 *  - The vnode wasn't mapped by anyone
 *  - The vnode was mapped, but it has no locks on it
 *  - The vnode was mapped and all locks it has occupy
 *    the whole file
 */
int
nlm_safelock(vnode_t *vp, const struct flock64 *fl, cred_t *cr)
{
	rnode_t *rp = VTOR(vp);
	struct vattr va;
	int err;

	if ((rp->r_mapcnt > 0) && (fl->l_start != 0 || fl->l_len != 0))
		return (0);

	va.va_mask = AT_MODE;
	err = VOP_GETATTR(vp, &va, 0, cr, NULL);
	if (err != 0)
		return (0);

	/* NLM4 doesn't allow mandatory file locking */
	if (MANDLOCK(vp, va.va_mode))
		return (0);

	return (1);
}

/*
 * The function determines whether it's safe to map
 * a file corresponding to vnode vp.
 * The mapping is considered to be "safe" if the file
 * either has no locks on it or all locks it
 * has occupy the whole file.
 */
int
nlm_safemap(const vnode_t *vp)
{
	struct locklist *llp, *llp_next;
	struct nlm_slock *nslp;
	struct nlm_globals *g;
	int safe = 1;

	/* Check active locks at first */
	llp = flk_active_locks_for_vp(vp);
	while (llp != NULL) {
		if ((llp->ll_vp == vp) &&
		    !NLM_FLOCK_IS_SAFE(&llp->ll_flock))
			safe = 0;

		llp_next = llp->ll_next;
		VN_RELE(llp->ll_vp);
		kmem_free(llp, sizeof (*llp));
		llp = llp_next;
	}

	if (!safe)
		return (safe);

	/* Then check sleeping locks if any */
	g = zone_getspecific(nlm_zone_key, curzone);
	mutex_enter(&g->lock);
	TAILQ_FOREACH(nslp, &g->nlm_slocks, nsl_link) {
		if (nslp->nsl_state == NLM_SL_BLOCKED &&
		    nslp->nsl_vp == vp &&
		    (nslp->nsl_lock.l_offset != 0 ||
		    nslp->nsl_lock.l_len != 0)) {
			safe = 0;
			break;
		}
	}

	mutex_exit(&g->lock);
	return (safe);
}

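/*
 * Returns TRUE if the given vnode has at least one sleeping
 * (blocked) lock registered for it, and FALSE otherwise.
 */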
int
nlm_has_sleep(const vnode_t *vp)
{
	struct nlm_globals *g;
	struct nlm_slock *nslp;
	int has_slocks = FALSE;

	g = zone_getspecific(nlm_zone_key, curzone);
	mutex_enter(&g->lock);
	TAILQ_FOREACH(nslp, &g->nlm_slocks, nsl_link) {
		if (nslp->nsl_state == NLM_SL_BLOCKED &&
		    nslp->nsl_vp == vp) {
			has_slocks = TRUE;
			break;
		}
	}

	mutex_exit(&g->lock);
	return (has_slocks);
}

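/*
 * Register a lock with the local lock manager only, without any
 * NLM RPC: set l_sysid so the lock is attributed to the given
 * host (if any), convert the offset to zero-base, and record the
 * lock via nlm_local_setlk().
 */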
void
nlm_register_lock_locally(struct vnode *vp, struct nlm_host *hostp,
    struct flock64 *flk, int flags, u_offset_t offset)
{
	int sysid = 0;

	if (hostp != NULL) {
		sysid = hostp->nh_sysid | LM_SYSID_CLIENT;
	}

	flk->l_sysid = sysid;
	(void) convoff(vp, flk, 0, (offset_t)offset);
	(void) nlm_local_setlk(vp, flk, flags);
}

/*
 * The BSD code had functions here to "reclaim" (destroy)
 * remote locks when a vnode is being forcibly destroyed.
 * We just keep vnodes around until statd tells us the
 * client has gone away.
 */

static int
nlm_reclaim_lock(struct nlm_host *hostp, vnode_t *vp,
    struct flock64 *flp, int32_t orig_state)
{
	struct netobj lm_fh;
	int error, state;
	rpcvers_t vers;

	/*
	 * If the remote NSM state changes during recovery, the host
	 * must have rebooted a second time.  In that case, we must
	 * restart the recovery.
	 */
	state = nlm_host_get_state(hostp);
	if (state != orig_state)
		return (ERESTART);

	error = nlm_init_fh_by_vp(vp, &lm_fh, &vers);
	if (error != 0)
		return (error);

	return (nlm_call_lock(vp, flp, hostp, &lm_fh,
	    NULL, vers, NLM_X_RECLAIM));
}

/*
 * Get local lock information for some NFS server.
 *
 * This gets (checks for) a local conflicting lock.
 * Note: Modifies passed flock, if a conflict is found,
 * but the caller expects that.
 */
static int
nlm_local_getlk(vnode_t *vp, struct flock64 *fl, int flags)
{
	VERIFY(fl->l_whence == SEEK_SET);
	return (reclock(vp, fl, 0, flags, 0, NULL));
}

/*
 * Set local lock information for some NFS server.
 *
 * Called after a lock request (set or clear) succeeded. We record the
 * details in the local lock manager. Note that since the remote
 * server has granted the lock, we can be sure that it doesn't
 * conflict with any other locks we have in the local lock manager.
 *
 * Since it is possible that the host may also make NLM client requests
 * to our NLM server, we use a different sysid value to record our own
 * client locks.
 *
 * Note that since it is possible for us to receive replies from the
 * server in a different order than the locks were granted (e.g. if
 * many local threads are contending for the same lock), we must use a
 * blocking operation when registering with the local lock manager.
 * We expect that any actual wait will be rare and short, hence we
 * ignore signals for this.
 */
static int
nlm_local_setlk(vnode_t *vp, struct flock64 *fl, int flags)
{
	VERIFY(fl->l_whence == SEEK_SET);
	return (reclock(vp, fl, SETFLCK, flags, 0, NULL));
}

/*
 * Cancel a local lock and send the SIGLOST signal
 * to the lock owner.
 *
 * NOTE: modifies flp
 */
static void
nlm_local_cancelk(vnode_t *vp, struct flock64 *flp)
{
	flp->l_type = F_UNLCK;
	(void) nlm_local_setlk(vp, flp, FREAD | FWRITE);
	nlm_send_siglost(flp->l_pid);
}

/*
 * Do NLM_LOCK call.
 * Was: nlm_setlock()
 *
 * NOTE: nlm_call_lock() is responsible for locking/unlocking
 * rnode->r_lkserlock: it must be released before nlm_call_lock()
 * sleeps waiting on a lock and reacquired when it wakes up.
 */
static int
nlm_call_lock(vnode_t *vp, struct flock64 *flp,
    struct nlm_host *hostp, struct netobj *fhp,
    struct flk_callback *flcb, int vers, int xflags)
{
	struct nlm4_lockargs args;
	struct nlm_owner_handle oh;
	struct nlm_globals *g;
	rnode_t *rnp = VTOR(vp);
	struct nlm_slock *nslp = NULL;
	uint32_t xid;
	int error = 0;

	bzero(&args, sizeof (args));
	g = zone_getspecific(nlm_zone_key, curzone);
	nlm_init_lock(&args.alock, flp, fhp, &oh);

	args.exclusive = (flp->l_type == F_WRLCK);
	args.reclaim = xflags & NLM_X_RECLAIM;
	args.state = g->nsm_state;
	args.cookie.n_len = sizeof (xid);
	args.cookie.n_bytes = (char *)&xid;

	oh.oh_sysid = hostp->nh_sysid;
	xid = atomic_inc_32_nv(&nlm_xid);

	if (xflags & NLM_X_BLOCKING) {
		args.block = TRUE;
		nslp = nlm_slock_register(g, hostp, &args.alock, vp);
	}

	for (;;) {
		nlm_rpc_t *rpcp;
		enum clnt_stat stat;
		struct nlm4_res res;
		enum nlm4_stats nlm_err;

		error = nlm_host_get_rpc(hostp, vers, &rpcp);
		if (error != 0) {
			error = ENOLCK;
			goto out;
		}

		bzero(&res, sizeof (res));
		stat = nlm_lock_rpc(&args, &res, rpcp->nr_handle, vers);
		nlm_host_rele_rpc(hostp, rpcp);

		error = nlm_map_clnt_stat(stat);
		if (error != 0) {
			if (error == EAGAIN)
				continue;

			goto out;
		}

		DTRACE_PROBE1(lock__res, enum nlm4_stats, res.stat.stat);
		nlm_err = res.stat.stat;
		xdr_free((xdrproc_t)xdr_nlm4_res, (void *)&res);
		if (nlm_err == nlm4_denied_grace_period) {
			if (args.reclaim) {
				error = ENOLCK;
				goto out;
			}

			error = nlm_host_wait_grace(hostp);
			if (error != 0)
				goto out;

			continue;
		}

		switch (nlm_err) {
		case nlm4_granted:
		case nlm4_blocked:
			error = 0;
			break;

		case nlm4_denied:
			if (nslp != NULL) {
				NLM_WARN("nlm_call_lock: got nlm4_denied for "
				    "blocking lock\n");
			}

			error = EAGAIN;
			break;

		default:
			error = nlm_map_status(nlm_err);
		}

		/*
		 * If we deal with either a non-blocking lock or
		 * a blocking lock that wasn't blocked on
		 * the server side (for some reason), our work
		 * is finished.
		 */
		if (nslp == NULL ||
		    nlm_err != nlm4_blocked ||
		    error != 0)
			goto out;

		/*
		 * Before releasing the r_lkserlock of rnode, we should
		 * check whether the new lock is "safe". If it's not
		 * safe, disable caching for the given vnode. That is done
		 * for sleeping locks only that are waiting for a GRANT reply
		 * from the NLM server.
		 *
		 * NOTE: the vnode cache can be enabled back later if an
		 * unsafe lock is merged with existing locks so that
		 * it becomes safe. This condition is checked in the
		 * NFSv3 code (see nfs_lockcompletion).
		 */
		if (!NLM_FLOCK_IS_SAFE(flp)) {
			mutex_enter(&vp->v_lock);
			vp->v_flag |= VNOCACHE;
			mutex_exit(&vp->v_lock);
		}

		/*
		 * The server should call us back with a
		 * granted message when the lock succeeds.
		 * In order to deal with broken servers,
		 * lost granted messages, or server reboots,
		 * we will also re-try every few seconds.
		 *
		 * Note: We're supposed to call these
		 * flk_invoke_callbacks when blocking.
		 * Take care on rnode->r_lkserlock, we should
		 * release it before going to sleep.
		 */
		(void) flk_invoke_callbacks(flcb, FLK_BEFORE_SLEEP);
		nfs_rw_exit(&rnp->r_lkserlock);

		error = nlm_slock_wait(g, nslp, g->retrans_tmo);

		/*
		 * NFS expects that we return with rnode->r_lkserlock
		 * locked on write, lock it back.
		 *
		 * NOTE: nfs_rw_enter_sig() can be either interruptible
		 * or not.  It depends on options of the NFS mount.  Here
		 * we're _always_ uninterruptible (independently of mount
		 * options), because nfs_frlock/nfs3_frlock expects that
		 * we return with rnode->r_lkserlock acquired.  So we don't
		 * want our lock attempt to be interrupted by a signal.
		 */
		(void) nfs_rw_enter_sig(&rnp->r_lkserlock, RW_WRITER, 0);
		(void) flk_invoke_callbacks(flcb, FLK_AFTER_SLEEP);

		if (error == 0) {
			break;
		} else if (error == EINTR) {
			/*
			 * We need to call the server to cancel our
			 * lock request.
			 */
			DTRACE_PROBE1(cancel__lock, int, error);
			(void) nlm_call_cancel(&args, hostp, vers);
			break;
		} else {
			/*
			 * Timeout happened, resend the lock request to
			 * the server. Well, we're a bit paranoid here,
			 * but keep in mind the previous request could be
			 * lost (especially with a connectionless transport).
			 */

			ASSERT(error == ETIMEDOUT);
			continue;
		}
	}

	/*
	 * We could have disabled the vnode cache for the given _sleeping_
	 * (condition: nslp != NULL) lock if it was unsafe. Normally,
	 * the nfs_lockcompletion() function can enable the vnode cache
	 * back if the lock becomes safe after activation. But that
	 * will not happen if any error occurs on the locking path.
	 *
	 * Here we enable the vnode cache back if an error occurred
	 * and if there aren't any unsafe locks on the given vnode.
	 * Note that if an error happened, the sleeping lock was
	 * deregistered.
	 */
	if (error != 0 && nslp != NULL && nlm_safemap(vp)) {
		mutex_enter(&vp->v_lock);
		vp->v_flag &= ~VNOCACHE;
		mutex_exit(&vp->v_lock);
	}

out:
	if (nslp != NULL)
		nlm_slock_unregister(g, nslp);

	return (error);
}

/*
 * Do NLM_CANCEL call.
 * Helper for nlm_call_lock() error recovery.
 */
static int
nlm_call_cancel(struct nlm4_lockargs *largs,
    struct nlm_host *hostp, int vers)
{
	nlm4_cancargs cargs;
	uint32_t xid;
	int error, retries;

	bzero(&cargs, sizeof (cargs));

	xid = atomic_inc_32_nv(&nlm_xid);
	cargs.cookie.n_len = sizeof (xid);
	cargs.cookie.n_bytes = (char *)&xid;
	cargs.block = largs->block;
	cargs.exclusive = largs->exclusive;
	cargs.alock = largs->alock;

	/*
	 * Unlike all other nlm_call_* functions, nlm_call_cancel
	 * doesn't spin forever until it gets a reasonable response
	 * from the NLM server. It makes a limited number of retries
	 * and if the server doesn't send a reasonable reply, it
	 * returns an error. It behaves like that because it's called
	 * from nlm_call_lock with blocked signals and thus can not
	 * be interrupted from user space.
	 */
	for (retries = 0; retries < NLM_CANCEL_NRETRS; retries++) {
		nlm_rpc_t *rpcp;
		enum clnt_stat stat;
		struct nlm4_res res;

		error = nlm_host_get_rpc(hostp, vers, &rpcp);
		if (error != 0)
			return (ENOLCK);

		bzero(&res, sizeof (res));
		stat = nlm_cancel_rpc(&cargs, &res, rpcp->nr_handle, vers);
		nlm_host_rele_rpc(hostp, rpcp);

		DTRACE_PROBE1(cancel__rloop_end, enum clnt_stat, stat);
		error = nlm_map_clnt_stat(stat);
		if (error != 0) {
			if (error == EAGAIN)
				continue;

			return (error);
		}

		DTRACE_PROBE1(cancel__res, enum nlm4_stats, res.stat.stat);
		switch (res.stat.stat) {
			/*
			 * There was nothing to cancel. We are going to go
			 * ahead and assume we got the lock.
			 */
		case nlm_denied:
			/*
			 * The server has recently rebooted.  Treat this as a
			 * successful cancellation.
			 */
		case nlm4_denied_grace_period:
			/*
			 * We managed to cancel.
			 */
		case nlm4_granted:
			error = 0;
			break;

		default:
			/*
			 * Broken server implementation.  Can't really do
			 * anything here.
			 */
			error = EIO;
			break;
		}

		xdr_free((xdrproc_t)xdr_nlm4_res, (void *)&res);
		break;
	}

	return (error);
}

/*
 * Do NLM_UNLOCK call.
 * Was: nlm_clearlock
 */
static int
nlm_call_unlock(struct flock64 *flp, struct nlm_host *hostp,
    struct netobj *fhp, int vers)
{
	struct nlm4_unlockargs args;
	struct nlm_owner_handle oh;
	enum nlm4_stats nlm_err;
	uint32_t xid;
	int error;

	bzero(&args, sizeof (args));
	nlm_init_lock(&args.alock, flp, fhp, &oh);

	oh.oh_sysid = hostp->nh_sysid;
	xid = atomic_inc_32_nv(&nlm_xid);
	args.cookie.n_len = sizeof (xid);
	args.cookie.n_bytes = (char *)&xid;

	for (;;) {
		nlm_rpc_t *rpcp;
		struct nlm4_res res;
		enum clnt_stat stat;

		error = nlm_host_get_rpc(hostp, vers, &rpcp);
		if (error != 0)
			return (ENOLCK);

		bzero(&res, sizeof (res));
		stat = nlm_unlock_rpc(&args, &res, rpcp->nr_handle, vers);
		nlm_host_rele_rpc(hostp, rpcp);

		error = nlm_map_clnt_stat(stat);
		if (error != 0) {
			if (error == EAGAIN)
				continue;

			return (error);
		}

		DTRACE_PROBE1(unlock__res, enum nlm4_stats, res.stat.stat);
		nlm_err = res.stat.stat;
		xdr_free((xdrproc_t)xdr_nlm4_res, (void *)&res);
		if (nlm_err == nlm4_denied_grace_period) {
			error = nlm_host_wait_grace(hostp);
			if (error != 0)
				return (error);

			continue;
		}

		break;
	}

	/* special cases */
	switch (nlm_err) {
	case nlm4_denied:
		error = EINVAL;
		break;
	default:
		error = nlm_map_status(nlm_err);
		break;
	}

	return (error);
}

/*
 * Do NLM_TEST call.
 * Was: nlm_getlock()
 */
static int
nlm_call_test(struct flock64 *flp, struct nlm_host *hostp,
    struct netobj *fhp, int vers)
{
	struct nlm4_testargs args;
	struct nlm4_holder h;
	struct nlm_owner_handle oh;
	enum nlm4_stats nlm_err;
	uint32_t xid;
	int error;

	bzero(&args, sizeof (args));
	nlm_init_lock(&args.alock, flp, fhp, &oh);

	args.exclusive = (flp->l_type == F_WRLCK);
	oh.oh_sysid = hostp->nh_sysid;
	xid = atomic_inc_32_nv(&nlm_xid);
	args.cookie.n_len = sizeof (xid);
	args.cookie.n_bytes = (char *)&xid;

	for (;;) {
		nlm_rpc_t *rpcp;
		struct nlm4_testres res;
		enum clnt_stat stat;

		error = nlm_host_get_rpc(hostp, vers, &rpcp);
		if (error != 0)
			return (ENOLCK);

		bzero(&res, sizeof (res));
		stat = nlm_test_rpc(&args, &res, rpcp->nr_handle, vers);
		nlm_host_rele_rpc(hostp, rpcp);

		error = nlm_map_clnt_stat(stat);
		if (error != 0) {
			if (error == EAGAIN)
				continue;

			return (error);
		}

		DTRACE_PROBE1(test__res, enum nlm4_stats, res.stat.stat);
		nlm_err = res.stat.stat;
		bcopy(&res.stat.nlm4_testrply_u.holder, &h, sizeof (h));
		xdr_free((xdrproc_t)xdr_nlm4_testres, (void *)&res);
		if (nlm_err == nlm4_denied_grace_period) {
			error = nlm_host_wait_grace(hostp);
			if (error != 0)
				return (error);

			continue;
		}

		break;
	}

	switch (nlm_err) {
	case nlm4_granted:
		flp->l_type = F_UNLCK;
		error = 0;
		break;

	case nlm4_denied:
		flp->l_start = h.l_offset;
		flp->l_len = h.l_len;
		flp->l_pid = h.svid;
		flp->l_type = (h.exclusive) ? F_WRLCK : F_RDLCK;
		flp->l_whence = SEEK_SET;
		flp->l_sysid = 0;
		error = 0;
		break;

	default:
		error = nlm_map_status(nlm_err);
		break;
	}

	return (error);
}

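/*
 * Fill in the NLM protocol lock structure (nlm4_lock) and the
 * owner handle from a zero-based flock64 and a filehandle.
 * The owner handle carries our sysid; the callers above set
 * oh_sysid after this returns.
 */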
static void
nlm_init_lock(struct nlm4_lock *lock,
    const struct flock64 *fl, struct netobj *fh,
    struct nlm_owner_handle *oh)
{

	/* Caller converts to zero-base. */
	VERIFY(fl->l_whence == SEEK_SET);
	bzero(lock, sizeof (*lock));
	bzero(oh, sizeof (*oh));

	lock->caller_name = uts_nodename();
	lock->fh.n_len = fh->n_len;
	lock->fh.n_bytes = fh->n_bytes;
	lock->oh.n_len = sizeof (*oh);
	lock->oh.n_bytes = (void *)oh;
	lock->svid = fl->l_pid;
	lock->l_offset = fl->l_start;
	lock->l_len = fl->l_len;
}

/* ************************************************************** */

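/*
 * Client-side support for (NFS) VOP_SHRLOCK: set or clear a share
 * reservation on the server (called via klmops.c: lm_shrlock,
 * lm4_shrlock).
 */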
int
nlm_shrlock(struct vnode *vp, int cmd, struct shrlock *shr,
    int flags, struct netobj *fh, int vers)
{
	struct shrlock shlk;
	mntinfo_t *mi;
	servinfo_t *sv;
	const char *netid;
	struct nlm_host *host = NULL;
	int error;
	struct nlm_globals *g;

	mi = VTOMI(vp);
	sv = mi->mi_curr_serv;

	netid = nlm_knc_to_netid(sv->sv_knconf);
	if (netid == NULL) {
		NLM_ERR("nlm_shrlock: unknown NFS netid\n");
		return (ENOSYS);
	}

	g = zone_getspecific(nlm_zone_key, curzone);
	host = nlm_host_findcreate(g, sv->sv_hostname, netid, &sv->sv_addr);
	if (host == NULL)
		return (ENOSYS);

	/*
	 * Fill in s_sysid for the local locking calls.
	 * Also, let's not trust the caller's s_pid.
	 */
	shlk = *shr;
	shlk.s_sysid = host->nh_sysid | LM_SYSID_CLIENT;
	shlk.s_pid = curproc->p_pid;

	if (cmd == F_UNSHARE) {
		/*
		 * Purge local (cached) share information first,
		 * then clear the remote share.
		 */
		(void) nlm_local_shrlock(vp, &shlk, cmd, flags);
		nlm_shres_untrack(host, vp, &shlk);
		error = nlm_call_unshare(&shlk, host, fh, vers);
		goto out;
	}

	nfs_add_locking_id(vp, curproc->p_pid, RLMPL_OWNER,
	    shr->s_owner, shr->s_own_len);

	error = nlm_call_share(&shlk, host, fh, vers, FALSE);
	if (error != 0)
		goto out;

	/*
	 * Save the share locally.  This should not fail,
	 * because the server is authoritative about shares
	 * and it just told us we have the share reservation!
	 */
	error = nlm_local_shrlock(vp, shr, cmd, flags);
	if (error != 0) {
		/*
		 * Oh oh, we really don't expect an error here.
		 */
		NLM_WARN("nlm_shrlock: set locally, err %d\n", error);
		error = 0;
	}

	nlm_shres_track(host, vp, &shlk);
	nlm_host_monitor(g, host, 0);

out:
	nlm_host_release(g, host);

	return (error);
}

static int
nlm_reclaim_share(struct nlm_host *hostp, vnode_t *vp,
    struct shrlock *shr, uint32_t orig_state)
{
	struct netobj lm_fh;
	int error, state;
	rpcvers_t vers;

	state = nlm_host_get_state(hostp);
	if (state != orig_state) {
		/*
		 * It seems that the NLM server rebooted while
		 * we were busy with recovery.
		 */
		return (ERESTART);
	}

	error = nlm_init_fh_by_vp(vp, &lm_fh, &vers);
	if (error != 0)
		return (error);

	return (nlm_call_share(shr, hostp, &lm_fh, vers, 1));
}

/*
 * Set local share information for some NFS server.
 *
 * Called after a share request (set or clear) succeeded. We record
 * the details in the local lock manager. Note that since the remote
 * server has granted the share, we can be sure that it doesn't
 * conflict with any other shares we have in the local lock manager.
 *
 * Since it is possible that the host may also make NLM client requests
 * to our NLM server, we use a different sysid value to record our own
 * client shares.
 */
static int
nlm_local_shrlock(vnode_t *vp, struct shrlock *shr, int cmd, int flags)
{
	return (fs_shrlock(vp, cmd, shr, flags, CRED(), NULL));
}

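/*
 * Cancel a local share reservation by force and notify its owner
 * with SIGLOST; the share-reservation counterpart of
 * nlm_local_cancelk().
 */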
static void
nlm_local_shrcancel(vnode_t *vp, struct shrlock *shr)
{
	(void) nlm_local_shrlock(vp, shr, F_UNSHARE, FREAD | FWRITE);
	nlm_send_siglost(shr->s_pid);
}

/*
 * Do NLM_SHARE call.
 * Was: nlm_setshare()
 */
static int
nlm_call_share(struct shrlock *shr, struct nlm_host *host,
    struct netobj *fh, int vers, int reclaim)
{
	struct nlm4_shareargs args;
	enum nlm4_stats nlm_err;
	uint32_t xid;
	int error;

	bzero(&args, sizeof (args));
	nlm_init_share(&args.share, shr, fh);

	args.reclaim = reclaim;
	xid = atomic_inc_32_nv(&nlm_xid);
	args.cookie.n_len = sizeof (xid);
	args.cookie.n_bytes = (char *)&xid;

	for (;;) {
		nlm_rpc_t *rpcp;
		struct nlm4_shareres res;
		enum clnt_stat stat;

		error = nlm_host_get_rpc(host, vers, &rpcp);
		if (error != 0)
			return (ENOLCK);

		bzero(&res, sizeof (res));
		stat = nlm_share_rpc(&args, &res, rpcp->nr_handle, vers);
		nlm_host_rele_rpc(host, rpcp);

		error = nlm_map_clnt_stat(stat);
		if (error != 0) {
			if (error == EAGAIN)
				continue;

			return (error);
		}

		DTRACE_PROBE1(share__res, enum nlm4_stats, res.stat);
		nlm_err = res.stat;
		xdr_free((xdrproc_t)xdr_nlm4_shareres, (void *)&res);
		if (nlm_err == nlm4_denied_grace_period) {
			if (args.reclaim)
				return (ENOLCK);

			error = nlm_host_wait_grace(host);
			if (error != 0)
				return (error);

			continue;
		}

		break;
	}

	switch (nlm_err) {
	case nlm4_granted:
		error = 0;
		break;
	case nlm4_blocked:
	case nlm4_denied:
		error = EAGAIN;
		break;
	case nlm4_denied_nolocks:
	case nlm4_deadlck:
		error = ENOLCK;
		break;
	default:
		error = EINVAL;
		break;
	}

	return (error);
}

/*
 * Do NLM_UNSHARE call.
 */
static int
nlm_call_unshare(struct shrlock *shr, struct nlm_host *host,
    struct netobj *fh, int vers)
{
	struct nlm4_shareargs args;
	enum nlm4_stats nlm_err;
	uint32_t xid;
	int error;

	bzero(&args, sizeof (args));
	nlm_init_share(&args.share, shr, fh);

	xid = atomic_inc_32_nv(&nlm_xid);
	args.cookie.n_len = sizeof (xid);
	args.cookie.n_bytes = (char *)&xid;

	for (;;) {
		nlm_rpc_t *rpcp;
		struct nlm4_shareres res;
		enum clnt_stat stat;

		error = nlm_host_get_rpc(host, vers, &rpcp);
		if (error != 0)
			return (ENOLCK);

		bzero(&res, sizeof (res));
		stat = nlm_unshare_rpc(&args, &res, rpcp->nr_handle, vers);
		nlm_host_rele_rpc(host, rpcp);

		error = nlm_map_clnt_stat(stat);
		if (error != 0) {
			if (error == EAGAIN)
				continue;

			return (error);
		}

		DTRACE_PROBE1(unshare__res, enum nlm4_stats, res.stat);
		nlm_err = res.stat;
		xdr_free((xdrproc_t)xdr_nlm4_shareres, (void *)&res);
		if (nlm_err == nlm4_denied_grace_period) {
			error = nlm_host_wait_grace(host);
			if (error != 0)
				return (error);

			continue;
		}

		break;
	}

	switch (nlm_err) {
	case nlm4_granted:
		error = 0;
		break;
	case nlm4_denied:
		error = EAGAIN;
		break;
	case nlm4_denied_nolocks:
		error = ENOLCK;
		break;
	default:
		error = EINVAL;
		break;
	}

	return (error);
}

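/*
 * Fill in the NLM protocol share structure (nlm4_share) from a
 * shrlock and a filehandle, mapping the fcntl share deny and
 * access modes onto the NLM fsm_* and fsa_* constants.
 */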
static void
nlm_init_share(struct nlm4_share *args,
    const struct shrlock *shr, struct netobj *fh)
{

	bzero(args, sizeof (*args));

	args->caller_name = uts_nodename();
	args->fh.n_len = fh->n_len;
	args->fh.n_bytes = fh->n_bytes;
	args->oh.n_len = shr->s_own_len;
	args->oh.n_bytes = (void *)shr->s_owner;

	switch (shr->s_deny) {
	default:
	case F_NODNY:
		args->mode = fsm_DN;
		break;
	case F_RDDNY:
		args->mode = fsm_DR;
		break;
	case F_WRDNY:
		args->mode = fsm_DW;
		break;
	case F_RWDNY:
		args->mode = fsm_DRW;
		break;
	}

	switch (shr->s_access) {
	default:
	case 0:	/* seen with F_UNSHARE */
		args->access = fsa_NONE;
		break;
	case F_RDACC:
		args->access = fsa_R;
		break;
	case F_WRACC:
		args->access = fsa_W;
		break;
	case F_RWACC:
		args->access = fsa_RW;
		break;
	}
}

/*
 * Initialize the filehandle according to the version
 * of NFS the vnode was created on. The version of
 * NLM that can be used with the given NFS version
 * is saved to lm_vers.
 */
static int
nlm_init_fh_by_vp(vnode_t *vp, struct netobj *fh, rpcvers_t *lm_vers)
{
	mntinfo_t *mi = VTOMI(vp);

	/*
	 * Too bad the NFS code doesn't just carry the FH
	 * in a netobj or a netbuf.
	 */
	switch (mi->mi_vers) {
	case NFS_V3:
		/* See nfs3_frlock() */
		*lm_vers = NLM4_VERS;
		fh->n_len = VTOFH3(vp)->fh3_length;
		fh->n_bytes = (char *)&(VTOFH3(vp)->fh3_u.data);
		break;

	case NFS_VERSION:
		/* See nfs_frlock() */
		*lm_vers = NLM_VERS;
		fh->n_len = sizeof (fhandle_t);
		/* LINTED E_BAD_PTR_CAST_ALIGN */
		fh->n_bytes = (char *)VTOFH(vp);
		break;
	default:
		return (ENOSYS);
	}

	return (0);
}

/*
 * Send SIGLOST to the process identified by pid.
 * NOTE: called when NLM decides to remove a lock
 * or a share reservation owned by the process
 * by force.
 */
static void
nlm_send_siglost(pid_t pid)
{
	proc_t *p;

	mutex_enter(&pidlock);
	p = prfind(pid);
	if (p != NULL)
		psignal(p, SIGLOST);

	mutex_exit(&pidlock);
}

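/*
 * Map RPC-level (clnt_stat) failures to errno values.  Note that
 * RPC_TIMEDOUT and RPC_PROGUNAVAIL map to EAGAIN, which the
 * nlm_call_* loops above treat as "retry" (e.g. while the server's
 * lock manager is still coming back up).
 */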
static int
nlm_map_clnt_stat(enum clnt_stat stat)
{
	switch (stat) {
	case RPC_SUCCESS:
		return (0);

	case RPC_TIMEDOUT:
	case RPC_PROGUNAVAIL:
		return (EAGAIN);

	case RPC_INTR:
		return (EINTR);

	default:
		return (EINVAL);
	}
}

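/*
 * Map NLM4 protocol status codes to errno values.
 */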
static int
nlm_map_status(enum nlm4_stats stat)
{
	switch (stat) {
	case nlm4_granted:
		return (0);

	case nlm4_denied:
		return (EAGAIN);

	case nlm4_denied_nolocks:
		return (ENOLCK);

	case nlm4_blocked:
		return (EAGAIN);

	case nlm4_denied_grace_period:
		return (EAGAIN);

	case nlm4_deadlck:
		return (EDEADLK);

	case nlm4_rofs:
		return (EROFS);

	case nlm4_stale_fh:
		return (ESTALE);

	case nlm4_fbig:
		return (EFBIG);

	case nlm4_failed:
		return (EACCES);

	default:
		return (EINVAL);
	}
}