kill tsol ("Trusted Solaris") aka TX ("Trusted Extensions")
unleashed.git: usr/src/uts/common/fs/nfs/nfs_vfsops.c
1 /*
2 * CDDL HEADER START
3 *
4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License (the "License").
6 * You may not use this file except in compliance with the License.
7 *
8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 * or http://www.opensolaris.org/os/licensing.
10 * See the License for the specific language governing permissions
11 * and limitations under the License.
12 *
13 * When distributing Covered Code, include this CDDL HEADER in each
14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 *
19 * CDDL HEADER END
20 */
21 /*
22 * Copyright (c) 1986, 2010, Oracle and/or its affiliates. All rights reserved.
23 *
24 * Copyright (c) 1983,1984,1985,1986,1987,1988,1989 AT&T.
25 * All rights reserved.
26 */
28 #include <sys/param.h>
29 #include <sys/types.h>
30 #include <sys/systm.h>
31 #include <sys/cred.h>
32 #include <sys/vfs.h>
33 #include <sys/vfs_opreg.h>
34 #include <sys/vnode.h>
35 #include <sys/pathname.h>
36 #include <sys/sysmacros.h>
37 #include <sys/kmem.h>
38 #include <sys/mkdev.h>
39 #include <sys/mount.h>
40 #include <sys/mntent.h>
41 #include <sys/statvfs.h>
42 #include <sys/errno.h>
43 #include <sys/debug.h>
44 #include <sys/cmn_err.h>
45 #include <sys/utsname.h>
46 #include <sys/bootconf.h>
47 #include <sys/modctl.h>
48 #include <sys/acl.h>
49 #include <sys/flock.h>
50 #include <sys/policy.h>
51 #include <sys/zone.h>
52 #include <sys/class.h>
53 #include <sys/socket.h>
54 #include <sys/netconfig.h>
55 #include <sys/mntent.h>
57 #include <rpc/types.h>
58 #include <rpc/auth.h>
59 #include <rpc/clnt.h>
61 #include <nfs/nfs.h>
62 #include <nfs/nfs_clnt.h>
63 #include <nfs/rnode.h>
64 #include <nfs/mount.h>
65 #include <nfs/nfs_acl.h>
67 #include <sys/fs_subr.h>
69 /*
70 * From rpcsec module (common/rpcsec).
71 */
72 extern int sec_clnt_loadinfo(struct sec_data *, struct sec_data **, model_t);
73 extern void sec_clnt_freeinfo(struct sec_data *);
75 static int pathconf_copyin(struct nfs_args *, struct pathcnf *);
76 static int pathconf_get(struct mntinfo *, struct nfs_args *);
77 static void pathconf_rele(struct mntinfo *);
79 /*
80 * The order and contents of this structure must be kept in sync with that of
81 * rfsreqcnt_v2_tmpl in nfs_stats.c
82 */
83 static char *rfsnames_v2[] = {
84 "null", "getattr", "setattr", "unused", "lookup", "readlink", "read",
85 "unused", "write", "create", "remove", "rename", "link", "symlink",
86 "mkdir", "rmdir", "readdir", "fsstat"
87 };
89 /*
90 * This table maps from NFS protocol number into call type.
91 * Zero means a "Lookup" type call
92 * One means a "Read" type call
93 * Two means a "Write" type call
94 * This is used to select a default time-out.
95 */
96 static uchar_t call_type_v2[] = {
97 0, 0, 1, 0, 0, 0, 1,
98 0, 2, 2, 2, 2, 2, 2,
99 2, 2, 1, 0
100 };
102 /*
103 * Similar table, but to determine which timer to use
104 * (only real reads and writes!)
105 */
106 static uchar_t timer_type_v2[] = {
107 0, 0, 0, 0, 0, 0, 1,
108 0, 2, 0, 0, 0, 0, 0,
109 0, 0, 1, 0
110 };
112 /*
113 * This table maps from NFS protocol number into a call type
114 * for the semisoft mount option.
115 * Zero means do not repeat operation.
116 * One means repeat.
117 */
118 static uchar_t ss_call_type_v2[] = {
119 0, 0, 1, 0, 0, 0, 0,
120 0, 1, 1, 1, 1, 1, 1,
121 1, 1, 0, 0
122 };
124 /*
125 * nfs vfs operations.
126 */
127 static int nfs_mount(vfs_t *, vnode_t *, struct mounta *, cred_t *);
128 static int nfs_unmount(vfs_t *, int, cred_t *);
129 static int nfs_root(vfs_t *, vnode_t **);
130 static int nfs_statvfs(vfs_t *, struct statvfs64 *);
131 static int nfs_sync(vfs_t *, short, cred_t *);
132 static int nfs_vget(vfs_t *, vnode_t **, fid_t *);
133 static int nfs_mountroot(vfs_t *, whymountroot_t);
134 static void nfs_freevfs(vfs_t *);
136 static int nfsrootvp(vnode_t **, vfs_t *, struct servinfo *,
137 int, cred_t *, zone_t *);
139 /*
140 * Initialize the vfs structure
141 */
143 int nfsfstyp;
144 vfsops_t *nfs_vfsops;
146 /*
147 * Debug variable to check for rdma based
148 * transport startup and cleanup. Controlled
149 * through /etc/system. Off by default.
150 */
151 int rdma_debug = 0;
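/*
 * nfsinit() registers the NFS v2 client with the VFS framework: it installs
 * the vfs operations template below via vfs_setfsops(), builds the vnode
 * operations with vn_make_ops(), and records the assigned file system type
 * index in nfsfstyp for later fsid generation.
 */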
153 int
154 nfsinit(int fstyp, char *name)
155 {
156 static const fs_operation_def_t nfs_vfsops_template[] = {
157 VFSNAME_MOUNT, { .vfs_mount = nfs_mount },
158 VFSNAME_UNMOUNT, { .vfs_unmount = nfs_unmount },
159 VFSNAME_ROOT, { .vfs_root = nfs_root },
160 VFSNAME_STATVFS, { .vfs_statvfs = nfs_statvfs },
161 VFSNAME_SYNC, { .vfs_sync = nfs_sync },
162 VFSNAME_VGET, { .vfs_vget = nfs_vget },
163 VFSNAME_MOUNTROOT, { .vfs_mountroot = nfs_mountroot },
164 VFSNAME_FREEVFS, { .vfs_freevfs = nfs_freevfs },
165 NULL, NULL
166 };
167 int error;
169 error = vfs_setfsops(fstyp, nfs_vfsops_template, &nfs_vfsops);
170 if (error != 0) {
171 zcmn_err(GLOBAL_ZONEID, CE_WARN,
172 "nfsinit: bad vfs ops template");
173 return (error);
174 }
176 error = vn_make_ops(name, nfs_vnodeops_template, &nfs_vnodeops);
177 if (error != 0) {
178 (void) vfs_freevfsops_by_type(fstyp);
179 zcmn_err(GLOBAL_ZONEID, CE_WARN,
180 "nfsinit: bad vnode ops template");
181 return (error);
182 }
185 nfsfstyp = fstyp;
187 return (0);
188 }
190 void
191 nfsfini(void)
192 {
193 }
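/*
 * nfs_free_args() releases every kernel-allocated copy hanging off a
 * struct nfs_args built by nfs_copyin() below (knetconfig strings, pathconf,
 * file handle, hostname, addresses, netname and security data), so the
 * error and remount paths can unwind a partially copied argument set.
 */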
195 static void
196 nfs_free_args(struct nfs_args *nargs, nfs_fhandle *fh)
197 {
199 if (fh)
200 kmem_free(fh, sizeof (*fh));
202 if (nargs->pathconf) {
203 kmem_free(nargs->pathconf, sizeof (struct pathcnf));
204 nargs->pathconf = NULL;
207 if (nargs->knconf) {
208 if (nargs->knconf->knc_protofmly)
209 kmem_free(nargs->knconf->knc_protofmly, KNC_STRSIZE);
210 if (nargs->knconf->knc_proto)
211 kmem_free(nargs->knconf->knc_proto, KNC_STRSIZE);
212 kmem_free(nargs->knconf, sizeof (*nargs->knconf));
213 nargs->knconf = NULL;
216 if (nargs->fh) {
217 kmem_free(nargs->fh, strlen(nargs->fh) + 1);
218 nargs->fh = NULL;
221 if (nargs->hostname) {
222 kmem_free(nargs->hostname, strlen(nargs->hostname) + 1);
223 nargs->hostname = NULL;
226 if (nargs->addr) {
227 if (nargs->addr->buf) {
228 ASSERT(nargs->addr->len);
229 kmem_free(nargs->addr->buf, nargs->addr->len);
231 kmem_free(nargs->addr, sizeof (struct netbuf));
232 nargs->addr = NULL;
235 if (nargs->syncaddr) {
236 ASSERT(nargs->syncaddr->len);
237 if (nargs->syncaddr->buf) {
238 ASSERT(nargs->syncaddr->len);
239 kmem_free(nargs->syncaddr->buf, nargs->syncaddr->len);
241 kmem_free(nargs->syncaddr, sizeof (struct netbuf));
242 nargs->syncaddr = NULL;
245 if (nargs->netname) {
246 kmem_free(nargs->netname, strlen(nargs->netname) + 1);
247 nargs->netname = NULL;
250 if (nargs->nfs_ext_u.nfs_extA.secdata) {
251 sec_clnt_freeinfo(nargs->nfs_ext_u.nfs_extA.secdata);
252 nargs->nfs_ext_u.nfs_extA.secdata = NULL;
253 }
254 }
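/*
 * nfs_copyin() brings the user-level nfs_args into kernel memory piece by
 * piece: the fixed-size argument block, the knetconfig and its strings, the
 * optional pathconf template, the server address, the root file handle, the
 * hostname, and (for NFSMNT_SECURE) the AUTH_DES syncaddr/netname plus any
 * new-style sec_data extension. On failure it frees whatever was copied.
 */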
256 static int
257 nfs_copyin(char *data, int datalen, struct nfs_args *nargs, nfs_fhandle *fh)
260 int error;
261 size_t nlen; /* length of netname */
262 size_t hlen; /* length of hostname */
263 char netname[MAXNETNAMELEN+1]; /* server's netname */
264 struct netbuf addr; /* server's address */
265 struct netbuf syncaddr; /* AUTH_DES time sync addr */
266 struct knetconfig *knconf; /* transport knetconfig structure */
267 struct sec_data *secdata = NULL; /* security data */
268 STRUCT_DECL(nfs_args, args); /* nfs mount arguments */
269 STRUCT_DECL(knetconfig, knconf_tmp);
270 STRUCT_DECL(netbuf, addr_tmp);
271 int flags;
272 struct pathcnf *pc; /* Pathconf */
273 char *p, *pf;
274 char *userbufptr;
277 bzero(nargs, sizeof (*nargs));
279 STRUCT_INIT(args, get_udatamodel());
280 bzero(STRUCT_BUF(args), SIZEOF_STRUCT(nfs_args, DATAMODEL_NATIVE));
281 if (copyin(data, STRUCT_BUF(args), MIN(datalen, STRUCT_SIZE(args))))
282 return (EFAULT);
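/*
 * The STRUCT_DECL/STRUCT_FGET macros hide the ILP32 vs. LP64 layout of
 * nfs_args, so a 32-bit caller on a 64-bit kernel is decoded with the
 * correct offsets; only MIN(datalen, STRUCT_SIZE(args)) bytes are copied
 * above because older callers may pass a shorter, compatible structure.
 */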
284 nargs->wsize = STRUCT_FGET(args, wsize);
285 nargs->rsize = STRUCT_FGET(args, rsize);
286 nargs->timeo = STRUCT_FGET(args, timeo);
287 nargs->retrans = STRUCT_FGET(args, retrans);
288 nargs->acregmin = STRUCT_FGET(args, acregmin);
289 nargs->acregmax = STRUCT_FGET(args, acregmax);
290 nargs->acdirmin = STRUCT_FGET(args, acdirmin);
291 nargs->acdirmax = STRUCT_FGET(args, acdirmax);
293 flags = STRUCT_FGET(args, flags);
294 nargs->flags = flags;
297 addr.buf = NULL;
298 syncaddr.buf = NULL;
300 /*
301 * Allocate space for a knetconfig structure and
302 * its strings and copy in from user-land.
303 */
304 knconf = kmem_zalloc(sizeof (*knconf), KM_SLEEP);
305 STRUCT_INIT(knconf_tmp, get_udatamodel());
306 if (copyin(STRUCT_FGETP(args, knconf), STRUCT_BUF(knconf_tmp),
307 STRUCT_SIZE(knconf_tmp))) {
308 kmem_free(knconf, sizeof (*knconf));
309 return (EFAULT);
312 knconf->knc_semantics = STRUCT_FGET(knconf_tmp, knc_semantics);
313 knconf->knc_protofmly = STRUCT_FGETP(knconf_tmp, knc_protofmly);
314 knconf->knc_proto = STRUCT_FGETP(knconf_tmp, knc_proto);
315 if (get_udatamodel() != DATAMODEL_LP64) {
316 knconf->knc_rdev = expldev(STRUCT_FGET(knconf_tmp, knc_rdev));
317 } else {
318 knconf->knc_rdev = STRUCT_FGET(knconf_tmp, knc_rdev);
321 pf = kmem_alloc(KNC_STRSIZE, KM_SLEEP);
322 p = kmem_alloc(KNC_STRSIZE, KM_SLEEP);
323 error = copyinstr(knconf->knc_protofmly, pf, KNC_STRSIZE, NULL);
324 if (error) {
325 kmem_free(pf, KNC_STRSIZE);
326 kmem_free(p, KNC_STRSIZE);
327 kmem_free(knconf, sizeof (*knconf));
328 return (error);
331 error = copyinstr(knconf->knc_proto, p, KNC_STRSIZE, NULL);
332 if (error) {
333 kmem_free(pf, KNC_STRSIZE);
334 kmem_free(p, KNC_STRSIZE);
335 kmem_free(knconf, sizeof (*knconf));
336 return (error);
340 knconf->knc_protofmly = pf;
341 knconf->knc_proto = p;
343 nargs->knconf = knconf;
345 /* Copyin pathconf if there is one */
346 if (STRUCT_FGETP(args, pathconf) != NULL) {
347 pc = kmem_alloc(sizeof (*pc), KM_SLEEP);
348 error = pathconf_copyin(STRUCT_BUF(args), pc);
349 nargs->pathconf = pc;
350 if (error)
351 goto errout;
354 /*
355 * Get server address
356 */
357 STRUCT_INIT(addr_tmp, get_udatamodel());
358 if (copyin(STRUCT_FGETP(args, addr), STRUCT_BUF(addr_tmp),
359 STRUCT_SIZE(addr_tmp))) {
360 error = EFAULT;
361 goto errout;
363 nargs->addr = kmem_alloc(sizeof (struct netbuf), KM_SLEEP);
364 userbufptr = STRUCT_FGETP(addr_tmp, buf);
365 addr.len = STRUCT_FGET(addr_tmp, len);
366 addr.buf = kmem_alloc(addr.len, KM_SLEEP);
367 addr.maxlen = addr.len;
368 if (copyin(userbufptr, addr.buf, addr.len)) {
369 kmem_free(addr.buf, addr.len);
370 error = EFAULT;
371 goto errout;
373 bcopy(&addr, nargs->addr, sizeof (struct netbuf));
375 /*
376 * Get the root fhandle
377 */
379 if (copyin(STRUCT_FGETP(args, fh), &fh->fh_buf, NFS_FHSIZE)) {
380 error = EFAULT;
381 goto errout;
383 fh->fh_len = NFS_FHSIZE;
385 /*
386 * Get server's hostname
387 */
388 if (flags & NFSMNT_HOSTNAME) {
389 error = copyinstr(STRUCT_FGETP(args, hostname), netname,
390 sizeof (netname), &hlen);
391 if (error)
392 goto errout;
393 nargs->hostname = kmem_zalloc(hlen, KM_SLEEP);
394 (void) strcpy(nargs->hostname, netname);
396 } else {
397 nargs->hostname = NULL;
401 /*
402 * If there are syncaddr and netname data, load them in. This is
403 * to support data needed for NFSV4 when AUTH_DH is the negotiated
404 * flavor via SECINFO. (instead of using MOUNT protocol in V3).
405 */
406 netname[0] = '\0';
407 if (flags & NFSMNT_SECURE) {
408 if (STRUCT_FGETP(args, syncaddr) == NULL) {
409 error = EINVAL;
410 goto errout;
412 /* get syncaddr */
413 STRUCT_INIT(addr_tmp, get_udatamodel());
414 if (copyin(STRUCT_FGETP(args, syncaddr), STRUCT_BUF(addr_tmp),
415 STRUCT_SIZE(addr_tmp))) {
416 error = EINVAL;
417 goto errout;
419 userbufptr = STRUCT_FGETP(addr_tmp, buf);
420 syncaddr.len = STRUCT_FGET(addr_tmp, len);
421 syncaddr.buf = kmem_alloc(syncaddr.len, KM_SLEEP);
422 syncaddr.maxlen = syncaddr.len;
423 if (copyin(userbufptr, syncaddr.buf, syncaddr.len)) {
424 kmem_free(syncaddr.buf, syncaddr.len);
425 error = EFAULT;
426 goto errout;
429 nargs->syncaddr = kmem_alloc(sizeof (struct netbuf), KM_SLEEP);
430 bcopy(&syncaddr, nargs->syncaddr, sizeof (struct netbuf));
432 ASSERT(STRUCT_FGETP(args, netname));
433 if (copyinstr(STRUCT_FGETP(args, netname), netname,
434 sizeof (netname), &nlen)) {
435 error = EFAULT;
436 goto errout;
439 netname[nlen] = '\0';
440 nargs->netname = kmem_zalloc(nlen, KM_SLEEP);
441 (void) strcpy(nargs->netname, netname);
444 /*
445 * Get the extension data which has the security data structure.
446 * This includes data for AUTH_SYS as well.
447 */
448 if (flags & NFSMNT_NEWARGS) {
449 nargs->nfs_args_ext = STRUCT_FGET(args, nfs_args_ext);
450 if (nargs->nfs_args_ext == NFS_ARGS_EXTA ||
451 nargs->nfs_args_ext == NFS_ARGS_EXTB) {
452 /*
453 * Indicating the application is using the new
454 * sec_data structure to pass in the security
455 * data.
456 */
457 if (STRUCT_FGETP(args,
458 nfs_ext_u.nfs_extA.secdata) != NULL) {
459 error = sec_clnt_loadinfo(
460 (struct sec_data *)STRUCT_FGETP(args,
461 nfs_ext_u.nfs_extA.secdata), &secdata,
462 get_udatamodel());
464 nargs->nfs_ext_u.nfs_extA.secdata = secdata;
468 if (error)
469 goto errout;
471 /*
472 * Failover support:
473 *
474 * We may have a linked list of nfs_args structures,
475 * which means the user is looking for failover. If
476 * the mount is either not "read-only" or "soft",
477 * we want to bail out with EINVAL.
478 */
479 if (nargs->nfs_args_ext == NFS_ARGS_EXTB)
480 nargs->nfs_ext_u.nfs_extB.next =
481 STRUCT_FGETP(args, nfs_ext_u.nfs_extB.next);
483 errout:
484 if (error)
485 nfs_free_args(nargs, fh);
487 return (error);
491 /*
492 * nfs mount vfsop
493 * Set up mount info record and attach it to vfs struct.
494 */
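/*
 * This is the VFS_MOUNT entry point reached through the mount(2) system
 * call. A userland caller (typically mount_nfs or automountd) fills in a
 * struct nfs_args plus knetconfig and issues, roughly,
 *	mount("host:/export", "/mnt", MS_DATA, "nfs",
 *	    (char *)&nfs_args, sizeof (nfs_args));
 * (an illustrative sketch only, not the exact library code); uap->dataptr
 * and uap->datalen carry that argument block into this function.
 */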
495 static int
496 nfs_mount(vfs_t *vfsp, vnode_t *mvp, struct mounta *uap, cred_t *cr)
498 char *data = uap->dataptr;
499 int error;
500 vnode_t *rtvp; /* the server's root */
501 mntinfo_t *mi; /* mount info, pointed at by vfs */
502 size_t nlen; /* length of netname */
503 struct knetconfig *knconf; /* transport knetconfig structure */
504 struct knetconfig *rdma_knconf; /* rdma transport structure */
505 rnode_t *rp;
506 struct servinfo *svp; /* nfs server info */
507 struct servinfo *svp_tail = NULL; /* previous nfs server info */
508 struct servinfo *svp_head; /* first nfs server info */
509 struct servinfo *svp_2ndlast; /* 2nd last in the server info list */
510 struct sec_data *secdata; /* security data */
511 struct nfs_args *args = NULL;
512 int flags, addr_type;
513 zone_t *zone = nfs_zone();
514 zone_t *mntzone = NULL;
515 nfs_fhandle *fhandle = NULL;
517 if ((error = secpolicy_fs_mount(cr, mvp, vfsp)) != 0)
518 return (error);
520 if (mvp->v_type != VDIR)
521 return (ENOTDIR);
523 /*
524 * get arguments
525 *
526 * nfs_args is now versioned and is extensible, so
527 * uap->datalen might be different from sizeof (args)
528 * in a compatible situation.
529 */
530 more:
532 if (!(uap->flags & MS_SYSSPACE)) {
533 if (args == NULL)
534 args = kmem_alloc(sizeof (struct nfs_args), KM_SLEEP);
535 else {
536 nfs_free_args(args, fhandle);
537 fhandle = NULL;
539 if (fhandle == NULL)
540 fhandle = kmem_zalloc(sizeof (nfs_fhandle), KM_SLEEP);
541 error = nfs_copyin(data, uap->datalen, args, fhandle);
542 if (error) {
543 if (args)
544 kmem_free(args, sizeof (*args));
545 return (error);
547 } else {
548 args = (struct nfs_args *)data;
549 fhandle = (nfs_fhandle *)args->fh;
553 flags = args->flags;
555 if (uap->flags & MS_REMOUNT) {
556 size_t n;
557 char name[FSTYPSZ];
559 if (uap->flags & MS_SYSSPACE)
560 error = copystr(uap->fstype, name, FSTYPSZ, &n);
561 else
562 error = copyinstr(uap->fstype, name, FSTYPSZ, &n);
564 if (error) {
565 if (error == ENAMETOOLONG)
566 return (EINVAL);
567 return (error);
571 /*
572 * This check is to ensure that the request is a
573 * genuine nfs remount request.
574 */
576 if (strncmp(name, "nfs", 3) != 0)
577 return (EINVAL);
579 /*
580 * If the request changes the locking type, disallow the
581 * remount,
582 * because it's questionable whether we can transfer the
583 * locking state correctly.
584 *
585 * Remounts need to save the pathconf information.
586 * Part of the infamous static kludge.
587 */
589 if ((mi = VFTOMI(vfsp)) != NULL) {
590 uint_t new_mi_llock;
591 uint_t old_mi_llock;
593 new_mi_llock = (flags & NFSMNT_LLOCK) ? 1 : 0;
594 old_mi_llock = (mi->mi_flags & MI_LLOCK) ? 1 : 0;
595 if (old_mi_llock != new_mi_llock)
596 return (EBUSY);
598 error = pathconf_get((struct mntinfo *)vfsp->vfs_data, args);
600 if (!(uap->flags & MS_SYSSPACE)) {
601 nfs_free_args(args, fhandle);
602 kmem_free(args, sizeof (*args));
605 return (error);
608 mutex_enter(&mvp->v_lock);
609 if (!(uap->flags & MS_OVERLAY) &&
610 (mvp->v_count != 1 || (mvp->v_flag & VROOT))) {
611 mutex_exit(&mvp->v_lock);
612 if (!(uap->flags & MS_SYSSPACE)) {
613 nfs_free_args(args, fhandle);
614 kmem_free(args, sizeof (*args));
616 return (EBUSY);
618 mutex_exit(&mvp->v_lock);
620 /* make sure things are zeroed for errout: */
621 rtvp = NULL;
622 mi = NULL;
623 secdata = NULL;
625 /*
626 * A valid knetconfig structure is required.
627 */
628 if (!(flags & NFSMNT_KNCONF)) {
629 if (!(uap->flags & MS_SYSSPACE)) {
630 nfs_free_args(args, fhandle);
631 kmem_free(args, sizeof (*args));
633 return (EINVAL);
636 if ((strlen(args->knconf->knc_protofmly) >= KNC_STRSIZE) ||
637 (strlen(args->knconf->knc_proto) >= KNC_STRSIZE)) {
638 if (!(uap->flags & MS_SYSSPACE)) {
639 nfs_free_args(args, fhandle);
640 kmem_free(args, sizeof (*args));
642 return (EINVAL);
646 /*
647 * Allocate a servinfo struct.
648 */
649 svp = kmem_zalloc(sizeof (*svp), KM_SLEEP);
650 mutex_init(&svp->sv_lock, NULL, MUTEX_DEFAULT, NULL);
651 if (svp_tail) {
652 svp_2ndlast = svp_tail;
653 svp_tail->sv_next = svp;
654 } else {
655 svp_head = svp;
656 svp_2ndlast = svp;
659 svp_tail = svp;
661 /*
662 * Get knetconfig and server address
663 */
664 svp->sv_knconf = args->knconf;
665 args->knconf = NULL;
667 if (args->addr == NULL || args->addr->buf == NULL) {
668 error = EINVAL;
669 goto errout;
672 svp->sv_addr.maxlen = args->addr->maxlen;
673 svp->sv_addr.len = args->addr->len;
674 svp->sv_addr.buf = args->addr->buf;
675 args->addr->buf = NULL;
677 /*
678 * Get the root fhandle
679 */
680 ASSERT(fhandle);
682 bcopy(&fhandle->fh_buf, &svp->sv_fhandle.fh_buf, fhandle->fh_len);
683 svp->sv_fhandle.fh_len = fhandle->fh_len;
685 /*
686 * Get server's hostname
687 */
688 if (flags & NFSMNT_HOSTNAME) {
689 if (args->hostname == NULL) {
690 error = EINVAL;
691 goto errout;
693 svp->sv_hostnamelen = strlen(args->hostname) + 1;
694 svp->sv_hostname = args->hostname;
695 args->hostname = NULL;
696 } else {
697 char *p = "unknown-host";
698 svp->sv_hostnamelen = strlen(p) + 1;
699 svp->sv_hostname = kmem_zalloc(svp->sv_hostnamelen, KM_SLEEP);
700 (void) strcpy(svp->sv_hostname, p);
704 /*
705 * RDMA MOUNT SUPPORT FOR NFS v2:
706 * Establish whether it is possible to use RDMA; if so, overload the
707 * knconf with an RDMA-specific knconf and free the original.
708 */
709 if ((flags & NFSMNT_TRYRDMA) || (flags & NFSMNT_DORDMA)) {
710 /*
711 * Determine the addr type for RDMA, IPv4 or v6.
712 */
713 if (strcmp(svp->sv_knconf->knc_protofmly, NC_INET) == 0)
714 addr_type = AF_INET;
715 else if (strcmp(svp->sv_knconf->knc_protofmly, NC_INET6) == 0)
716 addr_type = AF_INET6;
718 if (rdma_reachable(addr_type, &svp->sv_addr,
719 &rdma_knconf) == 0) {
720 /*
721 * If successful, hijack the original knconf and
722 * replace it with a new one, depending on the flags.
723 */
724 svp->sv_origknconf = svp->sv_knconf;
725 svp->sv_knconf = rdma_knconf;
726 knconf = rdma_knconf;
727 } else {
728 if (flags & NFSMNT_TRYRDMA) {
729 #ifdef DEBUG
730 if (rdma_debug)
731 zcmn_err(getzoneid(), CE_WARN,
732 "no RDMA onboard, revert\n");
733 #endif
736 if (flags & NFSMNT_DORDMA) {
737 /*
738 * If proto=rdma is specified and no RDMA
739 * path to this server is available then
740 * ditch this server.
741 * This is not included in the mountable
742 * server list or the replica list.
743 * Check if more servers are specified;
744 * Failover case, otherwise bail out of mount.
745 */
746 if (args->nfs_args_ext == NFS_ARGS_EXTB &&
747 args->nfs_ext_u.nfs_extB.next != NULL) {
748 data = (char *)
749 args->nfs_ext_u.nfs_extB.next;
750 if (uap->flags & MS_RDONLY &&
751 !(flags & NFSMNT_SOFT)) {
752 if (svp_head->sv_next == NULL) {
753 svp_tail = NULL;
754 svp_2ndlast = NULL;
755 sv_free(svp_head);
756 goto more;
757 } else {
758 svp_tail = svp_2ndlast;
759 svp_2ndlast->sv_next =
760 NULL;
761 sv_free(svp);
762 goto more;
765 } else {
766 /*
767 * This is the last server specified
768 * in the nfs_args list passed down
769 * and it's not rdma capable.
770 */
771 if (svp_head->sv_next == NULL) {
772 /*
773 * Is this the only one
774 */
775 error = EINVAL;
776 #ifdef DEBUG
777 if (rdma_debug)
778 zcmn_err(getzoneid(),
779 CE_WARN,
780 "No RDMA srv");
781 #endif
782 goto errout;
783 } else {
784 /*
785 * There is a list, since some
786 * servers specified before
787 * this passed all requirements
788 */
789 svp_tail = svp_2ndlast;
790 svp_2ndlast->sv_next = NULL;
791 sv_free(svp);
792 goto proceed;
799 /*
800 * Get the extension data which has the new security data structure.
801 */
802 if (flags & NFSMNT_NEWARGS) {
803 switch (args->nfs_args_ext) {
804 case NFS_ARGS_EXTA:
805 case NFS_ARGS_EXTB:
806 /*
807 * Indicating the application is using the new
808 * sec_data structure to pass in the security
809 * data.
810 */
811 secdata = args->nfs_ext_u.nfs_extA.secdata;
812 if (secdata == NULL) {
813 error = EINVAL;
814 } else {
815 /*
816 * Need to validate the flavor here if
817 * sysspace; userspace was already
818 * validated in the nfs_copyin function.
819 */
820 switch (secdata->rpcflavor) {
821 case AUTH_NONE:
822 case AUTH_UNIX:
823 case AUTH_LOOPBACK:
824 case AUTH_DES:
825 case RPCSEC_GSS:
826 break;
827 default:
828 error = EINVAL;
829 goto errout;
832 args->nfs_ext_u.nfs_extA.secdata = NULL;
833 break;
835 default:
836 error = EINVAL;
837 break;
839 } else if (flags & NFSMNT_SECURE) {
840 /*
841 * Keep this for backward compatibility to support
842 * NFSMNT_SECURE/NFSMNT_RPCTIMESYNC flags.
843 */
844 if (args->syncaddr == NULL || args->syncaddr->buf == NULL) {
845 error = EINVAL;
846 goto errout;
849 /*
850 * get time sync address.
851 */
852 if (args->syncaddr == NULL) {
853 error = EFAULT;
854 goto errout;
857 /*
858 * Move security related data to the sec_data structure.
859 */
860 {
861 dh_k4_clntdata_t *data;
862 char *pf, *p;
864 secdata = kmem_alloc(sizeof (*secdata), KM_SLEEP);
865 if (flags & NFSMNT_RPCTIMESYNC)
866 secdata->flags |= AUTH_F_RPCTIMESYNC;
867 data = kmem_alloc(sizeof (*data), KM_SLEEP);
868 bcopy(args->syncaddr, &data->syncaddr,
869 sizeof (*args->syncaddr));
872 /*
873 * duplicate the knconf information for the
874 * new opaque data.
875 */
876 data->knconf = kmem_alloc(sizeof (*knconf), KM_SLEEP);
877 *data->knconf = *knconf;
878 pf = kmem_alloc(KNC_STRSIZE, KM_SLEEP);
879 p = kmem_alloc(KNC_STRSIZE, KM_SLEEP);
880 bcopy(knconf->knc_protofmly, pf, KNC_STRSIZE);
881 bcopy(knconf->knc_proto, p, KNC_STRSIZE);
882 data->knconf->knc_protofmly = pf;
883 data->knconf->knc_proto = p;
885 /* move server netname to the sec_data structure */
886 nlen = strlen(args->hostname) + 1;
887 if (nlen != 0) {
888 data->netname = kmem_alloc(nlen, KM_SLEEP);
889 bcopy(args->hostname, data->netname, nlen);
890 data->netnamelen = (int)nlen;
892 secdata->secmod = secdata->rpcflavor = AUTH_DES;
893 secdata->data = (caddr_t)data;
895 } else {
896 secdata = kmem_alloc(sizeof (*secdata), KM_SLEEP);
897 secdata->secmod = secdata->rpcflavor = AUTH_UNIX;
898 secdata->data = NULL;
900 svp->sv_secdata = secdata;
902 /*
903 * See bug 1180236.
904 * If mount secure failed, we will fall back to AUTH_NONE
905 * and try again. nfs3rootvp() will turn this back off.
906 *
907 * The NFS Version 2 mount uses GETATTR and STATFS procedures.
908 * The server does not care if these procedures have the proper
909 * authentication flavor, so if mount retries using AUTH_NONE
910 * that does not require a credential setup for root then the
911 * automounter would work without requiring root to be
912 * keylogged into AUTH_DES.
913 */
914 if (secdata->rpcflavor != AUTH_UNIX &&
915 secdata->rpcflavor != AUTH_LOOPBACK)
916 secdata->flags |= AUTH_F_TRYNONE;
918 /*
919 * Failover support:
920 *
921 * We may have a linked list of nfs_args structures,
922 * which means the user is looking for failover. If
923 * the mount is either not "read-only" or "soft",
924 * we want to bail out with EINVAL.
925 */
926 if (args->nfs_args_ext == NFS_ARGS_EXTB &&
927 args->nfs_ext_u.nfs_extB.next != NULL) {
928 if (uap->flags & MS_RDONLY && !(flags & NFSMNT_SOFT)) {
929 data = (char *)args->nfs_ext_u.nfs_extB.next;
930 goto more;
932 error = EINVAL;
933 goto errout;
936 /*
937 * Determine the zone we're being mounted into.
938 */
939 zone_hold(mntzone = zone); /* start with this assumption */
940 if (getzoneid() == GLOBAL_ZONEID) {
941 zone_rele(mntzone);
942 mntzone = zone_find_by_path(refstr_value(vfsp->vfs_mntpt));
943 ASSERT(mntzone != NULL);
944 if (mntzone != zone) {
945 error = EBUSY;
946 goto errout;
950 /*
951 * Stop the mount from going any further if the zone is going away.
952 */
953 if (zone_status_get(mntzone) >= ZONE_IS_SHUTTING_DOWN) {
954 error = EBUSY;
955 goto errout;
958 /*
959 * Get root vnode.
960 */
961 proceed:
962 error = nfsrootvp(&rtvp, vfsp, svp_head, flags, cr, mntzone);
964 if (error)
965 goto errout;
967 /*
968 * Set option fields in the mount info record
969 */
970 mi = VTOMI(rtvp);
972 if (svp_head->sv_next)
973 mi->mi_flags |= MI_LLOCK;
975 error = nfs_setopts(rtvp, DATAMODEL_NATIVE, args);
976 if (!error) {
977 /* static pathconf kludge */
978 error = pathconf_get(mi, args);
981 errout:
982 if (rtvp != NULL) {
983 if (error) {
984 rp = VTOR(rtvp);
985 if (rp->r_flags & RHASHED)
986 rp_rmhash(rp);
988 VN_RELE(rtvp);
991 if (error) {
992 sv_free(svp_head);
993 if (mi != NULL) {
994 nfs_async_stop(vfsp);
995 nfs_async_manager_stop(vfsp);
996 if (mi->mi_io_kstats) {
997 kstat_delete(mi->mi_io_kstats);
998 mi->mi_io_kstats = NULL;
1000 if (mi->mi_ro_kstats) {
1001 kstat_delete(mi->mi_ro_kstats);
1002 mi->mi_ro_kstats = NULL;
1004 nfs_free_mi(mi);
1008 if (!(uap->flags & MS_SYSSPACE)) {
1009 nfs_free_args(args, fhandle);
1010 kmem_free(args, sizeof (*args));
1013 if (mntzone != NULL)
1014 zone_rele(mntzone);
1016 return (error);
1019 /*
1020 * The pathconf information is kept on a linked list of kmem_alloc'ed
1021 * structs. We search the list & add a new struct iff there is no other
1022 * struct with the same information.
1023 * See sys/pathconf.h for ``the rest of the story.''
1024 */
1025 static struct pathcnf *allpc = NULL;
1027 static int
1028 pathconf_copyin(struct nfs_args *args, struct pathcnf *pc)
1030 STRUCT_DECL(pathcnf, pc_tmp);
1031 STRUCT_HANDLE(nfs_args, ap);
1032 int i;
1033 model_t model;
1035 model = get_udatamodel();
1036 STRUCT_INIT(pc_tmp, model);
1037 STRUCT_SET_HANDLE(ap, model, args);
1039 if ((STRUCT_FGET(ap, flags) & NFSMNT_POSIX) &&
1040 STRUCT_FGETP(ap, pathconf) != NULL) {
1041 if (copyin(STRUCT_FGETP(ap, pathconf), STRUCT_BUF(pc_tmp),
1042 STRUCT_SIZE(pc_tmp)))
1043 return (EFAULT);
1044 if (_PC_ISSET(_PC_ERROR, STRUCT_FGET(pc_tmp, pc_mask)))
1045 return (EINVAL);
1047 pc->pc_link_max = STRUCT_FGET(pc_tmp, pc_link_max);
1048 pc->pc_max_canon = STRUCT_FGET(pc_tmp, pc_max_canon);
1049 pc->pc_max_input = STRUCT_FGET(pc_tmp, pc_max_input);
1050 pc->pc_name_max = STRUCT_FGET(pc_tmp, pc_name_max);
1051 pc->pc_path_max = STRUCT_FGET(pc_tmp, pc_path_max);
1052 pc->pc_pipe_buf = STRUCT_FGET(pc_tmp, pc_pipe_buf);
1053 pc->pc_vdisable = STRUCT_FGET(pc_tmp, pc_vdisable);
1054 pc->pc_xxx = STRUCT_FGET(pc_tmp, pc_xxx);
1055 for (i = 0; i < _PC_N; i++)
1056 pc->pc_mask[i] = STRUCT_FGET(pc_tmp, pc_mask[i]);
1058 return (0);
1059 }
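/*
 * pathconf_get() attaches pathconf information to the mount: if an entry
 * with identical values already exists on the global allpc list it is
 * shared and its reference count bumped, otherwise a new entry is copied
 * from the mount arguments and linked onto the list.
 */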
1061 static int
1062 pathconf_get(struct mntinfo *mi, struct nfs_args *args)
1064 struct pathcnf *p, *pc;
1066 pc = args->pathconf;
1067 if (mi->mi_pathconf != NULL) {
1068 pathconf_rele(mi);
1069 mi->mi_pathconf = NULL;
1072 if (args->flags & NFSMNT_POSIX && args->pathconf != NULL) {
1073 if (_PC_ISSET(_PC_ERROR, pc->pc_mask))
1074 return (EINVAL);
1076 for (p = allpc; p != NULL; p = p->pc_next) {
1077 if (PCCMP(p, pc) == 0)
1078 break;
1080 if (p != NULL) {
1081 mi->mi_pathconf = p;
1082 p->pc_refcnt++;
1083 } else {
1084 p = kmem_alloc(sizeof (*p), KM_SLEEP);
1085 bcopy(pc, p, sizeof (struct pathcnf));
1086 p->pc_next = allpc;
1087 p->pc_refcnt = 1;
1088 allpc = mi->mi_pathconf = p;
1091 return (0);
1094 /*
1095 * release the static pathconf information
1096 */
1097 static void
1098 pathconf_rele(struct mntinfo *mi)
1100 if (mi->mi_pathconf != NULL) {
1101 if (--mi->mi_pathconf->pc_refcnt == 0) {
1102 struct pathcnf *p;
1103 struct pathcnf *p2;
1105 p2 = p = allpc;
1106 while (p != NULL && p != mi->mi_pathconf) {
1107 p2 = p;
1108 p = p->pc_next;
1110 if (p == NULL) {
1111 panic("mi->pathconf");
1112 /*NOTREACHED*/
1114 if (p == allpc)
1115 allpc = p->pc_next;
1116 else
1117 p2->pc_next = p->pc_next;
1118 kmem_free(p, sizeof (*p));
1119 mi->mi_pathconf = NULL;
1124 static int nfs_dynamic = 1; /* global variable to enable dynamic retrans. */
1125 static ushort_t nfs_max_threads = 8; /* max number of active async threads */
1126 static uint_t nfs_async_clusters = 1; /* # of reqs from each async queue */
1127 static uint_t nfs_cots_timeo = NFS_COTS_TIMEO;
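/*
 * nfsrootvp() builds the mntinfo_t for a new mount: it fills in timeouts,
 * retransmit counts and the v2 stats/call tables, assigns a unique dev_t,
 * creates the root rnode from the server's file handle, fetches its
 * attributes, and polls every server's STATFS to pick the smallest transfer
 * size before starting the async manager thread and the mount kstats.
 */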
1129 static int
1130 nfsrootvp(vnode_t **rtvpp, vfs_t *vfsp, struct servinfo *svp,
1131 int flags, cred_t *cr, zone_t *zone)
1133 vnode_t *rtvp;
1134 mntinfo_t *mi;
1135 dev_t nfs_dev;
1136 struct vattr va;
1137 int error;
1138 rnode_t *rp;
1139 int i;
1140 struct nfs_stats *nfsstatsp;
1141 cred_t *lcr = NULL, *tcr = cr;
1143 nfsstatsp = zone_getspecific(nfsstat_zone_key, nfs_zone());
1144 ASSERT(nfsstatsp != NULL);
1146 /*
1147 * Create a mount record and link it to the vfs struct.
1148 */
1149 mi = kmem_zalloc(sizeof (*mi), KM_SLEEP);
1150 mutex_init(&mi->mi_lock, NULL, MUTEX_DEFAULT, NULL);
1151 mutex_init(&mi->mi_remap_lock, NULL, MUTEX_DEFAULT, NULL);
1152 mi->mi_flags = MI_ACL | MI_EXTATTR;
1153 if (!(flags & NFSMNT_SOFT))
1154 mi->mi_flags |= MI_HARD;
1155 if ((flags & NFSMNT_SEMISOFT))
1156 mi->mi_flags |= MI_SEMISOFT;
1157 if ((flags & NFSMNT_NOPRINT))
1158 mi->mi_flags |= MI_NOPRINT;
1159 if (flags & NFSMNT_INT)
1160 mi->mi_flags |= MI_INT;
1161 mi->mi_retrans = NFS_RETRIES;
1162 if (svp->sv_knconf->knc_semantics == NC_TPI_COTS_ORD ||
1163 svp->sv_knconf->knc_semantics == NC_TPI_COTS)
1164 mi->mi_timeo = nfs_cots_timeo;
1165 else
1166 mi->mi_timeo = NFS_TIMEO;
1167 mi->mi_prog = NFS_PROGRAM;
1168 mi->mi_vers = NFS_VERSION;
1169 mi->mi_rfsnames = rfsnames_v2;
1170 mi->mi_reqs = nfsstatsp->nfs_stats_v2.rfsreqcnt_ptr;
1171 mi->mi_call_type = call_type_v2;
1172 mi->mi_ss_call_type = ss_call_type_v2;
1173 mi->mi_timer_type = timer_type_v2;
1174 mi->mi_aclnames = aclnames_v2;
1175 mi->mi_aclreqs = nfsstatsp->nfs_stats_v2.aclreqcnt_ptr;
1176 mi->mi_acl_call_type = acl_call_type_v2;
1177 mi->mi_acl_ss_call_type = acl_ss_call_type_v2;
1178 mi->mi_acl_timer_type = acl_timer_type_v2;
1179 cv_init(&mi->mi_failover_cv, NULL, CV_DEFAULT, NULL);
1180 mi->mi_servers = svp;
1181 mi->mi_curr_serv = svp;
1182 mi->mi_acregmin = SEC2HR(ACREGMIN);
1183 mi->mi_acregmax = SEC2HR(ACREGMAX);
1184 mi->mi_acdirmin = SEC2HR(ACDIRMIN);
1185 mi->mi_acdirmax = SEC2HR(ACDIRMAX);
1187 if (nfs_dynamic)
1188 mi->mi_flags |= MI_DYNAMIC;
1190 if (flags & NFSMNT_DIRECTIO)
1191 mi->mi_flags |= MI_DIRECTIO;
1193 /*
1194 * Make a vfs struct for nfs. We do this here instead of below
1195 * because rtvp needs a vfs before we can do a getattr on it.
1196 *
1197 * Assign a unique device id to the mount
1198 */
1199 mutex_enter(&nfs_minor_lock);
1200 do {
1201 nfs_minor = (nfs_minor + 1) & MAXMIN32;
1202 nfs_dev = makedevice(nfs_major, nfs_minor);
1203 } while (vfs_devismounted(nfs_dev));
1204 mutex_exit(&nfs_minor_lock);
1206 vfsp->vfs_dev = nfs_dev;
1207 vfs_make_fsid(&vfsp->vfs_fsid, nfs_dev, nfsfstyp);
1208 vfsp->vfs_data = (caddr_t)mi;
1209 vfsp->vfs_fstype = nfsfstyp;
1210 vfsp->vfs_bsize = NFS_MAXDATA;
1212 /*
1213 * Initialize fields used to support async putpage operations.
1214 */
1215 for (i = 0; i < NFS_ASYNC_TYPES; i++)
1216 mi->mi_async_clusters[i] = nfs_async_clusters;
1217 mi->mi_async_init_clusters = nfs_async_clusters;
1218 mi->mi_async_curr[NFS_ASYNC_QUEUE] =
1219 mi->mi_async_curr[NFS_ASYNC_PGOPS_QUEUE] = &mi->mi_async_reqs[0];
1220 mi->mi_max_threads = nfs_max_threads;
1221 mutex_init(&mi->mi_async_lock, NULL, MUTEX_DEFAULT, NULL);
1222 cv_init(&mi->mi_async_reqs_cv, NULL, CV_DEFAULT, NULL);
1223 cv_init(&mi->mi_async_work_cv[NFS_ASYNC_QUEUE], NULL, CV_DEFAULT, NULL);
1224 cv_init(&mi->mi_async_work_cv[NFS_ASYNC_PGOPS_QUEUE], NULL,
1225 CV_DEFAULT, NULL);
1226 cv_init(&mi->mi_async_cv, NULL, CV_DEFAULT, NULL);
1228 mi->mi_vfsp = vfsp;
1229 mi->mi_zone = zone;
1230 zone_init_ref(&mi->mi_zone_ref);
1231 zone_hold_ref(zone, &mi->mi_zone_ref, ZONE_REF_NFS);
1232 nfs_mi_zonelist_add(mi);
1234 /*
1235 * Make the root vnode, use it to get attributes,
1236 * then remake it with the attributes.
1237 */
1238 rtvp = makenfsnode((fhandle_t *)svp->sv_fhandle.fh_buf,
1239 NULL, vfsp, gethrtime(), cr, NULL, NULL);
1241 va.va_mask = AT_ALL;
1243 /*
1244 * If the uid is set then set the creds for secure mounts
1245 * by proxy processes such as automountd.
1246 */
1247 if (svp->sv_secdata->uid != 0 &&
1248 svp->sv_secdata->rpcflavor == RPCSEC_GSS) {
1249 lcr = crdup(cr);
1250 (void) crsetugid(lcr, svp->sv_secdata->uid, crgetgid(cr));
1251 tcr = lcr;
1254 error = nfsgetattr(rtvp, &va, tcr);
1255 if (error)
1256 goto bad;
1257 rtvp->v_type = va.va_type;
1259 /*
1260 * Poll every server to get the filesystem stats; we're
1261 * only interested in the server's transfer size, and we
1262 * want the minimum.
1263 *
1264 * While we're looping, we'll turn off AUTH_F_TRYNONE,
1265 * which is only for the mount operation.
1266 */
1268 mi->mi_tsize = MIN(NFS_MAXDATA, nfstsize());
1269 mi->mi_stsize = MIN(NFS_MAXDATA, nfstsize());
1271 for (svp = mi->mi_servers; svp != NULL; svp = svp->sv_next) {
1272 struct nfsstatfs fs;
1273 int douprintf;
1275 douprintf = 1;
1276 mi->mi_curr_serv = svp;
1278 error = rfs2call(mi, RFS_STATFS, xdr_fhandle,
1279 (caddr_t)svp->sv_fhandle.fh_buf, xdr_statfs, (caddr_t)&fs,
1280 tcr, &douprintf, &fs.fs_status, 0, NULL);
1281 if (error)
1282 goto bad;
1283 mi->mi_stsize = MIN(mi->mi_stsize, fs.fs_tsize);
1284 svp->sv_secdata->flags &= ~AUTH_F_TRYNONE;
1286 mi->mi_curr_serv = mi->mi_servers;
1287 mi->mi_curread = mi->mi_tsize;
1288 mi->mi_curwrite = mi->mi_stsize;
1290 /*
1291 * Start the manager thread responsible for handling async worker
1292 * threads.
1293 */
1294 VFS_HOLD(vfsp); /* add reference for thread */
1295 mi->mi_manager_thread = zthread_create(NULL, 0, nfs_async_manager,
1296 vfsp, 0, minclsyspri);
1297 ASSERT(mi->mi_manager_thread != NULL);
1299 /*
1300 * Initialize kstats
1301 */
1302 nfs_mnt_kstat_init(vfsp);
1304 mi->mi_type = rtvp->v_type;
1306 *rtvpp = rtvp;
1307 if (lcr != NULL)
1308 crfree(lcr);
1310 return (0);
1311 bad:
1312 /*
1313 * An error occurred somewhere, need to clean up...
1314 * We need to release our reference to the root vnode and
1315 * destroy the mntinfo struct that we just created.
1316 */
1317 if (lcr != NULL)
1318 crfree(lcr);
1319 rp = VTOR(rtvp);
1320 if (rp->r_flags & RHASHED)
1321 rp_rmhash(rp);
1322 VN_RELE(rtvp);
1323 nfs_async_stop(vfsp);
1324 nfs_async_manager_stop(vfsp);
1325 if (mi->mi_io_kstats) {
1326 kstat_delete(mi->mi_io_kstats);
1327 mi->mi_io_kstats = NULL;
1329 if (mi->mi_ro_kstats) {
1330 kstat_delete(mi->mi_ro_kstats);
1331 mi->mi_ro_kstats = NULL;
1333 nfs_free_mi(mi);
1334 *rtvpp = NULL;
1335 return (error);
1338 /*
1339 * vfs operations
1340 */
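/*
 * nfs_unmount() has two paths: a forced unmount (MS_FORCE) marks the vfs
 * unmounted, shuts down the async threads and tears down the rnode table
 * immediately, while a normal unmount first drains asynchronous putpage
 * work and fails with EBUSY if any rnodes are still active.
 */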
1341 static int
1342 nfs_unmount(vfs_t *vfsp, int flag, cred_t *cr)
1344 mntinfo_t *mi;
1345 ushort_t omax;
1347 if (secpolicy_fs_unmount(cr, vfsp) != 0)
1348 return (EPERM);
1350 mi = VFTOMI(vfsp);
1351 if (flag & MS_FORCE) {
1353 vfsp->vfs_flag |= VFS_UNMOUNTED;
1355 /*
1356 * We are about to stop the async manager.
1357 * Let every one know not to schedule any
1358 * more async requests.
1359 */
1360 mutex_enter(&mi->mi_async_lock);
1361 mi->mi_max_threads = 0;
1362 NFS_WAKEALL_ASYNC_WORKERS(mi->mi_async_work_cv);
1363 mutex_exit(&mi->mi_async_lock);
1365 /*
1366 * We need to stop the manager thread explicitly; the worker
1367 * threads can time out and exit on their own.
1368 */
1369 nfs_async_manager_stop(vfsp);
1370 destroy_rtable(vfsp, cr);
1371 if (mi->mi_io_kstats) {
1372 kstat_delete(mi->mi_io_kstats);
1373 mi->mi_io_kstats = NULL;
1375 if (mi->mi_ro_kstats) {
1376 kstat_delete(mi->mi_ro_kstats);
1377 mi->mi_ro_kstats = NULL;
1379 return (0);
1380 }
1381 /*
1382 * Wait until all asynchronous putpage operations on
1383 * this file system are complete before flushing rnodes
1384 * from the cache.
1385 */
1386 omax = mi->mi_max_threads;
1387 if (nfs_async_stop_sig(vfsp)) {
1388 return (EINTR);
1390 rflush(vfsp, cr);
1391 /*
1392 * If there are any active vnodes on this file system,
1393 * then the file system is busy and can't be umounted.
1394 */
1395 if (check_rtable(vfsp)) {
1396 mutex_enter(&mi->mi_async_lock);
1397 mi->mi_max_threads = omax;
1398 mutex_exit(&mi->mi_async_lock);
1399 return (EBUSY);
1401 /*
1402 * The unmount can't fail from now on; stop the manager thread.
1403 */
1404 nfs_async_manager_stop(vfsp);
1405 /*
1406 * Destroy all rnodes belonging to this file system from the
1407 * rnode hash queues and purge any resources allocated to
1408 * them.
1409 */
1410 destroy_rtable(vfsp, cr);
1411 if (mi->mi_io_kstats) {
1412 kstat_delete(mi->mi_io_kstats);
1413 mi->mi_io_kstats = NULL;
1415 if (mi->mi_ro_kstats) {
1416 kstat_delete(mi->mi_ro_kstats);
1417 mi->mi_ro_kstats = NULL;
1419 return (0);
1422 /*
1423 * find root of nfs
1424 */
1425 static int
1426 nfs_root(vfs_t *vfsp, vnode_t **vpp)
1428 mntinfo_t *mi;
1429 vnode_t *vp;
1430 servinfo_t *svp;
1431 rnode_t *rp;
1432 int error = 0;
1434 mi = VFTOMI(vfsp);
1436 if (nfs_zone() != mi->mi_zone)
1437 return (EPERM);
1439 svp = mi->mi_curr_serv;
1440 if (svp && (svp->sv_flags & SV_ROOT_STALE)) {
1441 mutex_enter(&svp->sv_lock);
1442 svp->sv_flags &= ~SV_ROOT_STALE;
1443 mutex_exit(&svp->sv_lock);
1444 error = ENOENT;
1447 vp = makenfsnode((fhandle_t *)mi->mi_curr_serv->sv_fhandle.fh_buf,
1448 NULL, vfsp, gethrtime(), CRED(), NULL, NULL);
1450 /*
1451 * if the SV_ROOT_STALE flag was reset above, reset the
1452 * RSTALE flag if needed and return an error
1453 */
1454 if (error == ENOENT) {
1455 rp = VTOR(vp);
1456 if (svp && rp->r_flags & RSTALE) {
1457 mutex_enter(&rp->r_statelock);
1458 rp->r_flags &= ~RSTALE;
1459 mutex_exit(&rp->r_statelock);
1461 VN_RELE(vp);
1462 return (error);
1465 ASSERT(vp->v_type == VNON || vp->v_type == mi->mi_type);
1467 vp->v_type = mi->mi_type;
1469 *vpp = vp;
1471 return (0);
1474 /*
1475 * Get file system statistics.
1476 */
1477 static int
1478 nfs_statvfs(vfs_t *vfsp, struct statvfs64 *sbp)
1480 int error;
1481 mntinfo_t *mi;
1482 struct nfsstatfs fs;
1483 int douprintf;
1484 failinfo_t fi;
1485 vnode_t *vp;
1487 error = nfs_root(vfsp, &vp);
1488 if (error)
1489 return (error);
1491 mi = VFTOMI(vfsp);
1492 douprintf = 1;
1493 fi.vp = vp;
1494 fi.fhp = NULL; /* no need to update, filehandle not copied */
1495 fi.copyproc = nfscopyfh;
1496 fi.lookupproc = nfslookup;
1497 fi.xattrdirproc = acl_getxattrdir2;
1499 error = rfs2call(mi, RFS_STATFS, xdr_fhandle, (caddr_t)VTOFH(vp),
1500 xdr_statfs, (caddr_t)&fs, CRED(), &douprintf, &fs.fs_status, 0,
1501 &fi);
1503 if (!error) {
1504 error = geterrno(fs.fs_status);
1505 if (!error) {
1506 mutex_enter(&mi->mi_lock);
1507 if (mi->mi_stsize) {
1508 mi->mi_stsize = MIN(mi->mi_stsize, fs.fs_tsize);
1509 } else {
1510 mi->mi_stsize = fs.fs_tsize;
1511 mi->mi_curwrite = mi->mi_stsize;
1513 mutex_exit(&mi->mi_lock);
1514 sbp->f_bsize = fs.fs_bsize;
1515 sbp->f_frsize = fs.fs_bsize;
1516 sbp->f_blocks = (fsblkcnt64_t)fs.fs_blocks;
1517 sbp->f_bfree = (fsblkcnt64_t)fs.fs_bfree;
1518 /*
1519 * Some servers may return negative available
1520 * block counts. They may do this because they
1521 * calculate the number of available blocks by
1522 * subtracting the number of used blocks from
1523 * the total number of blocks modified by the
1524 * minimum free value. For example, if the
1525 * minimum free percentage is 10 and the file
1526 * system is greater than 90 percent full, then
1527 * 90 percent of the total blocks minus the
1528 * actual number of used blocks may be a
1529 * negative number.
1530 *
1531 * In this case, we need to sign extend the
1532 * negative number through the assignment from
1533 * the 32 bit bavail count to the 64 bit bavail
1534 * count.
1535 *
1536 * We need to be able to discern between there
1537 * just being a lot of available blocks on the
1538 * file system and the case described above.
1539 * We are making the assumption that it does
1540 * not make sense to have more available blocks
1541 * than there are free blocks. So, if there
1542 * are, then we treat the number as if it were
1543 * a negative number and arrange to have it
1544 * sign extended when it is converted from 32
1545 * bits to 64 bits.
1546 */
1547 if (fs.fs_bavail <= fs.fs_bfree)
1548 sbp->f_bavail = (fsblkcnt64_t)fs.fs_bavail;
1549 else {
1550 sbp->f_bavail =
1551 (fsblkcnt64_t)((long)fs.fs_bavail);
1553 sbp->f_files = (fsfilcnt64_t)-1;
1554 sbp->f_ffree = (fsfilcnt64_t)-1;
1555 sbp->f_favail = (fsfilcnt64_t)-1;
1556 sbp->f_fsid = (unsigned long)vfsp->vfs_fsid.val[0];
1557 (void) strncpy(sbp->f_basetype,
1558 vfssw[vfsp->vfs_fstype].vsw_name, FSTYPSZ);
1559 sbp->f_flag = vf_to_stf(vfsp->vfs_flag);
1560 sbp->f_namemax = (uint32_t)-1;
1561 } else {
1562 PURGE_STALE_FH(error, vp, CRED());
1566 VN_RELE(vp);
1568 return (error);
1571 static kmutex_t nfs_syncbusy;
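/*
 * nfs_syncbusy serializes whole-filesystem flushes: nfs_sync() only calls
 * rflush() when it can take this mutex without blocking, so overlapping
 * sync requests do not pile up.
 */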
1573 /*
1574 * Flush dirty nfs files for file system vfsp.
1575 * If vfsp == NULL, all nfs files are flushed.
1576 */
1577 /* ARGSUSED */
1578 static int
1579 nfs_sync(vfs_t *vfsp, short flag, cred_t *cr)
1580 {
1581 /*
1582 * Cross-zone calls are OK here, since this translates to a
1583 * VOP_PUTPAGE(B_ASYNC), which gets picked up by the right zone.
1584 */
1585 if (!(flag & SYNC_ATTR) && mutex_tryenter(&nfs_syncbusy) != 0) {
1586 rflush(vfsp, cr);
1587 mutex_exit(&nfs_syncbusy);
1589 return (0);
1590 }
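/*
 * nfs_vget() converts a file identifier (as typically produced by VOP_FID)
 * back into a vnode by rebuilding the rnode from the embedded file handle
 * and fetching attributes when the vnode type is not yet known.
 */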
1592 /* ARGSUSED */
1593 static int
1594 nfs_vget(vfs_t *vfsp, vnode_t **vpp, fid_t *fidp)
1596 int error;
1597 vnode_t *vp;
1598 struct vattr va;
1599 struct nfs_fid *nfsfidp = (struct nfs_fid *)fidp;
1600 zoneid_t zoneid = VFTOMI(vfsp)->mi_zone->zone_id;
1602 if (nfs_zone() != VFTOMI(vfsp)->mi_zone)
1603 return (EPERM);
1604 if (fidp->fid_len != (sizeof (*nfsfidp) - sizeof (short))) {
1605 #ifdef DEBUG
1606 zcmn_err(zoneid, CE_WARN,
1607 "nfs_vget: bad fid len, %d/%d", fidp->fid_len,
1608 (int)(sizeof (*nfsfidp) - sizeof (short)));
1609 #endif
1610 *vpp = NULL;
1611 return (ESTALE);
1614 vp = makenfsnode((fhandle_t *)(nfsfidp->nf_data), NULL, vfsp,
1615 gethrtime(), CRED(), NULL, NULL);
1617 if (VTOR(vp)->r_flags & RSTALE) {
1618 VN_RELE(vp);
1619 *vpp = NULL;
1620 return (ENOENT);
1623 if (vp->v_type == VNON) {
1624 va.va_mask = AT_ALL;
1625 error = nfsgetattr(vp, &va, CRED());
1626 if (error) {
1627 VN_RELE(vp);
1628 *vpp = NULL;
1629 return (error);
1631 vp->v_type = va.va_type;
1634 *vpp = vp;
1636 return (0);
1637 }
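/*
 * nfs_mountroot() mounts "/" for diskless boot (why == ROOT_INIT): it gets
 * the server address, root file handle, transport and options from
 * mount_root(), forces AUTH_UNIX, and then reuses nfsrootvp() to build the
 * mount. Remount is a panic and unmount is a no-op for NFS root.
 */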
1639 /* ARGSUSED */
1640 static int
1641 nfs_mountroot(vfs_t *vfsp, whymountroot_t why)
1643 vnode_t *rtvp;
1644 char root_hostname[SYS_NMLN+1];
1645 struct servinfo *svp;
1646 int error;
1647 int vfsflags;
1648 size_t size;
1649 char *root_path;
1650 struct pathname pn;
1651 char *name;
1652 cred_t *cr;
1653 struct nfs_args args; /* nfs mount arguments */
1654 static char token[10];
1656 bzero(&args, sizeof (args));
1658 /* do this BEFORE getfile which causes xid stamps to be initialized */
1659 clkset(-1L); /* hack for now - until we get time svc? */
1661 if (why == ROOT_REMOUNT) {
1662 /*
1663 * Shouldn't happen.
1664 */
1665 panic("nfs_mountroot: why == ROOT_REMOUNT");
1668 if (why == ROOT_UNMOUNT) {
1669 /*
1670 * Nothing to do for NFS.
1671 */
1672 return (0);
1675 /*
1676 * why == ROOT_INIT
1677 */
1679 name = token;
1680 *name = 0;
1681 getfsname("root", name, sizeof (token));
1683 pn_alloc(&pn);
1684 root_path = pn.pn_path;
1686 svp = kmem_zalloc(sizeof (*svp), KM_SLEEP);
1687 svp->sv_knconf = kmem_zalloc(sizeof (*svp->sv_knconf), KM_SLEEP);
1688 svp->sv_knconf->knc_protofmly = kmem_alloc(KNC_STRSIZE, KM_SLEEP);
1689 svp->sv_knconf->knc_proto = kmem_alloc(KNC_STRSIZE, KM_SLEEP);
1691 /*
1692 * Get server address
1693 * Get the root fhandle
1694 * Get server's transport
1695 * Get server's hostname
1696 * Get options
1697 */
1698 args.addr = &svp->sv_addr;
1699 args.fh = (char *)&svp->sv_fhandle.fh_buf;
1700 args.knconf = svp->sv_knconf;
1701 args.hostname = root_hostname;
1702 vfsflags = 0;
1703 if (error = mount_root(*name ? name : "root", root_path, NFS_VERSION,
1704 &args, &vfsflags)) {
1705 nfs_cmn_err(error, CE_WARN,
1706 "nfs_mountroot: mount_root failed: %m");
1707 sv_free(svp);
1708 pn_free(&pn);
1709 return (error);
1711 svp->sv_fhandle.fh_len = NFS_FHSIZE;
1712 svp->sv_hostnamelen = (int)(strlen(root_hostname) + 1);
1713 svp->sv_hostname = kmem_alloc(svp->sv_hostnamelen, KM_SLEEP);
1714 (void) strcpy(svp->sv_hostname, root_hostname);
1716 /*
1717 * Force root partition to always be mounted with AUTH_UNIX for now
1718 */
1719 svp->sv_secdata = kmem_alloc(sizeof (*svp->sv_secdata), KM_SLEEP);
1720 svp->sv_secdata->secmod = AUTH_UNIX;
1721 svp->sv_secdata->rpcflavor = AUTH_UNIX;
1722 svp->sv_secdata->data = NULL;
1724 cr = crgetcred();
1725 rtvp = NULL;
1727 error = nfsrootvp(&rtvp, vfsp, svp, args.flags, cr, global_zone);
1729 crfree(cr);
1731 if (error) {
1732 pn_free(&pn);
1733 sv_free(svp);
1734 return (error);
1737 error = nfs_setopts(rtvp, DATAMODEL_NATIVE, &args);
1738 if (error) {
1739 nfs_cmn_err(error, CE_WARN,
1740 "nfs_mountroot: invalid root mount options");
1741 pn_free(&pn);
1742 goto errout;
1745 (void) vfs_lock_wait(vfsp);
1746 vfs_add(NULL, vfsp, vfsflags);
1747 vfs_unlock(vfsp);
1749 size = strlen(svp->sv_hostname);
1750 (void) strcpy(rootfs.bo_name, svp->sv_hostname);
1751 rootfs.bo_name[size] = ':';
1752 (void) strcpy(&rootfs.bo_name[size + 1], root_path);
1754 pn_free(&pn);
1756 errout:
1757 if (error) {
1758 sv_free(svp);
1759 nfs_async_stop(vfsp);
1760 nfs_async_manager_stop(vfsp);
1763 if (rtvp != NULL)
1764 VN_RELE(rtvp);
1766 return (error);
1769 /*
1770 * Initialization routine for VFS routines. Should only be called once
1771 */
1772 int
1773 nfs_vfsinit(void)
1774 {
1775 mutex_init(&nfs_syncbusy, NULL, MUTEX_DEFAULT, NULL);
1776 return (0);
1777 }
1779 void
1780 nfs_vfsfini(void)
1781 {
1782 mutex_destroy(&nfs_syncbusy);
1783 }
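/*
 * nfs_freevfs() runs when the last reference to the vfs goes away: it drops
 * the shared pathconf entry, frees the server list and finally the mntinfo_t
 * itself; the mount's kstats must already have been deleted by unmount.
 */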
1785 void
1786 nfs_freevfs(vfs_t *vfsp)
1788 mntinfo_t *mi;
1789 servinfo_t *svp;
1791 /* free up the resources */
1792 mi = VFTOMI(vfsp);
1793 pathconf_rele(mi);
1794 svp = mi->mi_servers;
1795 mi->mi_servers = mi->mi_curr_serv = NULL;
1796 sv_free(svp);
1798 /*
1799 * By this time we should have already deleted the
1800 * mi kstats in the unmount code. If they are still around
1801 * something's wrong
1802 */
1803 ASSERT(mi->mi_io_kstats == NULL);
1804 nfs_free_mi(mi);
1805 }