usr/src/uts/common/fs/udfs/udf_vfsops.c  [unleashed.git]
blob 479898fe9ceac94662010f6e22a058c17abd1839
1 /*
2 * CDDL HEADER START
4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License (the "License").
6 * You may not use this file except in compliance with the License.
8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 * or http://www.opensolaris.org/os/licensing.
10 * See the License for the specific language governing permissions
11 * and limitations under the License.
13 * When distributing Covered Code, include this CDDL HEADER in each
14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
19 * CDDL HEADER END
22 * Copyright 2008 Sun Microsystems, Inc. All rights reserved.
23 * Use is subject to license terms.
24 * Copyright (c) 2011 Bayard G. Bell. All rights reserved.
27 #include <sys/types.h>
28 #include <sys/t_lock.h>
29 #include <sys/param.h>
30 #include <sys/time.h>
31 #include <sys/systm.h>
32 #include <sys/sysmacros.h>
33 #include <sys/resource.h>
34 #include <sys/signal.h>
35 #include <sys/cred.h>
36 #include <sys/user.h>
37 #include <sys/buf.h>
38 #include <sys/vfs.h>
39 #include <sys/vfs_opreg.h>
40 #include <sys/stat.h>
41 #include <sys/vnode.h>
42 #include <sys/mode.h>
43 #include <sys/proc.h>
44 #include <sys/disp.h>
45 #include <sys/file.h>
46 #include <sys/fcntl.h>
47 #include <sys/flock.h>
48 #include <sys/kmem.h>
49 #include <sys/uio.h>
50 #include <sys/dnlc.h>
51 #include <sys/conf.h>
52 #include <sys/errno.h>
53 #include <sys/mman.h>
54 #include <sys/fbuf.h>
55 #include <sys/pathname.h>
56 #include <sys/debug.h>
57 #include <sys/vmsystm.h>
58 #include <sys/cmn_err.h>
59 #include <sys/dirent.h>
60 #include <sys/errno.h>
61 #include <sys/modctl.h>
62 #include <sys/statvfs.h>
63 #include <sys/mount.h>
64 #include <sys/sunddi.h>
65 #include <sys/bootconf.h>
66 #include <sys/policy.h>
68 #include <vm/hat.h>
69 #include <vm/page.h>
70 #include <vm/pvn.h>
71 #include <vm/as.h>
72 #include <vm/seg.h>
73 #include <vm/seg_map.h>
74 #include <vm/seg_kmem.h>
75 #include <vm/seg_vn.h>
76 #include <vm/rm.h>
77 #include <vm/page.h>
78 #include <sys/swap.h>
79 #include <sys/mntent.h>
82 #include <fs/fs_subr.h>
85 #include <sys/fs/udf_volume.h>
86 #include <sys/fs/udf_inode.h>
89 extern struct vnode *common_specvp(struct vnode *vp);
91 extern kmutex_t ud_sync_busy;
92 static int32_t ud_mountfs(struct vfs *,
93 enum whymountroot, dev_t, char *, struct cred *, int32_t);
94 static struct udf_vfs *ud_validate_and_fill_superblock(dev_t,
95 int32_t, uint32_t);
96 void ud_destroy_fsp(struct udf_vfs *);
97 void ud_convert_to_superblock(struct udf_vfs *,
98 struct log_vol_int_desc *);
99 void ud_update_superblock(struct vfs *);
100 int32_t ud_get_last_block(dev_t, daddr_t *);
101 static int32_t ud_val_get_vat(struct udf_vfs *,
102 dev_t, daddr_t, struct ud_map *);
103 int32_t ud_read_sparing_tbls(struct udf_vfs *,
104 dev_t, struct ud_map *, struct pmap_typ2 *);
105 uint32_t ud_get_lbsize(dev_t, uint32_t *);
107 static int32_t udf_mount(struct vfs *,
108 struct vnode *, struct mounta *, struct cred *);
109 static int32_t udf_unmount(struct vfs *, int, struct cred *);
110 static int32_t udf_root(struct vfs *, struct vnode **);
111 static int32_t udf_statvfs(struct vfs *, struct statvfs64 *);
112 static int32_t udf_sync(struct vfs *, int16_t, struct cred *);
113 static int32_t udf_vget(struct vfs *, struct vnode **, struct fid *);
114 static int32_t udf_mountroot(struct vfs *vfsp, enum whymountroot);
116 static int udfinit(int, char *);
118 static mntopts_t udfs_mntopts;
120 static vfsdef_t vfw = {
121 VFSDEF_VERSION,
122 "udfs",
123 udfinit,
124 VSW_HASPROTO|VSW_CANREMOUNT|VSW_STATS|VSW_CANLOFI,
125 &udfs_mntopts
128 static mntopts_t udfs_mntopts = {
130 NULL
134 * Module linkage information for the kernel.
136 extern struct mod_ops mod_fsops;
138 static struct modlfs modlfs = {
139 &mod_fsops, "filesystem for UDFS", &vfw
142 static struct modlinkage modlinkage = {
143 MODREV_1, (void *)&modlfs, NULL
146 int32_t udf_fstype = -1;
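/*
 * Loadable-module entry points follow: _init() registers the filesystem
 * through mod_install(), _info() reports the module information, and
 * _fini() unconditionally returns EBUSY, so udfs can never be unloaded
 * once it has been installed.
 */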
149 _init()
151 return (mod_install(&modlinkage));
155 _fini()
157 return (EBUSY);
161 _info(struct modinfo *modinfop)
163 return (mod_info(&modlinkage, modinfop));
167 /* -------------------- vfs routines -------------------- */
170 * XXX - this appears only to be used by the VM code to handle the case where
171 * UNIX is running off the mini-root. That probably wants to be done
172 * differently.
174 struct vnode *rootvp;
175 #ifndef __lint
176 _NOTE(SCHEME_PROTECTS_DATA("safe sharing", rootvp))
177 #endif
178 static int32_t
179 udf_mount(struct vfs *vfsp, struct vnode *mvp,
180 struct mounta *uap, struct cred *cr)
182 dev_t dev;
183 struct vnode *lvp = NULL;
184 struct vnode *svp = NULL;
185 struct pathname dpn;
186 int32_t error;
187 enum whymountroot why;
188 int oflag, aflag;
190 ud_printf("udf_mount\n");
192 if ((error = secpolicy_fs_mount(cr, mvp, vfsp)) != 0) {
193 return (error);
196 if (mvp->v_type != VDIR) {
197 return (ENOTDIR);
200 mutex_enter(&mvp->v_lock);
201 if ((uap->flags & MS_REMOUNT) == 0 &&
202 (uap->flags & MS_OVERLAY) == 0 &&
203 (mvp->v_count != 1 || (mvp->v_flag & VROOT))) {
204 mutex_exit(&mvp->v_lock);
205 return (EBUSY);
207 mutex_exit(&mvp->v_lock);
209 if (error = pn_get(uap->dir, UIO_USERSPACE, &dpn)) {
210 return (error);
214 * Resolve path name of the file being mounted.
216 if (error = lookupname(uap->spec, UIO_USERSPACE, FOLLOW, NULLVPP,
217 &svp)) {
218 pn_free(&dpn);
219 return (error);
222 error = vfs_get_lofi(vfsp, &lvp);
224 if (error > 0) {
225 if (error == ENOENT)
226 error = ENODEV;
227 goto out;
228 } else if (error == 0) {
229 dev = lvp->v_rdev;
230 } else {
231 dev = svp->v_rdev;
233 if (svp->v_type != VBLK) {
234 error = ENOTBLK;
235 goto out;
240 * Ensure that this device isn't already mounted,
241 * unless this is a REMOUNT request
243 if (vfs_devmounting(dev, vfsp)) {
244 error = EBUSY;
245 goto out;
247 if (vfs_devismounted(dev)) {
248 if (uap->flags & MS_REMOUNT) {
249 why = ROOT_REMOUNT;
250 } else {
251 error = EBUSY;
252 goto out;
254 } else {
255 why = ROOT_INIT;
257 if (getmajor(dev) >= devcnt) {
258 error = ENXIO;
259 goto out;
263 * If the device is a tape, mount it read only
265 if (devopsp[getmajor(dev)]->devo_cb_ops->cb_flag & D_TAPE) {
266 vfsp->vfs_flag |= VFS_RDONLY;
269 if (uap->flags & MS_RDONLY) {
270 vfsp->vfs_flag |= VFS_RDONLY;
274 * Set mount options.
276 if (uap->flags & MS_RDONLY) {
277 vfs_setmntopt(vfsp, MNTOPT_RO, NULL, 0);
279 if (uap->flags & MS_NOSUID) {
280 vfs_setmntopt(vfsp, MNTOPT_NOSUID, NULL, 0);
284 * Verify that the caller can open the device special file as
285 * required. It is not until this moment that we know whether
286 * we're mounting "ro" or not.
288 if ((vfsp->vfs_flag & VFS_RDONLY) != 0) {
289 oflag = FREAD;
290 aflag = VREAD;
291 } else {
292 oflag = FREAD | FWRITE;
293 aflag = VREAD | VWRITE;
296 if (lvp == NULL &&
297 (error = secpolicy_spec_open(cr, svp, oflag)) != 0)
298 goto out;
300 if ((error = VOP_ACCESS(svp, aflag, 0, cr, NULL)) != 0)
301 goto out;
304 * Mount the filesystem.
306 error = ud_mountfs(vfsp, why, dev, dpn.pn_path, cr, 0);
307 out:
308 VN_RELE(svp);
309 if (lvp != NULL)
310 VN_RELE(lvp);
311 pn_free(&dpn);
312 return (error);
318 * Unmount the file system pointed to
319 * by vfsp.
321 /* ARGSUSED */
322 static int32_t
323 udf_unmount(struct vfs *vfsp, int fflag, struct cred *cr)
325 struct udf_vfs *udf_vfsp;
326 struct vnode *bvp, *rvp;
327 struct ud_inode *rip;
328 int32_t flag;
330 ud_printf("udf_unmount\n");
332 if (secpolicy_fs_unmount(cr, vfsp) != 0) {
333 return (EPERM);
337 * Forced unmount is not supported by this file system,
338 * so ENOTSUP is returned.
340 if (fflag & MS_FORCE)
341 return (ENOTSUP);
343 udf_vfsp = (struct udf_vfs *)vfsp->vfs_data;
344 flag = !(udf_vfsp->udf_flags & UDF_FL_RDONLY);
345 bvp = udf_vfsp->udf_devvp;
347 rvp = udf_vfsp->udf_root;
348 ASSERT(rvp != NULL);
349 rip = VTOI(rvp);
351 (void) ud_release_cache(udf_vfsp);
354 /* Flush all inodes except root */
355 if (ud_iflush(vfsp) < 0) {
356 return (EBUSY);
359 rw_enter(&rip->i_contents, RW_WRITER);
360 (void) ud_syncip(rip, B_INVAL, I_SYNC);
361 rw_exit(&rip->i_contents);
363 mutex_enter(&ud_sync_busy);
364 if ((udf_vfsp->udf_flags & UDF_FL_RDONLY) == 0) {
365 bflush(vfsp->vfs_dev);
366 mutex_enter(&udf_vfsp->udf_lock);
367 udf_vfsp->udf_clean = UDF_CLEAN;
368 mutex_exit(&udf_vfsp->udf_lock);
369 ud_update_superblock(vfsp);
371 mutex_exit(&ud_sync_busy);
373 mutex_destroy(&udf_vfsp->udf_lock);
374 mutex_destroy(&udf_vfsp->udf_rename_lck);
376 ud_delcache(rip);
377 ITIMES(rip);
378 VN_RELE(rvp);
380 ud_destroy_fsp(udf_vfsp);
382 (void) VOP_PUTPAGE(bvp, (offset_t)0, (uint32_t)0, B_INVAL, cr, NULL);
383 (void) VOP_CLOSE(bvp, flag, 1, (offset_t)0, cr, NULL);
385 (void) bfinval(vfsp->vfs_dev, 1);
386 VN_RELE(bvp);
389 return (0);
394 * Get the root vp for the
395 * file system
397 static int32_t
398 udf_root(struct vfs *vfsp, struct vnode **vpp)
400 struct udf_vfs *udf_vfsp;
401 struct vnode *vp;
403 ud_printf("udf_root\n");
405 udf_vfsp = (struct udf_vfs *)vfsp->vfs_data;
407 ASSERT(udf_vfsp != NULL);
408 ASSERT(udf_vfsp->udf_root != NULL);
410 vp = udf_vfsp->udf_root;
411 VN_HOLD(vp);
412 *vpp = vp;
413 return (0);
418 * Get file system statistics.
420 static int32_t
421 udf_statvfs(struct vfs *vfsp, struct statvfs64 *sp)
423 struct udf_vfs *udf_vfsp;
424 struct ud_part *parts;
425 dev32_t d32;
426 int32_t index;
428 ud_printf("udf_statvfs\n");
430 udf_vfsp = (struct udf_vfs *)vfsp->vfs_data;
431 (void) bzero(sp, sizeof (struct statvfs64));
433 mutex_enter(&udf_vfsp->udf_lock);
434 sp->f_bsize = udf_vfsp->udf_lbsize;
435 sp->f_frsize = udf_vfsp->udf_lbsize;
436 sp->f_blocks = 0;
437 sp->f_bfree = 0;
438 parts = udf_vfsp->udf_parts;
439 for (index = 0; index < udf_vfsp->udf_npart; index++) {
440 sp->f_blocks += parts->udp_nblocks;
441 sp->f_bfree += parts->udp_nfree;
442 parts++;
444 sp->f_bavail = sp->f_bfree;
447 * Since there are no real inodes allocated,
448 * we approximate that
449 * each new file will occupy:
450 * 38 (overhead per dirent) + MAXNAMLEN / 2 + inode_size (== block size)
452 sp->f_ffree = sp->f_favail =
453 (sp->f_bavail * sp->f_bsize) / (146 + sp->f_bsize);
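/*
 * Worked example (illustrative): with a 2048-byte logical block, each new
 * file is assumed to cost 146 + 2048 = 2194 bytes, so roughly one free
 * "inode" is reported per 2194 bytes of available space.
 */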
456 * The total number of inodes is
457 * the sum of files + directories + free inodes
459 sp->f_files = sp->f_ffree + udf_vfsp->udf_nfiles + udf_vfsp->udf_ndirs;
460 (void) cmpldev(&d32, vfsp->vfs_dev);
461 sp->f_fsid = d32;
462 (void) strcpy(sp->f_basetype, vfssw[vfsp->vfs_fstype].vsw_name);
463 sp->f_flag = vf_to_stf(vfsp->vfs_flag);
464 sp->f_namemax = MAXNAMLEN;
465 (void) strcpy(sp->f_fstr, udf_vfsp->udf_volid);
467 mutex_exit(&udf_vfsp->udf_lock);
469 return (0);
474 * Flush any pending I/O to file system vfsp.
475 * The ud_update() routine will only flush *all* udf files.
477 /*ARGSUSED*/
478 /* ARGSUSED */
479 static int32_t
480 udf_sync(struct vfs *vfsp, int16_t flag, struct cred *cr)
482 ud_printf("udf_sync\n");
484 ud_update(flag);
485 return (0);
490 /* ARGSUSED */
491 static int32_t
492 udf_vget(struct vfs *vfsp,
493 struct vnode **vpp, struct fid *fidp)
495 int32_t error = 0;
496 struct udf_fid *udfid;
497 struct udf_vfs *udf_vfsp;
498 struct ud_inode *ip;
500 ud_printf("udf_vget\n");
502 udf_vfsp = (struct udf_vfs *)vfsp->vfs_data;
503 if (udf_vfsp == NULL) {
504 *vpp = NULL;
505 return (0);
508 udfid = (struct udf_fid *)fidp;
509 if ((error = ud_iget(vfsp, udfid->udfid_prn,
510 udfid->udfid_icb_lbn, &ip, NULL, CRED())) != 0) {
511 *vpp = NULL;
512 return (error);
515 rw_enter(&ip->i_contents, RW_READER);
516 if ((udfid->udfid_uinq_lo != (ip->i_uniqid & 0xffffffff)) ||
517 (udfid->udfid_prn != ip->i_icb_prn)) {
518 rw_exit(&ip->i_contents);
519 VN_RELE(ITOV(ip));
520 *vpp = NULL;
521 return (EINVAL);
523 rw_exit(&ip->i_contents);
525 *vpp = ITOV(ip);
526 return (0);
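/*
 * A note on udf_vget() above: the opaque file handle is interpreted as a
 * struct udf_fid carrying the partition reference number and ICB block of
 * the inode, plus the low 32 bits of its unique id; the unique-id and prn
 * comparison makes a handle whose inode has since been recycled fail with
 * EINVAL rather than resolve to the wrong file.
 */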
531 * Mount root file system.
532 * "why" is ROOT_INIT on initial call, ROOT_REMOUNT if called to
533 * remount the root file system, and ROOT_UNMOUNT if called to
534 * unmount the root (e.g., as part of a system shutdown).
536 * XXX - this may be partially machine-dependent; it, along with the VFS_SWAPVP
537 * operation, goes along with auto-configuration. A mechanism should be
538 * provided by which machine-INdependent code in the kernel can say "get me the
539 * right root file system" and "get me the right initial swap area", and have
540 * that done in what may well be a machine-dependent fashion.
541 * Unfortunately, it is also file-system-type dependent (NFS gets it via
542 * bootparams calls, UFS gets it from various and sundry machine-dependent
543 * mechanisms, as SPECFS does for swap).
545 /* ARGSUSED */
546 static int32_t
547 udf_mountroot(struct vfs *vfsp, enum whymountroot why)
549 dev_t rootdev;
550 static int32_t udf_rootdone = 0;
551 struct vnode *vp = NULL;
552 int32_t ovflags, error;
553 ud_printf("udf_mountroot\n");
555 if (why == ROOT_INIT) {
556 if (udf_rootdone++) {
557 return (EBUSY);
559 rootdev = getrootdev();
560 if (rootdev == (dev_t)NODEV) {
561 return (ENODEV);
563 vfsp->vfs_dev = rootdev;
564 vfsp->vfs_flag |= VFS_RDONLY;
565 } else if (why == ROOT_REMOUNT) {
566 vp = ((struct udf_vfs *)vfsp->vfs_data)->udf_devvp;
567 (void) dnlc_purge_vfsp(vfsp, 0);
568 vp = common_specvp(vp);
569 (void) VOP_PUTPAGE(vp, (offset_t)0,
570 (uint32_t)0, B_INVAL, CRED(), NULL);
571 binval(vfsp->vfs_dev);
573 ovflags = vfsp->vfs_flag;
574 vfsp->vfs_flag &= ~VFS_RDONLY;
575 vfsp->vfs_flag |= VFS_REMOUNT;
576 rootdev = vfsp->vfs_dev;
577 } else if (why == ROOT_UNMOUNT) {
578 ud_update(0);
579 vp = ((struct udf_vfs *)vfsp->vfs_data)->udf_devvp;
580 (void) VOP_CLOSE(vp, FREAD|FWRITE, 1,
581 (offset_t)0, CRED(), NULL);
582 return (0);
585 if ((error = vfs_lock(vfsp)) != 0) {
586 return (error);
589 error = ud_mountfs(vfsp, why, rootdev, "/", CRED(), 1);
590 if (error) {
591 vfs_unlock(vfsp);
592 if (why == ROOT_REMOUNT) {
593 vfsp->vfs_flag = ovflags;
595 if (rootvp) {
596 VN_RELE(rootvp);
597 rootvp = (struct vnode *)0;
599 return (error);
602 if (why == ROOT_INIT) {
603 vfs_add((struct vnode *)0, vfsp,
604 (vfsp->vfs_flag & VFS_RDONLY) ? MS_RDONLY : 0);
606 vfs_unlock(vfsp);
607 return (0);
611 /* ------------------------- local routines ------------------------- */
614 static int32_t
615 ud_mountfs(struct vfs *vfsp,
616 enum whymountroot why, dev_t dev, char *name,
617 struct cred *cr, int32_t isroot)
619 struct vnode *devvp = NULL;
620 int32_t error = 0;
621 int32_t needclose = 0;
622 struct udf_vfs *udf_vfsp = NULL;
623 struct log_vol_int_desc *lvid;
624 struct ud_inode *rip = NULL;
625 struct vnode *rvp = NULL;
626 int32_t i, lbsize;
627 uint32_t avd_loc;
628 struct ud_map *map;
629 int32_t desc_len;
631 ud_printf("ud_mountfs\n");
633 if (why == ROOT_INIT) {
635 * Open the device.
637 devvp = makespecvp(dev, VBLK);
640 * Open block device mounted on.
641 * When bio is fixed for vnodes this can all be vnode
642 * operations.
644 error = VOP_OPEN(&devvp,
645 (vfsp->vfs_flag & VFS_RDONLY) ? FREAD : FREAD|FWRITE,
646 cr, NULL);
647 if (error) {
648 goto out;
650 needclose = 1;
653 * Refuse to go any further if this
654 * device is being used for swapping.
656 if (IS_SWAPVP(devvp)) {
657 error = EBUSY;
658 goto out;
663 * check for dev already mounted on
665 if (vfsp->vfs_flag & VFS_REMOUNT) {
666 struct tag *ttag;
667 int32_t index, count;
668 struct buf *tpt = 0;
669 caddr_t addr;
672 /* cannot remount to RDONLY */
673 if (vfsp->vfs_flag & VFS_RDONLY) {
674 return (EINVAL);
677 if (vfsp->vfs_dev != dev) {
678 return (EINVAL);
681 udf_vfsp = (struct udf_vfs *)vfsp->vfs_data;
682 devvp = udf_vfsp->udf_devvp;
685 * fsck may have altered the file system; discard
686 * as much incore data as possible. Don't flush
687 * if this is a rw to rw remount; it's just resetting
688 * the options.
690 if (udf_vfsp->udf_flags & UDF_FL_RDONLY) {
691 (void) dnlc_purge_vfsp(vfsp, 0);
692 (void) VOP_PUTPAGE(devvp, (offset_t)0, (uint_t)0,
693 B_INVAL, CRED(), NULL);
694 (void) ud_iflush(vfsp);
695 bflush(dev);
696 binval(dev);
700 * We can read and write only UDF1.50;
701 * disallow mounting any higher version.
703 if ((udf_vfsp->udf_miread > UDF_150) ||
704 (udf_vfsp->udf_miwrite > UDF_150)) {
705 error = EINVAL;
706 goto remountout;
710 * read/write to read/write; all done
712 if (udf_vfsp->udf_flags & UDF_FL_RW) {
713 goto remountout;
717 * Does the media type allow a writable mount
719 if (udf_vfsp->udf_mtype != UDF_MT_OW) {
720 error = EINVAL;
721 goto remountout;
725 * Read the metadata
726 * and check if it is possible to
727 * mount in rw mode
729 tpt = ud_bread(vfsp->vfs_dev,
730 udf_vfsp->udf_iseq_loc << udf_vfsp->udf_l2d_shift,
731 udf_vfsp->udf_iseq_len);
732 if (tpt->b_flags & B_ERROR) {
733 error = EIO;
734 goto remountout;
736 count = udf_vfsp->udf_iseq_len / DEV_BSIZE;
737 addr = tpt->b_un.b_addr;
738 for (index = 0; index < count; index ++) {
739 ttag = (struct tag *)(addr + index * DEV_BSIZE);
740 desc_len = udf_vfsp->udf_iseq_len - (index * DEV_BSIZE);
741 if (ud_verify_tag_and_desc(ttag, UD_LOG_VOL_INT,
742 udf_vfsp->udf_iseq_loc +
743 (index >> udf_vfsp->udf_l2d_shift),
744 1, desc_len) == 0) {
745 struct log_vol_int_desc *lvid;
747 lvid = (struct log_vol_int_desc *)ttag;
749 if (SWAP_32(lvid->lvid_int_type) !=
750 LOG_VOL_CLOSE_INT) {
751 error = EINVAL;
752 goto remountout;
756 * Copy new data to old data
758 bcopy(udf_vfsp->udf_iseq->b_un.b_addr,
759 tpt->b_un.b_addr, udf_vfsp->udf_iseq_len);
760 break;
764 udf_vfsp->udf_flags = UDF_FL_RW;
766 mutex_enter(&udf_vfsp->udf_lock);
767 ud_sbwrite(udf_vfsp);
768 mutex_exit(&udf_vfsp->udf_lock);
769 remountout:
770 if (tpt != NULL) {
771 tpt->b_flags = B_AGE | B_STALE;
772 brelse(tpt);
774 return (error);
777 ASSERT(devvp != 0);
779 * Flush back any dirty pages on the block device to
780 * try and keep the buffer cache in sync with the page
781 * cache if someone is trying to use block devices when
782 * they really should be using the raw device.
784 (void) VOP_PUTPAGE(common_specvp(devvp), (offset_t)0,
785 (uint32_t)0, B_INVAL, cr, NULL);
789 * Check if the file system
790 * is a valid udfs and fill
791 * the required fields in udf_vfs
793 #ifndef __lint
794 _NOTE(NO_COMPETING_THREADS_NOW);
795 #endif
797 if ((lbsize = ud_get_lbsize(dev, &avd_loc)) == 0) {
798 error = EINVAL;
799 goto out;
802 udf_vfsp = ud_validate_and_fill_superblock(dev, lbsize, avd_loc);
803 if (udf_vfsp == NULL) {
804 error = EINVAL;
805 goto out;
809 * Fill in vfs private data
811 vfsp->vfs_fstype = udf_fstype;
812 vfs_make_fsid(&vfsp->vfs_fsid, dev, udf_fstype);
813 vfsp->vfs_data = (caddr_t)udf_vfsp;
814 vfsp->vfs_dev = dev;
815 vfsp->vfs_flag |= VFS_NOTRUNC;
816 udf_vfsp->udf_devvp = devvp;
818 udf_vfsp->udf_fsmnt = kmem_zalloc(strlen(name) + 1, KM_SLEEP);
819 (void) strcpy(udf_vfsp->udf_fsmnt, name);
821 udf_vfsp->udf_vfs = vfsp;
822 udf_vfsp->udf_rdclustsz = udf_vfsp->udf_wrclustsz = maxphys;
824 udf_vfsp->udf_mod = 0;
827 lvid = udf_vfsp->udf_lvid;
828 if (vfsp->vfs_flag & VFS_RDONLY) {
830 * We can read only UDF1.50;
831 * disallow mounting any higher version.
833 if (udf_vfsp->udf_miread > UDF_150) {
834 error = EINVAL;
835 goto out;
837 udf_vfsp->udf_flags = UDF_FL_RDONLY;
838 if (SWAP_32(lvid->lvid_int_type) == LOG_VOL_CLOSE_INT) {
839 udf_vfsp->udf_clean = UDF_CLEAN;
840 } else {
841 /* Do we have a VAT at the end of the recorded media */
842 map = udf_vfsp->udf_maps;
843 for (i = 0; i < udf_vfsp->udf_nmaps; i++) {
844 if (map->udm_flags & UDM_MAP_VPM) {
845 break;
847 map++;
849 if (i == udf_vfsp->udf_nmaps) {
850 error = ENOSPC;
851 goto out;
853 udf_vfsp->udf_clean = UDF_CLEAN;
855 } else {
857 * We can read and write only UDF1.50;
858 * disallow mounting any higher version.
860 if ((udf_vfsp->udf_miread > UDF_150) ||
861 (udf_vfsp->udf_miwrite > UDF_150)) {
862 error = EINVAL;
863 goto out;
866 * Check if the media allows
867 * us to mount read/write
869 if (udf_vfsp->udf_mtype != UDF_MT_OW) {
870 error = EACCES;
871 goto out;
875 * Check if we have a VAT on writable media;
876 * we cannot use the media in the presence of a VAT.
877 * Deny the RW mount.
879 map = udf_vfsp->udf_maps;
880 ASSERT(map != NULL);
881 for (i = 0; i < udf_vfsp->udf_nmaps; i++) {
882 if (map->udm_flags & UDM_MAP_VPM) {
883 error = EACCES;
884 goto out;
886 map++;
890 * Check if the domain Id allows
891 * us to write
893 if (udf_vfsp->udf_lvd->lvd_dom_id.reg_ids[2] & 0x3) {
894 error = EACCES;
895 goto out;
897 udf_vfsp->udf_flags = UDF_FL_RW;
899 if (SWAP_32(lvid->lvid_int_type) == LOG_VOL_CLOSE_INT) {
900 udf_vfsp->udf_clean = UDF_CLEAN;
901 } else {
902 if (isroot) {
903 udf_vfsp->udf_clean = UDF_DIRTY;
904 } else {
905 error = ENOSPC;
906 goto out;
911 mutex_init(&udf_vfsp->udf_lock, NULL, MUTEX_DEFAULT, NULL);
913 mutex_init(&udf_vfsp->udf_rename_lck, NULL, MUTEX_DEFAULT, NULL);
915 #ifndef __lint
916 _NOTE(COMPETING_THREADS_NOW);
917 #endif
918 if (error = ud_iget(vfsp, udf_vfsp->udf_ricb_prn,
919 udf_vfsp->udf_ricb_loc, &rip, NULL, cr)) {
920 mutex_destroy(&udf_vfsp->udf_lock);
921 goto out;
926 * Get the root inode and
927 * initialize the root vnode
929 rvp = ITOV(rip);
930 mutex_enter(&rvp->v_lock);
931 rvp->v_flag |= VROOT;
932 mutex_exit(&rvp->v_lock);
933 udf_vfsp->udf_root = rvp;
936 if (why == ROOT_INIT && isroot)
937 rootvp = devvp;
939 ud_vfs_add(udf_vfsp);
941 if (udf_vfsp->udf_flags == UDF_FL_RW) {
942 udf_vfsp->udf_clean = UDF_DIRTY;
943 ud_update_superblock(vfsp);
946 return (0);
948 out:
949 ud_destroy_fsp(udf_vfsp);
950 if (needclose) {
951 (void) VOP_CLOSE(devvp, (vfsp->vfs_flag & VFS_RDONLY) ?
952 FREAD : FREAD|FWRITE, 1, (offset_t)0, cr, NULL);
953 bflush(dev);
954 binval(dev);
956 VN_RELE(devvp);
958 return (error);
962 static struct udf_vfs *
963 ud_validate_and_fill_superblock(dev_t dev, int32_t bsize, uint32_t avd_loc)
965 int32_t error, count, index, shift;
966 uint32_t dummy, vds_loc;
967 caddr_t addr;
968 daddr_t blkno, lblkno;
969 struct buf *secbp, *bp;
970 struct tag *ttag;
971 struct anch_vol_desc_ptr *avdp;
972 struct file_set_desc *fsd;
973 struct udf_vfs *udf_vfsp = NULL;
974 struct pmap_hdr *hdr;
975 struct pmap_typ1 *typ1;
976 struct pmap_typ2 *typ2;
977 struct ud_map *map;
978 int32_t desc_len;
980 ud_printf("ud_validate_and_fill_superblock\n");
982 if (bsize < DEV_BSIZE) {
983 return (NULL);
985 shift = 0;
986 while ((bsize >> shift) > DEV_BSIZE) {
987 shift++;
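/*
 * Example: a bsize of 2048 with DEV_BSIZE of 512 gives shift == 2, so
 * logical-block addresses are converted to 512-byte sector addresses
 * below with "<< shift".
 */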
991 * Read Anchor Volume Descriptor
992 * Verify it and get the location of
993 * Main Volume Descriptor Sequence
995 secbp = ud_bread(dev, avd_loc << shift, ANCHOR_VOL_DESC_LEN);
996 if ((error = geterror(secbp)) != 0) {
997 cmn_err(CE_NOTE, "udfs : Could not read Anchor Volume Desc %x",
998 error);
999 brelse(secbp);
1000 return (NULL);
1002 avdp = (struct anch_vol_desc_ptr *)secbp->b_un.b_addr;
1003 if (ud_verify_tag_and_desc(&avdp->avd_tag, UD_ANCH_VOL_DESC,
1004 avd_loc, 1, ANCHOR_VOL_DESC_LEN) != 0) {
1005 brelse(secbp);
1006 return (NULL);
1008 udf_vfsp = (struct udf_vfs *)
1009 kmem_zalloc(sizeof (struct udf_vfs), KM_SLEEP);
1010 udf_vfsp->udf_mvds_loc = SWAP_32(avdp->avd_main_vdse.ext_loc);
1011 udf_vfsp->udf_mvds_len = SWAP_32(avdp->avd_main_vdse.ext_len);
1012 udf_vfsp->udf_rvds_loc = SWAP_32(avdp->avd_res_vdse.ext_loc);
1013 udf_vfsp->udf_rvds_len = SWAP_32(avdp->avd_res_vdse.ext_len);
1014 secbp->b_flags = B_AGE | B_STALE;
1015 brelse(secbp);
1018 * Read Main Volume Descriptor Sequence
1019 * and process it
1021 vds_loc = udf_vfsp->udf_mvds_loc;
1022 secbp = ud_bread(dev, vds_loc << shift,
1023 udf_vfsp->udf_mvds_len);
1024 if ((error = geterror(secbp)) != 0) {
1025 brelse(secbp);
1026 cmn_err(CE_NOTE, "udfs : Could not read Main Volume Desc %x",
1027 error);
1029 vds_loc = udf_vfsp->udf_rvds_loc;
1030 secbp = ud_bread(dev, vds_loc << shift,
1031 udf_vfsp->udf_rvds_len);
1032 if ((error = geterror(secbp)) != 0) {
1033 brelse(secbp);
1034 cmn_err(CE_NOTE,
1035 "udfs : Could not read Res Volume Desc %x", error);
1036 return (NULL);
1040 udf_vfsp->udf_vds = ngeteblk(udf_vfsp->udf_mvds_len);
1041 bp = udf_vfsp->udf_vds;
1042 bp->b_edev = dev;
1043 bp->b_dev = cmpdev(dev);
1044 bp->b_blkno = vds_loc << shift;
1045 bp->b_bcount = udf_vfsp->udf_mvds_len;
1046 bcopy(secbp->b_un.b_addr, bp->b_un.b_addr, udf_vfsp->udf_mvds_len);
1047 secbp->b_flags |= B_STALE | B_AGE;
1048 brelse(secbp);
1051 count = udf_vfsp->udf_mvds_len / DEV_BSIZE;
1052 addr = bp->b_un.b_addr;
1053 for (index = 0; index < count; index ++) {
1054 ttag = (struct tag *)(addr + index * DEV_BSIZE);
1055 desc_len = udf_vfsp->udf_mvds_len - (index * DEV_BSIZE);
1056 if (ud_verify_tag_and_desc(ttag, UD_PRI_VOL_DESC,
1057 vds_loc + (index >> shift),
1058 1, desc_len) == 0) {
1059 if (udf_vfsp->udf_pvd == NULL) {
1060 udf_vfsp->udf_pvd =
1061 (struct pri_vol_desc *)ttag;
1062 } else {
1063 struct pri_vol_desc *opvd, *npvd;
1065 opvd = udf_vfsp->udf_pvd;
1066 npvd = (struct pri_vol_desc *)ttag;
1068 if ((strncmp(opvd->pvd_vsi,
1069 npvd->pvd_vsi, 128) == 0) &&
1070 (strncmp(opvd->pvd_vol_id,
1071 npvd->pvd_vol_id, 32) == 0) &&
1072 (strncmp((caddr_t)&opvd->pvd_desc_cs,
1073 (caddr_t)&npvd->pvd_desc_cs,
1074 sizeof (charspec_t)) == 0)) {
1076 if (SWAP_32(opvd->pvd_vdsn) <
1077 SWAP_32(npvd->pvd_vdsn)) {
1078 udf_vfsp->udf_pvd = npvd;
1080 } else {
1081 goto out;
1084 } else if (ud_verify_tag_and_desc(ttag, UD_LOG_VOL_DESC,
1085 vds_loc + (index >> shift),
1086 1, desc_len) == 0) {
1087 struct log_vol_desc *lvd;
1089 lvd = (struct log_vol_desc *)ttag;
1090 if (strncmp(lvd->lvd_dom_id.reg_id,
1091 UDF_DOMAIN_NAME, 23) != 0) {
1092 printf("Domain ID in lvd is not valid\n");
1093 goto out;
1096 if (udf_vfsp->udf_lvd == NULL) {
1097 udf_vfsp->udf_lvd = lvd;
1098 } else {
1099 struct log_vol_desc *olvd;
1101 olvd = udf_vfsp->udf_lvd;
1102 if ((strncmp((caddr_t)&olvd->lvd_desc_cs,
1103 (caddr_t)&lvd->lvd_desc_cs,
1104 sizeof (charspec_t)) == 0) &&
1105 (strncmp(olvd->lvd_lvid,
1106 lvd->lvd_lvid, 128) == 0)) {
1107 if (SWAP_32(olvd->lvd_vdsn) <
1108 SWAP_32(lvd->lvd_vdsn)) {
1109 udf_vfsp->udf_lvd = lvd;
1111 } else {
1112 goto out;
1115 } else if (ud_verify_tag_and_desc(ttag, UD_PART_DESC,
1116 vds_loc + (index >> shift),
1117 1, desc_len) == 0) {
1118 int32_t i;
1119 struct phdr_desc *hdr;
1120 struct part_desc *pdesc;
1121 struct ud_part *pnew, *pold, *part;
1123 pdesc = (struct part_desc *)ttag;
1124 pold = udf_vfsp->udf_parts;
1125 for (i = 0; i < udf_vfsp->udf_npart; i++) {
1126 if (pold->udp_number !=
1127 SWAP_16(pdesc->pd_pnum)) {
1128 pold++;
1129 continue;
1132 if (SWAP_32(pdesc->pd_vdsn) >
1133 pold->udp_seqno) {
1134 pold->udp_seqno =
1135 SWAP_32(pdesc->pd_vdsn);
1136 pold->udp_access =
1137 SWAP_32(pdesc->pd_acc_type);
1138 pold->udp_start =
1139 SWAP_32(pdesc->pd_part_start);
1140 pold->udp_length =
1141 SWAP_32(pdesc->pd_part_length);
1143 goto loop_end;
1145 pold = udf_vfsp->udf_parts;
1146 udf_vfsp->udf_npart++;
1147 pnew = kmem_zalloc(udf_vfsp->udf_npart *
1148 sizeof (struct ud_part), KM_SLEEP);
1149 udf_vfsp->udf_parts = pnew;
1150 if (pold) {
1151 bcopy(pold, pnew,
1152 sizeof (struct ud_part) *
1153 (udf_vfsp->udf_npart - 1));
1154 kmem_free(pold,
1155 sizeof (struct ud_part) *
1156 (udf_vfsp->udf_npart - 1));
1158 part = pnew + (udf_vfsp->udf_npart - 1);
1159 part->udp_number = SWAP_16(pdesc->pd_pnum);
1160 part->udp_seqno = SWAP_32(pdesc->pd_vdsn);
1161 part->udp_access = SWAP_32(pdesc->pd_acc_type);
1162 part->udp_start = SWAP_32(pdesc->pd_part_start);
1163 part->udp_length = SWAP_32(pdesc->pd_part_length);
1164 part->udp_last_alloc = 0;
1167 * Figure out space bitmaps
1168 * or space tables
1170 hdr = (struct phdr_desc *)pdesc->pd_pc_use;
1171 if (hdr->phdr_ust.sad_ext_len) {
1172 part->udp_flags = UDP_SPACETBLS;
1173 part->udp_unall_loc =
1174 SWAP_32(hdr->phdr_ust.sad_ext_loc);
1175 part->udp_unall_len =
1176 SWAP_32(hdr->phdr_ust.sad_ext_len);
1177 part->udp_freed_loc =
1178 SWAP_32(hdr->phdr_fst.sad_ext_loc);
1179 part->udp_freed_len =
1180 SWAP_32(hdr->phdr_fst.sad_ext_len);
1181 } else {
1182 part->udp_flags = UDP_BITMAPS;
1183 part->udp_unall_loc =
1184 SWAP_32(hdr->phdr_usb.sad_ext_loc);
1185 part->udp_unall_len =
1186 SWAP_32(hdr->phdr_usb.sad_ext_len);
1187 part->udp_freed_loc =
1188 SWAP_32(hdr->phdr_fsb.sad_ext_loc);
1189 part->udp_freed_len =
1190 SWAP_32(hdr->phdr_fsb.sad_ext_len);
1192 } else if (ud_verify_tag_and_desc(ttag, UD_TERM_DESC,
1193 vds_loc + (index >> shift),
1194 1, desc_len) == 0) {
1196 break;
1198 loop_end:
1201 if ((udf_vfsp->udf_pvd == NULL) ||
1202 (udf_vfsp->udf_lvd == NULL) ||
1203 (udf_vfsp->udf_parts == NULL)) {
1204 goto out;
1208 * Process Primary Volume Descriptor
1210 (void) strncpy(udf_vfsp->udf_volid, udf_vfsp->udf_pvd->pvd_vol_id, 32);
1211 udf_vfsp->udf_volid[31] = '\0';
1212 udf_vfsp->udf_tsno = SWAP_16(udf_vfsp->udf_pvd->pvd_tag.tag_sno);
1215 * Process Logical Volume Descriptor
1217 udf_vfsp->udf_lbsize =
1218 SWAP_32(udf_vfsp->udf_lvd->lvd_log_bsize);
1219 udf_vfsp->udf_lbmask = udf_vfsp->udf_lbsize - 1;
1220 udf_vfsp->udf_l2d_shift = shift;
1221 udf_vfsp->udf_l2b_shift = shift + DEV_BSHIFT;
1224 * Check if the media is in
1225 * proper domain.
1227 if (strcmp(udf_vfsp->udf_lvd->lvd_dom_id.reg_id,
1228 UDF_DOMAIN_NAME) != 0) {
1229 goto out;
1233 * AVDS offset does not match with the lbsize
1234 * in the lvd
1236 if (udf_vfsp->udf_lbsize != bsize) {
1237 goto out;
1240 udf_vfsp->udf_iseq_loc =
1241 SWAP_32(udf_vfsp->udf_lvd->lvd_int_seq_ext.ext_loc);
1242 udf_vfsp->udf_iseq_len =
1243 SWAP_32(udf_vfsp->udf_lvd->lvd_int_seq_ext.ext_len);
1245 udf_vfsp->udf_fsd_prn =
1246 SWAP_16(udf_vfsp->udf_lvd->lvd_lvcu.lad_ext_prn);
1247 udf_vfsp->udf_fsd_loc =
1248 SWAP_32(udf_vfsp->udf_lvd->lvd_lvcu.lad_ext_loc);
1249 udf_vfsp->udf_fsd_len =
1250 SWAP_32(udf_vfsp->udf_lvd->lvd_lvcu.lad_ext_len);
1254 * process partitions
1256 udf_vfsp->udf_mtype = udf_vfsp->udf_parts[0].udp_access;
1257 for (index = 0; index < udf_vfsp->udf_npart; index ++) {
1258 if (udf_vfsp->udf_parts[index].udp_access <
1259 udf_vfsp->udf_mtype) {
1260 udf_vfsp->udf_mtype =
1261 udf_vfsp->udf_parts[index].udp_access;
1264 if ((udf_vfsp->udf_mtype < UDF_MT_RO) ||
1265 (udf_vfsp->udf_mtype > UDF_MT_OW)) {
1266 udf_vfsp->udf_mtype = UDF_MT_RO;
1269 udf_vfsp->udf_nmaps = 0;
1270 hdr = (struct pmap_hdr *)udf_vfsp->udf_lvd->lvd_pmaps;
1271 count = SWAP_32(udf_vfsp->udf_lvd->lvd_num_pmaps);
1272 for (index = 0; index < count; index++) {
1274 if ((hdr->maph_type == MAP_TYPE1) &&
1275 (hdr->maph_length == MAP_TYPE1_LEN)) {
1276 typ1 = (struct pmap_typ1 *)hdr;
1278 map = udf_vfsp->udf_maps;
1279 udf_vfsp->udf_maps =
1280 kmem_zalloc(sizeof (struct ud_map) *
1281 (udf_vfsp->udf_nmaps + 1), KM_SLEEP);
1282 if (map != NULL) {
1283 bcopy(map, udf_vfsp->udf_maps,
1284 sizeof (struct ud_map) *
1285 udf_vfsp->udf_nmaps);
1286 kmem_free(map, sizeof (struct ud_map) *
1287 udf_vfsp->udf_nmaps);
1289 map = udf_vfsp->udf_maps + udf_vfsp->udf_nmaps;
1290 map->udm_flags = UDM_MAP_NORM;
1291 map->udm_vsn = SWAP_16(typ1->map1_vsn);
1292 map->udm_pn = SWAP_16(typ1->map1_pn);
1293 udf_vfsp->udf_nmaps ++;
1294 } else if ((hdr->maph_type == MAP_TYPE2) &&
1295 (hdr->maph_length == MAP_TYPE2_LEN)) {
1296 typ2 = (struct pmap_typ2 *)hdr;
1298 if (strncmp(typ2->map2_pti.reg_id,
1299 UDF_VIRT_PART, 23) == 0) {
1301 * Add this to the normal
1302 * partition table so that
1303 * we do not
1305 map = udf_vfsp->udf_maps;
1306 udf_vfsp->udf_maps =
1307 kmem_zalloc(sizeof (struct ud_map) *
1308 (udf_vfsp->udf_nmaps + 1), KM_SLEEP);
1309 if (map != NULL) {
1310 bcopy(map, udf_vfsp->udf_maps,
1311 sizeof (struct ud_map) *
1312 udf_vfsp->udf_nmaps);
1313 kmem_free(map,
1314 sizeof (struct ud_map) *
1315 udf_vfsp->udf_nmaps);
1317 map = udf_vfsp->udf_maps + udf_vfsp->udf_nmaps;
1318 map->udm_flags = UDM_MAP_VPM;
1319 map->udm_vsn = SWAP_16(typ2->map2_vsn);
1320 map->udm_pn = SWAP_16(typ2->map2_pn);
1321 udf_vfsp->udf_nmaps ++;
1322 if (error = ud_get_last_block(dev, &lblkno)) {
1323 goto out;
1325 if (error = ud_val_get_vat(udf_vfsp, dev,
1326 lblkno, map)) {
1327 goto out;
1329 } else if (strncmp(typ2->map2_pti.reg_id,
1330 UDF_SPAR_PART, 23) == 0) {
1332 if (SWAP_16(typ2->map2_pl) != 32) {
1333 printf(
1334 "Packet Length is not valid %x\n",
1335 SWAP_16(typ2->map2_pl));
1336 goto out;
1338 if ((typ2->map2_nst < 1) ||
1339 (typ2->map2_nst > 4)) {
1340 goto out;
1342 map = udf_vfsp->udf_maps;
1343 udf_vfsp->udf_maps =
1344 kmem_zalloc(sizeof (struct ud_map) *
1345 (udf_vfsp->udf_nmaps + 1),
1346 KM_SLEEP);
1347 if (map != NULL) {
1348 bcopy(map, udf_vfsp->udf_maps,
1349 sizeof (struct ud_map) *
1350 udf_vfsp->udf_nmaps);
1351 kmem_free(map,
1352 sizeof (struct ud_map) *
1353 udf_vfsp->udf_nmaps);
1355 map = udf_vfsp->udf_maps + udf_vfsp->udf_nmaps;
1356 map->udm_flags = UDM_MAP_SPM;
1357 map->udm_vsn = SWAP_16(typ2->map2_vsn);
1358 map->udm_pn = SWAP_16(typ2->map2_pn);
1360 udf_vfsp->udf_nmaps ++;
1362 if (error = ud_read_sparing_tbls(udf_vfsp,
1363 dev, map, typ2)) {
1364 goto out;
1366 } else {
1368 * Unknown type of partition
1369 * Bail out
1371 goto out;
1373 } else {
1375 * Unknown type of partition
1376 * Bail out
1378 goto out;
1380 hdr = (struct pmap_hdr *)(((uint8_t *)hdr) + hdr->maph_length);
1385 * Read Logical Volume Integrity Sequence
1386 * and process it
1388 secbp = ud_bread(dev, udf_vfsp->udf_iseq_loc << shift,
1389 udf_vfsp->udf_iseq_len);
1390 if ((error = geterror(secbp)) != 0) {
1391 cmn_err(CE_NOTE,
1392 "udfs : Could not read Logical Volume Integrity Sequence %x",
1393 error);
1394 brelse(secbp);
1395 goto out;
1397 udf_vfsp->udf_iseq = ngeteblk(udf_vfsp->udf_iseq_len);
1398 bp = udf_vfsp->udf_iseq;
1399 bp->b_edev = dev;
1400 bp->b_dev = cmpdev(dev);
1401 bp->b_blkno = udf_vfsp->udf_iseq_loc << shift;
1402 bp->b_bcount = udf_vfsp->udf_iseq_len;
1403 bcopy(secbp->b_un.b_addr, bp->b_un.b_addr, udf_vfsp->udf_iseq_len);
1404 secbp->b_flags |= B_STALE | B_AGE;
1405 brelse(secbp);
1407 count = udf_vfsp->udf_iseq_len / DEV_BSIZE;
1408 addr = bp->b_un.b_addr;
1409 for (index = 0; index < count; index ++) {
1410 ttag = (struct tag *)(addr + index * DEV_BSIZE);
1411 desc_len = udf_vfsp->udf_iseq_len - (index * DEV_BSIZE);
1412 if (ud_verify_tag_and_desc(ttag, UD_LOG_VOL_INT,
1413 udf_vfsp->udf_iseq_loc + (index >> shift),
1414 1, desc_len) == 0) {
1416 struct log_vol_int_desc *lvid;
1418 lvid = (struct log_vol_int_desc *)ttag;
1419 udf_vfsp->udf_lvid = lvid;
1421 if (SWAP_32(lvid->lvid_int_type) == LOG_VOL_CLOSE_INT) {
1422 udf_vfsp->udf_clean = UDF_CLEAN;
1423 } else {
1424 udf_vfsp->udf_clean = UDF_DIRTY;
1428 * update superblock with the metadata
1430 ud_convert_to_superblock(udf_vfsp, lvid);
1431 break;
1435 if (udf_vfsp->udf_lvid == NULL) {
1436 goto out;
1439 if ((blkno = ud_xlate_to_daddr(udf_vfsp,
1440 udf_vfsp->udf_fsd_prn, udf_vfsp->udf_fsd_loc,
1441 1, &dummy)) == 0) {
1442 goto out;
1444 secbp = ud_bread(dev, blkno << shift, udf_vfsp->udf_fsd_len);
1445 if ((error = geterror(secbp)) != 0) {
1446 cmn_err(CE_NOTE,
1447 "udfs : Could not read File Set Descriptor %x", error);
1448 brelse(secbp);
1449 goto out;
1451 fsd = (struct file_set_desc *)secbp->b_un.b_addr;
1452 if (ud_verify_tag_and_desc(&fsd->fsd_tag, UD_FILE_SET_DESC,
1453 udf_vfsp->udf_fsd_loc,
1454 1, udf_vfsp->udf_fsd_len) != 0) {
1455 secbp->b_flags = B_AGE | B_STALE;
1456 brelse(secbp);
1457 goto out;
1459 udf_vfsp->udf_ricb_prn = SWAP_16(fsd->fsd_root_icb.lad_ext_prn);
1460 udf_vfsp->udf_ricb_loc = SWAP_32(fsd->fsd_root_icb.lad_ext_loc);
1461 udf_vfsp->udf_ricb_len = SWAP_32(fsd->fsd_root_icb.lad_ext_len);
1462 secbp->b_flags = B_AGE | B_STALE;
1463 brelse(secbp);
1464 udf_vfsp->udf_root_blkno = ud_xlate_to_daddr(udf_vfsp,
1465 udf_vfsp->udf_ricb_prn, udf_vfsp->udf_ricb_loc,
1466 1, &dummy);
1468 return (udf_vfsp);
1469 out:
1470 ud_destroy_fsp(udf_vfsp);
1472 return (NULL);
1476 * release/free resources from one ud_map; map data was zalloc'd in
1477 * ud_validate_and_fill_superblock() and fields may later point to
1478 * valid data
1480 static void
1481 ud_free_map(struct ud_map *map)
1483 uint32_t n;
1485 if (map->udm_flags & UDM_MAP_VPM) {
1486 if (map->udm_count) {
1487 kmem_free(map->udm_count,
1488 map->udm_nent * sizeof (*map->udm_count));
1489 map->udm_count = NULL;
1491 if (map->udm_bp) {
1492 for (n = 0; n < map->udm_nent; n++) {
1493 if (map->udm_bp[n])
1494 brelse(map->udm_bp[n]);
1496 kmem_free(map->udm_bp,
1497 map->udm_nent * sizeof (*map->udm_bp));
1498 map->udm_bp = NULL;
1500 if (map->udm_addr) {
1501 kmem_free(map->udm_addr,
1502 map->udm_nent * sizeof (*map->udm_addr));
1503 map->udm_addr = NULL;
1506 if (map->udm_flags & UDM_MAP_SPM) {
1507 for (n = 0; n < MAX_SPM; n++) {
1508 if (map->udm_sbp[n]) {
1509 brelse(map->udm_sbp[n]);
1510 map->udm_sbp[n] = NULL;
1511 map->udm_spaddr[n] = NULL;
1517 void
1518 ud_destroy_fsp(struct udf_vfs *udf_vfsp)
1520 int32_t i;
1522 ud_printf("ud_destroy_fsp\n");
1523 if (udf_vfsp == NULL)
1524 return;
1526 if (udf_vfsp->udf_maps) {
1527 for (i = 0; i < udf_vfsp->udf_nmaps; i++)
1528 ud_free_map(&udf_vfsp->udf_maps[i]);
1530 kmem_free(udf_vfsp->udf_maps,
1531 udf_vfsp->udf_nmaps * sizeof (*udf_vfsp->udf_maps));
1534 if (udf_vfsp->udf_parts) {
1535 kmem_free(udf_vfsp->udf_parts,
1536 udf_vfsp->udf_npart * sizeof (*udf_vfsp->udf_parts));
1538 if (udf_vfsp->udf_iseq) {
1539 udf_vfsp->udf_iseq->b_flags |= (B_STALE|B_AGE);
1540 brelse(udf_vfsp->udf_iseq);
1542 if (udf_vfsp->udf_vds) {
1543 udf_vfsp->udf_vds->b_flags |= (B_STALE|B_AGE);
1544 brelse(udf_vfsp->udf_vds);
1546 if (udf_vfsp->udf_vfs)
1547 ud_vfs_remove(udf_vfsp);
1548 if (udf_vfsp->udf_fsmnt) {
1549 kmem_free(udf_vfsp->udf_fsmnt,
1550 strlen(udf_vfsp->udf_fsmnt) + 1);
1552 kmem_free(udf_vfsp, sizeof (*udf_vfsp));
1555 void
1556 ud_convert_to_superblock(struct udf_vfs *udf_vfsp,
1557 struct log_vol_int_desc *lvid)
1559 int32_t i, c;
1560 uint32_t *temp;
1561 struct ud_part *ud_part;
1562 struct lvid_iu *iu;
1564 udf_vfsp->udf_maxuniq = SWAP_64(lvid->lvid_uniqid);
1565 temp = lvid->lvid_fst;
1566 c = SWAP_32(lvid->lvid_npart);
1567 ud_part = udf_vfsp->udf_parts;
1568 for (i = 0; i < c; i++) {
1569 if (i >= udf_vfsp->udf_npart) {
1570 continue;
1572 ud_part->udp_nfree = SWAP_32(temp[i]);
1573 ud_part->udp_nblocks = SWAP_32(temp[c + i]);
1574 udf_vfsp->udf_freeblks += SWAP_32(temp[i]);
1575 udf_vfsp->udf_totalblks += SWAP_32(temp[c + i]);
1576 ud_part++;
1579 iu = (struct lvid_iu *)(temp + c * 2);
1580 udf_vfsp->udf_nfiles = SWAP_32(iu->lvidiu_nfiles);
1581 udf_vfsp->udf_ndirs = SWAP_32(iu->lvidiu_ndirs);
1582 udf_vfsp->udf_miread = BCD2HEX_16(SWAP_16(iu->lvidiu_mread));
1583 udf_vfsp->udf_miwrite = BCD2HEX_16(SWAP_16(iu->lvidiu_mwrite));
1584 udf_vfsp->udf_mawrite = BCD2HEX_16(SWAP_16(iu->lvidiu_maxwr));
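/*
 * Layout walked by ud_convert_to_superblock() above: lvid_fst holds
 * lvid_npart free-space counts followed by lvid_npart partition-size
 * counts, and the implementation-use area after them (struct lvid_iu)
 * supplies the file/directory counts and the minimum/maximum UDF
 * read/write revisions, which are stored as BCD.
 */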
1587 void
1588 ud_update_superblock(struct vfs *vfsp)
1590 struct udf_vfs *udf_vfsp;
1592 ud_printf("ud_update_superblock\n");
1594 udf_vfsp = (struct udf_vfs *)vfsp->vfs_data;
1596 mutex_enter(&udf_vfsp->udf_lock);
1597 ud_sbwrite(udf_vfsp);
1598 mutex_exit(&udf_vfsp->udf_lock);
1602 #include <sys/dkio.h>
1603 #include <sys/cdio.h>
1604 #include <sys/vtoc.h>
1607 * This part of the code is known
1608 * to work only on sparc. It needs
1609 * to be evaluated before using it with x86.
1611 int32_t
1612 ud_get_last_block(dev_t dev, daddr_t *blkno)
1614 struct vtoc vtoc;
1615 struct dk_cinfo dki_info;
1616 int32_t rval, error;
1618 if ((error = cdev_ioctl(dev, DKIOCGVTOC, (intptr_t)&vtoc,
1619 FKIOCTL|FREAD|FNATIVE, CRED(), &rval)) != 0) {
1620 cmn_err(CE_NOTE, "Could not get the vtoc information");
1621 return (error);
1624 if (vtoc.v_sanity != VTOC_SANE) {
1625 return (EINVAL);
1627 if ((error = cdev_ioctl(dev, DKIOCINFO, (intptr_t)&dki_info,
1628 FKIOCTL|FREAD|FNATIVE, CRED(), &rval)) != 0) {
1629 cmn_err(CE_NOTE, "Could not get the slice information");
1630 return (error);
1633 if (dki_info.dki_partition > V_NUMPAR) {
1634 return (EINVAL);
1638 *blkno = vtoc.v_part[dki_info.dki_partition].p_size;
1640 return (0);
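/*
 * The count returned by ud_get_last_block() is the size, in DEV_BSIZE
 * sectors, of the slice the device is opened on (taken from the VTOC);
 * it is used below both to locate the VAT ICB and to probe for anchor
 * volume descriptors near the end of the media.
 */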
1643 /* Search sequentially N - 152, N - 150, N - 2, N for the VAT ICB */
1645 * int32_t ud_sub_blks[] = {2, 0, 152, 150};
1647 int32_t ud_sub_blks[] = {152, 150, 2, 0};
1648 int32_t ud_sub_count = 4;
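/*
 * Concrete probe order (illustrative): with end_loc computed in
 * ud_val_get_vat() as (last_block >> udf_l2d_shift) - 1, the candidate
 * VAT ICB locations tried are end_loc - 152, end_loc - 150, end_loc - 2
 * and end_loc itself, presumably to cover the usual run-out placements
 * on sequentially written media.
 */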
1651 * Validate the VAT ICB
1653 static int32_t
1654 ud_val_get_vat(struct udf_vfs *udf_vfsp, dev_t dev,
1655 daddr_t blkno, struct ud_map *udm)
1657 struct buf *secbp;
1658 struct file_entry *fe;
1659 int32_t end_loc, i, j, ad_type;
1660 struct short_ad *sad;
1661 struct long_ad *lad;
1662 uint32_t count, blk;
1663 struct ud_part *ud_part;
1664 int err = 0;
1666 end_loc = (blkno >> udf_vfsp->udf_l2d_shift) - 1;
1668 for (i = 0; i < ud_sub_count; i++) {
1669 udm->udm_vat_icb = end_loc - ud_sub_blks[i];
1671 secbp = ud_bread(dev,
1672 udm->udm_vat_icb << udf_vfsp->udf_l2d_shift,
1673 udf_vfsp->udf_lbsize);
1674 ASSERT(secbp->b_un.b_addr);
1676 fe = (struct file_entry *)secbp->b_un.b_addr;
1677 if (ud_verify_tag_and_desc(&fe->fe_tag, UD_FILE_ENTRY, 0,
1678 0, 0) == 0) {
1679 if (ud_verify_tag_and_desc(&fe->fe_tag, UD_FILE_ENTRY,
1680 SWAP_32(fe->fe_tag.tag_loc),
1681 1, udf_vfsp->udf_lbsize) == 0) {
1682 if (fe->fe_icb_tag.itag_ftype == 0) {
1683 break;
1687 secbp->b_flags |= B_AGE | B_STALE;
1688 brelse(secbp);
1690 if (i == ud_sub_count) {
1691 return (EINVAL);
1694 ad_type = SWAP_16(fe->fe_icb_tag.itag_flags) & 0x3;
1695 if (ad_type == ICB_FLAG_ONE_AD) {
1696 udm->udm_nent = 1;
1697 } else if (ad_type == ICB_FLAG_SHORT_AD) {
1698 udm->udm_nent =
1699 SWAP_32(fe->fe_len_adesc) / sizeof (struct short_ad);
1700 } else if (ad_type == ICB_FLAG_LONG_AD) {
1701 udm->udm_nent =
1702 SWAP_32(fe->fe_len_adesc) / sizeof (struct long_ad);
1703 } else {
1704 err = EINVAL;
1705 goto end;
1708 udm->udm_count = kmem_zalloc(udm->udm_nent * sizeof (*udm->udm_count),
1709 KM_SLEEP);
1710 udm->udm_bp = kmem_zalloc(udm->udm_nent * sizeof (*udm->udm_bp),
1711 KM_SLEEP);
1712 udm->udm_addr = kmem_zalloc(udm->udm_nent * sizeof (*udm->udm_addr),
1713 KM_SLEEP);
1715 if (ad_type == ICB_FLAG_ONE_AD) {
1716 udm->udm_count[0] = (SWAP_64(fe->fe_info_len) - 36) /
1717 sizeof (uint32_t);
1718 udm->udm_bp[0] = secbp;
1719 udm->udm_addr[0] = (uint32_t *)
1720 &fe->fe_spec[SWAP_32(fe->fe_len_ear)];
1721 return (0);
1723 for (i = 0; i < udm->udm_nent; i++) {
1724 if (ad_type == ICB_FLAG_SHORT_AD) {
1725 sad = (struct short_ad *)
1726 (fe->fe_spec + SWAP_32(fe->fe_len_ear));
1727 sad += i;
1728 count = SWAP_32(sad->sad_ext_len);
1729 blk = SWAP_32(sad->sad_ext_loc);
1730 } else {
1731 lad = (struct long_ad *)
1732 (fe->fe_spec + SWAP_32(fe->fe_len_ear));
1733 lad += i;
1734 count = SWAP_32(lad->lad_ext_len);
1735 blk = SWAP_32(lad->lad_ext_loc);
1736 ASSERT(SWAP_16(lad->lad_ext_prn) == udm->udm_pn);
1738 if ((count & 0x3FFFFFFF) == 0) {
1739 break;
1741 if (i < udm->udm_nent - 1) {
1742 udm->udm_count[i] = count / 4;
1743 } else {
1744 udm->udm_count[i] = (count - 36) / 4;
1746 ud_part = udf_vfsp->udf_parts;
1747 for (j = 0; j < udf_vfsp->udf_npart; j++) {
1748 if (udm->udm_pn == ud_part->udp_number) {
1749 blk = ud_part->udp_start + blk;
1750 break;
1753 if (j == udf_vfsp->udf_npart) {
1754 err = EINVAL;
1755 break;
1758 count = (count + DEV_BSIZE - 1) & ~(DEV_BSIZE - 1);
1759 udm->udm_bp[i] = ud_bread(dev,
1760 blk << udf_vfsp->udf_l2d_shift, count);
1761 if ((udm->udm_bp[i]->b_error != 0) ||
1762 (udm->udm_bp[i]->b_resid)) {
1763 err = EINVAL;
1764 break;
1766 udm->udm_addr[i] = (uint32_t *)udm->udm_bp[i]->b_un.b_addr;
1769 end:
1770 if (err)
1771 ud_free_map(udm);
1772 secbp->b_flags |= B_AGE | B_STALE;
1773 brelse(secbp);
1774 return (err);
1777 int32_t
1778 ud_read_sparing_tbls(struct udf_vfs *udf_vfsp,
1779 dev_t dev, struct ud_map *map, struct pmap_typ2 *typ2)
1781 int32_t index, valid = 0;
1782 uint32_t sz;
1783 struct buf *bp;
1784 struct stbl *stbl;
1786 map->udm_plen = SWAP_16(typ2->map2_pl);
1787 map->udm_nspm = typ2->map2_nst;
1788 map->udm_spsz = SWAP_32(typ2->map2_sest);
1789 sz = (map->udm_spsz + udf_vfsp->udf_lbmask) & ~udf_vfsp->udf_lbmask;
1790 if (sz == 0) {
1791 return (0);
1794 for (index = 0; index < map->udm_nspm; index++) {
1795 map->udm_loc[index] = SWAP_32(typ2->map2_st[index]);
1797 bp = ud_bread(dev,
1798 map->udm_loc[index] << udf_vfsp->udf_l2d_shift, sz);
1799 if ((bp->b_error != 0) || (bp->b_resid)) {
1800 brelse(bp);
1801 continue;
1803 stbl = (struct stbl *)bp->b_un.b_addr;
1804 if (strncmp(stbl->stbl_si.reg_id, UDF_SPAR_TBL, 23) != 0) {
1805 printf("Sparing Identifier does not match\n");
1806 bp->b_flags |= B_AGE | B_STALE;
1807 brelse(bp);
1808 continue;
1810 map->udm_sbp[index] = bp;
1811 map->udm_spaddr[index] = bp->b_un.b_addr;
1812 #ifdef UNDEF
1814 struct stbl_entry *te;
1815 int32_t i, tbl_len;
1817 te = (struct stbl_entry *)&stbl->stbl_entry;
1818 tbl_len = SWAP_16(stbl->stbl_len);
1820 printf("%x %x\n", tbl_len, SWAP_32(stbl->stbl_seqno));
1821 printf("%x %x\n", bp->b_un.b_addr, te);
1823 for (i = 0; i < tbl_len; i++) {
1824 printf("%x %x\n", SWAP_32(te->sent_ol), SWAP_32(te->sent_ml));
1825 te ++;
1828 #endif
1829 valid ++;
1832 if (valid) {
1833 return (0);
1835 return (EINVAL);
1838 uint32_t
1839 ud_get_lbsize(dev_t dev, uint32_t *loc)
1841 int32_t bsize, shift, index, end_index;
1842 daddr_t last_block;
1843 uint32_t avd_loc;
1844 struct buf *bp;
1845 struct anch_vol_desc_ptr *avdp;
1846 uint32_t session_offset = 0;
1847 int32_t rval;
1849 if (ud_get_last_block(dev, &last_block) != 0) {
1850 end_index = 1;
1851 } else {
1852 end_index = 3;
1855 if (cdev_ioctl(dev, CDROMREADOFFSET, (intptr_t)&session_offset,
1856 FKIOCTL|FREAD|FNATIVE, CRED(), &rval) != 0) {
1857 session_offset = 0;
1860 for (index = 0; index < end_index; index++) {
1862 for (bsize = DEV_BSIZE, shift = 0;
1863 bsize <= MAXBSIZE; bsize <<= 1, shift++) {
1865 if (index == 0) {
1866 avd_loc = 256;
1867 if (bsize <= 2048) {
1868 avd_loc +=
1869 session_offset * 2048 / bsize;
1870 } else {
1871 avd_loc +=
1872 session_offset / (bsize / 2048);
1874 } else if (index == 1) {
1875 avd_loc = last_block - (1 << shift);
1876 } else {
1877 avd_loc = last_block - (256 << shift);
1880 bp = ud_bread(dev, avd_loc << shift,
1881 ANCHOR_VOL_DESC_LEN);
1882 if (geterror(bp) != 0) {
1883 brelse(bp);
1884 continue;
1888 * Verify if we have avdp here
1890 avdp = (struct anch_vol_desc_ptr *)bp->b_un.b_addr;
1891 if (ud_verify_tag_and_desc(&avdp->avd_tag,
1892 UD_ANCH_VOL_DESC, avd_loc,
1893 1, ANCHOR_VOL_DESC_LEN) != 0) {
1894 bp->b_flags |= B_AGE | B_STALE;
1895 brelse(bp);
1896 continue;
1898 bp->b_flags |= B_AGE | B_STALE;
1899 brelse(bp);
1900 *loc = avd_loc;
1901 return (bsize);
1906 * Did not find the AVD at any of the locations
1908 return (0);
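/*
 * Search strategy of ud_get_lbsize() above, by example: for each candidate
 * block size from DEV_BSIZE up to MAXBSIZE (doubling each time) it reads a
 * would-be anchor volume descriptor at block 256, adjusted by the CD
 * session offset converted from 2048-byte units, and, when the last block
 * is known, at last_block - (1 << shift) and last_block - (256 << shift);
 * the first block size whose AVDP tag verifies is returned.
 */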
1911 static int
1912 udfinit(int fstype, char *name)
1914 static const fs_operation_def_t udf_vfsops_template[] = {
1915 VFSNAME_MOUNT, { .vfs_mount = udf_mount },
1916 VFSNAME_UNMOUNT, { .vfs_unmount = udf_unmount },
1917 VFSNAME_ROOT, { .vfs_root = udf_root },
1918 VFSNAME_STATVFS, { .vfs_statvfs = udf_statvfs },
1919 VFSNAME_SYNC, { .vfs_sync = udf_sync },
1920 VFSNAME_VGET, { .vfs_vget = udf_vget },
1921 VFSNAME_MOUNTROOT, { .vfs_mountroot = udf_mountroot },
1922 NULL, NULL
1924 extern struct vnodeops *udf_vnodeops;
1925 extern const fs_operation_def_t udf_vnodeops_template[];
1926 int error;
1928 ud_printf("udfinit\n");
1930 error = vfs_setfsops(fstype, udf_vfsops_template, NULL);
1931 if (error != 0) {
1932 cmn_err(CE_WARN, "udfinit: bad vfs ops template");
1933 return (error);
1936 error = vn_make_ops(name, udf_vnodeops_template, &udf_vnodeops);
1937 if (error != 0) {
1938 (void) vfs_freevfsops_by_type(fstype);
1939 cmn_err(CE_WARN, "udfinit: bad vnode ops template");
1940 return (error);
1943 udf_fstype = fstype;
1945 ud_init_inodes();
1947 return (0);