/*
 * Copyright (c) 1988 University of Utah.
 * Copyright (c) 1990, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * the Systems Programming Group of the University of Utah Computer
 * Science Department.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * from: Utah Hdr: vn.c 1.13 94/04/02
 *
 *	from: @(#)vn.c	8.6 (Berkeley) 4/1/94
 * $FreeBSD: src/sys/dev/vn/vn.c,v 1.105.2.4 2001/11/18 07:11:00 dillon Exp $
 * $DragonFly: src/sys/dev/disk/vn/vn.c,v 1.38 2008/07/01 02:02:53 dillon Exp $
 */
/*
 * Vnode disk driver.
 *
 * Block/character interface to a vnode.  Allows one to treat a file
 * as a disk (e.g. build a filesystem in it, mount it, etc.).
 *
 * NOTE 1: There is a security issue involved with this driver.
 * Once mounted, all access to the contents of the "mapped" file via
 * the special file is controlled by the permissions on the special
 * file; the protection of the mapped file is ignored (effectively,
 * by using root credentials in all transactions).
 *
 * NOTE 2: Doesn't interact with leases, should it?
 */
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/proc.h>
#include <sys/nlookup.h>
#include <sys/buf.h>
#include <sys/malloc.h>
#include <sys/mount.h>
#include <sys/vnode.h>
#include <sys/fcntl.h>
#include <sys/conf.h>
#include <sys/diskslice.h>
#include <sys/disk.h>
#include <sys/stat.h>
#include <sys/module.h>
#include <sys/vnioctl.h>

#include <vm/vm.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_pager.h>
#include <vm/vm_pageout.h>
#include <vm/swap_pager.h>
#include <vm/vm_extern.h>
#include <vm/vm_zone.h>
static	d_ioctl_t	vnioctl;
static	d_open_t	vnopen;
static	d_close_t	vnclose;
static	d_psize_t	vnsize;
static	d_strategy_t	vnstrategy;

#define CDEV_MAJOR	43

#define VN_BSIZE_BEST	8192
/*
 * dev_ops
 *	D_DISK		we want to look like a disk
 *	D_CANFREE	We support BUF_CMD_FREEBLKS
 */

static struct dev_ops vn_ops = {
	{ "vn", CDEV_MAJOR, D_DISK | D_CANFREE },
	.d_open =	vnopen,
	.d_close =	vnclose,
	.d_read =	physread,
	.d_write =	physwrite,
	.d_ioctl =	vnioctl,
	.d_strategy =	vnstrategy,
	.d_psize =	vnsize
};
struct vn_softc {
	int		sc_unit;
	int		sc_flags;	/* flags			*/
	u_int64_t	sc_size;	/* size of vn, sc_secsize scale	*/
	int		sc_secsize;	/* sector size			*/
	struct diskslices *sc_slices;	/* XXX fields from struct disk	*/
	struct disk_info sc_info;	/* XXX fields from struct disk	*/
	struct vnode	*sc_vp;		/* vnode if not NULL		*/
	vm_object_t	sc_object;	/* backing object if not NULL	*/
	struct ucred	*sc_cred;	/* credentials			*/
	int		sc_maxactive;	/* max # of active requests	*/
	struct buf	sc_tab;		/* transfer queue		*/
	u_long		sc_options;	/* options			*/
	cdev_t		sc_devlist;	/* devices that refer to this unit */
	SLIST_ENTRY(vn_softc) sc_list;
};

static SLIST_HEAD(, vn_softc) vn_list;

/* sc_flags */
#define VNF_INITED	0x01
#define VNF_READONLY	0x02

static u_long	vn_options;
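
/*
 * Option tests.  A unit honors its per-unit options (sc_options) OR'd with
 * the global vn_options: IFOPT() conditionalizes the following statement on
 * an option, TESTOPT() simply evaluates it.
 */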
#define IFOPT(vn,opt)	if (((vn)->sc_options|vn_options) & (opt))
#define TESTOPT(vn,opt)	(((vn)->sc_options|vn_options) & (opt))
static	int	vnsetcred (struct vn_softc *vn, struct ucred *cred);
static	void	vnclear (struct vn_softc *vn);
static	int	vnget (cdev_t dev, struct vn_softc *vn, struct vn_user *vnu);
static	int	vn_modevent (module_t, int, void *);
static	int	vniocattach_file (struct vn_softc *, struct vn_ioctl *, cdev_t dev, int flag, struct ucred *cred);
static	int	vniocattach_swap (struct vn_softc *, struct vn_ioctl *, cdev_t dev, int flag, struct ucred *cred);
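
/*
 *	vnclose:
 *
 *	Close the device.  When disk label support is enabled, pass the
 *	close on to the slice layer so it can track which slices and
 *	partitions remain open.
 */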
static int
vnclose(struct dev_close_args *ap)
{
	cdev_t dev = ap->a_head.a_dev;
	struct vn_softc *vn = dev->si_drv1;

	IFOPT(vn, VN_LABELS)
		if (vn->sc_slices != NULL)
			dsclose(dev, ap->a_devtype, vn->sc_slices);
	return (0);
}
/*
 * Called only when si_drv1 is NULL.  Locate the associated vn node and
 * attach the device to it.
 */
static struct vn_softc *
vnfindvn(cdev_t dev)
{
	int unit;
	struct vn_softc *vn;

	unit = dkunit(dev);
	SLIST_FOREACH(vn, &vn_list, sc_list) {
		if (vn->sc_unit == unit) {
			/* link this cdev into the unit's device list via si_drv2 */
			dev->si_drv1 = vn;
			dev->si_drv2 = vn->sc_devlist;
			vn->sc_devlist = dev;
			reference_dev(dev);
			break;
		}
	}
	if (vn == NULL) {
		vn = kmalloc(sizeof *vn, M_DEVBUF, M_WAITOK | M_ZERO);
		vn->sc_unit = unit;
		dev->si_drv1 = vn;
		vn->sc_devlist = make_dev(&vn_ops, 0, UID_ROOT,
					  GID_OPERATOR, 0640, "vn%d", unit);
		if (vn->sc_devlist->si_drv1 == NULL) {
			reference_dev(vn->sc_devlist);
			vn->sc_devlist->si_drv1 = vn;
			vn->sc_devlist->si_drv2 = NULL;
		}
		if (vn->sc_devlist != dev) {
			dev->si_drv1 = vn;
			dev->si_drv2 = vn->sc_devlist;
			vn->sc_devlist = dev;
			reference_dev(dev);
		}
		SLIST_INSERT_HEAD(&vn_list, vn, sc_list);
	}
	return (vn);
}
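
/*
 *	vnopen:
 *
 *	Open the device.  Locate (or create) the softc for this unit,
 *	refresh the block-size hints, reject writes to read-only units,
 *	and hand the open to the slice code when label support is enabled.
 */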
static int
vnopen(struct dev_open_args *ap)
{
	cdev_t dev = ap->a_head.a_dev;
	struct vn_softc *vn;
	struct disk_info *info;

	/*
	 * Locate preexisting device
	 */
	if ((vn = dev->si_drv1) == NULL)
		vn = vnfindvn(dev);

	/*
	 * Update si_bsize fields for device.  This data will be overridden by
	 * the slice/partition code for vn accesses through partitions, and
	 * used directly if you open the 'whole disk' device.
	 *
	 * si_bsize_best must be reinitialized in case VN has been
	 * reconfigured, plus make it at least VN_BSIZE_BEST for efficiency.
	 */
	dev->si_bsize_phys = vn->sc_secsize;
	dev->si_bsize_best = vn->sc_secsize;
	if (dev->si_bsize_best < VN_BSIZE_BEST)
		dev->si_bsize_best = VN_BSIZE_BEST;

	if ((ap->a_oflags & FWRITE) && (vn->sc_flags & VNF_READONLY))
		return (EACCES);

	IFOPT(vn, VN_FOLLOW)
		kprintf("vnopen(%s, 0x%x, 0x%x)\n",
		    devtoname(dev), ap->a_oflags, ap->a_devtype);

	/*
	 * Initialize label
	 */
	IFOPT(vn, VN_LABELS) {
		if (vn->sc_flags & VNF_INITED) {
			info = &vn->sc_info;
			bzero(info, sizeof(*info));
			info->d_media_blksize = vn->sc_secsize;
			info->d_media_blocks = vn->sc_size;
			/*
			 * reserve mbr sector for backwards compatibility
			 * when no slices exist.
			 */
			info->d_dsflags = DSO_COMPATMBR;

			info->d_secpertrack = 32;
			info->d_nheads = 64 / (vn->sc_secsize / DEV_BSIZE);
			info->d_secpercyl = info->d_secpertrack *
					    info->d_nheads;
			info->d_ncylinders = vn->sc_size / info->d_secpercyl;
		}
		return (dsopen(dev, ap->a_devtype, 0,
			       &vn->sc_slices, info));
	}
	if (dkslice(dev) != WHOLE_DISK_SLICE ||
	    dkpart(dev) != WHOLE_SLICE_PART ||
	    ap->a_devtype != S_IFCHR) {
		return (ENXIO);
	}
	return(0);
}
/*
 *	vnstrategy:
 *
 *	Run strategy routine for VN device.  We use VOP_READ/VOP_WRITE calls
 *	for vnode-backed vn's, and the new vm_pager_strategy() call for
 *	vm_object-backed vn's.
 *
 *	Currently B_ASYNC is only partially handled - for OBJT_SWAP I/O only.
 */
static int
vnstrategy(struct dev_strategy_args *ap)
{
	cdev_t dev = ap->a_head.a_dev;
	struct bio *bio = ap->a_bio;
	struct buf *bp;
	struct bio *nbio;
	int unit;
	struct vn_softc *vn;
	int error;

	unit = dkunit(dev);
	if ((vn = dev->si_drv1) == NULL)
		vn = vnfindvn(dev);

	bp = bio->bio_buf;

	IFOPT(vn, VN_DEBUG)
		kprintf("vnstrategy(%p): unit %d\n", bp, unit);

	if ((vn->sc_flags & VNF_INITED) == 0) {
		bp->b_error = ENXIO;
		bp->b_flags |= B_ERROR;
		biodone(bio);
		return(0);
	}

	bp->b_resid = bp->b_bcount;
	IFOPT(vn, VN_LABELS) {
		/*
		 * The vnode device is using disk/slice label support.
		 *
		 * The dscheck() function is called to validate the
		 * slices that exist ON the vnode device itself and to
		 * translate the "slice-relative" block number once again.
		 * dscheck() will call biodone() and return NULL if
		 * we are at EOF or beyond the device size.
		 */
		if (vn->sc_slices == NULL) {
			nbio = bio;
		} else if ((nbio = dscheck(dev, bio, vn->sc_slices)) == NULL) {
			goto done;
		}
	} else {
		int64_t pbn;	/* in sc_secsize chunks */
		long sz;	/* in sc_secsize chunks */

		/*
		 * Check for required alignment.  Transfers must be a valid
		 * multiple of the sector size.
		 */
		if (bp->b_bcount % vn->sc_secsize != 0 ||
		    bio->bio_offset % vn->sc_secsize != 0) {
			goto bad;
		}

		pbn = bio->bio_offset / vn->sc_secsize;
		sz = howmany(bp->b_bcount, vn->sc_secsize);

		/*
		 * Check for an illegal pbn or EOF truncation
		 */
		if (pbn < 0)
			goto bad;
		if (pbn + sz > vn->sc_size) {
			if (pbn > vn->sc_size || (bp->b_flags & B_BNOCLIP))
				goto bad;
			if (pbn == vn->sc_size) {
				bp->b_resid = bp->b_bcount;
				bp->b_flags |= B_INVAL;
				goto done;
			}
			bp->b_bcount = (vn->sc_size - pbn) * vn->sc_secsize;
		}
		nbio = push_bio(bio);
		nbio->bio_offset = pbn * vn->sc_secsize;
	}

	/*
	 * Use the translated nbio from this point on
	 */
	if (vn->sc_vp && bp->b_cmd == BUF_CMD_FREEBLKS) {
		/*
		 * Freeblks is not handled for vnode-backed elements yet.
		 */
		bp->b_resid = 0;
		/* operation complete */
	} else if (vn->sc_vp) {
		/*
		 * VNODE I/O
		 *
		 * If an error occurs, we set B_ERROR but we do not set
		 * B_INVAL because (for a write anyway), the buffer is
		 * still valid.
		 */
		struct uio auio;
		struct iovec aiov;

		bzero(&auio, sizeof(auio));

		aiov.iov_base = bp->b_data;
		aiov.iov_len = bp->b_bcount;
		auio.uio_iov = &aiov;
		auio.uio_iovcnt = 1;
		auio.uio_offset = nbio->bio_offset;
		auio.uio_segflg = UIO_SYSSPACE;
		if (bp->b_cmd == BUF_CMD_READ)
			auio.uio_rw = UIO_READ;
		else
			auio.uio_rw = UIO_WRITE;
		auio.uio_resid = bp->b_bcount;
		auio.uio_td = curthread;
		vn_lock(vn->sc_vp, LK_EXCLUSIVE | LK_RETRY);
		if (bp->b_cmd == BUF_CMD_READ)
			error = VOP_READ(vn->sc_vp, &auio, IO_DIRECT | IO_RECURSE, vn->sc_cred);
		else
			error = VOP_WRITE(vn->sc_vp, &auio, IO_DIRECT | IO_RECURSE, vn->sc_cred);
		vn_unlock(vn->sc_vp);
		bp->b_resid = auio.uio_resid;
		if (error) {
			bp->b_error = error;
			bp->b_flags |= B_ERROR;
		}
		/* operation complete */
	} else if (vn->sc_object) {
		/*
		 * OBJT_SWAP I/O (handles read, write, freebuf)
		 *
		 * We have nothing to do if freeing blocks on a reserved
		 * swap area; otherwise execute the op.
		 */
		if (bp->b_cmd == BUF_CMD_FREEBLKS && TESTOPT(vn, VN_RESERVE)) {
			bp->b_resid = 0;
			/* operation complete */
		} else {
			vm_pager_strategy(vn->sc_object, nbio);
			return(0);
			/* NOT REACHED */
		}
	} else {
		bp->b_resid = bp->b_bcount;
		bp->b_flags |= B_ERROR | B_INVAL;
		bp->b_error = EINVAL;
		/* operation complete */
	}
	biodone(nbio);
	return(0);

	/*
	 * Shortcuts / check failures on the original bio (not nbio).
	 */
bad:
	bp->b_error = EINVAL;
	bp->b_flags |= B_ERROR | B_INVAL;
done:
	biodone(bio);
	return(0);
}
/* ARGSUSED */
static int
vnioctl(struct dev_ioctl_args *ap)
{
	cdev_t dev = ap->a_head.a_dev;
	struct vn_softc *vn;
	struct vn_ioctl *vio;
	int error;
	u_long *f;

	vn = dev->si_drv1;
	IFOPT(vn, VN_FOLLOW) {
		kprintf("vnioctl(%s, 0x%lx, %p, 0x%x): unit %d\n",
		    devtoname(dev), ap->a_cmd, ap->a_data, ap->a_fflag,
		    dkunit(dev));
	}

	switch (ap->a_cmd) {
	case VNIOCATTACH:
	case VNIOCDETACH:
	case VNIOCGSET:
	case VNIOCGCLEAR:
	case VNIOCGET:
	case VNIOCUSET:
	case VNIOCUCLEAR:
		goto vn_specific;
	}

	IFOPT(vn, VN_LABELS) {
		if (vn->sc_slices != NULL) {
			error = dsioctl(dev, ap->a_cmd, ap->a_data,
					ap->a_fflag,
					&vn->sc_slices, &vn->sc_info);
			if (error != ENOIOCTL)
				return (error);
		}
		if (dkslice(dev) != WHOLE_DISK_SLICE ||
		    dkpart(dev) != WHOLE_SLICE_PART)
			return (ENOTTY);
	}

vn_specific:
	error = suser_cred(ap->a_cred, 0);
	if (error)
		return (error);

	vio = (struct vn_ioctl *)ap->a_data;
	f = (u_long *)ap->a_data;

	switch (ap->a_cmd) {
	case VNIOCATTACH:
		if (vn->sc_flags & VNF_INITED)
			return(EBUSY);

		if (vio->vn_file == NULL)
			error = vniocattach_swap(vn, vio, dev, ap->a_fflag, ap->a_cred);
		else
			error = vniocattach_file(vn, vio, dev, ap->a_fflag, ap->a_cred);
		break;

	case VNIOCDETACH:
		if ((vn->sc_flags & VNF_INITED) == 0)
			return(ENXIO);
		/*
		 * XXX handle i/o in progress.  Return EBUSY, or wait, or
		 * flush the i/o.
		 * XXX handle multiple opens of the device.  Return EBUSY,
		 * or revoke the fd's.
		 * How are these problems handled for removable and failing
		 * hardware devices? (Hint: They are not)
		 */
		vnclear(vn);
		IFOPT(vn, VN_FOLLOW)
			kprintf("vnioctl: CLRed\n");
		break;

	case VNIOCGET:
		error = vnget(dev, vn, (struct vn_user *)ap->a_data);
		break;

	case VNIOCGSET:
		vn_options |= *f;
		*f = vn_options;
		break;

	case VNIOCGCLEAR:
		vn_options &= ~(*f);
		*f = vn_options;
		break;

	case VNIOCUSET:
		vn->sc_options |= *f;
		*f = vn->sc_options;
		break;

	case VNIOCUCLEAR:
		vn->sc_options &= ~(*f);
		*f = vn->sc_options;
		break;

	default:
		error = ENOTTY;
		break;
	}
	return(error);
}
/*
 *	vniocattach_file:
 *
 *	Attach a file to a VN partition.  Return the size in the vn_size
 *	field.
 */

static int
vniocattach_file(struct vn_softc *vn, struct vn_ioctl *vio, cdev_t dev,
		 int flag, struct ucred *cred)
{
	struct vattr vattr;
	struct nlookupdata nd;
	int error, flags;
	struct vnode *vp;

	flags = FREAD|FWRITE;
	error = nlookup_init(&nd, vio->vn_file,
			     UIO_USERSPACE, NLC_FOLLOW|NLC_LOCKVP);
	if (error)
		return (error);
	if ((error = vn_open(&nd, NULL, flags, 0)) != 0) {
		if (error != EACCES && error != EPERM && error != EROFS)
			goto done;
		flags &= ~FWRITE;
		nlookup_done(&nd);
		error = nlookup_init(&nd, vio->vn_file, UIO_USERSPACE, NLC_FOLLOW|NLC_LOCKVP);
		if (error)
			return (error);
		if ((error = vn_open(&nd, NULL, flags, 0)) != 0)
			goto done;
	}
	vp = nd.nl_open_vp;
	if (vp->v_type != VREG ||
	    (error = VOP_GETATTR(vp, &vattr))) {
		if (error == 0)
			error = EINVAL;
		goto done;
	}
	vn_unlock(vp);
	vn->sc_secsize = DEV_BSIZE;
	vn->sc_vp = vp;
	nd.nl_open_vp = NULL;

	/*
	 * If the size is specified, override the file attributes.  Note that
	 * the vn_size argument is in PAGE_SIZE sized blocks.
	 */
	if (vio->vn_size)
		vn->sc_size = vio->vn_size * PAGE_SIZE / vn->sc_secsize;
	else
		vn->sc_size = vattr.va_size / vn->sc_secsize;
	error = vnsetcred(vn, cred);
	if (error) {
		vn->sc_vp = NULL;
		vn_close(vp, flags);
		goto done;
	}
	vn->sc_flags |= VNF_INITED;
	if (flags == FREAD)
		vn->sc_flags |= VNF_READONLY;
	IFOPT(vn, VN_LABELS) {
		/*
		 * Reopen so that `ds' knows which devices are open.
		 * If this is the first VNIOCSET, then we've
		 * guaranteed that the device is the cdev and that
		 * no other slices or labels are open.  Otherwise,
		 * we rely on VNIOCCLR not being abused.
		 */
		error = dev_dopen(dev, flag, S_IFCHR, cred);
		if (error)
			vnclear(vn);
	}
	IFOPT(vn, VN_FOLLOW)
		kprintf("vnioctl: SET vp %p size %llx blks\n",
			vn->sc_vp, vn->sc_size);
done:
	nlookup_done(&nd);
	return(error);
}
/*
 *	vniocattach_swap:
 *
 *	Attach swap backing store to a VN partition of the size specified
 *	in vn_size.
 */

static int
vniocattach_swap(struct vn_softc *vn, struct vn_ioctl *vio, cdev_t dev,
		 int flag, struct ucred *cred)
{
	int error;
	/*
	 * Range check.  Disallow negative sizes or any size less than the
	 * size of a page.  Then round to a page.
	 */
	if (vio->vn_size <= 0)
		return(EDOM);
	/*
	 * Allocate an OBJT_SWAP object.
	 *
	 * sc_secsize is PAGE_SIZE'd
	 *
	 * vio->vn_size is in PAGE_SIZE'd chunks.
	 * sc_size must be in PAGE_SIZE'd chunks.
	 * Note the truncation.
	 */

	vn->sc_secsize = PAGE_SIZE;
	vn->sc_size = vio->vn_size;
	vn->sc_object = vm_pager_allocate(OBJT_SWAP, NULL,
					  vn->sc_secsize * (off_t)vio->vn_size,
					  VM_PROT_DEFAULT, 0);
	IFOPT(vn, VN_RESERVE) {
		if (swap_pager_reserve(vn->sc_object, 0, vn->sc_size) < 0) {
			vm_pager_deallocate(vn->sc_object);
			vn->sc_object = NULL;
			return(EDOM);
		}
	}
	vn->sc_flags |= VNF_INITED;

	error = vnsetcred(vn, cred);
	if (error == 0) {
		IFOPT(vn, VN_LABELS) {
			/*
			 * Reopen so that `ds' knows which devices are open.
			 * If this is the first VNIOCSET, then we've
			 * guaranteed that the device is the cdev and that
			 * no other slices or labels are open.  Otherwise,
			 * we rely on VNIOCCLR not being abused.
			 */
			error = dev_dopen(dev, flag, S_IFCHR, cred);
		}
	}
	if (error == 0) {
		IFOPT(vn, VN_FOLLOW) {
			kprintf("vnioctl: SET vp %p size %llx\n",
				vn->sc_vp, vn->sc_size);
		}
	}
	if (error)
		vnclear(vn);
	return(error);
}
/*
 * Duplicate the current process's credentials.  Since we are called only
 * as the result of a SET ioctl and only root can do that, any future access
 * to this "disk" is essentially as root.  Note that credentials may change
 * if some other uid can write directly to the mapped file (NFS).
 */
int
vnsetcred(struct vn_softc *vn, struct ucred *cred)
{
	char *tmpbuf;
	int error = 0;

	/*
	 * Set credentials in our softc
	 */
	if (vn->sc_cred)
		crfree(vn->sc_cred);
	vn->sc_cred = crdup(cred);
	/*
	 * Horrible kludge to establish credentials for NFS  XXX.
	 */
	if (vn->sc_vp) {
		struct uio auio;
		struct iovec aiov;

		tmpbuf = kmalloc(vn->sc_secsize, M_TEMP, M_WAITOK);
		bzero(&auio, sizeof(auio));

		aiov.iov_base = tmpbuf;
		aiov.iov_len = vn->sc_secsize;
		auio.uio_iov = &aiov;
		auio.uio_iovcnt = 1;
		auio.uio_offset = 0;
		auio.uio_rw = UIO_READ;
		auio.uio_segflg = UIO_SYSSPACE;
		auio.uio_resid = aiov.iov_len;
		vn_lock(vn->sc_vp, LK_EXCLUSIVE | LK_RETRY);
		error = VOP_READ(vn->sc_vp, &auio, 0, vn->sc_cred);
		vn_unlock(vn->sc_vp);
		kfree(tmpbuf, M_TEMP);
	}
	return (error);
}
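
/*
 *	vnclear:
 *
 *	Detach the backing store from a unit: tear down any slices, close
 *	the backing vnode or release the swap object, drop the cached
 *	credentials, and mark the unit uninitialized.
 */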
void
vnclear(struct vn_softc *vn)
{
	IFOPT(vn, VN_FOLLOW)
		kprintf("vnclear(%p): vp=%p\n", vn, vn->sc_vp);
	if (vn->sc_slices != NULL)
		dsgone(&vn->sc_slices);
	vn->sc_flags &= ~VNF_INITED;
	if (vn->sc_vp != NULL) {
		vn_close(vn->sc_vp,
		    (vn->sc_flags & VNF_READONLY) ? FREAD : (FREAD|FWRITE));
		vn->sc_vp = NULL;
	}
	vn->sc_flags &= ~VNF_READONLY;
	if (vn->sc_cred) {
		crfree(vn->sc_cred);
		vn->sc_cred = NULL;
	}
	if (vn->sc_object != NULL) {
		vm_pager_deallocate(vn->sc_object);
		vn->sc_object = NULL;
	}
	vn->sc_size = 0;
}
/*
 *	vnget:
 *
 *	populate a struct vn_user for the VNIOCGET ioctl.
 *	interface conventions defined in sys/sys/vnioctl.h.
 */

static int
vnget(cdev_t dev, struct vn_softc *vn, struct vn_user *vnu)
{
	int error, found = 0;
	char *freepath, *fullpath;
	struct vattr vattr;

	if (vnu->vnu_unit == -1) {
		vnu->vnu_unit = dkunit(dev);
	} else if (vnu->vnu_unit < 0) {
		return (EINVAL);
	}

	SLIST_FOREACH(vn, &vn_list, sc_list) {
		if (vn->sc_unit != vnu->vnu_unit)
			continue;

		found = 1;

		if (vn->sc_flags & VNF_INITED && vn->sc_vp != NULL) {
			/* note: u_cred checked in vnioctl above */
			error = VOP_GETATTR(vn->sc_vp, &vattr);
			if (error) {
				kprintf("vnget: VOP_GETATTR for %p failed\n",
					vn->sc_vp);
				return (error);
			}

			error = vn_fullpath(curproc, vn->sc_vp,
					    &fullpath, &freepath);
			if (error) {
				kprintf("vnget: unable to resolve vp %p\n",
					vn->sc_vp);
				return(error);
			}

			strlcpy(vnu->vnu_file, fullpath,
				sizeof(vnu->vnu_file));
			kfree(freepath, M_TEMP);
			vnu->vnu_dev = vattr.va_fsid;
			vnu->vnu_ino = vattr.va_fileid;
		} else if (vn->sc_flags & VNF_INITED && vn->sc_object != NULL) {
			strlcpy(vnu->vnu_file, _VN_USER_SWAP,
				sizeof(vnu->vnu_file));
			vnu->vnu_size = vn->sc_size;
			vnu->vnu_secsize = vn->sc_secsize;
		} else {
			bzero(vnu->vnu_file, sizeof(vnu->vnu_file));
			vnu->vnu_dev = 0;
			vnu->vnu_ino = 0;
		}
		break;
	}

	if (!found)
		return(ENXIO);

	return(0);
}
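
/*
 *	vnsize:
 *
 *	Return the size of an initialized unit, in sc_secsize-sized blocks.
 */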
static int
vnsize(struct dev_psize_args *ap)
{
	cdev_t dev = ap->a_head.a_dev;
	struct vn_softc *vn;

	vn = dev->si_drv1;
	if (!vn)
		return(ENXIO);
	if ((vn->sc_flags & VNF_INITED) == 0)
		return(ENXIO);
	ap->a_result = (int64_t)vn->sc_size;
	return(0);
}
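
/*
 *	vn_modevent:
 *
 *	Module event handler.  Registers the dev_ops on load; on unload or
 *	shutdown, clears every configured unit and destroys all cdevs that
 *	refer to it.
 */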
static int
vn_modevent(module_t mod, int type, void *data)
{
	struct vn_softc *vn;
	cdev_t dev;

	switch (type) {
	case MOD_LOAD:
		dev_ops_add(&vn_ops, 0, 0);
		break;
	case MOD_UNLOAD:
		/* fall through */
	case MOD_SHUTDOWN:
		while ((vn = SLIST_FIRST(&vn_list)) != NULL) {
			SLIST_REMOVE_HEAD(&vn_list, sc_list);
			if (vn->sc_flags & VNF_INITED)
				vnclear(vn);
			/* Cleanup all cdev_t's that refer to this unit */
			while ((dev = vn->sc_devlist) != NULL) {
				vn->sc_devlist = dev->si_drv2;
				dev->si_drv1 = dev->si_drv2 = NULL;
				destroy_dev(dev);
			}
			kfree(vn, M_DEVBUF);
		}
		dev_ops_remove(&vn_ops, 0, 0);
		break;
	default:
		break;
	}
	return 0;
}

DEV_MODULE(vn, vn_modevent, 0);