/*-
 * Copyright (c) 1982, 1986, 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)vfs_vnops.c	8.2 (Berkeley) 1/21/94
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_mac.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/fcntl.h>
#include <sys/file.h>
#include <sys/kdb.h>
#include <sys/stat.h>
#include <sys/priv.h>
#include <sys/proc.h>
#include <sys/limits.h>
#include <sys/lock.h>
#include <sys/mount.h>
#include <sys/mutex.h>
#include <sys/namei.h>
#include <sys/vnode.h>
#include <sys/bio.h>
#include <sys/buf.h>
#include <sys/filio.h>
#include <sys/sx.h>
#include <sys/ttycom.h>
#include <sys/conf.h>
#include <sys/syslog.h>
#include <sys/unistd.h>

#include <security/mac/mac_framework.h>

static fo_rdwr_t	vn_read;
static fo_rdwr_t	vn_write;
static fo_truncate_t	vn_truncate;
static fo_ioctl_t	vn_ioctl;
static fo_poll_t	vn_poll;
static fo_kqfilter_t	vn_kqfilter;
static fo_stat_t	vn_statfile;
static fo_close_t	vn_closefile;

struct fileops vnops = {
	.fo_read = vn_read,
	.fo_write = vn_write,
	.fo_truncate = vn_truncate,
	.fo_ioctl = vn_ioctl,
	.fo_poll = vn_poll,
	.fo_kqfilter = vn_kqfilter,
	.fo_stat = vn_statfile,
	.fo_close = vn_closefile,
	.fo_flags = DFLAG_PASSABLE | DFLAG_SEEKABLE
};

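/*
 * Dispatch sketch (assumed wiring, not from this file): a descriptor that
 * refers to a vnode is pointed at &vnops by the open(2) path in
 * vfs_syscalls.c, so a read(2) should reach vn_read() roughly as
 *
 *	fo_read(fp, uio, active_cred, flags, td)
 *	    -> (*fp->f_ops->fo_read)(fp, uio, active_cred, flags, td)
 *	    -> vn_read(fp, uio, active_cred, flags, td)
 *
 * assuming the fo_read() wrapper in sys/file.h.
 */
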
int
vn_open(ndp, flagp, cmode, fp)
	struct nameidata *ndp;
	int *flagp, cmode;
	struct file *fp;
{
	struct thread *td = ndp->ni_cnd.cn_thread;

	return (vn_open_cred(ndp, flagp, cmode, td->td_ucred, fp));
}

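/*
 * Typical in-kernel caller sketch (illustrative; the path and flags are
 * assumptions, not taken from this file):
 *
 *	struct nameidata nd;
 *	int flags, error;
 *
 *	NDINIT(&nd, LOOKUP, FOLLOW | MPSAFE, UIO_SYSSPACE, "/etc/motd", td);
 *	flags = FREAD;
 *	error = vn_open(&nd, &flags, 0, NULL);
 *
 * On success nd.ni_vp holds the opened, locked vnode; the caller is
 * responsible for NDFREE(), VOP_UNLOCK() and the eventual vn_close().
 */
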
/*
 * Common code for vnode open operations.
 * Check permissions, and call the VOP_OPEN or VOP_CREATE routine.
 *
 * Note that this does NOT free nameidata for the successful case,
 * due to the NDINIT being done elsewhere.
 */
int
vn_open_cred(ndp, flagp, cmode, cred, fp)
	struct nameidata *ndp;
	int *flagp, cmode;
	struct ucred *cred;
	struct file *fp;
{
	struct vnode *vp;
	struct mount *mp;
	struct thread *td = ndp->ni_cnd.cn_thread;
	struct vattr vat;
	struct vattr *vap = &vat;
	int mode, fmode, error;
	int vfslocked, mpsafe;

	mpsafe = ndp->ni_cnd.cn_flags & MPSAFE;
restart:
	vfslocked = 0;
	fmode = *flagp;
	if (fmode & O_CREAT) {
		ndp->ni_cnd.cn_nameiop = CREATE;
		ndp->ni_cnd.cn_flags = ISOPEN | LOCKPARENT | LOCKLEAF |
		    MPSAFE | AUDITVNODE1;
		if ((fmode & O_EXCL) == 0 && (fmode & O_NOFOLLOW) == 0)
			ndp->ni_cnd.cn_flags |= FOLLOW;
		bwillwrite();
		if ((error = namei(ndp)) != 0)
			return (error);
		vfslocked = NDHASGIANT(ndp);
		if (!mpsafe)
			ndp->ni_cnd.cn_flags &= ~MPSAFE;
		if (ndp->ni_vp == NULL) {
			VATTR_NULL(vap);
			vap->va_type = VREG;
			vap->va_mode = cmode;
			if (fmode & O_EXCL)
				vap->va_vaflags |= VA_EXCLUSIVE;
			if (vn_start_write(ndp->ni_dvp, &mp, V_NOWAIT) != 0) {
				NDFREE(ndp, NDF_ONLY_PNBUF);
				vput(ndp->ni_dvp);
				VFS_UNLOCK_GIANT(vfslocked);
				if ((error = vn_start_write(NULL, &mp,
				    V_XSLEEP | PCATCH)) != 0)
					return (error);
				goto restart;
			}
#ifdef MAC
			error = mac_vnode_check_create(cred, ndp->ni_dvp,
			    &ndp->ni_cnd, vap);
			if (error == 0) {
#endif
				VOP_LEASE(ndp->ni_dvp, td, cred, LEASE_WRITE);
				error = VOP_CREATE(ndp->ni_dvp, &ndp->ni_vp,
				    &ndp->ni_cnd, vap);
#ifdef MAC
			}
#endif
			vput(ndp->ni_dvp);
			vn_finished_write(mp);
			if (error) {
				VFS_UNLOCK_GIANT(vfslocked);
				NDFREE(ndp, NDF_ONLY_PNBUF);
				return (error);
			}
			fmode &= ~O_TRUNC;
			vp = ndp->ni_vp;
		} else {
			if (ndp->ni_dvp == ndp->ni_vp)
				vrele(ndp->ni_dvp);
			else
				vput(ndp->ni_dvp);
			ndp->ni_dvp = NULL;
			vp = ndp->ni_vp;
			if (fmode & O_EXCL) {
				error = EEXIST;
				goto bad;
			}
			fmode &= ~O_CREAT;
		}
	} else {
		ndp->ni_cnd.cn_nameiop = LOOKUP;
		ndp->ni_cnd.cn_flags = ISOPEN |
		    ((fmode & O_NOFOLLOW) ? NOFOLLOW : FOLLOW) |
		    LOCKLEAF | MPSAFE | AUDITVNODE1;
		if ((error = namei(ndp)) != 0)
			return (error);
		if (!mpsafe)
			ndp->ni_cnd.cn_flags &= ~MPSAFE;
		vfslocked = NDHASGIANT(ndp);
		vp = ndp->ni_vp;
	}
	if (vp->v_type == VLNK) {
		error = EMLINK;
		goto bad;
	}
	if (vp->v_type == VSOCK) {
		error = EOPNOTSUPP;
		goto bad;
	}
	mode = 0;
	if (fmode & (FWRITE | O_TRUNC)) {
		if (vp->v_type == VDIR) {
			error = EISDIR;
			goto bad;
		}
		mode |= VWRITE;
	}
	if (fmode & FREAD)
		mode |= VREAD;
	if (fmode & FEXEC)
		mode |= VEXEC;
	if (fmode & O_APPEND)
		mode |= VAPPEND;
#ifdef MAC
	error = mac_vnode_check_open(cred, vp, mode);
	if (error)
		goto bad;
#endif
	if ((fmode & O_CREAT) == 0) {
		if (mode & VWRITE) {
			error = vn_writechk(vp);
			if (error)
				goto bad;
		}
		if (mode) {
			error = VOP_ACCESS(vp, mode, cred, td);
			if (error)
				goto bad;
		}
	}
	if ((error = VOP_OPEN(vp, fmode, cred, td, fp)) != 0)
		goto bad;

	if (fmode & FWRITE)
		vp->v_writecount++;
	*flagp = fmode;
	ASSERT_VOP_ELOCKED(vp, "vn_open_cred");
	if (!mpsafe)
		VFS_UNLOCK_GIANT(vfslocked);
	return (0);
bad:
	NDFREE(ndp, NDF_ONLY_PNBUF);
	vput(vp);
	VFS_UNLOCK_GIANT(vfslocked);
	*flagp = fmode;
	ndp->ni_vp = NULL;
	return (error);
}

/*
 * Check for write permissions on the specified vnode.
 * Prototype text segments cannot be written.
 */
int
vn_writechk(vp)
	register struct vnode *vp;
{

	ASSERT_VOP_LOCKED(vp, "vn_writechk");
	/*
	 * If there's shared text associated with
	 * the vnode, try to free it up once.  If
	 * we fail, we can't allow writing.
	 */
	if (vp->v_vflag & VV_TEXT)
		return (ETXTBSY);

	return (0);
}

/*
 * Vnode close call
 */
int
vn_close(vp, flags, file_cred, td)
	register struct vnode *vp;
	int flags;
	struct ucred *file_cred;
	struct thread *td;
{
	struct mount *mp;
	int error;

	VFS_ASSERT_GIANT(vp->v_mount);

	vn_start_write(vp, &mp, V_WAIT);
	vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
	if (flags & FWRITE) {
		VNASSERT(vp->v_writecount > 0, vp,
		    ("vn_close: negative writecount"));
		vp->v_writecount--;
	}
	error = VOP_CLOSE(vp, flags, file_cred, td);
	vput(vp);
	vn_finished_write(mp);
	return (error);
}

/*
 * Heuristic to detect sequential operation.
 */
static int
sequential_heuristic(struct uio *uio, struct file *fp)
{

	/*
	 * Offset 0 is handled specially.  open() sets f_seqcount to 1 so
	 * that the first I/O is normally considered to be slightly
	 * sequential.  Seeking to offset 0 doesn't change sequentiality
	 * unless previous seeks have reduced f_seqcount to 0, in which
	 * case offset 0 is not special.
	 */
	if ((uio->uio_offset == 0 && fp->f_seqcount > 0) ||
	    uio->uio_offset == fp->f_nextoff) {
		/*
		 * f_seqcount is in units of fixed-size blocks so that it
		 * depends mainly on the amount of sequential I/O and not
		 * much on the number of sequential I/O's.  The fixed size
		 * of 16384 is hard-coded here since it is (not quite) just
		 * a magic size that works well here.  This size is more
		 * closely related to the best I/O size for real disks than
		 * to any block size used by software.
		 */
		fp->f_seqcount += howmany(uio->uio_resid, 16384);
		if (fp->f_seqcount > IO_SEQMAX)
			fp->f_seqcount = IO_SEQMAX;
		return (fp->f_seqcount << IO_SEQSHIFT);
	}

	/* Not sequential.  Quickly draw-down sequentiality. */
	if (fp->f_seqcount > 1)
		fp->f_seqcount = 1;
	else
		fp->f_seqcount = 0;
	return (0);
}

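/*
 * Worked example (illustrative): a 64 KB request at the expected offset
 * adds howmany(65536, 16384) = 4 to f_seqcount, so the count saturates
 * at IO_SEQMAX (0x7f) after roughly 2 MB of back-to-back sequential I/O.
 * The return value encodes the heuristic in the upper bits of ioflag via
 * IO_SEQSHIFT, where filesystems use it to scale read-ahead.
 */
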
/*
 * Package up an I/O request on a vnode into a uio and do it.
 */
int
vn_rdwr(rw, vp, base, len, offset, segflg, ioflg, active_cred, file_cred,
    aresid, td)
	enum uio_rw rw;
	struct vnode *vp;
	void *base;
	int len;
	off_t offset;
	enum uio_seg segflg;
	int ioflg;
	struct ucred *active_cred;
	struct ucred *file_cred;
	int *aresid;
	struct thread *td;
{
	struct uio auio;
	struct iovec aiov;
	struct mount *mp;
	struct ucred *cred;
	int error;

	VFS_ASSERT_GIANT(vp->v_mount);

	if ((ioflg & IO_NODELOCKED) == 0) {
		mp = NULL;
		if (rw == UIO_WRITE) {
			if (vp->v_type != VCHR &&
			    (error = vn_start_write(vp, &mp, V_WAIT | PCATCH))
			    != 0)
				return (error);
			vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
		} else {
			/*
			 * XXX This should be LK_SHARED but I don't trust VFS
			 * enough to leave it like that until it has been
			 * reviewed further.
			 */
			vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
		}
	}
	ASSERT_VOP_LOCKED(vp, "IO_NODELOCKED with no vp lock held");
	auio.uio_iov = &aiov;
	auio.uio_iovcnt = 1;
	aiov.iov_base = base;
	aiov.iov_len = len;
	auio.uio_resid = len;
	auio.uio_offset = offset;
	auio.uio_segflg = segflg;
	auio.uio_rw = rw;
	auio.uio_td = td;
	error = 0;
#ifdef MAC
	if ((ioflg & IO_NOMACCHECK) == 0) {
		if (rw == UIO_READ)
			error = mac_vnode_check_read(active_cred, file_cred,
			    vp);
		else
			error = mac_vnode_check_write(active_cred, file_cred,
			    vp);
	}
#endif
	if (error == 0) {
		if (file_cred)
			cred = file_cred;
		else
			cred = active_cred;
		if (rw == UIO_READ)
			error = VOP_READ(vp, &auio, ioflg, cred);
		else
			error = VOP_WRITE(vp, &auio, ioflg, cred);
	}
	if (aresid)
		*aresid = auio.uio_resid;
	else
		if (auio.uio_resid && error == 0)
			error = EIO;
	if ((ioflg & IO_NODELOCKED) == 0) {
		if (rw == UIO_WRITE && vp->v_type != VCHR)
			vn_finished_write(mp);
		VOP_UNLOCK(vp, 0);
	}
	return (error);
}

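/*
 * Caller sketch (illustrative; the buffer and length are assumptions):
 * read the first 512 bytes of an already-locked vnode into a kernel
 * buffer:
 *
 *	char buf[512];
 *	int resid, error;
 *
 *	error = vn_rdwr(UIO_READ, vp, buf, sizeof(buf), (off_t)0,
 *	    UIO_SYSSPACE, IO_NODELOCKED, td->td_ucred, NOCRED, &resid, td);
 *
 * Passing a NULL aresid instead turns any short transfer into EIO.
 */
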
/*
 * Package up an I/O request on a vnode into a uio and do it.  The I/O
 * request is split up into smaller chunks and we try to avoid saturating
 * the buffer cache while potentially holding a vnode locked, so we
 * check bwillwrite() before calling vn_rdwr().  We also call uio_yield()
 * to give other processes a chance to lock the vnode (either other processes
 * core'ing the same binary, or unrelated processes scanning the directory).
 */
int
vn_rdwr_inchunks(rw, vp, base, len, offset, segflg, ioflg, active_cred,
    file_cred, aresid, td)
	enum uio_rw rw;
	struct vnode *vp;
	void *base;
	size_t len;
	off_t offset;
	enum uio_seg segflg;
	int ioflg;
	struct ucred *active_cred;
	struct ucred *file_cred;
	size_t *aresid;
	struct thread *td;
{
	int error = 0;
	int iaresid;

	VFS_ASSERT_GIANT(vp->v_mount);

	do {
		int chunk;

		/*
		 * Force `offset' to a multiple of MAXBSIZE except possibly
		 * for the first chunk, so that filesystems only need to
		 * write full blocks except possibly for the first and last
		 * chunks.
		 */
		chunk = MAXBSIZE - (uoff_t)offset % MAXBSIZE;

		if (chunk > len)
			chunk = len;
		if (rw != UIO_READ && vp->v_type == VREG)
			bwillwrite();
		iaresid = 0;
		error = vn_rdwr(rw, vp, base, chunk, offset, segflg,
		    ioflg, active_cred, file_cred, &iaresid, td);
		len -= chunk;	/* aresid calc already includes length */
		if (error)
			break;
		offset += chunk;
		base = (char *)base + chunk;
		uio_yield();
	} while (len);
	if (aresid)
		*aresid = len + iaresid;
	return (error);
}

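/*
 * Alignment example (illustrative): with MAXBSIZE at 65536, a transfer
 * starting at offset 1000 gets a first chunk of 65536 - 1000 = 64536
 * bytes, after which every subsequent chunk starts on a 64 KB boundary
 * and is full-sized except possibly the last.
 */
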
/*
 * File table vnode read routine.
 */
static int
vn_read(fp, uio, active_cred, flags, td)
	struct file *fp;
	struct uio *uio;
	struct ucred *active_cred;
	struct thread *td;
	int flags;
{
	struct vnode *vp;
	int error, ioflag;
	struct mtx *mtxp;
	int vfslocked;

	KASSERT(uio->uio_td == td, ("uio_td %p is not td %p",
	    uio->uio_td, td));
	mtxp = NULL;
	vp = fp->f_vnode;
	ioflag = 0;
	if (fp->f_flag & FNONBLOCK)
		ioflag |= IO_NDELAY;
	if (fp->f_flag & O_DIRECT)
		ioflag |= IO_DIRECT;
	vfslocked = VFS_LOCK_GIANT(vp->v_mount);
	VOP_LEASE(vp, td, fp->f_cred, LEASE_READ);
	/*
	 * According to McKusick the vn lock was protecting f_offset here.
	 * It is now protected by the FOFFSET_LOCKED flag.
	 */
	if ((flags & FOF_OFFSET) == 0) {
		mtxp = mtx_pool_find(mtxpool_sleep, fp);
		mtx_lock(mtxp);
		while (fp->f_vnread_flags & FOFFSET_LOCKED) {
			fp->f_vnread_flags |= FOFFSET_LOCK_WAITING;
			msleep(&fp->f_vnread_flags, mtxp, PUSER - 1,
			    "vnread offlock", 0);
		}
		fp->f_vnread_flags |= FOFFSET_LOCKED;
		mtx_unlock(mtxp);
		vn_lock(vp, LK_SHARED | LK_RETRY);
		uio->uio_offset = fp->f_offset;
	} else
		vn_lock(vp, LK_SHARED | LK_RETRY);

	ioflag |= sequential_heuristic(uio, fp);

#ifdef MAC
	error = mac_vnode_check_read(active_cred, fp->f_cred, vp);
	if (error == 0)
#endif
		error = VOP_READ(vp, uio, ioflag, fp->f_cred);
	if ((flags & FOF_OFFSET) == 0) {
		fp->f_offset = uio->uio_offset;
		mtx_lock(mtxp);
		if (fp->f_vnread_flags & FOFFSET_LOCK_WAITING)
			wakeup(&fp->f_vnread_flags);
		fp->f_vnread_flags = 0;
		mtx_unlock(mtxp);
	}
	fp->f_nextoff = uio->uio_offset;
	VOP_UNLOCK(vp, 0);
	VFS_UNLOCK_GIANT(vfslocked);
	return (error);
}

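/*
 * FOFFSET_LOCKED above is, in effect, a hand-rolled sleepable lock on
 * f_offset: a contending reader sets FOFFSET_LOCK_WAITING and sleeps on
 * &fp->f_vnread_flags under the pool mutex, and the holder wakes all
 * waiters when it clears the flags after updating f_offset.  This keeps
 * two concurrent offset-advancing reads on one struct file from
 * interleaving their offset updates.
 */
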
/*
 * File table vnode write routine.
 */
static int
vn_write(fp, uio, active_cred, flags, td)
	struct file *fp;
	struct uio *uio;
	struct ucred *active_cred;
	struct thread *td;
	int flags;
{
	struct vnode *vp;
	struct mount *mp;
	int error, ioflag;
	int vfslocked;

	KASSERT(uio->uio_td == td, ("uio_td %p is not td %p",
	    uio->uio_td, td));
	vp = fp->f_vnode;
	vfslocked = VFS_LOCK_GIANT(vp->v_mount);
	if (vp->v_type == VREG)
		bwillwrite();
	ioflag = IO_UNIT;
	if (vp->v_type == VREG && (fp->f_flag & O_APPEND))
		ioflag |= IO_APPEND;
	if (fp->f_flag & FNONBLOCK)
		ioflag |= IO_NDELAY;
	if (fp->f_flag & O_DIRECT)
		ioflag |= IO_DIRECT;
	if ((fp->f_flag & O_FSYNC) ||
	    (vp->v_mount && (vp->v_mount->mnt_flag & MNT_SYNCHRONOUS)))
		ioflag |= IO_SYNC;
	mp = NULL;
	if (vp->v_type != VCHR &&
	    (error = vn_start_write(vp, &mp, V_WAIT | PCATCH)) != 0)
		goto unlock;
	VOP_LEASE(vp, td, fp->f_cred, LEASE_WRITE);
	vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
	if ((flags & FOF_OFFSET) == 0)
		uio->uio_offset = fp->f_offset;
	ioflag |= sequential_heuristic(uio, fp);
#ifdef MAC
	error = mac_vnode_check_write(active_cred, fp->f_cred, vp);
	if (error == 0)
#endif
		error = VOP_WRITE(vp, uio, ioflag, fp->f_cred);
	if ((flags & FOF_OFFSET) == 0)
		fp->f_offset = uio->uio_offset;
	fp->f_nextoff = uio->uio_offset;
	VOP_UNLOCK(vp, 0);
	if (vp->v_type != VCHR)
		vn_finished_write(mp);
unlock:
	VFS_UNLOCK_GIANT(vfslocked);
	return (error);
}

/*
 * File table truncate routine.
 */
static int
vn_truncate(fp, length, active_cred, td)
	struct file *fp;
	off_t length;
	struct ucred *active_cred;
	struct thread *td;
{
	struct vattr vattr;
	struct mount *mp;
	struct vnode *vp;
	int vfslocked;
	int error;

	vp = fp->f_vnode;
	vfslocked = VFS_LOCK_GIANT(vp->v_mount);
	error = vn_start_write(vp, &mp, V_WAIT | PCATCH);
	if (error) {
		VFS_UNLOCK_GIANT(vfslocked);
		return (error);
	}
	VOP_LEASE(vp, td, active_cred, LEASE_WRITE);
	vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
	if (vp->v_type == VDIR) {
		error = EISDIR;
		goto out;
	}
#ifdef MAC
	error = mac_vnode_check_write(active_cred, fp->f_cred, vp);
	if (error)
		goto out;
#endif
	error = vn_writechk(vp);
	if (error == 0) {
		VATTR_NULL(&vattr);
		vattr.va_size = length;
		error = VOP_SETATTR(vp, &vattr, fp->f_cred);
	}
out:
	VOP_UNLOCK(vp, 0);
	vn_finished_write(mp);
	VFS_UNLOCK_GIANT(vfslocked);
	return (error);
}

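/*
 * Note (assumed routing, see kern_ftruncate() in vfs_syscalls.c):
 * ftruncate(2) on a vnode-backed descriptor arrives here through the
 * fo_truncate slot of vnops; truncation is expressed to the filesystem
 * as a VOP_SETATTR() of va_size rather than a dedicated VOP.
 */
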
/*
 * File table vnode stat routine.
 */
static int
vn_statfile(fp, sb, active_cred, td)
	struct file *fp;
	struct stat *sb;
	struct ucred *active_cred;
	struct thread *td;
{
	struct vnode *vp = fp->f_vnode;
	int vfslocked;
	int error;

	vfslocked = VFS_LOCK_GIANT(vp->v_mount);
	vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
	error = vn_stat(vp, sb, active_cred, fp->f_cred, td);
	VOP_UNLOCK(vp, 0);
	VFS_UNLOCK_GIANT(vfslocked);

	return (error);
}

/*
 * Stat a vnode; implementation for the stat syscall
 */
int
vn_stat(vp, sb, active_cred, file_cred, td)
	struct vnode *vp;
	register struct stat *sb;
	struct ucred *active_cred;
	struct ucred *file_cred;
	struct thread *td;
{
	struct vattr vattr;
	register struct vattr *vap;
	int error;
	u_short mode;

#ifdef MAC
	error = mac_vnode_check_stat(active_cred, file_cred, vp);
	if (error)
		return (error);
#endif

	vap = &vattr;
	error = VOP_GETATTR(vp, vap, active_cred);
	if (error)
		return (error);

	/*
	 * Zero the spare stat fields
	 */
	bzero(sb, sizeof *sb);

	/*
	 * Copy from vattr table
	 */
	if (vap->va_fsid != VNOVAL)
		sb->st_dev = vap->va_fsid;
	else
		sb->st_dev = vp->v_mount->mnt_stat.f_fsid.val[0];
	sb->st_ino = vap->va_fileid;
	mode = vap->va_mode;
	switch (vap->va_type) {
	case VREG:
		mode |= S_IFREG;
		break;
	case VDIR:
		mode |= S_IFDIR;
		break;
	case VBLK:
		mode |= S_IFBLK;
		break;
	case VCHR:
		mode |= S_IFCHR;
		break;
	case VLNK:
		mode |= S_IFLNK;
		break;
	case VSOCK:
		mode |= S_IFSOCK;
		break;
	case VFIFO:
		mode |= S_IFIFO;
		break;
	default:
		return (EBADF);
	}
	sb->st_mode = mode;
	sb->st_nlink = vap->va_nlink;
	sb->st_uid = vap->va_uid;
	sb->st_gid = vap->va_gid;
	sb->st_rdev = vap->va_rdev;
	if (vap->va_size > OFF_MAX)
		return (EOVERFLOW);
	sb->st_size = vap->va_size;
	sb->st_atimespec = vap->va_atime;
	sb->st_mtimespec = vap->va_mtime;
	sb->st_ctimespec = vap->va_ctime;
	sb->st_birthtimespec = vap->va_birthtime;

	/*
	 * According to www.opengroup.org, the meaning of st_blksize is
	 * "a filesystem-specific preferred I/O block size for this
	 * object.  In some filesystem types, this may vary from file
	 * to file"
	 * Default to PAGE_SIZE after much discussion.
	 * XXX: min(PAGE_SIZE, vp->v_bufobj.bo_bsize) may be more correct.
	 */

	sb->st_blksize = PAGE_SIZE;

	sb->st_flags = vap->va_flags;
	if (priv_check(td, PRIV_VFS_GENERATION))
		sb->st_gen = 0;
	else
		sb->st_gen = vap->va_gen;

	sb->st_blocks = vap->va_bytes / S_BLKSIZE;
	return (0);
}

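/*
 * Unit note (S_BLKSIZE is 512, from sys/stat.h): st_blocks counts
 * 512-byte sectors regardless of st_blksize.  Illustrative arithmetic:
 * a 3000-byte file occupying one 4096-byte filesystem block has
 * va_bytes 4096 and therefore reports st_blocks = 4096 / 512 = 8.
 */
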
/*
 * File table vnode ioctl routine.
 */
static int
vn_ioctl(fp, com, data, active_cred, td)
	struct file *fp;
	u_long com;
	void *data;
	struct ucred *active_cred;
	struct thread *td;
{
	struct vnode *vp = fp->f_vnode;
	struct vattr vattr;
	int vfslocked;
	int error;

	vfslocked = VFS_LOCK_GIANT(vp->v_mount);
	error = ENOTTY;
	switch (vp->v_type) {
	case VREG:
	case VDIR:
		if (com == FIONREAD) {
			vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
			error = VOP_GETATTR(vp, &vattr, active_cred);
			VOP_UNLOCK(vp, 0);
			if (!error)
				*(int *)data = vattr.va_size - fp->f_offset;
		}
		if (com == FIONBIO || com == FIOASYNC)	/* XXX */
			error = 0;
		else
			error = VOP_IOCTL(vp, com, data, fp->f_flag,
			    active_cred, td);
		break;

	default:
		break;
	}
	VFS_UNLOCK_GIANT(vfslocked);
	return (error);
}

/*
 * File table vnode poll routine.
 */
static int
vn_poll(fp, events, active_cred, td)
	struct file *fp;
	int events;
	struct ucred *active_cred;
	struct thread *td;
{
	struct vnode *vp;
	int vfslocked;
	int error;

	vp = fp->f_vnode;
	vfslocked = VFS_LOCK_GIANT(vp->v_mount);
#ifdef MAC
	vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
	error = mac_vnode_check_poll(active_cred, fp->f_cred, vp);
	VOP_UNLOCK(vp, 0);
	if (!error)
#endif

	error = VOP_POLL(vp, events, fp->f_cred, td);
	VFS_UNLOCK_GIANT(vfslocked);
	return (error);
}

/*
 * Acquire the requested lock and then check for validity.  LK_RETRY
 * permits vn_lock to return doomed vnodes.
 */
int
_vn_lock(struct vnode *vp, int flags, char *file, int line)
{
	int error;

	VNASSERT((flags & LK_TYPE_MASK) != 0, vp,
	    ("vn_lock called with no locktype."));
	do {
		error = VOP_LOCK1(vp, flags, file, line);
		flags &= ~LK_INTERLOCK;	/* Interlock is always dropped. */
		KASSERT((flags & LK_RETRY) == 0 || error == 0,
		    ("LK_RETRY set with incompatible flags %d\n", flags));
		/*
		 * Callers specify LK_RETRY if they wish to get dead vnodes.
		 * If RETRY is not set, we return ENOENT instead.
		 */
		if (error == 0 && vp->v_iflag & VI_DOOMED &&
		    (flags & LK_RETRY) == 0) {
			VOP_UNLOCK(vp, 0);
			error = ENOENT;
			break;
		}
	} while (flags & LK_RETRY && error != 0);
	return (error);
}

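/*
 * Callers normally use the vn_lock() macro from sys/vnode.h, which is
 * believed to expand roughly to
 *
 *	_vn_lock(vp, flags, __FILE__, __LINE__)
 *
 * so that lock diagnostics can report the acquiring source location.
 */
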
/*
 * File table vnode close routine.
 */
static int
vn_closefile(fp, td)
	struct file *fp;
	struct thread *td;
{
	struct vnode *vp;
	struct flock lf;
	int vfslocked;
	int error;

	vp = fp->f_vnode;

	vfslocked = VFS_LOCK_GIANT(vp->v_mount);
	if (fp->f_type == DTYPE_VNODE && fp->f_flag & FHASLOCK) {
		lf.l_whence = SEEK_SET;
		lf.l_start = 0;
		lf.l_len = 0;
		lf.l_type = F_UNLCK;
		(void) VOP_ADVLOCK(vp, fp, F_UNLCK, &lf, F_FLOCK);
	}

	fp->f_ops = &badfileops;

	error = vn_close(vp, fp->f_flag, fp->f_cred, td);
	VFS_UNLOCK_GIANT(vfslocked);
	return (error);
}

/*
 * Prepare to start a filesystem write operation.  If the operation is
 * permitted, then we bump the count of operations in progress and
 * proceed.  If a suspend request is in progress, we wait until the
 * suspension is over, and then proceed.
 */
int
vn_start_write(vp, mpp, flags)
	struct vnode *vp;
	struct mount **mpp;
	int flags;
{
	struct mount *mp;
	int error;

	error = 0;
	/*
	 * If a vnode is provided, get and return the mount point to
	 * which it will write.
	 */
	if (vp != NULL) {
		if ((error = VOP_GETWRITEMOUNT(vp, mpp)) != 0) {
			*mpp = NULL;
			if (error != EOPNOTSUPP)
				return (error);
			return (0);
		}
	}
	if ((mp = *mpp) == NULL)
		return (0);
	MNT_ILOCK(mp);
	if (vp == NULL)
		MNT_REF(mp);
	/*
	 * Check on status of suspension.
	 */
	while ((mp->mnt_kern_flag & MNTK_SUSPEND) != 0) {
		if (flags & V_NOWAIT) {
			error = EWOULDBLOCK;
			goto unlock;
		}
		error = msleep(&mp->mnt_flag, MNT_MTX(mp),
		    (PUSER - 1) | (flags & PCATCH), "suspfs", 0);
		if (error)
			goto unlock;
	}
	if (flags & V_XSLEEP)
		goto unlock;
	mp->mnt_writeopcount++;
unlock:
	MNT_REL(mp);
	MNT_IUNLOCK(mp);
	return (error);
}

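/*
 * Canonical bracket (the same pattern vn_write and vn_truncate use
 * above): a write path wraps its modification in
 *
 *	if ((error = vn_start_write(vp, &mp, V_WAIT | PCATCH)) != 0)
 *		return (error);
 *	vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
 *	... modify the file ...
 *	VOP_UNLOCK(vp, 0);
 *	vn_finished_write(mp);
 *
 * so that vfs_write_suspend() can count and drain in-flight writers.
 */
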
/*
 * Secondary suspension.  Used by operations such as vop_inactive
 * routines that are needed by the higher level functions.  These
 * are allowed to proceed until all the higher level functions have
 * completed (indicated by mnt_writeopcount dropping to zero).  At that
 * time, these operations are halted until the suspension is over.
 */
int
vn_write_suspend_wait(vp, mp, flags)
	struct vnode *vp;
	struct mount *mp;
	int flags;
{
	int error;

	if (vp != NULL) {
		if ((error = VOP_GETWRITEMOUNT(vp, &mp)) != 0) {
			if (error != EOPNOTSUPP)
				return (error);
			return (0);
		}
	}
	/*
	 * If we are not suspended or have not yet reached suspended
	 * mode, then let the operation proceed.
	 */
	if (mp == NULL)
		return (0);
	MNT_ILOCK(mp);
	if (vp == NULL)
		MNT_REF(mp);
	if ((mp->mnt_kern_flag & MNTK_SUSPENDED) == 0) {
		MNT_REL(mp);
		MNT_IUNLOCK(mp);
		return (0);
	}
	if (flags & V_NOWAIT) {
		MNT_REL(mp);
		MNT_IUNLOCK(mp);
		return (EWOULDBLOCK);
	}
	/*
	 * Wait for the suspension to finish.
	 */
	error = msleep(&mp->mnt_flag, MNT_MTX(mp),
	    (PUSER - 1) | (flags & PCATCH) | PDROP, "suspfs", 0);
	vfs_rel(mp);
	return (error);
}

/*
 * Secondary suspension.  Used by operations such as vop_inactive
 * routines that are needed by the higher level functions.  These
 * are allowed to proceed until all the higher level functions have
 * completed (indicated by mnt_writeopcount dropping to zero).  At that
 * time, these operations are halted until the suspension is over.
 */
int
vn_start_secondary_write(vp, mpp, flags)
	struct vnode *vp;
	struct mount **mpp;
	int flags;
{
	struct mount *mp;
	int error;

retry:
	if (vp != NULL) {
		if ((error = VOP_GETWRITEMOUNT(vp, mpp)) != 0) {
			*mpp = NULL;
			if (error != EOPNOTSUPP)
				return (error);
			return (0);
		}
	}
	/*
	 * If we are not suspended or have not yet reached suspended
	 * mode, then let the operation proceed.
	 */
	if ((mp = *mpp) == NULL)
		return (0);
	MNT_ILOCK(mp);
	if (vp == NULL)
		MNT_REF(mp);
	if ((mp->mnt_kern_flag & (MNTK_SUSPENDED | MNTK_SUSPEND2)) == 0) {
		mp->mnt_secondary_writes++;
		mp->mnt_secondary_accwrites++;
		MNT_REL(mp);
		MNT_IUNLOCK(mp);
		return (0);
	}
	if (flags & V_NOWAIT) {
		MNT_REL(mp);
		MNT_IUNLOCK(mp);
		return (EWOULDBLOCK);
	}
	/*
	 * Wait for the suspension to finish.
	 */
	error = msleep(&mp->mnt_flag, MNT_MTX(mp),
	    (PUSER - 1) | (flags & PCATCH) | PDROP, "suspfs", 0);
	vfs_rel(mp);
	if (error == 0)
		goto retry;
	return (error);
}

/*
 * Filesystem write operation has completed.  If we are suspending and this
 * operation is the last one, notify the suspender that the suspension is
 * now in effect.
 */
void
vn_finished_write(mp)
	struct mount *mp;
{
	if (mp == NULL)
		return;
	MNT_ILOCK(mp);
	mp->mnt_writeopcount--;
	if (mp->mnt_writeopcount < 0)
		panic("vn_finished_write: neg cnt");
	if ((mp->mnt_kern_flag & MNTK_SUSPEND) != 0 &&
	    mp->mnt_writeopcount <= 0)
		wakeup(&mp->mnt_writeopcount);
	MNT_IUNLOCK(mp);
}

/*
 * Filesystem secondary write operation has completed.  If we are
 * suspending and this operation is the last one, notify the suspender
 * that the suspension is now in effect.
 */
void
vn_finished_secondary_write(mp)
	struct mount *mp;
{
	if (mp == NULL)
		return;
	MNT_ILOCK(mp);
	mp->mnt_secondary_writes--;
	if (mp->mnt_secondary_writes < 0)
		panic("vn_finished_secondary_write: neg cnt");
	if ((mp->mnt_kern_flag & MNTK_SUSPEND) != 0 &&
	    mp->mnt_secondary_writes <= 0)
		wakeup(&mp->mnt_secondary_writes);
	MNT_IUNLOCK(mp);
}

/*
 * Request a filesystem to suspend write operations.
 */
int
vfs_write_suspend(mp)
	struct mount *mp;
{
	struct thread *td = curthread;
	int error;

	MNT_ILOCK(mp);
	if (mp->mnt_kern_flag & MNTK_SUSPEND) {
		MNT_IUNLOCK(mp);
		return (0);
	}
	mp->mnt_kern_flag |= MNTK_SUSPEND;
	if (mp->mnt_writeopcount > 0)
		(void) msleep(&mp->mnt_writeopcount,
		    MNT_MTX(mp), (PUSER - 1) | PDROP, "suspwt", 0);
	else
		MNT_IUNLOCK(mp);
	if ((error = VFS_SYNC(mp, MNT_SUSPEND, td)) != 0)
		vfs_write_resume(mp);
	return (error);
}

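/*
 * Usage sketch (assumed; the FFS snapshot code is the classic consumer):
 *
 *	if ((error = vfs_write_suspend(mp)) != 0)
 *		return (error);
 *	... operate on the quiescent filesystem ...
 *	vfs_write_resume(mp);
 *
 * While suspended, new writers block in vn_start_write() until
 * vfs_write_resume() clears the MNTK_SUSPEND* flags and wakes them.
 */
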
/*
 * Request a filesystem to resume write operations.
 */
void
vfs_write_resume(mp)
	struct mount *mp;
{

	MNT_ILOCK(mp);
	if ((mp->mnt_kern_flag & MNTK_SUSPEND) != 0) {
		mp->mnt_kern_flag &= ~(MNTK_SUSPEND | MNTK_SUSPEND2 |
		    MNTK_SUSPENDED);
		wakeup(&mp->mnt_writeopcount);
		wakeup(&mp->mnt_flag);
	}
	MNT_IUNLOCK(mp);
}

/*
 * Implement kqueues for files by translating them into vnode operations.
 */
static int
vn_kqfilter(struct file *fp, struct knote *kn)
{
	int vfslocked;
	int error;

	vfslocked = VFS_LOCK_GIANT(fp->f_vnode->v_mount);
	error = VOP_KQFILTER(fp->f_vnode, kn);
	VFS_UNLOCK_GIANT(vfslocked);

	return (error);
}

/*
 * Simplified in-kernel wrapper calls for extended attribute access.
 * Both calls pass in a NULL credential, authorizing as "kernel" access.
 * Set IO_NODELOCKED in ioflg if the vnode is already locked.
 */
int
vn_extattr_get(struct vnode *vp, int ioflg, int attrnamespace,
    const char *attrname, int *buflen, char *buf, struct thread *td)
{
	struct uio auio;
	struct iovec iov;
	int error;

	iov.iov_len = *buflen;
	iov.iov_base = buf;

	auio.uio_iov = &iov;
	auio.uio_iovcnt = 1;
	auio.uio_rw = UIO_READ;
	auio.uio_segflg = UIO_SYSSPACE;
	auio.uio_td = td;
	auio.uio_offset = 0;
	auio.uio_resid = *buflen;

	if ((ioflg & IO_NODELOCKED) == 0)
		vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);

	ASSERT_VOP_LOCKED(vp, "IO_NODELOCKED with no vp lock held");

	/* authorize attribute retrieval as kernel */
	error = VOP_GETEXTATTR(vp, attrnamespace, attrname, &auio, NULL, NULL,
	    td);

	if ((ioflg & IO_NODELOCKED) == 0)
		VOP_UNLOCK(vp, 0);

	if (error == 0) {
		*buflen = *buflen - auio.uio_resid;
	}

	return (error);
}

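/*
 * Caller sketch (illustrative; the attribute name is an example, not a
 * reference to anything in this file):
 *
 *	char buf[128];
 *	int buflen = sizeof(buf);
 *
 *	error = vn_extattr_get(vp, IO_NODELOCKED,
 *	    EXTATTR_NAMESPACE_SYSTEM, "posix1e.acl_access",
 *	    &buflen, buf, td);
 *
 * On success, buflen is reduced to the number of bytes actually
 * retrieved.
 */
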
/*
 * XXX failure mode if partially written?
 */
int
vn_extattr_set(struct vnode *vp, int ioflg, int attrnamespace,
    const char *attrname, int buflen, char *buf, struct thread *td)
{
	struct uio auio;
	struct iovec iov;
	struct mount *mp;
	int error;

	iov.iov_len = buflen;
	iov.iov_base = buf;

	auio.uio_iov = &iov;
	auio.uio_iovcnt = 1;
	auio.uio_rw = UIO_WRITE;
	auio.uio_segflg = UIO_SYSSPACE;
	auio.uio_td = td;
	auio.uio_offset = 0;
	auio.uio_resid = buflen;

	if ((ioflg & IO_NODELOCKED) == 0) {
		if ((error = vn_start_write(vp, &mp, V_WAIT)) != 0)
			return (error);
		vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
	}

	ASSERT_VOP_LOCKED(vp, "IO_NODELOCKED with no vp lock held");

	/* authorize attribute setting as kernel */
	error = VOP_SETEXTATTR(vp, attrnamespace, attrname, &auio, NULL, td);

	if ((ioflg & IO_NODELOCKED) == 0) {
		vn_finished_write(mp);
		VOP_UNLOCK(vp, 0);
	}

	return (error);
}

int
vn_extattr_rm(struct vnode *vp, int ioflg, int attrnamespace,
    const char *attrname, struct thread *td)
{
	struct mount *mp;
	int error;

	if ((ioflg & IO_NODELOCKED) == 0) {
		if ((error = vn_start_write(vp, &mp, V_WAIT)) != 0)
			return (error);
		vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
	}

	ASSERT_VOP_LOCKED(vp, "IO_NODELOCKED with no vp lock held");

	/* authorize attribute removal as kernel */
	error = VOP_DELETEEXTATTR(vp, attrnamespace, attrname, NULL, td);
	if (error == EOPNOTSUPP)
		error = VOP_SETEXTATTR(vp, attrnamespace, attrname, NULL,
		    NULL, td);

	if ((ioflg & IO_NODELOCKED) == 0) {
		vn_finished_write(mp);
		VOP_UNLOCK(vp, 0);
	}

	return (error);
}