hammer2 - Refactor frontend part 9/many
[dragonfly.git] / sys / vfs / hammer2 / hammer2_vnops.c
blob 080bcd5f650d5acffe365a18013421b3a572ac41
/*
 * Copyright (c) 2011-2015 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@dragonflybsd.org>
 * by Venkatesh Srinivas <vsrinivas@dragonflybsd.org>
 * by Daniel Flores (GSOC 2013 - mentored by Matthew Dillon, compression)
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
/*
 * Kernel Filesystem interface
 *
 * NOTE! local ipdata pointers must be reloaded on any modifying operation
 *       to the inode as its underlying chain may have changed.
 */
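/*
 * For illustration (an editor's sketch, not a verbatim excerpt), code in
 * this file follows the pattern:
 *
 *      ripdata = &hammer2_cluster_rdata(cluster)->ipdata;
 *      ...modifying operation on the inode/cluster...
 *      ripdata = &hammer2_cluster_rdata(cluster)->ipdata;   (re-load)
 *
 * because a modifying operation may COW the underlying chain, leaving
 * the old pointer stale.
 */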
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/fcntl.h>
#include <sys/buf.h>
#include <sys/proc.h>
#include <sys/namei.h>
#include <sys/mount.h>
#include <sys/vnode.h>
#include <sys/mountctl.h>
#include <sys/dirent.h>
#include <sys/uio.h>
#include <sys/objcache.h>
#include <sys/event.h>
#include <sys/file.h>
#include <vfs/fifofs/fifo.h>

#include "hammer2.h"
static int hammer2_read_file(hammer2_inode_t *ip, struct uio *uio,
                                int seqcount);
static int hammer2_write_file(hammer2_trans_t *trans, hammer2_inode_t *ip,
                                struct uio *uio, int ioflag, int seqcount);
static void hammer2_extend_file(hammer2_inode_t *ip, hammer2_key_t nsize);
static void hammer2_truncate_file(hammer2_inode_t *ip, hammer2_key_t nsize);

struct objcache *cache_xops;
static __inline
void
hammer2_knote(struct vnode *vp, int flags)
{
        if (flags)
                KNOTE(&vp->v_pollinfo.vpi_kqinfo.ki_note, flags);
}
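/*
 * Callers accumulate kqueue event bits and post them once at the end of
 * an operation, e.g. NOTE_ATTRIB from hammer2_vop_setattr() or
 * NOTE_WRITE|NOTE_EXTEND from an extending write in hammer2_write_file()
 * (see below).
 */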
/*
 * Last reference to a vnode is going away but it is still cached.
 */
static
int
hammer2_vop_inactive(struct vop_inactive_args *ap)
{
        hammer2_inode_t *ip;
        struct vnode *vp;

        LOCKSTART;
        vp = ap->a_vp;
        ip = VTOI(vp);

        /*
         * Degenerate case
         */
        if (ip == NULL) {
                vrecycle(vp);
                LOCKSTOP;
                return (0);
        }

        /*
         * Check for deleted inodes and recycle immediately on the last
         * release.  Be sure to destroy any left-over buffer cache buffers
         * so we do not waste time trying to flush them.
         *
         * WARNING: nvtruncbuf() can only be safely called without the inode
         *          lock held due to the way our write thread works.
         */
        if (ip->flags & HAMMER2_INODE_ISUNLINKED) {
                hammer2_key_t lbase;
                int nblksize;

                /*
                 * Detect updates to the embedded data which may be
                 * synchronized by the strategy code.  Simply mark the
                 * inode modified so it gets picked up by our normal flush.
                 */
                nblksize = hammer2_calc_logical(ip, 0, &lbase, NULL);
                nvtruncbuf(vp, 0, nblksize, 0, 0);
                vrecycle(vp);
        }
        LOCKSTOP;
        return (0);
}
/*
 * Reclaim a vnode so that it can be reused; after the inode is
 * disassociated, the filesystem must manage it alone.
 */
static
int
hammer2_vop_reclaim(struct vop_reclaim_args *ap)
{
        hammer2_inode_t *ip;
        hammer2_pfs_t *pmp;
        struct vnode *vp;

        LOCKSTART;
        vp = ap->a_vp;
        ip = VTOI(vp);
        if (ip == NULL) {
                LOCKSTOP;
                return(0);
        }
        pmp = ip->pmp;

        /*
         * The final close of a deleted file or directory marks it for
         * destruction.  The DELETED flag allows the flusher to shortcut
         * any modified blocks still unflushed (that is, just ignore them).
         *
         * HAMMER2 usually does not try to optimize the freemap by returning
         * deleted blocks to it as it does not usually know how many snapshots
         * might be referencing portions of the file/dir.
         */
        vp->v_data = NULL;
        ip->vp = NULL;

        /*
         * NOTE! We do not attempt to flush chains here, flushing is
         *       really fragile and could also deadlock.
         */
        vclrisdirty(vp);

        /*
         * Once reclaimed the inode is disconnected from the normal flush
         * mechanism and must be tracked separately.
         *
         * A reclaim can occur at any time so we cannot safely start a
         * transaction to handle reclamation of unlinked files.  Instead,
         * the ip is left with a reference and placed on a linked list and
         * handled later on.
         */
        if (ip->flags & HAMMER2_INODE_ISUNLINKED) {
                hammer2_inode_unlink_t *ipul;

                ipul = kmalloc(sizeof(*ipul), pmp->minode, M_WAITOK | M_ZERO);
                ipul->ip = ip;

                hammer2_spin_ex(&pmp->list_spin);
                TAILQ_INSERT_TAIL(&pmp->unlinkq, ipul, entry);
                hammer2_spin_unex(&pmp->list_spin);
                /* retain ref from vp for ipul */
        } else {
                hammer2_inode_drop(ip);                 /* vp ref */
        }

        /*
         * XXX handle background sync when ip dirty, kernel will no longer
         * notify us regarding this inode because there is no longer a
         * vnode attached to it.
         */

        LOCKSTOP;
        return (0);
}
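/*
 * hammer2_vop_fsync - flush the vnode's dirty buffers via vfsync(), then
 * synchronize the inode's meta-data if it is flagged MODIFIED (an
 * editor's summary of the code below).
 */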
static
int
hammer2_vop_fsync(struct vop_fsync_args *ap)
{
        hammer2_inode_t *ip;
        hammer2_trans_t trans;
        struct vnode *vp;

        LOCKSTART;
        vp = ap->a_vp;
        ip = VTOI(vp);

#if 0
        /* XXX can't do this yet */
        hammer2_trans_init(&trans, ip->pmp, HAMMER2_TRANS_ISFLUSH);
        vfsync(vp, ap->a_waitfor, 1, NULL, NULL);
#endif
        hammer2_trans_init(&trans, ip->pmp, 0);
        vfsync(vp, ap->a_waitfor, 1, NULL, NULL);

        /*
         * Calling chain_flush here creates a lot of duplicative
         * COW operations due to non-optimal vnode ordering.
         *
         * Only do it for an actual fsync() syscall.  The other forms
         * which call this function will eventually call chain_flush
         * on the volume root as a catch-all, which is far more optimal.
         */
        hammer2_inode_lock(ip, HAMMER2_RESOLVE_ALWAYS);
        if (ip->flags & HAMMER2_INODE_MODIFIED)
                hammer2_inode_fsync(&trans, ip, NULL);
        hammer2_inode_unlock(ip, NULL);
#if 0
        atomic_clear_int(&ip->flags, HAMMER2_INODE_MODIFIED);
        /*vclrisdirty(vp);*/
        if (ip->flags & (HAMMER2_INODE_RESIZED|HAMMER2_INODE_MTIME))
                hammer2_inode_fsync(&trans, ip, cluster);
#endif

        hammer2_trans_done(&trans);

        LOCKSTOP;
        return (0);
}
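/*
 * hammer2_vop_access - check access against the inode's uid/gid/mode.
 * The uid/gid are stored as UUIDs in the media meta-data and are
 * converted with hammer2_to_unix_xid() before the generic helper runs.
 */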
static
int
hammer2_vop_access(struct vop_access_args *ap)
{
        hammer2_inode_t *ip = VTOI(ap->a_vp);
        uid_t uid;
        gid_t gid;
        int error;

        LOCKSTART;
        hammer2_inode_lock(ip, HAMMER2_RESOLVE_SHARED);
        uid = hammer2_to_unix_xid(&ip->meta.uid);
        gid = hammer2_to_unix_xid(&ip->meta.gid);
        error = vop_helper_access(ap, uid, gid, ip->meta.mode, ip->meta.uflags);
        hammer2_inode_unlock(ip, NULL);

        LOCKSTOP;
        return (error);
}
static
int
hammer2_vop_getattr(struct vop_getattr_args *ap)
{
        hammer2_pfs_t *pmp;
        hammer2_inode_t *ip;
        struct vnode *vp;
        struct vattr *vap;

        LOCKSTART;
        vp = ap->a_vp;
        vap = ap->a_vap;

        ip = VTOI(vp);
        pmp = ip->pmp;

        hammer2_inode_lock(ip, HAMMER2_RESOLVE_SHARED);

        vap->va_fsid = pmp->mp->mnt_stat.f_fsid.val[0];
        vap->va_fileid = ip->meta.inum;
        vap->va_mode = ip->meta.mode;
        vap->va_nlink = ip->meta.nlinks;
        vap->va_uid = hammer2_to_unix_xid(&ip->meta.uid);
        vap->va_gid = hammer2_to_unix_xid(&ip->meta.gid);
        vap->va_rmajor = 0;
        vap->va_rminor = 0;
        vap->va_size = ip->meta.size;   /* protected by shared lock */
        vap->va_blocksize = HAMMER2_PBUFSIZE;
        vap->va_flags = ip->meta.uflags;
        hammer2_time_to_timespec(ip->meta.ctime, &vap->va_ctime);
        hammer2_time_to_timespec(ip->meta.mtime, &vap->va_mtime);
        hammer2_time_to_timespec(ip->meta.mtime, &vap->va_atime);
        vap->va_gen = 1;
        vap->va_bytes = ip->bref.data_count;
        vap->va_type = hammer2_get_vtype(ip->meta.type);
        vap->va_filerev = 0;
        vap->va_uid_uuid = ip->meta.uid;
        vap->va_gid_uuid = ip->meta.gid;
        vap->va_vaflags = VA_UID_UUID_VALID | VA_GID_UUID_VALID |
                          VA_FSID_UUID_VALID;

        hammer2_inode_unlock(ip, NULL);

        LOCKSTOP;
        return (0);
}
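/*
 * hammer2_vop_setattr - chflags/chown/chmod/resize/utimes.  All paths
 * funnel through the 'done:' label so a resize can be fsynced and the
 * transaction closed exactly once (an editor's summary of the code
 * below).
 */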
static
int
hammer2_vop_setattr(struct vop_setattr_args *ap)
{
        hammer2_inode_t *ip;
        hammer2_trans_t trans;
        struct vnode *vp;
        struct vattr *vap;
        int error;
        int kflags = 0;
        uint64_t ctime;

        LOCKSTART;
        vp = ap->a_vp;
        vap = ap->a_vap;
        hammer2_update_time(&ctime);

        ip = VTOI(vp);

        if (ip->pmp->ronly) {
                LOCKSTOP;
                return(EROFS);
        }

        hammer2_pfs_memory_wait(ip->pmp);
        hammer2_trans_init(&trans, ip->pmp, 0);
        hammer2_inode_lock(ip, 0);
        error = 0;

        if (vap->va_flags != VNOVAL) {
                u_int32_t flags;

                flags = ip->meta.uflags;
                error = vop_helper_setattr_flags(&flags, vap->va_flags,
                                      hammer2_to_unix_xid(&ip->meta.uid),
                                      ap->a_cred);
                if (error == 0) {
                        if (ip->meta.uflags != flags) {
                                hammer2_inode_modify(&trans, ip);
                                ip->meta.uflags = flags;
                                ip->meta.ctime = ctime;
                                kflags |= NOTE_ATTRIB;
                        }
                        if (ip->meta.uflags & (IMMUTABLE | APPEND)) {
                                error = 0;
                                goto done;
                        }
                }
                goto done;
        }
        if (ip->meta.uflags & (IMMUTABLE | APPEND)) {
                error = EPERM;
                goto done;
        }
        if (vap->va_uid != (uid_t)VNOVAL || vap->va_gid != (gid_t)VNOVAL) {
                mode_t cur_mode = ip->meta.mode;
                uid_t cur_uid = hammer2_to_unix_xid(&ip->meta.uid);
                gid_t cur_gid = hammer2_to_unix_xid(&ip->meta.gid);
                uuid_t uuid_uid;
                uuid_t uuid_gid;

                error = vop_helper_chown(ap->a_vp, vap->va_uid, vap->va_gid,
                                         ap->a_cred,
                                         &cur_uid, &cur_gid, &cur_mode);
                if (error == 0) {
                        hammer2_guid_to_uuid(&uuid_uid, cur_uid);
                        hammer2_guid_to_uuid(&uuid_gid, cur_gid);
                        if (bcmp(&uuid_uid, &ip->meta.uid, sizeof(uuid_uid)) ||
                            bcmp(&uuid_gid, &ip->meta.gid, sizeof(uuid_gid)) ||
                            ip->meta.mode != cur_mode) {
                                hammer2_inode_modify(&trans, ip);
                                ip->meta.uid = uuid_uid;
                                ip->meta.gid = uuid_gid;
                                ip->meta.mode = cur_mode;
                                ip->meta.ctime = ctime;
                        }
                        kflags |= NOTE_ATTRIB;
                }
        }

        /*
         * Resize the file
         */
        if (vap->va_size != VNOVAL && ip->meta.size != vap->va_size) {
                switch(vp->v_type) {
                case VREG:
                        if (vap->va_size == ip->meta.size)
                                break;
                        if (vap->va_size < ip->meta.size) {
                                hammer2_truncate_file(ip, vap->va_size);
                        } else {
                                hammer2_extend_file(ip, vap->va_size);
                        }
                        hammer2_inode_modify(&trans, ip);
                        ip->meta.mtime = ctime;
                        break;
                default:
                        error = EINVAL;
                        goto done;
                }
        }
#if 0
        /* atime not supported */
        if (vap->va_atime.tv_sec != VNOVAL) {
                hammer2_inode_modify(&trans, ip);
                ip->meta.atime = hammer2_timespec_to_time(&vap->va_atime);
                kflags |= NOTE_ATTRIB;
        }
#endif
        if (vap->va_mode != (mode_t)VNOVAL) {
                mode_t cur_mode = ip->meta.mode;
                uid_t cur_uid = hammer2_to_unix_xid(&ip->meta.uid);
                gid_t cur_gid = hammer2_to_unix_xid(&ip->meta.gid);

                error = vop_helper_chmod(ap->a_vp, vap->va_mode, ap->a_cred,
                                         cur_uid, cur_gid, &cur_mode);
                if (error == 0 && ip->meta.mode != cur_mode) {
                        hammer2_inode_modify(&trans, ip);
                        ip->meta.mode = cur_mode;
                        ip->meta.ctime = ctime;
                        kflags |= NOTE_ATTRIB;
                }
        }

        if (vap->va_mtime.tv_sec != VNOVAL) {
                hammer2_inode_modify(&trans, ip);
                ip->meta.mtime = hammer2_timespec_to_time(&vap->va_mtime);
                kflags |= NOTE_ATTRIB;
        }

done:
        /*
         * If a truncation occurred we must call inode_fsync() now in order
         * to trim the related data chains, otherwise a later expansion can
         * cause havoc.
         *
         * If an extend occurred that changed the DIRECTDATA state, we must
         * call inode_fsync now in order to prepare the inode's indirect
         * block table.
         */
        if (ip->flags & HAMMER2_INODE_RESIZED)
                hammer2_inode_fsync(&trans, ip, NULL);

        /*
         * Cleanup.
         */
        hammer2_inode_unlock(ip, NULL);
        hammer2_trans_done(&trans);
        hammer2_knote(ip->vp, kflags);

        LOCKSTOP;
        return (error);
}
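/*
 * hammer2_vop_readdir - iterate directory entries.
 *
 * Roughly (an overview of the code below): offsets 0 and 1 synthesize
 * the '.' and '..' entries; real entries are then scanned by directory
 * hash key via an XOP cluster scan, and the DIRHASH_VISIBLE bit (bit 63)
 * is stripped before the offset is handed back to userland.
 */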
static
int
hammer2_vop_readdir(struct vop_readdir_args *ap)
{
        hammer2_xop_readdir_t *xop;
        hammer2_blockref_t bref;
        hammer2_inode_t *ip;
        hammer2_tid_t inum;
        hammer2_key_t lkey;
        struct uio *uio;
        off_t *cookies;
        off_t saveoff;
        int cookie_index;
        int ncookies;
        int error;
        int eofflag;
        int dtype;
        int r;

        LOCKSTART;
        ip = VTOI(ap->a_vp);
        uio = ap->a_uio;
        saveoff = uio->uio_offset;
        eofflag = 0;
        error = 0;

        /*
         * Setup directory entry cookies if requested
         */
        if (ap->a_ncookies) {
                ncookies = uio->uio_resid / 16 + 1;
                if (ncookies > 1024)
                        ncookies = 1024;
                cookies = kmalloc(ncookies * sizeof(off_t), M_TEMP, M_WAITOK);
        } else {
                ncookies = -1;
                cookies = NULL;
        }
        cookie_index = 0;

        hammer2_inode_lock(ip, HAMMER2_RESOLVE_SHARED);

        /*
         * Handle artificial entries.  To ensure that only positive 64 bit
         * quantities are returned to userland we always strip off bit 63.
         * The hash code is designed such that codes 0x0000-0x7FFF are not
         * used, allowing us to use these codes for artificial entries.
         *
         * Entry 0 is used for '.' and entry 1 is used for '..'.  Do not
         * allow '..' to cross the mount point into (e.g.) the super-root.
         */
        if (saveoff == 0) {
                inum = ip->meta.inum & HAMMER2_DIRHASH_USERMSK;
                r = vop_write_dirent(&error, uio, inum, DT_DIR, 1, ".");
                if (r)
                        goto done;
                if (cookies)
                        cookies[cookie_index] = saveoff;
                ++saveoff;
                ++cookie_index;
                if (cookie_index == ncookies)
                        goto done;
        }

        if (saveoff == 1) {
                /*
                 * Be careful with lockorder when accessing ".."
                 *
                 * (ip is the current dir. xip is the parent dir).
                 */
                inum = ip->meta.inum & HAMMER2_DIRHASH_USERMSK;
                if (ip->pip && ip != ip->pmp->iroot)
                        inum = ip->pip->meta.inum & HAMMER2_DIRHASH_USERMSK;
                r = vop_write_dirent(&error, uio, inum, DT_DIR, 2, "..");
                if (r)
                        goto done;
                if (cookies)
                        cookies[cookie_index] = saveoff;
                ++saveoff;
                ++cookie_index;
                if (cookie_index == ncookies)
                        goto done;
        }

        lkey = saveoff | HAMMER2_DIRHASH_VISIBLE;
        if (hammer2_debug & 0x0020)
                kprintf("readdir: lkey %016jx\n", lkey);
        if (error)
                goto done;

        /*
         * Use XOP for cluster scan.
         *
         * parent is the inode cluster, already locked for us.  Don't
         * double lock shared locks as this will screw up upgrades.
         */
        xop = &hammer2_xop_alloc(ip, hammer2_xop_readdir)->xop_readdir;
        xop->head.lkey = lkey;
        hammer2_xop_start(&xop->head);

        for (;;) {
                const hammer2_inode_data_t *ripdata;

                error = hammer2_xop_collect(&xop->head);
                if (error)
                        break;
                if (cookie_index == ncookies)
                        break;
                if (hammer2_debug & 0x0020)
                        kprintf("cluster chain %p %p\n",
                                xop->head.cluster.focus,
                                (xop->head.cluster.focus ?
                                 xop->head.cluster.focus->data : (void *)-1));
                ripdata = &hammer2_cluster_rdata(&xop->head.cluster)->ipdata;
                hammer2_cluster_bref(&xop->head.cluster, &bref);
                if (bref.type == HAMMER2_BREF_TYPE_INODE) {
                        dtype = hammer2_get_dtype(ripdata);
                        saveoff = bref.key & HAMMER2_DIRHASH_USERMSK;
                        r = vop_write_dirent(&error, uio,
                                             ripdata->meta.inum &
                                              HAMMER2_DIRHASH_USERMSK,
                                             dtype,
                                             ripdata->meta.name_len,
                                             ripdata->filename);
                        if (r)
                                break;
                        if (cookies)
                                cookies[cookie_index] = saveoff;
                        ++cookie_index;
                } else {
                        /* XXX chain error */
                        kprintf("bad chain type readdir %d\n", bref.type);
                }
        }
        hammer2_xop_retire(&xop->head, HAMMER2_XOPMASK_VOP);
        if (error == ENOENT) {
                error = 0;
                eofflag = 1;
                saveoff = (hammer2_key_t)-1;
        } else {
                saveoff = bref.key & HAMMER2_DIRHASH_USERMSK;
        }
done:
        hammer2_inode_unlock(ip, NULL);
        if (ap->a_eofflag)
                *ap->a_eofflag = eofflag;
        if (hammer2_debug & 0x0020)
                kprintf("readdir: done at %016jx\n", saveoff);
        uio->uio_offset = saveoff & ~HAMMER2_DIRHASH_VISIBLE;
        if (error && cookie_index == 0) {
                if (cookies) {
                        kfree(cookies, M_TEMP);
                        *ap->a_ncookies = 0;
                        *ap->a_cookies = NULL;
                }
        } else {
                if (cookies) {
                        *ap->a_ncookies = cookie_index;
                        *ap->a_cookies = cookies;
                }
        }
        LOCKSTOP;
        return (error);
}
/*
 * hammer2_vop_readlink { vp, uio, cred }
 */
static
int
hammer2_vop_readlink(struct vop_readlink_args *ap)
{
        struct vnode *vp;
        hammer2_inode_t *ip;
        int error;

        vp = ap->a_vp;
        if (vp->v_type != VLNK)
                return (EINVAL);
        ip = VTOI(vp);

        error = hammer2_read_file(ip, ap->a_uio, 0);
        return (error);
}
static
int
hammer2_vop_read(struct vop_read_args *ap)
{
        struct vnode *vp;
        hammer2_inode_t *ip;
        struct uio *uio;
        int error;
        int seqcount;
        int bigread;

        /*
         * Read operations supported on this vnode?
         */
        vp = ap->a_vp;
        if (vp->v_type != VREG)
                return (EINVAL);

        /*
         * Misc
         */
        ip = VTOI(vp);
        uio = ap->a_uio;
        error = 0;

        seqcount = ap->a_ioflag >> 16;
        bigread = (uio->uio_resid > 100 * 1024 * 1024);

        error = hammer2_read_file(ip, uio, seqcount);
        return (error);
}
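/*
 * hammer2_vop_write - write path.  Checks RLIMIT_FSIZE (posting SIGXFSZ
 * on overrun), then runs hammer2_write_file() inside a transaction so
 * the write is interlocked against flush initiation (an editor's
 * summary of the code below).
 */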
static
int
hammer2_vop_write(struct vop_write_args *ap)
{
        hammer2_inode_t *ip;
        hammer2_trans_t trans;
        thread_t td;
        struct vnode *vp;
        struct uio *uio;
        int error;
        int seqcount;
        int bigwrite;

        /*
         * Write operations supported on this vnode?
         */
        vp = ap->a_vp;
        if (vp->v_type != VREG)
                return (EINVAL);

        /*
         * Misc
         */
        ip = VTOI(vp);
        uio = ap->a_uio;
        error = 0;
        if (ip->pmp->ronly) {
                return (EROFS);
        }

        seqcount = ap->a_ioflag >> 16;
        bigwrite = (uio->uio_resid > 100 * 1024 * 1024);

        /*
         * Check resource limit
         */
        if (uio->uio_resid > 0 && (td = uio->uio_td) != NULL && td->td_proc &&
            uio->uio_offset + uio->uio_resid >
             td->td_proc->p_rlimit[RLIMIT_FSIZE].rlim_cur) {
                lwpsignal(td->td_proc, td->td_lwp, SIGXFSZ);
                return (EFBIG);
        }

        /*
         * The transaction interlocks against flush initiations
         * (note: but will run concurrently with the actual flush).
         */
        hammer2_trans_init(&trans, ip->pmp, 0);
        error = hammer2_write_file(&trans, ip, uio, ap->a_ioflag, seqcount);
        hammer2_trans_done(&trans);

        return (error);
}
/*
 * Perform read operations on a file or symlink given an UNLOCKED
 * inode and uio.
 *
 * The passed ip is not locked.
 */
static
int
hammer2_read_file(hammer2_inode_t *ip, struct uio *uio, int seqcount)
{
        hammer2_off_t size;
        struct buf *bp;
        int error;

        error = 0;

        /*
         * UIO read loop.
         *
         * WARNING! Assumes that the kernel interlocks size changes at the
         *          vnode level.
         */
        hammer2_mtx_sh(&ip->lock);
        size = ip->meta.size;
        hammer2_mtx_unlock(&ip->lock);

        while (uio->uio_resid > 0 && uio->uio_offset < size) {
                hammer2_key_t lbase;
                hammer2_key_t leof;
                int lblksize;
                int loff;
                int n;

                lblksize = hammer2_calc_logical(ip, uio->uio_offset,
                                                &lbase, &leof);

                error = cluster_read(ip->vp, leof, lbase, lblksize,
                                     uio->uio_resid, seqcount * BKVASIZE,
                                     &bp);

                if (error)
                        break;
                loff = (int)(uio->uio_offset - lbase);
                n = lblksize - loff;
                if (n > uio->uio_resid)
                        n = uio->uio_resid;
                if (n > size - uio->uio_offset)
                        n = (int)(size - uio->uio_offset);
                bp->b_flags |= B_AGE;
                uiomove((char *)bp->b_data + loff, n, uio);
                bqrelse(bp);
        }
        return (error);
}
/*
 * Write to the file represented by the inode via the logical buffer cache.
 * The inode may represent a regular file or a symlink.
 *
 * The inode must not be locked.
 */
static
int
hammer2_write_file(hammer2_trans_t *trans, hammer2_inode_t *ip,
                   struct uio *uio, int ioflag, int seqcount)
{
        hammer2_key_t old_eof;
        hammer2_key_t new_eof;
        struct buf *bp;
        int kflags;
        int error;
        int modified;

        /*
         * Setup if append
         *
         * WARNING! Assumes that the kernel interlocks size changes at the
         *          vnode level.
         */
        hammer2_mtx_ex(&ip->lock);
        if (ioflag & IO_APPEND)
                uio->uio_offset = ip->meta.size;
        old_eof = ip->meta.size;

        /*
         * Extend the file if necessary.  If the write fails at some point
         * we will truncate it back down to cover as much as we were able
         * to write.
         *
         * Doing this now makes it easier to calculate buffer sizes in
         * the loop.
         */
        kflags = 0;
        error = 0;
        modified = 0;

        if (uio->uio_offset + uio->uio_resid > old_eof) {
                new_eof = uio->uio_offset + uio->uio_resid;
                modified = 1;
                hammer2_extend_file(ip, new_eof);
                kflags |= NOTE_EXTEND;
        } else {
                new_eof = old_eof;
        }
        hammer2_mtx_unlock(&ip->lock);

        /*
         * UIO write loop
         */
        while (uio->uio_resid > 0) {
                hammer2_key_t lbase;
                int trivial;
                int endofblk;
                int lblksize;
                int loff;
                int n;

                /*
                 * Don't allow the buffer build to blow out the buffer
                 * cache.
                 */
                if ((ioflag & IO_RECURSE) == 0)
                        bwillwrite(HAMMER2_PBUFSIZE);

                /*
                 * This nominally tells us how much we can cluster and
                 * what the logical buffer size needs to be.  Currently
                 * we don't try to cluster the write and just handle one
                 * block at a time.
                 */
                lblksize = hammer2_calc_logical(ip, uio->uio_offset,
                                                &lbase, NULL);
                loff = (int)(uio->uio_offset - lbase);

                KKASSERT(lblksize <= 65536);

                /*
                 * Calculate bytes to copy this transfer and whether the
                 * copy completely covers the buffer or not.
                 */
                trivial = 0;
                n = lblksize - loff;
                if (n > uio->uio_resid) {
                        n = uio->uio_resid;
                        if (loff == lbase && uio->uio_offset + n == new_eof)
                                trivial = 1;
                        endofblk = 0;
                } else {
                        if (loff == 0)
                                trivial = 1;
                        endofblk = 1;
                }

                /*
                 * Get the buffer
                 */
                if (uio->uio_segflg == UIO_NOCOPY) {
                        /*
                         * Issuing a write with the same data backing the
                         * buffer.  Instantiate the buffer to collect the
                         * backing vm pages, then read-in any missing bits.
                         *
                         * This case is used by vop_stdputpages().
                         */
                        bp = getblk(ip->vp, lbase, lblksize, GETBLK_BHEAVY, 0);
                        if ((bp->b_flags & B_CACHE) == 0) {
                                bqrelse(bp);
                                error = bread(ip->vp, lbase, lblksize, &bp);
                        }
                } else if (trivial) {
                        /*
                         * Even though we are entirely overwriting the buffer
                         * we may still have to zero it out to avoid a
                         * mmap/write visibility issue.
                         */
                        bp = getblk(ip->vp, lbase, lblksize, GETBLK_BHEAVY, 0);
                        if ((bp->b_flags & B_CACHE) == 0)
                                vfs_bio_clrbuf(bp);
                } else {
                        /*
                         * Partial overwrite, read in any missing bits then
                         * replace the portion being written.
                         *
                         * (The strategy code will detect zero-fill physical
                         * blocks for this case).
                         */
                        error = bread(ip->vp, lbase, lblksize, &bp);
                        if (error == 0)
                                bheavy(bp);
                }

                if (error) {
                        brelse(bp);
                        break;
                }

                /*
                 * Ok, copy the data in
                 */
                error = uiomove(bp->b_data + loff, n, uio);
                kflags |= NOTE_WRITE;
                modified = 1;
                if (error) {
                        brelse(bp);
                        break;
                }

                /*
                 * WARNING: Pageout daemon will issue UIO_NOCOPY writes
                 *          with IO_SYNC or IO_ASYNC set.  These writes
                 *          must be handled as the pageout daemon expects.
                 */
                if (ioflag & IO_SYNC) {
                        bwrite(bp);
                } else if ((ioflag & IO_DIRECT) && endofblk) {
                        bawrite(bp);
                } else if (ioflag & IO_ASYNC) {
                        bawrite(bp);
                } else {
                        bdwrite(bp);
                }
        }

        /*
         * Cleanup.  If we extended the file EOF but failed to write through,
         * the entire write is a failure and we have to back-up.
         */
        if (error && new_eof != old_eof) {
                hammer2_mtx_ex(&ip->lock);
                hammer2_truncate_file(ip, old_eof);
                if (ip->flags & HAMMER2_INODE_MODIFIED)
                        hammer2_inode_fsync(trans, ip, NULL);
                hammer2_mtx_unlock(&ip->lock);
        } else if (modified) {
                hammer2_mtx_ex(&ip->lock);
                hammer2_inode_modify(NULL, ip);
                hammer2_update_time(&ip->meta.mtime);
                if (ip->flags & HAMMER2_INODE_MODIFIED)
                        hammer2_inode_fsync(trans, ip, NULL);
                hammer2_mtx_unlock(&ip->lock);
                hammer2_knote(ip->vp, kflags);
        }
        hammer2_trans_assert_strategy(ip->pmp);

        return error;
}
/*
 * Truncate the size of a file.  The inode must be locked on entry; its
 * lock is cycled around the nvtruncbuf() call.
 *
 * We must unconditionally set HAMMER2_INODE_RESIZED to properly
 * ensure that any on-media data beyond the new file EOF has been destroyed.
 *
 * WARNING: nvtruncbuf() can only be safely called without the inode lock
 *          held due to the way our write thread works.
 *
 * WARNING! Assumes that the kernel interlocks size changes at the
 *          vnode level.
 */
static
void
hammer2_truncate_file(hammer2_inode_t *ip, hammer2_key_t nsize)
{
        hammer2_key_t lbase;
        int nblksize;

        LOCKSTART;
        hammer2_mtx_unlock(&ip->lock);
        if (ip->vp) {
                nblksize = hammer2_calc_logical(ip, nsize, &lbase, NULL);
                nvtruncbuf(ip->vp, nsize,
                           nblksize, (int)nsize & (nblksize - 1),
                           0);
        }
        hammer2_mtx_ex(&ip->lock);
        KKASSERT((ip->flags & HAMMER2_INODE_RESIZED) == 0);
        ip->osize = ip->meta.size;
        ip->meta.size = nsize;
        atomic_set_int(&ip->flags, HAMMER2_INODE_MODIFIED |
                                   HAMMER2_INODE_RESIZED);
        LOCKSTOP;
}
/*
 * Extend the size of a file.  The inode must be locked on entry; its
 * lock is cycled around the nvextendbuf() call.
 *
 * Even though the file size is changing, we do not have to set the
 * INODE_RESIZED bit unless the file size crosses the EMBEDDED_BYTES
 * boundary.  When this occurs a hammer2_inode_fsync() is required
 * to prepare the inode cluster's indirect block table.
 *
 * WARNING! Assumes that the kernel interlocks size changes at the
 *          vnode level.
 */
static
void
hammer2_extend_file(hammer2_inode_t *ip, hammer2_key_t nsize)
{
        hammer2_key_t lbase;
        hammer2_key_t osize;
        int oblksize;
        int nblksize;

        LOCKSTART;

        KKASSERT((ip->flags & HAMMER2_INODE_RESIZED) == 0);
        osize = ip->meta.size;
        ip->osize = osize;
        ip->meta.size = nsize;
        atomic_set_int(&ip->flags, HAMMER2_INODE_MODIFIED);

        if (osize <= HAMMER2_EMBEDDED_BYTES && nsize > HAMMER2_EMBEDDED_BYTES)
                atomic_set_int(&ip->flags, HAMMER2_INODE_RESIZED);

        hammer2_mtx_unlock(&ip->lock);
        if (ip->vp) {
                oblksize = hammer2_calc_logical(ip, osize, &lbase, NULL);
                nblksize = hammer2_calc_logical(ip, nsize, &lbase, NULL);
                nvextendbuf(ip->vp,
                            osize, nsize,
                            oblksize, nblksize,
                            -1, -1, 0);
        }
        hammer2_mtx_ex(&ip->lock);

        LOCKSTOP;
}
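/*
 * hammer2_vop_nresolve { nch, dvp, cred }
 *
 * Resolve a filename against a directory (an editor's summary of the
 * code below): scan the directory-hash range
 * [lhc, lhc + HAMMER2_DIRHASH_LOMASK] for a matching name, resolve any
 * hardlink forwarding entry, then acquire the vnode.
 */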
static
int
hammer2_vop_nresolve(struct vop_nresolve_args *ap)
{
        hammer2_inode_t *ip;
        hammer2_inode_t *dip;
        hammer2_cluster_t *cparent;
        hammer2_cluster_t *cluster;
        const hammer2_inode_data_t *ripdata;
        hammer2_key_t key_next;
        hammer2_key_t lhc;
        struct namecache *ncp;
        const uint8_t *name;
        size_t name_len;
        int error = 0;
        struct vnode *vp;

        LOCKSTART;
        dip = VTOI(ap->a_dvp);
        ncp = ap->a_nch->ncp;
        name = ncp->nc_name;
        name_len = ncp->nc_nlen;
        lhc = hammer2_dirhash(name, name_len);

        /*
         * Note: In DragonFly the kernel handles '.' and '..'.
         */
        hammer2_inode_lock(dip, HAMMER2_RESOLVE_ALWAYS |
                                HAMMER2_RESOLVE_SHARED);
        cparent = hammer2_inode_cluster(dip, HAMMER2_RESOLVE_ALWAYS |
                                             HAMMER2_RESOLVE_SHARED);

        cluster = hammer2_cluster_lookup(cparent, &key_next,
                                         lhc, lhc + HAMMER2_DIRHASH_LOMASK,
                                         HAMMER2_LOOKUP_SHARED);
        while (cluster) {
                if (hammer2_cluster_type(cluster) == HAMMER2_BREF_TYPE_INODE) {
                        ripdata = &hammer2_cluster_rdata(cluster)->ipdata;
                        if (ripdata->meta.name_len == name_len &&
                            bcmp(ripdata->filename, name, name_len) == 0) {
                                break;
                        }
                }
                cluster = hammer2_cluster_next(cparent, cluster, &key_next,
                                               key_next,
                                               lhc + HAMMER2_DIRHASH_LOMASK,
                                               HAMMER2_LOOKUP_SHARED);
        }
        hammer2_inode_unlock(dip, cparent);

        /*
         * Resolve hardlink entries before acquiring the inode.
         */
        if (cluster) {
                ripdata = &hammer2_cluster_rdata(cluster)->ipdata;
                if (ripdata->meta.type == HAMMER2_OBJTYPE_HARDLINK) {
                        hammer2_tid_t inum = ripdata->meta.inum;
                        error = hammer2_hardlink_find(dip, NULL, &cluster);
                        if (error) {
                                kprintf("hammer2: unable to find hardlink "
                                        "0x%016jx\n", inum);
                                LOCKSTOP;

                                return error;
                        }
                }
        }

        /*
         * nresolve needs to resolve hardlinks, the original cluster is not
         * sufficient.
         */
        if (cluster) {
                ip = hammer2_inode_get(dip->pmp, dip, cluster);
                ripdata = &hammer2_cluster_rdata(cluster)->ipdata;
                if (ripdata->meta.type == HAMMER2_OBJTYPE_HARDLINK) {
                        kprintf("nresolve: fixup hardlink\n");
                        hammer2_inode_ref(ip);
                        hammer2_inode_unlock(ip, NULL);
                        hammer2_cluster_unlock(cluster);
                        hammer2_cluster_drop(cluster);
                        hammer2_inode_lock(ip, HAMMER2_RESOLVE_ALWAYS);
                        cluster = hammer2_inode_cluster(ip,
                                                     HAMMER2_RESOLVE_ALWAYS);
                        ripdata = &hammer2_cluster_rdata(cluster)->ipdata;
                        hammer2_inode_drop(ip);
                        kprintf("nresolve: fixup to type %02x\n",
                                ripdata->meta.type);
                }
        } else {
                ip = NULL;
        }

#if 0
        /*
         * Deconsolidate any hardlink whose nlinks == 1.  Ignore errors.
         * If an error occurs chain and ip are left alone.
         *
         * XXX upgrade shared lock?
         */
        if (ochain && chain &&
            chain->data->ipdata.meta.nlinks == 1 && !dip->pmp->ronly) {
                kprintf("hammer2: need to unconsolidate hardlink for %s\n",
                        chain->data->ipdata.filename);
                /* XXX retain shared lock on dip? (currently not held) */
                hammer2_trans_init(&trans, dip->pmp, 0);
                hammer2_hardlink_deconsolidate(&trans, dip, &chain, &ochain);
                hammer2_trans_done(&trans);
        }
#endif

        /*
         * Acquire the related vnode
         *
         * NOTE: For error processing, only ENOENT resolves the namecache
         *       entry to NULL, otherwise we just return the error and
         *       leave the namecache unresolved.
         *
         * NOTE: multiple hammer2_inode structures can be aliased to the
         *       same chain element, for example for hardlinks.  This
         *       use case does not 'reattach' inode associations that
         *       might already exist, but always allocates a new one.
         *
         * WARNING: inode structure is locked exclusively via inode_get
         *          but chain was locked shared.  inode_unlock()
         *          will handle it properly.
         */
        if (cluster) {
                vp = hammer2_igetv(ip, cluster, &error);
                if (error == 0) {
                        vn_unlock(vp);
                        cache_setvp(ap->a_nch, vp);
                } else if (error == ENOENT) {
                        cache_setvp(ap->a_nch, NULL);
                }
                hammer2_inode_unlock(ip, cluster);

                /*
                 * The vp should not be released until after we've disposed
                 * of our locks, because it might cause vop_inactive() to
                 * be called.
                 */
                if (vp)
                        vrele(vp);
        } else {
                error = ENOENT;
                cache_setvp(ap->a_nch, NULL);
        }
        KASSERT(error || ap->a_nch->ncp->nc_vp != NULL,
                ("resolve error %d/%p ap %p\n",
                 error, ap->a_nch->ncp->nc_vp, ap));
        LOCKSTOP;
        return error;
}
static
int
hammer2_vop_nlookupdotdot(struct vop_nlookupdotdot_args *ap)
{
        hammer2_inode_t *dip;
        hammer2_inode_t *ip;
        hammer2_cluster_t *cparent;
        int error;

        LOCKSTART;
        dip = VTOI(ap->a_dvp);

        if ((ip = dip->pip) == NULL) {
                *ap->a_vpp = NULL;
                LOCKSTOP;
                return ENOENT;
        }
        hammer2_inode_lock(ip, HAMMER2_RESOLVE_ALWAYS);
        cparent = hammer2_inode_cluster(ip, HAMMER2_RESOLVE_ALWAYS);
        *ap->a_vpp = hammer2_igetv(ip, cparent, &error);
        hammer2_inode_unlock(ip, cparent);

        LOCKSTOP;
        return error;
}
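/*
 * hammer2_vop_nmkdir { nch, dvp, vpp, cred, vap }
 *
 * (Signature listed by analogy with hammer2_vop_ncreate below, which
 * takes the same arguments.)
 */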
static
int
hammer2_vop_nmkdir(struct vop_nmkdir_args *ap)
{
        hammer2_inode_t *dip;
        hammer2_inode_t *nip;
        hammer2_trans_t trans;
        hammer2_cluster_t *cluster;
        struct namecache *ncp;
        const uint8_t *name;
        size_t name_len;
        int error;

        LOCKSTART;
        dip = VTOI(ap->a_dvp);
        if (dip->pmp->ronly) {
                LOCKSTOP;
                return (EROFS);
        }

        ncp = ap->a_nch->ncp;
        name = ncp->nc_name;
        name_len = ncp->nc_nlen;
        cluster = NULL;

        hammer2_pfs_memory_wait(dip->pmp);
        hammer2_trans_init(&trans, dip->pmp, HAMMER2_TRANS_NEWINODE);
        nip = hammer2_inode_create(&trans, dip, ap->a_vap, ap->a_cred,
                                   name, name_len,
                                   &cluster, 0, &error);
        if (error) {
                KKASSERT(nip == NULL);
                *ap->a_vpp = NULL;
        } else {
                *ap->a_vpp = hammer2_igetv(nip, cluster, &error);
                hammer2_inode_unlock(nip, cluster);
        }
        hammer2_trans_done(&trans);

        if (error == 0) {
                cache_setunresolved(ap->a_nch);
                cache_setvp(ap->a_nch, *ap->a_vpp);
        }
        LOCKSTOP;
        return error;
}
static
int
hammer2_vop_open(struct vop_open_args *ap)
{
        return vop_stdopen(ap);
}
/*
 * hammer2_vop_advlock { vp, id, op, fl, flags }
 */
static
int
hammer2_vop_advlock(struct vop_advlock_args *ap)
{
        hammer2_inode_t *ip = VTOI(ap->a_vp);
        const hammer2_inode_data_t *ripdata;
        hammer2_cluster_t *cparent;
        hammer2_off_t size;

        hammer2_inode_lock(ip, HAMMER2_RESOLVE_ALWAYS |
                               HAMMER2_RESOLVE_SHARED);
        cparent = hammer2_inode_cluster(ip, HAMMER2_RESOLVE_ALWAYS |
                                            HAMMER2_RESOLVE_SHARED);
        ripdata = &hammer2_cluster_rdata(cparent)->ipdata;
        size = ripdata->meta.size;
        hammer2_inode_unlock(ip, cparent);
        return (lf_advlock(ap, &ip->advlock, size));
}
static
int
hammer2_vop_close(struct vop_close_args *ap)
{
        return vop_stdclose(ap);
}
/*
 * hammer2_vop_nlink { nch, dvp, vp, cred }
 *
 * Create a hardlink from (vp) to {dvp, nch}.
 */
static
int
hammer2_vop_nlink(struct vop_nlink_args *ap)
{
        hammer2_inode_t *fdip;  /* directory where the file currently resides */
        hammer2_inode_t *tdip;  /* target directory to create link in */
        hammer2_inode_t *cdip;  /* common parent directory */
        hammer2_inode_t *ip;    /* inode we are hardlinking to */
        hammer2_cluster_t *cluster;
        hammer2_cluster_t *fdcluster;
        hammer2_cluster_t *tdcluster;
        hammer2_cluster_t *cdcluster;
        hammer2_trans_t trans;
        struct namecache *ncp;
        const uint8_t *name;
        size_t name_len;
        int error;

        LOCKSTART;
        tdip = VTOI(ap->a_dvp);
        if (tdip->pmp->ronly) {
                LOCKSTOP;
                return (EROFS);
        }

        ncp = ap->a_nch->ncp;
        name = ncp->nc_name;
        name_len = ncp->nc_nlen;

        /*
         * ip represents the file being hardlinked.  The file could be a
         * normal file or a hardlink target if it has already been hardlinked.
         * If ip is a hardlinked target then ip->pip represents the location
         * of the hardlinked target, NOT the location of the hardlink pointer.
         *
         * Bump nlinks and potentially also create or move the hardlink
         * target in the parent directory common to (ip) and (tdip).  The
         * consolidation code can modify ip->cluster and ip->pip.  The
         * returned cluster is locked.
         */
        ip = VTOI(ap->a_vp);
        hammer2_pfs_memory_wait(ip->pmp);
        hammer2_trans_init(&trans, ip->pmp, HAMMER2_TRANS_NEWINODE);

        /*
         * The common parent directory must be locked first to avoid deadlocks.
         * Also note that fdip and/or tdip might match cdip.
         */
        fdip = ip->pip;
        cdip = hammer2_inode_common_parent(fdip, tdip);
        hammer2_inode_lock(cdip, HAMMER2_RESOLVE_ALWAYS);
        hammer2_inode_lock(fdip, HAMMER2_RESOLVE_ALWAYS);
        hammer2_inode_lock(tdip, HAMMER2_RESOLVE_ALWAYS);
        cdcluster = hammer2_inode_cluster(cdip, HAMMER2_RESOLVE_ALWAYS);
        fdcluster = hammer2_inode_cluster(fdip, HAMMER2_RESOLVE_ALWAYS);
        tdcluster = hammer2_inode_cluster(tdip, HAMMER2_RESOLVE_ALWAYS);

        hammer2_inode_lock(ip, HAMMER2_RESOLVE_ALWAYS);
        cluster = hammer2_inode_cluster(ip, HAMMER2_RESOLVE_ALWAYS);

        error = hammer2_hardlink_consolidate(&trans, ip, &cluster,
                                             cdip, cdcluster, 1);
        if (error)
                goto done;

        /*
         * Create a directory entry connected to the specified cluster.
         *
         * WARNING! chain can get moved by the connect (indirectly due to
         *          potential indirect block creation).
         */
        error = hammer2_inode_connect(&trans,
                                      ip, &cluster, 1,
                                      tdip, tdcluster,
                                      name, name_len, 0);
        if (error == 0) {
                cache_setunresolved(ap->a_nch);
                cache_setvp(ap->a_nch, ap->a_vp);
        }
done:
        hammer2_inode_unlock(ip, cluster);
        hammer2_inode_unlock(tdip, tdcluster);
        hammer2_inode_unlock(fdip, fdcluster);
        hammer2_inode_unlock(cdip, cdcluster);
        hammer2_inode_drop(cdip);
        hammer2_trans_done(&trans);

        LOCKSTOP;
        return error;
}
/*
 * hammer2_vop_ncreate { nch, dvp, vpp, cred, vap }
 *
 * The operating system has already ensured that the directory entry
 * does not exist and done all appropriate namespace locking.
 */
static
int
hammer2_vop_ncreate(struct vop_ncreate_args *ap)
{
        hammer2_inode_t *dip;
        hammer2_inode_t *nip;
        hammer2_trans_t trans;
        hammer2_cluster_t *ncluster;
        struct namecache *ncp;
        const uint8_t *name;
        size_t name_len;
        int error;

        LOCKSTART;
        dip = VTOI(ap->a_dvp);
        if (dip->pmp->ronly) {
                LOCKSTOP;
                return (EROFS);
        }

        ncp = ap->a_nch->ncp;
        name = ncp->nc_name;
        name_len = ncp->nc_nlen;
        hammer2_pfs_memory_wait(dip->pmp);
        hammer2_trans_init(&trans, dip->pmp, HAMMER2_TRANS_NEWINODE);
        ncluster = NULL;

        nip = hammer2_inode_create(&trans, dip, ap->a_vap, ap->a_cred,
                                   name, name_len,
                                   &ncluster, 0, &error);
        if (error) {
                KKASSERT(nip == NULL);
                *ap->a_vpp = NULL;
        } else {
                *ap->a_vpp = hammer2_igetv(nip, ncluster, &error);
                hammer2_inode_unlock(nip, ncluster);
        }
        hammer2_trans_done(&trans);

        if (error == 0) {
                cache_setunresolved(ap->a_nch);
                cache_setvp(ap->a_nch, *ap->a_vpp);
        }
        LOCKSTOP;
        return error;
}
/*
 * Make a device node (typically a fifo)
 */
static
int
hammer2_vop_nmknod(struct vop_nmknod_args *ap)
{
        hammer2_inode_t *dip;
        hammer2_inode_t *nip;
        hammer2_trans_t trans;
        hammer2_cluster_t *ncluster;
        struct namecache *ncp;
        const uint8_t *name;
        size_t name_len;
        int error;

        LOCKSTART;
        dip = VTOI(ap->a_dvp);
        if (dip->pmp->ronly) {
                LOCKSTOP;
                return (EROFS);
        }

        ncp = ap->a_nch->ncp;
        name = ncp->nc_name;
        name_len = ncp->nc_nlen;
        hammer2_pfs_memory_wait(dip->pmp);
        hammer2_trans_init(&trans, dip->pmp, HAMMER2_TRANS_NEWINODE);
        ncluster = NULL;

        nip = hammer2_inode_create(&trans, dip, ap->a_vap, ap->a_cred,
                                   name, name_len,
                                   &ncluster, 0, &error);
        if (error) {
                KKASSERT(nip == NULL);
                *ap->a_vpp = NULL;
        } else {
                *ap->a_vpp = hammer2_igetv(nip, ncluster, &error);
                hammer2_inode_unlock(nip, ncluster);
        }
        hammer2_trans_done(&trans);

        if (error == 0) {
                cache_setunresolved(ap->a_nch);
                cache_setvp(ap->a_nch, *ap->a_vpp);
        }
        LOCKSTOP;
        return error;
}
/*
 * hammer2_vop_nsymlink { nch, dvp, vpp, cred, vap, target }
 */
static
int
hammer2_vop_nsymlink(struct vop_nsymlink_args *ap)
{
        hammer2_inode_t *dip;
        hammer2_inode_t *nip;
        hammer2_cluster_t *ncparent;
        hammer2_trans_t trans;
        struct namecache *ncp;
        const uint8_t *name;
        size_t name_len;
        int error;

        dip = VTOI(ap->a_dvp);
        if (dip->pmp->ronly)
                return (EROFS);

        ncp = ap->a_nch->ncp;
        name = ncp->nc_name;
        name_len = ncp->nc_nlen;
        hammer2_pfs_memory_wait(dip->pmp);
        hammer2_trans_init(&trans, dip->pmp, HAMMER2_TRANS_NEWINODE);
        ncparent = NULL;

        ap->a_vap->va_type = VLNK;      /* enforce type */

        nip = hammer2_inode_create(&trans, dip, ap->a_vap, ap->a_cred,
                                   name, name_len,
                                   &ncparent, 0, &error);
        if (error) {
                KKASSERT(nip == NULL);
                *ap->a_vpp = NULL;
                hammer2_trans_done(&trans);
                return error;
        }
        *ap->a_vpp = hammer2_igetv(nip, ncparent, &error);

        /*
         * Build the softlink (~like file data) and finalize the namecache.
         */
        if (error == 0) {
                size_t bytes;
                struct uio auio;
                struct iovec aiov;
                hammer2_inode_data_t *nipdata;

                nipdata = &hammer2_cluster_wdata(ncparent)->ipdata;
                /* nipdata = &nip->chain->data->ipdata;XXX */
                bytes = strlen(ap->a_target);

                if (bytes <= HAMMER2_EMBEDDED_BYTES) {
                        KKASSERT(nipdata->meta.op_flags &
                                 HAMMER2_OPFLAG_DIRECTDATA);
                        bcopy(ap->a_target, nipdata->u.data, bytes);
                        nipdata->meta.size = bytes;
                        nip->meta.size = bytes;
                        hammer2_cluster_modsync(ncparent);
                        hammer2_inode_unlock(nip, ncparent);
                        /* nipdata = NULL; not needed */
                } else {
                        hammer2_inode_unlock(nip, ncparent);
                        /* nipdata = NULL; not needed */
                        bzero(&auio, sizeof(auio));
                        bzero(&aiov, sizeof(aiov));
                        auio.uio_iov = &aiov;
                        auio.uio_segflg = UIO_SYSSPACE;
                        auio.uio_rw = UIO_WRITE;
                        auio.uio_resid = bytes;
                        auio.uio_iovcnt = 1;
                        auio.uio_td = curthread;
                        aiov.iov_base = ap->a_target;
                        aiov.iov_len = bytes;
                        error = hammer2_write_file(&trans, nip,
                                                   &auio, IO_APPEND, 0);
                        /* XXX handle error */
                        error = 0;
                }
        } else {
                hammer2_inode_unlock(nip, ncparent);
        }
        hammer2_trans_done(&trans);

        /*
         * Finalize namecache
         */
        if (error == 0) {
                cache_setunresolved(ap->a_nch);
                cache_setvp(ap->a_nch, *ap->a_vpp);
                /* hammer2_knote(ap->a_dvp, NOTE_WRITE); */
        }
        return error;
}
/*
 * hammer2_vop_nremove { nch, dvp, cred }
 */
static
int
hammer2_vop_nremove(struct vop_nremove_args *ap)
{
        hammer2_inode_t *dip;
        hammer2_trans_t trans;
        struct namecache *ncp;
        const uint8_t *name;
        size_t name_len;
        int error;

        LOCKSTART;
        dip = VTOI(ap->a_dvp);
        if (dip->pmp->ronly) {
                LOCKSTOP;
                return(EROFS);
        }

        ncp = ap->a_nch->ncp;
        name = ncp->nc_name;
        name_len = ncp->nc_nlen;

        hammer2_pfs_memory_wait(dip->pmp);
        hammer2_trans_init(&trans, dip->pmp, 0);
        error = hammer2_unlink_file(&trans, dip, NULL, name, name_len,
                                    0, NULL, ap->a_nch, -1);
        hammer2_run_unlinkq(&trans, dip->pmp);
        hammer2_trans_done(&trans);
        if (error == 0)
                cache_unlink(ap->a_nch);
        LOCKSTOP;
        return (error);
}
/*
 * hammer2_vop_nrmdir { nch, dvp, cred }
 */
static
int
hammer2_vop_nrmdir(struct vop_nrmdir_args *ap)
{
        hammer2_inode_t *dip;
        hammer2_trans_t trans;
        struct namecache *ncp;
        const uint8_t *name;
        size_t name_len;
        int error;

        LOCKSTART;
        dip = VTOI(ap->a_dvp);
        if (dip->pmp->ronly) {
                LOCKSTOP;
                return(EROFS);
        }

        ncp = ap->a_nch->ncp;
        name = ncp->nc_name;
        name_len = ncp->nc_nlen;

        hammer2_pfs_memory_wait(dip->pmp);
        hammer2_trans_init(&trans, dip->pmp, 0);
        hammer2_run_unlinkq(&trans, dip->pmp);
        error = hammer2_unlink_file(&trans, dip, NULL, name, name_len,
                                    1, NULL, ap->a_nch, -1);
        hammer2_trans_done(&trans);
        if (error == 0)
                cache_unlink(ap->a_nch);
        LOCKSTOP;
        return (error);
}
/*
 * hammer2_vop_nrename { fnch, tnch, fdvp, tdvp, cred }
 */
static
int
hammer2_vop_nrename(struct vop_nrename_args *ap)
{
        struct namecache *fncp;
        struct namecache *tncp;
        hammer2_inode_t *cdip;
        hammer2_inode_t *fdip;
        hammer2_inode_t *tdip;
        hammer2_inode_t *ip;
        hammer2_cluster_t *cluster;
        hammer2_cluster_t *fdcluster;
        hammer2_cluster_t *tdcluster;
        hammer2_cluster_t *cdcluster;
        hammer2_trans_t trans;
        const uint8_t *fname;
        size_t fname_len;
        const uint8_t *tname;
        size_t tname_len;
        int error;
        int tnch_error;
        int hlink;

        if (ap->a_fdvp->v_mount != ap->a_tdvp->v_mount)
                return(EXDEV);
        if (ap->a_fdvp->v_mount != ap->a_fnch->ncp->nc_vp->v_mount)
                return(EXDEV);

        fdip = VTOI(ap->a_fdvp);        /* source directory */
        tdip = VTOI(ap->a_tdvp);        /* target directory */

        if (fdip->pmp->ronly)
                return(EROFS);

        LOCKSTART;
        fncp = ap->a_fnch->ncp;         /* entry name in source */
        fname = fncp->nc_name;
        fname_len = fncp->nc_nlen;

        tncp = ap->a_tnch->ncp;         /* entry name in target */
        tname = tncp->nc_name;
        tname_len = tncp->nc_nlen;

        hammer2_pfs_memory_wait(tdip->pmp);
        hammer2_trans_init(&trans, tdip->pmp, 0);

        /*
         * ip is the inode being renamed.  If this is a hardlink then
         * ip represents the actual file and not the hardlink marker.
         */
        ip = VTOI(fncp->nc_vp);
        cluster = NULL;

        /*
         * The common parent directory must be locked first to avoid deadlocks.
         * Also note that fdip and/or tdip might match cdip.
         *
         * WARNING! fdip may not match ip->pip.  That is, if the source file
         *          is already a hardlink then what we are renaming is the
         *          hardlink pointer, not the hardlink itself.  The hardlink
         *          directory (ip->pip) will already be at a common parent
         *          of fdip.
         *
         *          Be sure to use ip->pip when finding the common parent
         *          against tdip or we might accidentally move the hardlink
         *          target into a subdirectory that makes it inaccessible to
         *          other pointers.
         */
        cdip = hammer2_inode_common_parent(ip->pip, tdip);
        hammer2_inode_lock(cdip, HAMMER2_RESOLVE_ALWAYS);
        hammer2_inode_lock(fdip, HAMMER2_RESOLVE_ALWAYS);
        hammer2_inode_lock(tdip, HAMMER2_RESOLVE_ALWAYS);
        cdcluster = hammer2_inode_cluster(cdip, HAMMER2_RESOLVE_ALWAYS);
        fdcluster = hammer2_inode_cluster(fdip, HAMMER2_RESOLVE_ALWAYS);
        tdcluster = hammer2_inode_cluster(tdip, HAMMER2_RESOLVE_ALWAYS);

        /*
         * Keep a tight grip on the inode so the temporary unlinking from
         * the source location prior to linking to the target location
         * does not cause the cluster to be destroyed.
         *
         * NOTE: To avoid deadlocks we cannot lock (ip) while we are
         *       unlinking elements from their directories.  Locking
         *       the nlinks field does not lock the whole inode.
         */
        hammer2_inode_ref(ip);

        /*
         * Remove target if it exists.
         */
        error = hammer2_unlink_file(&trans, tdip, NULL, tname, tname_len,
                                    -1, NULL, ap->a_tnch, -1);
        tnch_error = error;
        if (error && error != ENOENT)
                goto done2;

        /*
         * When renaming a hardlinked file we may have to re-consolidate
         * the location of the hardlink target.
         *
         * If ip represents a regular file the consolidation code essentially
         * does nothing other than return the same locked cluster that was
         * passed in.
         *
         * The returned cluster will be locked.
         *
         * WARNING! We do not currently have a local copy of ipdata but
         *          we do use one later; remember that it must be reloaded
         *          on any modification to the inode, including connects.
         */
        hammer2_inode_lock(ip, HAMMER2_RESOLVE_ALWAYS);
        cluster = hammer2_inode_cluster(ip, HAMMER2_RESOLVE_ALWAYS);
        error = hammer2_hardlink_consolidate(&trans, ip, &cluster,
                                             cdip, cdcluster, 0);
        if (error)
                goto done1;

        /*
         * Disconnect (fdip, fname) from the source directory.  This will
         * disconnect (ip) if it represents a direct file.  If (ip) represents
         * a hardlink the HARDLINK pointer object will be removed but the
         * hardlink will stay intact.
         *
         * Always pass nch as NULL because we intend to reconnect the inode,
         * so we don't want hammer2_unlink_file() to rename it to the hidden
         * open-but-unlinked directory.
         *
         * The target cluster may be marked DELETED but will not be destroyed
         * since we retain our hold on ip and cluster.
         *
         * NOTE: We pass nlinks as 0 (not -1) in order to retain the file's
         *       link count.
         */
        error = hammer2_unlink_file(&trans, fdip, ip, fname, fname_len,
                                    -1, &hlink, NULL, 0);
        KKASSERT(error != EAGAIN);
        if (error)
                goto done1;

        /*
         * Reconnect ip to target directory using cluster.  Chains cannot
         * actually be moved, so this will duplicate the cluster in the new
         * spot and assign it to the ip, replacing the old cluster.
         *
         * WARNING: Because recursive locks are allowed and we unlinked the
         *          file that we have a cluster-in-hand for just above, the
         *          cluster might have been delete-duplicated.  We must
         *          refactor the cluster.
         *
         * WARNING: Chain locks can lock buffer cache buffers, to avoid
         *          deadlocks we want to unlock before issuing a cache_*()
         *          op (that might have to lock a vnode).
         *
         * NOTE: Pass nlinks as 0 because we retained the link count from
         *       the unlink, so we do not have to modify it.
         */
        error = hammer2_inode_connect(&trans,
                                      ip, &cluster, hlink,
                                      tdip, tdcluster,
                                      tname, tname_len, 0);
        if (error == 0) {
                KKASSERT(cluster != NULL);
                hammer2_inode_repoint(ip, (hlink ? ip->pip : tdip), cluster);
        }
done1:
        hammer2_inode_unlock(ip, cluster);
done2:
        hammer2_inode_unlock(tdip, tdcluster);
        hammer2_inode_unlock(fdip, fdcluster);
        hammer2_inode_unlock(cdip, cdcluster);
        hammer2_inode_drop(ip);
        hammer2_inode_drop(cdip);
        hammer2_run_unlinkq(&trans, fdip->pmp);
        hammer2_trans_done(&trans);

        /*
         * Issue the namecache update after unlocking all the internal
         * hammer structures, otherwise we might deadlock.
         */
        if (tnch_error == 0) {
                cache_unlink(ap->a_tnch);
                cache_setunresolved(ap->a_tnch);
        }
        if (error == 0)
                cache_rename(ap->a_fnch, ap->a_tnch);

        LOCKSTOP;
        return (error);
}
/*
 * hammer2_vop_ioctl { vp, command, data, fflag, cred }
 */
static
int
hammer2_vop_ioctl(struct vop_ioctl_args *ap)
{
        hammer2_inode_t *ip;
        int error;

        LOCKSTART;
        ip = VTOI(ap->a_vp);

        error = hammer2_ioctl(ip, ap->a_command, (void *)ap->a_data,
                              ap->a_fflag, ap->a_cred);
        LOCKSTOP;
        return (error);
}
static
int
hammer2_vop_mountctl(struct vop_mountctl_args *ap)
{
        struct mount *mp;
        hammer2_pfs_t *pmp;
        int rc;

        LOCKSTART;
        switch (ap->a_op) {
        case (MOUNTCTL_SET_EXPORT):
                mp = ap->a_head.a_ops->head.vv_mount;
                pmp = MPTOPMP(mp);

                if (ap->a_ctllen != sizeof(struct export_args))
                        rc = (EINVAL);
                else
                        rc = vfs_export(mp, &pmp->export,
                                        (const struct export_args *)ap->a_ctl);
                break;
        default:
                rc = vop_stdmountctl(ap);
                break;
        }
        LOCKSTOP;
        return (rc);
}
/*
 * This handles unlinked open files after the vnode is finally dereferenced.
 * To avoid deadlocks it cannot be called from the normal vnode recycling
 * path, so we call it (1) after an unlink, rmdir, or rename, (2) on every
 * flush, and (3) on umount.
 */
void
hammer2_run_unlinkq(hammer2_trans_t *trans, hammer2_pfs_t *pmp)
{
        const hammer2_inode_data_t *ripdata;
        hammer2_inode_unlink_t *ipul;
        hammer2_inode_t *ip;
        hammer2_cluster_t *cluster;
        hammer2_cluster_t *cparent;

        if (TAILQ_EMPTY(&pmp->unlinkq))
                return;

        LOCKSTART;
        hammer2_spin_ex(&pmp->list_spin);
        while ((ipul = TAILQ_FIRST(&pmp->unlinkq)) != NULL) {
                TAILQ_REMOVE(&pmp->unlinkq, ipul, entry);
                hammer2_spin_unex(&pmp->list_spin);
                ip = ipul->ip;
                kfree(ipul, pmp->minode);

                hammer2_inode_lock(ip, HAMMER2_RESOLVE_ALWAYS);
                cluster = hammer2_inode_cluster(ip, HAMMER2_RESOLVE_ALWAYS);
                ripdata = &hammer2_cluster_rdata(cluster)->ipdata;
                if (hammer2_debug & 0x400) {
                        kprintf("hammer2: unlink on reclaim: %s refs=%d\n",
                                ripdata->filename, ip->refs);
                }

                /*
                 * NOTE: Due to optimizations to avoid I/O on the inode for
                 *       the last unlink, ripdata->nlinks is not necessarily
                 *       0 here.
                 */
                /* KKASSERT(ripdata->nlinks == 0); (see NOTE) */
                cparent = hammer2_cluster_parent(cluster);
                hammer2_cluster_delete(trans, cparent, cluster,
                                       HAMMER2_DELETE_PERMANENT);
                hammer2_cluster_unlock(cparent);
                hammer2_cluster_drop(cparent);
                hammer2_inode_unlock(ip, cluster);      /* inode lock */
                hammer2_inode_drop(ip);                 /* ipul ref */

                hammer2_spin_ex(&pmp->list_spin);
        }
        hammer2_spin_unex(&pmp->list_spin);
        LOCKSTOP;
}
/*
 * KQFILTER
 */
static void filt_hammer2detach(struct knote *kn);
static int filt_hammer2read(struct knote *kn, long hint);
static int filt_hammer2write(struct knote *kn, long hint);
static int filt_hammer2vnode(struct knote *kn, long hint);
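/*
 * Each filterops below supplies { flags, attach, detach, event }: no
 * attach callback (hammer2_vop_kqfilter() performs the knote_insert()
 * itself), a shared detach, and a per-filter event handler (an editor's
 * note on the initializers that follow).
 */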
static struct filterops hammer2read_filtops =
        { FILTEROP_ISFD | FILTEROP_MPSAFE,
          NULL, filt_hammer2detach, filt_hammer2read };
static struct filterops hammer2write_filtops =
        { FILTEROP_ISFD | FILTEROP_MPSAFE,
          NULL, filt_hammer2detach, filt_hammer2write };
static struct filterops hammer2vnode_filtops =
        { FILTEROP_ISFD | FILTEROP_MPSAFE,
          NULL, filt_hammer2detach, filt_hammer2vnode };

static
int
hammer2_vop_kqfilter(struct vop_kqfilter_args *ap)
{
        struct vnode *vp = ap->a_vp;
        struct knote *kn = ap->a_kn;

        switch (kn->kn_filter) {
        case EVFILT_READ:
                kn->kn_fop = &hammer2read_filtops;
                break;
        case EVFILT_WRITE:
                kn->kn_fop = &hammer2write_filtops;
                break;
        case EVFILT_VNODE:
                kn->kn_fop = &hammer2vnode_filtops;
                break;
        default:
                return (EOPNOTSUPP);
        }

        kn->kn_hook = (caddr_t)vp;

        knote_insert(&vp->v_pollinfo.vpi_kqinfo.ki_note, kn);

        return(0);
}
static void
filt_hammer2detach(struct knote *kn)
{
        struct vnode *vp = (void *)kn->kn_hook;

        knote_remove(&vp->v_pollinfo.vpi_kqinfo.ki_note, kn);
}

static int
filt_hammer2read(struct knote *kn, long hint)
{
        struct vnode *vp = (void *)kn->kn_hook;
        hammer2_inode_t *ip = VTOI(vp);
        off_t off;

        if (hint == NOTE_REVOKE) {
                kn->kn_flags |= (EV_EOF | EV_NODATA | EV_ONESHOT);
                return(1);
        }
        off = ip->meta.size - kn->kn_fp->f_offset;
        kn->kn_data = (off < INTPTR_MAX) ? off : INTPTR_MAX;
        if (kn->kn_sfflags & NOTE_OLDAPI)
                return(1);
        return (kn->kn_data != 0);
}

static int
filt_hammer2write(struct knote *kn, long hint)
{
        if (hint == NOTE_REVOKE)
                kn->kn_flags |= (EV_EOF | EV_NODATA | EV_ONESHOT);
        kn->kn_data = 0;
        return (1);
}

static int
filt_hammer2vnode(struct knote *kn, long hint)
{
        if (kn->kn_sfflags & hint)
                kn->kn_fflags |= hint;
        if (hint == NOTE_REVOKE) {
                kn->kn_flags |= (EV_EOF | EV_NODATA);
                return (1);
        }
        return (kn->kn_fflags != 0);
}
/*
 * FIFO VOPS
 */
static
int
hammer2_vop_markatime(struct vop_markatime_args *ap)
{
        hammer2_inode_t *ip;
        struct vnode *vp;

        vp = ap->a_vp;
        ip = VTOI(vp);

        if (ip->pmp->ronly)
                return(EROFS);
        return(0);
}

static
int
hammer2_vop_fifokqfilter(struct vop_kqfilter_args *ap)
{
        int error;

        error = VOCALL(&fifo_vnode_vops, &ap->a_head);
        if (error)
                error = hammer2_vop_kqfilter(ap);
        return(error);
}
/*
 * VOPS vector
 */
struct vop_ops hammer2_vnode_vops = {
        .vop_default    = vop_defaultop,
        .vop_fsync      = hammer2_vop_fsync,
        .vop_getpages   = vop_stdgetpages,
        .vop_putpages   = vop_stdputpages,
        .vop_access     = hammer2_vop_access,
        .vop_advlock    = hammer2_vop_advlock,
        .vop_close      = hammer2_vop_close,
        .vop_nlink      = hammer2_vop_nlink,
        .vop_ncreate    = hammer2_vop_ncreate,
        .vop_nsymlink   = hammer2_vop_nsymlink,
        .vop_nremove    = hammer2_vop_nremove,
        .vop_nrmdir     = hammer2_vop_nrmdir,
        .vop_nrename    = hammer2_vop_nrename,
        .vop_getattr    = hammer2_vop_getattr,
        .vop_setattr    = hammer2_vop_setattr,
        .vop_readdir    = hammer2_vop_readdir,
        .vop_readlink   = hammer2_vop_readlink,
        .vop_read       = hammer2_vop_read,
        .vop_write      = hammer2_vop_write,
        .vop_open       = hammer2_vop_open,
        .vop_inactive   = hammer2_vop_inactive,
        .vop_reclaim    = hammer2_vop_reclaim,
        .vop_nresolve   = hammer2_vop_nresolve,
        .vop_nlookupdotdot = hammer2_vop_nlookupdotdot,
        .vop_nmkdir     = hammer2_vop_nmkdir,
        .vop_nmknod     = hammer2_vop_nmknod,
        .vop_ioctl      = hammer2_vop_ioctl,
        .vop_mountctl   = hammer2_vop_mountctl,
        .vop_bmap       = hammer2_vop_bmap,
        .vop_strategy   = hammer2_vop_strategy,
        .vop_kqfilter   = hammer2_vop_kqfilter
};

struct vop_ops hammer2_spec_vops = {
        .vop_default    = vop_defaultop,
        .vop_fsync      = hammer2_vop_fsync,
        .vop_read       = vop_stdnoread,
        .vop_write      = vop_stdnowrite,
        .vop_access     = hammer2_vop_access,
        .vop_close      = hammer2_vop_close,
        .vop_markatime  = hammer2_vop_markatime,
        .vop_getattr    = hammer2_vop_getattr,
        .vop_inactive   = hammer2_vop_inactive,
        .vop_reclaim    = hammer2_vop_reclaim,
        .vop_setattr    = hammer2_vop_setattr
};

struct vop_ops hammer2_fifo_vops = {
        .vop_default    = fifo_vnoperate,
        .vop_fsync      = hammer2_vop_fsync,
#if 0
        .vop_read       = hammer2_vop_fiforead,
        .vop_write      = hammer2_vop_fifowrite,
#endif
        .vop_access     = hammer2_vop_access,
#if 0
        .vop_close      = hammer2_vop_fifoclose,
#endif
        .vop_markatime  = hammer2_vop_markatime,
        .vop_getattr    = hammer2_vop_getattr,
        .vop_inactive   = hammer2_vop_inactive,
        .vop_reclaim    = hammer2_vop_reclaim,
        .vop_setattr    = hammer2_vop_setattr,
        .vop_kqfilter   = hammer2_vop_fifokqfilter
};