hammer2: Add required check to hammer2_vop_nlink()
[dragonfly.git] / sys/vfs/hammer2/hammer2_vnops.c
blob ab72706223112b4710a325b40a13cea53edaa745
/*
 * Copyright (c) 2011-2015 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@dragonflybsd.org>
 * by Venkatesh Srinivas <vsrinivas@dragonflybsd.org>
 * by Daniel Flores (GSOC 2013 - mentored by Matthew Dillon, compression)
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
/*
 * Kernel Filesystem interface
 *
 * NOTE! local ipdata pointers must be reloaded on any modifying operation
 *	 to the inode as its underlying chain may have changed.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/fcntl.h>
#include <sys/buf.h>
#include <sys/proc.h>
#include <sys/namei.h>
#include <sys/mount.h>
#include <sys/vnode.h>
#include <sys/mountctl.h>
#include <sys/dirent.h>
#include <sys/uio.h>
#include <sys/objcache.h>
#include <sys/event.h>
#include <sys/file.h>
#include <vfs/fifofs/fifo.h>

#include "hammer2.h"

static int hammer2_read_file(hammer2_inode_t *ip, struct uio *uio,
				int seqcount);
static int hammer2_write_file(hammer2_inode_t *ip, struct uio *uio,
				int ioflag, int seqcount);
static void hammer2_extend_file(hammer2_inode_t *ip, hammer2_key_t nsize);
static void hammer2_truncate_file(hammer2_inode_t *ip, hammer2_key_t nsize);

struct objcache *cache_xops;

static __inline
void
hammer2_knote(struct vnode *vp, int flags)
{
	if (flags)
		KNOTE(&vp->v_pollinfo.vpi_kqinfo.ki_note, flags);
}

/*
 * Last reference to a vnode is going away but it is still cached.
 */
static
int
hammer2_vop_inactive(struct vop_inactive_args *ap)
{
	hammer2_inode_t *ip;
	struct vnode *vp;

	LOCKSTART;
	vp = ap->a_vp;
	ip = VTOI(vp);

	/*
	 * Degenerate case
	 */
	if (ip == NULL) {
		vrecycle(vp);
		LOCKSTOP;
		return (0);
	}

	/*
	 * Check for deleted inodes and recycle immediately on the last
	 * release.  Be sure to destroy any left-over buffer cache buffers
	 * so we do not waste time trying to flush them.
	 *
	 * Note that deleting the file block chains under the inode chain
	 * would just be a waste of energy, so don't do it.
	 *
	 * WARNING: nvtruncbuf() can only be safely called without the inode
	 *	    lock held due to the way our write thread works.
	 */
	if (ip->flags & HAMMER2_INODE_ISUNLINKED) {
		hammer2_key_t lbase;
		int nblksize;

		/*
		 * Detect updates to the embedded data which may be
		 * synchronized by the strategy code.  Simply mark the
		 * inode modified so it gets picked up by our normal flush.
		 */
		nblksize = hammer2_calc_logical(ip, 0, &lbase, NULL);
		nvtruncbuf(vp, 0, nblksize, 0, 0);
		vrecycle(vp);
	}
	LOCKSTOP;
	return (0);
}

/*
 * Reclaim a vnode so that it can be reused; after the inode is
 * disassociated, the filesystem must manage it alone.
 */
static
int
hammer2_vop_reclaim(struct vop_reclaim_args *ap)
{
	hammer2_inode_t *ip;
	hammer2_pfs_t *pmp;
	struct vnode *vp;

	LOCKSTART;
	vp = ap->a_vp;
	ip = VTOI(vp);
	if (ip == NULL) {
		LOCKSTOP;
		return(0);
	}
	pmp = ip->pmp;

	/*
	 * The final close of a deleted file or directory marks it for
	 * destruction.  The DELETED flag allows the flusher to shortcut
	 * any modified blocks still unflushed (that is, just ignore them).
	 *
	 * HAMMER2 usually does not try to optimize the freemap by returning
	 * deleted blocks to it as it does not usually know how many snapshots
	 * might be referencing portions of the file/dir.
	 */
	vp->v_data = NULL;
	ip->vp = NULL;

	/*
	 * NOTE! We do not attempt to flush chains here, flushing is
	 *	 really fragile and could also deadlock.
	 */
	vclrisdirty(vp);

	/*
	 * This occurs if the inode was unlinked while open.  Reclamation of
	 * these inodes requires processing we cannot safely do here so add
	 * the inode to the sideq in that situation.
	 *
	 * A modified inode may require chain synchronization which will no
	 * longer be driven by a sync or fsync without the vnode, also use
	 * the sideq for that.
	 *
	 * A reclaim can occur at any time so we cannot safely start a
	 * transaction to handle reclamation of unlinked files.  Instead,
	 * the ip is left with a reference and placed on a linked list and
	 * handled later on.
	 */
	if ((ip->flags & (HAMMER2_INODE_ISUNLINKED |
			  HAMMER2_INODE_MODIFIED |
			  HAMMER2_INODE_RESIZED)) &&
	    (ip->flags & HAMMER2_INODE_ISDELETED) == 0) {
		hammer2_inode_sideq_t *ipul;

		ipul = kmalloc(sizeof(*ipul), pmp->minode, M_WAITOK | M_ZERO);
		ipul->ip = ip;

		hammer2_spin_ex(&pmp->list_spin);
		if ((ip->flags & HAMMER2_INODE_ONSIDEQ) == 0) {
			/* ref -> sideq */
			atomic_set_int(&ip->flags, HAMMER2_INODE_ONSIDEQ);
			TAILQ_INSERT_TAIL(&pmp->sideq, ipul, entry);
			hammer2_spin_unex(&pmp->list_spin);
		} else {
			hammer2_spin_unex(&pmp->list_spin);
			kfree(ipul, pmp->minode);
			hammer2_inode_drop(ip);	/* vp ref */
		}
		/* retain ref from vp for ipul */
	} else {
		hammer2_inode_drop(ip);		/* vp ref */
	}

	/*
	 * XXX handle background sync when ip dirty, kernel will no longer
	 * notify us regarding this inode because there is no longer a
	 * vnode attached to it.
	 */

	LOCKSTOP;
	return (0);
}

static
int
hammer2_vop_fsync(struct vop_fsync_args *ap)
{
	hammer2_inode_t *ip;
	struct vnode *vp;

	LOCKSTART;
	vp = ap->a_vp;
	ip = VTOI(vp);

#if 0
	/* XXX can't do this yet */
	hammer2_trans_init(ip->pmp, HAMMER2_TRANS_ISFLUSH);
	vfsync(vp, ap->a_waitfor, 1, NULL, NULL);
#endif
	hammer2_trans_init(ip->pmp, 0);
	vfsync(vp, ap->a_waitfor, 1, NULL, NULL);

	/*
	 * Calling chain_flush here creates a lot of duplicative
	 * COW operations due to non-optimal vnode ordering.
	 *
	 * Only do it for an actual fsync() syscall.  The other forms
	 * which call this function will eventually call chain_flush
	 * on the volume root as a catch-all, which is far more optimal.
	 */
	hammer2_inode_lock(ip, 0);
	if (ip->flags & HAMMER2_INODE_MODIFIED)
		hammer2_inode_chain_sync(ip);
	hammer2_inode_unlock(ip);
	hammer2_trans_done(ip->pmp);

	LOCKSTOP;
	return (0);
}

static
int
hammer2_vop_access(struct vop_access_args *ap)
{
	hammer2_inode_t *ip = VTOI(ap->a_vp);
	uid_t uid;
	gid_t gid;
	int error;

	LOCKSTART;
	hammer2_inode_lock(ip, HAMMER2_RESOLVE_SHARED);
	uid = hammer2_to_unix_xid(&ip->meta.uid);
	gid = hammer2_to_unix_xid(&ip->meta.gid);
	error = vop_helper_access(ap, uid, gid, ip->meta.mode, ip->meta.uflags);
	hammer2_inode_unlock(ip);

	LOCKSTOP;
	return (error);
}

static
int
hammer2_vop_getattr(struct vop_getattr_args *ap)
{
	hammer2_pfs_t *pmp;
	hammer2_inode_t *ip;
	struct vnode *vp;
	struct vattr *vap;
	hammer2_chain_t *chain;
	int i;

	LOCKSTART;
	vp = ap->a_vp;
	vap = ap->a_vap;

	ip = VTOI(vp);
	pmp = ip->pmp;

	hammer2_inode_lock(ip, HAMMER2_RESOLVE_SHARED);

	vap->va_fsid = pmp->mp->mnt_stat.f_fsid.val[0];
	vap->va_fileid = ip->meta.inum;
	vap->va_mode = ip->meta.mode;
	vap->va_nlink = ip->meta.nlinks;
	vap->va_uid = hammer2_to_unix_xid(&ip->meta.uid);
	vap->va_gid = hammer2_to_unix_xid(&ip->meta.gid);
	vap->va_rmajor = 0;
	vap->va_rminor = 0;
	vap->va_size = ip->meta.size;	/* protected by shared lock */
	vap->va_blocksize = HAMMER2_PBUFSIZE;
	vap->va_flags = ip->meta.uflags;
	hammer2_time_to_timespec(ip->meta.ctime, &vap->va_ctime);
	hammer2_time_to_timespec(ip->meta.mtime, &vap->va_mtime);
	hammer2_time_to_timespec(ip->meta.mtime, &vap->va_atime);
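	/*
	 * (hammer2 does not maintain a separate atime; mtime is reported
	 *  in its place, and the setattr path below ignores atime
	 *  updates as well.)
	 */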
	vap->va_gen = 1;
	vap->va_bytes = 0;
	if (ip->meta.type == HAMMER2_OBJTYPE_DIRECTORY) {
		/*
		 * Can't really calculate directory use sans the files under
		 * it, just assume one block for now.
		 */
		vap->va_bytes += HAMMER2_INODE_BYTES;
	} else {
		for (i = 0; i < ip->cluster.nchains; ++i) {
			if ((chain = ip->cluster.array[i].chain) != NULL) {
				if (vap->va_bytes < chain->bref.data_count)
					vap->va_bytes = chain->bref.data_count;
			}
		}
	}
	vap->va_type = hammer2_get_vtype(ip->meta.type);
	vap->va_filerev = 0;
	vap->va_uid_uuid = ip->meta.uid;
	vap->va_gid_uuid = ip->meta.gid;
	vap->va_vaflags = VA_UID_UUID_VALID | VA_GID_UUID_VALID |
			  VA_FSID_UUID_VALID;

	hammer2_inode_unlock(ip);

	LOCKSTOP;
	return (0);
}

static
int
hammer2_vop_setattr(struct vop_setattr_args *ap)
{
	hammer2_inode_t *ip;
	struct vnode *vp;
	struct vattr *vap;
	int error;
	int kflags = 0;
	uint64_t ctime;

	LOCKSTART;
	vp = ap->a_vp;
	vap = ap->a_vap;
	hammer2_update_time(&ctime);

	ip = VTOI(vp);

	if (ip->pmp->ronly) {
		LOCKSTOP;
		return(EROFS);
	}

	hammer2_pfs_memory_wait(ip->pmp);
	hammer2_trans_init(ip->pmp, 0);
	hammer2_inode_lock(ip, 0);
	error = 0;

	if (vap->va_flags != VNOVAL) {
		uint32_t flags;

		flags = ip->meta.uflags;
		error = vop_helper_setattr_flags(&flags, vap->va_flags,
					hammer2_to_unix_xid(&ip->meta.uid),
					ap->a_cred);
		if (error == 0) {
			if (ip->meta.uflags != flags) {
				hammer2_inode_modify(ip);
				ip->meta.uflags = flags;
				ip->meta.ctime = ctime;
				kflags |= NOTE_ATTRIB;
			}
			if (ip->meta.uflags & (IMMUTABLE | APPEND)) {
				error = 0;
				goto done;
			}
		}
		goto done;
	}
	if (ip->meta.uflags & (IMMUTABLE | APPEND)) {
		error = EPERM;
		goto done;
	}
	if (vap->va_uid != (uid_t)VNOVAL || vap->va_gid != (gid_t)VNOVAL) {
		mode_t cur_mode = ip->meta.mode;
		uid_t cur_uid = hammer2_to_unix_xid(&ip->meta.uid);
		gid_t cur_gid = hammer2_to_unix_xid(&ip->meta.gid);
		uuid_t uuid_uid;
		uuid_t uuid_gid;

		error = vop_helper_chown(ap->a_vp, vap->va_uid, vap->va_gid,
					 ap->a_cred,
					 &cur_uid, &cur_gid, &cur_mode);
		if (error == 0) {
			hammer2_guid_to_uuid(&uuid_uid, cur_uid);
			hammer2_guid_to_uuid(&uuid_gid, cur_gid);
			if (bcmp(&uuid_uid, &ip->meta.uid, sizeof(uuid_uid)) ||
			    bcmp(&uuid_gid, &ip->meta.gid, sizeof(uuid_gid)) ||
			    ip->meta.mode != cur_mode
			) {
				hammer2_inode_modify(ip);
				ip->meta.uid = uuid_uid;
				ip->meta.gid = uuid_gid;
				ip->meta.mode = cur_mode;
				ip->meta.ctime = ctime;
			}
			kflags |= NOTE_ATTRIB;
		}
	}

	/*
	 * Resize the file
	 */
	if (vap->va_size != VNOVAL && ip->meta.size != vap->va_size) {
		switch(vp->v_type) {
		case VREG:
			if (vap->va_size == ip->meta.size)
				break;
			if (vap->va_size < ip->meta.size) {
				hammer2_mtx_ex(&ip->truncate_lock);
				hammer2_truncate_file(ip, vap->va_size);
				hammer2_mtx_unlock(&ip->truncate_lock);
			} else {
				hammer2_extend_file(ip, vap->va_size);
			}
			hammer2_inode_modify(ip);
			ip->meta.mtime = ctime;
			break;
		default:
			error = EINVAL;
			goto done;
		}
	}
#if 0
	/* atime not supported */
	if (vap->va_atime.tv_sec != VNOVAL) {
		hammer2_inode_modify(ip);
		ip->meta.atime = hammer2_timespec_to_time(&vap->va_atime);
		kflags |= NOTE_ATTRIB;
	}
#endif
	if (vap->va_mode != (mode_t)VNOVAL) {
		mode_t cur_mode = ip->meta.mode;
		uid_t cur_uid = hammer2_to_unix_xid(&ip->meta.uid);
		gid_t cur_gid = hammer2_to_unix_xid(&ip->meta.gid);

		error = vop_helper_chmod(ap->a_vp, vap->va_mode, ap->a_cred,
					 cur_uid, cur_gid, &cur_mode);
		if (error == 0 && ip->meta.mode != cur_mode) {
			hammer2_inode_modify(ip);
			ip->meta.mode = cur_mode;
			ip->meta.ctime = ctime;
			kflags |= NOTE_ATTRIB;
		}
	}

	if (vap->va_mtime.tv_sec != VNOVAL) {
		hammer2_inode_modify(ip);
		ip->meta.mtime = hammer2_timespec_to_time(&vap->va_mtime);
		kflags |= NOTE_ATTRIB;
	}

done:
	/*
	 * If a truncation occurred we must call inode_fsync() now in order
	 * to trim the related data chains, otherwise a later expansion can
	 * cause havoc.
	 *
	 * If an extend occurred that changed the DIRECTDATA state, we must
	 * call inode_fsync now in order to prepare the inode's indirect
	 * block table.
	 */
	if (ip->flags & HAMMER2_INODE_RESIZED)
		hammer2_inode_chain_sync(ip);

	/*
	 * Cleanup.
	 */
	hammer2_inode_unlock(ip);
	hammer2_trans_done(ip->pmp);
	hammer2_knote(ip->vp, kflags);

	LOCKSTOP;
	return (error);
}

static
int
hammer2_vop_readdir(struct vop_readdir_args *ap)
{
	hammer2_xop_readdir_t *xop;
	hammer2_blockref_t bref;
	hammer2_inode_t *ip;
	hammer2_tid_t inum;
	hammer2_key_t lkey;
	struct uio *uio;
	off_t *cookies;
	off_t saveoff;
	int cookie_index;
	int ncookies;
	int error;
	int eofflag;
	int dtype;
	int r;

	LOCKSTART;
	ip = VTOI(ap->a_vp);
	uio = ap->a_uio;
	saveoff = uio->uio_offset;
	eofflag = 0;
	error = 0;

	/*
	 * Setup directory entry cookies if requested
	 */
	if (ap->a_ncookies) {
		ncookies = uio->uio_resid / 16 + 1;
		if (ncookies > 1024)
			ncookies = 1024;
		cookies = kmalloc(ncookies * sizeof(off_t), M_TEMP, M_WAITOK);
	} else {
		ncookies = -1;
		cookies = NULL;
	}
	cookie_index = 0;
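	/*
	 * (The ncookies estimate above assumes each returned dirent
	 *  consumes at least 16 bytes of uio space; the cap of 1024
	 *  merely bounds the temporary cookie allocation.)
	 */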

	hammer2_inode_lock(ip, HAMMER2_RESOLVE_SHARED);

	/*
	 * Handle artificial entries.  To ensure that only positive 64 bit
	 * quantities are returned to userland we always strip off bit 63.
	 * The hash code is designed such that codes 0x0000-0x7FFF are not
	 * used, allowing us to use these codes for artificial entries.
	 *
	 * Entry 0 is used for '.' and entry 1 is used for '..'.  Do not
	 * allow '..' to cross the mount point into (e.g.) the super-root.
	 */
	if (saveoff == 0) {
		inum = ip->meta.inum & HAMMER2_DIRHASH_USERMSK;
		r = vop_write_dirent(&error, uio, inum, DT_DIR, 1, ".");
		if (r)
			goto done;
		if (cookies)
			cookies[cookie_index] = saveoff;
		++saveoff;
		++cookie_index;
		if (cookie_index == ncookies)
			goto done;
	}

	if (saveoff == 1) {
		/*
		 * Be careful with lockorder when accessing ".."
		 *
		 * (ip is the current dir. xip is the parent dir).
		 */
		inum = ip->meta.inum & HAMMER2_DIRHASH_USERMSK;
		if (ip != ip->pmp->iroot)
			inum = ip->meta.iparent & HAMMER2_DIRHASH_USERMSK;
		r = vop_write_dirent(&error, uio, inum, DT_DIR, 2, "..");
		if (r)
			goto done;
		if (cookies)
			cookies[cookie_index] = saveoff;
		++saveoff;
		++cookie_index;
		if (cookie_index == ncookies)
			goto done;
	}

	lkey = saveoff | HAMMER2_DIRHASH_VISIBLE;
	if (hammer2_debug & 0x0020)
		kprintf("readdir: lkey %016jx\n", lkey);
	if (error)
		goto done;

	/*
	 * Use XOP for cluster scan.
	 *
	 * parent is the inode cluster, already locked for us.  Don't
	 * double lock shared locks as this will screw up upgrades.
	 */
	xop = hammer2_xop_alloc(ip, 0);
	xop->lkey = lkey;
	hammer2_xop_start(&xop->head, hammer2_xop_readdir);

	for (;;) {
		const hammer2_inode_data_t *ripdata;

		error = hammer2_xop_collect(&xop->head, 0);
		if (error)
			break;
		if (cookie_index == ncookies)
			break;
		if (hammer2_debug & 0x0020)
			kprintf("cluster chain %p %p\n",
				xop->head.cluster.focus,
				(xop->head.cluster.focus ?
				 xop->head.cluster.focus->data : (void *)-1));
		ripdata = &hammer2_cluster_rdata(&xop->head.cluster)->ipdata;
		hammer2_cluster_bref(&xop->head.cluster, &bref);
		if (bref.type == HAMMER2_BREF_TYPE_INODE) {
			dtype = hammer2_get_dtype(ripdata);
			saveoff = bref.key & HAMMER2_DIRHASH_USERMSK;
			r = vop_write_dirent(&error, uio,
					     ripdata->meta.inum &
					      HAMMER2_DIRHASH_USERMSK,
					     dtype,
					     ripdata->meta.name_len,
					     ripdata->filename);
			if (r)
				break;
			if (cookies)
				cookies[cookie_index] = saveoff;
			++cookie_index;
		} else {
			/* XXX chain error */
			kprintf("bad chain type readdir %d\n", bref.type);
		}
	}
	hammer2_xop_retire(&xop->head, HAMMER2_XOPMASK_VOP);
	if (error == ENOENT) {
		error = 0;
		eofflag = 1;
		saveoff = (hammer2_key_t)-1;
	} else {
		saveoff = bref.key & HAMMER2_DIRHASH_USERMSK;
	}
done:
	hammer2_inode_unlock(ip);
	if (ap->a_eofflag)
		*ap->a_eofflag = eofflag;
	if (hammer2_debug & 0x0020)
		kprintf("readdir: done at %016jx\n", saveoff);
	uio->uio_offset = saveoff & ~HAMMER2_DIRHASH_VISIBLE;
	if (error && cookie_index == 0) {
		if (cookies) {
			kfree(cookies, M_TEMP);
			*ap->a_ncookies = 0;
			*ap->a_cookies = NULL;
		}
	} else {
		if (cookies) {
			*ap->a_ncookies = cookie_index;
			*ap->a_cookies = cookies;
		}
	}
	LOCKSTOP;
	return (error);
}

/*
 * hammer2_vop_readlink { vp, uio, cred }
 */
static
int
hammer2_vop_readlink(struct vop_readlink_args *ap)
{
	struct vnode *vp;
	hammer2_inode_t *ip;
	int error;

	vp = ap->a_vp;
	if (vp->v_type != VLNK)
		return (EINVAL);
	ip = VTOI(vp);

	error = hammer2_read_file(ip, ap->a_uio, 0);
	return (error);
}

static
int
hammer2_vop_read(struct vop_read_args *ap)
{
	struct vnode *vp;
	hammer2_inode_t *ip;
	struct uio *uio;
	int error;
	int seqcount;
	int bigread;

	/*
	 * Read operations supported on this vnode?
	 */
	vp = ap->a_vp;
	if (vp->v_type != VREG)
		return (EINVAL);

	/*
	 * Misc
	 */
	ip = VTOI(vp);
	uio = ap->a_uio;
	error = 0;

	seqcount = ap->a_ioflag >> 16;
	bigread = (uio->uio_resid > 100 * 1024 * 1024);

	error = hammer2_read_file(ip, uio, seqcount);
	return (error);
}

static
int
hammer2_vop_write(struct vop_write_args *ap)
{
	hammer2_inode_t *ip;
	thread_t td;
	struct vnode *vp;
	struct uio *uio;
	int error;
	int seqcount;

	/*
	 * Write operations supported on this vnode?
	 */
	vp = ap->a_vp;
	if (vp->v_type != VREG)
		return (EINVAL);

	/*
	 * Misc
	 */
	ip = VTOI(vp);
	uio = ap->a_uio;
	error = 0;
	if (ip->pmp->ronly) {
		return (EROFS);
	}

	seqcount = ap->a_ioflag >> 16;

	/*
	 * Check resource limit
	 */
	if (uio->uio_resid > 0 && (td = uio->uio_td) != NULL && td->td_proc &&
	    uio->uio_offset + uio->uio_resid >
	     td->td_proc->p_rlimit[RLIMIT_FSIZE].rlim_cur) {
		lwpsignal(td->td_proc, td->td_lwp, SIGXFSZ);
		return (EFBIG);
	}

	/*
	 * The transaction interlocks against flush initiations
	 * (note: but will run concurrently with the actual flush).
	 */
	hammer2_trans_init(ip->pmp, 0);
	error = hammer2_write_file(ip, uio, ap->a_ioflag, seqcount);
	hammer2_trans_done(ip->pmp);

	return (error);
}

/*
 * Perform read operations on a file or symlink given an UNLOCKED
 * inode and uio.
 *
 * The passed ip is not locked.
 */
static
int
hammer2_read_file(hammer2_inode_t *ip, struct uio *uio, int seqcount)
{
	hammer2_off_t size;
	struct buf *bp;
	int error;

	error = 0;

	/*
	 * UIO read loop.
	 *
	 * WARNING! Assumes that the kernel interlocks size changes at the
	 *	    vnode level.
	 */
	hammer2_mtx_sh(&ip->lock);
	hammer2_mtx_sh(&ip->truncate_lock);
	size = ip->meta.size;
	hammer2_mtx_unlock(&ip->lock);

	while (uio->uio_resid > 0 && uio->uio_offset < size) {
		hammer2_key_t lbase;
		hammer2_key_t leof;
		int lblksize;
		int loff;
		int n;

		lblksize = hammer2_calc_logical(ip, uio->uio_offset,
						&lbase, &leof);

#if 1
		error = cluster_read(ip->vp, leof, lbase, lblksize,
				     uio->uio_resid, seqcount * BKVASIZE,
				     &bp);
#else
		if (uio->uio_segflg == UIO_NOCOPY) {
			bp = getblk(ip->vp, lbase, lblksize, GETBLK_BHEAVY, 0);
			if (bp->b_flags & B_CACHE) {
				int i;
				int j = 0;
				if (bp->b_xio.xio_npages != 16)
					kprintf("NPAGES BAD\n");
				for (i = 0; i < bp->b_xio.xio_npages; ++i) {
					vm_page_t m;
					m = bp->b_xio.xio_pages[i];
					if (m == NULL || m->valid == 0) {
						kprintf("bp %016jx %016jx pg %d inv",
							lbase, leof, i);
						if (m)
							kprintf("m->object %p/%p", m->object, ip->vp->v_object);
						kprintf("\n");
						j = 1;
					}
				}
				if (j)
					kprintf("b_flags %08x, b_error %d\n", bp->b_flags, bp->b_error);
			}
			bqrelse(bp);
		}
		error = bread(ip->vp, lbase, lblksize, &bp);
#endif
		if (error) {
			brelse(bp);
			break;
		}
		loff = (int)(uio->uio_offset - lbase);
		n = lblksize - loff;
		if (n > uio->uio_resid)
			n = uio->uio_resid;
		if (n > size - uio->uio_offset)
			n = (int)(size - uio->uio_offset);
		bp->b_flags |= B_AGE;
		uiomovebp(bp, (char *)bp->b_data + loff, n, uio);
		bqrelse(bp);
	}
	hammer2_mtx_unlock(&ip->truncate_lock);

	return (error);
}

/*
 * Write to the file represented by the inode via the logical buffer cache.
 * The inode may represent a regular file or a symlink.
 *
 * The inode must not be locked.
 */
static
int
hammer2_write_file(hammer2_inode_t *ip, struct uio *uio,
		   int ioflag, int seqcount)
{
	hammer2_key_t old_eof;
	hammer2_key_t new_eof;
	struct buf *bp;
	int kflags;
	int error;
	int modified;

	/*
	 * Setup if append
	 *
	 * WARNING! Assumes that the kernel interlocks size changes at the
	 *	    vnode level.
	 */
	hammer2_mtx_ex(&ip->lock);
	hammer2_mtx_sh(&ip->truncate_lock);
	if (ioflag & IO_APPEND)
		uio->uio_offset = ip->meta.size;
	old_eof = ip->meta.size;

	/*
	 * Extend the file if necessary.  If the write fails at some point
	 * we will truncate it back down to cover as much as we were able
	 * to write.
	 *
	 * Doing this now makes it easier to calculate buffer sizes in
	 * the loop.
	 */
	kflags = 0;
	error = 0;
	modified = 0;

	if (uio->uio_offset + uio->uio_resid > old_eof) {
		new_eof = uio->uio_offset + uio->uio_resid;
		modified = 1;
		hammer2_extend_file(ip, new_eof);
		kflags |= NOTE_EXTEND;
	} else {
		new_eof = old_eof;
	}
	hammer2_mtx_unlock(&ip->lock);

	/*
	 * UIO write loop
	 */
	while (uio->uio_resid > 0) {
		hammer2_key_t lbase;
		int trivial;
		int endofblk;
		int lblksize;
		int loff;
		int n;

		/*
		 * Don't allow the buffer build to blow out the buffer
		 * cache.
		 */
		if ((ioflag & IO_RECURSE) == 0)
			bwillwrite(HAMMER2_PBUFSIZE);

		/*
		 * This nominally tells us how much we can cluster and
		 * what the logical buffer size needs to be.  Currently
		 * we don't try to cluster the write and just handle one
		 * block at a time.
		 */
		lblksize = hammer2_calc_logical(ip, uio->uio_offset,
						&lbase, NULL);
		loff = (int)(uio->uio_offset - lbase);

		KKASSERT(lblksize <= 65536);

		/*
		 * Calculate bytes to copy this transfer and whether the
		 * copy completely covers the buffer or not.
		 */
		trivial = 0;
		n = lblksize - loff;
		if (n > uio->uio_resid) {
			n = uio->uio_resid;
			if (loff == lbase && uio->uio_offset + n == new_eof)
				trivial = 1;
			endofblk = 0;
		} else {
			if (loff == 0)
				trivial = 1;
			endofblk = 1;
		}
		if (lbase >= new_eof)
			trivial = 1;
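		/*
		 * ('trivial' indicates the write fully covers the valid
		 *  portion of the buffer, so the read-before-write in the
		 *  buffer acquisition below can be skipped; 'endofblk'
		 *  indicates the copy runs to the end of the block.)
		 */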

		/*
		 * Get the buffer
		 */
		if (uio->uio_segflg == UIO_NOCOPY) {
			/*
			 * Issuing a write with the same data backing the
			 * buffer.  Instantiate the buffer to collect the
			 * backing vm pages, then read-in any missing bits.
			 *
			 * This case is used by vop_stdputpages().
			 */
			bp = getblk(ip->vp, lbase, lblksize, GETBLK_BHEAVY, 0);
			if ((bp->b_flags & B_CACHE) == 0) {
				bqrelse(bp);
				error = bread(ip->vp, lbase, lblksize, &bp);
			}
		} else if (trivial) {
			/*
			 * Even though we are entirely overwriting the buffer
			 * we may still have to zero it out to avoid a
			 * mmap/write visibility issue.
			 */
			bp = getblk(ip->vp, lbase, lblksize, GETBLK_BHEAVY, 0);
			if ((bp->b_flags & B_CACHE) == 0)
				vfs_bio_clrbuf(bp);
		} else {
			/*
			 * Partial overwrite, read in any missing bits then
			 * replace the portion being written.
			 *
			 * (The strategy code will detect zero-fill physical
			 * blocks for this case).
			 */
			error = bread(ip->vp, lbase, lblksize, &bp);
			if (error == 0)
				bheavy(bp);
		}

		if (error) {
			brelse(bp);
			break;
		}

		/*
		 * Ok, copy the data in
		 */
		error = uiomovebp(bp, bp->b_data + loff, n, uio);
		kflags |= NOTE_WRITE;
		modified = 1;
		if (error) {
			brelse(bp);
			break;
		}

		/*
		 * WARNING: Pageout daemon will issue UIO_NOCOPY writes
		 *	    with IO_SYNC or IO_ASYNC set.  These writes
		 *	    must be handled as the pageout daemon expects.
		 *
		 * NOTE!    H2 relies on cluster_write() here because it
		 *	    cannot preallocate disk blocks at the logical
		 *	    level due to not knowing what the compression
		 *	    size will be at this time.
		 *
		 *	    We must use cluster_write() here and we depend
		 *	    on the write-behind feature to flush buffers
		 *	    appropriately.  If we let the buffer daemons do
		 *	    it the block allocations will be all over the
		 *	    map.
		 */
		if (ioflag & IO_SYNC) {
			bwrite(bp);
		} else if ((ioflag & IO_DIRECT) && endofblk) {
			bawrite(bp);
		} else if (ioflag & IO_ASYNC) {
			bawrite(bp);
		} else if (ip->vp->v_mount->mnt_flag & MNT_NOCLUSTERW) {
			bdwrite(bp);
		} else {
#if 1
			bp->b_flags |= B_CLUSTEROK;
			cluster_write(bp, new_eof, lblksize, seqcount);
#else
			bp->b_flags |= B_CLUSTEROK;
			bdwrite(bp);
#endif
		}
	}

	/*
	 * Cleanup.  If we extended the file EOF but failed to write through
	 * the entire write is a failure and we have to back-up.
	 */
	if (error && new_eof != old_eof) {
		hammer2_mtx_unlock(&ip->truncate_lock);
		hammer2_mtx_ex(&ip->lock);
		hammer2_mtx_ex(&ip->truncate_lock);
		hammer2_truncate_file(ip, old_eof);
		if (ip->flags & HAMMER2_INODE_MODIFIED)
			hammer2_inode_chain_sync(ip);
		hammer2_mtx_unlock(&ip->lock);
	} else if (modified) {
		hammer2_mtx_ex(&ip->lock);
		hammer2_inode_modify(ip);
		hammer2_update_time(&ip->meta.mtime);
		if (ip->flags & HAMMER2_INODE_MODIFIED)
			hammer2_inode_chain_sync(ip);
		hammer2_mtx_unlock(&ip->lock);
		hammer2_knote(ip->vp, kflags);
	}
	hammer2_trans_assert_strategy(ip->pmp);
	hammer2_mtx_unlock(&ip->truncate_lock);

	return error;
}

/*
 * Truncate the size of a file.  The inode must not be locked.
 *
 * We must unconditionally set HAMMER2_INODE_RESIZED to properly
 * ensure that any on-media data beyond the new file EOF has been destroyed.
 *
 * WARNING: nvtruncbuf() can only be safely called without the inode lock
 *	    held due to the way our write thread works.  If the truncation
 *	    occurs in the middle of a buffer, nvtruncbuf() is responsible
 *	    for dirtying that buffer and zeroing out trailing bytes.
 *
 * WARNING! Assumes that the kernel interlocks size changes at the
 *	    vnode level.
 *
 * WARNING! Caller assumes responsibility for removing dead blocks
 *	    if INODE_RESIZED is set.
 */
static
void
hammer2_truncate_file(hammer2_inode_t *ip, hammer2_key_t nsize)
{
	hammer2_key_t lbase;
	int nblksize;

	LOCKSTART;
	hammer2_mtx_unlock(&ip->lock);
	if (ip->vp) {
		nblksize = hammer2_calc_logical(ip, nsize, &lbase, NULL);
		nvtruncbuf(ip->vp, nsize,
			   nblksize, (int)nsize & (nblksize - 1),
			   0);
	}
	hammer2_mtx_ex(&ip->lock);
	KKASSERT((ip->flags & HAMMER2_INODE_RESIZED) == 0);
	ip->osize = ip->meta.size;
	ip->meta.size = nsize;
	atomic_set_int(&ip->flags, HAMMER2_INODE_RESIZED);
	hammer2_inode_modify(ip);
	LOCKSTOP;
}

/*
 * Extend the size of a file.  The inode must not be locked.
 *
 * Even though the file size is changing, we do not have to set the
 * INODE_RESIZED bit unless the file size crosses the EMBEDDED_BYTES
 * boundary.  When this occurs a hammer2_inode_chain_sync() is required
 * to prepare the inode cluster's indirect block table, otherwise
 * async execution of the strategy code will implode on us.
 *
 * WARNING! Assumes that the kernel interlocks size changes at the
 *	    vnode level.
 *
 * WARNING! Caller assumes responsibility for transitioning out
 *	    of the inode DIRECTDATA mode if INODE_RESIZED is set.
 */
static
void
hammer2_extend_file(hammer2_inode_t *ip, hammer2_key_t nsize)
{
	hammer2_key_t lbase;
	hammer2_key_t osize;
	int oblksize;
	int nblksize;

	LOCKSTART;

	KKASSERT((ip->flags & HAMMER2_INODE_RESIZED) == 0);
	hammer2_inode_modify(ip);
	osize = ip->meta.size;
	ip->osize = osize;
	ip->meta.size = nsize;

	if (osize <= HAMMER2_EMBEDDED_BYTES && nsize > HAMMER2_EMBEDDED_BYTES) {
		atomic_set_int(&ip->flags, HAMMER2_INODE_RESIZED);
		hammer2_inode_chain_sync(ip);
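		/*
		 * (Crossing EMBEDDED_BYTES means the data can no longer be
		 *  kept in the inode's embedded data area; the chain sync
		 *  above sets up the indirect block table before the
		 *  strategy code can run asynchronously.)
		 */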
	}

	hammer2_mtx_unlock(&ip->lock);
	if (ip->vp) {
		oblksize = hammer2_calc_logical(ip, osize, &lbase, NULL);
		nblksize = hammer2_calc_logical(ip, nsize, &lbase, NULL);
		nvextendbuf(ip->vp,
			    osize, nsize,
			    oblksize, nblksize,
			    -1, -1, 0);
	}
	hammer2_mtx_ex(&ip->lock);

	LOCKSTOP;
}

static
int
hammer2_vop_nresolve(struct vop_nresolve_args *ap)
{
	hammer2_xop_nresolve_t *xop;
	hammer2_inode_t *ip;
	hammer2_inode_t *dip;
	struct namecache *ncp;
	struct vnode *vp;
	int error;

	LOCKSTART;
	dip = VTOI(ap->a_dvp);
	xop = hammer2_xop_alloc(dip, 0);

	ncp = ap->a_nch->ncp;
	hammer2_xop_setname(&xop->head, ncp->nc_name, ncp->nc_nlen);

	/*
	 * Note: In DragonFly the kernel handles '.' and '..'.
	 */
	hammer2_inode_lock(dip, HAMMER2_RESOLVE_SHARED);
	hammer2_xop_start(&xop->head, hammer2_xop_nresolve);

	error = hammer2_xop_collect(&xop->head, 0);
	if (error) {
		ip = NULL;
	} else {
		ip = hammer2_inode_get(dip->pmp, dip, &xop->head.cluster, -1);
	}
	hammer2_inode_unlock(dip);

	/*
	 * Acquire the related vnode
	 *
	 * NOTE: For error processing, only ENOENT resolves the namecache
	 *	 entry to NULL, otherwise we just return the error and
	 *	 leave the namecache unresolved.
	 *
	 * NOTE: multiple hammer2_inode structures can be aliased to the
	 *	 same chain element, for example for hardlinks.  This
	 *	 use case does not 'reattach' inode associations that
	 *	 might already exist, but always allocates a new one.
	 *
	 * WARNING: inode structure is locked exclusively via inode_get
	 *	    but chain was locked shared.  inode_unlock()
	 *	    will handle it properly.
	 */
	if (ip) {
		vp = hammer2_igetv(ip, &error);
		if (error == 0) {
			vn_unlock(vp);
			cache_setvp(ap->a_nch, vp);
		} else if (error == ENOENT) {
			cache_setvp(ap->a_nch, NULL);
		}
		hammer2_inode_unlock(ip);

		/*
		 * The vp should not be released until after we've disposed
		 * of our locks, because it might cause vop_inactive() to
		 * be called.
		 */
		if (vp)
			vrele(vp);
	} else {
		error = ENOENT;
		cache_setvp(ap->a_nch, NULL);
	}
	hammer2_xop_retire(&xop->head, HAMMER2_XOPMASK_VOP);
	KASSERT(error || ap->a_nch->ncp->nc_vp != NULL,
		("resolve error %d/%p ap %p\n",
		 error, ap->a_nch->ncp->nc_vp, ap));
	LOCKSTOP;

	return error;
}

static
int
hammer2_vop_nlookupdotdot(struct vop_nlookupdotdot_args *ap)
{
	hammer2_inode_t *dip;
	hammer2_tid_t inum;
	int error;

	LOCKSTART;
	dip = VTOI(ap->a_dvp);
	inum = dip->meta.iparent;
	*ap->a_vpp = NULL;

	if (inum) {
		error = hammer2_vfs_vget(ap->a_dvp->v_mount, NULL,
					 inum, ap->a_vpp);
	} else {
		error = ENOENT;
	}
	LOCKSTOP;
	return error;
}

static
int
hammer2_vop_nmkdir(struct vop_nmkdir_args *ap)
{
	hammer2_inode_t *dip;
	hammer2_inode_t *nip;
	struct namecache *ncp;
	const uint8_t *name;
	size_t name_len;
	hammer2_tid_t inum;
	int error;

	LOCKSTART;
	dip = VTOI(ap->a_dvp);
	if (dip->pmp->ronly) {
		LOCKSTOP;
		return (EROFS);
	}

	ncp = ap->a_nch->ncp;
	name = ncp->nc_name;
	name_len = ncp->nc_nlen;

	hammer2_pfs_memory_wait(dip->pmp);
	hammer2_trans_init(dip->pmp, 0);

	inum = hammer2_trans_newinum(dip->pmp);

	/*
	 * Create the actual inode as a hidden file in the iroot, then
	 * create the directory entry as a hardlink to it.  The creation
	 * of the actual inode sets its nlinks to 1 which is the value
	 * we desire.
	 */
	nip = hammer2_inode_create(dip->pmp->iroot, dip, ap->a_vap, ap->a_cred,
				   NULL, 0, inum,
				   inum, 0, 0,
				   0, &error);
	if (error == 0) {
		hammer2_inode_create(dip, dip, NULL, NULL,
				     name, name_len, 0,
				     nip->meta.inum,
				     HAMMER2_OBJTYPE_HARDLINK, nip->meta.type,
				     0, &error);
	}

	if (error) {
		KKASSERT(nip == NULL);
		*ap->a_vpp = NULL;
	} else {
		*ap->a_vpp = hammer2_igetv(nip, &error);
		hammer2_inode_unlock(nip);
	}

	/*
	 * Update dip's mtime
	 */
	if (error == 0) {
		uint64_t mtime;

		hammer2_inode_lock(dip, HAMMER2_RESOLVE_SHARED);
		hammer2_update_time(&mtime);
		hammer2_inode_modify(dip);
		dip->meta.mtime = mtime;
		hammer2_inode_unlock(dip);
	}

	hammer2_trans_done(dip->pmp);

	if (error == 0) {
		cache_setunresolved(ap->a_nch);
		cache_setvp(ap->a_nch, *ap->a_vpp);
	}
	LOCKSTOP;
	return error;
}

static
int
hammer2_vop_open(struct vop_open_args *ap)
{
	return vop_stdopen(ap);
}

/*
 * hammer2_vop_advlock { vp, id, op, fl, flags }
 */
static
int
hammer2_vop_advlock(struct vop_advlock_args *ap)
{
	hammer2_inode_t *ip = VTOI(ap->a_vp);
	hammer2_off_t size;

	size = ip->meta.size;
	return (lf_advlock(ap, &ip->advlock, size));
}

static
int
hammer2_vop_close(struct vop_close_args *ap)
{
	return vop_stdclose(ap);
}

/*
 * hammer2_vop_nlink { nch, dvp, vp, cred }
 *
 * Create a hardlink from (vp) to {dvp, nch}.
 */
static
int
hammer2_vop_nlink(struct vop_nlink_args *ap)
{
	hammer2_inode_t *tdip;	/* target directory to create link in */
	hammer2_inode_t *ip;	/* inode we are hardlinking to */
	struct namecache *ncp;
	const uint8_t *name;
	size_t name_len;
	int error;
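	/*
	 * The required check referenced by the commit subject: a hard
	 * link may not span mounts, so reject the request with EXDEV
	 * when the target directory and the source vnode live on
	 * different mounts.
	 */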
	if (ap->a_dvp->v_mount != ap->a_vp->v_mount)
		return(EXDEV);

	LOCKSTART;
	tdip = VTOI(ap->a_dvp);
	if (tdip->pmp->ronly) {
		LOCKSTOP;
		return (EROFS);
	}

	ncp = ap->a_nch->ncp;
	name = ncp->nc_name;
	name_len = ncp->nc_nlen;

	/*
	 * ip represents the file being hardlinked.  The file could be a
	 * normal file or a hardlink target if it has already been hardlinked.
	 * (with the new semantics, it will almost always be a hardlink
	 * target).
	 *
	 * Bump nlinks and potentially also create or move the hardlink
	 * target in the parent directory common to (ip) and (tdip).  The
	 * consolidation code can modify ip->cluster.  The returned cluster
	 * is locked.
	 */
	ip = VTOI(ap->a_vp);
	KASSERT(ip->pmp, ("ip->pmp is NULL %p %p", ip, ip->pmp));
	hammer2_pfs_memory_wait(ip->pmp);
	hammer2_trans_init(ip->pmp, 0);

	/*
	 * Target should be an indexed inode or there's no way we will ever
	 * be able to find it!
	 */
	KKASSERT((ip->meta.name_key & HAMMER2_DIRHASH_VISIBLE) == 0);

	error = 0;

	/*
	 * Can return NULL and error == EXDEV if the common parent
	 * crosses a directory with the xlink flag set.
	 */
	hammer2_inode_lock(tdip, 0);
	hammer2_inode_lock(ip, 0);

	/*
	 * Create the hardlink target and bump nlinks.
	 */
	if (error == 0) {
		hammer2_inode_create(tdip, tdip, NULL, NULL,
				     name, name_len, 0,
				     ip->meta.inum,
				     HAMMER2_OBJTYPE_HARDLINK, ip->meta.type,
				     0, &error);
		hammer2_inode_modify(ip);
		++ip->meta.nlinks;
	}
	if (error == 0) {
		/*
		 * Update dip's mtime
		 */
		uint64_t mtime;

		hammer2_update_time(&mtime);
		hammer2_inode_modify(tdip);
		tdip->meta.mtime = mtime;

		cache_setunresolved(ap->a_nch);
		cache_setvp(ap->a_nch, ap->a_vp);
	}
	hammer2_inode_unlock(ip);
	hammer2_inode_unlock(tdip);

	hammer2_trans_done(ip->pmp);

	LOCKSTOP;
	return error;
}

/*
 * hammer2_vop_ncreate { nch, dvp, vpp, cred, vap }
 *
 * The operating system has already ensured that the directory entry
 * does not exist and done all appropriate namespace locking.
 */
static
int
hammer2_vop_ncreate(struct vop_ncreate_args *ap)
{
	hammer2_inode_t *dip;
	hammer2_inode_t *nip;
	struct namecache *ncp;
	const uint8_t *name;
	size_t name_len;
	hammer2_tid_t inum;
	int error;

	LOCKSTART;
	dip = VTOI(ap->a_dvp);
	if (dip->pmp->ronly) {
		LOCKSTOP;
		return (EROFS);
	}

	ncp = ap->a_nch->ncp;
	name = ncp->nc_name;
	name_len = ncp->nc_nlen;
	hammer2_pfs_memory_wait(dip->pmp);
	hammer2_trans_init(dip->pmp, 0);

	inum = hammer2_trans_newinum(dip->pmp);

	/*
	 * Create the actual inode as a hidden file in the iroot, then
	 * create the directory entry as a hardlink to it.  The creation
	 * of the actual inode sets its nlinks to 1 which is the value
	 * we desire.
	 */
	nip = hammer2_inode_create(dip->pmp->iroot, dip, ap->a_vap, ap->a_cred,
				   NULL, 0, inum,
				   inum, 0, 0,
				   0, &error);

	if (error == 0) {
		hammer2_inode_create(dip, dip, NULL, NULL,
				     name, name_len, 0,
				     nip->meta.inum,
				     HAMMER2_OBJTYPE_HARDLINK, nip->meta.type,
				     0, &error);
	}
	if (error) {
		KKASSERT(nip == NULL);
		*ap->a_vpp = NULL;
	} else {
		*ap->a_vpp = hammer2_igetv(nip, &error);
		hammer2_inode_unlock(nip);
	}

	/*
	 * Update dip's mtime
	 */
	if (error == 0) {
		uint64_t mtime;

		hammer2_inode_lock(dip, HAMMER2_RESOLVE_SHARED);
		hammer2_update_time(&mtime);
		hammer2_inode_modify(dip);
		dip->meta.mtime = mtime;
		hammer2_inode_unlock(dip);
	}

	hammer2_trans_done(dip->pmp);

	if (error == 0) {
		cache_setunresolved(ap->a_nch);
		cache_setvp(ap->a_nch, *ap->a_vpp);
	}
	LOCKSTOP;
	return error;
}

/*
 * Make a device node (typically a fifo)
 */
static
int
hammer2_vop_nmknod(struct vop_nmknod_args *ap)
{
	hammer2_inode_t *dip;
	hammer2_inode_t *nip;
	struct namecache *ncp;
	const uint8_t *name;
	size_t name_len;
	hammer2_tid_t inum;
	int error;

	LOCKSTART;
	dip = VTOI(ap->a_dvp);
	if (dip->pmp->ronly) {
		LOCKSTOP;
		return (EROFS);
	}

	ncp = ap->a_nch->ncp;
	name = ncp->nc_name;
	name_len = ncp->nc_nlen;
	hammer2_pfs_memory_wait(dip->pmp);
	hammer2_trans_init(dip->pmp, 0);

	/*
	 * The device node is entered as the directory entry itself and not
	 * as a hardlink to an inode.  Since one cannot obtain a
	 * file handle on the filesystem entry representing the device, we
	 * do not have to worry about indexing its inode.
	 */
	inum = hammer2_trans_newinum(dip->pmp);
	nip = hammer2_inode_create(dip->pmp->iroot, dip, ap->a_vap, ap->a_cred,
				   NULL, 0, inum,
				   inum, 0, 0,
				   0, &error);
	if (error == 0) {
		hammer2_inode_create(dip, dip, NULL, NULL,
				     name, name_len, 0,
				     nip->meta.inum,
				     HAMMER2_OBJTYPE_HARDLINK, nip->meta.type,
				     0, &error);
	}

	if (error) {
		KKASSERT(nip == NULL);
		*ap->a_vpp = NULL;
	} else {
		*ap->a_vpp = hammer2_igetv(nip, &error);
		hammer2_inode_unlock(nip);
	}

	/*
	 * Update dip's mtime
	 */
	if (error == 0) {
		uint64_t mtime;

		hammer2_inode_lock(dip, HAMMER2_RESOLVE_SHARED);
		hammer2_update_time(&mtime);
		hammer2_inode_modify(dip);
		dip->meta.mtime = mtime;
		hammer2_inode_unlock(dip);
	}

	hammer2_trans_done(dip->pmp);

	if (error == 0) {
		cache_setunresolved(ap->a_nch);
		cache_setvp(ap->a_nch, *ap->a_vpp);
	}
	LOCKSTOP;
	return error;
}

/*
 * hammer2_vop_nsymlink { nch, dvp, vpp, cred, vap, target }
 */
static
int
hammer2_vop_nsymlink(struct vop_nsymlink_args *ap)
{
	hammer2_inode_t *dip;
	hammer2_inode_t *nip;
	struct namecache *ncp;
	const uint8_t *name;
	size_t name_len;
	hammer2_tid_t inum;
	int error;

	dip = VTOI(ap->a_dvp);
	if (dip->pmp->ronly)
		return (EROFS);

	ncp = ap->a_nch->ncp;
	name = ncp->nc_name;
	name_len = ncp->nc_nlen;
	hammer2_pfs_memory_wait(dip->pmp);
	hammer2_trans_init(dip->pmp, 0);

	ap->a_vap->va_type = VLNK;	/* enforce type */

	/*
	 * The softlink is entered into the directory itself and not
	 * as a hardlink to an inode.  Since one cannot obtain a
	 * file handle on the softlink itself we do not have to worry
	 * about indexing its inode.
	 */
	inum = hammer2_trans_newinum(dip->pmp);

	nip = hammer2_inode_create(dip->pmp->iroot, dip, ap->a_vap, ap->a_cred,
				   NULL, 0, inum,
				   inum, 0, 0,
				   0, &error);
	if (error == 0) {
		hammer2_inode_create(dip, dip, NULL, NULL,
				     name, name_len, 0,
				     nip->meta.inum,
				     HAMMER2_OBJTYPE_HARDLINK, nip->meta.type,
				     0, &error);
	}

	if (error) {
		KKASSERT(nip == NULL);
		*ap->a_vpp = NULL;
		hammer2_trans_done(dip->pmp);
		return error;
	}
	*ap->a_vpp = hammer2_igetv(nip, &error);

	/*
	 * Build the softlink (~like file data) and finalize the namecache.
	 */
	if (error == 0) {
		size_t bytes;
		struct uio auio;
		struct iovec aiov;

		bytes = strlen(ap->a_target);

		hammer2_inode_unlock(nip);
		bzero(&auio, sizeof(auio));
		bzero(&aiov, sizeof(aiov));
		auio.uio_iov = &aiov;
		auio.uio_segflg = UIO_SYSSPACE;
		auio.uio_rw = UIO_WRITE;
		auio.uio_resid = bytes;
		auio.uio_iovcnt = 1;
		auio.uio_td = curthread;
		aiov.iov_base = ap->a_target;
		aiov.iov_len = bytes;
		error = hammer2_write_file(nip, &auio, IO_APPEND, 0);
		/* XXX handle error */
		error = 0;
	} else {
		hammer2_inode_unlock(nip);
	}

	/*
	 * Update dip's mtime
	 */
	if (error == 0) {
		uint64_t mtime;

		hammer2_inode_lock(dip, HAMMER2_RESOLVE_SHARED);
		hammer2_update_time(&mtime);
		hammer2_inode_modify(dip);
		dip->meta.mtime = mtime;
		hammer2_inode_unlock(dip);
	}

	hammer2_trans_done(dip->pmp);

	/*
	 * Finalize namecache
	 */
	if (error == 0) {
		cache_setunresolved(ap->a_nch);
		cache_setvp(ap->a_nch, *ap->a_vpp);
		/* hammer2_knote(ap->a_dvp, NOTE_WRITE); */
	}
	return error;
}

/*
 * hammer2_vop_nremove { nch, dvp, cred }
 */
static
int
hammer2_vop_nremove(struct vop_nremove_args *ap)
{
	hammer2_xop_unlink_t *xop;
	hammer2_inode_t *dip;
	hammer2_inode_t *ip;
	struct namecache *ncp;
	int error;
	int isopen;

	LOCKSTART;
	dip = VTOI(ap->a_dvp);
	if (dip->pmp->ronly) {
		LOCKSTOP;
		return(EROFS);
	}

	ncp = ap->a_nch->ncp;

	hammer2_pfs_memory_wait(dip->pmp);
	hammer2_trans_init(dip->pmp, 0);
	hammer2_inode_lock(dip, 0);

	/*
	 * The unlink XOP unlinks the path from the directory and
	 * locates and returns the cluster associated with the real inode.
	 * We have to handle nlinks here on the frontend.
	 */
	xop = hammer2_xop_alloc(dip, HAMMER2_XOP_MODIFYING);
	hammer2_xop_setname(&xop->head, ncp->nc_name, ncp->nc_nlen);
	isopen = cache_isopen(ap->a_nch);
	xop->isdir = 0;
	xop->dopermanent = 0;
	hammer2_xop_start(&xop->head, hammer2_xop_unlink);

	/*
	 * Collect the real inode and adjust nlinks, destroy the real
	 * inode if nlinks transitions to 0 and it was the real inode
	 * (else it has already been removed).
	 */
	error = hammer2_xop_collect(&xop->head, 0);
	hammer2_inode_unlock(dip);

	if (error == 0) {
		ip = hammer2_inode_get(dip->pmp, dip, &xop->head.cluster, -1);
		hammer2_xop_retire(&xop->head, HAMMER2_XOPMASK_VOP);
		if (ip) {
			hammer2_inode_unlink_finisher(ip, isopen);
			hammer2_inode_unlock(ip);
		}
	} else {
		hammer2_xop_retire(&xop->head, HAMMER2_XOPMASK_VOP);
	}

	/*
	 * Update dip's mtime
	 */
	if (error == 0) {
		uint64_t mtime;

		hammer2_inode_lock(dip, HAMMER2_RESOLVE_SHARED);
		hammer2_update_time(&mtime);
		hammer2_inode_modify(dip);
		dip->meta.mtime = mtime;
		hammer2_inode_unlock(dip);
	}

	hammer2_inode_run_sideq(dip->pmp);
	hammer2_trans_done(dip->pmp);
	if (error == 0)
		cache_unlink(ap->a_nch);
	LOCKSTOP;
	return (error);
}

/*
 * hammer2_vop_nrmdir { nch, dvp, cred }
 */
static
int
hammer2_vop_nrmdir(struct vop_nrmdir_args *ap)
{
	hammer2_xop_unlink_t *xop;
	hammer2_inode_t *dip;
	hammer2_inode_t *ip;
	struct namecache *ncp;
	int isopen;
	int error;

	LOCKSTART;
	dip = VTOI(ap->a_dvp);
	if (dip->pmp->ronly) {
		LOCKSTOP;
		return(EROFS);
	}

	hammer2_pfs_memory_wait(dip->pmp);
	hammer2_trans_init(dip->pmp, 0);
	hammer2_inode_lock(dip, 0);

	xop = hammer2_xop_alloc(dip, HAMMER2_XOP_MODIFYING);

	ncp = ap->a_nch->ncp;
	hammer2_xop_setname(&xop->head, ncp->nc_name, ncp->nc_nlen);
	isopen = cache_isopen(ap->a_nch);
	xop->isdir = 1;
	xop->dopermanent = 0;
	hammer2_xop_start(&xop->head, hammer2_xop_unlink);

	/*
	 * Collect the real inode and adjust nlinks, destroy the real
	 * inode if nlinks transitions to 0 and it was the real inode
	 * (else it has already been removed).
	 */
	error = hammer2_xop_collect(&xop->head, 0);
	hammer2_inode_unlock(dip);

	if (error == 0) {
		ip = hammer2_inode_get(dip->pmp, dip, &xop->head.cluster, -1);
		hammer2_xop_retire(&xop->head, HAMMER2_XOPMASK_VOP);
		if (ip) {
			hammer2_inode_unlink_finisher(ip, isopen);
			hammer2_inode_unlock(ip);
		}
	} else {
		hammer2_xop_retire(&xop->head, HAMMER2_XOPMASK_VOP);
	}

	/*
	 * Update dip's mtime
	 */
	if (error == 0) {
		uint64_t mtime;

		hammer2_inode_lock(dip, HAMMER2_RESOLVE_SHARED);
		hammer2_update_time(&mtime);
		hammer2_inode_modify(dip);
		dip->meta.mtime = mtime;
		hammer2_inode_unlock(dip);
	}

	hammer2_inode_run_sideq(dip->pmp);
	hammer2_trans_done(dip->pmp);
	if (error == 0)
		cache_unlink(ap->a_nch);
	LOCKSTOP;
	return (error);
}

/*
 * hammer2_vop_nrename { fnch, tnch, fdvp, tdvp, cred }
 */
static
int
hammer2_vop_nrename(struct vop_nrename_args *ap)
{
	struct namecache *fncp;
	struct namecache *tncp;
	hammer2_inode_t *fdip;
	hammer2_inode_t *tdip;
	hammer2_inode_t *ip;
	const uint8_t *fname;
	size_t fname_len;
	const uint8_t *tname;
	size_t tname_len;
	int error;
	int tnch_error;
	int update_tdip;
	int update_fdip;
	hammer2_key_t tlhc;

	if (ap->a_fdvp->v_mount != ap->a_tdvp->v_mount)
		return(EXDEV);
	if (ap->a_fdvp->v_mount != ap->a_fnch->ncp->nc_vp->v_mount)
		return(EXDEV);

	fdip = VTOI(ap->a_fdvp);	/* source directory */
	tdip = VTOI(ap->a_tdvp);	/* target directory */

	if (fdip->pmp->ronly)
		return(EROFS);

	LOCKSTART;
	fncp = ap->a_fnch->ncp;		/* entry name in source */
	fname = fncp->nc_name;
	fname_len = fncp->nc_nlen;

	tncp = ap->a_tnch->ncp;		/* entry name in target */
	tname = tncp->nc_name;
	tname_len = tncp->nc_nlen;

	hammer2_pfs_memory_wait(tdip->pmp);
	hammer2_trans_init(tdip->pmp, 0);

	update_tdip = 0;
	update_fdip = 0;

	/*
	 * ip is the inode being renamed.  If this is a hardlink then
	 * ip represents the actual file and not the hardlink marker.
	 */
	ip = VTOI(fncp->nc_vp);

	KKASSERT((ip->meta.name_key & HAMMER2_DIRHASH_VISIBLE) == 0);

	/*
	 * Can return NULL and error == EXDEV if the common parent
	 * crosses a directory with the xlink flag set.
	 */
	error = 0;
	hammer2_inode_lock(fdip, 0);
	hammer2_inode_lock(tdip, 0);
	hammer2_inode_ref(ip);		/* extra ref */

	hammer2_inode_lock(ip, 0);

	/*
	 * Delete the target namespace.
	 */
	{
		hammer2_xop_unlink_t *xop2;
		hammer2_inode_t *tip;
		int isopen;

		/*
		 * The unlink XOP unlinks the path from the directory and
		 * locates and returns the cluster associated with the real
		 * inode.  We have to handle nlinks here on the frontend.
		 */
		xop2 = hammer2_xop_alloc(tdip, HAMMER2_XOP_MODIFYING);
		hammer2_xop_setname(&xop2->head, tname, tname_len);
		isopen = cache_isopen(ap->a_tnch);
		xop2->isdir = -1;
		xop2->dopermanent = 0;
		hammer2_xop_start(&xop2->head, hammer2_xop_unlink);

		/*
		 * Collect the real inode and adjust nlinks, destroy the real
		 * inode if nlinks transitions to 0 and it was the real inode
		 * (else it has already been removed).
		 */
		tnch_error = hammer2_xop_collect(&xop2->head, 0);
		/* hammer2_inode_unlock(tdip); */

		if (tnch_error == 0) {
			tip = hammer2_inode_get(tdip->pmp, NULL,
						&xop2->head.cluster, -1);
			hammer2_xop_retire(&xop2->head, HAMMER2_XOPMASK_VOP);
			if (tip) {
				hammer2_inode_unlink_finisher(tip, isopen);
				hammer2_inode_unlock(tip);
			}
		} else {
			hammer2_xop_retire(&xop2->head, HAMMER2_XOPMASK_VOP);
		}
		/* hammer2_inode_lock(tdip, 0); */

		if (tnch_error && tnch_error != ENOENT) {
			error = tnch_error;
			goto done2;
		}
		update_tdip = 1;
	}

	/*
	 * Resolve the collision space for (tdip, tname, tname_len)
	 *
	 * tdip must be held exclusively locked to prevent races.
	 */
	{
		hammer2_xop_scanlhc_t *sxop;
		hammer2_tid_t lhcbase;

		tlhc = hammer2_dirhash(tname, tname_len);
		lhcbase = tlhc;
		sxop = hammer2_xop_alloc(tdip, HAMMER2_XOP_MODIFYING);
		sxop->lhc = tlhc;
		hammer2_xop_start(&sxop->head, hammer2_xop_scanlhc);
		while ((error = hammer2_xop_collect(&sxop->head, 0)) == 0) {
			if (tlhc != sxop->head.cluster.focus->bref.key)
				break;
			++tlhc;
		}
		hammer2_xop_retire(&sxop->head, HAMMER2_XOPMASK_VOP);
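		/*
		 * (The scan above linearly probes successive keys in the
		 *  directory-hash collision space until a free key is
		 *  found; if the probe walks out of the DIRHASH low-mask
		 *  range the space is exhausted and ENOSPC is returned
		 *  below.)
		 */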

		if (error) {
			if (error != ENOENT)
				goto done2;
			++tlhc;
			error = 0;
		}
		if ((lhcbase ^ tlhc) & ~HAMMER2_DIRHASH_LOMASK) {
			error = ENOSPC;
			goto done2;
		}
	}

	/*
	 * Everything is setup, do the rename.
	 *
	 * We have to synchronize ip->meta to the underlying operation.
	 *
	 * NOTE: To avoid deadlocks we cannot lock (ip) while we are
	 *	 unlinking elements from their directories.  Locking
	 *	 the nlinks field does not lock the whole inode.
	 */
	/* hammer2_inode_lock(ip, 0); */
	if (error == 0) {
		hammer2_xop_nrename_t *xop4;

		xop4 = hammer2_xop_alloc(fdip, HAMMER2_XOP_MODIFYING);
		xop4->lhc = tlhc;
		xop4->ip_key = ip->meta.name_key;
		hammer2_xop_setip2(&xop4->head, ip);
		hammer2_xop_setip3(&xop4->head, tdip);
		hammer2_xop_setname(&xop4->head, fname, fname_len);
		hammer2_xop_setname2(&xop4->head, tname, tname_len);
		hammer2_xop_start(&xop4->head, hammer2_xop_nrename);

		error = hammer2_xop_collect(&xop4->head, 0);
		hammer2_xop_retire(&xop4->head, HAMMER2_XOPMASK_VOP);

		if (error == ENOENT)
			error = 0;
		if (error == 0 &&
		    (ip->meta.name_key & HAMMER2_DIRHASH_VISIBLE)) {
			hammer2_inode_modify(ip);
			ip->meta.name_len = tname_len;
			ip->meta.name_key = tlhc;
		}
		update_fdip = 1;
		update_tdip = 1;
	}

done2:
	/*
	 * Update directory mtimes to reflect that something changed.
	 */
	if (update_fdip || update_tdip) {
		uint64_t mtime;

		hammer2_update_time(&mtime);
		if (update_fdip) {
			hammer2_inode_modify(fdip);
			fdip->meta.mtime = mtime;
		}
		if (update_tdip) {
			hammer2_inode_modify(tdip);
			tdip->meta.mtime = mtime;
		}
	}
	hammer2_inode_unlock(ip);
	hammer2_inode_unlock(tdip);
	hammer2_inode_unlock(fdip);
	hammer2_inode_drop(ip);
	hammer2_inode_run_sideq(fdip->pmp);

	hammer2_trans_done(tdip->pmp);

	/*
	 * Issue the namecache update after unlocking all the internal
	 * hammer structures, otherwise we might deadlock.
	 */
	if (tnch_error == 0) {
		cache_unlink(ap->a_tnch);
		cache_setunresolved(ap->a_tnch);
	}
	if (error == 0)
		cache_rename(ap->a_fnch, ap->a_tnch);

	LOCKSTOP;
	return (error);
}

/*
 * hammer2_vop_ioctl { vp, command, data, fflag, cred }
 */
static
int
hammer2_vop_ioctl(struct vop_ioctl_args *ap)
{
	hammer2_inode_t *ip;
	int error;

	LOCKSTART;
	ip = VTOI(ap->a_vp);

	error = hammer2_ioctl(ip, ap->a_command, (void *)ap->a_data,
			      ap->a_fflag, ap->a_cred);
	LOCKSTOP;
	return (error);
}

static
int
hammer2_vop_mountctl(struct vop_mountctl_args *ap)
{
	struct mount *mp;
	hammer2_pfs_t *pmp;
	int rc;

	LOCKSTART;
	switch (ap->a_op) {
	case (MOUNTCTL_SET_EXPORT):
		mp = ap->a_head.a_ops->head.vv_mount;
		pmp = MPTOPMP(mp);

		if (ap->a_ctllen != sizeof(struct export_args))
			rc = (EINVAL);
		else
			rc = vfs_export(mp, &pmp->export,
					(const struct export_args *)ap->a_ctl);
		break;
	default:
		rc = vop_stdmountctl(ap);
		break;
	}
	LOCKSTOP;
	return (rc);
}

/*
 * KQFILTER
 */
static void filt_hammer2detach(struct knote *kn);
static int filt_hammer2read(struct knote *kn, long hint);
static int filt_hammer2write(struct knote *kn, long hint);
static int filt_hammer2vnode(struct knote *kn, long hint);

static struct filterops hammer2read_filtops =
	{ FILTEROP_ISFD | FILTEROP_MPSAFE,
	  NULL, filt_hammer2detach, filt_hammer2read };
static struct filterops hammer2write_filtops =
	{ FILTEROP_ISFD | FILTEROP_MPSAFE,
	  NULL, filt_hammer2detach, filt_hammer2write };
static struct filterops hammer2vnode_filtops =
	{ FILTEROP_ISFD | FILTEROP_MPSAFE,
	  NULL, filt_hammer2detach, filt_hammer2vnode };

static
int
hammer2_vop_kqfilter(struct vop_kqfilter_args *ap)
{
	struct vnode *vp = ap->a_vp;
	struct knote *kn = ap->a_kn;

	switch (kn->kn_filter) {
	case EVFILT_READ:
		kn->kn_fop = &hammer2read_filtops;
		break;
	case EVFILT_WRITE:
		kn->kn_fop = &hammer2write_filtops;
		break;
	case EVFILT_VNODE:
		kn->kn_fop = &hammer2vnode_filtops;
		break;
	default:
		return (EOPNOTSUPP);
	}

	kn->kn_hook = (caddr_t)vp;

	knote_insert(&vp->v_pollinfo.vpi_kqinfo.ki_note, kn);

	return(0);
}

static void
filt_hammer2detach(struct knote *kn)
{
	struct vnode *vp = (void *)kn->kn_hook;

	knote_remove(&vp->v_pollinfo.vpi_kqinfo.ki_note, kn);
}

static int
filt_hammer2read(struct knote *kn, long hint)
{
	struct vnode *vp = (void *)kn->kn_hook;
	hammer2_inode_t *ip = VTOI(vp);
	off_t off;

	if (hint == NOTE_REVOKE) {
		kn->kn_flags |= (EV_EOF | EV_NODATA | EV_ONESHOT);
		return(1);
	}
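	/*
	 * Remaining readable bytes: file size minus the descriptor's
	 * current file offset, clamped to INTPTR_MAX below.
	 */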
	off = ip->meta.size - kn->kn_fp->f_offset;
	kn->kn_data = (off < INTPTR_MAX) ? off : INTPTR_MAX;
	if (kn->kn_sfflags & NOTE_OLDAPI)
		return(1);
	return (kn->kn_data != 0);
}

static int
filt_hammer2write(struct knote *kn, long hint)
{
	if (hint == NOTE_REVOKE)
		kn->kn_flags |= (EV_EOF | EV_NODATA | EV_ONESHOT);
	kn->kn_data = 0;
	return (1);
}

static int
filt_hammer2vnode(struct knote *kn, long hint)
{
	if (kn->kn_sfflags & hint)
		kn->kn_fflags |= hint;
	if (hint == NOTE_REVOKE) {
		kn->kn_flags |= (EV_EOF | EV_NODATA);
		return (1);
	}
	return (kn->kn_fflags != 0);
}

/*
 * FIFO VOPS
 */
static
int
hammer2_vop_markatime(struct vop_markatime_args *ap)
{
	hammer2_inode_t *ip;
	struct vnode *vp;

	vp = ap->a_vp;
	ip = VTOI(vp);

	if (ip->pmp->ronly)
		return(EROFS);
	return(0);
}

static
int
hammer2_vop_fifokqfilter(struct vop_kqfilter_args *ap)
{
	int error;

	error = VOCALL(&fifo_vnode_vops, &ap->a_head);
	if (error)
		error = hammer2_vop_kqfilter(ap);
	return(error);
}
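
/*
 * (Note: hammer2_vop_bmap and hammer2_vop_strategy, referenced in the
 *  vector below, are defined outside this file, presumably in
 *  hammer2_strategy.c, which supplies the logical I/O path.)
 */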

/*
 * VOPS vector
 */
struct vop_ops hammer2_vnode_vops = {
	.vop_default	= vop_defaultop,
	.vop_fsync	= hammer2_vop_fsync,
	.vop_getpages	= vop_stdgetpages,
	.vop_putpages	= vop_stdputpages,
	.vop_access	= hammer2_vop_access,
	.vop_advlock	= hammer2_vop_advlock,
	.vop_close	= hammer2_vop_close,
	.vop_nlink	= hammer2_vop_nlink,
	.vop_ncreate	= hammer2_vop_ncreate,
	.vop_nsymlink	= hammer2_vop_nsymlink,
	.vop_nremove	= hammer2_vop_nremove,
	.vop_nrmdir	= hammer2_vop_nrmdir,
	.vop_nrename	= hammer2_vop_nrename,
	.vop_getattr	= hammer2_vop_getattr,
	.vop_setattr	= hammer2_vop_setattr,
	.vop_readdir	= hammer2_vop_readdir,
	.vop_readlink	= hammer2_vop_readlink,
	.vop_read	= hammer2_vop_read,
	.vop_write	= hammer2_vop_write,
	.vop_open	= hammer2_vop_open,
	.vop_inactive	= hammer2_vop_inactive,
	.vop_reclaim	= hammer2_vop_reclaim,
	.vop_nresolve	= hammer2_vop_nresolve,
	.vop_nlookupdotdot = hammer2_vop_nlookupdotdot,
	.vop_nmkdir	= hammer2_vop_nmkdir,
	.vop_nmknod	= hammer2_vop_nmknod,
	.vop_ioctl	= hammer2_vop_ioctl,
	.vop_mountctl	= hammer2_vop_mountctl,
	.vop_bmap	= hammer2_vop_bmap,
	.vop_strategy	= hammer2_vop_strategy,
	.vop_kqfilter	= hammer2_vop_kqfilter
};

struct vop_ops hammer2_spec_vops = {
	.vop_default	= vop_defaultop,
	.vop_fsync	= hammer2_vop_fsync,
	.vop_read	= vop_stdnoread,
	.vop_write	= vop_stdnowrite,
	.vop_access	= hammer2_vop_access,
	.vop_close	= hammer2_vop_close,
	.vop_markatime	= hammer2_vop_markatime,
	.vop_getattr	= hammer2_vop_getattr,
	.vop_inactive	= hammer2_vop_inactive,
	.vop_reclaim	= hammer2_vop_reclaim,
	.vop_setattr	= hammer2_vop_setattr
};

struct vop_ops hammer2_fifo_vops = {
	.vop_default	= fifo_vnoperate,
	.vop_fsync	= hammer2_vop_fsync,
#if 0
	.vop_read	= hammer2_vop_fiforead,
	.vop_write	= hammer2_vop_fifowrite,
#endif
	.vop_access	= hammer2_vop_access,
#if 0
	.vop_close	= hammer2_vop_fifoclose,
#endif
	.vop_markatime	= hammer2_vop_markatime,
	.vop_getattr	= hammer2_vop_getattr,
	.vop_inactive	= hammer2_vop_inactive,
	.vop_reclaim	= hammer2_vop_reclaim,
	.vop_setattr	= hammer2_vop_setattr,
	.vop_kqfilter	= hammer2_vop_fifokqfilter
};