hammer2 - Adjust blockref to create an embedded area, start dirent work
[dragonfly.git] / sys / vfs / hammer2 / hammer2_vnops.c
blob 5dcfc85f7a23a02a565360405b1eff2f58aad7a8
1 /*
2 * Copyright (c) 2011-2015 The DragonFly Project. All rights reserved.
4 * This code is derived from software contributed to The DragonFly Project
5 * by Matthew Dillon <dillon@dragonflybsd.org>
6 * by Venkatesh Srinivas <vsrinivas@dragonflybsd.org>
7 * by Daniel Flores (GSOC 2013 - mentored by Matthew Dillon, compression)
9 * Redistribution and use in source and binary forms, with or without
10 * modification, are permitted provided that the following conditions
11 * are met:
13 * 1. Redistributions of source code must retain the above copyright
14 * notice, this list of conditions and the following disclaimer.
15 * 2. Redistributions in binary form must reproduce the above copyright
16 * notice, this list of conditions and the following disclaimer in
17 * the documentation and/or other materials provided with the
18 * distribution.
19 * 3. Neither the name of The DragonFly Project nor the names of its
20 * contributors may be used to endorse or promote products derived
21 * from this software without specific, prior written permission.
23 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
24 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
25 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
26 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
27 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
28 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
29 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
30 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
31 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
32 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
33 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
34 * SUCH DAMAGE.
37 * Kernel Filesystem interface
39 * NOTE! local ipdata pointers must be reloaded on any modifying operation
40 * to the inode as its underlying chain may have changed.
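/*
 * Editor's sketch (illustrative, not part of the original source): the
 * safe pattern implied by the NOTE above is to re-fetch ipdata after
 * any call that can modify the inode, rather than caching the pointer:
 *
 *	ripdata = &hammer2_cluster_rdata(cluster)->ipdata;
 *	hammer2_inode_modify(ip);		// may COW the chain
 *	ripdata = &hammer2_cluster_rdata(cluster)->ipdata;  // reload
 */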
43 #include <sys/param.h>
44 #include <sys/systm.h>
45 #include <sys/kernel.h>
46 #include <sys/fcntl.h>
47 #include <sys/buf.h>
48 #include <sys/proc.h>
49 #include <sys/namei.h>
50 #include <sys/mount.h>
51 #include <sys/vnode.h>
52 #include <sys/mountctl.h>
53 #include <sys/dirent.h>
54 #include <sys/uio.h>
55 #include <sys/objcache.h>
56 #include <sys/event.h>
57 #include <sys/file.h>
58 #include <vfs/fifofs/fifo.h>
60 #include "hammer2.h"
62 static int hammer2_read_file(hammer2_inode_t *ip, struct uio *uio,
63 int seqcount);
64 static int hammer2_write_file(hammer2_inode_t *ip, struct uio *uio,
65 int ioflag, int seqcount);
66 static void hammer2_extend_file(hammer2_inode_t *ip, hammer2_key_t nsize);
67 static void hammer2_truncate_file(hammer2_inode_t *ip, hammer2_key_t nsize);
69 struct objcache *cache_xops;
71 static __inline
72 void
73 hammer2_knote(struct vnode *vp, int flags)
75 if (flags)
76 KNOTE(&vp->v_pollinfo.vpi_kqinfo.ki_note, flags);
80 * Last reference to a vnode is going away but it is still cached.
82 static
83 int
84 hammer2_vop_inactive(struct vop_inactive_args *ap)
86 hammer2_inode_t *ip;
87 struct vnode *vp;
89 LOCKSTART;
90 vp = ap->a_vp;
91 ip = VTOI(vp);
94 * Degenerate case
96 if (ip == NULL) {
97 vrecycle(vp);
98 LOCKSTOP;
99 return (0);
103 * Check for deleted inodes and recycle immediately on the last
104 * release. Be sure to destroy any left-over buffer cache buffers
105 * so we do not waste time trying to flush them.
107 * Note that deleting the file block chains under the inode chain
108 * would just be a waste of energy, so don't do it.
110 * WARNING: nvtruncbuf() can only be safely called without the inode
111 * lock held due to the way our write thread works.
113 if (ip->flags & HAMMER2_INODE_ISUNLINKED) {
114 hammer2_key_t lbase;
115 int nblksize;
118 * Detect updates to the embedded data which may be
119 * synchronized by the strategy code. Simply mark the
120 * inode modified so it gets picked up by our normal flush.
122 nblksize = hammer2_calc_logical(ip, 0, &lbase, NULL);
123 nvtruncbuf(vp, 0, nblksize, 0, 0);
124 vrecycle(vp);
126 LOCKSTOP;
127 return (0);
131 * Reclaim a vnode so that it can be reused; after the inode is
132 * disassociated, the filesystem must manage it alone.
134 static
136 hammer2_vop_reclaim(struct vop_reclaim_args *ap)
138 hammer2_inode_t *ip;
139 hammer2_pfs_t *pmp;
140 struct vnode *vp;
142 LOCKSTART;
143 vp = ap->a_vp;
144 ip = VTOI(vp);
145 if (ip == NULL) {
146 LOCKSTOP;
147 return(0);
149 pmp = ip->pmp;
152 * The final close of a deleted file or directory marks it for
153 * destruction. The DELETED flag allows the flusher to shortcut
154 * any modified blocks still unflushed (that is, just ignore them).
156 * HAMMER2 usually does not try to optimize the freemap by returning
157 * deleted blocks to it as it does not usually know how many snapshots
158 * might be referencing portions of the file/dir.
160 vp->v_data = NULL;
161 ip->vp = NULL;
164 	 * NOTE! We do not attempt to flush chains here; flushing is
165 * really fragile and could also deadlock.
167 vclrisdirty(vp);
170 * This occurs if the inode was unlinked while open. Reclamation of
171 * these inodes requires processing we cannot safely do here so add
172 * the inode to the sideq in that situation.
174 * A modified inode may require chain synchronization which will no
175 * longer be driven by a sync or fsync without the vnode, also use
176 * the sideq for that.
178 * A reclaim can occur at any time so we cannot safely start a
179 * transaction to handle reclamation of unlinked files. Instead,
180 * the ip is left with a reference and placed on a linked list and
181 * handled later on.
184 if ((ip->flags & (HAMMER2_INODE_ISUNLINKED |
185 HAMMER2_INODE_MODIFIED |
186 HAMMER2_INODE_RESIZED)) &&
187 (ip->flags & HAMMER2_INODE_ISDELETED) == 0) {
188 hammer2_inode_sideq_t *ipul;
190 ipul = kmalloc(sizeof(*ipul), pmp->minode, M_WAITOK | M_ZERO);
191 ipul->ip = ip;
193 hammer2_spin_ex(&pmp->list_spin);
194 if ((ip->flags & HAMMER2_INODE_ONSIDEQ) == 0) {
195 /* ref -> sideq */
196 atomic_set_int(&ip->flags, HAMMER2_INODE_ONSIDEQ);
197 TAILQ_INSERT_TAIL(&pmp->sideq, ipul, entry);
198 hammer2_spin_unex(&pmp->list_spin);
199 } else {
200 hammer2_spin_unex(&pmp->list_spin);
201 kfree(ipul, pmp->minode);
202 hammer2_inode_drop(ip); /* vp ref */
204 /* retain ref from vp for ipul */
205 } else {
206 hammer2_inode_drop(ip); /* vp ref */
210 * XXX handle background sync when ip dirty, kernel will no longer
211 * notify us regarding this inode because there is no longer a
212 * vnode attached to it.
215 LOCKSTOP;
216 return (0);
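/*
 * Editor's note (not in the original source): entries queued on
 * pmp->sideq above are drained later by hammer2_inode_run_sideq(),
 * which this file invokes from nremove/nrmdir/nrename while a
 * transaction is already held, e.g.:
 *
 *	hammer2_inode_run_sideq(dip->pmp);
 *	hammer2_trans_done(dip->pmp);
 */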
219 static
221 hammer2_vop_fsync(struct vop_fsync_args *ap)
223 hammer2_inode_t *ip;
224 struct vnode *vp;
226 LOCKSTART;
227 vp = ap->a_vp;
228 ip = VTOI(vp);
230 #if 0
231 /* XXX can't do this yet */
232 hammer2_trans_init(ip->pmp, HAMMER2_TRANS_ISFLUSH);
233 vfsync(vp, ap->a_waitfor, 1, NULL, NULL);
234 #endif
235 hammer2_trans_init(ip->pmp, 0);
236 vfsync(vp, ap->a_waitfor, 1, NULL, NULL);
239 * Calling chain_flush here creates a lot of duplicative
240 * COW operations due to non-optimal vnode ordering.
242 * Only do it for an actual fsync() syscall. The other forms
243 * which call this function will eventually call chain_flush
244 * on the volume root as a catch-all, which is far more optimal.
246 hammer2_inode_lock(ip, 0);
247 if (ip->flags & HAMMER2_INODE_MODIFIED)
248 hammer2_inode_chain_sync(ip);
249 hammer2_inode_unlock(ip);
250 hammer2_trans_done(ip->pmp);
252 LOCKSTOP;
253 return (0);
256 static
258 hammer2_vop_access(struct vop_access_args *ap)
260 hammer2_inode_t *ip = VTOI(ap->a_vp);
261 uid_t uid;
262 gid_t gid;
263 int error;
265 LOCKSTART;
266 hammer2_inode_lock(ip, HAMMER2_RESOLVE_SHARED);
267 uid = hammer2_to_unix_xid(&ip->meta.uid);
268 gid = hammer2_to_unix_xid(&ip->meta.gid);
269 error = vop_helper_access(ap, uid, gid, ip->meta.mode, ip->meta.uflags);
270 hammer2_inode_unlock(ip);
272 LOCKSTOP;
273 return (error);
276 static
278 hammer2_vop_getattr(struct vop_getattr_args *ap)
280 hammer2_pfs_t *pmp;
281 hammer2_inode_t *ip;
282 struct vnode *vp;
283 struct vattr *vap;
284 hammer2_chain_t *chain;
285 int i;
287 LOCKSTART;
288 vp = ap->a_vp;
289 vap = ap->a_vap;
291 ip = VTOI(vp);
292 pmp = ip->pmp;
294 hammer2_inode_lock(ip, HAMMER2_RESOLVE_SHARED);
296 vap->va_fsid = pmp->mp->mnt_stat.f_fsid.val[0];
297 vap->va_fileid = ip->meta.inum;
298 vap->va_mode = ip->meta.mode;
299 vap->va_nlink = ip->meta.nlinks;
300 vap->va_uid = hammer2_to_unix_xid(&ip->meta.uid);
301 vap->va_gid = hammer2_to_unix_xid(&ip->meta.gid);
302 vap->va_rmajor = 0;
303 vap->va_rminor = 0;
304 vap->va_size = ip->meta.size; /* protected by shared lock */
305 vap->va_blocksize = HAMMER2_PBUFSIZE;
306 vap->va_flags = ip->meta.uflags;
307 hammer2_time_to_timespec(ip->meta.ctime, &vap->va_ctime);
308 hammer2_time_to_timespec(ip->meta.mtime, &vap->va_mtime);
309 hammer2_time_to_timespec(ip->meta.mtime, &vap->va_atime);
310 vap->va_gen = 1;
311 vap->va_bytes = 0;
312 if (ip->meta.type == HAMMER2_OBJTYPE_DIRECTORY) {
314 * Can't really calculate directory use sans the files under
315 * it, just assume one block for now.
317 vap->va_bytes += HAMMER2_INODE_BYTES;
318 } else {
319 for (i = 0; i < ip->cluster.nchains; ++i) {
320 if ((chain = ip->cluster.array[i].chain) != NULL) {
321 if (vap->va_bytes <
322 chain->bref.embed.stats.data_count) {
323 vap->va_bytes =
324 chain->bref.embed.stats.data_count;
329 vap->va_type = hammer2_get_vtype(ip->meta.type);
330 vap->va_filerev = 0;
331 vap->va_uid_uuid = ip->meta.uid;
332 vap->va_gid_uuid = ip->meta.gid;
333 vap->va_vaflags = VA_UID_UUID_VALID | VA_GID_UUID_VALID |
334 VA_FSID_UUID_VALID;
336 hammer2_inode_unlock(ip);
338 LOCKSTOP;
339 return (0);
342 static
344 hammer2_vop_setattr(struct vop_setattr_args *ap)
346 hammer2_inode_t *ip;
347 struct vnode *vp;
348 struct vattr *vap;
349 int error;
350 int kflags = 0;
351 uint64_t ctime;
353 LOCKSTART;
354 vp = ap->a_vp;
355 vap = ap->a_vap;
356 hammer2_update_time(&ctime);
358 ip = VTOI(vp);
360 if (ip->pmp->ronly) {
361 LOCKSTOP;
362 return(EROFS);
365 hammer2_pfs_memory_wait(ip->pmp);
366 hammer2_trans_init(ip->pmp, 0);
367 hammer2_inode_lock(ip, 0);
368 error = 0;
370 if (vap->va_flags != VNOVAL) {
371 uint32_t flags;
373 flags = ip->meta.uflags;
374 error = vop_helper_setattr_flags(&flags, vap->va_flags,
375 hammer2_to_unix_xid(&ip->meta.uid),
376 ap->a_cred);
377 if (error == 0) {
378 if (ip->meta.uflags != flags) {
379 hammer2_inode_modify(ip);
380 ip->meta.uflags = flags;
381 ip->meta.ctime = ctime;
382 kflags |= NOTE_ATTRIB;
384 if (ip->meta.uflags & (IMMUTABLE | APPEND)) {
385 error = 0;
386 goto done;
389 goto done;
391 if (ip->meta.uflags & (IMMUTABLE | APPEND)) {
392 error = EPERM;
393 goto done;
395 if (vap->va_uid != (uid_t)VNOVAL || vap->va_gid != (gid_t)VNOVAL) {
396 mode_t cur_mode = ip->meta.mode;
397 uid_t cur_uid = hammer2_to_unix_xid(&ip->meta.uid);
398 gid_t cur_gid = hammer2_to_unix_xid(&ip->meta.gid);
399 uuid_t uuid_uid;
400 uuid_t uuid_gid;
402 error = vop_helper_chown(ap->a_vp, vap->va_uid, vap->va_gid,
403 ap->a_cred,
404 &cur_uid, &cur_gid, &cur_mode);
405 if (error == 0) {
406 hammer2_guid_to_uuid(&uuid_uid, cur_uid);
407 hammer2_guid_to_uuid(&uuid_gid, cur_gid);
408 if (bcmp(&uuid_uid, &ip->meta.uid, sizeof(uuid_uid)) ||
409 bcmp(&uuid_gid, &ip->meta.gid, sizeof(uuid_gid)) ||
410 ip->meta.mode != cur_mode
412 hammer2_inode_modify(ip);
413 ip->meta.uid = uuid_uid;
414 ip->meta.gid = uuid_gid;
415 ip->meta.mode = cur_mode;
416 ip->meta.ctime = ctime;
418 kflags |= NOTE_ATTRIB;
423 * Resize the file
425 if (vap->va_size != VNOVAL && ip->meta.size != vap->va_size) {
426 switch(vp->v_type) {
427 case VREG:
428 if (vap->va_size == ip->meta.size)
429 break;
430 if (vap->va_size < ip->meta.size) {
431 hammer2_mtx_ex(&ip->truncate_lock);
432 hammer2_truncate_file(ip, vap->va_size);
433 hammer2_mtx_unlock(&ip->truncate_lock);
434 } else {
435 hammer2_extend_file(ip, vap->va_size);
437 hammer2_inode_modify(ip);
438 ip->meta.mtime = ctime;
439 break;
440 default:
441 error = EINVAL;
442 goto done;
445 #if 0
446 /* atime not supported */
447 if (vap->va_atime.tv_sec != VNOVAL) {
448 hammer2_inode_modify(ip);
449 ip->meta.atime = hammer2_timespec_to_time(&vap->va_atime);
450 kflags |= NOTE_ATTRIB;
452 #endif
453 if (vap->va_mode != (mode_t)VNOVAL) {
454 mode_t cur_mode = ip->meta.mode;
455 uid_t cur_uid = hammer2_to_unix_xid(&ip->meta.uid);
456 gid_t cur_gid = hammer2_to_unix_xid(&ip->meta.gid);
458 error = vop_helper_chmod(ap->a_vp, vap->va_mode, ap->a_cred,
459 cur_uid, cur_gid, &cur_mode);
460 if (error == 0 && ip->meta.mode != cur_mode) {
461 hammer2_inode_modify(ip);
462 ip->meta.mode = cur_mode;
463 ip->meta.ctime = ctime;
464 kflags |= NOTE_ATTRIB;
468 if (vap->va_mtime.tv_sec != VNOVAL) {
469 hammer2_inode_modify(ip);
470 ip->meta.mtime = hammer2_timespec_to_time(&vap->va_mtime);
471 kflags |= NOTE_ATTRIB;
474 done:
476 	 * If a truncation occurred we must call hammer2_inode_chain_sync()
477 	 * now in order to trim the related data chains, otherwise a later
478 	 * expansion can cause havoc.
480 	 * If an extend occurred that changed the DIRECTDATA state, we must
481 	 * call hammer2_inode_chain_sync() now in order to prepare the
482 	 * inode's indirect block table.
484 if (ip->flags & HAMMER2_INODE_RESIZED)
485 hammer2_inode_chain_sync(ip);
488 * Cleanup.
490 hammer2_inode_unlock(ip);
491 hammer2_trans_done(ip->pmp);
492 hammer2_knote(ip->vp, kflags);
494 LOCKSTOP;
495 return (error);
498 static
500 hammer2_vop_readdir(struct vop_readdir_args *ap)
502 hammer2_xop_readdir_t *xop;
503 hammer2_blockref_t bref;
504 hammer2_inode_t *ip;
505 hammer2_tid_t inum;
506 hammer2_key_t lkey;
507 struct uio *uio;
508 off_t *cookies;
509 off_t saveoff;
510 int cookie_index;
511 int ncookies;
512 int error;
513 int eofflag;
514 int dtype;
515 int r;
517 LOCKSTART;
518 ip = VTOI(ap->a_vp);
519 uio = ap->a_uio;
520 saveoff = uio->uio_offset;
521 eofflag = 0;
522 error = 0;
525 	 * Set up directory entry cookies if requested
527 if (ap->a_ncookies) {
528 ncookies = uio->uio_resid / 16 + 1;
529 if (ncookies > 1024)
530 ncookies = 1024;
531 cookies = kmalloc(ncookies * sizeof(off_t), M_TEMP, M_WAITOK);
532 } else {
533 ncookies = -1;
534 cookies = NULL;
536 cookie_index = 0;
538 hammer2_inode_lock(ip, HAMMER2_RESOLVE_SHARED);
541 * Handle artificial entries. To ensure that only positive 64 bit
542 * quantities are returned to userland we always strip off bit 63.
543 * The hash code is designed such that codes 0x0000-0x7FFF are not
544 	 * used, allowing us to use these codes for artificial entries.
546 * Entry 0 is used for '.' and entry 1 is used for '..'. Do not
547 * allow '..' to cross the mount point into (e.g.) the super-root.
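/*
 * Editor's sketch of the readdir offset space, derived from the code
 * below (not from the original comments):
 *
 *	saveoff 0    -> artificial "."
 *	saveoff 1    -> artificial ".."
 *	saveoff >= 2 -> real entries, scanned at
 *			lkey = saveoff | HAMMER2_DIRHASH_VISIBLE
 *
 * On completion the visibility bit is masked off again so only
 * positive 64-bit offsets are handed back to userland.
 */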
549 if (saveoff == 0) {
550 inum = ip->meta.inum & HAMMER2_DIRHASH_USERMSK;
551 r = vop_write_dirent(&error, uio, inum, DT_DIR, 1, ".");
552 if (r)
553 goto done;
554 if (cookies)
555 cookies[cookie_index] = saveoff;
556 ++saveoff;
557 ++cookie_index;
558 if (cookie_index == ncookies)
559 goto done;
562 if (saveoff == 1) {
564 	 * Be careful with lock order when accessing ".."
566 * (ip is the current dir. xip is the parent dir).
568 inum = ip->meta.inum & HAMMER2_DIRHASH_USERMSK;
569 if (ip != ip->pmp->iroot)
570 inum = ip->meta.iparent & HAMMER2_DIRHASH_USERMSK;
571 r = vop_write_dirent(&error, uio, inum, DT_DIR, 2, "..");
572 if (r)
573 goto done;
574 if (cookies)
575 cookies[cookie_index] = saveoff;
576 ++saveoff;
577 ++cookie_index;
578 if (cookie_index == ncookies)
579 goto done;
582 lkey = saveoff | HAMMER2_DIRHASH_VISIBLE;
583 if (hammer2_debug & 0x0020)
584 kprintf("readdir: lkey %016jx\n", lkey);
585 if (error)
586 goto done;
589 * Use XOP for cluster scan.
591 * parent is the inode cluster, already locked for us. Don't
592 * double lock shared locks as this will screw up upgrades.
594 xop = hammer2_xop_alloc(ip, 0);
595 xop->lkey = lkey;
596 hammer2_xop_start(&xop->head, hammer2_xop_readdir);
598 for (;;) {
599 const hammer2_inode_data_t *ripdata;
601 error = hammer2_xop_collect(&xop->head, 0);
602 if (error)
603 break;
604 if (cookie_index == ncookies)
605 break;
606 if (hammer2_debug & 0x0020)
607 kprintf("cluster chain %p %p\n",
608 xop->head.cluster.focus,
609 (xop->head.cluster.focus ?
610 xop->head.cluster.focus->data : (void *)-1));
611 ripdata = &hammer2_cluster_rdata(&xop->head.cluster)->ipdata;
612 hammer2_cluster_bref(&xop->head.cluster, &bref);
613 if (bref.type == HAMMER2_BREF_TYPE_INODE) {
614 dtype = hammer2_get_dtype(ripdata);
615 saveoff = bref.key & HAMMER2_DIRHASH_USERMSK;
616 r = vop_write_dirent(&error, uio,
617 ripdata->meta.inum &
618 HAMMER2_DIRHASH_USERMSK,
619 dtype,
620 ripdata->meta.name_len,
621 ripdata->filename);
622 if (r)
623 break;
624 if (cookies)
625 cookies[cookie_index] = saveoff;
626 ++cookie_index;
627 } else {
628 /* XXX chain error */
629 kprintf("bad chain type readdir %d\n", bref.type);
632 hammer2_xop_retire(&xop->head, HAMMER2_XOPMASK_VOP);
633 if (error == ENOENT) {
634 error = 0;
635 eofflag = 1;
636 saveoff = (hammer2_key_t)-1;
637 } else {
638 saveoff = bref.key & HAMMER2_DIRHASH_USERMSK;
640 done:
641 hammer2_inode_unlock(ip);
642 if (ap->a_eofflag)
643 *ap->a_eofflag = eofflag;
644 if (hammer2_debug & 0x0020)
645 kprintf("readdir: done at %016jx\n", saveoff);
646 uio->uio_offset = saveoff & ~HAMMER2_DIRHASH_VISIBLE;
647 if (error && cookie_index == 0) {
648 if (cookies) {
649 kfree(cookies, M_TEMP);
650 *ap->a_ncookies = 0;
651 *ap->a_cookies = NULL;
653 } else {
654 if (cookies) {
655 *ap->a_ncookies = cookie_index;
656 *ap->a_cookies = cookies;
659 LOCKSTOP;
660 return (error);
664 * hammer2_vop_readlink { vp, uio, cred }
666 static
668 hammer2_vop_readlink(struct vop_readlink_args *ap)
670 struct vnode *vp;
671 hammer2_inode_t *ip;
672 int error;
674 vp = ap->a_vp;
675 if (vp->v_type != VLNK)
676 return (EINVAL);
677 ip = VTOI(vp);
679 error = hammer2_read_file(ip, ap->a_uio, 0);
680 return (error);
683 static
685 hammer2_vop_read(struct vop_read_args *ap)
687 struct vnode *vp;
688 hammer2_inode_t *ip;
689 struct uio *uio;
690 int error;
691 int seqcount;
692 int bigread;
695 * Read operations supported on this vnode?
697 vp = ap->a_vp;
698 if (vp->v_type != VREG)
699 return (EINVAL);
702 * Misc
704 ip = VTOI(vp);
705 uio = ap->a_uio;
706 error = 0;
708 seqcount = ap->a_ioflag >> 16;
709 bigread = (uio->uio_resid > 100 * 1024 * 1024);
711 error = hammer2_read_file(ip, uio, seqcount);
712 return (error);
715 static
717 hammer2_vop_write(struct vop_write_args *ap)
719 hammer2_inode_t *ip;
720 thread_t td;
721 struct vnode *vp;
722 struct uio *uio;
723 int error;
724 int seqcount;
727 	 * Write operations supported on this vnode?
729 vp = ap->a_vp;
730 if (vp->v_type != VREG)
731 return (EINVAL);
734 * Misc
736 ip = VTOI(vp);
737 uio = ap->a_uio;
738 error = 0;
739 if (ip->pmp->ronly) {
740 return (EROFS);
743 seqcount = ap->a_ioflag >> 16;
746 * Check resource limit
748 if (uio->uio_resid > 0 && (td = uio->uio_td) != NULL && td->td_proc &&
749 uio->uio_offset + uio->uio_resid >
750 td->td_proc->p_rlimit[RLIMIT_FSIZE].rlim_cur) {
751 lwpsignal(td->td_proc, td->td_lwp, SIGXFSZ);
752 return (EFBIG);
756 * The transaction interlocks against flush initiations
757 * (note: but will run concurrently with the actual flush).
759 * To avoid deadlocking against the VM system, we must flag any
760 * transaction related to the buffer cache or other direct
761 * VM page manipulation.
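/*
 * Editor's note (not in the original source): UIO_NOCOPY writes arrive
 * from the VM/pageout path via vop_stdputpages() (see the matching
 * UIO_NOCOPY handling in hammer2_write_file() below), which is why
 * they are tagged HAMMER2_TRANS_BUFCACHE here instead of being run as
 * normal transactions.
 */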
763 if (uio->uio_segflg == UIO_NOCOPY)
764 hammer2_trans_init(ip->pmp, HAMMER2_TRANS_BUFCACHE);
765 else
766 hammer2_trans_init(ip->pmp, 0);
767 error = hammer2_write_file(ip, uio, ap->a_ioflag, seqcount);
768 hammer2_trans_done(ip->pmp);
770 return (error);
774 * Perform read operations on a file or symlink given an UNLOCKED
775 * inode and uio.
777 * The passed ip is not locked.
779 static
781 hammer2_read_file(hammer2_inode_t *ip, struct uio *uio, int seqcount)
783 hammer2_off_t size;
784 struct buf *bp;
785 int error;
787 error = 0;
790 * UIO read loop.
792 * WARNING! Assumes that the kernel interlocks size changes at the
793 * vnode level.
795 hammer2_mtx_sh(&ip->lock);
796 hammer2_mtx_sh(&ip->truncate_lock);
797 size = ip->meta.size;
798 hammer2_mtx_unlock(&ip->lock);
800 while (uio->uio_resid > 0 && uio->uio_offset < size) {
801 hammer2_key_t lbase;
802 hammer2_key_t leof;
803 int lblksize;
804 int loff;
805 int n;
807 lblksize = hammer2_calc_logical(ip, uio->uio_offset,
808 &lbase, &leof);
810 #if 1
811 error = cluster_read(ip->vp, leof, lbase, lblksize,
812 uio->uio_resid, seqcount * MAXBSIZE,
813 &bp);
814 #else
815 if (uio->uio_segflg == UIO_NOCOPY) {
816 bp = getblk(ip->vp, lbase, lblksize, GETBLK_BHEAVY, 0);
817 if (bp->b_flags & B_CACHE) {
818 int i;
819 int j = 0;
820 if (bp->b_xio.xio_npages != 16)
821 kprintf("NPAGES BAD\n");
822 for (i = 0; i < bp->b_xio.xio_npages; ++i) {
823 vm_page_t m;
824 m = bp->b_xio.xio_pages[i];
825 if (m == NULL || m->valid == 0) {
826 kprintf("bp %016jx %016jx pg %d inv",
827 lbase, leof, i);
828 if (m)
829 kprintf("m->object %p/%p", m->object, ip->vp->v_object);
830 kprintf("\n");
831 j = 1;
834 if (j)
835 kprintf("b_flags %08x, b_error %d\n", bp->b_flags, bp->b_error);
837 bqrelse(bp);
839 error = bread(ip->vp, lbase, lblksize, &bp);
840 #endif
841 if (error) {
842 brelse(bp);
843 break;
845 loff = (int)(uio->uio_offset - lbase);
846 n = lblksize - loff;
847 if (n > uio->uio_resid)
848 n = uio->uio_resid;
849 if (n > size - uio->uio_offset)
850 n = (int)(size - uio->uio_offset);
851 bp->b_flags |= B_AGE;
852 uiomovebp(bp, (char *)bp->b_data + loff, n, uio);
853 bqrelse(bp);
855 hammer2_mtx_unlock(&ip->truncate_lock);
857 return (error);
861 * Write to the file represented by the inode via the logical buffer cache.
862 * The inode may represent a regular file or a symlink.
864 * The inode must not be locked.
866 static
868 hammer2_write_file(hammer2_inode_t *ip, struct uio *uio,
869 int ioflag, int seqcount)
871 hammer2_key_t old_eof;
872 hammer2_key_t new_eof;
873 struct buf *bp;
874 int kflags;
875 int error;
876 int modified;
879 	 * Set up if appending
881 * WARNING! Assumes that the kernel interlocks size changes at the
882 * vnode level.
884 hammer2_mtx_ex(&ip->lock);
885 hammer2_mtx_sh(&ip->truncate_lock);
886 if (ioflag & IO_APPEND)
887 uio->uio_offset = ip->meta.size;
888 old_eof = ip->meta.size;
891 * Extend the file if necessary. If the write fails at some point
892 * we will truncate it back down to cover as much as we were able
893 * to write.
895 * Doing this now makes it easier to calculate buffer sizes in
896 * the loop.
898 kflags = 0;
899 error = 0;
900 modified = 0;
902 if (uio->uio_offset + uio->uio_resid > old_eof) {
903 new_eof = uio->uio_offset + uio->uio_resid;
904 modified = 1;
905 hammer2_extend_file(ip, new_eof);
906 kflags |= NOTE_EXTEND;
907 } else {
908 new_eof = old_eof;
910 hammer2_mtx_unlock(&ip->lock);
913 * UIO write loop
915 while (uio->uio_resid > 0) {
916 hammer2_key_t lbase;
917 int trivial;
918 int endofblk;
919 int lblksize;
920 int loff;
921 int n;
924 * Don't allow the buffer build to blow out the buffer
925 * cache.
927 if ((ioflag & IO_RECURSE) == 0)
928 bwillwrite(HAMMER2_PBUFSIZE);
931 * This nominally tells us how much we can cluster and
932 * what the logical buffer size needs to be. Currently
933 * we don't try to cluster the write and just handle one
934 * block at a time.
936 lblksize = hammer2_calc_logical(ip, uio->uio_offset,
937 &lbase, NULL);
938 loff = (int)(uio->uio_offset - lbase);
940 KKASSERT(lblksize <= 65536);
943 * Calculate bytes to copy this transfer and whether the
944 * copy completely covers the buffer or not.
946 trivial = 0;
947 n = lblksize - loff;
948 if (n > uio->uio_resid) {
949 n = uio->uio_resid;
950 if (loff == lbase && uio->uio_offset + n == new_eof)
951 trivial = 1;
952 endofblk = 0;
953 } else {
954 if (loff == 0)
955 trivial = 1;
956 endofblk = 1;
958 if (lbase >= new_eof)
959 trivial = 1;
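/*
 * Editor's worked example (assuming a 64KB logical block): writing
 * 192KB at offset 0 visits three blocks, each with loff == 0 and
 * n == lblksize, so trivial = 1 and endofblk = 1 and no
 * read-modify-write is needed. Writing 100 bytes at offset 50 into
 * existing data gives loff = 50 and n = 100, so trivial stays 0 and
 * the partial-overwrite bread() path below is taken.
 */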
962 * Get the buffer
964 if (uio->uio_segflg == UIO_NOCOPY) {
966 * Issuing a write with the same data backing the
967 * buffer. Instantiate the buffer to collect the
968 * backing vm pages, then read-in any missing bits.
970 * This case is used by vop_stdputpages().
972 bp = getblk(ip->vp, lbase, lblksize, GETBLK_BHEAVY, 0);
973 if ((bp->b_flags & B_CACHE) == 0) {
974 bqrelse(bp);
975 error = bread(ip->vp, lbase, lblksize, &bp);
977 } else if (trivial) {
979 * Even though we are entirely overwriting the buffer
980 * we may still have to zero it out to avoid a
981 * mmap/write visibility issue.
983 bp = getblk(ip->vp, lbase, lblksize, GETBLK_BHEAVY, 0);
984 if ((bp->b_flags & B_CACHE) == 0)
985 vfs_bio_clrbuf(bp);
986 } else {
988 * Partial overwrite, read in any missing bits then
989 * replace the portion being written.
991 * (The strategy code will detect zero-fill physical
992 * blocks for this case).
994 error = bread(ip->vp, lbase, lblksize, &bp);
995 if (error == 0)
996 bheavy(bp);
999 if (error) {
1000 brelse(bp);
1001 break;
1005 * Ok, copy the data in
1007 error = uiomovebp(bp, bp->b_data + loff, n, uio);
1008 kflags |= NOTE_WRITE;
1009 modified = 1;
1010 if (error) {
1011 brelse(bp);
1012 break;
1016 * WARNING: Pageout daemon will issue UIO_NOCOPY writes
1017 * with IO_SYNC or IO_ASYNC set. These writes
1018 * must be handled as the pageout daemon expects.
1020 * NOTE! H2 relies on cluster_write() here because it
1021 * cannot preallocate disk blocks at the logical
1022 * level due to not knowing what the compression
1023 * size will be at this time.
1025 * We must use cluster_write() here and we depend
1026 * on the write-behind feature to flush buffers
1027 * appropriately. If we let the buffer daemons do
1028 * it the block allocations will be all over the
1029 * map.
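/*
 * Editor's summary of the dispatch below (not in the original source):
 *
 *	IO_SYNC			-> bwrite()	synchronous
 *	IO_DIRECT + endofblk	-> bawrite()	async, issued now
 *	IO_ASYNC		-> bawrite()	async, issued now
 *	MNT_NOCLUSTERW		-> bdwrite()	delayed write
 *	otherwise		-> cluster_write() with B_CLUSTEROK
 */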
1031 if (ioflag & IO_SYNC) {
1032 bwrite(bp);
1033 } else if ((ioflag & IO_DIRECT) && endofblk) {
1034 bawrite(bp);
1035 } else if (ioflag & IO_ASYNC) {
1036 bawrite(bp);
1037 } else if (ip->vp->v_mount->mnt_flag & MNT_NOCLUSTERW) {
1038 bdwrite(bp);
1039 } else {
1040 #if 1
1041 bp->b_flags |= B_CLUSTEROK;
1042 cluster_write(bp, new_eof, lblksize, seqcount);
1043 #else
1044 bp->b_flags |= B_CLUSTEROK;
1045 bdwrite(bp);
1046 #endif
1051 	 * Cleanup. If we extended the file EOF but failed to write it all
1052 	 * the way through, the entire write is a failure and we have to back up.
1054 if (error && new_eof != old_eof) {
1055 hammer2_mtx_unlock(&ip->truncate_lock);
1056 hammer2_mtx_ex(&ip->lock);
1057 hammer2_mtx_ex(&ip->truncate_lock);
1058 hammer2_truncate_file(ip, old_eof);
1059 if (ip->flags & HAMMER2_INODE_MODIFIED)
1060 hammer2_inode_chain_sync(ip);
1061 hammer2_mtx_unlock(&ip->lock);
1062 } else if (modified) {
1063 hammer2_mtx_ex(&ip->lock);
1064 hammer2_inode_modify(ip);
1065 hammer2_update_time(&ip->meta.mtime);
1066 if (ip->flags & HAMMER2_INODE_MODIFIED)
1067 hammer2_inode_chain_sync(ip);
1068 hammer2_mtx_unlock(&ip->lock);
1069 hammer2_knote(ip->vp, kflags);
1071 hammer2_trans_assert_strategy(ip->pmp);
1072 hammer2_mtx_unlock(&ip->truncate_lock);
1074 return error;
1078 * Truncate the size of a file. The inode must not be locked.
1080 * We must unconditionally set HAMMER2_INODE_RESIZED to properly
1081 * ensure that any on-media data beyond the new file EOF has been destroyed.
1083 * WARNING: nvtruncbuf() can only be safely called without the inode lock
1084 * held due to the way our write thread works. If the truncation
1085 * occurs in the middle of a buffer, nvtruncbuf() is responsible
1086 * for dirtying that buffer and zeroing out trailing bytes.
1088 * WARNING! Assumes that the kernel interlocks size changes at the
1089 * vnode level.
1091 * WARNING! Caller assumes responsibility for removing dead blocks
1092 * if INODE_RESIZED is set.
1094 static
1095 void
1096 hammer2_truncate_file(hammer2_inode_t *ip, hammer2_key_t nsize)
1098 hammer2_key_t lbase;
1099 int nblksize;
1101 LOCKSTART;
1102 hammer2_mtx_unlock(&ip->lock);
1103 if (ip->vp) {
1104 nblksize = hammer2_calc_logical(ip, nsize, &lbase, NULL);
1105 nvtruncbuf(ip->vp, nsize,
1106 nblksize, (int)nsize & (nblksize - 1),
1109 hammer2_mtx_ex(&ip->lock);
1110 KKASSERT((ip->flags & HAMMER2_INODE_RESIZED) == 0);
1111 ip->osize = ip->meta.size;
1112 ip->meta.size = nsize;
1113 atomic_set_int(&ip->flags, HAMMER2_INODE_RESIZED);
1114 hammer2_inode_modify(ip);
1115 LOCKSTOP;
1119 * Extend the size of a file. The inode must not be locked.
1121 * Even though the file size is changing, we do not have to set the
1122 * INODE_RESIZED bit unless the file size crosses the EMBEDDED_BYTES
1123 * boundary. When this occurs a hammer2_inode_chain_sync() is required
1124 * to prepare the inode cluster's indirect block table, otherwise
1125 * async execution of the strategy code will implode on us.
1127 * WARNING! Assumes that the kernel interlocks size changes at the
1128 * vnode level.
1130 * WARNING! Caller assumes responsibility for transitioning out
1131 * of the inode DIRECTDATA mode if INODE_RESIZED is set.
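/*
 * Editor's note (illustrative, not from the original source):
 * HAMMER2_EMBEDDED_BYTES is the size of the data area embedded
 * directly in the inode (512 bytes is the editor's assumption for this
 * tree). Extending a 100-byte file to 4KB crosses that boundary, so
 * the code below sets INODE_RESIZED and calls
 * hammer2_inode_chain_sync() to migrate the embedded data out into
 * real data blocks before any async strategy I/O can hit the inode.
 */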
1133 static
1134 void
1135 hammer2_extend_file(hammer2_inode_t *ip, hammer2_key_t nsize)
1137 hammer2_key_t lbase;
1138 hammer2_key_t osize;
1139 int oblksize;
1140 int nblksize;
1142 LOCKSTART;
1144 KKASSERT((ip->flags & HAMMER2_INODE_RESIZED) == 0);
1145 hammer2_inode_modify(ip);
1146 osize = ip->meta.size;
1147 ip->osize = osize;
1148 ip->meta.size = nsize;
1150 if (osize <= HAMMER2_EMBEDDED_BYTES && nsize > HAMMER2_EMBEDDED_BYTES) {
1151 atomic_set_int(&ip->flags, HAMMER2_INODE_RESIZED);
1152 hammer2_inode_chain_sync(ip);
1155 hammer2_mtx_unlock(&ip->lock);
1156 if (ip->vp) {
1157 oblksize = hammer2_calc_logical(ip, osize, &lbase, NULL);
1158 nblksize = hammer2_calc_logical(ip, nsize, &lbase, NULL);
1159 nvextendbuf(ip->vp,
1160 osize, nsize,
1161 oblksize, nblksize,
1162 -1, -1, 0);
1164 hammer2_mtx_ex(&ip->lock);
1166 LOCKSTOP;
1169 static
1171 hammer2_vop_nresolve(struct vop_nresolve_args *ap)
1173 hammer2_xop_nresolve_t *xop;
1174 hammer2_inode_t *ip;
1175 hammer2_inode_t *dip;
1176 struct namecache *ncp;
1177 struct vnode *vp;
1178 int error;
1180 LOCKSTART;
1181 dip = VTOI(ap->a_dvp);
1182 xop = hammer2_xop_alloc(dip, 0);
1184 ncp = ap->a_nch->ncp;
1185 hammer2_xop_setname(&xop->head, ncp->nc_name, ncp->nc_nlen);
1188 * Note: In DragonFly the kernel handles '.' and '..'.
1190 hammer2_inode_lock(dip, HAMMER2_RESOLVE_SHARED);
1191 hammer2_xop_start(&xop->head, hammer2_xop_nresolve);
1193 error = hammer2_xop_collect(&xop->head, 0);
1194 if (error) {
1195 ip = NULL;
1196 } else {
1197 ip = hammer2_inode_get(dip->pmp, dip, &xop->head.cluster, -1);
1199 hammer2_inode_unlock(dip);
1202 * Acquire the related vnode
1204 * NOTE: For error processing, only ENOENT resolves the namecache
1205 * entry to NULL, otherwise we just return the error and
1206 * leave the namecache unresolved.
1208 * NOTE: multiple hammer2_inode structures can be aliased to the
1209 * same chain element, for example for hardlinks. This
1210 * use case does not 'reattach' inode associations that
1211 * might already exist, but always allocates a new one.
1213 * WARNING: inode structure is locked exclusively via inode_get
1214 * but chain was locked shared. inode_unlock()
1215 * will handle it properly.
1217 if (ip) {
1218 vp = hammer2_igetv(ip, &error);
1219 if (error == 0) {
1220 vn_unlock(vp);
1221 cache_setvp(ap->a_nch, vp);
1222 } else if (error == ENOENT) {
1223 cache_setvp(ap->a_nch, NULL);
1225 hammer2_inode_unlock(ip);
1228 * The vp should not be released until after we've disposed
1229 * of our locks, because it might cause vop_inactive() to
1230 * be called.
1232 if (vp)
1233 vrele(vp);
1234 } else {
1235 error = ENOENT;
1236 cache_setvp(ap->a_nch, NULL);
1238 hammer2_xop_retire(&xop->head, HAMMER2_XOPMASK_VOP);
1239 KASSERT(error || ap->a_nch->ncp->nc_vp != NULL,
1240 ("resolve error %d/%p ap %p\n",
1241 error, ap->a_nch->ncp->nc_vp, ap));
1242 LOCKSTOP;
1244 return error;
1247 static
1249 hammer2_vop_nlookupdotdot(struct vop_nlookupdotdot_args *ap)
1251 hammer2_inode_t *dip;
1252 hammer2_tid_t inum;
1253 int error;
1255 LOCKSTART;
1256 dip = VTOI(ap->a_dvp);
1257 inum = dip->meta.iparent;
1258 *ap->a_vpp = NULL;
1260 if (inum) {
1261 error = hammer2_vfs_vget(ap->a_dvp->v_mount, NULL,
1262 inum, ap->a_vpp);
1263 } else {
1264 error = ENOENT;
1266 LOCKSTOP;
1267 return error;
1270 static
1272 hammer2_vop_nmkdir(struct vop_nmkdir_args *ap)
1274 hammer2_inode_t *dip;
1275 hammer2_inode_t *nip;
1276 struct namecache *ncp;
1277 const uint8_t *name;
1278 size_t name_len;
1279 hammer2_tid_t inum;
1280 int error;
1282 LOCKSTART;
1283 dip = VTOI(ap->a_dvp);
1284 if (dip->pmp->ronly) {
1285 LOCKSTOP;
1286 return (EROFS);
1289 ncp = ap->a_nch->ncp;
1290 name = ncp->nc_name;
1291 name_len = ncp->nc_nlen;
1293 hammer2_pfs_memory_wait(dip->pmp);
1294 hammer2_trans_init(dip->pmp, 0);
1296 inum = hammer2_trans_newinum(dip->pmp);
1299 * Create the actual inode as a hidden file in the iroot, then
1300 * create the directory entry as a hardlink to it. The creation
1301 * of the actual inode sets its nlinks to 1 which is the value
1302 * we desire.
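/*
 * Editor's sketch (not in the original source) of the two-step create
 * pattern used by nmkdir, ncreate, nmknod and nsymlink in this file:
 *
 *	1. hammer2_inode_create(pmp->iroot, ...) - the real inode,
 *	   indexed by inum under the PFS iroot.
 *	2. hammer2_inode_create(dip, ...)        - the directory entry,
 *	   created as an OBJTYPE_HARDLINK referencing that inum.
 */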
1304 nip = hammer2_inode_create(dip->pmp->iroot, dip, ap->a_vap, ap->a_cred,
1305 NULL, 0, inum,
1306 inum, 0, 0,
1307 0, &error);
1308 if (error == 0) {
1309 hammer2_inode_create(dip, dip, NULL, NULL,
1310 name, name_len, 0,
1311 nip->meta.inum,
1312 HAMMER2_OBJTYPE_HARDLINK, nip->meta.type,
1313 0, &error);
1316 if (error) {
1317 KKASSERT(nip == NULL);
1318 *ap->a_vpp = NULL;
1319 } else {
1320 *ap->a_vpp = hammer2_igetv(nip, &error);
1321 hammer2_inode_unlock(nip);
1325 * Update dip's mtime
1327 if (error == 0) {
1328 uint64_t mtime;
1330 hammer2_inode_lock(dip, HAMMER2_RESOLVE_SHARED);
1331 hammer2_update_time(&mtime);
1332 hammer2_inode_modify(dip);
1333 dip->meta.mtime = mtime;
1334 hammer2_inode_unlock(dip);
1337 hammer2_trans_done(dip->pmp);
1339 if (error == 0) {
1340 cache_setunresolved(ap->a_nch);
1341 cache_setvp(ap->a_nch, *ap->a_vpp);
1343 LOCKSTOP;
1344 return error;
1347 static
1349 hammer2_vop_open(struct vop_open_args *ap)
1351 return vop_stdopen(ap);
1355 * hammer2_vop_advlock { vp, id, op, fl, flags }
1357 static
1359 hammer2_vop_advlock(struct vop_advlock_args *ap)
1361 hammer2_inode_t *ip = VTOI(ap->a_vp);
1362 hammer2_off_t size;
1364 size = ip->meta.size;
1365 return (lf_advlock(ap, &ip->advlock, size));
1368 static
1370 hammer2_vop_close(struct vop_close_args *ap)
1372 return vop_stdclose(ap);
1376 * hammer2_vop_nlink { nch, dvp, vp, cred }
1378 * Create a hardlink from (vp) to {dvp, nch}.
1380 static
1382 hammer2_vop_nlink(struct vop_nlink_args *ap)
1384 hammer2_inode_t *tdip; /* target directory to create link in */
1385 hammer2_inode_t *ip; /* inode we are hardlinking to */
1386 struct namecache *ncp;
1387 const uint8_t *name;
1388 size_t name_len;
1389 int error;
1391 if (ap->a_dvp->v_mount != ap->a_vp->v_mount)
1392 return(EXDEV);
1394 LOCKSTART;
1395 tdip = VTOI(ap->a_dvp);
1396 if (tdip->pmp->ronly) {
1397 LOCKSTOP;
1398 return (EROFS);
1401 ncp = ap->a_nch->ncp;
1402 name = ncp->nc_name;
1403 name_len = ncp->nc_nlen;
1406 * ip represents the file being hardlinked. The file could be a
1407 * normal file or a hardlink target if it has already been hardlinked.
1408 * (with the new semantics, it will almost always be a hardlink
1409 * target).
1411 * Bump nlinks and potentially also create or move the hardlink
1412 * target in the parent directory common to (ip) and (tdip). The
1413 * consolidation code can modify ip->cluster. The returned cluster
1414 * is locked.
1416 ip = VTOI(ap->a_vp);
1417 KASSERT(ip->pmp, ("ip->pmp is NULL %p %p", ip, ip->pmp));
1418 hammer2_pfs_memory_wait(ip->pmp);
1419 hammer2_trans_init(ip->pmp, 0);
1422 * Target should be an indexed inode or there's no way we will ever
1423 * be able to find it!
1425 KKASSERT((ip->meta.name_key & HAMMER2_DIRHASH_VISIBLE) == 0);
1427 error = 0;
1430 * Can return NULL and error == EXDEV if the common parent
1431 * crosses a directory with the xlink flag set.
1433 hammer2_inode_lock(tdip, 0);
1434 hammer2_inode_lock(ip, 0);
1437 * Create the hardlink target and bump nlinks.
1439 if (error == 0) {
1440 hammer2_inode_create(tdip, tdip, NULL, NULL,
1441 name, name_len, 0,
1442 ip->meta.inum,
1443 HAMMER2_OBJTYPE_HARDLINK, ip->meta.type,
1444 0, &error);
1445 hammer2_inode_modify(ip);
1446 ++ip->meta.nlinks;
1448 if (error == 0) {
1450 * Update dip's mtime
1452 uint64_t mtime;
1454 hammer2_update_time(&mtime);
1455 hammer2_inode_modify(tdip);
1456 tdip->meta.mtime = mtime;
1458 cache_setunresolved(ap->a_nch);
1459 cache_setvp(ap->a_nch, ap->a_vp);
1461 hammer2_inode_unlock(ip);
1462 hammer2_inode_unlock(tdip);
1464 hammer2_trans_done(ip->pmp);
1466 LOCKSTOP;
1467 return error;
1471 * hammer2_vop_ncreate { nch, dvp, vpp, cred, vap }
1473 * The operating system has already ensured that the directory entry
1474 * does not exist and done all appropriate namespace locking.
1476 static
1478 hammer2_vop_ncreate(struct vop_ncreate_args *ap)
1480 hammer2_inode_t *dip;
1481 hammer2_inode_t *nip;
1482 struct namecache *ncp;
1483 const uint8_t *name;
1484 size_t name_len;
1485 hammer2_tid_t inum;
1486 int error;
1488 LOCKSTART;
1489 dip = VTOI(ap->a_dvp);
1490 if (dip->pmp->ronly) {
1491 LOCKSTOP;
1492 return (EROFS);
1495 ncp = ap->a_nch->ncp;
1496 name = ncp->nc_name;
1497 name_len = ncp->nc_nlen;
1498 hammer2_pfs_memory_wait(dip->pmp);
1499 hammer2_trans_init(dip->pmp, 0);
1501 inum = hammer2_trans_newinum(dip->pmp);
1504 * Create the actual inode as a hidden file in the iroot, then
1505 * create the directory entry as a hardlink to it. The creation
1506 * of the actual inode sets its nlinks to 1 which is the value
1507 * we desire.
1509 nip = hammer2_inode_create(dip->pmp->iroot, dip, ap->a_vap, ap->a_cred,
1510 NULL, 0, inum,
1511 inum, 0, 0,
1512 0, &error);
1514 if (error == 0) {
1515 hammer2_inode_create(dip, dip, NULL, NULL,
1516 name, name_len, 0,
1517 nip->meta.inum,
1518 HAMMER2_OBJTYPE_HARDLINK, nip->meta.type,
1519 0, &error);
1521 if (error) {
1522 KKASSERT(nip == NULL);
1523 *ap->a_vpp = NULL;
1524 } else {
1525 *ap->a_vpp = hammer2_igetv(nip, &error);
1526 hammer2_inode_unlock(nip);
1530 * Update dip's mtime
1532 if (error == 0) {
1533 uint64_t mtime;
1535 hammer2_inode_lock(dip, HAMMER2_RESOLVE_SHARED);
1536 hammer2_update_time(&mtime);
1537 hammer2_inode_modify(dip);
1538 dip->meta.mtime = mtime;
1539 hammer2_inode_unlock(dip);
1542 hammer2_trans_done(dip->pmp);
1544 if (error == 0) {
1545 cache_setunresolved(ap->a_nch);
1546 cache_setvp(ap->a_nch, *ap->a_vpp);
1548 LOCKSTOP;
1549 return error;
1553 * Make a device node (typically a fifo)
1555 static
1557 hammer2_vop_nmknod(struct vop_nmknod_args *ap)
1559 hammer2_inode_t *dip;
1560 hammer2_inode_t *nip;
1561 struct namecache *ncp;
1562 const uint8_t *name;
1563 size_t name_len;
1564 hammer2_tid_t inum;
1565 int error;
1567 LOCKSTART;
1568 dip = VTOI(ap->a_dvp);
1569 if (dip->pmp->ronly) {
1570 LOCKSTOP;
1571 return (EROFS);
1574 ncp = ap->a_nch->ncp;
1575 name = ncp->nc_name;
1576 name_len = ncp->nc_nlen;
1577 hammer2_pfs_memory_wait(dip->pmp);
1578 hammer2_trans_init(dip->pmp, 0);
1581 * The device node is entered as the directory entry itself and not
1582 * as a hardlink to an inode. Since one cannot obtain a
1583 * file handle on the filesystem entry representing the device, we
1584 * do not have to worry about indexing its inode.
1586 inum = hammer2_trans_newinum(dip->pmp);
1587 nip = hammer2_inode_create(dip->pmp->iroot, dip, ap->a_vap, ap->a_cred,
1588 NULL, 0, inum,
1589 inum, 0, 0,
1590 0, &error);
1591 if (error == 0) {
1592 hammer2_inode_create(dip, dip, NULL, NULL,
1593 name, name_len, 0,
1594 nip->meta.inum,
1595 HAMMER2_OBJTYPE_HARDLINK, nip->meta.type,
1596 0, &error);
1600 if (error) {
1601 KKASSERT(nip == NULL);
1602 *ap->a_vpp = NULL;
1603 } else {
1604 *ap->a_vpp = hammer2_igetv(nip, &error);
1605 hammer2_inode_unlock(nip);
1609 * Update dip's mtime
1611 if (error == 0) {
1612 uint64_t mtime;
1614 hammer2_inode_lock(dip, HAMMER2_RESOLVE_SHARED);
1615 hammer2_update_time(&mtime);
1616 hammer2_inode_modify(dip);
1617 dip->meta.mtime = mtime;
1618 hammer2_inode_unlock(dip);
1621 hammer2_trans_done(dip->pmp);
1623 if (error == 0) {
1624 cache_setunresolved(ap->a_nch);
1625 cache_setvp(ap->a_nch, *ap->a_vpp);
1627 LOCKSTOP;
1628 return error;
1632 * hammer2_vop_nsymlink { nch, dvp, vpp, cred, vap, target }
1634 static
1636 hammer2_vop_nsymlink(struct vop_nsymlink_args *ap)
1638 hammer2_inode_t *dip;
1639 hammer2_inode_t *nip;
1640 struct namecache *ncp;
1641 const uint8_t *name;
1642 size_t name_len;
1643 hammer2_tid_t inum;
1644 int error;
1646 dip = VTOI(ap->a_dvp);
1647 if (dip->pmp->ronly)
1648 return (EROFS);
1650 ncp = ap->a_nch->ncp;
1651 name = ncp->nc_name;
1652 name_len = ncp->nc_nlen;
1653 hammer2_pfs_memory_wait(dip->pmp);
1654 hammer2_trans_init(dip->pmp, 0);
1656 ap->a_vap->va_type = VLNK; /* enforce type */
1659 * The softlink is entered into the directory itself and not
1660 * as a hardlink to an inode. Since one cannot obtain a
1661 * file handle on the softlink itself we do not have to worry
1662 * about indexing its inode.
1664 inum = hammer2_trans_newinum(dip->pmp);
1666 nip = hammer2_inode_create(dip->pmp->iroot, dip, ap->a_vap, ap->a_cred,
1667 NULL, 0, inum,
1668 inum, 0, 0,
1669 0, &error);
1670 if (error == 0) {
1671 hammer2_inode_create(dip, dip, NULL, NULL,
1672 name, name_len, 0,
1673 nip->meta.inum,
1674 HAMMER2_OBJTYPE_HARDLINK, nip->meta.type,
1675 0, &error);
1679 if (error) {
1680 KKASSERT(nip == NULL);
1681 *ap->a_vpp = NULL;
1682 hammer2_trans_done(dip->pmp);
1683 return error;
1685 *ap->a_vpp = hammer2_igetv(nip, &error);
1688 * Build the softlink (~like file data) and finalize the namecache.
1690 if (error == 0) {
1691 size_t bytes;
1692 struct uio auio;
1693 struct iovec aiov;
1695 bytes = strlen(ap->a_target);
1697 hammer2_inode_unlock(nip);
1698 bzero(&auio, sizeof(auio));
1699 bzero(&aiov, sizeof(aiov));
1700 auio.uio_iov = &aiov;
1701 auio.uio_segflg = UIO_SYSSPACE;
1702 auio.uio_rw = UIO_WRITE;
1703 auio.uio_resid = bytes;
1704 auio.uio_iovcnt = 1;
1705 auio.uio_td = curthread;
1706 aiov.iov_base = ap->a_target;
1707 aiov.iov_len = bytes;
1708 error = hammer2_write_file(nip, &auio, IO_APPEND, 0);
1709 /* XXX handle error */
1710 error = 0;
1711 } else {
1712 hammer2_inode_unlock(nip);
1716 * Update dip's mtime
1718 if (error == 0) {
1719 uint64_t mtime;
1721 hammer2_inode_lock(dip, HAMMER2_RESOLVE_SHARED);
1722 hammer2_update_time(&mtime);
1723 hammer2_inode_modify(dip);
1724 dip->meta.mtime = mtime;
1725 hammer2_inode_unlock(dip);
1728 hammer2_trans_done(dip->pmp);
1731 * Finalize namecache
1733 if (error == 0) {
1734 cache_setunresolved(ap->a_nch);
1735 cache_setvp(ap->a_nch, *ap->a_vpp);
1736 /* hammer2_knote(ap->a_dvp, NOTE_WRITE); */
1738 return error;
1742 * hammer2_vop_nremove { nch, dvp, cred }
1744 static
1746 hammer2_vop_nremove(struct vop_nremove_args *ap)
1748 hammer2_xop_unlink_t *xop;
1749 hammer2_inode_t *dip;
1750 hammer2_inode_t *ip;
1751 struct namecache *ncp;
1752 int error;
1753 int isopen;
1755 LOCKSTART;
1756 dip = VTOI(ap->a_dvp);
1757 if (dip->pmp->ronly) {
1758 LOCKSTOP;
1759 return(EROFS);
1762 ncp = ap->a_nch->ncp;
1764 hammer2_pfs_memory_wait(dip->pmp);
1765 hammer2_trans_init(dip->pmp, 0);
1766 hammer2_inode_lock(dip, 0);
1769 * The unlink XOP unlinks the path from the directory and
1770 * locates and returns the cluster associated with the real inode.
1771 * We have to handle nlinks here on the frontend.
1773 xop = hammer2_xop_alloc(dip, HAMMER2_XOP_MODIFYING);
1774 hammer2_xop_setname(&xop->head, ncp->nc_name, ncp->nc_nlen);
1775 isopen = cache_isopen(ap->a_nch);
1776 xop->isdir = 0;
1777 xop->dopermanent = 0;
1778 hammer2_xop_start(&xop->head, hammer2_xop_unlink);
1781 * Collect the real inode and adjust nlinks, destroy the real
1782 * inode if nlinks transitions to 0 and it was the real inode
1783 * (else it has already been removed).
1785 error = hammer2_xop_collect(&xop->head, 0);
1786 hammer2_inode_unlock(dip);
1788 if (error == 0) {
1789 ip = hammer2_inode_get(dip->pmp, dip, &xop->head.cluster, -1);
1790 hammer2_xop_retire(&xop->head, HAMMER2_XOPMASK_VOP);
1791 if (ip) {
1792 hammer2_inode_unlink_finisher(ip, isopen);
1793 hammer2_inode_unlock(ip);
1795 } else {
1796 hammer2_xop_retire(&xop->head, HAMMER2_XOPMASK_VOP);
1800 * Update dip's mtime
1802 if (error == 0) {
1803 uint64_t mtime;
1805 hammer2_inode_lock(dip, HAMMER2_RESOLVE_SHARED);
1806 hammer2_update_time(&mtime);
1807 hammer2_inode_modify(dip);
1808 dip->meta.mtime = mtime;
1809 hammer2_inode_unlock(dip);
1812 hammer2_inode_run_sideq(dip->pmp);
1813 hammer2_trans_done(dip->pmp);
1814 if (error == 0)
1815 cache_unlink(ap->a_nch);
1816 LOCKSTOP;
1817 return (error);
1821 * hammer2_vop_nrmdir { nch, dvp, cred }
1823 static
1825 hammer2_vop_nrmdir(struct vop_nrmdir_args *ap)
1827 hammer2_xop_unlink_t *xop;
1828 hammer2_inode_t *dip;
1829 hammer2_inode_t *ip;
1830 struct namecache *ncp;
1831 int isopen;
1832 int error;
1834 LOCKSTART;
1835 dip = VTOI(ap->a_dvp);
1836 if (dip->pmp->ronly) {
1837 LOCKSTOP;
1838 return(EROFS);
1841 hammer2_pfs_memory_wait(dip->pmp);
1842 hammer2_trans_init(dip->pmp, 0);
1843 hammer2_inode_lock(dip, 0);
1845 xop = hammer2_xop_alloc(dip, HAMMER2_XOP_MODIFYING);
1847 ncp = ap->a_nch->ncp;
1848 hammer2_xop_setname(&xop->head, ncp->nc_name, ncp->nc_nlen);
1849 isopen = cache_isopen(ap->a_nch);
1850 xop->isdir = 1;
1851 xop->dopermanent = 0;
1852 hammer2_xop_start(&xop->head, hammer2_xop_unlink);
1855 * Collect the real inode and adjust nlinks, destroy the real
1856 * inode if nlinks transitions to 0 and it was the real inode
1857 * (else it has already been removed).
1859 error = hammer2_xop_collect(&xop->head, 0);
1860 hammer2_inode_unlock(dip);
1862 if (error == 0) {
1863 ip = hammer2_inode_get(dip->pmp, dip, &xop->head.cluster, -1);
1864 hammer2_xop_retire(&xop->head, HAMMER2_XOPMASK_VOP);
1865 if (ip) {
1866 hammer2_inode_unlink_finisher(ip, isopen);
1867 hammer2_inode_unlock(ip);
1869 } else {
1870 hammer2_xop_retire(&xop->head, HAMMER2_XOPMASK_VOP);
1874 * Update dip's mtime
1876 if (error == 0) {
1877 uint64_t mtime;
1879 hammer2_inode_lock(dip, HAMMER2_RESOLVE_SHARED);
1880 hammer2_update_time(&mtime);
1881 hammer2_inode_modify(dip);
1882 dip->meta.mtime = mtime;
1883 hammer2_inode_unlock(dip);
1886 hammer2_inode_run_sideq(dip->pmp);
1887 hammer2_trans_done(dip->pmp);
1888 if (error == 0)
1889 cache_unlink(ap->a_nch);
1890 LOCKSTOP;
1891 return (error);
1895 * hammer2_vop_nrename { fnch, tnch, fdvp, tdvp, cred }
1897 static
1899 hammer2_vop_nrename(struct vop_nrename_args *ap)
1901 struct namecache *fncp;
1902 struct namecache *tncp;
1903 hammer2_inode_t *fdip;
1904 hammer2_inode_t *tdip;
1905 hammer2_inode_t *ip;
1906 const uint8_t *fname;
1907 size_t fname_len;
1908 const uint8_t *tname;
1909 size_t tname_len;
1910 int error;
1911 int tnch_error;
1912 int update_tdip;
1913 int update_fdip;
1914 hammer2_key_t tlhc;
1916 if (ap->a_fdvp->v_mount != ap->a_tdvp->v_mount)
1917 return(EXDEV);
1918 if (ap->a_fdvp->v_mount != ap->a_fnch->ncp->nc_vp->v_mount)
1919 return(EXDEV);
1921 fdip = VTOI(ap->a_fdvp); /* source directory */
1922 tdip = VTOI(ap->a_tdvp); /* target directory */
1924 if (fdip->pmp->ronly)
1925 return(EROFS);
1927 LOCKSTART;
1928 fncp = ap->a_fnch->ncp; /* entry name in source */
1929 fname = fncp->nc_name;
1930 fname_len = fncp->nc_nlen;
1932 tncp = ap->a_tnch->ncp; /* entry name in target */
1933 tname = tncp->nc_name;
1934 tname_len = tncp->nc_nlen;
1936 hammer2_pfs_memory_wait(tdip->pmp);
1937 hammer2_trans_init(tdip->pmp, 0);
1939 update_tdip = 0;
1940 update_fdip = 0;
1943 * ip is the inode being renamed. If this is a hardlink then
1944 * ip represents the actual file and not the hardlink marker.
1946 ip = VTOI(fncp->nc_vp);
1948 KKASSERT((ip->meta.name_key & HAMMER2_DIRHASH_VISIBLE) == 0);
1951 * Can return NULL and error == EXDEV if the common parent
1952 * crosses a directory with the xlink flag set.
1954 error = 0;
1955 hammer2_inode_lock(fdip, 0);
1956 hammer2_inode_lock(tdip, 0);
1957 hammer2_inode_ref(ip); /* extra ref */
1959 hammer2_inode_lock(ip, 0);
1962 * Delete the target namespace.
1965 hammer2_xop_unlink_t *xop2;
1966 hammer2_inode_t *tip;
1967 int isopen;
1970 * The unlink XOP unlinks the path from the directory and
1971 * locates and returns the cluster associated with the real
1972 * inode. We have to handle nlinks here on the frontend.
1974 xop2 = hammer2_xop_alloc(tdip, HAMMER2_XOP_MODIFYING);
1975 hammer2_xop_setname(&xop2->head, tname, tname_len);
1976 isopen = cache_isopen(ap->a_tnch);
1977 xop2->isdir = -1;
1978 xop2->dopermanent = 0;
1979 hammer2_xop_start(&xop2->head, hammer2_xop_unlink);
1982 * Collect the real inode and adjust nlinks, destroy the real
1983 * inode if nlinks transitions to 0 and it was the real inode
1984 * (else it has already been removed).
1986 tnch_error = hammer2_xop_collect(&xop2->head, 0);
1987 /* hammer2_inode_unlock(tdip); */
1989 if (tnch_error == 0) {
1990 tip = hammer2_inode_get(tdip->pmp, NULL,
1991 &xop2->head.cluster, -1);
1992 hammer2_xop_retire(&xop2->head, HAMMER2_XOPMASK_VOP);
1993 if (tip) {
1994 hammer2_inode_unlink_finisher(tip, isopen);
1995 hammer2_inode_unlock(tip);
1997 } else {
1998 hammer2_xop_retire(&xop2->head, HAMMER2_XOPMASK_VOP);
2000 /* hammer2_inode_lock(tdip, 0); */
2002 if (tnch_error && tnch_error != ENOENT) {
2003 error = tnch_error;
2004 goto done2;
2006 update_tdip = 1;
2010 * Resolve the collision space for (tdip, tname, tname_len)
2012 * tdip must be held exclusively locked to prevent races.
2015 hammer2_xop_scanlhc_t *sxop;
2016 hammer2_tid_t lhcbase;
2018 tlhc = hammer2_dirhash(tname, tname_len);
2019 lhcbase = tlhc;
2020 sxop = hammer2_xop_alloc(tdip, HAMMER2_XOP_MODIFYING);
2021 sxop->lhc = tlhc;
2022 hammer2_xop_start(&sxop->head, hammer2_xop_scanlhc);
2023 while ((error = hammer2_xop_collect(&sxop->head, 0)) == 0) {
2024 if (tlhc != sxop->head.cluster.focus->bref.key)
2025 break;
2026 ++tlhc;
2028 hammer2_xop_retire(&sxop->head, HAMMER2_XOPMASK_VOP);
2030 if (error) {
2031 if (error != ENOENT)
2032 goto done2;
2033 ++tlhc;
2034 error = 0;
2036 if ((lhcbase ^ tlhc) & ~HAMMER2_DIRHASH_LOMASK) {
2037 error = ENOSPC;
2038 goto done2;
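/*
 * Editor's worked example of the collision probe above (hypothetical
 * values): if hammer2_dirhash() yields tlhc 0x...5678 and that key is
 * already present, tlhc is bumped to 0x...5679, 0x...567a, and so on
 * until scanlhc returns ENOENT. Only the HAMMER2_DIRHASH_LOMASK bits
 * may change; if the increments carry out of that range the hash
 * neighborhood is full and ENOSPC is returned.
 */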
2043 	 * Everything is set up; do the rename.
2045 * We have to synchronize ip->meta to the underlying operation.
2047 * NOTE: To avoid deadlocks we cannot lock (ip) while we are
2048 * unlinking elements from their directories. Locking
2049 * the nlinks field does not lock the whole inode.
2051 /* hammer2_inode_lock(ip, 0); */
2052 if (error == 0) {
2053 hammer2_xop_nrename_t *xop4;
2055 xop4 = hammer2_xop_alloc(fdip, HAMMER2_XOP_MODIFYING);
2056 xop4->lhc = tlhc;
2057 xop4->ip_key = ip->meta.name_key;
2058 hammer2_xop_setip2(&xop4->head, ip);
2059 hammer2_xop_setip3(&xop4->head, tdip);
2060 hammer2_xop_setname(&xop4->head, fname, fname_len);
2061 hammer2_xop_setname2(&xop4->head, tname, tname_len);
2062 hammer2_xop_start(&xop4->head, hammer2_xop_nrename);
2064 error = hammer2_xop_collect(&xop4->head, 0);
2065 hammer2_xop_retire(&xop4->head, HAMMER2_XOPMASK_VOP);
2067 if (error == ENOENT)
2068 error = 0;
2069 if (error == 0 &&
2070 (ip->meta.name_key & HAMMER2_DIRHASH_VISIBLE)) {
2071 hammer2_inode_modify(ip);
2072 ip->meta.name_len = tname_len;
2073 ip->meta.name_key = tlhc;
2076 update_fdip = 1;
2077 		update_tdip = 1;
2080 done2:
2082 	 * Update directory mtimes to reflect that something changed.
2084 if (update_fdip || update_tdip) {
2085 uint64_t mtime;
2087 hammer2_update_time(&mtime);
2088 if (update_fdip) {
2089 hammer2_inode_modify(fdip);
2090 fdip->meta.mtime = mtime;
2092 if (update_tdip) {
2093 hammer2_inode_modify(tdip);
2094 tdip->meta.mtime = mtime;
2097 hammer2_inode_unlock(ip);
2098 hammer2_inode_unlock(tdip);
2099 hammer2_inode_unlock(fdip);
2100 hammer2_inode_drop(ip);
2101 hammer2_inode_run_sideq(fdip->pmp);
2103 hammer2_trans_done(tdip->pmp);
2106 * Issue the namecache update after unlocking all the internal
2107 * hammer structures, otherwise we might deadlock.
2109 if (tnch_error == 0) {
2110 cache_unlink(ap->a_tnch);
2111 cache_setunresolved(ap->a_tnch);
2113 if (error == 0)
2114 cache_rename(ap->a_fnch, ap->a_tnch);
2116 LOCKSTOP;
2117 return (error);
2121 * hammer2_vop_ioctl { vp, command, data, fflag, cred }
2123 static
2125 hammer2_vop_ioctl(struct vop_ioctl_args *ap)
2127 hammer2_inode_t *ip;
2128 int error;
2130 LOCKSTART;
2131 ip = VTOI(ap->a_vp);
2133 error = hammer2_ioctl(ip, ap->a_command, (void *)ap->a_data,
2134 ap->a_fflag, ap->a_cred);
2135 LOCKSTOP;
2136 return (error);
2139 static
2140 int
2141 hammer2_vop_mountctl(struct vop_mountctl_args *ap)
2143 struct mount *mp;
2144 hammer2_pfs_t *pmp;
2145 int rc;
2147 LOCKSTART;
2148 switch (ap->a_op) {
2149 case (MOUNTCTL_SET_EXPORT):
2150 mp = ap->a_head.a_ops->head.vv_mount;
2151 pmp = MPTOPMP(mp);
2153 if (ap->a_ctllen != sizeof(struct export_args))
2154 rc = (EINVAL);
2155 else
2156 rc = vfs_export(mp, &pmp->export,
2157 (const struct export_args *)ap->a_ctl);
2158 break;
2159 default:
2160 rc = vop_stdmountctl(ap);
2161 break;
2163 LOCKSTOP;
2164 return (rc);
2168 * KQFILTER
2170 static void filt_hammer2detach(struct knote *kn);
2171 static int filt_hammer2read(struct knote *kn, long hint);
2172 static int filt_hammer2write(struct knote *kn, long hint);
2173 static int filt_hammer2vnode(struct knote *kn, long hint);
2175 static struct filterops hammer2read_filtops =
2176 { FILTEROP_ISFD | FILTEROP_MPSAFE,
2177 NULL, filt_hammer2detach, filt_hammer2read };
2178 static struct filterops hammer2write_filtops =
2179 { FILTEROP_ISFD | FILTEROP_MPSAFE,
2180 NULL, filt_hammer2detach, filt_hammer2write };
2181 static struct filterops hammer2vnode_filtops =
2182 { FILTEROP_ISFD | FILTEROP_MPSAFE,
2183 NULL, filt_hammer2detach, filt_hammer2vnode };
2185 static
2187 hammer2_vop_kqfilter(struct vop_kqfilter_args *ap)
2189 struct vnode *vp = ap->a_vp;
2190 struct knote *kn = ap->a_kn;
2192 switch (kn->kn_filter) {
2193 case EVFILT_READ:
2194 kn->kn_fop = &hammer2read_filtops;
2195 break;
2196 case EVFILT_WRITE:
2197 kn->kn_fop = &hammer2write_filtops;
2198 break;
2199 case EVFILT_VNODE:
2200 kn->kn_fop = &hammer2vnode_filtops;
2201 break;
2202 default:
2203 return (EOPNOTSUPP);
2206 kn->kn_hook = (caddr_t)vp;
2208 knote_insert(&vp->v_pollinfo.vpi_kqinfo.ki_note, kn);
2210 return(0);
2213 static void
2214 filt_hammer2detach(struct knote *kn)
2216 struct vnode *vp = (void *)kn->kn_hook;
2218 knote_remove(&vp->v_pollinfo.vpi_kqinfo.ki_note, kn);
2221 static int
2222 filt_hammer2read(struct knote *kn, long hint)
2224 struct vnode *vp = (void *)kn->kn_hook;
2225 hammer2_inode_t *ip = VTOI(vp);
2226 off_t off;
2228 if (hint == NOTE_REVOKE) {
2229 kn->kn_flags |= (EV_EOF | EV_NODATA | EV_ONESHOT);
2230 return(1);
2232 off = ip->meta.size - kn->kn_fp->f_offset;
2233 kn->kn_data = (off < INTPTR_MAX) ? off : INTPTR_MAX;
2234 if (kn->kn_sfflags & NOTE_OLDAPI)
2235 return(1);
2236 return (kn->kn_data != 0);
2240 static int
2241 filt_hammer2write(struct knote *kn, long hint)
2243 if (hint == NOTE_REVOKE)
2244 kn->kn_flags |= (EV_EOF | EV_NODATA | EV_ONESHOT);
2245 kn->kn_data = 0;
2246 return (1);
2249 static int
2250 filt_hammer2vnode(struct knote *kn, long hint)
2252 if (kn->kn_sfflags & hint)
2253 kn->kn_fflags |= hint;
2254 if (hint == NOTE_REVOKE) {
2255 kn->kn_flags |= (EV_EOF | EV_NODATA);
2256 return (1);
2258 return (kn->kn_fflags != 0);
2262 * FIFO VOPS
2264 static
2266 hammer2_vop_markatime(struct vop_markatime_args *ap)
2268 hammer2_inode_t *ip;
2269 struct vnode *vp;
2271 vp = ap->a_vp;
2272 ip = VTOI(vp);
2274 if (ip->pmp->ronly)
2275 return(EROFS);
2276 return(0);
2279 static
2281 hammer2_vop_fifokqfilter(struct vop_kqfilter_args *ap)
2283 int error;
2285 error = VOCALL(&fifo_vnode_vops, &ap->a_head);
2286 if (error)
2287 error = hammer2_vop_kqfilter(ap);
2288 return(error);
2292 * VOPS vector
2294 struct vop_ops hammer2_vnode_vops = {
2295 .vop_default = vop_defaultop,
2296 .vop_fsync = hammer2_vop_fsync,
2297 .vop_getpages = vop_stdgetpages,
2298 .vop_putpages = vop_stdputpages,
2299 .vop_access = hammer2_vop_access,
2300 .vop_advlock = hammer2_vop_advlock,
2301 .vop_close = hammer2_vop_close,
2302 .vop_nlink = hammer2_vop_nlink,
2303 .vop_ncreate = hammer2_vop_ncreate,
2304 .vop_nsymlink = hammer2_vop_nsymlink,
2305 .vop_nremove = hammer2_vop_nremove,
2306 .vop_nrmdir = hammer2_vop_nrmdir,
2307 .vop_nrename = hammer2_vop_nrename,
2308 .vop_getattr = hammer2_vop_getattr,
2309 .vop_setattr = hammer2_vop_setattr,
2310 .vop_readdir = hammer2_vop_readdir,
2311 .vop_readlink = hammer2_vop_readlink,
2314 .vop_read = hammer2_vop_read,
2315 .vop_write = hammer2_vop_write,
2316 .vop_open = hammer2_vop_open,
2317 .vop_inactive = hammer2_vop_inactive,
2318 .vop_reclaim = hammer2_vop_reclaim,
2319 .vop_nresolve = hammer2_vop_nresolve,
2320 .vop_nlookupdotdot = hammer2_vop_nlookupdotdot,
2321 .vop_nmkdir = hammer2_vop_nmkdir,
2322 .vop_nmknod = hammer2_vop_nmknod,
2323 .vop_ioctl = hammer2_vop_ioctl,
2324 .vop_mountctl = hammer2_vop_mountctl,
2325 .vop_bmap = hammer2_vop_bmap,
2326 .vop_strategy = hammer2_vop_strategy,
2327 .vop_kqfilter = hammer2_vop_kqfilter
2330 struct vop_ops hammer2_spec_vops = {
2331 .vop_default = vop_defaultop,
2332 .vop_fsync = hammer2_vop_fsync,
2333 .vop_read = vop_stdnoread,
2334 .vop_write = vop_stdnowrite,
2335 .vop_access = hammer2_vop_access,
2336 .vop_close = hammer2_vop_close,
2337 .vop_markatime = hammer2_vop_markatime,
2338 .vop_getattr = hammer2_vop_getattr,
2339 .vop_inactive = hammer2_vop_inactive,
2340 .vop_reclaim = hammer2_vop_reclaim,
2341 .vop_setattr = hammer2_vop_setattr
2344 struct vop_ops hammer2_fifo_vops = {
2345 .vop_default = fifo_vnoperate,
2346 .vop_fsync = hammer2_vop_fsync,
2347 #if 0
2348 .vop_read = hammer2_vop_fiforead,
2349 .vop_write = hammer2_vop_fifowrite,
2350 #endif
2351 .vop_access = hammer2_vop_access,
2352 #if 0
2353 .vop_close = hammer2_vop_fifoclose,
2354 #endif
2355 .vop_markatime = hammer2_vop_markatime,
2356 .vop_getattr = hammer2_vop_getattr,
2357 .vop_inactive = hammer2_vop_inactive,
2358 .vop_reclaim = hammer2_vop_reclaim,
2359 .vop_setattr = hammer2_vop_setattr,
2360 .vop_kqfilter = hammer2_vop_fifokqfilter