/*
 * Copyright (c) 2011-2015 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@dragonflybsd.org>
 * by Venkatesh Srinivas <vsrinivas@dragonflybsd.org>
 * by Daniel Flores (GSOC 2013 - mentored by Matthew Dillon, compression)
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
/*
 * Kernel Filesystem interface
 *
 * NOTE! local ipdata pointers must be reloaded on any modifying operation
 *	 to the inode as its underlying chain may have changed.
 */
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/fcntl.h>
#include <sys/buf.h>
#include <sys/proc.h>
#include <sys/namei.h>
#include <sys/mount.h>
#include <sys/vnode.h>
#include <sys/mountctl.h>
#include <sys/dirent.h>
#include <sys/uio.h>
#include <sys/objcache.h>
#include <sys/event.h>
#include <sys/file.h>
#include <vfs/fifofs/fifo.h>

#include "hammer2.h"
static int hammer2_read_file(hammer2_inode_t *ip, struct uio *uio,
				int seqcount);
static int hammer2_write_file(hammer2_inode_t *ip, struct uio *uio,
				int ioflag, int seqcount);
static void hammer2_extend_file(hammer2_inode_t *ip, hammer2_key_t nsize);
static void hammer2_truncate_file(hammer2_inode_t *ip, hammer2_key_t nsize);
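
/*
 * Object cache used for XOP structure allocations, assumed to be set
 * up by the hammer2 VFS initialization code.
 */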
struct objcache *cache_xops;
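
/*
 * Post kqueue event notification(s) on a vnode if any filter flags
 * are set.
 */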
static __inline
void
hammer2_knote(struct vnode *vp, int flags)
{
	if (flags)
		KNOTE(&vp->v_pollinfo.vpi_kqinfo.ki_note, flags);
}
/*
 * Last reference to a vnode is going away but it is still cached.
 */
static
int
hammer2_vop_inactive(struct vop_inactive_args *ap)
{
	hammer2_inode_t *ip;
	struct vnode *vp;

	vp = ap->a_vp;
	ip = VTOI(vp);

	/*
	 * Degenerate case
	 */
	if (ip == NULL) {
		vrecycle(vp);
		return (0);
	}

	/*
	 * Check for deleted inodes and recycle immediately on the last
	 * release.  Be sure to destroy any left-over buffer cache buffers
	 * so we do not waste time trying to flush them.
	 *
	 * Note that deleting the file block chains under the inode chain
	 * would just be a waste of energy, so don't do it.
	 *
	 * WARNING: nvtruncbuf() can only be safely called without the inode
	 *	    lock held due to the way our write thread works.
	 */
	if (ip->flags & HAMMER2_INODE_ISUNLINKED) {
		hammer2_key_t lbase;
		int nblksize;

		/*
		 * Detect updates to the embedded data which may be
		 * synchronized by the strategy code.  Simply mark the
		 * inode modified so it gets picked up by our normal flush.
		 */
		nblksize = hammer2_calc_logical(ip, 0, &lbase, NULL);
		nvtruncbuf(vp, 0, nblksize, 0, 0);
		vrecycle(vp);
	}
	return (0);
}
/*
 * Reclaim a vnode so that it can be reused; after the inode is
 * disassociated, the filesystem must manage it alone.
 */
static
int
hammer2_vop_reclaim(struct vop_reclaim_args *ap)
{
	hammer2_inode_t *ip;
	hammer2_pfs_t *pmp;
	struct vnode *vp;

	vp = ap->a_vp;
	ip = VTOI(vp);
	if (ip == NULL) {
		return(0);
	}
	pmp = ip->pmp;

	/*
	 * The final close of a deleted file or directory marks it for
	 * destruction.  The DELETED flag allows the flusher to shortcut
	 * any modified blocks still unflushed (that is, just ignore them).
	 *
	 * HAMMER2 usually does not try to optimize the freemap by returning
	 * deleted blocks to it as it does not usually know how many snapshots
	 * might be referencing portions of the file/dir.
	 */
	vp->v_data = NULL;
	ip->vp = NULL;

	/*
	 * NOTE! We do not attempt to flush chains here, flushing is
	 *	 really fragile and could also deadlock.
	 */
	vclrisdirty(vp);

	/*
	 * A modified inode may require chain synchronization.  This
	 * synchronization is usually handled by VOP_SYNC / VOP_FSYNC
	 * when vfsync() is called.  However, that requires a vnode.
	 *
	 * When the vnode is disassociated we must keep track of any modified
	 * inode via the sideq so that it is properly flushed.  We cannot
	 * safely synchronize the inode from inside the reclaim due to
	 * potentially deep locks held as-of when the reclaim occurs.
	 * Interactions and potential deadlocks abound.
	 */
	if ((ip->flags & (HAMMER2_INODE_ISUNLINKED |
			  HAMMER2_INODE_MODIFIED |
			  HAMMER2_INODE_RESIZED)) &&
	    (ip->flags & HAMMER2_INODE_ISDELETED) == 0) {
		hammer2_inode_sideq_t *ipul;

		ipul = kmalloc(sizeof(*ipul), pmp->minode, M_WAITOK | M_ZERO);
		ipul->ip = ip;

		hammer2_spin_ex(&pmp->list_spin);
		if ((ip->flags & HAMMER2_INODE_ONSIDEQ) == 0) {
			/* ref -> sideq */
			atomic_set_int(&ip->flags, HAMMER2_INODE_ONSIDEQ);
			TAILQ_INSERT_TAIL(&pmp->sideq, ipul, entry);
			++pmp->sideq_count;
			hammer2_spin_unex(&pmp->list_spin);
		} else {
			hammer2_spin_unex(&pmp->list_spin);
			kfree(ipul, pmp->minode);
			hammer2_inode_drop(ip);	/* vp ref */
		}
		/* retain ref from vp for ipul */
	} else {
		hammer2_inode_drop(ip);	/* vp ref */
	}

	/*
	 * XXX handle background sync when ip dirty, kernel will no longer
	 * notify us regarding this inode because there is no longer a
	 * vnode attached to it.
	 */
	return (0);
}
static
int
hammer2_vop_fsync(struct vop_fsync_args *ap)
{
	hammer2_inode_t *ip;
	struct vnode *vp;

	vp = ap->a_vp;
	ip = VTOI(vp);

#if 0
	/* XXX can't do this yet */
	hammer2_trans_init(ip->pmp, HAMMER2_TRANS_ISFLUSH);
	vfsync(vp, ap->a_waitfor, 1, NULL, NULL);
#endif
	hammer2_trans_init(ip->pmp, 0);
	vfsync(vp, ap->a_waitfor, 1, NULL, NULL);

	/*
	 * Calling chain_flush here creates a lot of duplicative
	 * COW operations due to non-optimal vnode ordering.
	 *
	 * Only do it for an actual fsync() syscall.  The other forms
	 * which call this function will eventually call chain_flush
	 * on the volume root as a catch-all, which is far more optimal.
	 */
	hammer2_inode_lock(ip, 0);
	if (ip->flags & HAMMER2_INODE_MODIFIED)
		hammer2_inode_chain_sync(ip);
	hammer2_inode_unlock(ip);
	hammer2_trans_done(ip->pmp);

	return (0);
}
static
int
hammer2_vop_access(struct vop_access_args *ap)
{
	hammer2_inode_t *ip = VTOI(ap->a_vp);
	uid_t uid;
	gid_t gid;
	int error;

	hammer2_inode_lock(ip, HAMMER2_RESOLVE_SHARED);
	uid = hammer2_to_unix_xid(&ip->meta.uid);
	gid = hammer2_to_unix_xid(&ip->meta.gid);
	error = vop_helper_access(ap, uid, gid, ip->meta.mode, ip->meta.uflags);
	hammer2_inode_unlock(ip);

	return (error);
}
static
int
hammer2_vop_getattr(struct vop_getattr_args *ap)
{
	hammer2_pfs_t *pmp;
	hammer2_inode_t *ip;
	struct vnode *vp;
	struct vattr *vap;
	hammer2_chain_t *chain;
	int i;

	vp = ap->a_vp;
	vap = ap->a_vap;

	ip = VTOI(vp);
	pmp = ip->pmp;

	hammer2_inode_lock(ip, HAMMER2_RESOLVE_SHARED);

	vap->va_fsid = pmp->mp->mnt_stat.f_fsid.val[0];
	vap->va_fileid = ip->meta.inum;
	vap->va_mode = ip->meta.mode;
	vap->va_nlink = ip->meta.nlinks;
	vap->va_uid = hammer2_to_unix_xid(&ip->meta.uid);
	vap->va_gid = hammer2_to_unix_xid(&ip->meta.gid);
	vap->va_rmajor = 0;
	vap->va_rminor = 0;
	vap->va_size = ip->meta.size;	/* protected by shared lock */
	vap->va_blocksize = HAMMER2_PBUFSIZE;
	vap->va_flags = ip->meta.uflags;
	hammer2_time_to_timespec(ip->meta.ctime, &vap->va_ctime);
	hammer2_time_to_timespec(ip->meta.mtime, &vap->va_mtime);
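	/* atime is not tracked (see setattr), report mtime in its place */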
	hammer2_time_to_timespec(ip->meta.mtime, &vap->va_atime);
	vap->va_gen = 1;
	vap->va_bytes = 0;
	if (ip->meta.type == HAMMER2_OBJTYPE_DIRECTORY) {
		/*
		 * Can't really calculate directory use sans the files under
		 * it, just assume one block for now.
		 */
		vap->va_bytes += HAMMER2_INODE_BYTES;
	} else {
		for (i = 0; i < ip->cluster.nchains; ++i) {
			if ((chain = ip->cluster.array[i].chain) != NULL) {
				if (vap->va_bytes <
				    chain->bref.embed.stats.data_count) {
					vap->va_bytes =
					    chain->bref.embed.stats.data_count;
				}
			}
		}
	}
	vap->va_type = hammer2_get_vtype(ip->meta.type);
	vap->va_filerev = 0;
	vap->va_uid_uuid = ip->meta.uid;
	vap->va_gid_uuid = ip->meta.gid;
	vap->va_vaflags = VA_UID_UUID_VALID | VA_GID_UUID_VALID |
			  VA_FSID_UUID_VALID;

	hammer2_inode_unlock(ip);

	return (0);
}
static
int
hammer2_vop_setattr(struct vop_setattr_args *ap)
{
	hammer2_inode_t *ip;
	struct vnode *vp;
	struct vattr *vap;
	int error;
	int kflags = 0;
	uint64_t ctime;

	vp = ap->a_vp;
	vap = ap->a_vap;
	hammer2_update_time(&ctime);

	ip = VTOI(vp);

	if (ip->pmp->ronly)
		return (EROFS);
	if (hammer2_vfs_enospace(ip, 0, ap->a_cred) > 1)
		return (ENOSPC);

	hammer2_pfs_memory_wait(ip->pmp);
	hammer2_trans_init(ip->pmp, 0);
	hammer2_inode_lock(ip, 0);
	error = 0;

	if (vap->va_flags != VNOVAL) {
		uint32_t flags;

		flags = ip->meta.uflags;
		error = vop_helper_setattr_flags(&flags, vap->va_flags,
				   hammer2_to_unix_xid(&ip->meta.uid),
				   ap->a_cred);
		if (error == 0) {
			if (ip->meta.uflags != flags) {
				hammer2_inode_modify(ip);
				ip->meta.uflags = flags;
				ip->meta.ctime = ctime;
				kflags |= NOTE_ATTRIB;
			}
			if (ip->meta.uflags & (IMMUTABLE | APPEND)) {
				error = 0;
				goto done;
			}
		}
		goto done;
	}
	if (ip->meta.uflags & (IMMUTABLE | APPEND)) {
		error = EPERM;
		goto done;
	}
	if (vap->va_uid != (uid_t)VNOVAL || vap->va_gid != (gid_t)VNOVAL) {
		mode_t cur_mode = ip->meta.mode;
		uid_t cur_uid = hammer2_to_unix_xid(&ip->meta.uid);
		gid_t cur_gid = hammer2_to_unix_xid(&ip->meta.gid);
		uuid_t uuid_uid;
		uuid_t uuid_gid;

		error = vop_helper_chown(ap->a_vp, vap->va_uid, vap->va_gid,
					 ap->a_cred,
					 &cur_uid, &cur_gid, &cur_mode);
		if (error == 0) {
			hammer2_guid_to_uuid(&uuid_uid, cur_uid);
			hammer2_guid_to_uuid(&uuid_gid, cur_gid);
			if (bcmp(&uuid_uid, &ip->meta.uid, sizeof(uuid_uid)) ||
			    bcmp(&uuid_gid, &ip->meta.gid, sizeof(uuid_gid)) ||
			    ip->meta.mode != cur_mode
			) {
				hammer2_inode_modify(ip);
				ip->meta.uid = uuid_uid;
				ip->meta.gid = uuid_gid;
				ip->meta.mode = cur_mode;
				ip->meta.ctime = ctime;
			}
			kflags |= NOTE_ATTRIB;
		}
	}

	/*
	 * Resize the file
	 */
	if (vap->va_size != VNOVAL && ip->meta.size != vap->va_size) {
		switch(vp->v_type) {
		case VREG:
			if (vap->va_size == ip->meta.size)
				break;
			if (vap->va_size < ip->meta.size) {
				hammer2_mtx_ex(&ip->truncate_lock);
				hammer2_truncate_file(ip, vap->va_size);
				hammer2_mtx_unlock(&ip->truncate_lock);
				kflags |= NOTE_WRITE;
			} else {
				hammer2_extend_file(ip, vap->va_size);
				kflags |= NOTE_WRITE | NOTE_EXTEND;
			}
			hammer2_inode_modify(ip);
			ip->meta.mtime = ctime;
			break;
		default:
			error = EINVAL;
			goto done;
		}
	}
#if 0
	/* atime not supported */
	if (vap->va_atime.tv_sec != VNOVAL) {
		hammer2_inode_modify(ip);
		ip->meta.atime = hammer2_timespec_to_time(&vap->va_atime);
		kflags |= NOTE_ATTRIB;
	}
#endif
	if (vap->va_mode != (mode_t)VNOVAL) {
		mode_t cur_mode = ip->meta.mode;
		uid_t cur_uid = hammer2_to_unix_xid(&ip->meta.uid);
		gid_t cur_gid = hammer2_to_unix_xid(&ip->meta.gid);

		error = vop_helper_chmod(ap->a_vp, vap->va_mode, ap->a_cred,
					 cur_uid, cur_gid, &cur_mode);
		if (error == 0 && ip->meta.mode != cur_mode) {
			hammer2_inode_modify(ip);
			ip->meta.mode = cur_mode;
			ip->meta.ctime = ctime;
			kflags |= NOTE_ATTRIB;
		}
	}

	if (vap->va_mtime.tv_sec != VNOVAL) {
		hammer2_inode_modify(ip);
		ip->meta.mtime = hammer2_timespec_to_time(&vap->va_mtime);
		kflags |= NOTE_ATTRIB;
	}

done:
	/*
	 * If a truncation occurred we must call hammer2_inode_chain_sync()
	 * now in order to trim the related data chains, otherwise a later
	 * expansion can cause havoc.
	 *
	 * If an extend occurred that changed the DIRECTDATA state, we must
	 * call hammer2_inode_chain_sync() now in order to prepare the
	 * inode's indirect block table.
	 */
	if (ip->flags & HAMMER2_INODE_RESIZED)
		hammer2_inode_chain_sync(ip);

	/*
	 * Cleanup.
	 */
	hammer2_inode_unlock(ip);
	hammer2_trans_done(ip->pmp);
	hammer2_knote(ip->vp, kflags);

	return (error);
}
static
int
hammer2_vop_readdir(struct vop_readdir_args *ap)
{
	hammer2_xop_readdir_t *xop;
	hammer2_blockref_t bref;
	hammer2_inode_t *ip;
	hammer2_tid_t inum;
	hammer2_key_t lkey;
	struct uio *uio;
	off_t *cookies;
	off_t saveoff;
	int cookie_index;
	int ncookies;
	int error;
	int eofflag;
	int r;

	ip = VTOI(ap->a_vp);
	uio = ap->a_uio;
	saveoff = uio->uio_offset;
	eofflag = 0;
	error = 0;

	/*
	 * Setup directory entry cookies if requested
	 */
	if (ap->a_ncookies) {
		ncookies = uio->uio_resid / 16 + 1;
		if (ncookies > 1024)
			ncookies = 1024;
		cookies = kmalloc(ncookies * sizeof(off_t), M_TEMP, M_WAITOK);
	} else {
		ncookies = -1;
		cookies = NULL;
	}
	cookie_index = 0;

	hammer2_inode_lock(ip, HAMMER2_RESOLVE_SHARED);

	/*
	 * Handle artificial entries.  To ensure that only positive 64 bit
	 * quantities are returned to userland we always strip off bit 63.
	 * The hash code is designed such that codes 0x0000-0x7FFF are not
	 * used, allowing us to use these codes for artificial entries.
	 *
	 * Entry 0 is used for '.' and entry 1 is used for '..'.  Do not
	 * allow '..' to cross the mount point into (e.g.) the super-root.
	 */
	if (saveoff == 0) {
		inum = ip->meta.inum & HAMMER2_DIRHASH_USERMSK;
		r = vop_write_dirent(&error, uio, inum, DT_DIR, 1, ".");
		if (r)
			goto done;
		if (cookies)
			cookies[cookie_index] = saveoff;
		++saveoff;
		++cookie_index;
		if (cookie_index == ncookies)
			goto done;
	}

	if (saveoff == 1) {
		/*
		 * Be careful with lockorder when accessing ".."
		 *
		 * (ip is the current dir. xip is the parent dir).
		 */
		inum = ip->meta.inum & HAMMER2_DIRHASH_USERMSK;
		if (ip != ip->pmp->iroot)
			inum = ip->meta.iparent & HAMMER2_DIRHASH_USERMSK;
		r = vop_write_dirent(&error, uio, inum, DT_DIR, 2, "..");
		if (r)
			goto done;
		if (cookies)
			cookies[cookie_index] = saveoff;
		++saveoff;
		++cookie_index;
		if (cookie_index == ncookies)
			goto done;
	}

	lkey = saveoff | HAMMER2_DIRHASH_VISIBLE;
	if (hammer2_debug & 0x0020)
		kprintf("readdir: lkey %016jx\n", lkey);
	if (error)
		goto done;

	/*
	 * Use XOP for cluster scan.
	 *
	 * parent is the inode cluster, already locked for us.  Don't
	 * double lock shared locks as this will screw up upgrades.
	 */
	xop = hammer2_xop_alloc(ip, 0);
	xop->lkey = lkey;
	hammer2_xop_start(&xop->head, hammer2_xop_readdir);

	for (;;) {
		const hammer2_inode_data_t *ripdata;
		const char *dname;
		int dtype;

		error = hammer2_xop_collect(&xop->head, 0);
		error = hammer2_error_to_errno(error);
		if (error) {
			break;
		}
		if (cookie_index == ncookies)
			break;
		if (hammer2_debug & 0x0020)
			kprintf("cluster chain %p %p\n",
				xop->head.cluster.focus,
				(xop->head.cluster.focus ?
				 xop->head.cluster.focus->data : (void *)-1));
		hammer2_cluster_bref(&xop->head.cluster, &bref);

		if (bref.type == HAMMER2_BREF_TYPE_INODE) {
			ripdata =
			    &hammer2_cluster_rdata(&xop->head.cluster)->ipdata;
			dtype = hammer2_get_dtype(ripdata->meta.type);
			saveoff = bref.key & HAMMER2_DIRHASH_USERMSK;
			r = vop_write_dirent(&error, uio,
					     ripdata->meta.inum &
					      HAMMER2_DIRHASH_USERMSK,
					     dtype,
					     ripdata->meta.name_len,
					     ripdata->filename);
			if (r)
				break;
			if (cookies)
				cookies[cookie_index] = saveoff;
			++cookie_index;
		} else if (bref.type == HAMMER2_BREF_TYPE_DIRENT) {
			dtype = hammer2_get_dtype(bref.embed.dirent.type);
			saveoff = bref.key & HAMMER2_DIRHASH_USERMSK;
			if (bref.embed.dirent.namlen <=
			    sizeof(bref.check.buf)) {
				dname = bref.check.buf;
			} else {
				dname =
				 hammer2_cluster_rdata(&xop->head.cluster)->buf;
			}
			r = vop_write_dirent(&error, uio,
					     bref.embed.dirent.inum,
					     dtype,
					     bref.embed.dirent.namlen,
					     dname);
			if (r)
				break;
			if (cookies)
				cookies[cookie_index] = saveoff;
			++cookie_index;
		} else {
			/* XXX chain error */
			kprintf("bad chain type readdir %d\n", bref.type);
		}
	}
	hammer2_xop_retire(&xop->head, HAMMER2_XOPMASK_VOP);
	if (error == ENOENT) {
		error = 0;
		eofflag = 1;
		saveoff = (hammer2_key_t)-1;
	} else {
		saveoff = bref.key & HAMMER2_DIRHASH_USERMSK;
	}
done:
	hammer2_inode_unlock(ip);
	if (ap->a_eofflag)
		*ap->a_eofflag = eofflag;
	if (hammer2_debug & 0x0020)
		kprintf("readdir: done at %016jx\n", saveoff);
	uio->uio_offset = saveoff & ~HAMMER2_DIRHASH_VISIBLE;
	if (error && cookie_index == 0) {
		if (cookies) {
			kfree(cookies, M_TEMP);
			*ap->a_ncookies = 0;
			*ap->a_cookies = NULL;
		}
	} else {
		if (cookies) {
			*ap->a_ncookies = cookie_index;
			*ap->a_cookies = cookies;
		}
	}
	return (error);
}
/*
 * hammer2_vop_readlink { vp, uio, cred }
 */
static
int
hammer2_vop_readlink(struct vop_readlink_args *ap)
{
	struct vnode *vp;
	hammer2_inode_t *ip;
	int error;

	vp = ap->a_vp;
	if (vp->v_type != VLNK)
		return (EINVAL);
	ip = VTOI(vp);

	error = hammer2_read_file(ip, ap->a_uio, 0);
	return (error);
}
static
int
hammer2_vop_read(struct vop_read_args *ap)
{
	struct vnode *vp;
	hammer2_inode_t *ip;
	struct uio *uio;
	int error;
	int seqcount;
	int bigread;

	/*
	 * Read operations supported on this vnode?
	 */
	vp = ap->a_vp;
	if (vp->v_type != VREG)
		return (EINVAL);

	/*
	 * Misc
	 */
	ip = VTOI(vp);
	uio = ap->a_uio;
	error = 0;
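
	/*
	 * The upper bits of a_ioflag carry the heuristic sequential-access
	 * count used to size read-ahead.
	 */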
	seqcount = ap->a_ioflag >> 16;
	bigread = (uio->uio_resid > 100 * 1024 * 1024);

	error = hammer2_read_file(ip, uio, seqcount);
	return (error);
}
static
int
hammer2_vop_write(struct vop_write_args *ap)
{
	hammer2_inode_t *ip;
	thread_t td;
	struct vnode *vp;
	struct uio *uio;
	int error;
	int seqcount;
	int ioflag;

	/*
	 * Write operations supported on this vnode?
	 */
	vp = ap->a_vp;
	if (vp->v_type != VREG)
		return (EINVAL);

	/*
	 * Misc
	 */
	ip = VTOI(vp);
	ioflag = ap->a_ioflag;
	uio = ap->a_uio;
	error = 0;
	if (ip->pmp->ronly)
		return (EROFS);
	switch (hammer2_vfs_enospace(ip, uio->uio_resid, ap->a_cred)) {
	case 2:
		return (ENOSPC);
	case 1:
		ioflag |= IO_DIRECT;	/* semi-synchronous */
		/* fall through */
	default:
		break;
	}

	seqcount = ioflag >> 16;

	/*
	 * Check resource limit
	 */
	if (uio->uio_resid > 0 && (td = uio->uio_td) != NULL && td->td_proc &&
	    uio->uio_offset + uio->uio_resid >
	     td->td_proc->p_rlimit[RLIMIT_FSIZE].rlim_cur) {
		lwpsignal(td->td_proc, td->td_lwp, SIGXFSZ);
		return (EFBIG);
	}

	/*
	 * The transaction interlocks against flush initiations
	 * (note: but will run concurrently with the actual flush).
	 *
	 * To avoid deadlocking against the VM system, we must flag any
	 * transaction related to the buffer cache or other direct
	 * VM page manipulation.
	 */
	if (uio->uio_segflg == UIO_NOCOPY)
		hammer2_trans_init(ip->pmp, HAMMER2_TRANS_BUFCACHE);
	else
		hammer2_trans_init(ip->pmp, 0);
	error = hammer2_write_file(ip, uio, ioflag, seqcount);
	hammer2_trans_done(ip->pmp);

	return (error);
}
/*
 * Perform read operations on a file or symlink given an UNLOCKED
 * inode and uio.
 *
 * The passed ip is not locked.
 */
static
int
hammer2_read_file(hammer2_inode_t *ip, struct uio *uio, int seqcount)
{
	hammer2_off_t size;
	struct buf *bp;
	int error;

	error = 0;

	/*
	 * UIO read loop.
	 *
	 * WARNING! Assumes that the kernel interlocks size changes at the
	 *	    vnode level.
	 */
	hammer2_mtx_sh(&ip->lock);
	hammer2_mtx_sh(&ip->truncate_lock);
	size = ip->meta.size;
	hammer2_mtx_unlock(&ip->lock);

	while (uio->uio_resid > 0 && uio->uio_offset < size) {
		hammer2_key_t lbase;
		hammer2_key_t leof;
		int lblksize;
		int loff;
		int n;

		lblksize = hammer2_calc_logical(ip, uio->uio_offset,
						&lbase, &leof);

#if 1
		bp = NULL;
		error = cluster_readx(ip->vp, leof, lbase, lblksize,
				      B_NOTMETA | B_KVABIO,
				      uio->uio_resid,
				      seqcount * MAXBSIZE,
				      &bp);
#else
		if (uio->uio_segflg == UIO_NOCOPY) {
			bp = getblk(ip->vp, lbase, lblksize,
				    GETBLK_BHEAVY | GETBLK_KVABIO, 0);
			if (bp->b_flags & B_CACHE) {
				int i;
				int j = 0;
				if (bp->b_xio.xio_npages != 16)
					kprintf("NPAGES BAD\n");
				for (i = 0; i < bp->b_xio.xio_npages; ++i) {
					vm_page_t m;
					m = bp->b_xio.xio_pages[i];
					if (m == NULL || m->valid == 0) {
						kprintf("bp %016jx %016jx pg %d inv",
							lbase, leof, i);
						if (m)
							kprintf("m->object %p/%p", m->object, ip->vp->v_object);
						kprintf("\n");
						j = 1;
					}
				}
				if (j)
					kprintf("b_flags %08x, b_error %d\n", bp->b_flags, bp->b_error);
			}
			bqrelse(bp);
		}
		error = bread_kvabio(ip->vp, lbase, lblksize, &bp);
#endif
		if (error) {
			brelse(bp);
			break;
		}
		bkvasync(bp);
		loff = (int)(uio->uio_offset - lbase);
		n = lblksize - loff;
		if (n > uio->uio_resid)
			n = uio->uio_resid;
		if (n > size - uio->uio_offset)
			n = (int)(size - uio->uio_offset);
		bp->b_flags |= B_AGE;
		uiomovebp(bp, (char *)bp->b_data + loff, n, uio);
		bqrelse(bp);
	}
	hammer2_mtx_unlock(&ip->truncate_lock);

	return (error);
}
/*
 * Write to the file represented by the inode via the logical buffer cache.
 * The inode may represent a regular file or a symlink.
 *
 * The inode must not be locked.
 */
static
int
hammer2_write_file(hammer2_inode_t *ip, struct uio *uio,
		   int ioflag, int seqcount)
{
	hammer2_key_t old_eof;
	hammer2_key_t new_eof;
	struct buf *bp;
	int kflags;
	int error;
	int modified;

	/*
	 * Setup if append
	 *
	 * WARNING! Assumes that the kernel interlocks size changes at the
	 *	    vnode level.
	 */
	hammer2_mtx_ex(&ip->lock);
	hammer2_mtx_sh(&ip->truncate_lock);
	if (ioflag & IO_APPEND)
		uio->uio_offset = ip->meta.size;
	old_eof = ip->meta.size;

	/*
	 * Extend the file if necessary.  If the write fails at some point
	 * we will truncate it back down to cover as much as we were able
	 * to write.
	 *
	 * Doing this now makes it easier to calculate buffer sizes in
	 * the loop.
	 */
	kflags = 0;
	error = 0;
	modified = 0;

	if (uio->uio_offset + uio->uio_resid > old_eof) {
		new_eof = uio->uio_offset + uio->uio_resid;
		modified = 1;
		hammer2_extend_file(ip, new_eof);
		kflags |= NOTE_EXTEND;
	} else {
		new_eof = old_eof;
	}
	hammer2_mtx_unlock(&ip->lock);

	/*
	 * UIO write loop
	 */
	while (uio->uio_resid > 0) {
		hammer2_key_t lbase;
		int trivial;
		int endofblk;
		int lblksize;
		int loff;
		int n;

		/*
		 * Don't allow the buffer build to blow out the buffer
		 * cache.
		 */
		if ((ioflag & IO_RECURSE) == 0)
			bwillwrite(HAMMER2_PBUFSIZE);

		/*
		 * This nominally tells us how much we can cluster and
		 * what the logical buffer size needs to be.  Currently
		 * we don't try to cluster the write and just handle one
		 * block at a time.
		 */
		lblksize = hammer2_calc_logical(ip, uio->uio_offset,
						&lbase, NULL);
		loff = (int)(uio->uio_offset - lbase);
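
		/* logical file buffers are never larger than 64KB */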
		KKASSERT(lblksize <= 65536);
		/*
		 * Calculate bytes to copy this transfer and whether the
		 * copy completely covers the buffer or not.
		 */
		trivial = 0;
		n = lblksize - loff;
		if (n > uio->uio_resid) {
			n = uio->uio_resid;
			if (loff == lbase && uio->uio_offset + n == new_eof)
				trivial = 1;
			endofblk = 0;
		} else {
			if (loff == 0)
				trivial = 1;
			endofblk = 1;
		}
		if (lbase >= new_eof)
			trivial = 1;

		/*
		 * Get the buffer
		 */
		if (uio->uio_segflg == UIO_NOCOPY) {
			/*
			 * Issuing a write with the same data backing the
			 * buffer.  Instantiate the buffer to collect the
			 * backing vm pages, then read-in any missing bits.
			 *
			 * This case is used by vop_stdputpages().
			 */
			bp = getblk(ip->vp, lbase, lblksize,
				    GETBLK_BHEAVY | GETBLK_KVABIO, 0);
			if ((bp->b_flags & B_CACHE) == 0) {
				bqrelse(bp);
				error = bread_kvabio(ip->vp, lbase,
						     lblksize, &bp);
			}
		} else if (trivial) {
			/*
			 * Even though we are entirely overwriting the buffer
			 * we may still have to zero it out to avoid a
			 * mmap/write visibility issue.
			 */
			bp = getblk(ip->vp, lbase, lblksize,
				    GETBLK_BHEAVY | GETBLK_KVABIO, 0);
			if ((bp->b_flags & B_CACHE) == 0)
				vfs_bio_clrbuf(bp);
		} else {
			/*
			 * Partial overwrite, read in any missing bits then
			 * replace the portion being written.
			 *
			 * (The strategy code will detect zero-fill physical
			 * blocks for this case).
			 */
			error = bread_kvabio(ip->vp, lbase, lblksize, &bp);
			if (error == 0)
				bheavy(bp);
		}

		if (error) {
			brelse(bp);
			break;
		}

		/*
		 * Ok, copy the data in
		 */
		bkvasync(bp);
		error = uiomovebp(bp, bp->b_data + loff, n, uio);
		kflags |= NOTE_WRITE;
		modified = 1;
		if (error) {
			brelse(bp);
			break;
		}

		/*
		 * WARNING: Pageout daemon will issue UIO_NOCOPY writes
		 *	    with IO_SYNC or IO_ASYNC set.  These writes
		 *	    must be handled as the pageout daemon expects.
		 *
		 * NOTE!    H2 relies on cluster_write() here because it
		 *	    cannot preallocate disk blocks at the logical
		 *	    level due to not knowing what the compression
		 *	    size will be at this time.
		 *
		 *	    We must use cluster_write() here and we depend
		 *	    on the write-behind feature to flush buffers
		 *	    appropriately.  If we let the buffer daemons do
		 *	    it the block allocations will be all over the
		 *	    map.
		 */
		if (ioflag & IO_SYNC) {
			bwrite(bp);
		} else if ((ioflag & IO_DIRECT) && endofblk) {
			bawrite(bp);
		} else if (ioflag & IO_ASYNC) {
			bawrite(bp);
		} else if (ip->vp->v_mount->mnt_flag & MNT_NOCLUSTERW) {
			bdwrite(bp);
		} else {
#if 1
			bp->b_flags |= B_CLUSTEROK;
			cluster_write(bp, new_eof, lblksize, seqcount);
#else
			bp->b_flags |= B_CLUSTEROK;
			bdwrite(bp);
#endif
		}
	}

	/*
	 * Cleanup.  If we extended the file EOF but failed to write through
	 * the entire write is a failure and we have to back-up.
	 */
	if (error && new_eof != old_eof) {
		hammer2_mtx_unlock(&ip->truncate_lock);
		hammer2_mtx_ex(&ip->lock);
		hammer2_mtx_ex(&ip->truncate_lock);
		hammer2_truncate_file(ip, old_eof);
		if (ip->flags & HAMMER2_INODE_MODIFIED)
			hammer2_inode_chain_sync(ip);
		hammer2_mtx_unlock(&ip->lock);
	} else if (modified) {
		hammer2_mtx_ex(&ip->lock);
		hammer2_inode_modify(ip);
		hammer2_update_time(&ip->meta.mtime);
		if (ip->flags & HAMMER2_INODE_MODIFIED)
			hammer2_inode_chain_sync(ip);
		hammer2_mtx_unlock(&ip->lock);
		hammer2_knote(ip->vp, kflags);
	}
	hammer2_trans_assert_strategy(ip->pmp);
	hammer2_mtx_unlock(&ip->truncate_lock);

	return error;
}
/*
 * Truncate the size of a file.  The inode must not be locked.
 *
 * We must unconditionally set HAMMER2_INODE_RESIZED to properly
 * ensure that any on-media data beyond the new file EOF has been destroyed.
 *
 * WARNING: nvtruncbuf() can only be safely called without the inode lock
 *	    held due to the way our write thread works.  If the truncation
 *	    occurs in the middle of a buffer, nvtruncbuf() is responsible
 *	    for dirtying that buffer and zeroing out trailing bytes.
 *
 * WARNING! Assumes that the kernel interlocks size changes at the
 *	    vnode level.
 *
 * WARNING! Caller assumes responsibility for removing dead blocks
 *	    if INODE_RESIZED is set.
 */
static
void
hammer2_truncate_file(hammer2_inode_t *ip, hammer2_key_t nsize)
{
	hammer2_key_t lbase;
	int nblksize;

	hammer2_mtx_unlock(&ip->lock);
	if (ip->vp) {
		nblksize = hammer2_calc_logical(ip, nsize, &lbase, NULL);
		nvtruncbuf(ip->vp, nsize,
			   nblksize, (int)nsize & (nblksize - 1),
			   0);
	}
	hammer2_mtx_ex(&ip->lock);
	KKASSERT((ip->flags & HAMMER2_INODE_RESIZED) == 0);
	ip->osize = ip->meta.size;
	ip->meta.size = nsize;
	atomic_set_int(&ip->flags, HAMMER2_INODE_RESIZED);
	hammer2_inode_modify(ip);
}
/*
 * Extend the size of a file.  The inode must not be locked.
 *
 * Even though the file size is changing, we do not have to set the
 * INODE_RESIZED bit unless the file size crosses the EMBEDDED_BYTES
 * boundary.  When this occurs a hammer2_inode_chain_sync() is required
 * to prepare the inode cluster's indirect block table, otherwise
 * async execution of the strategy code will implode on us.
 *
 * WARNING! Assumes that the kernel interlocks size changes at the
 *	    vnode level.
 *
 * WARNING! Caller assumes responsibility for transitioning out
 *	    of the inode DIRECTDATA mode if INODE_RESIZED is set.
 */
static
void
hammer2_extend_file(hammer2_inode_t *ip, hammer2_key_t nsize)
{
	hammer2_key_t lbase;
	hammer2_key_t osize;
	int oblksize;
	int nblksize;

	KKASSERT((ip->flags & HAMMER2_INODE_RESIZED) == 0);
	hammer2_inode_modify(ip);
	osize = ip->meta.size;
	ip->osize = osize;
	ip->meta.size = nsize;

	if (osize <= HAMMER2_EMBEDDED_BYTES && nsize > HAMMER2_EMBEDDED_BYTES) {
		atomic_set_int(&ip->flags, HAMMER2_INODE_RESIZED);
		hammer2_inode_chain_sync(ip);
	}

	hammer2_mtx_unlock(&ip->lock);
	if (ip->vp) {
		oblksize = hammer2_calc_logical(ip, osize, &lbase, NULL);
		nblksize = hammer2_calc_logical(ip, nsize, &lbase, NULL);
		nvextendbuf(ip->vp,
			    osize, nsize,
			    oblksize, nblksize,
			    -1, -1, 0);
	}
	hammer2_mtx_ex(&ip->lock);
}
static
int
hammer2_vop_nresolve(struct vop_nresolve_args *ap)
{
	hammer2_xop_nresolve_t *xop;
	hammer2_inode_t *ip;
	hammer2_inode_t *dip;
	struct namecache *ncp;
	struct vnode *vp;
	int error;

	dip = VTOI(ap->a_dvp);
	xop = hammer2_xop_alloc(dip, 0);

	ncp = ap->a_nch->ncp;
	hammer2_xop_setname(&xop->head, ncp->nc_name, ncp->nc_nlen);

	/*
	 * Note: In DragonFly the kernel handles '.' and '..'.
	 */
	hammer2_inode_lock(dip, HAMMER2_RESOLVE_SHARED);
	hammer2_xop_start(&xop->head, hammer2_xop_nresolve);

	error = hammer2_xop_collect(&xop->head, 0);
	error = hammer2_error_to_errno(error);
	if (error) {
		ip = NULL;
	} else {
		ip = hammer2_inode_get(dip->pmp, dip, &xop->head.cluster, -1);
	}
	hammer2_inode_unlock(dip);

	/*
	 * Acquire the related vnode
	 *
	 * NOTE: For error processing, only ENOENT resolves the namecache
	 *	 entry to NULL, otherwise we just return the error and
	 *	 leave the namecache unresolved.
	 *
	 * NOTE: multiple hammer2_inode structures can be aliased to the
	 *	 same chain element, for example for hardlinks.  This
	 *	 use case does not 'reattach' inode associations that
	 *	 might already exist, but always allocates a new one.
	 *
	 * WARNING: inode structure is locked exclusively via inode_get
	 *	    but chain was locked shared.  inode_unlock()
	 *	    will handle it properly.
	 */
	if (ip) {
		vp = hammer2_igetv(ip, &error);	/* error set to UNIX error */
		if (error == 0) {
			vn_unlock(vp);
			cache_setvp(ap->a_nch, vp);
		} else if (error == ENOENT) {
			cache_setvp(ap->a_nch, NULL);
		}
		hammer2_inode_unlock(ip);

		/*
		 * The vp should not be released until after we've disposed
		 * of our locks, because it might cause vop_inactive() to
		 * be called.
		 */
		if (vp)
			vrele(vp);
	} else {
		error = ENOENT;
		cache_setvp(ap->a_nch, NULL);
	}
	hammer2_xop_retire(&xop->head, HAMMER2_XOPMASK_VOP);
	KASSERT(error || ap->a_nch->ncp->nc_vp != NULL,
		("resolve error %d/%p ap %p\n",
		 error, ap->a_nch->ncp->nc_vp, ap));

	return error;
}
static
int
hammer2_vop_nlookupdotdot(struct vop_nlookupdotdot_args *ap)
{
	hammer2_inode_t *dip;
	hammer2_tid_t inum;
	int error;

	dip = VTOI(ap->a_dvp);
	inum = dip->meta.iparent;
	*ap->a_vpp = NULL;
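
	/*
	 * A zero iparent means there is no parent linkage recorded
	 * (e.g. at the PFS root), so ".." cannot be resolved.
	 */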
	if (inum) {
		error = hammer2_vfs_vget(ap->a_dvp->v_mount, NULL,
					 inum, ap->a_vpp);
	} else {
		error = ENOENT;
	}
	return error;
}
static
int
hammer2_vop_nmkdir(struct vop_nmkdir_args *ap)
{
	hammer2_inode_t *dip;
	hammer2_inode_t *nip;
	struct namecache *ncp;
	const uint8_t *name;
	size_t name_len;
	hammer2_tid_t inum;
	int error;

	dip = VTOI(ap->a_dvp);
	if (dip->pmp->ronly)
		return (EROFS);
	if (hammer2_vfs_enospace(dip, 0, ap->a_cred) > 1)
		return (ENOSPC);

	ncp = ap->a_nch->ncp;
	name = ncp->nc_name;
	name_len = ncp->nc_nlen;

	hammer2_pfs_memory_wait(dip->pmp);
	hammer2_trans_init(dip->pmp, 0);

	inum = hammer2_trans_newinum(dip->pmp);

	/*
	 * Create the actual inode as a hidden file in the iroot, then
	 * create the directory entry.  The creation of the actual inode
	 * sets its nlinks to 1 which is the value we desire.
	 */
	nip = hammer2_inode_create(dip->pmp->iroot, dip, ap->a_vap, ap->a_cred,
				   NULL, 0, inum,
				   inum, 0, 0,
				   0, &error);
	if (error) {
		error = hammer2_error_to_errno(error);
	} else {
		error = hammer2_dirent_create(dip, name, name_len,
					      nip->meta.inum, nip->meta.type);
		/* returns UNIX error code */
	}
	if (error) {
		if (nip) {
			hammer2_inode_unlink_finisher(nip, 0);
			hammer2_inode_unlock(nip);
			nip = NULL;
		}
		*ap->a_vpp = NULL;
	} else {
		*ap->a_vpp = hammer2_igetv(nip, &error);
		hammer2_inode_unlock(nip);
	}

	/*
	 * Update dip's mtime
	 *
	 * We can use a shared inode lock and allow the meta.mtime update
	 * SMP race.  hammer2_inode_modify() is MPSAFE w/a shared lock.
	 */
	if (error == 0) {
		uint64_t mtime;

		hammer2_inode_lock(dip, HAMMER2_RESOLVE_SHARED);
		hammer2_update_time(&mtime);
		hammer2_inode_modify(dip);
		dip->meta.mtime = mtime;
		hammer2_inode_unlock(dip);
	}

	hammer2_trans_done(dip->pmp);

	if (error == 0) {
		cache_setunresolved(ap->a_nch);
		cache_setvp(ap->a_nch, *ap->a_vpp);
		hammer2_knote(ap->a_dvp, NOTE_WRITE | NOTE_LINK);
	}
	return error;
}
static
int
hammer2_vop_open(struct vop_open_args *ap)
{
	return vop_stdopen(ap);
}
/*
 * hammer2_vop_advlock { vp, id, op, fl, flags }
 */
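/*
 * Advisory record locking is delegated to the kernel's generic lockf
 * code; the current file size is supplied so that EOF-relative lock
 * ranges can be resolved.
 */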
static
int
hammer2_vop_advlock(struct vop_advlock_args *ap)
{
	hammer2_inode_t *ip = VTOI(ap->a_vp);
	hammer2_off_t size;

	size = ip->meta.size;
	return (lf_advlock(ap, &ip->advlock, size));
}
static
int
hammer2_vop_close(struct vop_close_args *ap)
{
	return vop_stdclose(ap);
}
/*
 * hammer2_vop_nlink { nch, dvp, vp, cred }
 *
 * Create a hardlink from (vp) to {dvp, nch}.
 */
static
int
hammer2_vop_nlink(struct vop_nlink_args *ap)
{
	hammer2_inode_t *tdip;	/* target directory to create link in */
	hammer2_inode_t *ip;	/* inode we are hardlinking to */
	struct namecache *ncp;
	const uint8_t *name;
	size_t name_len;
	int error;

	if (ap->a_dvp->v_mount != ap->a_vp->v_mount)
		return(EXDEV);

	tdip = VTOI(ap->a_dvp);
	if (tdip->pmp->ronly)
		return (EROFS);
	if (hammer2_vfs_enospace(tdip, 0, ap->a_cred) > 1)
		return (ENOSPC);

	ncp = ap->a_nch->ncp;
	name = ncp->nc_name;
	name_len = ncp->nc_nlen;

	/*
	 * ip represents the file being hardlinked.  The file could be a
	 * normal file or a hardlink target if it has already been hardlinked.
	 * (with the new semantics, it will almost always be a hardlink
	 * target).
	 *
	 * Bump nlinks and potentially also create or move the hardlink
	 * target in the parent directory common to (ip) and (tdip).  The
	 * consolidation code can modify ip->cluster.  The returned cluster
	 * is locked.
	 */
	ip = VTOI(ap->a_vp);
	KASSERT(ip->pmp, ("ip->pmp is NULL %p %p", ip, ip->pmp));
	hammer2_pfs_memory_wait(ip->pmp);
	hammer2_trans_init(ip->pmp, 0);

	/*
	 * Target should be an indexed inode or there's no way we will ever
	 * be able to find it!
	 */
	KKASSERT((ip->meta.name_key & HAMMER2_DIRHASH_VISIBLE) == 0);

	error = 0;

	/*
	 * Can return NULL and error == EXDEV if the common parent
	 * crosses a directory with the xlink flag set.
	 */
	hammer2_inode_lock(tdip, 0);
	hammer2_inode_lock(ip, 0);

	/*
	 * Create the directory entry and bump nlinks.
	 */
	if (error == 0) {
		error = hammer2_dirent_create(tdip, name, name_len,
					      ip->meta.inum, ip->meta.type);
		hammer2_inode_modify(ip);
		++ip->meta.nlinks;
	}
	if (error == 0) {
		/*
		 * Update dip's mtime
		 */
		uint64_t mtime;

		hammer2_update_time(&mtime);
		hammer2_inode_modify(tdip);
		tdip->meta.mtime = mtime;

		cache_setunresolved(ap->a_nch);
		cache_setvp(ap->a_nch, ap->a_vp);
	}
	hammer2_inode_unlock(ip);
	hammer2_inode_unlock(tdip);

	hammer2_trans_done(ip->pmp);
	hammer2_knote(ap->a_vp, NOTE_LINK);
	hammer2_knote(ap->a_dvp, NOTE_WRITE);

	return error;
}
/*
 * hammer2_vop_ncreate { nch, dvp, vpp, cred, vap }
 *
 * The operating system has already ensured that the directory entry
 * does not exist and done all appropriate namespace locking.
 */
static
int
hammer2_vop_ncreate(struct vop_ncreate_args *ap)
{
	hammer2_inode_t *dip;
	hammer2_inode_t *nip;
	struct namecache *ncp;
	const uint8_t *name;
	size_t name_len;
	hammer2_tid_t inum;
	int error;

	dip = VTOI(ap->a_dvp);
	if (dip->pmp->ronly)
		return (EROFS);
	if (hammer2_vfs_enospace(dip, 0, ap->a_cred) > 1)
		return (ENOSPC);

	ncp = ap->a_nch->ncp;
	name = ncp->nc_name;
	name_len = ncp->nc_nlen;
	hammer2_pfs_memory_wait(dip->pmp);
	hammer2_trans_init(dip->pmp, 0);

	inum = hammer2_trans_newinum(dip->pmp);

	/*
	 * Create the actual inode as a hidden file in the iroot, then
	 * create the directory entry.  The creation of the actual inode
	 * sets its nlinks to 1 which is the value we desire.
	 */
	nip = hammer2_inode_create(dip->pmp->iroot, dip, ap->a_vap, ap->a_cred,
				   NULL, 0, inum,
				   inum, 0, 0,
				   0, &error);

	if (error) {
		error = hammer2_error_to_errno(error);
	} else {
		error = hammer2_dirent_create(dip, name, name_len,
					      nip->meta.inum, nip->meta.type);
	}
	if (error) {
		if (nip) {
			hammer2_inode_unlink_finisher(nip, 0);
			hammer2_inode_unlock(nip);
			nip = NULL;
		}
		*ap->a_vpp = NULL;
	} else {
		*ap->a_vpp = hammer2_igetv(nip, &error);
		hammer2_inode_unlock(nip);
	}

	/*
	 * Update dip's mtime
	 */
	if (error == 0) {
		uint64_t mtime;

		hammer2_inode_lock(dip, HAMMER2_RESOLVE_SHARED);
		hammer2_update_time(&mtime);
		hammer2_inode_modify(dip);
		dip->meta.mtime = mtime;
		hammer2_inode_unlock(dip);
	}

	hammer2_trans_done(dip->pmp);

	if (error == 0) {
		cache_setunresolved(ap->a_nch);
		cache_setvp(ap->a_nch, *ap->a_vpp);
		hammer2_knote(ap->a_dvp, NOTE_WRITE);
	}
	return error;
}
/*
 * Make a device node (typically a fifo)
 */
static
int
hammer2_vop_nmknod(struct vop_nmknod_args *ap)
{
	hammer2_inode_t *dip;
	hammer2_inode_t *nip;
	struct namecache *ncp;
	const uint8_t *name;
	size_t name_len;
	hammer2_tid_t inum;
	int error;

	dip = VTOI(ap->a_dvp);
	if (dip->pmp->ronly)
		return (EROFS);
	if (hammer2_vfs_enospace(dip, 0, ap->a_cred) > 1)
		return (ENOSPC);

	ncp = ap->a_nch->ncp;
	name = ncp->nc_name;
	name_len = ncp->nc_nlen;
	hammer2_pfs_memory_wait(dip->pmp);
	hammer2_trans_init(dip->pmp, 0);

	/*
	 * Create the device inode and then create the directory entry.
	 */
	inum = hammer2_trans_newinum(dip->pmp);
	nip = hammer2_inode_create(dip->pmp->iroot, dip, ap->a_vap, ap->a_cred,
				   NULL, 0, inum,
				   inum, 0, 0,
				   0, &error);
	if (error == 0) {
		error = hammer2_dirent_create(dip, name, name_len,
					      nip->meta.inum, nip->meta.type);
	}
	if (error) {
		if (nip) {
			hammer2_inode_unlink_finisher(nip, 0);
			hammer2_inode_unlock(nip);
			nip = NULL;
		}
		*ap->a_vpp = NULL;
	} else {
		*ap->a_vpp = hammer2_igetv(nip, &error);
		hammer2_inode_unlock(nip);
	}

	/*
	 * Update dip's mtime
	 */
	if (error == 0) {
		uint64_t mtime;

		hammer2_inode_lock(dip, HAMMER2_RESOLVE_SHARED);
		hammer2_update_time(&mtime);
		hammer2_inode_modify(dip);
		dip->meta.mtime = mtime;
		hammer2_inode_unlock(dip);
	}

	hammer2_trans_done(dip->pmp);

	if (error == 0) {
		cache_setunresolved(ap->a_nch);
		cache_setvp(ap->a_nch, *ap->a_vpp);
		hammer2_knote(ap->a_dvp, NOTE_WRITE);
	}
	return error;
}
/*
 * hammer2_vop_nsymlink { nch, dvp, vpp, cred, vap, target }
 */
static
int
hammer2_vop_nsymlink(struct vop_nsymlink_args *ap)
{
	hammer2_inode_t *dip;
	hammer2_inode_t *nip;
	struct namecache *ncp;
	const uint8_t *name;
	size_t name_len;
	hammer2_tid_t inum;
	int error;

	dip = VTOI(ap->a_dvp);
	if (dip->pmp->ronly)
		return (EROFS);
	if (hammer2_vfs_enospace(dip, 0, ap->a_cred) > 1)
		return (ENOSPC);

	ncp = ap->a_nch->ncp;
	name = ncp->nc_name;
	name_len = ncp->nc_nlen;
	hammer2_pfs_memory_wait(dip->pmp);
	hammer2_trans_init(dip->pmp, 0);

	ap->a_vap->va_type = VLNK;	/* enforce type */

	/*
	 * Create the softlink as an inode and then create the directory
	 * entry.
	 */
	inum = hammer2_trans_newinum(dip->pmp);

	nip = hammer2_inode_create(dip->pmp->iroot, dip, ap->a_vap, ap->a_cred,
				   NULL, 0, inum,
				   inum, 0, 0,
				   0, &error);
	if (error == 0) {
		error = hammer2_dirent_create(dip, name, name_len,
					      nip->meta.inum, nip->meta.type);
	}
	if (error) {
		if (nip) {
			hammer2_inode_unlink_finisher(nip, 0);
			hammer2_inode_unlock(nip);
			nip = NULL;
		}
		*ap->a_vpp = NULL;
		hammer2_trans_done(dip->pmp);
		return error;
	}
	*ap->a_vpp = hammer2_igetv(nip, &error);

	/*
	 * Build the softlink (~like file data) and finalize the namecache.
	 */
	if (error == 0) {
		size_t bytes;
		struct uio auio;
		struct iovec aiov;

		bytes = strlen(ap->a_target);

		hammer2_inode_unlock(nip);
		bzero(&auio, sizeof(auio));
		bzero(&aiov, sizeof(aiov));
		auio.uio_iov = &aiov;
		auio.uio_segflg = UIO_SYSSPACE;
		auio.uio_rw = UIO_WRITE;
		auio.uio_resid = bytes;
		auio.uio_iovcnt = 1;
		auio.uio_td = curthread;
		aiov.iov_base = ap->a_target;
		aiov.iov_len = bytes;
		error = hammer2_write_file(nip, &auio, IO_APPEND, 0);
		/* XXX handle error */
		error = 0;
	} else {
		hammer2_inode_unlock(nip);
	}

	/*
	 * Update dip's mtime
	 */
	if (error == 0) {
		uint64_t mtime;

		hammer2_inode_lock(dip, HAMMER2_RESOLVE_SHARED);
		hammer2_update_time(&mtime);
		hammer2_inode_modify(dip);
		dip->meta.mtime = mtime;
		hammer2_inode_unlock(dip);
	}

	hammer2_trans_done(dip->pmp);

	/*
	 * Finalize namecache
	 */
	if (error == 0) {
		cache_setunresolved(ap->a_nch);
		cache_setvp(ap->a_nch, *ap->a_vpp);
		hammer2_knote(ap->a_dvp, NOTE_WRITE);
	}
	return error;
}
/*
 * hammer2_vop_nremove { nch, dvp, cred }
 */
static
int
hammer2_vop_nremove(struct vop_nremove_args *ap)
{
	hammer2_xop_unlink_t *xop;
	hammer2_inode_t *dip;
	hammer2_inode_t *ip;
	struct namecache *ncp;
	int error;
	int isopen;

	dip = VTOI(ap->a_dvp);
	if (dip->pmp->ronly)
		return (EROFS);
#if 0
	/* allow removals, except user to also bulkfree */
	if (hammer2_vfs_enospace(dip, 0, ap->a_cred) > 1)
		return (ENOSPC);
#endif

	ncp = ap->a_nch->ncp;

	hammer2_pfs_memory_wait(dip->pmp);
	hammer2_trans_init(dip->pmp, 0);
	hammer2_inode_lock(dip, 0);

	/*
	 * The unlink XOP unlinks the path from the directory and
	 * locates and returns the cluster associated with the real inode.
	 * We have to handle nlinks here on the frontend.
	 */
	xop = hammer2_xop_alloc(dip, HAMMER2_XOP_MODIFYING);
	hammer2_xop_setname(&xop->head, ncp->nc_name, ncp->nc_nlen);

	/*
	 * The namecache entry is locked so nobody can use this namespace.
	 * Calculate isopen to determine if this namespace has an open vp
	 * associated with it and resolve the vp only if it does.
	 *
	 * We try to avoid resolving the vnode if nobody has it open, but
	 * note that the test is via this namespace only.
	 */
	isopen = cache_isopen(ap->a_nch);
	xop->isdir = 0;
	xop->dopermanent = 0;
	hammer2_xop_start(&xop->head, hammer2_xop_unlink);

	/*
	 * Collect the real inode and adjust nlinks, destroy the real
	 * inode if nlinks transitions to 0 and it was the real inode
	 * (else it has already been removed).
	 */
	error = hammer2_xop_collect(&xop->head, 0);
	error = hammer2_error_to_errno(error);
	hammer2_inode_unlock(dip);

	if (error == 0) {
		ip = hammer2_inode_get(dip->pmp, dip, &xop->head.cluster, -1);
		hammer2_xop_retire(&xop->head, HAMMER2_XOPMASK_VOP);
		if (ip) {
			hammer2_inode_unlink_finisher(ip, isopen);
			hammer2_inode_unlock(ip);
		}
	} else {
		hammer2_xop_retire(&xop->head, HAMMER2_XOPMASK_VOP);
	}

	/*
	 * Update dip's mtime
	 */
	if (error == 0) {
		uint64_t mtime;

		hammer2_inode_lock(dip, HAMMER2_RESOLVE_SHARED);
		hammer2_update_time(&mtime);
		hammer2_inode_modify(dip);
		dip->meta.mtime = mtime;
		hammer2_inode_unlock(dip);
	}

	hammer2_inode_run_sideq(dip->pmp, 0);
	hammer2_trans_done(dip->pmp);
	if (error == 0) {
		cache_unlink(ap->a_nch);
		hammer2_knote(ap->a_dvp, NOTE_WRITE);
	}
	return (error);
}
/*
 * hammer2_vop_nrmdir { nch, dvp, cred }
 */
static
int
hammer2_vop_nrmdir(struct vop_nrmdir_args *ap)
{
	hammer2_xop_unlink_t *xop;
	hammer2_inode_t *dip;
	hammer2_inode_t *ip;
	struct namecache *ncp;
	int isopen;
	int error;

	dip = VTOI(ap->a_dvp);
	if (dip->pmp->ronly)
		return (EROFS);
#if 0
	/* allow removals, except user to also bulkfree */
	if (hammer2_vfs_enospace(dip, 0, ap->a_cred) > 1)
		return (ENOSPC);
#endif

	hammer2_pfs_memory_wait(dip->pmp);
	hammer2_trans_init(dip->pmp, 0);
	hammer2_inode_lock(dip, 0);

	xop = hammer2_xop_alloc(dip, HAMMER2_XOP_MODIFYING);

	ncp = ap->a_nch->ncp;
	hammer2_xop_setname(&xop->head, ncp->nc_name, ncp->nc_nlen);
	isopen = cache_isopen(ap->a_nch);
	xop->isdir = 1;
	xop->dopermanent = 0;
	hammer2_xop_start(&xop->head, hammer2_xop_unlink);

	/*
	 * Collect the real inode and adjust nlinks, destroy the real
	 * inode if nlinks transitions to 0 and it was the real inode
	 * (else it has already been removed).
	 */
	error = hammer2_xop_collect(&xop->head, 0);
	error = hammer2_error_to_errno(error);
	hammer2_inode_unlock(dip);

	if (error == 0) {
		ip = hammer2_inode_get(dip->pmp, dip, &xop->head.cluster, -1);
		hammer2_xop_retire(&xop->head, HAMMER2_XOPMASK_VOP);
		if (ip) {
			hammer2_inode_unlink_finisher(ip, isopen);
			hammer2_inode_unlock(ip);
		}
	} else {
		hammer2_xop_retire(&xop->head, HAMMER2_XOPMASK_VOP);
	}

	/*
	 * Update dip's mtime
	 */
	if (error == 0) {
		uint64_t mtime;

		hammer2_inode_lock(dip, HAMMER2_RESOLVE_SHARED);
		hammer2_update_time(&mtime);
		hammer2_inode_modify(dip);
		dip->meta.mtime = mtime;
		hammer2_inode_unlock(dip);
	}

	hammer2_inode_run_sideq(dip->pmp, 0);
	hammer2_trans_done(dip->pmp);
	if (error == 0) {
		cache_unlink(ap->a_nch);
		hammer2_knote(ap->a_dvp, NOTE_WRITE | NOTE_LINK);
	}
	return (error);
}
/*
 * hammer2_vop_nrename { fnch, tnch, fdvp, tdvp, cred }
 */
static
int
hammer2_vop_nrename(struct vop_nrename_args *ap)
{
	struct namecache *fncp;
	struct namecache *tncp;
	hammer2_inode_t *fdip;	/* source directory */
	hammer2_inode_t *tdip;	/* target directory */
	hammer2_inode_t *ip;	/* file being renamed */
	hammer2_inode_t *tip;	/* replaced target during rename or NULL */
	const uint8_t *fname;
	size_t fname_len;
	const uint8_t *tname;
	size_t tname_len;
	int error;
	int update_tdip;
	int update_fdip;
	hammer2_key_t tlhc;

	if (ap->a_fdvp->v_mount != ap->a_tdvp->v_mount)
		return(EXDEV);
	if (ap->a_fdvp->v_mount != ap->a_fnch->ncp->nc_vp->v_mount)
		return(EXDEV);

	fdip = VTOI(ap->a_fdvp);	/* source directory */
	tdip = VTOI(ap->a_tdvp);	/* target directory */

	if (fdip->pmp->ronly)
		return (EROFS);
	if (hammer2_vfs_enospace(fdip, 0, ap->a_cred) > 1)
		return (ENOSPC);

	fncp = ap->a_fnch->ncp;		/* entry name in source */
	fname = fncp->nc_name;
	fname_len = fncp->nc_nlen;

	tncp = ap->a_tnch->ncp;		/* entry name in target */
	tname = tncp->nc_name;
	tname_len = tncp->nc_nlen;

	hammer2_pfs_memory_wait(tdip->pmp);
	hammer2_trans_init(tdip->pmp, 0);

	update_tdip = 0;
	update_fdip = 0;

	ip = VTOI(fncp->nc_vp);
	hammer2_inode_ref(ip);		/* extra ref */

	/*
	 * Lookup the target name to determine if a directory entry
	 * is being overwritten.  We only hold related inode locks
	 * temporarily, the operating system is expected to protect
	 * against rename races.
	 */
	tip = tncp->nc_vp ? VTOI(tncp->nc_vp) : NULL;
	if (tip)
		hammer2_inode_ref(tip);	/* extra ref */

	/*
	 * Can return NULL and error == EXDEV if the common parent
	 * crosses a directory with the xlink flag set.
	 *
	 * For now try to avoid deadlocks with a simple pointer address
	 * test.  (tip) can be NULL.
	 */
	error = 0;
	if (fdip <= tdip) {
		hammer2_inode_lock(fdip, 0);
		hammer2_inode_lock(tdip, 0);
	} else {
		hammer2_inode_lock(tdip, 0);
		hammer2_inode_lock(fdip, 0);
	}
	if (tip) {
		if (ip <= tip) {
			hammer2_inode_lock(ip, 0);
			hammer2_inode_lock(tip, 0);
		} else {
			hammer2_inode_lock(tip, 0);
			hammer2_inode_lock(ip, 0);
		}
	} else {
		hammer2_inode_lock(ip, 0);
	}

#if 0
	/*
	 * Delete the target namespace.
	 *
	 * REMOVED - NOW FOLDED INTO XOP_NRENAME OPERATION
	 */
	{
		hammer2_xop_unlink_t *xop2;
		hammer2_inode_t *tip;
		int isopen;

		/*
		 * The unlink XOP unlinks the path from the directory and
		 * locates and returns the cluster associated with the real
		 * inode.  We have to handle nlinks here on the frontend.
		 */
		xop2 = hammer2_xop_alloc(tdip, HAMMER2_XOP_MODIFYING);
		hammer2_xop_setname(&xop2->head, tname, tname_len);
		isopen = cache_isopen(ap->a_tnch);
		xop2->isdir = -1;
		xop2->dopermanent = 0;
		hammer2_xop_start(&xop2->head, hammer2_xop_unlink);

		/*
		 * Collect the real inode and adjust nlinks, destroy the real
		 * inode if nlinks transitions to 0 and it was the real inode
		 * (else it has already been removed).
		 */
		tnch_error = hammer2_xop_collect(&xop2->head, 0);
		tnch_error = hammer2_error_to_errno(tnch_error);
		/* hammer2_inode_unlock(tdip); */

		if (tnch_error == 0) {
			tip = hammer2_inode_get(tdip->pmp, NULL,
						&xop2->head.cluster, -1);
			hammer2_xop_retire(&xop2->head, HAMMER2_XOPMASK_VOP);
			if (tip) {
				hammer2_inode_unlink_finisher(tip, isopen);
				hammer2_inode_unlock(tip);
			}
		} else {
			hammer2_xop_retire(&xop2->head, HAMMER2_XOPMASK_VOP);
		}
		/* hammer2_inode_lock(tdip, 0); */

		if (tnch_error && tnch_error != ENOENT) {
			error = tnch_error;
			goto done2;
		}
		update_tdip = 1;
	}
#endif

	/*
	 * Resolve the collision space for (tdip, tname, tname_len)
	 *
	 * tdip must be held exclusively locked to prevent races since
	 * multiple filenames can end up in the same collision space.
	 */
	{
		hammer2_xop_scanlhc_t *sxop;
		hammer2_tid_t lhcbase;

		tlhc = hammer2_dirhash(tname, tname_len);
		lhcbase = tlhc;
		sxop = hammer2_xop_alloc(tdip, HAMMER2_XOP_MODIFYING);
		sxop->lhc = tlhc;
		hammer2_xop_start(&sxop->head, hammer2_xop_scanlhc);
		while ((error = hammer2_xop_collect(&sxop->head, 0)) == 0) {
			if (tlhc != sxop->head.cluster.focus->bref.key)
				break;
			++tlhc;
		}
		error = hammer2_error_to_errno(error);
		hammer2_xop_retire(&sxop->head, HAMMER2_XOPMASK_VOP);

		if (error) {
			if (error != ENOENT)
				goto done2;
			++tlhc;
			error = 0;
		}
		if ((lhcbase ^ tlhc) & ~HAMMER2_DIRHASH_LOMASK) {
			error = ENOSPC;
			goto done2;
		}
	}

	/*
	 * Ready to go, issue the rename to the backend.  Note that meta-data
	 * updates to the related inodes occur separately from the rename
	 * operation.
	 *
	 * NOTE: While it is not necessary to update ip->meta.name*, doing
	 *	 so aids catastrophic recovery and debugging.
	 */
	if (error == 0) {
		hammer2_xop_nrename_t *xop4;

		xop4 = hammer2_xop_alloc(fdip, HAMMER2_XOP_MODIFYING);
		xop4->lhc = tlhc;
		xop4->ip_key = ip->meta.name_key;
		hammer2_xop_setip2(&xop4->head, ip);
		hammer2_xop_setip3(&xop4->head, tdip);
		hammer2_xop_setname(&xop4->head, fname, fname_len);
		hammer2_xop_setname2(&xop4->head, tname, tname_len);
		hammer2_xop_start(&xop4->head, hammer2_xop_nrename);

		error = hammer2_xop_collect(&xop4->head, 0);
		error = hammer2_error_to_errno(error);
		hammer2_xop_retire(&xop4->head, HAMMER2_XOPMASK_VOP);

		if (error == ENOENT)
			error = 0;

		/*
		 * Update inode meta-data.
		 *
		 * WARNING! The in-memory inode (ip) structure does not
		 *	    maintain a copy of the inode's filename buffer.
		 */
		if (error == 0 &&
		    (ip->meta.name_key & HAMMER2_DIRHASH_VISIBLE)) {
			hammer2_inode_modify(ip);
			ip->meta.name_len = tname_len;
			ip->meta.name_key = tlhc;
		}
		if (error == 0) {
			hammer2_inode_modify(ip);
			ip->meta.iparent = tdip->meta.inum;
		}
		update_fdip = 1;
		update_tdip = 1;
	}

done2:
	/*
	 * If no error, the backend has replaced the target directory entry.
	 * We must adjust nlinks on the original replace target if it exists.
	 */
	if (error == 0 && tip) {
		int isopen;

		isopen = cache_isopen(ap->a_tnch);
		hammer2_inode_unlink_finisher(tip, isopen);
	}

	/*
	 * Update directory mtimes to reflect that something changed.
	 */
	if (update_fdip || update_tdip) {
		uint64_t mtime;

		hammer2_update_time(&mtime);
		if (update_fdip) {
			hammer2_inode_modify(fdip);
			fdip->meta.mtime = mtime;
		}
		if (update_tdip) {
			hammer2_inode_modify(tdip);
			tdip->meta.mtime = mtime;
		}
	}
	if (tip) {
		hammer2_inode_unlock(tip);
		hammer2_inode_drop(tip);
	}
	hammer2_inode_unlock(ip);
	hammer2_inode_unlock(tdip);
	hammer2_inode_unlock(fdip);
	hammer2_inode_drop(ip);
	hammer2_inode_run_sideq(fdip->pmp, 0);

	hammer2_trans_done(tdip->pmp);

	/*
	 * Issue the namecache update after unlocking all the internal
	 * hammer2 structures, otherwise we might deadlock.
	 */
	if (error == 0 && tip) {
		cache_unlink(ap->a_tnch);
		cache_setunresolved(ap->a_tnch);
	}
	if (error == 0) {
		cache_rename(ap->a_fnch, ap->a_tnch);
		hammer2_knote(ap->a_fdvp, NOTE_WRITE);
		hammer2_knote(ap->a_tdvp, NOTE_WRITE);
		hammer2_knote(fncp->nc_vp, NOTE_RENAME);
	}

	return (error);
}
/*
 * hammer2_vop_ioctl { vp, command, data, fflag, cred }
 */
static
int
hammer2_vop_ioctl(struct vop_ioctl_args *ap)
{
	hammer2_inode_t *ip;
	int error;

	ip = VTOI(ap->a_vp);

	error = hammer2_ioctl(ip, ap->a_command, (void *)ap->a_data,
			      ap->a_fflag, ap->a_cred);
	return (error);
}
static
int
hammer2_vop_mountctl(struct vop_mountctl_args *ap)
{
	struct mount *mp;
	hammer2_pfs_t *pmp;
	int rc;

	switch (ap->a_op) {
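	/*
	 * Set or update the NFS export configuration for this PFS.
	 */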
	case (MOUNTCTL_SET_EXPORT):
		mp = ap->a_head.a_ops->head.vv_mount;
		pmp = MPTOPMP(mp);

		if (ap->a_ctllen != sizeof(struct export_args))
			rc = (EINVAL);
		else
			rc = vfs_export(mp, &pmp->export,
					(const struct export_args *)ap->a_ctl);
		break;
	default:
		rc = vop_stdmountctl(ap);
		break;
	}
	return (rc);
}
/*
 * KQFILTER
 */
static void filt_hammer2detach(struct knote *kn);
static int filt_hammer2read(struct knote *kn, long hint);
static int filt_hammer2write(struct knote *kn, long hint);
static int filt_hammer2vnode(struct knote *kn, long hint);

static struct filterops hammer2read_filtops =
	{ FILTEROP_ISFD | FILTEROP_MPSAFE,
	  NULL, filt_hammer2detach, filt_hammer2read };
static struct filterops hammer2write_filtops =
	{ FILTEROP_ISFD | FILTEROP_MPSAFE,
	  NULL, filt_hammer2detach, filt_hammer2write };
static struct filterops hammer2vnode_filtops =
	{ FILTEROP_ISFD | FILTEROP_MPSAFE,
	  NULL, filt_hammer2detach, filt_hammer2vnode };

static
int
hammer2_vop_kqfilter(struct vop_kqfilter_args *ap)
{
	struct vnode *vp = ap->a_vp;
	struct knote *kn = ap->a_kn;

	switch (kn->kn_filter) {
	case EVFILT_READ:
		kn->kn_fop = &hammer2read_filtops;
		break;
	case EVFILT_WRITE:
		kn->kn_fop = &hammer2write_filtops;
		break;
	case EVFILT_VNODE:
		kn->kn_fop = &hammer2vnode_filtops;
		break;
	default:
		return (EOPNOTSUPP);
	}

	kn->kn_hook = (caddr_t)vp;

	knote_insert(&vp->v_pollinfo.vpi_kqinfo.ki_note, kn);

	return(0);
}

static void
filt_hammer2detach(struct knote *kn)
{
	struct vnode *vp = (void *)kn->kn_hook;

	knote_remove(&vp->v_pollinfo.vpi_kqinfo.ki_note, kn);
}
static int
filt_hammer2read(struct knote *kn, long hint)
{
	struct vnode *vp = (void *)kn->kn_hook;
	hammer2_inode_t *ip = VTOI(vp);
	off_t off;

	if (hint == NOTE_REVOKE) {
		kn->kn_flags |= (EV_EOF | EV_NODATA | EV_ONESHOT);
		return(1);
	}
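	/*
	 * Report the number of bytes readable past the current file
	 * position, clamped to what kn_data can represent.
	 */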
	off = ip->meta.size - kn->kn_fp->f_offset;
	kn->kn_data = (off < INTPTR_MAX) ? off : INTPTR_MAX;
	if (kn->kn_sfflags & NOTE_OLDAPI)
		return(1);
	return (kn->kn_data != 0);
}
static int
filt_hammer2write(struct knote *kn, long hint)
{
	if (hint == NOTE_REVOKE)
		kn->kn_flags |= (EV_EOF | EV_NODATA | EV_ONESHOT);
	kn->kn_data = 0;
	return (1);
}

static int
filt_hammer2vnode(struct knote *kn, long hint)
{
	if (kn->kn_sfflags & hint)
		kn->kn_fflags |= hint;
	if (hint == NOTE_REVOKE) {
		kn->kn_flags |= (EV_EOF | EV_NODATA);
		return (1);
	}
	return (kn->kn_fflags != 0);
}
/*
 * FIFO VOPS
 */
static
int
hammer2_vop_markatime(struct vop_markatime_args *ap)
{
	hammer2_inode_t *ip;
	struct vnode *vp;

	vp = ap->a_vp;
	ip = VTOI(vp);

	if (ip->pmp->ronly)
		return (EROFS);
	return(0);
}

static
int
hammer2_vop_fifokqfilter(struct vop_kqfilter_args *ap)
{
	int error;

	error = VOCALL(&fifo_vnode_vops, &ap->a_head);
	if (error)
		error = hammer2_vop_kqfilter(ap);
	return(error);
}
/*
 * VOPS vector
 */
struct vop_ops hammer2_vnode_vops = {
	.vop_default	= vop_defaultop,
	.vop_fsync	= hammer2_vop_fsync,
	.vop_getpages	= vop_stdgetpages,
	.vop_putpages	= vop_stdputpages,
	.vop_access	= hammer2_vop_access,
	.vop_advlock	= hammer2_vop_advlock,
	.vop_close	= hammer2_vop_close,
	.vop_nlink	= hammer2_vop_nlink,
	.vop_ncreate	= hammer2_vop_ncreate,
	.vop_nsymlink	= hammer2_vop_nsymlink,
	.vop_nremove	= hammer2_vop_nremove,
	.vop_nrmdir	= hammer2_vop_nrmdir,
	.vop_nrename	= hammer2_vop_nrename,
	.vop_getattr	= hammer2_vop_getattr,
	.vop_setattr	= hammer2_vop_setattr,
	.vop_readdir	= hammer2_vop_readdir,
	.vop_readlink	= hammer2_vop_readlink,
	.vop_read	= hammer2_vop_read,
	.vop_write	= hammer2_vop_write,
	.vop_open	= hammer2_vop_open,
	.vop_inactive	= hammer2_vop_inactive,
	.vop_reclaim	= hammer2_vop_reclaim,
	.vop_nresolve	= hammer2_vop_nresolve,
	.vop_nlookupdotdot = hammer2_vop_nlookupdotdot,
	.vop_nmkdir	= hammer2_vop_nmkdir,
	.vop_nmknod	= hammer2_vop_nmknod,
	.vop_ioctl	= hammer2_vop_ioctl,
	.vop_mountctl	= hammer2_vop_mountctl,
	.vop_bmap	= hammer2_vop_bmap,
	.vop_strategy	= hammer2_vop_strategy,
	.vop_kqfilter	= hammer2_vop_kqfilter
};

struct vop_ops hammer2_spec_vops = {
	.vop_default	= vop_defaultop,
	.vop_fsync	= hammer2_vop_fsync,
	.vop_read	= vop_stdnoread,
	.vop_write	= vop_stdnowrite,
	.vop_access	= hammer2_vop_access,
	.vop_close	= hammer2_vop_close,
	.vop_markatime	= hammer2_vop_markatime,
	.vop_getattr	= hammer2_vop_getattr,
	.vop_inactive	= hammer2_vop_inactive,
	.vop_reclaim	= hammer2_vop_reclaim,
	.vop_setattr	= hammer2_vop_setattr
};

struct vop_ops hammer2_fifo_vops = {
	.vop_default	= fifo_vnoperate,
	.vop_fsync	= hammer2_vop_fsync,
#if 0
	.vop_read	= hammer2_vop_fiforead,
	.vop_write	= hammer2_vop_fifowrite,
#endif
	.vop_access	= hammer2_vop_access,
#if 0
	.vop_close	= hammer2_vop_fifoclose,
#endif
	.vop_markatime	= hammer2_vop_markatime,
	.vop_getattr	= hammer2_vop_getattr,
	.vop_inactive	= hammer2_vop_inactive,
	.vop_reclaim	= hammer2_vop_reclaim,
	.vop_setattr	= hammer2_vop_setattr,
	.vop_kqfilter	= hammer2_vop_fifokqfilter
};