hammer2 - Implement error processing and free reserve enforcement
sys/vfs/hammer2/hammer2_vnops.c
1 /*
2 * Copyright (c) 2011-2015 The DragonFly Project. All rights reserved.
4 * This code is derived from software contributed to The DragonFly Project
5 * by Matthew Dillon <dillon@dragonflybsd.org>
6 * by Venkatesh Srinivas <vsrinivas@dragonflybsd.org>
7 * by Daniel Flores (GSOC 2013 - mentored by Matthew Dillon, compression)
9 * Redistribution and use in source and binary forms, with or without
10 * modification, are permitted provided that the following conditions
11 * are met:
13 * 1. Redistributions of source code must retain the above copyright
14 * notice, this list of conditions and the following disclaimer.
15 * 2. Redistributions in binary form must reproduce the above copyright
16 * notice, this list of conditions and the following disclaimer in
17 * the documentation and/or other materials provided with the
18 * distribution.
19 * 3. Neither the name of The DragonFly Project nor the names of its
20 * contributors may be used to endorse or promote products derived
21 * from this software without specific, prior written permission.
23 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
24 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
25 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
26 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
27 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
28 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
29 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
30 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
31 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
32 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
33 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
34 * SUCH DAMAGE.
37 * Kernel Filesystem interface
39 * NOTE! local ipdata pointers must be reloaded on any modifying operation
40 * to the inode as its underlying chain may have changed.
43 #include <sys/param.h>
44 #include <sys/systm.h>
45 #include <sys/kernel.h>
46 #include <sys/fcntl.h>
47 #include <sys/buf.h>
48 #include <sys/proc.h>
49 #include <sys/namei.h>
50 #include <sys/mount.h>
51 #include <sys/vnode.h>
52 #include <sys/mountctl.h>
53 #include <sys/dirent.h>
54 #include <sys/uio.h>
55 #include <sys/objcache.h>
56 #include <sys/event.h>
57 #include <sys/file.h>
58 #include <vfs/fifofs/fifo.h>
60 #include "hammer2.h"
62 static int hammer2_read_file(hammer2_inode_t *ip, struct uio *uio,
63 int seqcount);
64 static int hammer2_write_file(hammer2_inode_t *ip, struct uio *uio,
65 int ioflag, int seqcount);
66 static void hammer2_extend_file(hammer2_inode_t *ip, hammer2_key_t nsize);
67 static void hammer2_truncate_file(hammer2_inode_t *ip, hammer2_key_t nsize);
69 struct objcache *cache_xops;
71 static __inline
72 void
73 hammer2_knote(struct vnode *vp, int flags)
74 {
75 if (flags)
76 KNOTE(&vp->v_pollinfo.vpi_kqinfo.ki_note, flags);
77 }
80 * Last reference to a vnode is going away but it is still cached.
82 static
83 int
84 hammer2_vop_inactive(struct vop_inactive_args *ap)
86 hammer2_inode_t *ip;
87 struct vnode *vp;
89 vp = ap->a_vp;
90 ip = VTOI(vp);
93 * Degenerate case
95 if (ip == NULL) {
96 vrecycle(vp);
97 return (0);
101 * Check for deleted inodes and recycle immediately on the last
102 * release. Be sure to destroy any left-over buffer cache buffers
103 * so we do not waste time trying to flush them.
105 * Note that deleting the file block chains under the inode chain
106 * would just be a waste of energy, so don't do it.
108 * WARNING: nvtruncbuf() can only be safely called without the inode
109 * lock held due to the way our write thread works.
111 if (ip->flags & HAMMER2_INODE_ISUNLINKED) {
112 hammer2_key_t lbase;
113 int nblksize;
116 * Detect updates to the embedded data which may be
117 * synchronized by the strategy code. Simply mark the
118 * inode modified so it gets picked up by our normal flush.
120 nblksize = hammer2_calc_logical(ip, 0, &lbase, NULL);
121 nvtruncbuf(vp, 0, nblksize, 0, 0);
122 vrecycle(vp);
124 return (0);
128 * Reclaim a vnode so that it can be reused; after the inode is
129 * disassociated, the filesystem must manage it alone.
131 static
133 hammer2_vop_reclaim(struct vop_reclaim_args *ap)
135 hammer2_inode_t *ip;
136 hammer2_pfs_t *pmp;
137 struct vnode *vp;
139 vp = ap->a_vp;
140 ip = VTOI(vp);
141 if (ip == NULL) {
142 return(0);
144 pmp = ip->pmp;
147 * The final close of a deleted file or directory marks it for
148 * destruction. The DELETED flag allows the flusher to shortcut
149 * any modified blocks still unflushed (that is, just ignore them).
151 * HAMMER2 usually does not try to optimize the freemap by returning
152 * deleted blocks to it, since it generally cannot know how many
153 * snapshots might still reference portions of the file/dir.
155 vp->v_data = NULL;
156 ip->vp = NULL;
159 * NOTE! We do not attempt to flush chains here, flushing is
160 * really fragile and could also deadlock.
162 vclrisdirty(vp);
165 * This occurs if the inode was unlinked while open. Reclamation of
166 * these inodes requires processing we cannot safely do here so add
167 * the inode to the sideq in that situation.
169 * A modified inode may require chain synchronization which will no
170 * longer be driven by a sync or fsync without the vnode, also use
171 * the sideq for that.
173 * A reclaim can occur at any time so we cannot safely start a
174 * transaction to handle reclamation of unlinked files. Instead,
175 * the ip is left with a reference and placed on a linked list and
176 * handled later on.
179 if ((ip->flags & (HAMMER2_INODE_ISUNLINKED |
180 HAMMER2_INODE_MODIFIED |
181 HAMMER2_INODE_RESIZED)) &&
182 (ip->flags & HAMMER2_INODE_ISDELETED) == 0) {
183 hammer2_inode_sideq_t *ipul;
185 ipul = kmalloc(sizeof(*ipul), pmp->minode, M_WAITOK | M_ZERO);
186 ipul->ip = ip;
188 hammer2_spin_ex(&pmp->list_spin);
189 if ((ip->flags & HAMMER2_INODE_ONSIDEQ) == 0) {
190 /* ref -> sideq */
191 atomic_set_int(&ip->flags, HAMMER2_INODE_ONSIDEQ);
192 TAILQ_INSERT_TAIL(&pmp->sideq, ipul, entry);
193 hammer2_spin_unex(&pmp->list_spin);
194 } else {
195 hammer2_spin_unex(&pmp->list_spin);
196 kfree(ipul, pmp->minode);
197 hammer2_inode_drop(ip); /* vp ref */
199 /* retain ref from vp for ipul */
200 } else {
201 hammer2_inode_drop(ip); /* vp ref */
205 * XXX handle background sync when ip is dirty; the kernel will no
206 * longer notify us regarding this inode because there is no longer
207 * a vnode attached to it.
210 return (0);
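/*
 * The deferred work queued above is drained by hammer2_inode_run_sideq()
 * (called from the modifying VOPs later in this file). A minimal sketch
 * of the drain pattern -- pop entries under the spinlock, do the heavy
 * lifting unlocked -- assuming an open transaction as at the real call
 * sites (illustration only, not the actual implementation):
 */
#if 0
	hammer2_inode_sideq_t *ipul;
	hammer2_inode_t *ip;

	hammer2_spin_ex(&pmp->list_spin);
	while ((ipul = TAILQ_FIRST(&pmp->sideq)) != NULL) {
		TAILQ_REMOVE(&pmp->sideq, ipul, entry);
		ip = ipul->ip;
		atomic_clear_int(&ip->flags, HAMMER2_INODE_ONSIDEQ);
		hammer2_spin_unex(&pmp->list_spin);

		hammer2_inode_lock(ip, 0);
		if (ip->flags & HAMMER2_INODE_MODIFIED)
			hammer2_inode_chain_sync(ip);
		hammer2_inode_unlock(ip);
		hammer2_inode_drop(ip);		/* sideq ref */
		kfree(ipul, pmp->minode);

		hammer2_spin_ex(&pmp->list_spin);
	}
	hammer2_spin_unex(&pmp->list_spin);
#endif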
213 static
215 hammer2_vop_fsync(struct vop_fsync_args *ap)
217 hammer2_inode_t *ip;
218 struct vnode *vp;
220 vp = ap->a_vp;
221 ip = VTOI(vp);
223 #if 0
224 /* XXX can't do this yet */
225 hammer2_trans_init(ip->pmp, HAMMER2_TRANS_ISFLUSH);
226 vfsync(vp, ap->a_waitfor, 1, NULL, NULL);
227 #endif
228 hammer2_trans_init(ip->pmp, 0);
229 vfsync(vp, ap->a_waitfor, 1, NULL, NULL);
232 * Calling chain_flush here creates a lot of duplicative
233 * COW operations due to non-optimal vnode ordering.
235 * Only do it for an actual fsync() syscall. The other forms
236 * which call this function will eventually call chain_flush
237 * on the volume root as a catch-all, which is far more optimal.
239 hammer2_inode_lock(ip, 0);
240 if (ip->flags & HAMMER2_INODE_MODIFIED)
241 hammer2_inode_chain_sync(ip);
242 hammer2_inode_unlock(ip);
243 hammer2_trans_done(ip->pmp);
245 return (0);
248 static
250 hammer2_vop_access(struct vop_access_args *ap)
252 hammer2_inode_t *ip = VTOI(ap->a_vp);
253 uid_t uid;
254 gid_t gid;
255 int error;
257 hammer2_inode_lock(ip, HAMMER2_RESOLVE_SHARED);
258 uid = hammer2_to_unix_xid(&ip->meta.uid);
259 gid = hammer2_to_unix_xid(&ip->meta.gid);
260 error = vop_helper_access(ap, uid, gid, ip->meta.mode, ip->meta.uflags);
261 hammer2_inode_unlock(ip);
263 return (error);
266 static
268 hammer2_vop_getattr(struct vop_getattr_args *ap)
270 hammer2_pfs_t *pmp;
271 hammer2_inode_t *ip;
272 struct vnode *vp;
273 struct vattr *vap;
274 hammer2_chain_t *chain;
275 int i;
277 vp = ap->a_vp;
278 vap = ap->a_vap;
280 ip = VTOI(vp);
281 pmp = ip->pmp;
283 hammer2_inode_lock(ip, HAMMER2_RESOLVE_SHARED);
285 vap->va_fsid = pmp->mp->mnt_stat.f_fsid.val[0];
286 vap->va_fileid = ip->meta.inum;
287 vap->va_mode = ip->meta.mode;
288 vap->va_nlink = ip->meta.nlinks;
289 vap->va_uid = hammer2_to_unix_xid(&ip->meta.uid);
290 vap->va_gid = hammer2_to_unix_xid(&ip->meta.gid);
291 vap->va_rmajor = 0;
292 vap->va_rminor = 0;
293 vap->va_size = ip->meta.size; /* protected by shared lock */
294 vap->va_blocksize = HAMMER2_PBUFSIZE;
295 vap->va_flags = ip->meta.uflags;
296 hammer2_time_to_timespec(ip->meta.ctime, &vap->va_ctime);
297 hammer2_time_to_timespec(ip->meta.mtime, &vap->va_mtime);
298 hammer2_time_to_timespec(ip->meta.mtime, &vap->va_atime); /* atime not supported, report mtime */
299 vap->va_gen = 1;
300 vap->va_bytes = 0;
301 if (ip->meta.type == HAMMER2_OBJTYPE_DIRECTORY) {
303 * Can't really calculate directory use sans the files under
304 * it; just assume one block for now.
306 vap->va_bytes += HAMMER2_INODE_BYTES;
307 } else {
308 for (i = 0; i < ip->cluster.nchains; ++i) {
309 if ((chain = ip->cluster.array[i].chain) != NULL) {
310 if (vap->va_bytes <
311 chain->bref.embed.stats.data_count) {
312 vap->va_bytes =
313 chain->bref.embed.stats.data_count;
314 }
315 }
316 }
317 }
318 vap->va_type = hammer2_get_vtype(ip->meta.type);
319 vap->va_filerev = 0;
320 vap->va_uid_uuid = ip->meta.uid;
321 vap->va_gid_uuid = ip->meta.gid;
322 vap->va_vaflags = VA_UID_UUID_VALID | VA_GID_UUID_VALID |
323 VA_FSID_UUID_VALID;
325 hammer2_inode_unlock(ip);
327 return (0);
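/*
 * Example: on a multi-node cluster where one node's synchronized
 * data_count is 8MB and another's is 6MB, the loop above reports
 * va_bytes = 8MB -- the largest per-node count stands in for actual
 * usage.
 */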
330 static
332 hammer2_vop_setattr(struct vop_setattr_args *ap)
334 hammer2_inode_t *ip;
335 struct vnode *vp;
336 struct vattr *vap;
337 int error;
338 int kflags = 0;
339 uint64_t ctime;
341 vp = ap->a_vp;
342 vap = ap->a_vap;
343 hammer2_update_time(&ctime);
345 ip = VTOI(vp);
347 if (ip->pmp->ronly)
348 return (EROFS);
349 if (hammer2_vfs_enospace(ip, 0, ap->a_cred) > 1)
350 return (ENOSPC);
352 hammer2_pfs_memory_wait(ip->pmp);
353 hammer2_trans_init(ip->pmp, 0);
354 hammer2_inode_lock(ip, 0);
355 error = 0;
357 if (vap->va_flags != VNOVAL) {
358 uint32_t flags;
360 flags = ip->meta.uflags;
361 error = vop_helper_setattr_flags(&flags, vap->va_flags,
362 hammer2_to_unix_xid(&ip->meta.uid),
363 ap->a_cred);
364 if (error == 0) {
365 if (ip->meta.uflags != flags) {
366 hammer2_inode_modify(ip);
367 ip->meta.uflags = flags;
368 ip->meta.ctime = ctime;
369 kflags |= NOTE_ATTRIB;
371 if (ip->meta.uflags & (IMMUTABLE | APPEND)) {
372 error = 0;
373 goto done;
376 goto done;
378 if (ip->meta.uflags & (IMMUTABLE | APPEND)) {
379 error = EPERM;
380 goto done;
382 if (vap->va_uid != (uid_t)VNOVAL || vap->va_gid != (gid_t)VNOVAL) {
383 mode_t cur_mode = ip->meta.mode;
384 uid_t cur_uid = hammer2_to_unix_xid(&ip->meta.uid);
385 gid_t cur_gid = hammer2_to_unix_xid(&ip->meta.gid);
386 uuid_t uuid_uid;
387 uuid_t uuid_gid;
389 error = vop_helper_chown(ap->a_vp, vap->va_uid, vap->va_gid,
390 ap->a_cred,
391 &cur_uid, &cur_gid, &cur_mode);
392 if (error == 0) {
393 hammer2_guid_to_uuid(&uuid_uid, cur_uid);
394 hammer2_guid_to_uuid(&uuid_gid, cur_gid);
395 if (bcmp(&uuid_uid, &ip->meta.uid, sizeof(uuid_uid)) ||
396 bcmp(&uuid_gid, &ip->meta.gid, sizeof(uuid_gid)) ||
397 ip->meta.mode != cur_mode
398 ) {
399 hammer2_inode_modify(ip);
400 ip->meta.uid = uuid_uid;
401 ip->meta.gid = uuid_gid;
402 ip->meta.mode = cur_mode;
403 ip->meta.ctime = ctime;
405 kflags |= NOTE_ATTRIB;
410 * Resize the file
412 if (vap->va_size != VNOVAL && ip->meta.size != vap->va_size) {
413 switch(vp->v_type) {
414 case VREG:
415 if (vap->va_size == ip->meta.size)
416 break;
417 if (vap->va_size < ip->meta.size) {
418 hammer2_mtx_ex(&ip->truncate_lock);
419 hammer2_truncate_file(ip, vap->va_size);
420 hammer2_mtx_unlock(&ip->truncate_lock);
421 kflags |= NOTE_WRITE;
422 } else {
423 hammer2_extend_file(ip, vap->va_size);
424 kflags |= NOTE_WRITE | NOTE_EXTEND;
426 hammer2_inode_modify(ip);
427 ip->meta.mtime = ctime;
428 break;
429 default:
430 error = EINVAL;
431 goto done;
434 #if 0
435 /* atime not supported */
436 if (vap->va_atime.tv_sec != VNOVAL) {
437 hammer2_inode_modify(ip);
438 ip->meta.atime = hammer2_timespec_to_time(&vap->va_atime);
439 kflags |= NOTE_ATTRIB;
441 #endif
442 if (vap->va_mode != (mode_t)VNOVAL) {
443 mode_t cur_mode = ip->meta.mode;
444 uid_t cur_uid = hammer2_to_unix_xid(&ip->meta.uid);
445 gid_t cur_gid = hammer2_to_unix_xid(&ip->meta.gid);
447 error = vop_helper_chmod(ap->a_vp, vap->va_mode, ap->a_cred,
448 cur_uid, cur_gid, &cur_mode);
449 if (error == 0 && ip->meta.mode != cur_mode) {
450 hammer2_inode_modify(ip);
451 ip->meta.mode = cur_mode;
452 ip->meta.ctime = ctime;
453 kflags |= NOTE_ATTRIB;
457 if (vap->va_mtime.tv_sec != VNOVAL) {
458 hammer2_inode_modify(ip);
459 ip->meta.mtime = hammer2_timespec_to_time(&vap->va_mtime);
460 kflags |= NOTE_ATTRIB;
463 done:
465 * If a truncation occurred we must call inode_fsync() now in order
466 * to trim the related data chains, otherwise a later expansion can
467 * cause havoc.
469 * If an extend occurred that changed the DIRECTDATA state, we must
470 * call inode_fsync now in order to prepare the inode's indirect
471 * block table.
473 if (ip->flags & HAMMER2_INODE_RESIZED)
474 hammer2_inode_chain_sync(ip);
477 * Cleanup.
479 hammer2_inode_unlock(ip);
480 hammer2_trans_done(ip->pmp);
481 hammer2_knote(ip->vp, kflags);
483 return (error);
486 static
488 hammer2_vop_readdir(struct vop_readdir_args *ap)
490 hammer2_xop_readdir_t *xop;
491 hammer2_blockref_t bref;
492 hammer2_inode_t *ip;
493 hammer2_tid_t inum;
494 hammer2_key_t lkey;
495 struct uio *uio;
496 off_t *cookies;
497 off_t saveoff;
498 int cookie_index;
499 int ncookies;
500 int error;
501 int eofflag;
502 int r;
504 ip = VTOI(ap->a_vp);
505 uio = ap->a_uio;
506 saveoff = uio->uio_offset;
507 eofflag = 0;
508 error = 0;
511 * Set up directory entry cookies if requested.
513 if (ap->a_ncookies) {
514 ncookies = uio->uio_resid / 16 + 1;
515 if (ncookies > 1024)
516 ncookies = 1024;
517 cookies = kmalloc(ncookies * sizeof(off_t), M_TEMP, M_WAITOK);
518 } else {
519 ncookies = -1;
520 cookies = NULL;
522 cookie_index = 0;
524 hammer2_inode_lock(ip, HAMMER2_RESOLVE_SHARED);
527 * Handle artificial entries. To ensure that only positive 64 bit
528 * quantities are returned to userland, we always strip off bit 63.
529 * The hash code is designed such that codes 0x0000-0x7FFF are not
530 * used, allowing us to use these codes for artificial entries.
532 * Entry 0 is used for '.' and entry 1 is used for '..'. Do not
533 * allow '..' to cross the mount point into (e.g.) the super-root.
535 if (saveoff == 0) {
536 inum = ip->meta.inum & HAMMER2_DIRHASH_USERMSK;
537 r = vop_write_dirent(&error, uio, inum, DT_DIR, 1, ".");
538 if (r)
539 goto done;
540 if (cookies)
541 cookies[cookie_index] = saveoff;
542 ++saveoff;
543 ++cookie_index;
544 if (cookie_index == ncookies)
545 goto done;
548 if (saveoff == 1) {
550 * Be careful with lockorder when accessing ".."
552 * (ip is the current dir. xip is the parent dir).
554 inum = ip->meta.inum & HAMMER2_DIRHASH_USERMSK;
555 if (ip != ip->pmp->iroot)
556 inum = ip->meta.iparent & HAMMER2_DIRHASH_USERMSK;
557 r = vop_write_dirent(&error, uio, inum, DT_DIR, 2, "..");
558 if (r)
559 goto done;
560 if (cookies)
561 cookies[cookie_index] = saveoff;
562 ++saveoff;
563 ++cookie_index;
564 if (cookie_index == ncookies)
565 goto done;
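/*
 * Worked example of the cookie/key encoding used here, assuming
 * HAMMER2_DIRHASH_VISIBLE is bit 63 (see hammer2_disk.h):
 */
#if 0
	hammer2_key_t saveoff = 2;	/* first non-artificial entry */
	hammer2_key_t lkey;
	off_t cookie;

	lkey = saveoff | HAMMER2_DIRHASH_VISIBLE;	/* media scan key */
	cookie = saveoff & ~HAMMER2_DIRHASH_VISIBLE;	/* positive cookie */
#endif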
568 lkey = saveoff | HAMMER2_DIRHASH_VISIBLE;
569 if (hammer2_debug & 0x0020)
570 kprintf("readdir: lkey %016jx\n", lkey);
571 if (error)
572 goto done;
575 * Use XOP for cluster scan.
577 * parent is the inode cluster, already locked for us. Don't
578 * double lock shared locks as this will screw up upgrades.
580 xop = hammer2_xop_alloc(ip, 0);
581 xop->lkey = lkey;
582 hammer2_xop_start(&xop->head, hammer2_xop_readdir);
584 for (;;) {
585 const hammer2_inode_data_t *ripdata;
586 const char *dname;
587 int dtype;
589 error = hammer2_xop_collect(&xop->head, 0);
590 error = hammer2_error_to_errno(error);
591 if (error) {
592 break;
594 if (cookie_index == ncookies)
595 break;
596 if (hammer2_debug & 0x0020)
597 kprintf("cluster chain %p %p\n",
598 xop->head.cluster.focus,
599 (xop->head.cluster.focus ?
600 xop->head.cluster.focus->data : (void *)-1));
601 hammer2_cluster_bref(&xop->head.cluster, &bref);
603 if (bref.type == HAMMER2_BREF_TYPE_INODE) {
604 ripdata =
605 &hammer2_cluster_rdata(&xop->head.cluster)->ipdata;
606 dtype = hammer2_get_dtype(ripdata->meta.type);
607 saveoff = bref.key & HAMMER2_DIRHASH_USERMSK;
608 r = vop_write_dirent(&error, uio,
609 ripdata->meta.inum &
610 HAMMER2_DIRHASH_USERMSK,
611 dtype,
612 ripdata->meta.name_len,
613 ripdata->filename);
614 if (r)
615 break;
616 if (cookies)
617 cookies[cookie_index] = saveoff;
618 ++cookie_index;
619 } else if (bref.type == HAMMER2_BREF_TYPE_DIRENT) {
620 dtype = hammer2_get_dtype(bref.embed.dirent.type);
621 saveoff = bref.key & HAMMER2_DIRHASH_USERMSK;
622 if (bref.embed.dirent.namlen <=
623 sizeof(bref.check.buf)) {
624 dname = bref.check.buf;
625 } else {
626 dname =
627 hammer2_cluster_rdata(&xop->head.cluster)->buf;
629 r = vop_write_dirent(&error, uio,
630 bref.embed.dirent.inum,
631 dtype,
632 bref.embed.dirent.namlen,
633 dname);
634 if (r)
635 break;
636 if (cookies)
637 cookies[cookie_index] = saveoff;
638 ++cookie_index;
639 } else {
640 /* XXX chain error */
641 kprintf("bad chain type readdir %d\n", bref.type);
644 hammer2_xop_retire(&xop->head, HAMMER2_XOPMASK_VOP);
645 if (error == ENOENT) {
646 error = 0;
647 eofflag = 1;
648 saveoff = (hammer2_key_t)-1;
649 } else {
650 saveoff = bref.key & HAMMER2_DIRHASH_USERMSK;
652 done:
653 hammer2_inode_unlock(ip);
654 if (ap->a_eofflag)
655 *ap->a_eofflag = eofflag;
656 if (hammer2_debug & 0x0020)
657 kprintf("readdir: done at %016jx\n", saveoff);
658 uio->uio_offset = saveoff & ~HAMMER2_DIRHASH_VISIBLE;
659 if (error && cookie_index == 0) {
660 if (cookies) {
661 kfree(cookies, M_TEMP);
662 *ap->a_ncookies = 0;
663 *ap->a_cookies = NULL;
665 } else {
666 if (cookies) {
667 *ap->a_ncookies = cookie_index;
668 *ap->a_cookies = cookies;
671 return (error);
675 * hammer2_vop_readlink { vp, uio, cred }
677 static
679 hammer2_vop_readlink(struct vop_readlink_args *ap)
681 struct vnode *vp;
682 hammer2_inode_t *ip;
683 int error;
685 vp = ap->a_vp;
686 if (vp->v_type != VLNK)
687 return (EINVAL);
688 ip = VTOI(vp);
690 error = hammer2_read_file(ip, ap->a_uio, 0);
691 return (error);
694 static
696 hammer2_vop_read(struct vop_read_args *ap)
698 struct vnode *vp;
699 hammer2_inode_t *ip;
700 struct uio *uio;
701 int error;
702 int seqcount;
703 int bigread;
706 * Read operations supported on this vnode?
708 vp = ap->a_vp;
709 if (vp->v_type != VREG)
710 return (EINVAL);
713 * Misc
715 ip = VTOI(vp);
716 uio = ap->a_uio;
717 error = 0;
719 seqcount = ap->a_ioflag >> 16;
720 bigread = (uio->uio_resid > 100 * 1024 * 1024);
722 error = hammer2_read_file(ip, uio, seqcount);
723 return (error);
726 static
728 hammer2_vop_write(struct vop_write_args *ap)
730 hammer2_inode_t *ip;
731 thread_t td;
732 struct vnode *vp;
733 struct uio *uio;
734 int error;
735 int seqcount;
736 int ioflag;
739 * Write operations supported on this vnode?
741 vp = ap->a_vp;
742 if (vp->v_type != VREG)
743 return (EINVAL);
746 * Misc
748 ip = VTOI(vp);
749 ioflag = ap->a_ioflag;
750 uio = ap->a_uio;
751 error = 0;
752 if (ip->pmp->ronly)
753 return (EROFS);
754 switch (hammer2_vfs_enospace(ip, uio->uio_resid, ap->a_cred)) {
755 case 2:
756 return (ENOSPC);
757 case 1:
758 ioflag |= IO_DIRECT; /* semi-synchronous */
759 /* fall through */
760 default:
761 break;
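/*
 * Free reserve enforcement: hammer2_vfs_enospace() grades the remaining
 * reserve. As used in this file, 2 means the reserve is exhausted
 * (hard ENOSPC), 1 means it is low (writes are degraded to
 * semi-synchronous via IO_DIRECT so the flusher can keep pace), and 0
 * means space is fine. The "> 1" checks in the other VOPs refuse
 * service only at level 2.
 */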
764 seqcount = ioflag >> 16;
767 * Check resource limit
769 if (uio->uio_resid > 0 && (td = uio->uio_td) != NULL && td->td_proc &&
770 uio->uio_offset + uio->uio_resid >
771 td->td_proc->p_rlimit[RLIMIT_FSIZE].rlim_cur) {
772 lwpsignal(td->td_proc, td->td_lwp, SIGXFSZ);
773 return (EFBIG);
777 * The transaction interlocks against flush initiations
778 * (note: but will run concurrently with the actual flush).
780 * To avoid deadlocking against the VM system, we must flag any
781 * transaction related to the buffer cache or other direct
782 * VM page manipulation.
784 if (uio->uio_segflg == UIO_NOCOPY)
785 hammer2_trans_init(ip->pmp, HAMMER2_TRANS_BUFCACHE);
786 else
787 hammer2_trans_init(ip->pmp, 0);
788 error = hammer2_write_file(ip, uio, ioflag, seqcount);
789 hammer2_trans_done(ip->pmp);
791 return (error);
795 * Perform read operations on a file or symlink given an UNLOCKED
796 * inode and uio.
798 * The passed ip is not locked.
800 static
802 hammer2_read_file(hammer2_inode_t *ip, struct uio *uio, int seqcount)
804 hammer2_off_t size;
805 struct buf *bp;
806 int error;
808 error = 0;
811 * UIO read loop.
813 * WARNING! Assumes that the kernel interlocks size changes at the
814 * vnode level.
816 hammer2_mtx_sh(&ip->lock);
817 hammer2_mtx_sh(&ip->truncate_lock);
818 size = ip->meta.size;
819 hammer2_mtx_unlock(&ip->lock);
821 while (uio->uio_resid > 0 && uio->uio_offset < size) {
822 hammer2_key_t lbase;
823 hammer2_key_t leof;
824 int lblksize;
825 int loff;
826 int n;
828 lblksize = hammer2_calc_logical(ip, uio->uio_offset,
829 &lbase, &leof);
831 #if 1
832 error = cluster_read(ip->vp, leof, lbase, lblksize,
833 uio->uio_resid, seqcount * MAXBSIZE,
834 &bp);
835 #else
836 if (uio->uio_segflg == UIO_NOCOPY) {
837 bp = getblk(ip->vp, lbase, lblksize, GETBLK_BHEAVY, 0);
838 if (bp->b_flags & B_CACHE) {
839 int i;
840 int j = 0;
841 if (bp->b_xio.xio_npages != 16)
842 kprintf("NPAGES BAD\n");
843 for (i = 0; i < bp->b_xio.xio_npages; ++i) {
844 vm_page_t m;
845 m = bp->b_xio.xio_pages[i];
846 if (m == NULL || m->valid == 0) {
847 kprintf("bp %016jx %016jx pg %d inv",
848 lbase, leof, i);
849 if (m)
850 kprintf("m->object %p/%p", m->object, ip->vp->v_object);
851 kprintf("\n");
852 j = 1;
855 if (j)
856 kprintf("b_flags %08x, b_error %d\n", bp->b_flags, bp->b_error);
858 bqrelse(bp);
860 error = bread(ip->vp, lbase, lblksize, &bp);
861 #endif
862 if (error) {
863 brelse(bp);
864 break;
866 loff = (int)(uio->uio_offset - lbase);
867 n = lblksize - loff;
868 if (n > uio->uio_resid)
869 n = uio->uio_resid;
870 if (n > size - uio->uio_offset)
871 n = (int)(size - uio->uio_offset);
872 bp->b_flags |= B_AGE;
873 uiomovebp(bp, (char *)bp->b_data + loff, n, uio);
874 bqrelse(bp);
876 hammer2_mtx_unlock(&ip->truncate_lock);
878 return (error);
882 * Write to the file represented by the inode via the logical buffer cache.
883 * The inode may represent a regular file or a symlink.
885 * The inode must not be locked.
887 static
889 hammer2_write_file(hammer2_inode_t *ip, struct uio *uio,
890 int ioflag, int seqcount)
892 hammer2_key_t old_eof;
893 hammer2_key_t new_eof;
894 struct buf *bp;
895 int kflags;
896 int error;
897 int modified;
900 * Set up for append.
902 * WARNING! Assumes that the kernel interlocks size changes at the
903 * vnode level.
905 hammer2_mtx_ex(&ip->lock);
906 hammer2_mtx_sh(&ip->truncate_lock);
907 if (ioflag & IO_APPEND)
908 uio->uio_offset = ip->meta.size;
909 old_eof = ip->meta.size;
912 * Extend the file if necessary. If the write fails at some point
913 * we will truncate it back down to cover as much as we were able
914 * to write.
916 * Doing this now makes it easier to calculate buffer sizes in
917 * the loop.
919 kflags = 0;
920 error = 0;
921 modified = 0;
923 if (uio->uio_offset + uio->uio_resid > old_eof) {
924 new_eof = uio->uio_offset + uio->uio_resid;
925 modified = 1;
926 hammer2_extend_file(ip, new_eof);
927 kflags |= NOTE_EXTEND;
928 } else {
929 new_eof = old_eof;
931 hammer2_mtx_unlock(&ip->lock);
934 * UIO write loop
936 while (uio->uio_resid > 0) {
937 hammer2_key_t lbase;
938 int trivial;
939 int endofblk;
940 int lblksize;
941 int loff;
942 int n;
945 * Don't allow the buffer build to blow out the buffer
946 * cache.
948 if ((ioflag & IO_RECURSE) == 0)
949 bwillwrite(HAMMER2_PBUFSIZE);
952 * This nominally tells us how much we can cluster and
953 * what the logical buffer size needs to be. Currently
954 * we don't try to cluster the write and just handle one
955 * block at a time.
957 lblksize = hammer2_calc_logical(ip, uio->uio_offset,
958 &lbase, NULL);
959 loff = (int)(uio->uio_offset - lbase);
961 KKASSERT(lblksize <= 65536);
964 * Calculate bytes to copy this transfer and whether the
965 * copy completely covers the buffer or not.
967 trivial = 0;
968 n = lblksize - loff;
969 if (n > uio->uio_resid) {
970 n = uio->uio_resid;
971 if (loff == lbase && uio->uio_offset + n == new_eof)
972 trivial = 1;
973 endofblk = 0;
974 } else {
975 if (loff == 0)
976 trivial = 1;
977 endofblk = 1;
979 if (lbase >= new_eof)
980 trivial = 1;
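/*
 * Worked example, assuming 65536-byte logical blocks and a 100000 byte
 * write at offset 0 into an empty file (new_eof = 100000):
 *
 *	pass 1: lbase = 0, loff = 0, n = 65536 <= resid
 *		-> endofblk = 1, trivial = 1 (whole buffer overwritten,
 *		   no read-back required)
 *	pass 2: lbase = 65536, loff = 0, n reduced to 34464
 *		-> endofblk = 0, trivial = 0 (the loff == lbase test only
 *		   passes at file offset 0), so the buffer is bread() and
 *		   the strategy code detects the zero-fill block.
 */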
983 * Get the buffer
985 if (uio->uio_segflg == UIO_NOCOPY) {
987 * Issuing a write with the same data backing the
988 * buffer. Instantiate the buffer to collect the
989 * backing vm pages, then read-in any missing bits.
991 * This case is used by vop_stdputpages().
993 bp = getblk(ip->vp, lbase, lblksize, GETBLK_BHEAVY, 0);
994 if ((bp->b_flags & B_CACHE) == 0) {
995 bqrelse(bp);
996 error = bread(ip->vp, lbase, lblksize, &bp);
998 } else if (trivial) {
1000 * Even though we are entirely overwriting the buffer
1001 * we may still have to zero it out to avoid an
1002 * mmap/write visibility issue.
1004 bp = getblk(ip->vp, lbase, lblksize, GETBLK_BHEAVY, 0);
1005 if ((bp->b_flags & B_CACHE) == 0)
1006 vfs_bio_clrbuf(bp);
1007 } else {
1009 * Partial overwrite, read in any missing bits then
1010 * replace the portion being written.
1012 * (The strategy code will detect zero-fill physical
1013 * blocks for this case).
1015 error = bread(ip->vp, lbase, lblksize, &bp);
1016 if (error == 0)
1017 bheavy(bp);
1020 if (error) {
1021 brelse(bp);
1022 break;
1026 * Ok, copy the data in
1028 error = uiomovebp(bp, bp->b_data + loff, n, uio);
1029 kflags |= NOTE_WRITE;
1030 modified = 1;
1031 if (error) {
1032 brelse(bp);
1033 break;
1037 * WARNING: Pageout daemon will issue UIO_NOCOPY writes
1038 * with IO_SYNC or IO_ASYNC set. These writes
1039 * must be handled as the pageout daemon expects.
1041 * NOTE! H2 relies on cluster_write() here because it
1042 * cannot preallocate disk blocks at the logical
1043 * level due to not knowing what the compression
1044 * size will be at this time.
1046 * We must use cluster_write() here and we depend
1047 * on the write-behind feature to flush buffers
1048 * appropriately. If we let the buffer daemons do
1049 * it the block allocations will be all over the
1050 * map.
1052 if (ioflag & IO_SYNC) {
1053 bwrite(bp);
1054 } else if ((ioflag & IO_DIRECT) && endofblk) {
1055 bawrite(bp);
1056 } else if (ioflag & IO_ASYNC) {
1057 bawrite(bp);
1058 } else if (ip->vp->v_mount->mnt_flag & MNT_NOCLUSTERW) {
1059 bdwrite(bp);
1060 } else {
1061 #if 1
1062 bp->b_flags |= B_CLUSTEROK;
1063 cluster_write(bp, new_eof, lblksize, seqcount);
1064 #else
1065 bp->b_flags |= B_CLUSTEROK;
1066 bdwrite(bp);
1067 #endif
1072 * Cleanup. If we extended the file EOF but then failed to write it
1073 * through, the entire write is considered a failure and we back up.
1075 if (error && new_eof != old_eof) {
1076 hammer2_mtx_unlock(&ip->truncate_lock);
1077 hammer2_mtx_ex(&ip->lock);
1078 hammer2_mtx_ex(&ip->truncate_lock);
1079 hammer2_truncate_file(ip, old_eof);
1080 if (ip->flags & HAMMER2_INODE_MODIFIED)
1081 hammer2_inode_chain_sync(ip);
1082 hammer2_mtx_unlock(&ip->lock);
1083 } else if (modified) {
1084 hammer2_mtx_ex(&ip->lock);
1085 hammer2_inode_modify(ip);
1086 hammer2_update_time(&ip->meta.mtime);
1087 if (ip->flags & HAMMER2_INODE_MODIFIED)
1088 hammer2_inode_chain_sync(ip);
1089 hammer2_mtx_unlock(&ip->lock);
1090 hammer2_knote(ip->vp, kflags);
1092 hammer2_trans_assert_strategy(ip->pmp);
1093 hammer2_mtx_unlock(&ip->truncate_lock);
1095 return error;
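/*
 * The dispatch at the bottom of the loop above condenses to the
 * following policy, sketched as a standalone helper (hypothetical,
 * illustration only -- the driver issues the b*write calls inline):
 */
#if 0
enum h2_wdisp { H2_BWRITE, H2_BAWRITE, H2_BDWRITE, H2_CLUSTERW };

static enum h2_wdisp
h2_write_dispatch(int ioflag, int endofblk, int noclusterw)
{
	if (ioflag & IO_SYNC)
		return (H2_BWRITE);	/* fully synchronous */
	if ((ioflag & IO_DIRECT) && endofblk)
		return (H2_BAWRITE);	/* semi-synchronous (low reserve) */
	if (ioflag & IO_ASYNC)
		return (H2_BAWRITE);	/* pageout daemon expects async */
	if (noclusterw)
		return (H2_BDWRITE);	/* delayed write, no clustering */
	return (H2_CLUSTERW);		/* normal path: write-behind */
}
#endif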
1099 * Truncate the size of a file. The inode must not be locked.
1101 * We must unconditionally set HAMMER2_INODE_RESIZED to properly
1102 * ensure that any on-media data beyond the new file EOF has been destroyed.
1104 * WARNING: nvtruncbuf() can only be safely called without the inode lock
1105 * held due to the way our write thread works. If the truncation
1106 * occurs in the middle of a buffer, nvtruncbuf() is responsible
1107 * for dirtying that buffer and zeroing out trailing bytes.
1109 * WARNING! Assumes that the kernel interlocks size changes at the
1110 * vnode level.
1112 * WARNING! Caller assumes responsibility for removing dead blocks
1113 * if INODE_RESIZED is set.
1115 static
1116 void
1117 hammer2_truncate_file(hammer2_inode_t *ip, hammer2_key_t nsize)
1119 hammer2_key_t lbase;
1120 int nblksize;
1122 hammer2_mtx_unlock(&ip->lock);
1123 if (ip->vp) {
1124 nblksize = hammer2_calc_logical(ip, nsize, &lbase, NULL);
1125 nvtruncbuf(ip->vp, nsize,
1126 nblksize, (int)nsize & (nblksize - 1),
1129 hammer2_mtx_ex(&ip->lock);
1130 KKASSERT((ip->flags & HAMMER2_INODE_RESIZED) == 0);
1131 ip->osize = ip->meta.size;
1132 ip->meta.size = nsize;
1133 atomic_set_int(&ip->flags, HAMMER2_INODE_RESIZED);
1134 hammer2_inode_modify(ip);
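/*
 * Canonical caller pattern (see hammer2_vop_setattr above) -- the
 * caller owns the RESIZED post-processing and must chain-sync to trim
 * the dead data chains beyond the new EOF:
 */
#if 0
	/* inside a transaction, inode exclusively locked */
	hammer2_mtx_ex(&ip->truncate_lock);
	hammer2_truncate_file(ip, nsize);	/* sets INODE_RESIZED */
	hammer2_mtx_unlock(&ip->truncate_lock);
	...
	if (ip->flags & HAMMER2_INODE_RESIZED)
		hammer2_inode_chain_sync(ip);	/* trims dead chains */
#endif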
1138 * Extend the size of a file. The inode must not be locked.
1140 * Even though the file size is changing, we do not have to set the
1141 * INODE_RESIZED bit unless the file size crosses the EMBEDDED_BYTES
1142 * boundary. When this occurs a hammer2_inode_chain_sync() is required
1143 * to prepare the inode cluster's indirect block table, otherwise
1144 * async execution of the strategy code will implode on us.
1146 * WARNING! Assumes that the kernel interlocks size changes at the
1147 * vnode level.
1149 * WARNING! Caller assumes responsibility for transitioning out
1150 * of the inode DIRECTDATA mode if INODE_RESIZED is set.
1152 static
1153 void
1154 hammer2_extend_file(hammer2_inode_t *ip, hammer2_key_t nsize)
1156 hammer2_key_t lbase;
1157 hammer2_key_t osize;
1158 int oblksize;
1159 int nblksize;
1161 KKASSERT((ip->flags & HAMMER2_INODE_RESIZED) == 0);
1162 hammer2_inode_modify(ip);
1163 osize = ip->meta.size;
1164 ip->osize = osize;
1165 ip->meta.size = nsize;
1167 if (osize <= HAMMER2_EMBEDDED_BYTES && nsize > HAMMER2_EMBEDDED_BYTES) {
1168 atomic_set_int(&ip->flags, HAMMER2_INODE_RESIZED);
1169 hammer2_inode_chain_sync(ip);
1172 hammer2_mtx_unlock(&ip->lock);
1173 if (ip->vp) {
1174 oblksize = hammer2_calc_logical(ip, osize, &lbase, NULL);
1175 nblksize = hammer2_calc_logical(ip, nsize, &lbase, NULL);
1176 nvextendbuf(ip->vp,
1177 osize, nsize,
1178 oblksize, nblksize,
1179 -1, -1, 0);
1181 hammer2_mtx_ex(&ip->lock);
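/*
 * Example: with HAMMER2_EMBEDDED_BYTES (512, the inode's embedded data
 * area), extending a 400 byte file to 600 bytes crosses the boundary
 * and forces the chain sync above so the strategy code finds a prepared
 * indirect block table; extending a 600 byte file to 1MB does not, as
 * the inode has already left DIRECTDATA mode.
 */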
1184 static
1186 hammer2_vop_nresolve(struct vop_nresolve_args *ap)
1188 hammer2_xop_nresolve_t *xop;
1189 hammer2_inode_t *ip;
1190 hammer2_inode_t *dip;
1191 struct namecache *ncp;
1192 struct vnode *vp;
1193 int error;
1195 dip = VTOI(ap->a_dvp);
1196 xop = hammer2_xop_alloc(dip, 0);
1198 ncp = ap->a_nch->ncp;
1199 hammer2_xop_setname(&xop->head, ncp->nc_name, ncp->nc_nlen);
1202 * Note: In DragonFly the kernel handles '.' and '..'.
1204 hammer2_inode_lock(dip, HAMMER2_RESOLVE_SHARED);
1205 hammer2_xop_start(&xop->head, hammer2_xop_nresolve);
1207 error = hammer2_xop_collect(&xop->head, 0);
1208 error = hammer2_error_to_errno(error);
1209 if (error) {
1210 ip = NULL;
1211 } else {
1212 ip = hammer2_inode_get(dip->pmp, dip, &xop->head.cluster, -1);
1214 hammer2_inode_unlock(dip);
1217 * Acquire the related vnode
1219 * NOTE: For error processing, only ENOENT resolves the namecache
1220 * entry to NULL, otherwise we just return the error and
1221 * leave the namecache unresolved.
1223 * NOTE: multiple hammer2_inode structures can be aliased to the
1224 * same chain element, for example for hardlinks. This
1225 * use case does not 'reattach' inode associations that
1226 * might already exist, but always allocates a new one.
1228 * WARNING: inode structure is locked exclusively via inode_get
1229 * but chain was locked shared. inode_unlock()
1230 * will handle it properly.
1232 if (ip) {
1233 vp = hammer2_igetv(ip, &error); /* error set to UNIX error */
1234 if (error == 0) {
1235 vn_unlock(vp);
1236 cache_setvp(ap->a_nch, vp);
1237 } else if (error == ENOENT) {
1238 cache_setvp(ap->a_nch, NULL);
1240 hammer2_inode_unlock(ip);
1243 * The vp should not be released until after we've disposed
1244 * of our locks, because it might cause vop_inactive() to
1245 * be called.
1247 if (vp)
1248 vrele(vp);
1249 } else {
1250 error = ENOENT;
1251 cache_setvp(ap->a_nch, NULL);
1253 hammer2_xop_retire(&xop->head, HAMMER2_XOPMASK_VOP);
1254 KASSERT(error || ap->a_nch->ncp->nc_vp != NULL,
1255 ("resolve error %d/%p ap %p\n",
1256 error, ap->a_nch->ncp->nc_vp, ap));
1258 return error;
1261 static
1263 hammer2_vop_nlookupdotdot(struct vop_nlookupdotdot_args *ap)
1265 hammer2_inode_t *dip;
1266 hammer2_tid_t inum;
1267 int error;
1269 dip = VTOI(ap->a_dvp);
1270 inum = dip->meta.iparent;
1271 *ap->a_vpp = NULL;
1273 if (inum) {
1274 error = hammer2_vfs_vget(ap->a_dvp->v_mount, NULL,
1275 inum, ap->a_vpp);
1276 } else {
1277 error = ENOENT;
1279 return error;
1282 static
1284 hammer2_vop_nmkdir(struct vop_nmkdir_args *ap)
1286 hammer2_inode_t *dip;
1287 hammer2_inode_t *nip;
1288 struct namecache *ncp;
1289 const uint8_t *name;
1290 size_t name_len;
1291 hammer2_tid_t inum;
1292 int error;
1294 dip = VTOI(ap->a_dvp);
1295 if (dip->pmp->ronly)
1296 return (EROFS);
1297 if (hammer2_vfs_enospace(dip, 0, ap->a_cred) > 1)
1298 return (ENOSPC);
1300 ncp = ap->a_nch->ncp;
1301 name = ncp->nc_name;
1302 name_len = ncp->nc_nlen;
1304 hammer2_pfs_memory_wait(dip->pmp);
1305 hammer2_trans_init(dip->pmp, 0);
1307 inum = hammer2_trans_newinum(dip->pmp);
1310 * Create the actual inode as a hidden file in the iroot, then
1311 * create the directory entry. The creation of the actual inode
1312 * sets its nlinks to 1 which is the value we desire.
1314 nip = hammer2_inode_create(dip->pmp->iroot, dip, ap->a_vap, ap->a_cred,
1315 NULL, 0, inum,
1316 inum, 0, 0,
1317 0, &error);
1318 if (error) {
1319 error = hammer2_error_to_errno(error);
1320 } else {
1321 error = hammer2_dirent_create(dip, name, name_len,
1322 nip->meta.inum, nip->meta.type);
1323 /* returns UNIX error code */
1325 if (error) {
1326 if (nip) {
1327 hammer2_inode_unlink_finisher(nip, 0);
1328 hammer2_inode_unlock(nip);
1329 nip = NULL;
1331 *ap->a_vpp = NULL;
1332 } else {
1333 *ap->a_vpp = hammer2_igetv(nip, &error);
1334 hammer2_inode_unlock(nip);
1338 * Update dip's mtime
1340 if (error == 0) {
1341 uint64_t mtime;
1343 hammer2_inode_lock(dip, HAMMER2_RESOLVE_SHARED);
1344 hammer2_update_time(&mtime);
1345 hammer2_inode_modify(dip);
1346 dip->meta.mtime = mtime;
1347 hammer2_inode_unlock(dip);
1350 hammer2_trans_done(dip->pmp);
1352 if (error == 0) {
1353 cache_setunresolved(ap->a_nch);
1354 cache_setvp(ap->a_nch, *ap->a_vpp);
1355 hammer2_knote(ap->a_dvp, NOTE_WRITE | NOTE_LINK);
1357 return error;
1360 static
1362 hammer2_vop_open(struct vop_open_args *ap)
1364 return vop_stdopen(ap);
1368 * hammer2_vop_advlock { vp, id, op, fl, flags }
1370 static
1372 hammer2_vop_advlock(struct vop_advlock_args *ap)
1374 hammer2_inode_t *ip = VTOI(ap->a_vp);
1375 hammer2_off_t size;
1377 size = ip->meta.size;
1378 return (lf_advlock(ap, &ip->advlock, size));
1381 static
1383 hammer2_vop_close(struct vop_close_args *ap)
1385 return vop_stdclose(ap);
1389 * hammer2_vop_nlink { nch, dvp, vp, cred }
1391 * Create a hardlink from (vp) to {dvp, nch}.
1393 static
1395 hammer2_vop_nlink(struct vop_nlink_args *ap)
1397 hammer2_inode_t *tdip; /* target directory to create link in */
1398 hammer2_inode_t *ip; /* inode we are hardlinking to */
1399 struct namecache *ncp;
1400 const uint8_t *name;
1401 size_t name_len;
1402 int error;
1404 if (ap->a_dvp->v_mount != ap->a_vp->v_mount)
1405 return(EXDEV);
1407 tdip = VTOI(ap->a_dvp);
1408 if (tdip->pmp->ronly)
1409 return (EROFS);
1410 if (hammer2_vfs_enospace(tdip, 0, ap->a_cred) > 1)
1411 return (ENOSPC);
1413 ncp = ap->a_nch->ncp;
1414 name = ncp->nc_name;
1415 name_len = ncp->nc_nlen;
1418 * ip represents the file being hardlinked. The file could be a
1419 * normal file or a hardlink target if it has already been hardlinked.
1420 * (with the new semantics, it will almost always be a hardlink
1421 * target).
1423 * Bump nlinks and potentially also create or move the hardlink
1424 * target in the parent directory common to (ip) and (tdip). The
1425 * consolidation code can modify ip->cluster. The returned cluster
1426 * is locked.
1428 ip = VTOI(ap->a_vp);
1429 KASSERT(ip->pmp, ("ip->pmp is NULL %p %p", ip, ip->pmp));
1430 hammer2_pfs_memory_wait(ip->pmp);
1431 hammer2_trans_init(ip->pmp, 0);
1434 * Target should be an indexed inode or there's no way we will ever
1435 * be able to find it!
1437 KKASSERT((ip->meta.name_key & HAMMER2_DIRHASH_VISIBLE) == 0);
1439 error = 0;
1442 * Can return NULL and error == EXDEV if the common parent
1443 * crosses a directory with the xlink flag set.
1445 hammer2_inode_lock(tdip, 0);
1446 hammer2_inode_lock(ip, 0);
1449 * Create the directory entry and bump nlinks.
1451 if (error == 0) {
1452 error = hammer2_dirent_create(tdip, name, name_len,
1453 ip->meta.inum, ip->meta.type);
1454 hammer2_inode_modify(ip);
1455 ++ip->meta.nlinks;
1457 if (error == 0) {
1459 * Update dip's mtime
1461 uint64_t mtime;
1463 hammer2_update_time(&mtime);
1464 hammer2_inode_modify(tdip);
1465 tdip->meta.mtime = mtime;
1467 cache_setunresolved(ap->a_nch);
1468 cache_setvp(ap->a_nch, ap->a_vp);
1470 hammer2_inode_unlock(ip);
1471 hammer2_inode_unlock(tdip);
1473 hammer2_trans_done(ip->pmp);
1474 hammer2_knote(ap->a_vp, NOTE_LINK);
1475 hammer2_knote(ap->a_dvp, NOTE_WRITE);
1477 return error;
1481 * hammer2_vop_ncreate { nch, dvp, vpp, cred, vap }
1483 * The operating system has already ensured that the directory entry
1484 * does not exist and done all appropriate namespace locking.
1486 static
1488 hammer2_vop_ncreate(struct vop_ncreate_args *ap)
1490 hammer2_inode_t *dip;
1491 hammer2_inode_t *nip;
1492 struct namecache *ncp;
1493 const uint8_t *name;
1494 size_t name_len;
1495 hammer2_tid_t inum;
1496 int error;
1498 dip = VTOI(ap->a_dvp);
1499 if (dip->pmp->ronly)
1500 return (EROFS);
1501 if (hammer2_vfs_enospace(dip, 0, ap->a_cred) > 1)
1502 return (ENOSPC);
1504 ncp = ap->a_nch->ncp;
1505 name = ncp->nc_name;
1506 name_len = ncp->nc_nlen;
1507 hammer2_pfs_memory_wait(dip->pmp);
1508 hammer2_trans_init(dip->pmp, 0);
1510 inum = hammer2_trans_newinum(dip->pmp);
1513 * Create the actual inode as a hidden file in the iroot, then
1514 * create the directory entry. The creation of the actual inode
1515 * sets its nlinks to 1 which is the value we desire.
1517 nip = hammer2_inode_create(dip->pmp->iroot, dip, ap->a_vap, ap->a_cred,
1518 NULL, 0, inum,
1519 inum, 0, 0,
1520 0, &error);
1522 if (error == 0) {
1523 error = hammer2_dirent_create(dip, name, name_len,
1524 nip->meta.inum, nip->meta.type);
1526 if (error) {
1527 if (nip) {
1528 hammer2_inode_unlink_finisher(nip, 0);
1529 hammer2_inode_unlock(nip);
1530 nip = NULL;
1532 *ap->a_vpp = NULL;
1533 } else {
1534 *ap->a_vpp = hammer2_igetv(nip, &error);
1535 hammer2_inode_unlock(nip);
1539 * Update dip's mtime
1541 if (error == 0) {
1542 uint64_t mtime;
1544 hammer2_inode_lock(dip, HAMMER2_RESOLVE_SHARED);
1545 hammer2_update_time(&mtime);
1546 hammer2_inode_modify(dip);
1547 dip->meta.mtime = mtime;
1548 hammer2_inode_unlock(dip);
1551 hammer2_trans_done(dip->pmp);
1553 if (error == 0) {
1554 cache_setunresolved(ap->a_nch);
1555 cache_setvp(ap->a_nch, *ap->a_vpp);
1556 hammer2_knote(ap->a_dvp, NOTE_WRITE);
1558 return error;
1562 * Make a device node (typically a fifo)
1564 static
1566 hammer2_vop_nmknod(struct vop_nmknod_args *ap)
1568 hammer2_inode_t *dip;
1569 hammer2_inode_t *nip;
1570 struct namecache *ncp;
1571 const uint8_t *name;
1572 size_t name_len;
1573 hammer2_tid_t inum;
1574 int error;
1576 dip = VTOI(ap->a_dvp);
1577 if (dip->pmp->ronly)
1578 return (EROFS);
1579 if (hammer2_vfs_enospace(dip, 0, ap->a_cred) > 1)
1580 return (ENOSPC);
1582 ncp = ap->a_nch->ncp;
1583 name = ncp->nc_name;
1584 name_len = ncp->nc_nlen;
1585 hammer2_pfs_memory_wait(dip->pmp);
1586 hammer2_trans_init(dip->pmp, 0);
1589 * Create the device inode and then create the directory entry.
1591 inum = hammer2_trans_newinum(dip->pmp);
1592 nip = hammer2_inode_create(dip->pmp->iroot, dip, ap->a_vap, ap->a_cred,
1593 NULL, 0, inum,
1594 inum, 0, 0,
1595 0, &error);
1596 if (error == 0) {
1597 error = hammer2_dirent_create(dip, name, name_len,
1598 nip->meta.inum, nip->meta.type);
1600 if (error) {
1601 if (nip) {
1602 hammer2_inode_unlink_finisher(nip, 0);
1603 hammer2_inode_unlock(nip);
1604 nip = NULL;
1606 *ap->a_vpp = NULL;
1607 } else {
1608 *ap->a_vpp = hammer2_igetv(nip, &error);
1609 hammer2_inode_unlock(nip);
1613 * Update dip's mtime
1615 if (error == 0) {
1616 uint64_t mtime;
1618 hammer2_inode_lock(dip, HAMMER2_RESOLVE_SHARED);
1619 hammer2_update_time(&mtime);
1620 hammer2_inode_modify(dip);
1621 dip->meta.mtime = mtime;
1622 hammer2_inode_unlock(dip);
1625 hammer2_trans_done(dip->pmp);
1627 if (error == 0) {
1628 cache_setunresolved(ap->a_nch);
1629 cache_setvp(ap->a_nch, *ap->a_vpp);
1630 hammer2_knote(ap->a_dvp, NOTE_WRITE);
1632 return error;
1636 * hammer2_vop_nsymlink { nch, dvp, vpp, cred, vap, target }
1638 static
1640 hammer2_vop_nsymlink(struct vop_nsymlink_args *ap)
1642 hammer2_inode_t *dip;
1643 hammer2_inode_t *nip;
1644 struct namecache *ncp;
1645 const uint8_t *name;
1646 size_t name_len;
1647 hammer2_tid_t inum;
1648 int error;
1650 dip = VTOI(ap->a_dvp);
1651 if (dip->pmp->ronly)
1652 return (EROFS);
1653 if (hammer2_vfs_enospace(dip, 0, ap->a_cred) > 1)
1654 return (ENOSPC);
1656 ncp = ap->a_nch->ncp;
1657 name = ncp->nc_name;
1658 name_len = ncp->nc_nlen;
1659 hammer2_pfs_memory_wait(dip->pmp);
1660 hammer2_trans_init(dip->pmp, 0);
1662 ap->a_vap->va_type = VLNK; /* enforce type */
1665 * Create the softlink as an inode and then create the directory
1666 * entry.
1668 inum = hammer2_trans_newinum(dip->pmp);
1670 nip = hammer2_inode_create(dip->pmp->iroot, dip, ap->a_vap, ap->a_cred,
1671 NULL, 0, inum,
1672 inum, 0, 0,
1673 0, &error);
1674 if (error == 0) {
1675 error = hammer2_dirent_create(dip, name, name_len,
1676 nip->meta.inum, nip->meta.type);
1678 if (error) {
1679 if (nip) {
1680 hammer2_inode_unlink_finisher(nip, 0);
1681 hammer2_inode_unlock(nip);
1682 nip = NULL;
1684 *ap->a_vpp = NULL;
1685 hammer2_trans_done(dip->pmp);
1686 return error;
1688 *ap->a_vpp = hammer2_igetv(nip, &error);
1691 * Build the softlink (~like file data) and finalize the namecache.
1693 if (error == 0) {
1694 size_t bytes;
1695 struct uio auio;
1696 struct iovec aiov;
1698 bytes = strlen(ap->a_target);
1700 hammer2_inode_unlock(nip);
1701 bzero(&auio, sizeof(auio));
1702 bzero(&aiov, sizeof(aiov));
1703 auio.uio_iov = &aiov;
1704 auio.uio_segflg = UIO_SYSSPACE;
1705 auio.uio_rw = UIO_WRITE;
1706 auio.uio_resid = bytes;
1707 auio.uio_iovcnt = 1;
1708 auio.uio_td = curthread;
1709 aiov.iov_base = ap->a_target;
1710 aiov.iov_len = bytes;
1711 error = hammer2_write_file(nip, &auio, IO_APPEND, 0);
1712 /* XXX handle error */
1713 error = 0;
1714 } else {
1715 hammer2_inode_unlock(nip);
1719 * Update dip's mtime
1721 if (error == 0) {
1722 uint64_t mtime;
1724 hammer2_inode_lock(dip, HAMMER2_RESOLVE_SHARED);
1725 hammer2_update_time(&mtime);
1726 hammer2_inode_modify(dip);
1727 dip->meta.mtime = mtime;
1728 hammer2_inode_unlock(dip);
1731 hammer2_trans_done(dip->pmp);
1734 * Finalize namecache
1736 if (error == 0) {
1737 cache_setunresolved(ap->a_nch);
1738 cache_setvp(ap->a_nch, *ap->a_vpp);
1739 hammer2_knote(ap->a_dvp, NOTE_WRITE);
1741 return error;
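/*
 * The uio construction above generalizes to a small helper for pushing
 * a kernel buffer through the file write path; a minimal sketch
 * (hypothetical helper, not part of this driver):
 */
#if 0
static int
h2_write_kbuf(hammer2_inode_t *ip, void *buf, size_t bytes)
{
	struct uio auio;
	struct iovec aiov;

	bzero(&auio, sizeof(auio));
	bzero(&aiov, sizeof(aiov));
	aiov.iov_base = buf;
	aiov.iov_len = bytes;
	auio.uio_iov = &aiov;
	auio.uio_iovcnt = 1;
	auio.uio_resid = bytes;
	auio.uio_segflg = UIO_SYSSPACE;
	auio.uio_rw = UIO_WRITE;
	auio.uio_td = curthread;

	return (hammer2_write_file(ip, &auio, IO_APPEND, 0));
}
#endif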
1745 * hammer2_vop_nremove { nch, dvp, cred }
1747 static
1749 hammer2_vop_nremove(struct vop_nremove_args *ap)
1751 hammer2_xop_unlink_t *xop;
1752 hammer2_inode_t *dip;
1753 hammer2_inode_t *ip;
1754 struct namecache *ncp;
1755 int error;
1756 int isopen;
1758 dip = VTOI(ap->a_dvp);
1759 if (dip->pmp->ronly)
1760 return (EROFS);
1761 if (hammer2_vfs_enospace(dip, 0, ap->a_cred) > 1)
1762 return (ENOSPC);
1764 ncp = ap->a_nch->ncp;
1766 hammer2_pfs_memory_wait(dip->pmp);
1767 hammer2_trans_init(dip->pmp, 0);
1768 hammer2_inode_lock(dip, 0);
1771 * The unlink XOP unlinks the path from the directory and
1772 * locates and returns the cluster associated with the real inode.
1773 * We have to handle nlinks here on the frontend.
1775 xop = hammer2_xop_alloc(dip, HAMMER2_XOP_MODIFYING);
1776 hammer2_xop_setname(&xop->head, ncp->nc_name, ncp->nc_nlen);
1779 * The namecache entry is locked so nobody can use this namespace.
1780 * Calculate isopen to determine if this namespace has an open vp
1781 * associated with it and resolve the vp only if it does.
1783 * We try to avoid resolving the vnode if nobody has it open, but
1784 * note that the test is via this namespace only.
1786 isopen = cache_isopen(ap->a_nch);
1787 xop->isdir = 0;
1788 xop->dopermanent = 0;
1789 hammer2_xop_start(&xop->head, hammer2_xop_unlink);
1792 * Collect the real inode and adjust nlinks, destroy the real
1793 * inode if nlinks transitions to 0 and it was the real inode
1794 * (else it has already been removed).
1796 error = hammer2_xop_collect(&xop->head, 0);
1797 error = hammer2_error_to_errno(error);
1798 hammer2_inode_unlock(dip);
1800 if (error == 0) {
1801 ip = hammer2_inode_get(dip->pmp, dip, &xop->head.cluster, -1);
1802 hammer2_xop_retire(&xop->head, HAMMER2_XOPMASK_VOP);
1803 if (ip) {
1804 hammer2_inode_unlink_finisher(ip, isopen);
1805 hammer2_inode_unlock(ip);
1807 } else {
1808 hammer2_xop_retire(&xop->head, HAMMER2_XOPMASK_VOP);
1812 * Update dip's mtime
1814 if (error == 0) {
1815 uint64_t mtime;
1817 hammer2_inode_lock(dip, HAMMER2_RESOLVE_SHARED);
1818 hammer2_update_time(&mtime);
1819 hammer2_inode_modify(dip);
1820 dip->meta.mtime = mtime;
1821 hammer2_inode_unlock(dip);
1824 hammer2_inode_run_sideq(dip->pmp);
1825 hammer2_trans_done(dip->pmp);
1826 if (error == 0) {
1827 cache_unlink(ap->a_nch);
1828 hammer2_knote(ap->a_dvp, NOTE_WRITE);
1830 return (error);
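/*
 * The frontend/backend split above follows the fixed XOP lifecycle
 * used throughout this file; condensed:
 */
#if 0
	xop = hammer2_xop_alloc(dip, HAMMER2_XOP_MODIFYING); /* allocate */
	hammer2_xop_setname(&xop->head, name, name_len);     /* params   */
	hammer2_xop_start(&xop->head, hammer2_xop_unlink);   /* backend  */
	error = hammer2_xop_collect(&xop->head, 0);          /* wait     */
	error = hammer2_error_to_errno(error);               /* H2->UNIX */
	hammer2_xop_retire(&xop->head, HAMMER2_XOPMASK_VOP); /* release  */
#endif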
1834 * hammer2_vop_nrmdir { nch, dvp, cred }
1836 static
1838 hammer2_vop_nrmdir(struct vop_nrmdir_args *ap)
1840 hammer2_xop_unlink_t *xop;
1841 hammer2_inode_t *dip;
1842 hammer2_inode_t *ip;
1843 struct namecache *ncp;
1844 int isopen;
1845 int error;
1847 dip = VTOI(ap->a_dvp);
1848 if (dip->pmp->ronly)
1849 return (EROFS);
1850 if (hammer2_vfs_enospace(dip, 0, ap->a_cred) > 1)
1851 return (ENOSPC);
1853 hammer2_pfs_memory_wait(dip->pmp);
1854 hammer2_trans_init(dip->pmp, 0);
1855 hammer2_inode_lock(dip, 0);
1857 xop = hammer2_xop_alloc(dip, HAMMER2_XOP_MODIFYING);
1859 ncp = ap->a_nch->ncp;
1860 hammer2_xop_setname(&xop->head, ncp->nc_name, ncp->nc_nlen);
1861 isopen = cache_isopen(ap->a_nch);
1862 xop->isdir = 1;
1863 xop->dopermanent = 0;
1864 hammer2_xop_start(&xop->head, hammer2_xop_unlink);
1867 * Collect the real inode and adjust nlinks, destroy the real
1868 * inode if nlinks transitions to 0 and it was the real inode
1869 * (else it has already been removed).
1871 error = hammer2_xop_collect(&xop->head, 0);
1872 error = hammer2_error_to_errno(error);
1873 hammer2_inode_unlock(dip);
1875 if (error == 0) {
1876 ip = hammer2_inode_get(dip->pmp, dip, &xop->head.cluster, -1);
1877 hammer2_xop_retire(&xop->head, HAMMER2_XOPMASK_VOP);
1878 if (ip) {
1879 hammer2_inode_unlink_finisher(ip, isopen);
1880 hammer2_inode_unlock(ip);
1882 } else {
1883 hammer2_xop_retire(&xop->head, HAMMER2_XOPMASK_VOP);
1887 * Update dip's mtime
1889 if (error == 0) {
1890 uint64_t mtime;
1892 hammer2_inode_lock(dip, HAMMER2_RESOLVE_SHARED);
1893 hammer2_update_time(&mtime);
1894 hammer2_inode_modify(dip);
1895 dip->meta.mtime = mtime;
1896 hammer2_inode_unlock(dip);
1899 hammer2_inode_run_sideq(dip->pmp);
1900 hammer2_trans_done(dip->pmp);
1901 if (error == 0) {
1902 cache_unlink(ap->a_nch);
1903 hammer2_knote(ap->a_dvp, NOTE_WRITE | NOTE_LINK);
1905 return (error);
1909 * hammer2_vop_nrename { fnch, tnch, fdvp, tdvp, cred }
1911 static
1913 hammer2_vop_nrename(struct vop_nrename_args *ap)
1915 struct namecache *fncp;
1916 struct namecache *tncp;
1917 hammer2_inode_t *fdip; /* source directory */
1918 hammer2_inode_t *tdip; /* target directory */
1919 hammer2_inode_t *ip; /* file being renamed */
1920 hammer2_inode_t *tip; /* replaced target during rename or NULL */
1921 const uint8_t *fname;
1922 size_t fname_len;
1923 const uint8_t *tname;
1924 size_t tname_len;
1925 int error;
1926 int update_tdip;
1927 int update_fdip;
1928 hammer2_key_t tlhc;
1930 if (ap->a_fdvp->v_mount != ap->a_tdvp->v_mount)
1931 return(EXDEV);
1932 if (ap->a_fdvp->v_mount != ap->a_fnch->ncp->nc_vp->v_mount)
1933 return(EXDEV);
1935 fdip = VTOI(ap->a_fdvp); /* source directory */
1936 tdip = VTOI(ap->a_tdvp); /* target directory */
1938 if (fdip->pmp->ronly)
1939 return (EROFS);
1940 if (hammer2_vfs_enospace(fdip, 0, ap->a_cred) > 1)
1941 return (ENOSPC);
1943 fncp = ap->a_fnch->ncp; /* entry name in source */
1944 fname = fncp->nc_name;
1945 fname_len = fncp->nc_nlen;
1947 tncp = ap->a_tnch->ncp; /* entry name in target */
1948 tname = tncp->nc_name;
1949 tname_len = tncp->nc_nlen;
1951 hammer2_pfs_memory_wait(tdip->pmp);
1952 hammer2_trans_init(tdip->pmp, 0);
1954 update_tdip = 0;
1955 update_fdip = 0;
1957 ip = VTOI(fncp->nc_vp);
1958 hammer2_inode_ref(ip); /* extra ref */
1961 * Lookup the target name to determine if a directory entry
1962 * is being overwritten. We only hold related inode locks
1963 * temporarily; the operating system is expected to protect
1964 * against rename races.
1966 tip = tncp->nc_vp ? VTOI(tncp->nc_vp) : NULL;
1967 if (tip)
1968 hammer2_inode_ref(tip); /* extra ref */
1971 * Can return NULL and error == EXDEV if the common parent
1972 * crosses a directory with the xlink flag set.
1974 * For now try to avoid deadlocks with a simple pointer address
1975 * test. (tip) can be NULL.
1977 error = 0;
1978 if (fdip <= tdip) {
1979 hammer2_inode_lock(fdip, 0);
1980 hammer2_inode_lock(tdip, 0);
1981 } else {
1982 hammer2_inode_lock(tdip, 0);
1983 hammer2_inode_lock(fdip, 0);
1985 if (tip) {
1986 if (ip <= tip) {
1987 hammer2_inode_lock(ip, 0);
1988 hammer2_inode_lock(tip, 0);
1989 } else {
1990 hammer2_inode_lock(tip, 0);
1991 hammer2_inode_lock(ip, 0);
1993 } else {
1994 hammer2_inode_lock(ip, 0);
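/*
 * The address ordering generalizes; a minimal standalone sketch of the
 * rule applied above (hypothetical helper, illustration only):
 */
#if 0
static void
h2_lock_pair(hammer2_inode_t *ip1, hammer2_inode_t *ip2)
{
	/*
	 * Lock the lower-addressed inode first so any two threads
	 * locking the same pair agree on the order and cannot deadlock.
	 */
	if (ip1 <= ip2) {
		hammer2_inode_lock(ip1, 0);
		if (ip2 != ip1)
			hammer2_inode_lock(ip2, 0);
	} else {
		hammer2_inode_lock(ip2, 0);
		hammer2_inode_lock(ip1, 0);
	}
}
#endif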
1997 #if 0
1999 * Delete the target namespace.
2001 * REMOVED - NOW FOLDED INTO XOP_NRENAME OPERATION
2004 hammer2_xop_unlink_t *xop2;
2005 hammer2_inode_t *tip;
2006 int isopen;
2009 * The unlink XOP unlinks the path from the directory and
2010 * locates and returns the cluster associated with the real
2011 * inode. We have to handle nlinks here on the frontend.
2013 xop2 = hammer2_xop_alloc(tdip, HAMMER2_XOP_MODIFYING);
2014 hammer2_xop_setname(&xop2->head, tname, tname_len);
2015 isopen = cache_isopen(ap->a_tnch);
2016 xop2->isdir = -1;
2017 xop2->dopermanent = 0;
2018 hammer2_xop_start(&xop2->head, hammer2_xop_unlink);
2021 * Collect the real inode and adjust nlinks, destroy the real
2022 * inode if nlinks transitions to 0 and it was the real inode
2023 * (else it has already been removed).
2025 tnch_error = hammer2_xop_collect(&xop2->head, 0);
2026 tnch_error = hammer2_error_to_errno(tnch_error);
2027 /* hammer2_inode_unlock(tdip); */
2029 if (tnch_error == 0) {
2030 tip = hammer2_inode_get(tdip->pmp, NULL,
2031 &xop2->head.cluster, -1);
2032 hammer2_xop_retire(&xop2->head, HAMMER2_XOPMASK_VOP);
2033 if (tip) {
2034 hammer2_inode_unlink_finisher(tip, isopen);
2035 hammer2_inode_unlock(tip);
2037 } else {
2038 hammer2_xop_retire(&xop2->head, HAMMER2_XOPMASK_VOP);
2040 /* hammer2_inode_lock(tdip, 0); */
2042 if (tnch_error && tnch_error != ENOENT) {
2043 error = tnch_error;
2044 goto done2;
2046 update_tdip = 1;
2048 #endif
2051 * Resolve the collision space for (tdip, tname, tname_len)
2053 * tdip must be held exclusively locked to prevent races since
2054 * multiple filenames can end up in the same collision space.
2057 hammer2_xop_scanlhc_t *sxop;
2058 hammer2_tid_t lhcbase;
2060 tlhc = hammer2_dirhash(tname, tname_len);
2061 lhcbase = tlhc;
2062 sxop = hammer2_xop_alloc(tdip, HAMMER2_XOP_MODIFYING);
2063 sxop->lhc = tlhc;
2064 hammer2_xop_start(&sxop->head, hammer2_xop_scanlhc);
2065 while ((error = hammer2_xop_collect(&sxop->head, 0)) == 0) {
2066 if (tlhc != sxop->head.cluster.focus->bref.key)
2067 break;
2068 ++tlhc;
2070 error = hammer2_error_to_errno(error);
2071 hammer2_xop_retire(&sxop->head, HAMMER2_XOPMASK_VOP);
2073 if (error) {
2074 if (error != ENOENT)
2075 goto done2;
2076 ++tlhc;
2077 error = 0;
2079 if ((lhcbase ^ tlhc) & ~HAMMER2_DIRHASH_LOMASK) {
2080 error = ENOSPC;
2081 goto done2;
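/*
 * The scan above is a linear probe within the hash's collision space:
 * only the low HAMMER2_DIRHASH_LOMASK bits may be used to disambiguate
 * names hashing to the same key, and ENOSPC fires if an increment would
 * carry out of that space. Condensed, with key_exists() standing in
 * for the scanlhc XOP (hypothetical predicate):
 */
#if 0
	hammer2_key_t lhc = hammer2_dirhash(tname, tname_len);
	hammer2_key_t base = lhc;

	while (key_exists(tdip, lhc))
		++lhc;
	if ((base ^ lhc) & ~HAMMER2_DIRHASH_LOMASK)
		return (ENOSPC);	/* collision space exhausted */
#endif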
2086 * Ready to go, issue the rename to the backend. Note that meta-data
2087 * updates to the related inodes occur separately from the rename
2088 * operation.
2090 * NOTE: While it is not necessary to update ip->meta.name*, doing
2091 * so aids catastrophic recovery and debugging.
2093 if (error == 0) {
2094 hammer2_xop_nrename_t *xop4;
2096 xop4 = hammer2_xop_alloc(fdip, HAMMER2_XOP_MODIFYING);
2097 xop4->lhc = tlhc;
2098 xop4->ip_key = ip->meta.name_key;
2099 hammer2_xop_setip2(&xop4->head, ip);
2100 hammer2_xop_setip3(&xop4->head, tdip);
2101 hammer2_xop_setname(&xop4->head, fname, fname_len);
2102 hammer2_xop_setname2(&xop4->head, tname, tname_len);
2103 hammer2_xop_start(&xop4->head, hammer2_xop_nrename);
2105 error = hammer2_xop_collect(&xop4->head, 0);
2106 error = hammer2_error_to_errno(error);
2107 hammer2_xop_retire(&xop4->head, HAMMER2_XOPMASK_VOP);
2109 if (error == ENOENT)
2110 error = 0;
2113 * Update inode meta-data.
2115 * WARNING! The in-memory inode (ip) structure does not
2116 * maintain a copy of the inode's filename buffer.
2118 if (error == 0 &&
2119 (ip->meta.name_key & HAMMER2_DIRHASH_VISIBLE)) {
2120 hammer2_inode_modify(ip);
2121 ip->meta.name_len = tname_len;
2122 ip->meta.name_key = tlhc;
2124 if (error == 0) {
2125 hammer2_inode_modify(ip);
2126 ip->meta.iparent = tdip->meta.inum;
2128 update_fdip = 1;
2129 update_tdip = 1;
2132 done2:
2134 * If no error, the backend has replaced the target directory entry.
2135 * We must adjust nlinks on the original replace target if it exists.
2137 if (error == 0 && tip) {
2138 int isopen;
2140 isopen = cache_isopen(ap->a_tnch);
2141 hammer2_inode_unlink_finisher(tip, isopen);
2145 * Update directory mtimes to reflect that something changed.
2147 if (update_fdip || update_tdip) {
2148 uint64_t mtime;
2150 hammer2_update_time(&mtime);
2151 if (update_fdip) {
2152 hammer2_inode_modify(fdip);
2153 fdip->meta.mtime = mtime;
2155 if (update_tdip) {
2156 hammer2_inode_modify(tdip);
2157 tdip->meta.mtime = mtime;
2160 if (tip) {
2161 hammer2_inode_unlock(tip);
2162 hammer2_inode_drop(tip);
2164 hammer2_inode_unlock(ip);
2165 hammer2_inode_unlock(tdip);
2166 hammer2_inode_unlock(fdip);
2167 hammer2_inode_drop(ip);
2168 hammer2_inode_run_sideq(fdip->pmp);
2170 hammer2_trans_done(tdip->pmp);
2173 * Issue the namecache update after unlocking all the internal
2174 * hammer structures, otherwise we might deadlock.
2176 if (error == 0 && tip) {
2177 cache_unlink(ap->a_tnch);
2178 cache_setunresolved(ap->a_tnch);
2180 if (error == 0) {
2181 cache_rename(ap->a_fnch, ap->a_tnch);
2182 hammer2_knote(ap->a_fdvp, NOTE_WRITE);
2183 hammer2_knote(ap->a_tdvp, NOTE_WRITE);
2184 hammer2_knote(fncp->nc_vp, NOTE_RENAME);
2187 return (error);
2191 * hammer2_vop_ioctl { vp, command, data, fflag, cred }
2193 static
2195 hammer2_vop_ioctl(struct vop_ioctl_args *ap)
2197 hammer2_inode_t *ip;
2198 int error;
2200 ip = VTOI(ap->a_vp);
2202 error = hammer2_ioctl(ip, ap->a_command, (void *)ap->a_data,
2203 ap->a_fflag, ap->a_cred);
2204 return (error);
2207 static
2208 int
2209 hammer2_vop_mountctl(struct vop_mountctl_args *ap)
2211 struct mount *mp;
2212 hammer2_pfs_t *pmp;
2213 int rc;
2215 switch (ap->a_op) {
2216 case (MOUNTCTL_SET_EXPORT):
2217 mp = ap->a_head.a_ops->head.vv_mount;
2218 pmp = MPTOPMP(mp);
2220 if (ap->a_ctllen != sizeof(struct export_args))
2221 rc = (EINVAL);
2222 else
2223 rc = vfs_export(mp, &pmp->export,
2224 (const struct export_args *)ap->a_ctl);
2225 break;
2226 default:
2227 rc = vop_stdmountctl(ap);
2228 break;
2230 return (rc);
2234 * KQFILTER
2236 static void filt_hammer2detach(struct knote *kn);
2237 static int filt_hammer2read(struct knote *kn, long hint);
2238 static int filt_hammer2write(struct knote *kn, long hint);
2239 static int filt_hammer2vnode(struct knote *kn, long hint);
2241 static struct filterops hammer2read_filtops =
2242 { FILTEROP_ISFD | FILTEROP_MPSAFE,
2243 NULL, filt_hammer2detach, filt_hammer2read };
2244 static struct filterops hammer2write_filtops =
2245 { FILTEROP_ISFD | FILTEROP_MPSAFE,
2246 NULL, filt_hammer2detach, filt_hammer2write };
2247 static struct filterops hammer2vnode_filtops =
2248 { FILTEROP_ISFD | FILTEROP_MPSAFE,
2249 NULL, filt_hammer2detach, filt_hammer2vnode };
2251 static
2253 hammer2_vop_kqfilter(struct vop_kqfilter_args *ap)
2255 struct vnode *vp = ap->a_vp;
2256 struct knote *kn = ap->a_kn;
2258 switch (kn->kn_filter) {
2259 case EVFILT_READ:
2260 kn->kn_fop = &hammer2read_filtops;
2261 break;
2262 case EVFILT_WRITE:
2263 kn->kn_fop = &hammer2write_filtops;
2264 break;
2265 case EVFILT_VNODE:
2266 kn->kn_fop = &hammer2vnode_filtops;
2267 break;
2268 default:
2269 return (EOPNOTSUPP);
2272 kn->kn_hook = (caddr_t)vp;
2274 knote_insert(&vp->v_pollinfo.vpi_kqinfo.ki_note, kn);
2276 return(0);
2279 static void
2280 filt_hammer2detach(struct knote *kn)
2282 struct vnode *vp = (void *)kn->kn_hook;
2284 knote_remove(&vp->v_pollinfo.vpi_kqinfo.ki_note, kn);
2287 static int
2288 filt_hammer2read(struct knote *kn, long hint)
2290 struct vnode *vp = (void *)kn->kn_hook;
2291 hammer2_inode_t *ip = VTOI(vp);
2292 off_t off;
2294 if (hint == NOTE_REVOKE) {
2295 kn->kn_flags |= (EV_EOF | EV_NODATA | EV_ONESHOT);
2296 return(1);
2298 off = ip->meta.size - kn->kn_fp->f_offset;
2299 kn->kn_data = (off < INTPTR_MAX) ? off : INTPTR_MAX;
2300 if (kn->kn_sfflags & NOTE_OLDAPI)
2301 return(1);
2302 return (kn->kn_data != 0);
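/*
 * Example: a 1000 byte file with f_offset at 400 reports kn_data = 600
 * (bytes readable). At EOF kn_data is 0 and the knote does not fire
 * unless NOTE_OLDAPI is set, which forces a ready indication for
 * compatibility with the old API.
 */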
2306 static int
2307 filt_hammer2write(struct knote *kn, long hint)
2309 if (hint == NOTE_REVOKE)
2310 kn->kn_flags |= (EV_EOF | EV_NODATA | EV_ONESHOT);
2311 kn->kn_data = 0;
2312 return (1);
2315 static int
2316 filt_hammer2vnode(struct knote *kn, long hint)
2318 if (kn->kn_sfflags & hint)
2319 kn->kn_fflags |= hint;
2320 if (hint == NOTE_REVOKE) {
2321 kn->kn_flags |= (EV_EOF | EV_NODATA);
2322 return (1);
2324 return (kn->kn_fflags != 0);
2328 * FIFO VOPS
2330 static
2332 hammer2_vop_markatime(struct vop_markatime_args *ap)
2334 hammer2_inode_t *ip;
2335 struct vnode *vp;
2337 vp = ap->a_vp;
2338 ip = VTOI(vp);
2340 if (ip->pmp->ronly)
2341 return (EROFS);
2342 return(0);
2345 static
2347 hammer2_vop_fifokqfilter(struct vop_kqfilter_args *ap)
2349 int error;
2351 error = VOCALL(&fifo_vnode_vops, &ap->a_head);
2352 if (error)
2353 error = hammer2_vop_kqfilter(ap);
2354 return(error);
2358 * VOPS vector
2360 struct vop_ops hammer2_vnode_vops = {
2361 .vop_default = vop_defaultop,
2362 .vop_fsync = hammer2_vop_fsync,
2363 .vop_getpages = vop_stdgetpages,
2364 .vop_putpages = vop_stdputpages,
2365 .vop_access = hammer2_vop_access,
2366 .vop_advlock = hammer2_vop_advlock,
2367 .vop_close = hammer2_vop_close,
2368 .vop_nlink = hammer2_vop_nlink,
2369 .vop_ncreate = hammer2_vop_ncreate,
2370 .vop_nsymlink = hammer2_vop_nsymlink,
2371 .vop_nremove = hammer2_vop_nremove,
2372 .vop_nrmdir = hammer2_vop_nrmdir,
2373 .vop_nrename = hammer2_vop_nrename,
2374 .vop_getattr = hammer2_vop_getattr,
2375 .vop_setattr = hammer2_vop_setattr,
2376 .vop_readdir = hammer2_vop_readdir,
2377 .vop_readlink = hammer2_vop_readlink,
2380 .vop_read = hammer2_vop_read,
2381 .vop_write = hammer2_vop_write,
2382 .vop_open = hammer2_vop_open,
2383 .vop_inactive = hammer2_vop_inactive,
2384 .vop_reclaim = hammer2_vop_reclaim,
2385 .vop_nresolve = hammer2_vop_nresolve,
2386 .vop_nlookupdotdot = hammer2_vop_nlookupdotdot,
2387 .vop_nmkdir = hammer2_vop_nmkdir,
2388 .vop_nmknod = hammer2_vop_nmknod,
2389 .vop_ioctl = hammer2_vop_ioctl,
2390 .vop_mountctl = hammer2_vop_mountctl,
2391 .vop_bmap = hammer2_vop_bmap,
2392 .vop_strategy = hammer2_vop_strategy,
2393 .vop_kqfilter = hammer2_vop_kqfilter
2396 struct vop_ops hammer2_spec_vops = {
2397 .vop_default = vop_defaultop,
2398 .vop_fsync = hammer2_vop_fsync,
2399 .vop_read = vop_stdnoread,
2400 .vop_write = vop_stdnowrite,
2401 .vop_access = hammer2_vop_access,
2402 .vop_close = hammer2_vop_close,
2403 .vop_markatime = hammer2_vop_markatime,
2404 .vop_getattr = hammer2_vop_getattr,
2405 .vop_inactive = hammer2_vop_inactive,
2406 .vop_reclaim = hammer2_vop_reclaim,
2407 .vop_setattr = hammer2_vop_setattr
2410 struct vop_ops hammer2_fifo_vops = {
2411 .vop_default = fifo_vnoperate,
2412 .vop_fsync = hammer2_vop_fsync,
2413 #if 0
2414 .vop_read = hammer2_vop_fiforead,
2415 .vop_write = hammer2_vop_fifowrite,
2416 #endif
2417 .vop_access = hammer2_vop_access,
2418 #if 0
2419 .vop_close = hammer2_vop_fifoclose,
2420 #endif
2421 .vop_markatime = hammer2_vop_markatime,
2422 .vop_getattr = hammer2_vop_getattr,
2423 .vop_inactive = hammer2_vop_inactive,
2424 .vop_reclaim = hammer2_vop_reclaim,
2425 .vop_setattr = hammer2_vop_setattr,
2426 .vop_kqfilter = hammer2_vop_fifokqfilter