hammer2 - More involved refactoring of chain_repparent, cleanup
sys/vfs/hammer2/hammer2_vnops.c (dragonfly.git)
/*
 * Copyright (c) 2011-2018 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@dragonflybsd.org>
 * by Venkatesh Srinivas <vsrinivas@dragonflybsd.org>
 * by Daniel Flores (GSOC 2013 - mentored by Matthew Dillon, compression)
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
/*
 * Kernel Filesystem interface
 *
 * NOTE! local ipdata pointers must be reloaded on any modifying operation
 *	 to the inode as its underlying chain may have changed.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/fcntl.h>
#include <sys/buf.h>
#include <sys/proc.h>
#include <sys/namei.h>
#include <sys/mount.h>
#include <sys/vnode.h>
#include <sys/mountctl.h>
#include <sys/dirent.h>
#include <sys/uio.h>
#include <sys/objcache.h>
#include <sys/event.h>
#include <sys/file.h>
#include <vfs/fifofs/fifo.h>

#include "hammer2.h"
static int hammer2_read_file(hammer2_inode_t *ip, struct uio *uio,
				int seqcount);
static int hammer2_write_file(hammer2_inode_t *ip, struct uio *uio,
				int ioflag, int seqcount);
static void hammer2_extend_file(hammer2_inode_t *ip, hammer2_key_t nsize);
static void hammer2_truncate_file(hammer2_inode_t *ip, hammer2_key_t nsize);

struct objcache *cache_xops;

static __inline
void
hammer2_knote(struct vnode *vp, int flags)
{
	if (flags)
		KNOTE(&vp->v_pollinfo.vpi_kqinfo.ki_note, flags);
}
/*
 * Last reference to a vnode is going away but it is still cached.
 */
static
int
hammer2_vop_inactive(struct vop_inactive_args *ap)
{
	hammer2_inode_t *ip;
	struct vnode *vp;

	vp = ap->a_vp;
	ip = VTOI(vp);

	/*
	 * Degenerate case
	 */
	if (ip == NULL) {
		vrecycle(vp);
		return (0);
	}

	/*
	 * Check for deleted inodes and recycle immediately on the last
	 * release.  Be sure to destroy any left-over buffer cache buffers
	 * so we do not waste time trying to flush them.
	 *
	 * Note that deleting the file block chains under the inode chain
	 * would just be a waste of energy, so don't do it.
	 *
	 * WARNING: nvtruncbuf() can only be safely called without the inode
	 *	    lock held due to the way our write thread works.
	 */
	if (ip->flags & HAMMER2_INODE_ISUNLINKED) {
		hammer2_key_t lbase;
		int nblksize;

		/*
		 * Detect updates to the embedded data which may be
		 * synchronized by the strategy code.  Simply mark the
		 * inode modified so it gets picked up by our normal flush.
		 */
		nblksize = hammer2_calc_logical(ip, 0, &lbase, NULL);
		nvtruncbuf(vp, 0, nblksize, 0, 0);
		vrecycle(vp);
	}
	return (0);
}
/*
 * Reclaim a vnode so that it can be reused; after the inode is
 * disassociated, the filesystem must manage it alone.
 */
static
int
hammer2_vop_reclaim(struct vop_reclaim_args *ap)
{
	hammer2_inode_t *ip;
	hammer2_pfs_t *pmp;
	struct vnode *vp;

	vp = ap->a_vp;
	ip = VTOI(vp);
	if (ip == NULL) {
		return(0);
	}
	pmp = ip->pmp;

	/*
	 * The final close of a deleted file or directory marks it for
	 * destruction.  The DELETED flag allows the flusher to shortcut
	 * any modified blocks still unflushed (that is, just ignore them).
	 *
	 * HAMMER2 usually does not try to optimize the freemap by returning
	 * deleted blocks to it as it does not usually know how many snapshots
	 * might be referencing portions of the file/dir.
	 */
	vp->v_data = NULL;
	ip->vp = NULL;

	/*
	 * NOTE! We do not attempt to flush chains here, flushing is
	 *	 really fragile and could also deadlock.
	 */
	vclrisdirty(vp);

	/*
	 * A modified inode may require chain synchronization.  This
	 * synchronization is usually handled by VOP_SYNC / VOP_FSYNC
	 * when vfsync() is called.  However, that requires a vnode.
	 *
	 * When the vnode is disassociated we must keep track of any modified
	 * inode via the sideq so that it is properly flushed.  We cannot
	 * safely synchronize the inode from inside the reclaim due to
	 * potentially deep locks held as-of when the reclaim occurs.
	 * Interactions and potential deadlocks abound.
	 */
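	/*
	 * For reference, a sideq entry (see hammer2.h) is essentially just
	 * a queue linkage plus an inode pointer, roughly:
	 *
	 *	struct hammer2_inode_sideq {
	 *		TAILQ_ENTRY(hammer2_inode_sideq) entry;
	 *		hammer2_inode_t *ip;
	 *	};
	 *
	 * Each queued entry inherits the reference the vnode held on the
	 * inode; the syncer later flushes the inode and drops that ref.
	 */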
175 if ((ip->flags & (HAMMER2_INODE_ISUNLINKED |
176 HAMMER2_INODE_MODIFIED |
177 HAMMER2_INODE_RESIZED |
178 HAMMER2_INODE_DIRTYDATA)) &&
179 (ip->flags & HAMMER2_INODE_ISDELETED) == 0) {
180 hammer2_inode_sideq_t *ipul;
182 ipul = kmalloc(sizeof(*ipul), pmp->minode, M_WAITOK | M_ZERO);
183 ipul->ip = ip;
185 hammer2_spin_ex(&pmp->list_spin);
186 if ((ip->flags & HAMMER2_INODE_ONSIDEQ) == 0) {
187 /* ref -> sideq */
188 atomic_set_int(&ip->flags, HAMMER2_INODE_ONSIDEQ);
189 TAILQ_INSERT_TAIL(&pmp->sideq, ipul, entry);
190 ++pmp->sideq_count;
191 hammer2_spin_unex(&pmp->list_spin);
192 } else {
193 hammer2_spin_unex(&pmp->list_spin);
194 kfree(ipul, pmp->minode);
195 hammer2_inode_drop(ip); /* vp ref */
197 /* retain ref from vp for ipul */
198 } else {
199 hammer2_inode_drop(ip); /* vp ref */
203 * XXX handle background sync when ip dirty, kernel will no longer
204 * notify us regarding this inode because there is no longer a
205 * vnode attached to it.
208 return (0);
/*
 * Currently this function synchronizes the front-end inode state to the
 * backend chain topology, then flushes the inode's chain and sub-topology
 * to backend media.  This function does not flush the root topology down to
 * the inode.
 */
static
int
hammer2_vop_fsync(struct vop_fsync_args *ap)
{
	hammer2_inode_t *ip;
	struct vnode *vp;
	int error1;
	int error2;

	vp = ap->a_vp;
	ip = VTOI(vp);
	error1 = 0;

	hammer2_trans_init(ip->pmp, 0);

	/*
	 * Clean out buffer cache, wait for I/O's to complete.
	 */
	vfsync(vp, ap->a_waitfor, 1, NULL, NULL);
	bio_track_wait(&vp->v_track_write, 0, 0);

	/*
	 * Flush any inode changes
	 */
	hammer2_inode_lock(ip, 0);
	if (ip->flags & (HAMMER2_INODE_RESIZED|HAMMER2_INODE_MODIFIED))
		error1 = hammer2_inode_chain_sync(ip);

	/*
	 * Flush dirty chains related to the inode.
	 *
	 * NOTE! XXX We do not currently flush to the volume root, ultimately
	 *	 we will want to have a shortcut for the flushed inode stored
	 *	 in the volume root for recovery purposes.
	 */
	error2 = hammer2_inode_chain_flush(ip);
	if (error2)
		error1 = error2;
	hammer2_inode_unlock(ip);
	hammer2_trans_done(ip->pmp);

	return (error1);
}
static
int
hammer2_vop_access(struct vop_access_args *ap)
{
	hammer2_inode_t *ip = VTOI(ap->a_vp);
	uid_t uid;
	gid_t gid;
	int error;

	hammer2_inode_lock(ip, HAMMER2_RESOLVE_SHARED);
	uid = hammer2_to_unix_xid(&ip->meta.uid);
	gid = hammer2_to_unix_xid(&ip->meta.gid);
	error = vop_helper_access(ap, uid, gid, ip->meta.mode, ip->meta.uflags);
	hammer2_inode_unlock(ip);

	return (error);
}
static
int
hammer2_vop_getattr(struct vop_getattr_args *ap)
{
	hammer2_pfs_t *pmp;
	hammer2_inode_t *ip;
	struct vnode *vp;
	struct vattr *vap;
	hammer2_chain_t *chain;
	int i;

	vp = ap->a_vp;
	vap = ap->a_vap;

	ip = VTOI(vp);
	pmp = ip->pmp;

	hammer2_inode_lock(ip, HAMMER2_RESOLVE_SHARED);

	vap->va_fsid = pmp->mp->mnt_stat.f_fsid.val[0];
	vap->va_fileid = ip->meta.inum;
	vap->va_mode = ip->meta.mode;
	vap->va_nlink = ip->meta.nlinks;
	vap->va_uid = hammer2_to_unix_xid(&ip->meta.uid);
	vap->va_gid = hammer2_to_unix_xid(&ip->meta.gid);
	vap->va_rmajor = 0;
	vap->va_rminor = 0;
	vap->va_size = ip->meta.size;	/* protected by shared lock */
	vap->va_blocksize = HAMMER2_PBUFSIZE;
	vap->va_flags = ip->meta.uflags;
	hammer2_time_to_timespec(ip->meta.ctime, &vap->va_ctime);
	hammer2_time_to_timespec(ip->meta.mtime, &vap->va_mtime);
	hammer2_time_to_timespec(ip->meta.mtime, &vap->va_atime);
	vap->va_gen = 1;
	vap->va_bytes = 0;
	if (ip->meta.type == HAMMER2_OBJTYPE_DIRECTORY) {
		/*
		 * Can't really calculate directory use sans the files under
		 * it, just assume one block for now.
		 */
		vap->va_bytes += HAMMER2_INODE_BYTES;
	} else {
		for (i = 0; i < ip->cluster.nchains; ++i) {
			if ((chain = ip->cluster.array[i].chain) != NULL) {
				if (vap->va_bytes <
				    chain->bref.embed.stats.data_count) {
					vap->va_bytes =
					    chain->bref.embed.stats.data_count;
				}
			}
		}
	}
	vap->va_type = hammer2_get_vtype(ip->meta.type);
	vap->va_filerev = 0;
	vap->va_uid_uuid = ip->meta.uid;
	vap->va_gid_uuid = ip->meta.gid;
	vap->va_vaflags = VA_UID_UUID_VALID | VA_GID_UUID_VALID |
			  VA_FSID_UUID_VALID;

	hammer2_inode_unlock(ip);

	return (0);
}
static
int
hammer2_vop_setattr(struct vop_setattr_args *ap)
{
	hammer2_inode_t *ip;
	struct vnode *vp;
	struct vattr *vap;
	int error;
	int kflags = 0;
	uint64_t ctime;

	vp = ap->a_vp;
	vap = ap->a_vap;
	hammer2_update_time(&ctime);

	ip = VTOI(vp);

	if (ip->pmp->ronly)
		return (EROFS);
	if (hammer2_vfs_enospace(ip, 0, ap->a_cred) > 1)
		return (ENOSPC);

	hammer2_pfs_memory_wait(ip->pmp);
	hammer2_trans_init(ip->pmp, 0);
	hammer2_inode_lock(ip, 0);
	error = 0;

	if (vap->va_flags != VNOVAL) {
		uint32_t flags;

		flags = ip->meta.uflags;
		error = vop_helper_setattr_flags(&flags, vap->va_flags,
				      hammer2_to_unix_xid(&ip->meta.uid),
				      ap->a_cred);
		if (error == 0) {
			if (ip->meta.uflags != flags) {
				hammer2_inode_modify(ip);
				ip->meta.uflags = flags;
				ip->meta.ctime = ctime;
				kflags |= NOTE_ATTRIB;
			}
			if (ip->meta.uflags & (IMMUTABLE | APPEND)) {
				error = 0;
				goto done;
			}
		}
		goto done;
	}
	if (ip->meta.uflags & (IMMUTABLE | APPEND)) {
		error = EPERM;
		goto done;
	}
	if (vap->va_uid != (uid_t)VNOVAL || vap->va_gid != (gid_t)VNOVAL) {
		mode_t cur_mode = ip->meta.mode;
		uid_t cur_uid = hammer2_to_unix_xid(&ip->meta.uid);
		gid_t cur_gid = hammer2_to_unix_xid(&ip->meta.gid);
		uuid_t uuid_uid;
		uuid_t uuid_gid;

		error = vop_helper_chown(ap->a_vp, vap->va_uid, vap->va_gid,
					 ap->a_cred,
					 &cur_uid, &cur_gid, &cur_mode);
		if (error == 0) {
			hammer2_guid_to_uuid(&uuid_uid, cur_uid);
			hammer2_guid_to_uuid(&uuid_gid, cur_gid);
			if (bcmp(&uuid_uid, &ip->meta.uid, sizeof(uuid_uid)) ||
			    bcmp(&uuid_gid, &ip->meta.gid, sizeof(uuid_gid)) ||
			    ip->meta.mode != cur_mode
			) {
				hammer2_inode_modify(ip);
				ip->meta.uid = uuid_uid;
				ip->meta.gid = uuid_gid;
				ip->meta.mode = cur_mode;
				ip->meta.ctime = ctime;
			}
			kflags |= NOTE_ATTRIB;
		}
	}
	/*
	 * Resize the file
	 */
	if (vap->va_size != VNOVAL && ip->meta.size != vap->va_size) {
		switch(vp->v_type) {
		case VREG:
			if (vap->va_size == ip->meta.size)
				break;
			if (vap->va_size < ip->meta.size) {
				hammer2_mtx_ex(&ip->truncate_lock);
				hammer2_truncate_file(ip, vap->va_size);
				hammer2_mtx_unlock(&ip->truncate_lock);
				kflags |= NOTE_WRITE;
			} else {
				hammer2_extend_file(ip, vap->va_size);
				kflags |= NOTE_WRITE | NOTE_EXTEND;
			}
			hammer2_inode_modify(ip);
			ip->meta.mtime = ctime;
			vclrflags(vp, VLASTWRITETS);
			break;
		default:
			error = EINVAL;
			goto done;
		}
	}
#if 0
	/* atime not supported */
	if (vap->va_atime.tv_sec != VNOVAL) {
		hammer2_inode_modify(ip);
		ip->meta.atime = hammer2_timespec_to_time(&vap->va_atime);
		kflags |= NOTE_ATTRIB;
	}
#endif
	if (vap->va_mode != (mode_t)VNOVAL) {
		mode_t cur_mode = ip->meta.mode;
		uid_t cur_uid = hammer2_to_unix_xid(&ip->meta.uid);
		gid_t cur_gid = hammer2_to_unix_xid(&ip->meta.gid);

		error = vop_helper_chmod(ap->a_vp, vap->va_mode, ap->a_cred,
					 cur_uid, cur_gid, &cur_mode);
		if (error == 0 && ip->meta.mode != cur_mode) {
			hammer2_inode_modify(ip);
			ip->meta.mode = cur_mode;
			ip->meta.ctime = ctime;
			kflags |= NOTE_ATTRIB;
		}
	}

	if (vap->va_mtime.tv_sec != VNOVAL) {
		hammer2_inode_modify(ip);
		ip->meta.mtime = hammer2_timespec_to_time(&vap->va_mtime);
		kflags |= NOTE_ATTRIB;
		vclrflags(vp, VLASTWRITETS);
	}
done:
	/*
	 * If a truncation occurred we must call chain_sync() now in order
	 * to trim the related data chains, otherwise a later expansion can
	 * cause havoc.
	 *
	 * If an extend occurred that changed the DIRECTDATA state, we must
	 * call inode_fsync now in order to prepare the inode's indirect
	 * block table.
	 *
	 * WARNING! This means we are making an adjustment to the inode's
	 * chain outside of sync/fsync, and not just to inode->meta, which
	 * may result in some consistency issues if a crash were to occur
	 * at just the wrong time.
	 */
	if (ip->flags & HAMMER2_INODE_RESIZED)
		hammer2_inode_chain_sync(ip);

	/*
	 * Cleanup.
	 */
	hammer2_inode_unlock(ip);
	hammer2_trans_done(ip->pmp);
	hammer2_knote(ip->vp, kflags);

	return (error);
}
static
int
hammer2_vop_readdir(struct vop_readdir_args *ap)
{
	hammer2_xop_readdir_t *xop;
	hammer2_blockref_t bref;
	hammer2_inode_t *ip;
	hammer2_tid_t inum;
	hammer2_key_t lkey;
	struct uio *uio;
	off_t *cookies;
	off_t saveoff;
	int cookie_index;
	int ncookies;
	int error;
	int eofflag;
	int r;

	ip = VTOI(ap->a_vp);
	uio = ap->a_uio;
	saveoff = uio->uio_offset;
	eofflag = 0;
	error = 0;
	/*
	 * Set up directory entry cookies if requested
	 */
	if (ap->a_ncookies) {
		ncookies = uio->uio_resid / 16 + 1;
		if (ncookies > 1024)
			ncookies = 1024;
		cookies = kmalloc(ncookies * sizeof(off_t), M_TEMP, M_WAITOK);
	} else {
		ncookies = -1;
		cookies = NULL;
	}
	cookie_index = 0;
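	/*
	 * (uio_resid / 16 above is a heuristic: 16 bytes is roughly the
	 * smallest space a returned dirent can consume, so the cookie
	 * array, capped at 1024 entries, should not run out before the
	 * uio does.)
	 */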
	hammer2_inode_lock(ip, HAMMER2_RESOLVE_SHARED);

	/*
	 * Handle artificial entries.  To ensure that only positive 64 bit
	 * quantities are returned to userland we always strip off bit 63.
	 * The hash code is designed such that codes 0x0000-0x7FFF are not
	 * used, allowing us to use these codes for artificial entries.
	 *
	 * Entry 0 is used for '.' and entry 1 is used for '..'.  Do not
	 * allow '..' to cross the mount point into (e.g.) the super-root.
	 */
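	/*
	 * Sketch of the offset layout as seen by userland (exact key
	 * values come from the dirhash code):
	 *
	 *	saveoff 0		artificial '.'
	 *	saveoff 1		artificial '..'
	 *	saveoff >= 0x8000	real entries, keyed by directory
	 *				hash with bit 63 stripped
	 */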
	if (saveoff == 0) {
		inum = ip->meta.inum & HAMMER2_DIRHASH_USERMSK;
		r = vop_write_dirent(&error, uio, inum, DT_DIR, 1, ".");
		if (r)
			goto done;
		if (cookies)
			cookies[cookie_index] = saveoff;
		++saveoff;
		++cookie_index;
		if (cookie_index == ncookies)
			goto done;
	}

	if (saveoff == 1) {
		/*
		 * Be careful with lockorder when accessing ".."
		 *
		 * (ip is the current dir. xip is the parent dir).
		 */
		inum = ip->meta.inum & HAMMER2_DIRHASH_USERMSK;
		if (ip != ip->pmp->iroot)
			inum = ip->meta.iparent & HAMMER2_DIRHASH_USERMSK;
		r = vop_write_dirent(&error, uio, inum, DT_DIR, 2, "..");
		if (r)
			goto done;
		if (cookies)
			cookies[cookie_index] = saveoff;
		++saveoff;
		++cookie_index;
		if (cookie_index == ncookies)
			goto done;
	}

	lkey = saveoff | HAMMER2_DIRHASH_VISIBLE;
	if (hammer2_debug & 0x0020)
		kprintf("readdir: lkey %016jx\n", lkey);
	if (error)
		goto done;
	/*
	 * Use XOP for cluster scan.
	 *
	 * parent is the inode cluster, already locked for us.  Don't
	 * double lock shared locks as this will screw up upgrades.
	 */
	xop = hammer2_xop_alloc(ip, 0);
	xop->lkey = lkey;
	hammer2_xop_start(&xop->head, hammer2_xop_readdir);

	for (;;) {
		const hammer2_inode_data_t *ripdata;
		const char *dname;
		int dtype;

		error = hammer2_xop_collect(&xop->head, 0);
		error = hammer2_error_to_errno(error);
		if (error) {
			break;
		}
		if (cookie_index == ncookies)
			break;
		if (hammer2_debug & 0x0020)
			kprintf("cluster chain %p %p\n",
				xop->head.cluster.focus,
				(xop->head.cluster.focus ?
				 xop->head.cluster.focus->data : (void *)-1));
		hammer2_cluster_bref(&xop->head.cluster, &bref);

		if (bref.type == HAMMER2_BREF_TYPE_INODE) {
			ripdata =
			    &hammer2_cluster_rdata(&xop->head.cluster)->ipdata;
			dtype = hammer2_get_dtype(ripdata->meta.type);
			saveoff = bref.key & HAMMER2_DIRHASH_USERMSK;
			r = vop_write_dirent(&error, uio,
					     ripdata->meta.inum &
					      HAMMER2_DIRHASH_USERMSK,
					     dtype,
					     ripdata->meta.name_len,
					     ripdata->filename);
			if (r)
				break;
			if (cookies)
				cookies[cookie_index] = saveoff;
			++cookie_index;
		} else if (bref.type == HAMMER2_BREF_TYPE_DIRENT) {
			dtype = hammer2_get_dtype(bref.embed.dirent.type);
			saveoff = bref.key & HAMMER2_DIRHASH_USERMSK;
			if (bref.embed.dirent.namlen <=
			    sizeof(bref.check.buf)) {
				dname = bref.check.buf;
			} else {
				dname =
				 hammer2_cluster_rdata(&xop->head.cluster)->buf;
			}
			r = vop_write_dirent(&error, uio,
					     bref.embed.dirent.inum,
					     dtype,
					     bref.embed.dirent.namlen,
					     dname);
			if (r)
				break;
			if (cookies)
				cookies[cookie_index] = saveoff;
			++cookie_index;
		} else {
			/* XXX chain error */
			kprintf("bad chain type readdir %d\n", bref.type);
		}
	}
	hammer2_xop_retire(&xop->head, HAMMER2_XOPMASK_VOP);
	if (error == ENOENT) {
		error = 0;
		eofflag = 1;
		saveoff = (hammer2_key_t)-1;
	} else {
		saveoff = bref.key & HAMMER2_DIRHASH_USERMSK;
	}
done:
	hammer2_inode_unlock(ip);
	if (ap->a_eofflag)
		*ap->a_eofflag = eofflag;
	if (hammer2_debug & 0x0020)
		kprintf("readdir: done at %016jx\n", saveoff);
	uio->uio_offset = saveoff & ~HAMMER2_DIRHASH_VISIBLE;
	if (error && cookie_index == 0) {
		if (cookies) {
			kfree(cookies, M_TEMP);
			*ap->a_ncookies = 0;
			*ap->a_cookies = NULL;
		}
	} else {
		if (cookies) {
			*ap->a_ncookies = cookie_index;
			*ap->a_cookies = cookies;
		}
	}
	return (error);
}
/*
 * hammer2_vop_readlink { vp, uio, cred }
 */
static
int
hammer2_vop_readlink(struct vop_readlink_args *ap)
{
	struct vnode *vp;
	hammer2_inode_t *ip;
	int error;

	vp = ap->a_vp;
	if (vp->v_type != VLNK)
		return (EINVAL);
	ip = VTOI(vp);

	error = hammer2_read_file(ip, ap->a_uio, 0);
	return (error);
}
static
int
hammer2_vop_read(struct vop_read_args *ap)
{
	struct vnode *vp;
	hammer2_inode_t *ip;
	struct uio *uio;
	int error;
	int seqcount;
	int bigread;

	/*
	 * Read operations supported on this vnode?
	 */
	vp = ap->a_vp;
	if (vp->v_type != VREG)
		return (EINVAL);

	/*
	 * Misc
	 */
	ip = VTOI(vp);
	uio = ap->a_uio;
	error = 0;

	seqcount = ap->a_ioflag >> 16;
	bigread = (uio->uio_resid > 100 * 1024 * 1024);

	error = hammer2_read_file(ip, uio, seqcount);
	return (error);
}
static
int
hammer2_vop_write(struct vop_write_args *ap)
{
	hammer2_inode_t *ip;
	thread_t td;
	struct vnode *vp;
	struct uio *uio;
	int error;
	int seqcount;
	int ioflag;

	/*
	 * Write operations supported on this vnode?
	 */
	vp = ap->a_vp;
	if (vp->v_type != VREG)
		return (EINVAL);

	/*
	 * Misc
	 */
	ip = VTOI(vp);
	ioflag = ap->a_ioflag;
	uio = ap->a_uio;
	error = 0;
	if (ip->pmp->ronly)
		return (EROFS);
	switch (hammer2_vfs_enospace(ip, uio->uio_resid, ap->a_cred)) {
	case 2:
		return (ENOSPC);
	case 1:
		ioflag |= IO_DIRECT;	/* semi-synchronous */
		/* fall through */
	default:
		break;
	}

	seqcount = ioflag >> 16;
	/*
	 * Check resource limit
	 */
	if (uio->uio_resid > 0 && (td = uio->uio_td) != NULL && td->td_proc &&
	    uio->uio_offset + uio->uio_resid >
	     td->td_proc->p_rlimit[RLIMIT_FSIZE].rlim_cur) {
		lwpsignal(td->td_proc, td->td_lwp, SIGXFSZ);
		return (EFBIG);
	}

	/*
	 * The transaction interlocks against flush initiations
	 * (note: but will run concurrently with the actual flush).
	 *
	 * To avoid deadlocking against the VM system, we must flag any
	 * transaction related to the buffer cache or other direct
	 * VM page manipulation.
	 */
	if (uio->uio_segflg == UIO_NOCOPY)
		hammer2_trans_init(ip->pmp, HAMMER2_TRANS_BUFCACHE);
	else
		hammer2_trans_init(ip->pmp, 0);
	error = hammer2_write_file(ip, uio, ioflag, seqcount);
	hammer2_trans_done(ip->pmp);

	return (error);
}
/*
 * Perform read operations on a file or symlink given an UNLOCKED
 * inode and uio.
 *
 * The passed ip is not locked.
 */
static
int
hammer2_read_file(hammer2_inode_t *ip, struct uio *uio, int seqcount)
{
	hammer2_off_t size;
	struct buf *bp;
	int error;

	error = 0;

	/*
	 * UIO read loop.
	 *
	 * WARNING! Assumes that the kernel interlocks size changes at the
	 *	    vnode level.
	 */
	hammer2_mtx_sh(&ip->lock);
	hammer2_mtx_sh(&ip->truncate_lock);
	size = ip->meta.size;
	hammer2_mtx_unlock(&ip->lock);

	while (uio->uio_resid > 0 && uio->uio_offset < size) {
		hammer2_key_t lbase;
		hammer2_key_t leof;
		int lblksize;
		int loff;
		int n;

		lblksize = hammer2_calc_logical(ip, uio->uio_offset,
						&lbase, &leof);

#if 1
		bp = NULL;
		error = cluster_readx(ip->vp, leof, lbase, lblksize,
				      B_NOTMETA | B_KVABIO,
				      uio->uio_resid,
				      seqcount * MAXBSIZE,
				      &bp);
#else
		if (uio->uio_segflg == UIO_NOCOPY) {
			bp = getblk(ip->vp, lbase, lblksize,
				    GETBLK_BHEAVY | GETBLK_KVABIO, 0);
			if (bp->b_flags & B_CACHE) {
				int i;
				int j = 0;
				if (bp->b_xio.xio_npages != 16)
					kprintf("NPAGES BAD\n");
				for (i = 0; i < bp->b_xio.xio_npages; ++i) {
					vm_page_t m;
					m = bp->b_xio.xio_pages[i];
					if (m == NULL || m->valid == 0) {
						kprintf("bp %016jx %016jx pg %d inv",
							lbase, leof, i);
						if (m)
							kprintf("m->object %p/%p", m->object, ip->vp->v_object);
						kprintf("\n");
						j = 1;
					}
				}
				if (j)
					kprintf("b_flags %08x, b_error %d\n", bp->b_flags, bp->b_error);
			}
			bqrelse(bp);
		}
		error = bread_kvabio(ip->vp, lbase, lblksize, &bp);
#endif
		if (error) {
			brelse(bp);
			break;
		}
		bkvasync(bp);
		loff = (int)(uio->uio_offset - lbase);
		n = lblksize - loff;
		if (n > uio->uio_resid)
			n = uio->uio_resid;
		if (n > size - uio->uio_offset)
			n = (int)(size - uio->uio_offset);
		bp->b_flags |= B_AGE;
		uiomovebp(bp, (char *)bp->b_data + loff, n, uio);
		bqrelse(bp);
	}
	hammer2_mtx_unlock(&ip->truncate_lock);

	return (error);
}
/*
 * Write to the file represented by the inode via the logical buffer cache.
 * The inode may represent a regular file or a symlink.
 *
 * The inode must not be locked.
 */
static
int
hammer2_write_file(hammer2_inode_t *ip, struct uio *uio,
		   int ioflag, int seqcount)
{
	hammer2_key_t old_eof;
	hammer2_key_t new_eof;
	struct buf *bp;
	int kflags;
	int error;
	int modified;

	/*
	 * Setup if append
	 *
	 * WARNING! Assumes that the kernel interlocks size changes at the
	 *	    vnode level.
	 */
	hammer2_mtx_ex(&ip->lock);
	hammer2_mtx_sh(&ip->truncate_lock);
	if (ioflag & IO_APPEND)
		uio->uio_offset = ip->meta.size;
	old_eof = ip->meta.size;

	/*
	 * Extend the file if necessary.  If the write fails at some point
	 * we will truncate it back down to cover as much as we were able
	 * to write.
	 *
	 * Doing this now makes it easier to calculate buffer sizes in
	 * the loop.
	 */
	kflags = 0;
	error = 0;
	modified = 0;

	if (uio->uio_offset + uio->uio_resid > old_eof) {
		new_eof = uio->uio_offset + uio->uio_resid;
		modified = 1;
		hammer2_extend_file(ip, new_eof);
		kflags |= NOTE_EXTEND;
	} else {
		new_eof = old_eof;
	}
	hammer2_mtx_unlock(&ip->lock);
	/*
	 * UIO write loop
	 */
	while (uio->uio_resid > 0) {
		hammer2_key_t lbase;
		int trivial;
		int endofblk;
		int lblksize;
		int loff;
		int n;

		/*
		 * Don't allow the buffer build to blow out the buffer
		 * cache.
		 */
		if ((ioflag & IO_RECURSE) == 0)
			bwillwrite(HAMMER2_PBUFSIZE);

		/*
		 * This nominally tells us how much we can cluster and
		 * what the logical buffer size needs to be.  Currently
		 * we don't try to cluster the write and just handle one
		 * block at a time.
		 */
		lblksize = hammer2_calc_logical(ip, uio->uio_offset,
						&lbase, NULL);
		loff = (int)(uio->uio_offset - lbase);

		KKASSERT(lblksize <= 65536);

		/*
		 * Calculate bytes to copy this transfer and whether the
		 * copy completely covers the buffer or not.
		 */
		trivial = 0;
		n = lblksize - loff;
		if (n > uio->uio_resid) {
			n = uio->uio_resid;
			if (loff == lbase && uio->uio_offset + n == new_eof)
				trivial = 1;
			endofblk = 0;
		} else {
			if (loff == 0)
				trivial = 1;
			endofblk = 1;
		}
		if (lbase >= new_eof)
			trivial = 1;
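		/*
		 * Example: with a 16KB logical block, a 16KB write at
		 * offset 0 gives loff 0 and n == lblksize, so trivial = 1
		 * and endofblk = 1 (full overwrite, no read-in needed).
		 * A 4KB write at offset 8KB gives loff 8192 and n 4096,
		 * so trivial = 0 and endofblk = 0 (partial overwrite,
		 * assuming new_eof lies beyond the block).
		 */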
		/*
		 * Get the buffer
		 */
		if (uio->uio_segflg == UIO_NOCOPY) {
			/*
			 * Issuing a write with the same data backing the
			 * buffer.  Instantiate the buffer to collect the
			 * backing vm pages, then read-in any missing bits.
			 *
			 * This case is used by vop_stdputpages().
			 */
			bp = getblk(ip->vp, lbase, lblksize,
				    GETBLK_BHEAVY | GETBLK_KVABIO, 0);
			if ((bp->b_flags & B_CACHE) == 0) {
				bqrelse(bp);
				error = bread_kvabio(ip->vp, lbase,
						     lblksize, &bp);
			}
		} else if (trivial) {
			/*
			 * Even though we are entirely overwriting the buffer
			 * we may still have to zero it out to avoid a
			 * mmap/write visibility issue.
			 */
			bp = getblk(ip->vp, lbase, lblksize,
				    GETBLK_BHEAVY | GETBLK_KVABIO, 0);
			if ((bp->b_flags & B_CACHE) == 0)
				vfs_bio_clrbuf(bp);
		} else {
			/*
			 * Partial overwrite, read in any missing bits then
			 * replace the portion being written.
			 *
			 * (The strategy code will detect zero-fill physical
			 * blocks for this case).
			 */
			error = bread_kvabio(ip->vp, lbase, lblksize, &bp);
			if (error == 0)
				bheavy(bp);
		}

		if (error) {
			brelse(bp);
			break;
		}
		/*
		 * Ok, copy the data in
		 */
		bkvasync(bp);
		error = uiomovebp(bp, bp->b_data + loff, n, uio);
		kflags |= NOTE_WRITE;
		modified = 1;
		if (error) {
			brelse(bp);
			break;
		}

		/*
		 * WARNING: Pageout daemon will issue UIO_NOCOPY writes
		 *	    with IO_SYNC or IO_ASYNC set.  These writes
		 *	    must be handled as the pageout daemon expects.
		 *
		 * NOTE!    H2 relies on cluster_write() here because it
		 *	    cannot preallocate disk blocks at the logical
		 *	    level due to not knowing what the compression
		 *	    size will be at this time.
		 *
		 *	    We must use cluster_write() here and we depend
		 *	    on the write-behind feature to flush buffers
		 *	    appropriately.  If we let the buffer daemons do
		 *	    it the block allocations will be all over the
		 *	    map.
		 */
		if (ioflag & IO_SYNC) {
			bwrite(bp);
		} else if ((ioflag & IO_DIRECT) && endofblk) {
			bawrite(bp);
		} else if (ioflag & IO_ASYNC) {
			bawrite(bp);
		} else if (ip->vp->v_mount->mnt_flag & MNT_NOCLUSTERW) {
			bdwrite(bp);
		} else {
#if 1
			bp->b_flags |= B_CLUSTEROK;
			cluster_write(bp, new_eof, lblksize, seqcount);
#else
			bp->b_flags |= B_CLUSTEROK;
			bdwrite(bp);
#endif
		}
	}
	/*
	 * Cleanup.  If we extended the file EOF but failed to write through
	 * the entire write is a failure and we have to back-up.
	 */
	if (error && new_eof != old_eof) {
		hammer2_mtx_unlock(&ip->truncate_lock);
		hammer2_mtx_ex(&ip->lock);
		hammer2_mtx_ex(&ip->truncate_lock);
		hammer2_truncate_file(ip, old_eof);
		if (ip->flags & HAMMER2_INODE_MODIFIED)
			hammer2_inode_chain_sync(ip);
		hammer2_mtx_unlock(&ip->lock);
	} else if (modified) {
		struct vnode *vp = ip->vp;

		hammer2_mtx_ex(&ip->lock);
		hammer2_inode_modify(ip);
		if (uio->uio_segflg == UIO_NOCOPY) {
			if (vp->v_flag & VLASTWRITETS) {
				ip->meta.mtime =
				    (unsigned long)vp->v_lastwrite_ts.tv_sec *
				    1000000 +
				    vp->v_lastwrite_ts.tv_nsec / 1000;
			}
		} else {
			hammer2_update_time(&ip->meta.mtime);
			vclrflags(vp, VLASTWRITETS);
		}
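		/*
		 * (ip->meta.mtime is kept in microseconds, hence the
		 * tv_sec * 1000000 + tv_nsec / 1000 conversion above.)
		 */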
#if 0
		/*
		 * REMOVED - handled by hammer2_extend_file().  Do not issue
		 * a chain_sync() outside of a sync/fsync except for DIRECTDATA
		 * state changes.
		 *
		 * Under normal conditions we only issue a chain_sync if
		 * the inode's DIRECTDATA state changed.
		 */
		if (ip->flags & HAMMER2_INODE_RESIZED)
			hammer2_inode_chain_sync(ip);
#endif
		hammer2_mtx_unlock(&ip->lock);
		hammer2_knote(ip->vp, kflags);
	}
	hammer2_trans_assert_strategy(ip->pmp);
	hammer2_mtx_unlock(&ip->truncate_lock);

	return error;
}
/*
 * Truncate the size of a file.  The inode must not be locked.
 *
 * We must unconditionally set HAMMER2_INODE_RESIZED to properly
 * ensure that any on-media data beyond the new file EOF has been destroyed.
 *
 * WARNING: nvtruncbuf() can only be safely called without the inode lock
 *	    held due to the way our write thread works.  If the truncation
 *	    occurs in the middle of a buffer, nvtruncbuf() is responsible
 *	    for dirtying that buffer and zeroing out trailing bytes.
 *
 * WARNING! Assumes that the kernel interlocks size changes at the
 *	    vnode level.
 *
 * WARNING! Caller assumes responsibility for removing dead blocks
 *	    if INODE_RESIZED is set.
 */
static
void
hammer2_truncate_file(hammer2_inode_t *ip, hammer2_key_t nsize)
{
	hammer2_key_t lbase;
	int nblksize;

	hammer2_mtx_unlock(&ip->lock);
	if (ip->vp) {
		nblksize = hammer2_calc_logical(ip, nsize, &lbase, NULL);
		nvtruncbuf(ip->vp, nsize,
			   nblksize, (int)nsize & (nblksize - 1),
			   0);
	}
	hammer2_mtx_ex(&ip->lock);
	KKASSERT((ip->flags & HAMMER2_INODE_RESIZED) == 0);
	ip->osize = ip->meta.size;
	ip->meta.size = nsize;
	atomic_set_int(&ip->flags, HAMMER2_INODE_RESIZED);
	hammer2_inode_modify(ip);
}
/*
 * Extend the size of a file.  The inode must not be locked.
 *
 * Even though the file size is changing, we do not have to set the
 * INODE_RESIZED bit unless the file size crosses the EMBEDDED_BYTES
 * boundary.  When this occurs a hammer2_inode_chain_sync() is required
 * to prepare the inode cluster's indirect block table, otherwise
 * async execution of the strategy code will implode on us.
 *
 * WARNING! Assumes that the kernel interlocks size changes at the
 *	    vnode level.
 *
 * WARNING! Caller assumes responsibility for transitioning out
 *	    of the inode DIRECTDATA mode if INODE_RESIZED is set.
 */
static
void
hammer2_extend_file(hammer2_inode_t *ip, hammer2_key_t nsize)
{
	hammer2_key_t lbase;
	hammer2_key_t osize;
	int oblksize;
	int nblksize;

	KKASSERT((ip->flags & HAMMER2_INODE_RESIZED) == 0);
	hammer2_inode_modify(ip);
	osize = ip->meta.size;
	ip->osize = osize;
	ip->meta.size = nsize;

	/*
	 * We must issue a chain_sync() when the DIRECTDATA state changes
	 * to prevent confusion between the flush code and the in-memory
	 * state.  This is not perfect because we are doing it outside of
	 * a sync/fsync operation, so it might not be fully synchronized
	 * with the meta-data topology flush.
	 */
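	/*
	 * (DIRECTDATA means the file's data is embedded directly in the
	 * inode rather than referenced via block pointers; growing past
	 * HAMMER2_EMBEDDED_BYTES forces the switch to an indirect block
	 * table, which is why the sync is needed here.)
	 */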
	if (osize <= HAMMER2_EMBEDDED_BYTES && nsize > HAMMER2_EMBEDDED_BYTES) {
		atomic_set_int(&ip->flags, HAMMER2_INODE_RESIZED);
		hammer2_inode_chain_sync(ip);
	}

	hammer2_mtx_unlock(&ip->lock);
	if (ip->vp) {
		oblksize = hammer2_calc_logical(ip, osize, &lbase, NULL);
		nblksize = hammer2_calc_logical(ip, nsize, &lbase, NULL);
		nvextendbuf(ip->vp,
			    osize, nsize,
			    oblksize, nblksize,
			    -1, -1, 0);
	}
	hammer2_mtx_ex(&ip->lock);
}
static
int
hammer2_vop_nresolve(struct vop_nresolve_args *ap)
{
	hammer2_xop_nresolve_t *xop;
	hammer2_inode_t *ip;
	hammer2_inode_t *dip;
	struct namecache *ncp;
	struct vnode *vp;
	int error;

	dip = VTOI(ap->a_dvp);
	xop = hammer2_xop_alloc(dip, 0);

	ncp = ap->a_nch->ncp;
	hammer2_xop_setname(&xop->head, ncp->nc_name, ncp->nc_nlen);

	/*
	 * Note: In DragonFly the kernel handles '.' and '..'.
	 */
	hammer2_inode_lock(dip, HAMMER2_RESOLVE_SHARED);
	hammer2_xop_start(&xop->head, hammer2_xop_nresolve);

	error = hammer2_xop_collect(&xop->head, 0);
	error = hammer2_error_to_errno(error);
	if (error) {
		ip = NULL;
	} else {
		ip = hammer2_inode_get(dip->pmp, dip, &xop->head.cluster, -1);
	}
	hammer2_inode_unlock(dip);

	/*
	 * Acquire the related vnode
	 *
	 * NOTE: For error processing, only ENOENT resolves the namecache
	 *	 entry to NULL, otherwise we just return the error and
	 *	 leave the namecache unresolved.
	 *
	 * NOTE: multiple hammer2_inode structures can be aliased to the
	 *	 same chain element, for example for hardlinks.  This
	 *	 use case does not 'reattach' inode associations that
	 *	 might already exist, but always allocates a new one.
	 *
	 * WARNING: inode structure is locked exclusively via inode_get
	 *	    but chain was locked shared.  inode_unlock()
	 *	    will handle it properly.
	 */
	if (ip) {
		vp = hammer2_igetv(ip, &error);	/* error set to UNIX error */
		if (error == 0) {
			vn_unlock(vp);
			cache_setvp(ap->a_nch, vp);
		} else if (error == ENOENT) {
			cache_setvp(ap->a_nch, NULL);
		}
		hammer2_inode_unlock(ip);

		/*
		 * The vp should not be released until after we've disposed
		 * of our locks, because it might cause vop_inactive() to
		 * be called.
		 */
		if (vp)
			vrele(vp);
	} else {
		error = ENOENT;
		cache_setvp(ap->a_nch, NULL);
	}
	hammer2_xop_retire(&xop->head, HAMMER2_XOPMASK_VOP);
	KASSERT(error || ap->a_nch->ncp->nc_vp != NULL,
		("resolve error %d/%p ap %p\n",
		 error, ap->a_nch->ncp->nc_vp, ap));

	return error;
}
static
int
hammer2_vop_nlookupdotdot(struct vop_nlookupdotdot_args *ap)
{
	hammer2_inode_t *dip;
	hammer2_tid_t inum;
	int error;

	dip = VTOI(ap->a_dvp);
	inum = dip->meta.iparent;
	*ap->a_vpp = NULL;

	if (inum) {
		error = hammer2_vfs_vget(ap->a_dvp->v_mount, NULL,
					 inum, ap->a_vpp);
	} else {
		error = ENOENT;
	}
	return error;
}
static
int
hammer2_vop_nmkdir(struct vop_nmkdir_args *ap)
{
	hammer2_inode_t *dip;
	hammer2_inode_t *nip;
	struct namecache *ncp;
	const uint8_t *name;
	size_t name_len;
	hammer2_tid_t inum;
	int error;

	dip = VTOI(ap->a_dvp);
	if (dip->pmp->ronly)
		return (EROFS);
	if (hammer2_vfs_enospace(dip, 0, ap->a_cred) > 1)
		return (ENOSPC);

	ncp = ap->a_nch->ncp;
	name = ncp->nc_name;
	name_len = ncp->nc_nlen;

	hammer2_pfs_memory_wait(dip->pmp);
	hammer2_trans_init(dip->pmp, 0);

	inum = hammer2_trans_newinum(dip->pmp);

	/*
	 * Create the actual inode as a hidden file in the iroot, then
	 * create the directory entry.  The creation of the actual inode
	 * sets its nlinks to 1 which is the value we desire.
	 */
	nip = hammer2_inode_create(dip->pmp->iroot, dip, ap->a_vap, ap->a_cred,
				   NULL, 0, inum,
				   inum, 0, 0,
				   0, &error);
	if (error) {
		error = hammer2_error_to_errno(error);
	} else {
		error = hammer2_dirent_create(dip, name, name_len,
					      nip->meta.inum, nip->meta.type);
		/* returns UNIX error code */
	}
	if (error) {
		if (nip) {
			hammer2_inode_unlink_finisher(nip, 0);
			hammer2_inode_unlock(nip);
			nip = NULL;
		}
		*ap->a_vpp = NULL;
	} else {
		*ap->a_vpp = hammer2_igetv(nip, &error);
		hammer2_inode_unlock(nip);
	}

	/*
	 * Update dip's mtime
	 *
	 * We can use a shared inode lock and allow the meta.mtime update
	 * SMP race.  hammer2_inode_modify() is MPSAFE w/a shared lock.
	 */
	if (error == 0) {
		uint64_t mtime;

		hammer2_inode_lock(dip, HAMMER2_RESOLVE_SHARED);
		hammer2_update_time(&mtime);
		hammer2_inode_modify(dip);
		dip->meta.mtime = mtime;
		hammer2_inode_unlock(dip);
	}

	hammer2_trans_done(dip->pmp);

	if (error == 0) {
		cache_setunresolved(ap->a_nch);
		cache_setvp(ap->a_nch, *ap->a_vpp);
		hammer2_knote(ap->a_dvp, NOTE_WRITE | NOTE_LINK);
	}
	return error;
}
static
int
hammer2_vop_open(struct vop_open_args *ap)
{
	return vop_stdopen(ap);
}

/*
 * hammer2_vop_advlock { vp, id, op, fl, flags }
 */
static
int
hammer2_vop_advlock(struct vop_advlock_args *ap)
{
	hammer2_inode_t *ip = VTOI(ap->a_vp);
	hammer2_off_t size;

	size = ip->meta.size;
	return (lf_advlock(ap, &ip->advlock, size));
}

static
int
hammer2_vop_close(struct vop_close_args *ap)
{
	return vop_stdclose(ap);
}
/*
 * hammer2_vop_nlink { nch, dvp, vp, cred }
 *
 * Create a hardlink from (vp) to {dvp, nch}.
 */
static
int
hammer2_vop_nlink(struct vop_nlink_args *ap)
{
	hammer2_inode_t *tdip;	/* target directory to create link in */
	hammer2_inode_t *ip;	/* inode we are hardlinking to */
	struct namecache *ncp;
	const uint8_t *name;
	size_t name_len;
	int error;

	if (ap->a_dvp->v_mount != ap->a_vp->v_mount)
		return(EXDEV);

	tdip = VTOI(ap->a_dvp);
	if (tdip->pmp->ronly)
		return (EROFS);
	if (hammer2_vfs_enospace(tdip, 0, ap->a_cred) > 1)
		return (ENOSPC);

	ncp = ap->a_nch->ncp;
	name = ncp->nc_name;
	name_len = ncp->nc_nlen;

	/*
	 * ip represents the file being hardlinked.  The file could be a
	 * normal file or a hardlink target if it has already been hardlinked.
	 * (with the new semantics, it will almost always be a hardlink
	 * target).
	 *
	 * Bump nlinks and potentially also create or move the hardlink
	 * target in the parent directory common to (ip) and (tdip).  The
	 * consolidation code can modify ip->cluster.  The returned cluster
	 * is locked.
	 */
	ip = VTOI(ap->a_vp);
	KASSERT(ip->pmp, ("ip->pmp is NULL %p %p", ip, ip->pmp));
	hammer2_pfs_memory_wait(ip->pmp);
	hammer2_trans_init(ip->pmp, 0);

	/*
	 * Target should be an indexed inode or there's no way we will ever
	 * be able to find it!
	 */
	KKASSERT((ip->meta.name_key & HAMMER2_DIRHASH_VISIBLE) == 0);

	error = 0;

	/*
	 * Can return NULL and error == EXDEV if the common parent
	 * crosses a directory with the xlink flag set.
	 */
	hammer2_inode_lock(tdip, 0);
	hammer2_inode_lock(ip, 0);

	/*
	 * Create the directory entry and bump nlinks.
	 */
	if (error == 0) {
		error = hammer2_dirent_create(tdip, name, name_len,
					      ip->meta.inum, ip->meta.type);
		hammer2_inode_modify(ip);
		++ip->meta.nlinks;
	}
	if (error == 0) {
		/*
		 * Update dip's mtime
		 */
		uint64_t mtime;

		hammer2_update_time(&mtime);
		hammer2_inode_modify(tdip);
		tdip->meta.mtime = mtime;

		cache_setunresolved(ap->a_nch);
		cache_setvp(ap->a_nch, ap->a_vp);
	}
	hammer2_inode_unlock(ip);
	hammer2_inode_unlock(tdip);

	hammer2_trans_done(ip->pmp);
	hammer2_knote(ap->a_vp, NOTE_LINK);
	hammer2_knote(ap->a_dvp, NOTE_WRITE);

	return error;
}
/*
 * hammer2_vop_ncreate { nch, dvp, vpp, cred, vap }
 *
 * The operating system has already ensured that the directory entry
 * does not exist and done all appropriate namespace locking.
 */
static
int
hammer2_vop_ncreate(struct vop_ncreate_args *ap)
{
	hammer2_inode_t *dip;
	hammer2_inode_t *nip;
	struct namecache *ncp;
	const uint8_t *name;
	size_t name_len;
	hammer2_tid_t inum;
	int error;

	dip = VTOI(ap->a_dvp);
	if (dip->pmp->ronly)
		return (EROFS);
	if (hammer2_vfs_enospace(dip, 0, ap->a_cred) > 1)
		return (ENOSPC);

	ncp = ap->a_nch->ncp;
	name = ncp->nc_name;
	name_len = ncp->nc_nlen;
	hammer2_pfs_memory_wait(dip->pmp);
	hammer2_trans_init(dip->pmp, 0);

	inum = hammer2_trans_newinum(dip->pmp);

	/*
	 * Create the actual inode as a hidden file in the iroot, then
	 * create the directory entry.  The creation of the actual inode
	 * sets its nlinks to 1 which is the value we desire.
	 */
	nip = hammer2_inode_create(dip->pmp->iroot, dip, ap->a_vap, ap->a_cred,
				   NULL, 0, inum,
				   inum, 0, 0,
				   0, &error);

	if (error) {
		error = hammer2_error_to_errno(error);
	} else {
		error = hammer2_dirent_create(dip, name, name_len,
					      nip->meta.inum, nip->meta.type);
	}
	if (error) {
		if (nip) {
			hammer2_inode_unlink_finisher(nip, 0);
			hammer2_inode_unlock(nip);
			nip = NULL;
		}
		*ap->a_vpp = NULL;
	} else {
		*ap->a_vpp = hammer2_igetv(nip, &error);
		hammer2_inode_unlock(nip);
	}

	/*
	 * Update dip's mtime
	 */
	if (error == 0) {
		uint64_t mtime;

		hammer2_inode_lock(dip, HAMMER2_RESOLVE_SHARED);
		hammer2_update_time(&mtime);
		hammer2_inode_modify(dip);
		dip->meta.mtime = mtime;
		hammer2_inode_unlock(dip);
	}

	hammer2_trans_done(dip->pmp);

	if (error == 0) {
		cache_setunresolved(ap->a_nch);
		cache_setvp(ap->a_nch, *ap->a_vpp);
		hammer2_knote(ap->a_dvp, NOTE_WRITE);
	}
	return error;
}
/*
 * Make a device node (typically a fifo)
 */
static
int
hammer2_vop_nmknod(struct vop_nmknod_args *ap)
{
	hammer2_inode_t *dip;
	hammer2_inode_t *nip;
	struct namecache *ncp;
	const uint8_t *name;
	size_t name_len;
	hammer2_tid_t inum;
	int error;

	dip = VTOI(ap->a_dvp);
	if (dip->pmp->ronly)
		return (EROFS);
	if (hammer2_vfs_enospace(dip, 0, ap->a_cred) > 1)
		return (ENOSPC);

	ncp = ap->a_nch->ncp;
	name = ncp->nc_name;
	name_len = ncp->nc_nlen;
	hammer2_pfs_memory_wait(dip->pmp);
	hammer2_trans_init(dip->pmp, 0);

	/*
	 * Create the device inode and then create the directory entry.
	 */
	inum = hammer2_trans_newinum(dip->pmp);
	nip = hammer2_inode_create(dip->pmp->iroot, dip, ap->a_vap, ap->a_cred,
				   NULL, 0, inum,
				   inum, 0, 0,
				   0, &error);
	if (error == 0) {
		error = hammer2_dirent_create(dip, name, name_len,
					      nip->meta.inum, nip->meta.type);
	}
	if (error) {
		if (nip) {
			hammer2_inode_unlink_finisher(nip, 0);
			hammer2_inode_unlock(nip);
			nip = NULL;
		}
		*ap->a_vpp = NULL;
	} else {
		*ap->a_vpp = hammer2_igetv(nip, &error);
		hammer2_inode_unlock(nip);
	}

	/*
	 * Update dip's mtime
	 */
	if (error == 0) {
		uint64_t mtime;

		hammer2_inode_lock(dip, HAMMER2_RESOLVE_SHARED);
		hammer2_update_time(&mtime);
		hammer2_inode_modify(dip);
		dip->meta.mtime = mtime;
		hammer2_inode_unlock(dip);
	}

	hammer2_trans_done(dip->pmp);

	if (error == 0) {
		cache_setunresolved(ap->a_nch);
		cache_setvp(ap->a_nch, *ap->a_vpp);
		hammer2_knote(ap->a_dvp, NOTE_WRITE);
	}
	return error;
}
/*
 * hammer2_vop_nsymlink { nch, dvp, vpp, cred, vap, target }
 */
static
int
hammer2_vop_nsymlink(struct vop_nsymlink_args *ap)
{
	hammer2_inode_t *dip;
	hammer2_inode_t *nip;
	struct namecache *ncp;
	const uint8_t *name;
	size_t name_len;
	hammer2_tid_t inum;
	int error;

	dip = VTOI(ap->a_dvp);
	if (dip->pmp->ronly)
		return (EROFS);
	if (hammer2_vfs_enospace(dip, 0, ap->a_cred) > 1)
		return (ENOSPC);

	ncp = ap->a_nch->ncp;
	name = ncp->nc_name;
	name_len = ncp->nc_nlen;
	hammer2_pfs_memory_wait(dip->pmp);
	hammer2_trans_init(dip->pmp, 0);

	ap->a_vap->va_type = VLNK;	/* enforce type */

	/*
	 * Create the softlink as an inode and then create the directory
	 * entry.
	 */
	inum = hammer2_trans_newinum(dip->pmp);

	nip = hammer2_inode_create(dip->pmp->iroot, dip, ap->a_vap, ap->a_cred,
				   NULL, 0, inum,
				   inum, 0, 0,
				   0, &error);
	if (error == 0) {
		error = hammer2_dirent_create(dip, name, name_len,
					      nip->meta.inum, nip->meta.type);
	}
	if (error) {
		if (nip) {
			hammer2_inode_unlink_finisher(nip, 0);
			hammer2_inode_unlock(nip);
			nip = NULL;
		}
		*ap->a_vpp = NULL;
		hammer2_trans_done(dip->pmp);
		return error;
	}
	*ap->a_vpp = hammer2_igetv(nip, &error);

	/*
	 * Build the softlink (~like file data) and finalize the namecache.
	 */
	if (error == 0) {
		size_t bytes;
		struct uio auio;
		struct iovec aiov;

		bytes = strlen(ap->a_target);

		hammer2_inode_unlock(nip);
		bzero(&auio, sizeof(auio));
		bzero(&aiov, sizeof(aiov));
		auio.uio_iov = &aiov;
		auio.uio_segflg = UIO_SYSSPACE;
		auio.uio_rw = UIO_WRITE;
		auio.uio_resid = bytes;
		auio.uio_iovcnt = 1;
		auio.uio_td = curthread;
		aiov.iov_base = ap->a_target;
		aiov.iov_len = bytes;
		error = hammer2_write_file(nip, &auio, IO_APPEND, 0);
		/* XXX handle error */
		error = 0;
	} else {
		hammer2_inode_unlock(nip);
	}

	/*
	 * Update dip's mtime
	 */
	if (error == 0) {
		uint64_t mtime;

		hammer2_inode_lock(dip, HAMMER2_RESOLVE_SHARED);
		hammer2_update_time(&mtime);
		hammer2_inode_modify(dip);
		dip->meta.mtime = mtime;
		hammer2_inode_unlock(dip);
	}

	hammer2_trans_done(dip->pmp);

	/*
	 * Finalize namecache
	 */
	if (error == 0) {
		cache_setunresolved(ap->a_nch);
		cache_setvp(ap->a_nch, *ap->a_vpp);
		hammer2_knote(ap->a_dvp, NOTE_WRITE);
	}
	return error;
}
/*
 * hammer2_vop_nremove { nch, dvp, cred }
 */
static
int
hammer2_vop_nremove(struct vop_nremove_args *ap)
{
	hammer2_xop_unlink_t *xop;
	hammer2_inode_t *dip;
	hammer2_inode_t *ip;
	struct namecache *ncp;
	int error;
	int isopen;

	dip = VTOI(ap->a_dvp);
	if (dip->pmp->ronly)
		return (EROFS);
#if 0
	/* allow removals, but expect the user to also run bulkfree */
	if (hammer2_vfs_enospace(dip, 0, ap->a_cred) > 1)
		return (ENOSPC);
#endif

	ncp = ap->a_nch->ncp;

	hammer2_pfs_memory_wait(dip->pmp);
	hammer2_trans_init(dip->pmp, 0);
	hammer2_inode_lock(dip, 0);

	/*
	 * The unlink XOP unlinks the path from the directory and
	 * locates and returns the cluster associated with the real inode.
	 * We have to handle nlinks here on the frontend.
	 */
	xop = hammer2_xop_alloc(dip, HAMMER2_XOP_MODIFYING);
	hammer2_xop_setname(&xop->head, ncp->nc_name, ncp->nc_nlen);

	/*
	 * The namecache entry is locked so nobody can use this namespace.
	 * Calculate isopen to determine if this namespace has an open vp
	 * associated with it and resolve the vp only if it does.
	 *
	 * We try to avoid resolving the vnode if nobody has it open, but
	 * note that the test is via this namespace only.
	 */
	isopen = cache_isopen(ap->a_nch);
	xop->isdir = 0;
	xop->dopermanent = 0;
	hammer2_xop_start(&xop->head, hammer2_xop_unlink);

	/*
	 * Collect the real inode and adjust nlinks, destroy the real
	 * inode if nlinks transitions to 0 and it was the real inode
	 * (else it has already been removed).
	 */
	error = hammer2_xop_collect(&xop->head, 0);
	error = hammer2_error_to_errno(error);
	hammer2_inode_unlock(dip);

	if (error == 0) {
		ip = hammer2_inode_get(dip->pmp, dip, &xop->head.cluster, -1);
		hammer2_xop_retire(&xop->head, HAMMER2_XOPMASK_VOP);
		if (ip) {
			hammer2_inode_unlink_finisher(ip, isopen);
			hammer2_inode_unlock(ip);
		}
	} else {
		hammer2_xop_retire(&xop->head, HAMMER2_XOPMASK_VOP);
	}

	/*
	 * Update dip's mtime
	 */
	if (error == 0) {
		uint64_t mtime;

		hammer2_inode_lock(dip, HAMMER2_RESOLVE_SHARED);
		hammer2_update_time(&mtime);
		hammer2_inode_modify(dip);
		dip->meta.mtime = mtime;
		hammer2_inode_unlock(dip);
	}

	hammer2_inode_run_sideq(dip->pmp, 0);
	hammer2_trans_done(dip->pmp);
	if (error == 0) {
		cache_unlink(ap->a_nch);
		hammer2_knote(ap->a_dvp, NOTE_WRITE);
	}
	return (error);
}
/*
 * hammer2_vop_nrmdir { nch, dvp, cred }
 */
static
int
hammer2_vop_nrmdir(struct vop_nrmdir_args *ap)
{
	hammer2_xop_unlink_t *xop;
	hammer2_inode_t *dip;
	hammer2_inode_t *ip;
	struct namecache *ncp;
	int isopen;
	int error;

	dip = VTOI(ap->a_dvp);
	if (dip->pmp->ronly)
		return (EROFS);
#if 0
	/* allow removals, but expect the user to also run bulkfree */
	if (hammer2_vfs_enospace(dip, 0, ap->a_cred) > 1)
		return (ENOSPC);
#endif

	hammer2_pfs_memory_wait(dip->pmp);
	hammer2_trans_init(dip->pmp, 0);
	hammer2_inode_lock(dip, 0);

	xop = hammer2_xop_alloc(dip, HAMMER2_XOP_MODIFYING);

	ncp = ap->a_nch->ncp;
	hammer2_xop_setname(&xop->head, ncp->nc_name, ncp->nc_nlen);
	isopen = cache_isopen(ap->a_nch);
	xop->isdir = 1;
	xop->dopermanent = 0;
	hammer2_xop_start(&xop->head, hammer2_xop_unlink);

	/*
	 * Collect the real inode and adjust nlinks, destroy the real
	 * inode if nlinks transitions to 0 and it was the real inode
	 * (else it has already been removed).
	 */
	error = hammer2_xop_collect(&xop->head, 0);
	error = hammer2_error_to_errno(error);
	hammer2_inode_unlock(dip);

	if (error == 0) {
		ip = hammer2_inode_get(dip->pmp, dip, &xop->head.cluster, -1);
		hammer2_xop_retire(&xop->head, HAMMER2_XOPMASK_VOP);
		if (ip) {
			hammer2_inode_unlink_finisher(ip, isopen);
			hammer2_inode_unlock(ip);
		}
	} else {
		hammer2_xop_retire(&xop->head, HAMMER2_XOPMASK_VOP);
	}

	/*
	 * Update dip's mtime
	 */
	if (error == 0) {
		uint64_t mtime;

		hammer2_inode_lock(dip, HAMMER2_RESOLVE_SHARED);
		hammer2_update_time(&mtime);
		hammer2_inode_modify(dip);
		dip->meta.mtime = mtime;
		hammer2_inode_unlock(dip);
	}

	hammer2_inode_run_sideq(dip->pmp, 0);
	hammer2_trans_done(dip->pmp);
	if (error == 0) {
		cache_unlink(ap->a_nch);
		hammer2_knote(ap->a_dvp, NOTE_WRITE | NOTE_LINK);
	}
	return (error);
}
/*
 * hammer2_vop_nrename { fnch, tnch, fdvp, tdvp, cred }
 */
static
int
hammer2_vop_nrename(struct vop_nrename_args *ap)
{
	struct namecache *fncp;
	struct namecache *tncp;
	hammer2_inode_t *fdip;	/* source directory */
	hammer2_inode_t *tdip;	/* target directory */
	hammer2_inode_t *ip;	/* file being renamed */
	hammer2_inode_t *tip;	/* replaced target during rename or NULL */
	const uint8_t *fname;
	size_t fname_len;
	const uint8_t *tname;
	size_t tname_len;
	int error;
	int update_tdip;
	int update_fdip;
	hammer2_key_t tlhc;

	if (ap->a_fdvp->v_mount != ap->a_tdvp->v_mount)
		return(EXDEV);
	if (ap->a_fdvp->v_mount != ap->a_fnch->ncp->nc_vp->v_mount)
		return(EXDEV);

	fdip = VTOI(ap->a_fdvp);	/* source directory */
	tdip = VTOI(ap->a_tdvp);	/* target directory */

	if (fdip->pmp->ronly)
		return (EROFS);
	if (hammer2_vfs_enospace(fdip, 0, ap->a_cred) > 1)
		return (ENOSPC);

	fncp = ap->a_fnch->ncp;		/* entry name in source */
	fname = fncp->nc_name;
	fname_len = fncp->nc_nlen;

	tncp = ap->a_tnch->ncp;		/* entry name in target */
	tname = tncp->nc_name;
	tname_len = tncp->nc_nlen;
	hammer2_pfs_memory_wait(tdip->pmp);
	hammer2_trans_init(tdip->pmp, 0);

	update_tdip = 0;
	update_fdip = 0;

	ip = VTOI(fncp->nc_vp);
	hammer2_inode_ref(ip);		/* extra ref */

	/*
	 * Lookup the target name to determine if a directory entry
	 * is being overwritten.  We only hold related inode locks
	 * temporarily, the operating system is expected to protect
	 * against rename races.
	 */
	tip = tncp->nc_vp ? VTOI(tncp->nc_vp) : NULL;
	if (tip)
		hammer2_inode_ref(tip);	/* extra ref */

	/*
	 * Can return NULL and error == EXDEV if the common parent
	 * crosses a directory with the xlink flag set.
	 *
	 * For now try to avoid deadlocks with a simple pointer address
	 * test.  (tip) can be NULL.
	 */
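	/*
	 * (Locking in ascending address order means any two threads
	 * renaming between the same pair of directories acquire the
	 * locks in the same order, i.e. both lock the lower-addressed
	 * inode first, so they cannot deadlock against each other.)
	 */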
	error = 0;
	if (fdip <= tdip) {
		hammer2_inode_lock(fdip, 0);
		hammer2_inode_lock(tdip, 0);
	} else {
		hammer2_inode_lock(tdip, 0);
		hammer2_inode_lock(fdip, 0);
	}
	if (tip) {
		if (ip <= tip) {
			hammer2_inode_lock(ip, 0);
			hammer2_inode_lock(tip, 0);
		} else {
			hammer2_inode_lock(tip, 0);
			hammer2_inode_lock(ip, 0);
		}
	} else {
		hammer2_inode_lock(ip, 0);
	}

#if 0
	/*
	 * Delete the target namespace.
	 *
	 * REMOVED - NOW FOLDED INTO XOP_NRENAME OPERATION
	 */
	{
		hammer2_xop_unlink_t *xop2;
		hammer2_inode_t *tip;
		int isopen;

		/*
		 * The unlink XOP unlinks the path from the directory and
		 * locates and returns the cluster associated with the real
		 * inode.  We have to handle nlinks here on the frontend.
		 */
		xop2 = hammer2_xop_alloc(tdip, HAMMER2_XOP_MODIFYING);
		hammer2_xop_setname(&xop2->head, tname, tname_len);
		isopen = cache_isopen(ap->a_tnch);
		xop2->isdir = -1;
		xop2->dopermanent = 0;
		hammer2_xop_start(&xop2->head, hammer2_xop_unlink);

		/*
		 * Collect the real inode and adjust nlinks, destroy the real
		 * inode if nlinks transitions to 0 and it was the real inode
		 * (else it has already been removed).
		 */
		tnch_error = hammer2_xop_collect(&xop2->head, 0);
		tnch_error = hammer2_error_to_errno(tnch_error);
		/* hammer2_inode_unlock(tdip); */

		if (tnch_error == 0) {
			tip = hammer2_inode_get(tdip->pmp, NULL,
						&xop2->head.cluster, -1);
			hammer2_xop_retire(&xop2->head, HAMMER2_XOPMASK_VOP);
			if (tip) {
				hammer2_inode_unlink_finisher(tip, isopen);
				hammer2_inode_unlock(tip);
			}
		} else {
			hammer2_xop_retire(&xop2->head, HAMMER2_XOPMASK_VOP);
		}
		/* hammer2_inode_lock(tdip, 0); */

		if (tnch_error && tnch_error != ENOENT) {
			error = tnch_error;
			goto done2;
		}
		update_tdip = 1;
	}
#endif
	/*
	 * Resolve the collision space for (tdip, tname, tname_len)
	 *
	 * tdip must be held exclusively locked to prevent races since
	 * multiple filenames can end up in the same collision space.
	 */
	{
		hammer2_xop_scanlhc_t *sxop;
		hammer2_tid_t lhcbase;

		tlhc = hammer2_dirhash(tname, tname_len);
		lhcbase = tlhc;
		sxop = hammer2_xop_alloc(tdip, HAMMER2_XOP_MODIFYING);
		sxop->lhc = tlhc;
		hammer2_xop_start(&sxop->head, hammer2_xop_scanlhc);
		while ((error = hammer2_xop_collect(&sxop->head, 0)) == 0) {
			if (tlhc != sxop->head.cluster.focus->bref.key)
				break;
			++tlhc;
		}
		error = hammer2_error_to_errno(error);
		hammer2_xop_retire(&sxop->head, HAMMER2_XOPMASK_VOP);

		if (error) {
			if (error != ENOENT)
				goto done2;
			++tlhc;
			error = 0;
		}
		if ((lhcbase ^ tlhc) & ~HAMMER2_DIRHASH_LOMASK) {
			error = ENOSPC;
			goto done2;
		}
	}
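	/*
	 * (At this point tlhc is the first key at or above the name's
	 * dirhash that is not already in use.  The scan walks existing
	 * keys in order; the first gap, or ENOENT at the end, yields a
	 * free key.  ENOSPC is only possible if every key in the hash's
	 * DIRHASH_LOMASK collision window is occupied.)
	 */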
	/*
	 * Ready to go, issue the rename to the backend.  Note that meta-data
	 * updates to the related inodes occur separately from the rename
	 * operation.
	 *
	 * NOTE: While it is not necessary to update ip->meta.name*, doing
	 *	 so aids catastrophic recovery and debugging.
	 */
	if (error == 0) {
		hammer2_xop_nrename_t *xop4;

		xop4 = hammer2_xop_alloc(fdip, HAMMER2_XOP_MODIFYING);
		xop4->lhc = tlhc;
		xop4->ip_key = ip->meta.name_key;
		hammer2_xop_setip2(&xop4->head, ip);
		hammer2_xop_setip3(&xop4->head, tdip);
		hammer2_xop_setname(&xop4->head, fname, fname_len);
		hammer2_xop_setname2(&xop4->head, tname, tname_len);
		hammer2_xop_start(&xop4->head, hammer2_xop_nrename);

		error = hammer2_xop_collect(&xop4->head, 0);
		error = hammer2_error_to_errno(error);
		hammer2_xop_retire(&xop4->head, HAMMER2_XOPMASK_VOP);

		if (error == ENOENT)
			error = 0;

		/*
		 * Update inode meta-data.
		 *
		 * WARNING! The in-memory inode (ip) structure does not
		 *	    maintain a copy of the inode's filename buffer.
		 */
		if (error == 0 &&
		    (ip->meta.name_key & HAMMER2_DIRHASH_VISIBLE)) {
			hammer2_inode_modify(ip);
			ip->meta.name_len = tname_len;
			ip->meta.name_key = tlhc;
		}
		if (error == 0) {
			hammer2_inode_modify(ip);
			ip->meta.iparent = tdip->meta.inum;
		}
		update_fdip = 1;
		update_tdip = 1;
	}
done2:
	/*
	 * If no error, the backend has replaced the target directory entry.
	 * We must adjust nlinks on the original replace target if it exists.
	 */
	if (error == 0 && tip) {
		int isopen;

		isopen = cache_isopen(ap->a_tnch);
		hammer2_inode_unlink_finisher(tip, isopen);
	}

	/*
	 * Update directory mtimes to represent that something changed.
	 */
	if (update_fdip || update_tdip) {
		uint64_t mtime;

		hammer2_update_time(&mtime);
		if (update_fdip) {
			hammer2_inode_modify(fdip);
			fdip->meta.mtime = mtime;
		}
		if (update_tdip) {
			hammer2_inode_modify(tdip);
			tdip->meta.mtime = mtime;
		}
	}
	if (tip) {
		hammer2_inode_unlock(tip);
		hammer2_inode_drop(tip);
	}
	hammer2_inode_unlock(ip);
	hammer2_inode_unlock(tdip);
	hammer2_inode_unlock(fdip);
	hammer2_inode_drop(ip);
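
	/*
	 * Give the pmp's side queue a pass now that the inode locks are
	 * released; it holds inodes (e.g. unlinked-but-still-open files)
	 * whose chains still require updates not driven by a vnode.
	 */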
	hammer2_inode_run_sideq(fdip->pmp, 0);

	hammer2_trans_done(tdip->pmp);

	/*
	 * Issue the namecache update after unlocking all the internal
	 * hammer2 structures, otherwise we might deadlock.
	 *
	 * WARNING! The target namespace must be updated atomically,
	 *	    and we depend on cache_rename() to handle that for
	 *	    us.  Do not do a separate cache_unlink() because
	 *	    that leaves a small window of opportunity for other
	 *	    threads to allocate the target namespace before we
	 *	    manage to complete our rename.
	 *
	 * WARNING! cache_rename() (and cache_unlink()) will properly
	 *	    set VREF_FINALIZE on any attached vnode.  Do not
	 *	    call cache_setunresolved() manually before-hand as
	 *	    this will prevent the flag from being set later via
	 *	    cache_rename().  If VREF_FINALIZE is not properly set
	 *	    and the inode is no longer in the topology, related
	 *	    chains can remain dirty indefinitely.
	 */
	if (error == 0 && tip) {
		/*cache_unlink(ap->a_tnch); see above */
		/*cache_setunresolved(ap->a_tnch); see above */
	}
	if (error == 0) {
		cache_rename(ap->a_fnch, ap->a_tnch);
		hammer2_knote(ap->a_fdvp, NOTE_WRITE);
		hammer2_knote(ap->a_tdvp, NOTE_WRITE);
		hammer2_knote(fncp->nc_vp, NOTE_RENAME);
	}

	return (error);
}

/*
 * hammer2_vop_ioctl { vp, command, data, fflag, cred }
 */
static
int
hammer2_vop_ioctl(struct vop_ioctl_args *ap)
{
	hammer2_inode_t *ip;
	int error;

	ip = VTOI(ap->a_vp);

	error = hammer2_ioctl(ip, ap->a_command, (void *)ap->a_data,
			      ap->a_fflag, ap->a_cred);
	return (error);
}
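
/*
 * Only MOUNTCTL_SET_EXPORT is handled here; it passes the supplied
 * export_args to vfs_export() to configure NFS exporting of the PFS.
 * All other operations are forwarded to the standard handler.
 */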
static
int
hammer2_vop_mountctl(struct vop_mountctl_args *ap)
{
	struct mount *mp;
	hammer2_pfs_t *pmp;
	int rc;

	switch (ap->a_op) {
	case (MOUNTCTL_SET_EXPORT):
		mp = ap->a_head.a_ops->head.vv_mount;
		pmp = MPTOPMP(mp);

		if (ap->a_ctllen != sizeof(struct export_args))
			rc = (EINVAL);
		else
			rc = vfs_export(mp, &pmp->export,
					(const struct export_args *)ap->a_ctl);
		break;
	default:
		rc = vop_stdmountctl(ap);
		break;
	}
	return (rc);
}

/*
 * KQFILTER
 */
static void filt_hammer2detach(struct knote *kn);
static int filt_hammer2read(struct knote *kn, long hint);
static int filt_hammer2write(struct knote *kn, long hint);
static int filt_hammer2vnode(struct knote *kn, long hint);
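
/*
 * All three filters are MPSAFE and share a common detach routine;
 * the NULL slot in each initializer is the unused attach callback.
 */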
static struct filterops hammer2read_filtops =
	{ FILTEROP_ISFD | FILTEROP_MPSAFE,
	  NULL, filt_hammer2detach, filt_hammer2read };
static struct filterops hammer2write_filtops =
	{ FILTEROP_ISFD | FILTEROP_MPSAFE,
	  NULL, filt_hammer2detach, filt_hammer2write };
static struct filterops hammer2vnode_filtops =
	{ FILTEROP_ISFD | FILTEROP_MPSAFE,
	  NULL, filt_hammer2detach, filt_hammer2vnode };

static
int
hammer2_vop_kqfilter(struct vop_kqfilter_args *ap)
{
	struct vnode *vp = ap->a_vp;
	struct knote *kn = ap->a_kn;

	switch (kn->kn_filter) {
	case EVFILT_READ:
		kn->kn_fop = &hammer2read_filtops;
		break;
	case EVFILT_WRITE:
		kn->kn_fop = &hammer2write_filtops;
		break;
	case EVFILT_VNODE:
		kn->kn_fop = &hammer2vnode_filtops;
		break;
	default:
		return (EOPNOTSUPP);
	}

	kn->kn_hook = (caddr_t)vp;

	knote_insert(&vp->v_pollinfo.vpi_kqinfo.ki_note, kn);

	return(0);
}

static void
filt_hammer2detach(struct knote *kn)
{
	struct vnode *vp = (void *)kn->kn_hook;

	knote_remove(&vp->v_pollinfo.vpi_kqinfo.ki_note, kn);
}
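
/*
 * EVFILT_READ is ready when bytes are available at the current file
 * offset.  kn_data is set to the available byte count, clamped to
 * INTPTR_MAX.  NOTE_REVOKE flags EOF and one-shots the knote, and
 * NOTE_OLDAPI callers are always reported ready.
 */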
static int
filt_hammer2read(struct knote *kn, long hint)
{
	struct vnode *vp = (void *)kn->kn_hook;
	hammer2_inode_t *ip = VTOI(vp);
	off_t off;

	if (hint == NOTE_REVOKE) {
		kn->kn_flags |= (EV_EOF | EV_NODATA | EV_ONESHOT);
		return(1);
	}
	off = ip->meta.size - kn->kn_fp->f_offset;
	kn->kn_data = (off < INTPTR_MAX) ? off : INTPTR_MAX;
	if (kn->kn_sfflags & NOTE_OLDAPI)
		return(1);
	return (kn->kn_data != 0);
}

static int
filt_hammer2write(struct knote *kn, long hint)
{
	if (hint == NOTE_REVOKE)
		kn->kn_flags |= (EV_EOF | EV_NODATA | EV_ONESHOT);
	kn->kn_data = 0;
	return (1);
}
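
/*
 * EVFILT_VNODE latches any hinted event the caller subscribed to
 * into kn_fflags.  NOTE_REVOKE forces EOF/NODATA and always fires.
 */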
static int
filt_hammer2vnode(struct knote *kn, long hint)
{
	if (kn->kn_sfflags & hint)
		kn->kn_fflags |= hint;
	if (hint == NOTE_REVOKE) {
		kn->kn_flags |= (EV_EOF | EV_NODATA);
		return (1);
	}
	return (kn->kn_fflags != 0);
}

/*
 * FIFO VOPS
 */
static
int
hammer2_vop_markatime(struct vop_markatime_args *ap)
{
	hammer2_inode_t *ip;
	struct vnode *vp;

	vp = ap->a_vp;
	ip = VTOI(vp);

	if (ip->pmp->ronly)
		return (EROFS);
	return(0);
}
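
/*
 * Try the fifo layer's kqfilter first; if it does not support the
 * requested filter, fall back to the regular hammer2 vnode kqfilter
 * so filters such as EVFILT_VNODE still work on fifos.
 */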
static
int
hammer2_vop_fifokqfilter(struct vop_kqfilter_args *ap)
{
	int error;

	error = VOCALL(&fifo_vnode_vops, &ap->a_head);
	if (error)
		error = hammer2_vop_kqfilter(ap);
	return(error);
}

/*
 * VOPS vector
 */
struct vop_ops hammer2_vnode_vops = {
	.vop_default = vop_defaultop,
	.vop_fsync = hammer2_vop_fsync,
	.vop_getpages = vop_stdgetpages,
	.vop_putpages = vop_stdputpages,
	.vop_access = hammer2_vop_access,
	.vop_advlock = hammer2_vop_advlock,
	.vop_close = hammer2_vop_close,
	.vop_nlink = hammer2_vop_nlink,
	.vop_ncreate = hammer2_vop_ncreate,
	.vop_nsymlink = hammer2_vop_nsymlink,
	.vop_nremove = hammer2_vop_nremove,
	.vop_nrmdir = hammer2_vop_nrmdir,
	.vop_nrename = hammer2_vop_nrename,
	.vop_getattr = hammer2_vop_getattr,
	.vop_setattr = hammer2_vop_setattr,
	.vop_readdir = hammer2_vop_readdir,
	.vop_readlink = hammer2_vop_readlink,
	.vop_read = hammer2_vop_read,
	.vop_write = hammer2_vop_write,
	.vop_open = hammer2_vop_open,
	.vop_inactive = hammer2_vop_inactive,
	.vop_reclaim = hammer2_vop_reclaim,
	.vop_nresolve = hammer2_vop_nresolve,
	.vop_nlookupdotdot = hammer2_vop_nlookupdotdot,
	.vop_nmkdir = hammer2_vop_nmkdir,
	.vop_nmknod = hammer2_vop_nmknod,
	.vop_ioctl = hammer2_vop_ioctl,
	.vop_mountctl = hammer2_vop_mountctl,
	.vop_bmap = hammer2_vop_bmap,
	.vop_strategy = hammer2_vop_strategy,
	.vop_kqfilter = hammer2_vop_kqfilter
};

struct vop_ops hammer2_spec_vops = {
	.vop_default = vop_defaultop,
	.vop_fsync = hammer2_vop_fsync,
	.vop_read = vop_stdnoread,
	.vop_write = vop_stdnowrite,
	.vop_access = hammer2_vop_access,
	.vop_close = hammer2_vop_close,
	.vop_markatime = hammer2_vop_markatime,
	.vop_getattr = hammer2_vop_getattr,
	.vop_inactive = hammer2_vop_inactive,
	.vop_reclaim = hammer2_vop_reclaim,
	.vop_setattr = hammer2_vop_setattr
};

struct vop_ops hammer2_fifo_vops = {
	.vop_default = fifo_vnoperate,
	.vop_fsync = hammer2_vop_fsync,
#if 0
	.vop_read = hammer2_vop_fiforead,
	.vop_write = hammer2_vop_fifowrite,
#endif
	.vop_access = hammer2_vop_access,
#if 0
	.vop_close = hammer2_vop_fifoclose,
#endif
	.vop_markatime = hammer2_vop_markatime,
	.vop_getattr = hammer2_vop_getattr,
	.vop_inactive = hammer2_vop_inactive,
	.vop_reclaim = hammer2_vop_reclaim,
	.vop_setattr = hammer2_vop_setattr,
	.vop_kqfilter = hammer2_vop_fifokqfilter
};