hammer2 - Fix focus vs modify race
[dragonfly.git] sys/vfs/hammer2/hammer2_vnops.c
blob fb4f3216d01c8f52260f766280605626a7fee0d6
1 /*
2 * Copyright (c) 2011-2018 The DragonFly Project. All rights reserved.
4 * This code is derived from software contributed to The DragonFly Project
5 * by Matthew Dillon <dillon@dragonflybsd.org>
6 * by Venkatesh Srinivas <vsrinivas@dragonflybsd.org>
7 * by Daniel Flores (GSOC 2013 - mentored by Matthew Dillon, compression)
9 * Redistribution and use in source and binary forms, with or without
10 * modification, are permitted provided that the following conditions
11 * are met:
13 * 1. Redistributions of source code must retain the above copyright
14 * notice, this list of conditions and the following disclaimer.
15 * 2. Redistributions in binary form must reproduce the above copyright
16 * notice, this list of conditions and the following disclaimer in
17 * the documentation and/or other materials provided with the
18 * distribution.
19 * 3. Neither the name of The DragonFly Project nor the names of its
20 * contributors may be used to endorse or promote products derived
21 * from this software without specific, prior written permission.
23 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
24 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
25 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
26 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
27 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
28 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
29 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
30 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
31 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
32 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
33 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
34 * SUCH DAMAGE.
37 * Kernel Filesystem interface
39 * NOTE! local ipdata pointers must be reloaded on any modifying operation
40 * to the inode as its underlying chain may have changed.
43 #include <sys/param.h>
44 #include <sys/systm.h>
45 #include <sys/kernel.h>
46 #include <sys/fcntl.h>
47 #include <sys/buf.h>
48 #include <sys/proc.h>
49 #include <sys/namei.h>
50 #include <sys/mount.h>
51 #include <sys/vnode.h>
52 #include <sys/mountctl.h>
53 #include <sys/dirent.h>
54 #include <sys/uio.h>
55 #include <sys/objcache.h>
56 #include <sys/event.h>
57 #include <sys/file.h>
58 #include <vfs/fifofs/fifo.h>
60 #include "hammer2.h"
62 static int hammer2_read_file(hammer2_inode_t *ip, struct uio *uio,
63 int seqcount);
64 static int hammer2_write_file(hammer2_inode_t *ip, struct uio *uio,
65 int ioflag, int seqcount);
66 static void hammer2_extend_file(hammer2_inode_t *ip, hammer2_key_t nsize);
67 static void hammer2_truncate_file(hammer2_inode_t *ip, hammer2_key_t nsize);
69 struct objcache *cache_xops;
71 static __inline
72 void
73 hammer2_knote(struct vnode *vp, int flags)
75 if (flags)
76 KNOTE(&vp->v_pollinfo.vpi_kqinfo.ki_note, flags);
80 * Last reference to a vnode is going away but it is still cached.
82 static
83 int
84 hammer2_vop_inactive(struct vop_inactive_args *ap)
86 hammer2_inode_t *ip;
87 struct vnode *vp;
89 vp = ap->a_vp;
90 ip = VTOI(vp);
93 * Degenerate case
95 if (ip == NULL) {
96 vrecycle(vp);
97 return (0);
101 * Check for deleted inodes and recycle immediately on the last
102 * release. Be sure to destroy any left-over buffer cache buffers
103 * so we do not waste time trying to flush them.
105 * Note that deleting the file block chains under the inode chain
106 * would just be a waste of energy, so don't do it.
108 * WARNING: nvtruncbuf() can only be safely called without the inode
109 * lock held due to the way our write thread works.
111 if (ip->flags & HAMMER2_INODE_ISUNLINKED) {
112 hammer2_key_t lbase;
113 int nblksize;
116 * Detect updates to the embedded data which may be
117 * synchronized by the strategy code. Simply mark the
118 * inode modified so it gets picked up by our normal flush.
120 nblksize = hammer2_calc_logical(ip, 0, &lbase, NULL);
121 nvtruncbuf(vp, 0, nblksize, 0, 0);
122 vrecycle(vp);
124 return (0);
128 * Reclaim a vnode so that it can be reused; after the inode is
129 * disassociated, the filesystem must manage it alone.
131 static
133 hammer2_vop_reclaim(struct vop_reclaim_args *ap)
135 hammer2_inode_t *ip;
136 hammer2_pfs_t *pmp;
137 struct vnode *vp;
139 vp = ap->a_vp;
140 ip = VTOI(vp);
141 if (ip == NULL) {
142 return(0);
144 pmp = ip->pmp;
147 * The final close of a deleted file or directory marks it for
148 * destruction. The DELETED flag allows the flusher to shortcut
149 * any modified blocks still unflushed (that is, just ignore them).
151 * HAMMER2 usually does not try to optimize the freemap by returning
152 * deleted blocks to it as it does not usually know how many snapshots
153 * might be referencing portions of the file/dir.
155 vp->v_data = NULL;
156 ip->vp = NULL;
159 * NOTE! We do not attempt to flush chains here, flushing is
160 * really fragile and could also deadlock.
162 vclrisdirty(vp);
165 * A modified inode may require chain synchronization. This
166 * synchronization is usually handled by VOP_SYNC / VOP_FSYNC
167 * when vfsync() is called. However, that requires a vnode.
169 * When the vnode is disassociated we must keep track of any modified
170 * inode via the sideq so that it is properly flushed. We cannot
171 * safely synchronize the inode from inside the reclaim due to
172 * potentially deep locks held as-of when the reclaim occurs.
173 * Interactions and potential deadlocks abound.
175 if ((ip->flags & (HAMMER2_INODE_ISUNLINKED |
176 HAMMER2_INODE_MODIFIED |
177 HAMMER2_INODE_RESIZED |
178 HAMMER2_INODE_DIRTYDATA)) &&
179 (ip->flags & HAMMER2_INODE_ISDELETED) == 0) {
180 hammer2_inode_sideq_t *ipul;
182 ipul = kmalloc(sizeof(*ipul), pmp->minode, M_WAITOK | M_ZERO);
183 ipul->ip = ip;
185 hammer2_spin_ex(&pmp->list_spin);
186 if ((ip->flags & HAMMER2_INODE_ONSIDEQ) == 0) {
187 /* ref -> sideq */
188 atomic_set_int(&ip->flags, HAMMER2_INODE_ONSIDEQ);
189 TAILQ_INSERT_TAIL(&pmp->sideq, ipul, entry);
190 ++pmp->sideq_count;
191 hammer2_spin_unex(&pmp->list_spin);
192 } else {
193 hammer2_spin_unex(&pmp->list_spin);
194 kfree(ipul, pmp->minode);
195 hammer2_inode_drop(ip); /* vp ref */
197 /* retain ref from vp for ipul */
198 } else {
199 hammer2_inode_drop(ip); /* vp ref */
203 * XXX handle background sync when ip is dirty; the kernel will no
204 * longer notify us regarding this inode because there is no longer
205 * a vnode attached to it.
208 return (0);
212 * Currently this function synchronizes the front-end inode state to the
213 * backend chain topology, then flushes the inode's chain and sub-topology
214 * to backend media. This function does not flush the root topology down to
215 * the inode.
217 static
219 hammer2_vop_fsync(struct vop_fsync_args *ap)
221 hammer2_inode_t *ip;
222 struct vnode *vp;
223 int error1;
224 int error2;
226 vp = ap->a_vp;
227 ip = VTOI(vp);
228 error1 = 0;
230 hammer2_trans_init(ip->pmp, 0);
233 * Flush dirty buffers in the file's logical buffer cache.
234 * It is best to wait for the strategy code to commit the
235 * buffers to the device's backing buffer cache before
236 * then trying to flush the inode.
238 * This should be quick, but certain inode modifications cached
239 * entirely in the hammer2_inode structure may not trigger a
240 * buffer read until the flush so the fsync can wind up also
241 * doing scattered reads.
243 vfsync(vp, ap->a_waitfor, 1, NULL, NULL);
244 bio_track_wait(&vp->v_track_write, 0, 0);
247 * Flush any inode changes
249 hammer2_inode_lock(ip, 0);
250 if (ip->flags & (HAMMER2_INODE_RESIZED|HAMMER2_INODE_MODIFIED))
251 error1 = hammer2_inode_chain_sync(ip);
254 * Flush dirty chains related to the inode.
256 * NOTE! XXX We do not currently flush to the volume root, ultimately
257 * we will want to have a shortcut for the flushed inode stored
258 * in the volume root for recovery purposes.
260 error2 = hammer2_inode_chain_flush(ip);
261 if (error2)
262 error1 = error2;
265 * We may be able to clear the vnode dirty flag. The
266 * hammer2_pfs_moderate() code depends on this usually working.
268 if ((ip->flags & (HAMMER2_INODE_MODIFIED |
269 HAMMER2_INODE_RESIZED |
270 HAMMER2_INODE_DIRTYDATA)) == 0 &&
271 RB_EMPTY(&vp->v_rbdirty_tree) &&
272 !bio_track_active(&vp->v_track_write)) {
273 vclrisdirty(vp);
275 hammer2_inode_unlock(ip);
276 hammer2_trans_done(ip->pmp, 0);
278 return (error1);
281 static
283 hammer2_vop_access(struct vop_access_args *ap)
285 hammer2_inode_t *ip = VTOI(ap->a_vp);
286 uid_t uid;
287 gid_t gid;
288 int error;
290 hammer2_inode_lock(ip, HAMMER2_RESOLVE_SHARED);
291 uid = hammer2_to_unix_xid(&ip->meta.uid);
292 gid = hammer2_to_unix_xid(&ip->meta.gid);
293 error = vop_helper_access(ap, uid, gid, ip->meta.mode, ip->meta.uflags);
294 hammer2_inode_unlock(ip);
296 return (error);
299 static
301 hammer2_vop_getattr(struct vop_getattr_args *ap)
303 hammer2_pfs_t *pmp;
304 hammer2_inode_t *ip;
305 struct vnode *vp;
306 struct vattr *vap;
307 hammer2_chain_t *chain;
308 int i;
310 vp = ap->a_vp;
311 vap = ap->a_vap;
313 ip = VTOI(vp);
314 pmp = ip->pmp;
316 hammer2_inode_lock(ip, HAMMER2_RESOLVE_SHARED);
318 vap->va_fsid = pmp->mp->mnt_stat.f_fsid.val[0];
319 vap->va_fileid = ip->meta.inum;
320 vap->va_mode = ip->meta.mode;
321 vap->va_nlink = ip->meta.nlinks;
322 vap->va_uid = hammer2_to_unix_xid(&ip->meta.uid);
323 vap->va_gid = hammer2_to_unix_xid(&ip->meta.gid);
324 vap->va_rmajor = 0;
325 vap->va_rminor = 0;
326 vap->va_size = ip->meta.size; /* protected by shared lock */
327 vap->va_blocksize = HAMMER2_PBUFSIZE;
328 vap->va_flags = ip->meta.uflags;
329 hammer2_time_to_timespec(ip->meta.ctime, &vap->va_ctime);
330 hammer2_time_to_timespec(ip->meta.mtime, &vap->va_mtime);
331 hammer2_time_to_timespec(ip->meta.mtime, &vap->va_atime);
332 vap->va_gen = 1;
333 vap->va_bytes = 0;
334 if (ip->meta.type == HAMMER2_OBJTYPE_DIRECTORY) {
336 * Can't really calculate directory use sans the files under
337 * it, just assume one block for now.
339 vap->va_bytes += HAMMER2_INODE_BYTES;
340 } else {
341 for (i = 0; i < ip->cluster.nchains; ++i) {
342 if ((chain = ip->cluster.array[i].chain) != NULL) {
343 if (vap->va_bytes <
344 chain->bref.embed.stats.data_count) {
345 vap->va_bytes =
346 chain->bref.embed.stats.data_count;
351 vap->va_type = hammer2_get_vtype(ip->meta.type);
352 vap->va_filerev = 0;
353 vap->va_uid_uuid = ip->meta.uid;
354 vap->va_gid_uuid = ip->meta.gid;
355 vap->va_vaflags = VA_UID_UUID_VALID | VA_GID_UUID_VALID |
356 VA_FSID_UUID_VALID;
358 hammer2_inode_unlock(ip);
360 return (0);
363 static
365 hammer2_vop_setattr(struct vop_setattr_args *ap)
367 hammer2_inode_t *ip;
368 struct vnode *vp;
369 struct vattr *vap;
370 int error;
371 int kflags = 0;
372 uint64_t ctime;
374 vp = ap->a_vp;
375 vap = ap->a_vap;
376 hammer2_update_time(&ctime);
378 ip = VTOI(vp);
380 if (ip->pmp->ronly)
381 return (EROFS);
382 if (hammer2_vfs_enospace(ip, 0, ap->a_cred) > 1)
383 return (ENOSPC);
385 hammer2_pfs_memory_wait(ip, 0);
386 hammer2_trans_init(ip->pmp, 0);
387 hammer2_inode_lock(ip, 0);
388 error = 0;
390 if (vap->va_flags != VNOVAL) {
391 uint32_t flags;
393 flags = ip->meta.uflags;
394 error = vop_helper_setattr_flags(&flags, vap->va_flags,
395 hammer2_to_unix_xid(&ip->meta.uid),
396 ap->a_cred);
397 if (error == 0) {
398 if (ip->meta.uflags != flags) {
399 hammer2_inode_modify(ip);
400 ip->meta.uflags = flags;
401 ip->meta.ctime = ctime;
402 kflags |= NOTE_ATTRIB;
404 if (ip->meta.uflags & (IMMUTABLE | APPEND)) {
405 error = 0;
406 goto done;
409 goto done;
411 if (ip->meta.uflags & (IMMUTABLE | APPEND)) {
412 error = EPERM;
413 goto done;
415 if (vap->va_uid != (uid_t)VNOVAL || vap->va_gid != (gid_t)VNOVAL) {
416 mode_t cur_mode = ip->meta.mode;
417 uid_t cur_uid = hammer2_to_unix_xid(&ip->meta.uid);
418 gid_t cur_gid = hammer2_to_unix_xid(&ip->meta.gid);
419 uuid_t uuid_uid;
420 uuid_t uuid_gid;
422 error = vop_helper_chown(ap->a_vp, vap->va_uid, vap->va_gid,
423 ap->a_cred,
424 &cur_uid, &cur_gid, &cur_mode);
425 if (error == 0) {
426 hammer2_guid_to_uuid(&uuid_uid, cur_uid);
427 hammer2_guid_to_uuid(&uuid_gid, cur_gid);
428 if (bcmp(&uuid_uid, &ip->meta.uid, sizeof(uuid_uid)) ||
429 bcmp(&uuid_gid, &ip->meta.gid, sizeof(uuid_gid)) ||
430 ip->meta.mode != cur_mode
432 hammer2_inode_modify(ip);
433 ip->meta.uid = uuid_uid;
434 ip->meta.gid = uuid_gid;
435 ip->meta.mode = cur_mode;
436 ip->meta.ctime = ctime;
438 kflags |= NOTE_ATTRIB;
443 * Resize the file
445 if (vap->va_size != VNOVAL && ip->meta.size != vap->va_size) {
446 switch(vp->v_type) {
447 case VREG:
448 if (vap->va_size == ip->meta.size)
449 break;
450 if (vap->va_size < ip->meta.size) {
451 hammer2_mtx_ex(&ip->truncate_lock);
452 hammer2_truncate_file(ip, vap->va_size);
453 hammer2_mtx_unlock(&ip->truncate_lock);
454 kflags |= NOTE_WRITE;
455 } else {
456 hammer2_extend_file(ip, vap->va_size);
457 kflags |= NOTE_WRITE | NOTE_EXTEND;
459 hammer2_inode_modify(ip);
460 ip->meta.mtime = ctime;
461 vclrflags(vp, VLASTWRITETS);
462 break;
463 default:
464 error = EINVAL;
465 goto done;
468 #if 0
469 /* atime not supported */
470 if (vap->va_atime.tv_sec != VNOVAL) {
471 hammer2_inode_modify(ip);
472 ip->meta.atime = hammer2_timespec_to_time(&vap->va_atime);
473 kflags |= NOTE_ATTRIB;
475 #endif
476 if (vap->va_mode != (mode_t)VNOVAL) {
477 mode_t cur_mode = ip->meta.mode;
478 uid_t cur_uid = hammer2_to_unix_xid(&ip->meta.uid);
479 gid_t cur_gid = hammer2_to_unix_xid(&ip->meta.gid);
481 error = vop_helper_chmod(ap->a_vp, vap->va_mode, ap->a_cred,
482 cur_uid, cur_gid, &cur_mode);
483 if (error == 0 && ip->meta.mode != cur_mode) {
484 hammer2_inode_modify(ip);
485 ip->meta.mode = cur_mode;
486 ip->meta.ctime = ctime;
487 kflags |= NOTE_ATTRIB;
491 if (vap->va_mtime.tv_sec != VNOVAL) {
492 hammer2_inode_modify(ip);
493 ip->meta.mtime = hammer2_timespec_to_time(&vap->va_mtime);
494 kflags |= NOTE_ATTRIB;
495 vclrflags(vp, VLASTWRITETS);
498 done:
500 * If a truncation occurred we must call chain_sync() now in order
501 * to trim the related data chains, otherwise a later expansion can
502 * cause havoc.
504 * If an extend occurred that changed the DIRECTDATA state, we must
505 * call hammer2_inode_chain_sync() now in order to prepare the inode's
506 * indirect block table.
508 * WARNING! This means we are making an adjustment to the inode's
509 * chain outside of sync/fsync, and not just to inode->meta, which
510 * may result in some consistency issues if a crash were to occur
511 * at just the wrong time.
513 if (ip->flags & HAMMER2_INODE_RESIZED)
514 hammer2_inode_chain_sync(ip);
517 * Cleanup.
519 hammer2_inode_unlock(ip);
520 hammer2_trans_done(ip->pmp, 1);
521 hammer2_knote(ip->vp, kflags);
523 return (error);
526 static
528 hammer2_vop_readdir(struct vop_readdir_args *ap)
530 hammer2_xop_readdir_t *xop;
531 hammer2_blockref_t bref;
532 hammer2_inode_t *ip;
533 hammer2_tid_t inum;
534 hammer2_key_t lkey;
535 struct uio *uio;
536 off_t *cookies;
537 off_t saveoff;
538 int cookie_index;
539 int ncookies;
540 int error;
541 int eofflag;
542 int r;
544 ip = VTOI(ap->a_vp);
545 uio = ap->a_uio;
546 saveoff = uio->uio_offset;
547 eofflag = 0;
548 error = 0;
551 * Setup directory entry cookies if requested
553 if (ap->a_ncookies) {
554 ncookies = uio->uio_resid / 16 + 1;
555 if (ncookies > 1024)
556 ncookies = 1024;
557 cookies = kmalloc(ncookies * sizeof(off_t), M_TEMP, M_WAITOK);
558 } else {
559 ncookies = -1;
560 cookies = NULL;
562 cookie_index = 0;
564 hammer2_inode_lock(ip, HAMMER2_RESOLVE_SHARED);
567 * Handle artificial entries. To ensure that only positive 64 bit
568 * quantities are returned to userland we always strip off bit 63.
569 * The hash code is designed such that codes 0x0000-0x7FFF are not
570 * used, allowing us to use these codes for artificial entries.
572 * Entry 0 is used for '.' and entry 1 is used for '..'. Do not
573 * allow '..' to cross the mount point into (e.g.) the super-root.
575 if (saveoff == 0) {
576 inum = ip->meta.inum & HAMMER2_DIRHASH_USERMSK;
577 r = vop_write_dirent(&error, uio, inum, DT_DIR, 1, ".");
578 if (r)
579 goto done;
580 if (cookies)
581 cookies[cookie_index] = saveoff;
582 ++saveoff;
583 ++cookie_index;
584 if (cookie_index == ncookies)
585 goto done;
588 if (saveoff == 1) {
590 * Be careful with lockorder when accessing ".."
592 * (ip is the current dir; the parent's inum comes from ip->meta.iparent).
594 inum = ip->meta.inum & HAMMER2_DIRHASH_USERMSK;
595 if (ip != ip->pmp->iroot)
596 inum = ip->meta.iparent & HAMMER2_DIRHASH_USERMSK;
597 r = vop_write_dirent(&error, uio, inum, DT_DIR, 2, "..");
598 if (r)
599 goto done;
600 if (cookies)
601 cookies[cookie_index] = saveoff;
602 ++saveoff;
603 ++cookie_index;
604 if (cookie_index == ncookies)
605 goto done;
608 lkey = saveoff | HAMMER2_DIRHASH_VISIBLE;
609 if (hammer2_debug & 0x0020)
610 kprintf("readdir: lkey %016jx\n", lkey);
611 if (error)
612 goto done;
615 * Use XOP for cluster scan.
617 * parent is the inode cluster, already locked for us. Don't
618 * double lock shared locks as this will screw up upgrades.
620 xop = hammer2_xop_alloc(ip, 0);
621 xop->lkey = lkey;
622 hammer2_xop_start(&xop->head, hammer2_xop_readdir);
624 for (;;) {
625 const hammer2_inode_data_t *ripdata;
626 const char *dname;
627 int dtype;
629 error = hammer2_xop_collect(&xop->head, 0);
630 error = hammer2_error_to_errno(error);
631 if (error) {
632 break;
634 if (cookie_index == ncookies)
635 break;
636 if (hammer2_debug & 0x0020)
637 kprintf("cluster chain %p %p\n",
638 xop->head.cluster.focus,
639 (xop->head.cluster.focus ?
640 xop->head.cluster.focus->data : (void *)-1));
641 hammer2_cluster_bref(&xop->head.cluster, &bref);
643 if (bref.type == HAMMER2_BREF_TYPE_INODE) {
644 ripdata = &hammer2_xop_gdata(&xop->head)->ipdata;
645 dtype = hammer2_get_dtype(ripdata->meta.type);
646 saveoff = bref.key & HAMMER2_DIRHASH_USERMSK;
647 r = vop_write_dirent(&error, uio,
648 ripdata->meta.inum &
649 HAMMER2_DIRHASH_USERMSK,
650 dtype,
651 ripdata->meta.name_len,
652 ripdata->filename);
653 hammer2_xop_pdata(&xop->head);
654 if (r)
655 break;
656 if (cookies)
657 cookies[cookie_index] = saveoff;
658 ++cookie_index;
659 } else if (bref.type == HAMMER2_BREF_TYPE_DIRENT) {
660 uint16_t namlen;
662 dtype = hammer2_get_dtype(bref.embed.dirent.type);
663 saveoff = bref.key & HAMMER2_DIRHASH_USERMSK;
664 namlen = bref.embed.dirent.namlen;
665 if (namlen <= sizeof(bref.check.buf)) {
666 dname = bref.check.buf;
667 } else {
668 dname = hammer2_xop_gdata(&xop->head)->buf;
670 r = vop_write_dirent(&error, uio,
671 bref.embed.dirent.inum, dtype,
672 namlen, dname);
673 if (namlen > sizeof(bref.check.buf))
674 hammer2_xop_pdata(&xop->head);
675 if (r)
676 break;
677 if (cookies)
678 cookies[cookie_index] = saveoff;
679 ++cookie_index;
680 } else {
681 /* XXX chain error */
682 kprintf("bad chain type readdir %d\n", bref.type);
685 hammer2_xop_retire(&xop->head, HAMMER2_XOPMASK_VOP);
686 if (error == ENOENT) {
687 error = 0;
688 eofflag = 1;
689 saveoff = (hammer2_key_t)-1;
690 } else {
691 saveoff = bref.key & HAMMER2_DIRHASH_USERMSK;
693 done:
694 hammer2_inode_unlock(ip);
695 if (ap->a_eofflag)
696 *ap->a_eofflag = eofflag;
697 if (hammer2_debug & 0x0020)
698 kprintf("readdir: done at %016jx\n", saveoff);
699 uio->uio_offset = saveoff & ~HAMMER2_DIRHASH_VISIBLE;
700 if (error && cookie_index == 0) {
701 if (cookies) {
702 kfree(cookies, M_TEMP);
703 *ap->a_ncookies = 0;
704 *ap->a_cookies = NULL;
706 } else {
707 if (cookies) {
708 *ap->a_ncookies = cookie_index;
709 *ap->a_cookies = cookies;
712 return (error);
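/*
 * Editor's sketch (not part of the original source): the offset encoding
 * used by hammer2_vop_readdir() above, reduced to a standalone round-trip
 * test.  Offsets 0 and 1 are the artificial "." and ".." entries; real
 * entries use the directory hash key with bit 63 stripped so userland
 * only ever sees positive offsets.  The two macro values below are
 * assumptions matching HAMMER2_DIRHASH_VISIBLE / HAMMER2_DIRHASH_USERMSK.
 */
#if 0
#include <assert.h>
#include <stdint.h>

#define H2_DIRHASH_VISIBLE	0x8000000000000000ULL	/* assumed value */
#define H2_DIRHASH_USERMSK	0x7FFFFFFFFFFFFFFFULL	/* assumed value */

int
main(void)
{
	uint64_t key = 0x8000123400456789ULL;		/* hypothetical bref.key */
	uint64_t saveoff = key & H2_DIRHASH_USERMSK;	/* strip bit 63 */

	/* the scan key restores the visible bit... */
	assert((saveoff | H2_DIRHASH_VISIBLE) == key);
	/* ...and the offset returned to userland never has it set */
	assert(((saveoff & ~H2_DIRHASH_VISIBLE) >> 63) == 0);
	return (0);
}
#endif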
716 * hammer2_vop_readlink { vp, uio, cred }
718 static
720 hammer2_vop_readlink(struct vop_readlink_args *ap)
722 struct vnode *vp;
723 hammer2_inode_t *ip;
724 int error;
726 vp = ap->a_vp;
727 if (vp->v_type != VLNK)
728 return (EINVAL);
729 ip = VTOI(vp);
731 error = hammer2_read_file(ip, ap->a_uio, 0);
732 return (error);
735 static
737 hammer2_vop_read(struct vop_read_args *ap)
739 struct vnode *vp;
740 hammer2_inode_t *ip;
741 struct uio *uio;
742 int error;
743 int seqcount;
744 int bigread;
747 * Read operations supported on this vnode?
749 vp = ap->a_vp;
750 if (vp->v_type != VREG)
751 return (EINVAL);
754 * Misc
756 ip = VTOI(vp);
757 uio = ap->a_uio;
758 error = 0;
760 seqcount = ap->a_ioflag >> 16;
761 bigread = (uio->uio_resid > 100 * 1024 * 1024);
763 error = hammer2_read_file(ip, uio, seqcount);
764 return (error);
767 static
769 hammer2_vop_write(struct vop_write_args *ap)
771 hammer2_inode_t *ip;
772 thread_t td;
773 struct vnode *vp;
774 struct uio *uio;
775 int error;
776 int seqcount;
777 int ioflag;
780 * Write operations supported on this vnode?
782 vp = ap->a_vp;
783 if (vp->v_type != VREG)
784 return (EINVAL);
787 * Misc
789 ip = VTOI(vp);
790 ioflag = ap->a_ioflag;
791 uio = ap->a_uio;
792 error = 0;
793 if (ip->pmp->ronly)
794 return (EROFS);
795 switch (hammer2_vfs_enospace(ip, uio->uio_resid, ap->a_cred)) {
796 case 2:
797 return (ENOSPC);
798 case 1:
799 ioflag |= IO_DIRECT; /* semi-synchronous */
800 /* fall through */
801 default:
802 break;
805 seqcount = ioflag >> 16;
808 * Check resource limit
810 if (uio->uio_resid > 0 && (td = uio->uio_td) != NULL && td->td_proc &&
811 uio->uio_offset + uio->uio_resid >
812 td->td_proc->p_rlimit[RLIMIT_FSIZE].rlim_cur) {
813 lwpsignal(td->td_proc, td->td_lwp, SIGXFSZ);
814 return (EFBIG);
818 * The transaction interlocks against flush initiations
819 * (note: but will run concurrently with the actual flush).
821 * To avoid deadlocking against the VM system, we must flag any
822 * transaction related to the buffer cache or other direct
823 * VM page manipulation.
825 if (uio->uio_segflg == UIO_NOCOPY) {
826 hammer2_trans_init(ip->pmp, HAMMER2_TRANS_BUFCACHE);
827 } else {
828 hammer2_pfs_memory_wait(ip, 0);
829 hammer2_trans_init(ip->pmp, 0);
831 error = hammer2_write_file(ip, uio, ioflag, seqcount);
832 hammer2_trans_done(ip->pmp, 1);
834 return (error);
838 * Perform read operations on a file or symlink given an UNLOCKED
839 * inode and uio.
841 * The passed ip is not locked; shared locks are taken internally.
843 static
845 hammer2_read_file(hammer2_inode_t *ip, struct uio *uio, int seqcount)
847 hammer2_off_t size;
848 struct buf *bp;
849 int error;
851 error = 0;
854 * UIO read loop.
856 * WARNING! Assumes that the kernel interlocks size changes at the
857 * vnode level.
859 hammer2_mtx_sh(&ip->lock);
860 hammer2_mtx_sh(&ip->truncate_lock);
861 size = ip->meta.size;
862 hammer2_mtx_unlock(&ip->lock);
864 while (uio->uio_resid > 0 && uio->uio_offset < size) {
865 hammer2_key_t lbase;
866 hammer2_key_t leof;
867 int lblksize;
868 int loff;
869 int n;
871 lblksize = hammer2_calc_logical(ip, uio->uio_offset,
872 &lbase, &leof);
874 #if 1
875 bp = NULL;
876 error = cluster_readx(ip->vp, leof, lbase, lblksize,
877 B_NOTMETA | B_KVABIO,
878 uio->uio_resid,
879 seqcount * MAXBSIZE,
880 &bp);
881 #else
882 if (uio->uio_segflg == UIO_NOCOPY) {
883 bp = getblk(ip->vp, lbase, lblksize,
884 GETBLK_BHEAVY | GETBLK_KVABIO, 0);
885 if (bp->b_flags & B_CACHE) {
886 int i;
887 int j = 0;
888 if (bp->b_xio.xio_npages != 16)
889 kprintf("NPAGES BAD\n");
890 for (i = 0; i < bp->b_xio.xio_npages; ++i) {
891 vm_page_t m;
892 m = bp->b_xio.xio_pages[i];
893 if (m == NULL || m->valid == 0) {
894 kprintf("bp %016jx %016jx pg %d inv",
895 lbase, leof, i);
896 if (m)
897 kprintf("m->object %p/%p", m->object, ip->vp->v_object);
898 kprintf("\n");
899 j = 1;
902 if (j)
903 kprintf("b_flags %08x, b_error %d\n", bp->b_flags, bp->b_error);
905 bqrelse(bp);
907 error = bread_kvabio(ip->vp, lbase, lblksize, &bp);
908 #endif
909 if (error) {
910 brelse(bp);
911 break;
913 bkvasync(bp);
914 loff = (int)(uio->uio_offset - lbase);
915 n = lblksize - loff;
916 if (n > uio->uio_resid)
917 n = uio->uio_resid;
918 if (n > size - uio->uio_offset)
919 n = (int)(size - uio->uio_offset);
920 bp->b_flags |= B_AGE;
921 uiomovebp(bp, (char *)bp->b_data + loff, n, uio);
922 bqrelse(bp);
924 hammer2_mtx_unlock(&ip->truncate_lock);
926 return (error);
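/*
 * Editor's sketch (not part of the original source): the per-iteration
 * copy size in the read loop above is a three-way clamp against the
 * logical block, the request, and EOF.  Names here are illustrative only.
 */
#if 0
#include <stdint.h>

static int
read_copy_len(int lblksize, int64_t uio_offset, int64_t lbase,
	      int64_t uio_resid, int64_t size)
{
	int loff = (int)(uio_offset - lbase);	/* offset within the block */
	int64_t n = lblksize - loff;		/* bytes left in the block */

	if (n > uio_resid)			/* don't exceed the request */
		n = uio_resid;
	if (n > size - uio_offset)		/* don't run past EOF */
		n = size - uio_offset;
	return ((int)n);
}
#endif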
930 * Write to the file represented by the inode via the logical buffer cache.
931 * The inode may represent a regular file or a symlink.
933 * The inode must not be locked.
935 static
937 hammer2_write_file(hammer2_inode_t *ip, struct uio *uio,
938 int ioflag, int seqcount)
940 hammer2_key_t old_eof;
941 hammer2_key_t new_eof;
942 struct buf *bp;
943 int kflags;
944 int error;
945 int modified;
948 * Setup if append
950 * WARNING! Assumes that the kernel interlocks size changes at the
951 * vnode level.
953 hammer2_mtx_ex(&ip->lock);
954 hammer2_mtx_sh(&ip->truncate_lock);
955 if (ioflag & IO_APPEND)
956 uio->uio_offset = ip->meta.size;
957 old_eof = ip->meta.size;
960 * Extend the file if necessary. If the write fails at some point
961 * we will truncate it back down to cover as much as we were able
962 * to write.
964 * Doing this now makes it easier to calculate buffer sizes in
965 * the loop.
967 kflags = 0;
968 error = 0;
969 modified = 0;
971 if (uio->uio_offset + uio->uio_resid > old_eof) {
972 new_eof = uio->uio_offset + uio->uio_resid;
973 modified = 1;
974 hammer2_extend_file(ip, new_eof);
975 kflags |= NOTE_EXTEND;
976 } else {
977 new_eof = old_eof;
979 hammer2_mtx_unlock(&ip->lock);
982 * UIO write loop
984 while (uio->uio_resid > 0) {
985 hammer2_key_t lbase;
986 int trivial;
987 int endofblk;
988 int lblksize;
989 int loff;
990 int n;
993 * Don't allow the buffer build to blow out the buffer
994 * cache.
996 if ((ioflag & IO_RECURSE) == 0)
997 bwillwrite(HAMMER2_PBUFSIZE);
1000 * This nominally tells us how much we can cluster and
1001 * what the logical buffer size needs to be. Currently
1002 * we don't try to cluster the write and just handle one
1003 * block at a time.
1005 lblksize = hammer2_calc_logical(ip, uio->uio_offset,
1006 &lbase, NULL);
1007 loff = (int)(uio->uio_offset - lbase);
1009 KKASSERT(lblksize <= 65536);
1012 * Calculate bytes to copy this transfer and whether the
1013 * copy completely covers the buffer or not.
1015 trivial = 0;
1016 n = lblksize - loff;
1017 if (n > uio->uio_resid) {
1018 n = uio->uio_resid;
1019 if (loff == lbase && uio->uio_offset + n == new_eof)
1020 trivial = 1;
1021 endofblk = 0;
1022 } else {
1023 if (loff == 0)
1024 trivial = 1;
1025 endofblk = 1;
1027 if (lbase >= new_eof)
1028 trivial = 1;
1031 * Get the buffer
1033 if (uio->uio_segflg == UIO_NOCOPY) {
1035 * Issuing a write with the same data backing the
1036 * buffer. Instantiate the buffer to collect the
1037 * backing vm pages, then read-in any missing bits.
1039 * This case is used by vop_stdputpages().
1041 bp = getblk(ip->vp, lbase, lblksize,
1042 GETBLK_BHEAVY | GETBLK_KVABIO, 0);
1043 if ((bp->b_flags & B_CACHE) == 0) {
1044 bqrelse(bp);
1045 error = bread_kvabio(ip->vp, lbase,
1046 lblksize, &bp);
1048 } else if (trivial) {
1050 * Even though we are entirely overwriting the buffer
1051 * we may still have to zero it out to avoid a
1052 * mmap/write visibility issue.
1054 bp = getblk(ip->vp, lbase, lblksize,
1055 GETBLK_BHEAVY | GETBLK_KVABIO, 0);
1056 if ((bp->b_flags & B_CACHE) == 0)
1057 vfs_bio_clrbuf(bp);
1058 } else {
1060 * Partial overwrite, read in any missing bits then
1061 * replace the portion being written.
1063 * (The strategy code will detect zero-fill physical
1064 * blocks for this case).
1066 error = bread_kvabio(ip->vp, lbase, lblksize, &bp);
1067 if (error == 0)
1068 bheavy(bp);
1071 if (error) {
1072 brelse(bp);
1073 break;
1077 * Ok, copy the data in
1079 bkvasync(bp);
1080 error = uiomovebp(bp, bp->b_data + loff, n, uio);
1081 kflags |= NOTE_WRITE;
1082 modified = 1;
1083 if (error) {
1084 brelse(bp);
1085 break;
1089 * WARNING: Pageout daemon will issue UIO_NOCOPY writes
1090 * with IO_SYNC or IO_ASYNC set. These writes
1091 * must be handled as the pageout daemon expects.
1093 * NOTE! H2 relies on cluster_write() here because it
1094 * cannot preallocate disk blocks at the logical
1095 * level due to not knowing what the compression
1096 * size will be at this time.
1098 * We must use cluster_write() here and we depend
1099 * on the write-behind feature to flush buffers
1100 * appropriately. If we let the buffer daemons do
1101 * it the block allocations will be all over the
1102 * map.
1104 if (ioflag & IO_SYNC) {
1105 bwrite(bp);
1106 } else if ((ioflag & IO_DIRECT) && endofblk) {
1107 bawrite(bp);
1108 } else if (ioflag & IO_ASYNC) {
1109 bawrite(bp);
1110 } else if (ip->vp->v_mount->mnt_flag & MNT_NOCLUSTERW) {
1111 bdwrite(bp);
1112 } else {
1113 #if 1
1114 bp->b_flags |= B_CLUSTEROK;
1115 cluster_write(bp, new_eof, lblksize, seqcount);
1116 #else
1117 bp->b_flags |= B_CLUSTEROK;
1118 bdwrite(bp);
1119 #endif
1124 * Cleanup. If we extended the file EOF but failed to write through,
1125 * the entire write is a failure and we have to back up.
1127 if (error && new_eof != old_eof) {
1128 hammer2_mtx_unlock(&ip->truncate_lock);
1129 hammer2_mtx_ex(&ip->lock);
1130 hammer2_mtx_ex(&ip->truncate_lock);
1131 hammer2_truncate_file(ip, old_eof);
1132 if (ip->flags & HAMMER2_INODE_MODIFIED)
1133 hammer2_inode_chain_sync(ip);
1134 hammer2_mtx_unlock(&ip->lock);
1135 } else if (modified) {
1136 struct vnode *vp = ip->vp;
1138 hammer2_mtx_ex(&ip->lock);
1139 hammer2_inode_modify(ip);
1140 if (uio->uio_segflg == UIO_NOCOPY) {
1141 if (vp->v_flag & VLASTWRITETS) {
1142 ip->meta.mtime =
1143 (unsigned long)vp->v_lastwrite_ts.tv_sec *
1144 1000000 +
1145 vp->v_lastwrite_ts.tv_nsec / 1000;
1147 } else {
1148 hammer2_update_time(&ip->meta.mtime);
1149 vclrflags(vp, VLASTWRITETS);
1152 #if 0
1154 * REMOVED - handled by hammer2_extend_file(). Do not issue
1155 * a chain_sync() outside of a sync/fsync except for DIRECTDATA
1156 * state changes.
1158 * Under normal conditions we only issue a chain_sync if
1159 * the inode's DIRECTDATA state changed.
1161 if (ip->flags & HAMMER2_INODE_RESIZED)
1162 hammer2_inode_chain_sync(ip);
1163 #endif
1164 hammer2_mtx_unlock(&ip->lock);
1165 hammer2_knote(ip->vp, kflags);
1167 hammer2_trans_assert_strategy(ip->pmp);
1168 hammer2_mtx_unlock(&ip->truncate_lock);
1170 return error;
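/*
 * Editor's sketch (not part of the original source): the trivial/endofblk
 * determination from the write loop above, restated as a pure function.
 * 'trivial' means the copy fully covers the buffer (no read-before-write
 * needed); 'endofblk' means the copy runs to the end of the logical
 * block.  Names are illustrative only.
 */
#if 0
#include <stdint.h>

static int
write_copy_len(int lblksize, int loff, int64_t uio_offset, int64_t lbase,
	       int64_t uio_resid, int64_t new_eof,
	       int *trivialp, int *endofblkp)
{
	int n = lblksize - loff;

	*trivialp = 0;
	if (n > uio_resid) {
		n = (int)uio_resid;
		if (loff == lbase && uio_offset + n == new_eof)
			*trivialp = 1;
		*endofblkp = 0;
	} else {
		if (loff == 0)
			*trivialp = 1;
		*endofblkp = 1;
	}
	if (lbase >= new_eof)
		*trivialp = 1;
	return (n);
}
#endif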
1174 * Truncate the size of a file. The caller must hold the inode lock.
1176 * We must unconditionally set HAMMER2_INODE_RESIZED to properly
1177 * ensure that any on-media data beyond the new file EOF has been destroyed.
1179 * WARNING: nvtruncbuf() can only be safely called without the inode lock
1180 * held due to the way our write thread works. If the truncation
1181 * occurs in the middle of a buffer, nvtruncbuf() is responsible
1182 * for dirtying that buffer and zeroing out trailing bytes.
1184 * WARNING! Assumes that the kernel interlocks size changes at the
1185 * vnode level.
1187 * WARNING! Caller assumes responsibility for removing dead blocks
1188 * if INODE_RESIZED is set.
1190 static
1191 void
1192 hammer2_truncate_file(hammer2_inode_t *ip, hammer2_key_t nsize)
1194 hammer2_key_t lbase;
1195 int nblksize;
1197 hammer2_mtx_unlock(&ip->lock);
1198 if (ip->vp) {
1199 nblksize = hammer2_calc_logical(ip, nsize, &lbase, NULL);
1200 nvtruncbuf(ip->vp, nsize,
1201 nblksize, (int)nsize & (nblksize - 1),
1204 hammer2_mtx_ex(&ip->lock);
1205 KKASSERT((ip->flags & HAMMER2_INODE_RESIZED) == 0);
1206 ip->osize = ip->meta.size;
1207 ip->meta.size = nsize;
1208 atomic_set_int(&ip->flags, HAMMER2_INODE_RESIZED);
1209 hammer2_inode_modify(ip);
1213 * Extend the size of a file. The caller must hold the inode lock.
1215 * Even though the file size is changing, we do not have to set the
1216 * INODE_RESIZED bit unless the file size crosses the EMBEDDED_BYTES
1217 * boundary. When this occurs a hammer2_inode_chain_sync() is required
1218 * to prepare the inode cluster's indirect block table, otherwise
1219 * async execution of the strategy code will implode on us.
1221 * WARNING! Assumes that the kernel interlocks size changes at the
1222 * vnode level.
1224 * WARNING! Caller assumes responsibility for transitioning out
1225 * of the inode DIRECTDATA mode if INODE_RESIZED is set.
1227 static
1228 void
1229 hammer2_extend_file(hammer2_inode_t *ip, hammer2_key_t nsize)
1231 hammer2_key_t lbase;
1232 hammer2_key_t osize;
1233 int oblksize;
1234 int nblksize;
1236 KKASSERT((ip->flags & HAMMER2_INODE_RESIZED) == 0);
1237 hammer2_inode_modify(ip);
1238 osize = ip->meta.size;
1239 ip->osize = osize;
1240 ip->meta.size = nsize;
1243 * We must issue a chain_sync() when the DIRECTDATA state changes
1244 * to prevent confusion between the flush code and the in-memory
1245 * state. This is not perfect because we are doing it outside of
1246 * a sync/fsync operation, so it might not be fully synchronized
1247 * with the meta-data topology flush.
1249 if (osize <= HAMMER2_EMBEDDED_BYTES && nsize > HAMMER2_EMBEDDED_BYTES) {
1250 atomic_set_int(&ip->flags, HAMMER2_INODE_RESIZED);
1251 hammer2_inode_chain_sync(ip);
1254 hammer2_mtx_unlock(&ip->lock);
1255 if (ip->vp) {
1256 oblksize = hammer2_calc_logical(ip, osize, &lbase, NULL);
1257 nblksize = hammer2_calc_logical(ip, nsize, &lbase, NULL);
1258 nvextendbuf(ip->vp,
1259 osize, nsize,
1260 oblksize, nblksize,
1261 -1, -1, 0);
1263 hammer2_mtx_ex(&ip->lock);
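/*
 * Editor's sketch (not part of the original source): the only extend case
 * that forces an immediate chain_sync is the one that grows the file
 * across the embedded-data boundary, flipping the inode out of DIRECTDATA
 * mode.  The 512-byte value is an assumption matching
 * HAMMER2_EMBEDDED_BYTES.
 */
#if 0
#include <stdint.h>

#define H2_EMBEDDED_BYTES	512		/* assumed value */

static int
extend_crosses_embedded(uint64_t osize, uint64_t nsize)
{
	return (osize <= H2_EMBEDDED_BYTES && nsize > H2_EMBEDDED_BYTES);
}
#endif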
1266 static
1268 hammer2_vop_nresolve(struct vop_nresolve_args *ap)
1270 hammer2_xop_nresolve_t *xop;
1271 hammer2_inode_t *ip;
1272 hammer2_inode_t *dip;
1273 struct namecache *ncp;
1274 struct vnode *vp;
1275 int error;
1277 dip = VTOI(ap->a_dvp);
1278 xop = hammer2_xop_alloc(dip, 0);
1280 ncp = ap->a_nch->ncp;
1281 hammer2_xop_setname(&xop->head, ncp->nc_name, ncp->nc_nlen);
1284 * Note: In DragonFly the kernel handles '.' and '..'.
1286 hammer2_inode_lock(dip, HAMMER2_RESOLVE_SHARED);
1287 hammer2_xop_start(&xop->head, hammer2_xop_nresolve);
1289 error = hammer2_xop_collect(&xop->head, 0);
1290 error = hammer2_error_to_errno(error);
1291 if (error) {
1292 ip = NULL;
1293 } else {
1294 ip = hammer2_inode_get(dip->pmp, dip, &xop->head, -1);
1296 hammer2_inode_unlock(dip);
1299 * Acquire the related vnode
1301 * NOTE: For error processing, only ENOENT resolves the namecache
1302 * entry to NULL, otherwise we just return the error and
1303 * leave the namecache unresolved.
1305 * NOTE: multiple hammer2_inode structures can be aliased to the
1306 * same chain element, for example for hardlinks. This
1307 * use case does not 'reattach' inode associations that
1308 * might already exist, but always allocates a new one.
1310 * WARNING: inode structure is locked exclusively via inode_get
1311 * but chain was locked shared. inode_unlock()
1312 * will handle it properly.
1314 if (ip) {
1315 vp = hammer2_igetv(ip, &error); /* error set to UNIX error */
1316 if (error == 0) {
1317 vn_unlock(vp);
1318 cache_setvp(ap->a_nch, vp);
1319 } else if (error == ENOENT) {
1320 cache_setvp(ap->a_nch, NULL);
1322 hammer2_inode_unlock(ip);
1325 * The vp should not be released until after we've disposed
1326 * of our locks, because it might cause vop_inactive() to
1327 * be called.
1329 if (vp)
1330 vrele(vp);
1331 } else {
1332 error = ENOENT;
1333 cache_setvp(ap->a_nch, NULL);
1335 hammer2_xop_retire(&xop->head, HAMMER2_XOPMASK_VOP);
1336 KASSERT(error || ap->a_nch->ncp->nc_vp != NULL,
1337 ("resolve error %d/%p ap %p\n",
1338 error, ap->a_nch->ncp->nc_vp, ap));
1340 return error;
1343 static
1345 hammer2_vop_nlookupdotdot(struct vop_nlookupdotdot_args *ap)
1347 hammer2_inode_t *dip;
1348 hammer2_tid_t inum;
1349 int error;
1351 dip = VTOI(ap->a_dvp);
1352 inum = dip->meta.iparent;
1353 *ap->a_vpp = NULL;
1355 if (inum) {
1356 error = hammer2_vfs_vget(ap->a_dvp->v_mount, NULL,
1357 inum, ap->a_vpp);
1358 } else {
1359 error = ENOENT;
1361 return error;
1364 static
1366 hammer2_vop_nmkdir(struct vop_nmkdir_args *ap)
1368 hammer2_inode_t *dip;
1369 hammer2_inode_t *nip;
1370 struct namecache *ncp;
1371 const uint8_t *name;
1372 size_t name_len;
1373 hammer2_tid_t inum;
1374 int error;
1376 dip = VTOI(ap->a_dvp);
1377 if (dip->pmp->ronly)
1378 return (EROFS);
1379 if (hammer2_vfs_enospace(dip, 0, ap->a_cred) > 1)
1380 return (ENOSPC);
1382 ncp = ap->a_nch->ncp;
1383 name = ncp->nc_name;
1384 name_len = ncp->nc_nlen;
1386 hammer2_pfs_memory_wait(dip, 1);
1387 hammer2_trans_init(dip->pmp, 0);
1389 inum = hammer2_trans_newinum(dip->pmp);
1392 * Create the actual inode as a hidden file in the iroot, then
1393 * create the directory entry. The creation of the actual inode
1394 * sets its nlinks to 1 which is the value we desire.
1396 nip = hammer2_inode_create(dip->pmp->iroot, dip, ap->a_vap, ap->a_cred,
1397 NULL, 0, inum,
1398 inum, 0, 0,
1399 0, &error);
1400 if (error) {
1401 error = hammer2_error_to_errno(error);
1402 } else {
1403 error = hammer2_dirent_create(dip, name, name_len,
1404 nip->meta.inum, nip->meta.type);
1405 /* returns UNIX error code */
1407 if (error) {
1408 if (nip) {
1409 hammer2_inode_unlink_finisher(nip, 0);
1410 hammer2_inode_unlock(nip);
1411 nip = NULL;
1413 *ap->a_vpp = NULL;
1414 } else {
1415 *ap->a_vpp = hammer2_igetv(nip, &error);
1416 hammer2_inode_unlock(nip);
1420 * Update dip's mtime
1422 * We can use a shared inode lock and allow the meta.mtime update
1423 * SMP race. hammer2_inode_modify() is MPSAFE w/a shared lock.
1425 if (error == 0) {
1426 uint64_t mtime;
1428 hammer2_inode_lock(dip, HAMMER2_RESOLVE_SHARED);
1429 hammer2_update_time(&mtime);
1430 hammer2_inode_modify(dip);
1431 dip->meta.mtime = mtime;
1432 hammer2_inode_unlock(dip);
1435 hammer2_trans_done(dip->pmp, 1);
1437 if (error == 0) {
1438 cache_setunresolved(ap->a_nch);
1439 cache_setvp(ap->a_nch, *ap->a_vpp);
1440 hammer2_knote(ap->a_dvp, NOTE_WRITE | NOTE_LINK);
1442 return error;
1445 static
1447 hammer2_vop_open(struct vop_open_args *ap)
1449 return vop_stdopen(ap);
1453 * hammer2_vop_advlock { vp, id, op, fl, flags }
1455 static
1457 hammer2_vop_advlock(struct vop_advlock_args *ap)
1459 hammer2_inode_t *ip = VTOI(ap->a_vp);
1460 hammer2_off_t size;
1462 size = ip->meta.size;
1463 return (lf_advlock(ap, &ip->advlock, size));
1466 static
1468 hammer2_vop_close(struct vop_close_args *ap)
1470 return vop_stdclose(ap);
1474 * hammer2_vop_nlink { nch, dvp, vp, cred }
1476 * Create a hardlink from (vp) to {dvp, nch}.
1478 static
1480 hammer2_vop_nlink(struct vop_nlink_args *ap)
1482 hammer2_inode_t *tdip; /* target directory to create link in */
1483 hammer2_inode_t *ip; /* inode we are hardlinking to */
1484 struct namecache *ncp;
1485 const uint8_t *name;
1486 size_t name_len;
1487 int error;
1489 if (ap->a_dvp->v_mount != ap->a_vp->v_mount)
1490 return(EXDEV);
1492 tdip = VTOI(ap->a_dvp);
1493 if (tdip->pmp->ronly)
1494 return (EROFS);
1495 if (hammer2_vfs_enospace(tdip, 0, ap->a_cred) > 1)
1496 return (ENOSPC);
1498 ncp = ap->a_nch->ncp;
1499 name = ncp->nc_name;
1500 name_len = ncp->nc_nlen;
1503 * ip represents the file being hardlinked. The file could be a
1504 * normal file or a hardlink target if it has already been hardlinked.
1505 * (with the new semantics, it will almost always be a hardlink
1506 * target).
1508 * Bump nlinks and potentially also create or move the hardlink
1509 * target in the parent directory common to (ip) and (tdip). The
1510 * consolidation code can modify ip->cluster. The returned cluster
1511 * is locked.
1513 ip = VTOI(ap->a_vp);
1514 KASSERT(ip->pmp, ("ip->pmp is NULL %p %p", ip, ip->pmp));
1515 hammer2_pfs_memory_wait(ip, 0);
1516 hammer2_trans_init(ip->pmp, 0);
1519 * Target should be an indexed inode or there's no way we will ever
1520 * be able to find it!
1522 KKASSERT((ip->meta.name_key & HAMMER2_DIRHASH_VISIBLE) == 0);
1524 error = 0;
1527 * Can return NULL and error == EXDEV if the common parent
1528 * crosses a directory with the xlink flag set.
1530 hammer2_inode_lock(tdip, 0);
1531 hammer2_inode_lock(ip, 0);
1534 * Create the directory entry and bump nlinks.
1536 if (error == 0) {
1537 error = hammer2_dirent_create(tdip, name, name_len,
1538 ip->meta.inum, ip->meta.type);
1539 hammer2_inode_modify(ip);
1540 ++ip->meta.nlinks;
1542 if (error == 0) {
1544 * Update dip's mtime
1546 uint64_t mtime;
1548 hammer2_update_time(&mtime);
1549 hammer2_inode_modify(tdip);
1550 tdip->meta.mtime = mtime;
1552 cache_setunresolved(ap->a_nch);
1553 cache_setvp(ap->a_nch, ap->a_vp);
1555 hammer2_inode_unlock(ip);
1556 hammer2_inode_unlock(tdip);
1558 hammer2_trans_done(ip->pmp, 1);
1559 hammer2_knote(ap->a_vp, NOTE_LINK);
1560 hammer2_knote(ap->a_dvp, NOTE_WRITE);
1562 return error;
1566 * hammer2_vop_ncreate { nch, dvp, vpp, cred, vap }
1568 * The operating system has already ensured that the directory entry
1569 * does not exist and done all appropriate namespace locking.
1571 static
1573 hammer2_vop_ncreate(struct vop_ncreate_args *ap)
1575 hammer2_inode_t *dip;
1576 hammer2_inode_t *nip;
1577 struct namecache *ncp;
1578 const uint8_t *name;
1579 size_t name_len;
1580 hammer2_tid_t inum;
1581 int error;
1583 dip = VTOI(ap->a_dvp);
1584 if (dip->pmp->ronly)
1585 return (EROFS);
1586 if (hammer2_vfs_enospace(dip, 0, ap->a_cred) > 1)
1587 return (ENOSPC);
1589 ncp = ap->a_nch->ncp;
1590 name = ncp->nc_name;
1591 name_len = ncp->nc_nlen;
1592 hammer2_pfs_memory_wait(dip, 1);
1593 hammer2_trans_init(dip->pmp, 0);
1595 inum = hammer2_trans_newinum(dip->pmp);
1598 * Create the actual inode as a hidden file in the iroot, then
1599 * create the directory entry. The creation of the actual inode
1600 * sets its nlinks to 1 which is the value we desire.
1602 nip = hammer2_inode_create(dip->pmp->iroot, dip, ap->a_vap, ap->a_cred,
1603 NULL, 0, inum,
1604 inum, 0, 0,
1605 0, &error);
1607 if (error) {
1608 error = hammer2_error_to_errno(error);
1609 } else {
1610 error = hammer2_dirent_create(dip, name, name_len,
1611 nip->meta.inum, nip->meta.type);
1613 if (error) {
1614 if (nip) {
1615 hammer2_inode_unlink_finisher(nip, 0);
1616 hammer2_inode_unlock(nip);
1617 nip = NULL;
1619 *ap->a_vpp = NULL;
1620 } else {
1621 *ap->a_vpp = hammer2_igetv(nip, &error);
1622 hammer2_inode_unlock(nip);
1626 * Update dip's mtime
1628 if (error == 0) {
1629 uint64_t mtime;
1631 hammer2_inode_lock(dip, HAMMER2_RESOLVE_SHARED);
1632 hammer2_update_time(&mtime);
1633 hammer2_inode_modify(dip);
1634 dip->meta.mtime = mtime;
1635 hammer2_inode_unlock(dip);
1638 hammer2_trans_done(dip->pmp, 1);
1640 if (error == 0) {
1641 cache_setunresolved(ap->a_nch);
1642 cache_setvp(ap->a_nch, *ap->a_vpp);
1643 hammer2_knote(ap->a_dvp, NOTE_WRITE);
1645 return error;
1649 * Make a device node (typically a fifo)
1651 static
1653 hammer2_vop_nmknod(struct vop_nmknod_args *ap)
1655 hammer2_inode_t *dip;
1656 hammer2_inode_t *nip;
1657 struct namecache *ncp;
1658 const uint8_t *name;
1659 size_t name_len;
1660 hammer2_tid_t inum;
1661 int error;
1663 dip = VTOI(ap->a_dvp);
1664 if (dip->pmp->ronly)
1665 return (EROFS);
1666 if (hammer2_vfs_enospace(dip, 0, ap->a_cred) > 1)
1667 return (ENOSPC);
1669 ncp = ap->a_nch->ncp;
1670 name = ncp->nc_name;
1671 name_len = ncp->nc_nlen;
1672 hammer2_pfs_memory_wait(dip, 1);
1673 hammer2_trans_init(dip->pmp, 0);
1676 * Create the device inode and then create the directory entry.
1678 inum = hammer2_trans_newinum(dip->pmp);
1679 nip = hammer2_inode_create(dip->pmp->iroot, dip, ap->a_vap, ap->a_cred,
1680 NULL, 0, inum,
1681 inum, 0, 0,
1682 0, &error);
1683 if (error == 0) {
1684 error = hammer2_dirent_create(dip, name, name_len,
1685 nip->meta.inum, nip->meta.type);
1687 if (error) {
1688 if (nip) {
1689 hammer2_inode_unlink_finisher(nip, 0);
1690 hammer2_inode_unlock(nip);
1691 nip = NULL;
1693 *ap->a_vpp = NULL;
1694 } else {
1695 *ap->a_vpp = hammer2_igetv(nip, &error);
1696 hammer2_inode_unlock(nip);
1700 * Update dip's mtime
1702 if (error == 0) {
1703 uint64_t mtime;
1705 hammer2_inode_lock(dip, HAMMER2_RESOLVE_SHARED);
1706 hammer2_update_time(&mtime);
1707 hammer2_inode_modify(dip);
1708 dip->meta.mtime = mtime;
1709 hammer2_inode_unlock(dip);
1712 hammer2_trans_done(dip->pmp, 1);
1714 if (error == 0) {
1715 cache_setunresolved(ap->a_nch);
1716 cache_setvp(ap->a_nch, *ap->a_vpp);
1717 hammer2_knote(ap->a_dvp, NOTE_WRITE);
1719 return error;
1723 * hammer2_vop_nsymlink { nch, dvp, vpp, cred, vap, target }
1725 static
1727 hammer2_vop_nsymlink(struct vop_nsymlink_args *ap)
1729 hammer2_inode_t *dip;
1730 hammer2_inode_t *nip;
1731 struct namecache *ncp;
1732 const uint8_t *name;
1733 size_t name_len;
1734 hammer2_tid_t inum;
1735 int error;
1737 dip = VTOI(ap->a_dvp);
1738 if (dip->pmp->ronly)
1739 return (EROFS);
1740 if (hammer2_vfs_enospace(dip, 0, ap->a_cred) > 1)
1741 return (ENOSPC);
1743 ncp = ap->a_nch->ncp;
1744 name = ncp->nc_name;
1745 name_len = ncp->nc_nlen;
1746 hammer2_pfs_memory_wait(dip, 1);
1747 hammer2_trans_init(dip->pmp, 0);
1749 ap->a_vap->va_type = VLNK; /* enforce type */
1752 * Create the softlink as an inode and then create the directory
1753 * entry.
1755 inum = hammer2_trans_newinum(dip->pmp);
1757 nip = hammer2_inode_create(dip->pmp->iroot, dip, ap->a_vap, ap->a_cred,
1758 NULL, 0, inum,
1759 inum, 0, 0,
1760 0, &error);
1761 if (error == 0) {
1762 error = hammer2_dirent_create(dip, name, name_len,
1763 nip->meta.inum, nip->meta.type);
1765 if (error) {
1766 if (nip) {
1767 hammer2_inode_unlink_finisher(nip, 0);
1768 hammer2_inode_unlock(nip);
1769 nip = NULL;
1771 *ap->a_vpp = NULL;
1772 hammer2_trans_done(dip->pmp, 1);
1773 return error;
1775 *ap->a_vpp = hammer2_igetv(nip, &error);
1778 * Build the softlink (~like file data) and finalize the namecache.
1780 if (error == 0) {
1781 size_t bytes;
1782 struct uio auio;
1783 struct iovec aiov;
1785 bytes = strlen(ap->a_target);
1787 hammer2_inode_unlock(nip);
1788 bzero(&auio, sizeof(auio));
1789 bzero(&aiov, sizeof(aiov));
1790 auio.uio_iov = &aiov;
1791 auio.uio_segflg = UIO_SYSSPACE;
1792 auio.uio_rw = UIO_WRITE;
1793 auio.uio_resid = bytes;
1794 auio.uio_iovcnt = 1;
1795 auio.uio_td = curthread;
1796 aiov.iov_base = ap->a_target;
1797 aiov.iov_len = bytes;
1798 error = hammer2_write_file(nip, &auio, IO_APPEND, 0);
1799 /* XXX handle error */
1800 error = 0;
1801 } else {
1802 hammer2_inode_unlock(nip);
1806 * Update dip's mtime
1808 if (error == 0) {
1809 uint64_t mtime;
1811 hammer2_inode_lock(dip, HAMMER2_RESOLVE_SHARED);
1812 hammer2_update_time(&mtime);
1813 hammer2_inode_modify(dip);
1814 dip->meta.mtime = mtime;
1815 hammer2_inode_unlock(dip);
1818 hammer2_trans_done(dip->pmp, 1);
1821 * Finalize namecache
1823 if (error == 0) {
1824 cache_setunresolved(ap->a_nch);
1825 cache_setvp(ap->a_nch, *ap->a_vpp);
1826 hammer2_knote(ap->a_dvp, NOTE_WRITE);
1828 return error;
1832 * hammer2_vop_nremove { nch, dvp, cred }
1834 static
1836 hammer2_vop_nremove(struct vop_nremove_args *ap)
1838 hammer2_xop_unlink_t *xop;
1839 hammer2_inode_t *dip;
1840 hammer2_inode_t *ip;
1841 struct namecache *ncp;
1842 int error;
1843 int isopen;
1845 dip = VTOI(ap->a_dvp);
1846 if (dip->pmp->ronly)
1847 return (EROFS);
1848 #if 0
1849 /* allow removals, but expect the user to also bulkfree */
1850 if (hammer2_vfs_enospace(dip, 0, ap->a_cred) > 1)
1851 return (ENOSPC);
1852 #endif
1854 ncp = ap->a_nch->ncp;
1856 hammer2_pfs_memory_wait(dip, 1);
1857 hammer2_trans_init(dip->pmp, 0);
1858 hammer2_inode_lock(dip, 0);
1861 * The unlink XOP unlinks the path from the directory and
1862 * locates and returns the cluster associated with the real inode.
1863 * We have to handle nlinks here on the frontend.
1865 xop = hammer2_xop_alloc(dip, HAMMER2_XOP_MODIFYING);
1866 hammer2_xop_setname(&xop->head, ncp->nc_name, ncp->nc_nlen);
1869 * The namecache entry is locked so nobody can use this namespace.
1870 * Calculate isopen to determine if this namespace has an open vp
1871 * associated with it and resolve the vp only if it does.
1873 * We try to avoid resolving the vnode if nobody has it open, but
1874 * note that the test is via this namespace only.
1876 isopen = cache_isopen(ap->a_nch);
1877 xop->isdir = 0;
1878 xop->dopermanent = 0;
1879 hammer2_xop_start(&xop->head, hammer2_xop_unlink);
1882 * Collect the real inode and adjust nlinks, destroy the real
1883 * inode if nlinks transitions to 0 and it was the real inode
1884 * (else it has already been removed).
1886 error = hammer2_xop_collect(&xop->head, 0);
1887 error = hammer2_error_to_errno(error);
1888 hammer2_inode_unlock(dip);
1890 if (error == 0) {
1891 ip = hammer2_inode_get(dip->pmp, dip, &xop->head, -1);
1892 hammer2_xop_retire(&xop->head, HAMMER2_XOPMASK_VOP);
1893 if (ip) {
1894 hammer2_inode_unlink_finisher(ip, isopen);
1895 hammer2_inode_unlock(ip);
1897 } else {
1898 hammer2_xop_retire(&xop->head, HAMMER2_XOPMASK_VOP);
1902 * Update dip's mtime
1904 if (error == 0) {
1905 uint64_t mtime;
1907 hammer2_inode_lock(dip, HAMMER2_RESOLVE_SHARED);
1908 hammer2_update_time(&mtime);
1909 hammer2_inode_modify(dip);
1910 dip->meta.mtime = mtime;
1911 hammer2_inode_unlock(dip);
1914 hammer2_trans_done(dip->pmp, 1);
1915 if (error == 0) {
1916 cache_unlink(ap->a_nch);
1917 hammer2_knote(ap->a_dvp, NOTE_WRITE);
1919 return (error);
1923 * hammer2_vop_nrmdir { nch, dvp, cred }
1925 static
1927 hammer2_vop_nrmdir(struct vop_nrmdir_args *ap)
1929 hammer2_xop_unlink_t *xop;
1930 hammer2_inode_t *dip;
1931 hammer2_inode_t *ip;
1932 struct namecache *ncp;
1933 int isopen;
1934 int error;
1936 dip = VTOI(ap->a_dvp);
1937 if (dip->pmp->ronly)
1938 return (EROFS);
1939 #if 0
1940 /* allow removals, but expect the user to also bulkfree */
1941 if (hammer2_vfs_enospace(dip, 0, ap->a_cred) > 1)
1942 return (ENOSPC);
1943 #endif
1945 hammer2_pfs_memory_wait(dip, 1);
1946 hammer2_trans_init(dip->pmp, 0);
1947 hammer2_inode_lock(dip, 0);
1949 xop = hammer2_xop_alloc(dip, HAMMER2_XOP_MODIFYING);
1951 ncp = ap->a_nch->ncp;
1952 hammer2_xop_setname(&xop->head, ncp->nc_name, ncp->nc_nlen);
1953 isopen = cache_isopen(ap->a_nch);
1954 xop->isdir = 1;
1955 xop->dopermanent = 0;
1956 hammer2_xop_start(&xop->head, hammer2_xop_unlink);
1959 * Collect the real inode and adjust nlinks, destroy the real
1960 * inode if nlinks transitions to 0 and it was the real inode
1961 * (else it has already been removed).
1963 error = hammer2_xop_collect(&xop->head, 0);
1964 error = hammer2_error_to_errno(error);
1965 hammer2_inode_unlock(dip);
1967 if (error == 0) {
1968 ip = hammer2_inode_get(dip->pmp, dip, &xop->head, -1);
1969 hammer2_xop_retire(&xop->head, HAMMER2_XOPMASK_VOP);
1970 if (ip) {
1971 hammer2_inode_unlink_finisher(ip, isopen);
1972 hammer2_inode_unlock(ip);
1974 } else {
1975 hammer2_xop_retire(&xop->head, HAMMER2_XOPMASK_VOP);
1979 * Update dip's mtime
1981 if (error == 0) {
1982 uint64_t mtime;
1984 hammer2_inode_lock(dip, HAMMER2_RESOLVE_SHARED);
1985 hammer2_update_time(&mtime);
1986 hammer2_inode_modify(dip);
1987 dip->meta.mtime = mtime;
1988 hammer2_inode_unlock(dip);
1991 hammer2_trans_done(dip->pmp, 1);
1992 if (error == 0) {
1993 cache_unlink(ap->a_nch);
1994 hammer2_knote(ap->a_dvp, NOTE_WRITE | NOTE_LINK);
1996 return (error);
2000 * hammer2_vop_nrename { fnch, tnch, fdvp, tdvp, cred }
2002 static
2004 hammer2_vop_nrename(struct vop_nrename_args *ap)
2006 struct namecache *fncp;
2007 struct namecache *tncp;
2008 hammer2_inode_t *fdip; /* source directory */
2009 hammer2_inode_t *tdip; /* target directory */
2010 hammer2_inode_t *ip; /* file being renamed */
2011 hammer2_inode_t *tip; /* replaced target during rename or NULL */
2012 const uint8_t *fname;
2013 size_t fname_len;
2014 const uint8_t *tname;
2015 size_t tname_len;
2016 int error;
2017 int update_tdip;
2018 int update_fdip;
2019 hammer2_key_t tlhc;
2021 if (ap->a_fdvp->v_mount != ap->a_tdvp->v_mount)
2022 return(EXDEV);
2023 if (ap->a_fdvp->v_mount != ap->a_fnch->ncp->nc_vp->v_mount)
2024 return(EXDEV);
2026 fdip = VTOI(ap->a_fdvp); /* source directory */
2027 tdip = VTOI(ap->a_tdvp); /* target directory */
2029 if (fdip->pmp->ronly)
2030 return (EROFS);
2031 if (hammer2_vfs_enospace(fdip, 0, ap->a_cred) > 1)
2032 return (ENOSPC);
2034 fncp = ap->a_fnch->ncp; /* entry name in source */
2035 fname = fncp->nc_name;
2036 fname_len = fncp->nc_nlen;
2038 tncp = ap->a_tnch->ncp; /* entry name in target */
2039 tname = tncp->nc_name;
2040 tname_len = tncp->nc_nlen;
2042 hammer2_pfs_memory_wait(tdip, 0);
2043 hammer2_trans_init(tdip->pmp, 0);
2045 update_tdip = 0;
2046 update_fdip = 0;
2048 ip = VTOI(fncp->nc_vp);
2049 hammer2_inode_ref(ip); /* extra ref */
2052 * Lookup the target name to determine if a directory entry
2053 * is being overwritten. We only hold related inode locks
2054 * temporarily; the operating system is expected to protect
2055 * against rename races.
2057 tip = tncp->nc_vp ? VTOI(tncp->nc_vp) : NULL;
2058 if (tip)
2059 hammer2_inode_ref(tip); /* extra ref */
2062 * Can return NULL and error == EXDEV if the common parent
2063 * crosses a directory with the xlink flag set.
2065 * For now try to avoid deadlocks with a simple pointer address
2066 * test. (tip) can be NULL.
2068 error = 0;
2069 if (fdip <= tdip) {
2070 hammer2_inode_lock(fdip, 0);
2071 hammer2_inode_lock(tdip, 0);
2072 } else {
2073 hammer2_inode_lock(tdip, 0);
2074 hammer2_inode_lock(fdip, 0);
2076 if (tip) {
2077 if (ip <= tip) {
2078 hammer2_inode_lock(ip, 0);
2079 hammer2_inode_lock(tip, 0);
2080 } else {
2081 hammer2_inode_lock(tip, 0);
2082 hammer2_inode_lock(ip, 0);
2084 } else {
2085 hammer2_inode_lock(ip, 0);
2088 #if 0
2090 * Delete the target namespace.
2092 * REMOVED - NOW FOLDED INTO XOP_NRENAME OPERATION
2095 hammer2_xop_unlink_t *xop2;
2096 hammer2_inode_t *tip;
2097 int isopen;
2100 * The unlink XOP unlinks the path from the directory and
2101 * locates and returns the cluster associated with the real
2102 * inode. We have to handle nlinks here on the frontend.
2104 xop2 = hammer2_xop_alloc(tdip, HAMMER2_XOP_MODIFYING);
2105 hammer2_xop_setname(&xop2->head, tname, tname_len);
2106 isopen = cache_isopen(ap->a_tnch);
2107 xop2->isdir = -1;
2108 xop2->dopermanent = 0;
2109 hammer2_xop_start(&xop2->head, hammer2_xop_unlink);
2112 * Collect the real inode and adjust nlinks, destroy the real
2113 * inode if nlinks transitions to 0 and it was the real inode
2114 * (else it has already been removed).
2116 tnch_error = hammer2_xop_collect(&xop2->head, 0);
2117 tnch_error = hammer2_error_to_errno(tnch_error);
2118 /* hammer2_inode_unlock(tdip); */
2120 if (tnch_error == 0) {
2121 tip = hammer2_inode_get(tdip->pmp, NULL,
2122 &xop2->head, -1);
2123 hammer2_xop_retire(&xop2->head, HAMMER2_XOPMASK_VOP);
2124 if (tip) {
2125 hammer2_inode_unlink_finisher(tip, isopen);
2126 hammer2_inode_unlock(tip);
2128 } else {
2129 hammer2_xop_retire(&xop2->head, HAMMER2_XOPMASK_VOP);
2131 /* hammer2_inode_lock(tdip, 0); */
2133 if (tnch_error && tnch_error != ENOENT) {
2134 error = tnch_error;
2135 goto done2;
2137 update_tdip = 1;
2139 #endif
2142 * Resolve the collision space for (tdip, tname, tname_len)
2144 * tdip must be held exclusively locked to prevent races since
2145 * multiple filenames can end up in the same collision space.
2148 hammer2_xop_scanlhc_t *sxop;
2149 hammer2_tid_t lhcbase;
2151 tlhc = hammer2_dirhash(tname, tname_len);
2152 lhcbase = tlhc;
2153 sxop = hammer2_xop_alloc(tdip, HAMMER2_XOP_MODIFYING);
2154 sxop->lhc = tlhc;
2155 hammer2_xop_start(&sxop->head, hammer2_xop_scanlhc);
2156 while ((error = hammer2_xop_collect(&sxop->head, 0)) == 0) {
2157 if (tlhc != sxop->head.cluster.focus->bref.key)
2158 break;
2159 ++tlhc;
2161 error = hammer2_error_to_errno(error);
2162 hammer2_xop_retire(&sxop->head, HAMMER2_XOPMASK_VOP);
2164 if (error) {
2165 if (error != ENOENT)
2166 goto done2;
2167 ++tlhc;
2168 error = 0;
2170 if ((lhcbase ^ tlhc) & ~HAMMER2_DIRHASH_LOMASK) {
2171 error = ENOSPC;
2172 goto done2;

	/*
	 * Ready to go, issue the rename to the backend.  Note that meta-data
	 * updates to the related inodes occur separately from the rename
	 * operation.
	 *
	 * NOTE: While it is not necessary to update ip->meta.name*, doing
	 *	 so aids catastrophic recovery and debugging.
	 */
	if (error == 0) {
		hammer2_xop_nrename_t *xop4;

		xop4 = hammer2_xop_alloc(fdip, HAMMER2_XOP_MODIFYING);
		xop4->lhc = tlhc;
		xop4->ip_key = ip->meta.name_key;
		hammer2_xop_setip2(&xop4->head, ip);
		hammer2_xop_setip3(&xop4->head, tdip);
		hammer2_xop_setname(&xop4->head, fname, fname_len);
		hammer2_xop_setname2(&xop4->head, tname, tname_len);
		hammer2_xop_start(&xop4->head, hammer2_xop_nrename);

		error = hammer2_xop_collect(&xop4->head, 0);
		error = hammer2_error_to_errno(error);
		hammer2_xop_retire(&xop4->head, HAMMER2_XOPMASK_VOP);

		if (error == ENOENT)
			error = 0;

		/*
		 * Update inode meta-data.
		 *
		 * WARNING! The in-memory inode (ip) structure does not
		 *	    maintain a copy of the inode's filename buffer.
		 */
		if (error == 0 &&
		    (ip->meta.name_key & HAMMER2_DIRHASH_VISIBLE)) {
			hammer2_inode_modify(ip);
			ip->meta.name_len = tname_len;
			ip->meta.name_key = tlhc;
		}
		if (error == 0) {
			hammer2_inode_modify(ip);
			ip->meta.iparent = tdip->meta.inum;
		}
		update_fdip = 1;
		update_tdip = 1;
	}
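
	/*
	 * The block above follows the frontend XOP pattern used throughout
	 * this file: hammer2_xop_alloc() builds the operation, the
	 * hammer2_xop_setname*()/hammer2_xop_setip*() helpers attach its
	 * arguments, hammer2_xop_start() dispatches it to the backend
	 * threads, hammer2_xop_collect() synchronizes on the combined
	 * cluster result, and hammer2_xop_retire() releases the XOP.
	 */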

done2:
	/*
	 * If no error, the backend has replaced the target directory entry.
	 * We must adjust nlinks on the original replace target if it exists.
	 */
	if (error == 0 && tip) {
		int isopen;

		isopen = cache_isopen(ap->a_tnch);
		hammer2_inode_unlink_finisher(tip, isopen);
	}

	/*
	 * Update directory mtimes to reflect that something changed.
	 */
	if (update_fdip || update_tdip) {
		uint64_t mtime;

		hammer2_update_time(&mtime);
		if (update_fdip) {
			hammer2_inode_modify(fdip);
			fdip->meta.mtime = mtime;
		}
		if (update_tdip) {
			hammer2_inode_modify(tdip);
			tdip->meta.mtime = mtime;
		}
	}
	if (tip) {
		hammer2_inode_unlock(tip);
		hammer2_inode_drop(tip);
	}
	hammer2_inode_unlock(ip);
	hammer2_inode_unlock(tdip);
	hammer2_inode_unlock(fdip);
	hammer2_inode_drop(ip);
	hammer2_trans_done(tdip->pmp, 1);

	/*
	 * Issue the namecache update after unlocking all the internal
	 * hammer2 structures, otherwise we might deadlock.
	 *
	 * WARNING! The target namespace must be updated atomically,
	 *	    and we depend on cache_rename() to handle that for
	 *	    us.  Do not do a separate cache_unlink() because
	 *	    that leaves a small window of opportunity for other
	 *	    threads to allocate the target namespace before we
	 *	    manage to complete our rename.
	 *
	 * WARNING! cache_rename() (and cache_unlink()) will properly
	 *	    set VREF_FINALIZE on any attached vnode.  Do not
	 *	    call cache_setunresolved() manually before-hand as
	 *	    this will prevent the flag from being set later via
	 *	    cache_rename().  If VREF_FINALIZE is not properly set
	 *	    and the inode is no longer in the topology, related
	 *	    chains can remain dirty indefinitely.
	 */
	if (error == 0 && tip) {
		/* cache_unlink(ap->a_tnch); see above */
		/* cache_setunresolved(ap->a_tnch); see above */
	}
	if (error == 0) {
		cache_rename(ap->a_fnch, ap->a_tnch);
		hammer2_knote(ap->a_fdvp, NOTE_WRITE);
		hammer2_knote(ap->a_tdvp, NOTE_WRITE);
		hammer2_knote(fncp->nc_vp, NOTE_RENAME);
	}

	return (error);
}

/*
 * hammer2_vop_ioctl { vp, command, data, fflag, cred }
 */
static
int
hammer2_vop_ioctl(struct vop_ioctl_args *ap)
{
	hammer2_inode_t *ip;
	int error;

	ip = VTOI(ap->a_vp);

	error = hammer2_ioctl(ip, ap->a_command, (void *)ap->a_data,
			      ap->a_fflag, ap->a_cred);
	return (error);
}

static
int
hammer2_vop_mountctl(struct vop_mountctl_args *ap)
{
	struct mount *mp;
	hammer2_pfs_t *pmp;
	int rc;

	switch (ap->a_op) {
	case (MOUNTCTL_SET_EXPORT):
		mp = ap->a_head.a_ops->head.vv_mount;
		pmp = MPTOPMP(mp);

		if (ap->a_ctllen != sizeof(struct export_args))
			rc = (EINVAL);
		else
			rc = vfs_export(mp, &pmp->export,
					(const struct export_args *)ap->a_ctl);
		break;
	default:
		rc = vop_stdmountctl(ap);
		break;
	}
	return (rc);
}
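
/*
 * The MOUNTCTL_SET_EXPORT case above is normally reached from userland
 * via the mountctl(2) system call.  A minimal userland sketch, assuming
 * a hypothetical mount point path and an illustrative export flag; note
 * that a_ctllen is validated against sizeof(struct export_args) above:
 */
#if 0
	#include <sys/param.h>
	#include <sys/mount.h>
	#include <sys/mountctl.h>
	#include <err.h>

	struct export_args ex = { .ex_flags = MNT_EXRDONLY };

	if (mountctl("/mnt/mypfs", MOUNTCTL_SET_EXPORT, -1,
		     &ex, sizeof(ex), NULL, 0) < 0)
		err(1, "mountctl");
#endif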

/*
 * KQFILTER
 */
static void filt_hammer2detach(struct knote *kn);
static int filt_hammer2read(struct knote *kn, long hint);
static int filt_hammer2write(struct knote *kn, long hint);
static int filt_hammer2vnode(struct knote *kn, long hint);

static struct filterops hammer2read_filtops =
	{ FILTEROP_ISFD | FILTEROP_MPSAFE,
	  NULL, filt_hammer2detach, filt_hammer2read };
static struct filterops hammer2write_filtops =
	{ FILTEROP_ISFD | FILTEROP_MPSAFE,
	  NULL, filt_hammer2detach, filt_hammer2write };
static struct filterops hammer2vnode_filtops =
	{ FILTEROP_ISFD | FILTEROP_MPSAFE,
	  NULL, filt_hammer2detach, filt_hammer2vnode };
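
/*
 * The positional initializers above correspond to struct filterops'
 * f_flags, f_attach, f_detach and f_event members: no attach hook is
 * needed (the knote is wired up directly in hammer2_vop_kqfilter()
 * below), all three filters share filt_hammer2detach(), and each
 * supplies its own event predicate.
 */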

static
int
hammer2_vop_kqfilter(struct vop_kqfilter_args *ap)
{
	struct vnode *vp = ap->a_vp;
	struct knote *kn = ap->a_kn;

	switch (kn->kn_filter) {
	case EVFILT_READ:
		kn->kn_fop = &hammer2read_filtops;
		break;
	case EVFILT_WRITE:
		kn->kn_fop = &hammer2write_filtops;
		break;
	case EVFILT_VNODE:
		kn->kn_fop = &hammer2vnode_filtops;
		break;
	default:
		return (EOPNOTSUPP);
	}

	kn->kn_hook = (caddr_t)vp;

	knote_insert(&vp->v_pollinfo.vpi_kqinfo.ki_note, kn);

	return(0);
}
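
/*
 * These filters are driven from userland through kqueue(2)/kevent(2).
 * A minimal userland sketch (illustrative path and event mask) that
 * would receive the NOTE_WRITE/NOTE_RENAME events posted via
 * hammer2_knote():
 */
#if 0
	#include <sys/types.h>
	#include <sys/event.h>
	#include <fcntl.h>

	int fd = open("/mnt/somefile", O_RDONLY);
	int kq = kqueue();
	struct kevent kev;

	EV_SET(&kev, fd, EVFILT_VNODE, EV_ADD | EV_CLEAR,
	       NOTE_WRITE | NOTE_RENAME, 0, NULL);
	kevent(kq, &kev, 1, NULL, 0, NULL);	/* register the knote */
	kevent(kq, NULL, 0, &kev, 1, NULL);	/* block for an event */
#endif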

static void
filt_hammer2detach(struct knote *kn)
{
	struct vnode *vp = (void *)kn->kn_hook;

	knote_remove(&vp->v_pollinfo.vpi_kqinfo.ki_note, kn);
}

static int
filt_hammer2read(struct knote *kn, long hint)
{
	struct vnode *vp = (void *)kn->kn_hook;
	hammer2_inode_t *ip = VTOI(vp);
	off_t off;

	if (hint == NOTE_REVOKE) {
		kn->kn_flags |= (EV_EOF | EV_NODATA | EV_ONESHOT);
		return(1);
	}

	/*
	 * Bytes remaining between the descriptor's offset and EOF,
	 * clamped so the count cannot overflow kn_data.
	 */
	off = ip->meta.size - kn->kn_fp->f_offset;
	kn->kn_data = (off < INTPTR_MAX) ? off : INTPTR_MAX;
	if (kn->kn_sfflags & NOTE_OLDAPI)
		return(1);
	return (kn->kn_data != 0);
}

static int
filt_hammer2write(struct knote *kn, long hint)
{
	if (hint == NOTE_REVOKE)
		kn->kn_flags |= (EV_EOF | EV_NODATA | EV_ONESHOT);
	kn->kn_data = 0;
	return (1);
}

static int
filt_hammer2vnode(struct knote *kn, long hint)
{
	if (kn->kn_sfflags & hint)
		kn->kn_fflags |= hint;
	if (hint == NOTE_REVOKE) {
		kn->kn_flags |= (EV_EOF | EV_NODATA);
		return (1);
	}
	return (kn->kn_fflags != 0);
}
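
/*
 * For EVFILT_VNODE the hint posted by hammer2_knote() is latched into
 * kn_fflags only when it intersects the event mask the caller
 * subscribed to (kn_sfflags), so unrelated vnode activity does not
 * wake the consumer.
 */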

/*
 * FIFO VOPS
 */
static
int
hammer2_vop_markatime(struct vop_markatime_args *ap)
{
	hammer2_inode_t *ip;
	struct vnode *vp;

	vp = ap->a_vp;
	ip = VTOI(vp);

	if (ip->pmp->ronly)
		return (EROFS);
	return(0);
}

static
int
hammer2_vop_fifokqfilter(struct vop_kqfilter_args *ap)
{
	int error;

	/*
	 * Try the fifofs filter first; fall back to the generic hammer2
	 * kqfilter if fifofs does not support the requested filter.
	 */
	error = VOCALL(&fifo_vnode_vops, &ap->a_head);
	if (error)
		error = hammer2_vop_kqfilter(ap);
	return(error);
}

/*
 * VOPS vector
 */
struct vop_ops hammer2_vnode_vops = {
	.vop_default	= vop_defaultop,
	.vop_fsync	= hammer2_vop_fsync,
	.vop_getpages	= vop_stdgetpages,
	.vop_putpages	= vop_stdputpages,
	.vop_access	= hammer2_vop_access,
	.vop_advlock	= hammer2_vop_advlock,
	.vop_close	= hammer2_vop_close,
	.vop_nlink	= hammer2_vop_nlink,
	.vop_ncreate	= hammer2_vop_ncreate,
	.vop_nsymlink	= hammer2_vop_nsymlink,
	.vop_nremove	= hammer2_vop_nremove,
	.vop_nrmdir	= hammer2_vop_nrmdir,
	.vop_nrename	= hammer2_vop_nrename,
	.vop_getattr	= hammer2_vop_getattr,
	.vop_setattr	= hammer2_vop_setattr,
	.vop_readdir	= hammer2_vop_readdir,
	.vop_readlink	= hammer2_vop_readlink,
	.vop_read	= hammer2_vop_read,
	.vop_write	= hammer2_vop_write,
	.vop_open	= hammer2_vop_open,
	.vop_inactive	= hammer2_vop_inactive,
	.vop_reclaim	= hammer2_vop_reclaim,
	.vop_nresolve	= hammer2_vop_nresolve,
	.vop_nlookupdotdot = hammer2_vop_nlookupdotdot,
	.vop_nmkdir	= hammer2_vop_nmkdir,
	.vop_nmknod	= hammer2_vop_nmknod,
	.vop_ioctl	= hammer2_vop_ioctl,
	.vop_mountctl	= hammer2_vop_mountctl,
	.vop_bmap	= hammer2_vop_bmap,
	.vop_strategy	= hammer2_vop_strategy,
	.vop_kqfilter	= hammer2_vop_kqfilter
};

struct vop_ops hammer2_spec_vops = {
	.vop_default	= vop_defaultop,
	.vop_fsync	= hammer2_vop_fsync,
	.vop_read	= vop_stdnoread,
	.vop_write	= vop_stdnowrite,
	.vop_access	= hammer2_vop_access,
	.vop_close	= hammer2_vop_close,
	.vop_markatime	= hammer2_vop_markatime,
	.vop_getattr	= hammer2_vop_getattr,
	.vop_inactive	= hammer2_vop_inactive,
	.vop_reclaim	= hammer2_vop_reclaim,
	.vop_setattr	= hammer2_vop_setattr
};

struct vop_ops hammer2_fifo_vops = {
	.vop_default	= fifo_vnoperate,
	.vop_fsync	= hammer2_vop_fsync,
#if 0
	.vop_read	= hammer2_vop_fiforead,
	.vop_write	= hammer2_vop_fifowrite,
#endif
	.vop_access	= hammer2_vop_access,
#if 0
	.vop_close	= hammer2_vop_fifoclose,
#endif
	.vop_markatime	= hammer2_vop_markatime,
	.vop_getattr	= hammer2_vop_getattr,
	.vop_inactive	= hammer2_vop_inactive,
	.vop_reclaim	= hammer2_vop_reclaim,
	.vop_setattr	= hammer2_vop_setattr,
	.vop_kqfilter	= hammer2_vop_fifokqfilter
};