usr.sbin/makefs: Sync with sys/vfs/hammer2
/*
 * SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright (c) 2022 Tomohiro Kusumi <tkusumi@netbsd.org>
 * Copyright (c) 2011-2022 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@dragonflybsd.org>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
/*
 * Kernel Filesystem interface
 *
 * NOTE! local ipdata pointers must be reloaded on any modifying operation
 *	 to the inode as its underlying chain may have changed.
 */
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/fcntl.h>
#include <sys/buf.h>
#include <sys/proc.h>
#include <sys/mount.h>
#include <sys/vnode.h>
#include <sys/mountctl.h>
#include <sys/dirent.h>
#include <sys/uio.h>
#include <sys/objcache.h>
#include <sys/event.h>
#include <sys/file.h>
#include <vfs/fifofs/fifo.h>

#include "hammer2.h"
static int hammer2_read_file(hammer2_inode_t *ip, struct uio *uio,
			int seqcount);

static int hammer2_write_file(hammer2_inode_t *ip, struct uio *uio,
			int ioflag, int seqcount);
static void hammer2_extend_file(hammer2_inode_t *ip, hammer2_key_t nsize);
static void hammer2_truncate_file(hammer2_inode_t *ip, hammer2_key_t nsize);
/*
 * Last reference to a vnode is going away but it is still cached.
 */
static
int
hammer2_vop_inactive(struct vop_inactive_args *ap)
{
#if 0
	hammer2_inode_t *ip;
	struct m_vnode *vp;

	vp = ap->a_vp;
	ip = VTOI(vp);

	/*
	 * Degenerate case
	 */
	if (ip == NULL) {
		vrecycle(vp);
		return (0);
	}

	/*
	 * Acquire the inode lock to interlock against vp updates via
	 * the inode path and file deletions and such (which can be
	 * namespace-only operations that might not hold the vnode).
	 */
	hammer2_inode_lock(ip, 0);
	if (ip->flags & HAMMER2_INODE_ISUNLINKED) {
		hammer2_key_t lbase;
		int nblksize;

		/*
		 * If the inode has been unlinked we can throw away all
		 * buffers (dirty or not) and clean the file out.
		 *
		 * Because vrecycle() calls are not guaranteed, try to
		 * dispose of the inode as much as possible right here.
		 */
		nblksize = hammer2_calc_logical(ip, 0, &lbase, NULL);
		nvtruncbuf(vp, 0, nblksize, 0, 0);

		/*
		 * Delete the file on-media.
		 */
		if ((ip->flags & HAMMER2_INODE_DELETING) == 0) {
			atomic_set_int(&ip->flags, HAMMER2_INODE_DELETING);
			hammer2_inode_delayed_sideq(ip);
		}
		hammer2_inode_unlock(ip);

		/*
		 * Recycle immediately if possible
		 */
		vrecycle(vp);
	} else {
		hammer2_inode_unlock(ip);
	}
	return (0);
#endif
	return (EOPNOTSUPP);
}
/*
 * Reclaim a vnode so that it can be reused; after the inode is
 * disassociated, the filesystem must manage it alone.
 */
static
int
hammer2_vop_reclaim(struct vop_reclaim_args *ap)
{
	hammer2_inode_t *ip;
	struct m_vnode *vp;

	vp = ap->a_vp;
	ip = VTOI(vp);
	if (ip == NULL)
		return(0);

	/*
	 * NOTE! We do not attempt to flush chains here, flushing is
	 *	 really fragile and could also deadlock.
	 */
	vclrisdirty(vp);

	/*
	 * The inode lock is required to disconnect it.
	 */
	hammer2_inode_lock(ip, 0);
	vp->v_data = NULL;
	ip->vp = NULL;

	/*
	 * Delete the file on-media.  This should have been handled by the
	 * inactivation.  The operation is likely still queued on the inode
	 * though so only complain if the stars don't align.
	 */
	if ((ip->flags & (HAMMER2_INODE_ISUNLINKED | HAMMER2_INODE_DELETING)) ==
	    HAMMER2_INODE_ISUNLINKED)
	{
		assert(0);
		atomic_set_int(&ip->flags, HAMMER2_INODE_DELETING);
		hammer2_inode_delayed_sideq(ip);
		kprintf("hammer2: vp=%p ip=%p unlinked but not disposed\n",
			vp, ip);
	}
	hammer2_inode_unlock(ip);

	/*
	 * Modified inodes will already be on SIDEQ or SYNCQ, no further
	 * action is needed.
	 *
	 * We cannot safely synchronize the inode from inside the reclaim
	 * due to potentially deep locks held as-of when the reclaim occurs.
	 * Interactions and potential deadlocks abound.  We also can't do it
	 * here without desynchronizing from the related directory entrie(s).
	 */
	hammer2_inode_drop(ip);			/* vp ref */

	/*
	 * XXX handle background sync when ip dirty, kernel will no longer
	 * notify us regarding this inode because there is no longer a
	 * vnode attached to it.
	 */

	return (0);
}
int
hammer2_reclaim(struct m_vnode *vp)
{
	struct vop_reclaim_args ap = {
		.a_vp = vp,
	};

	return hammer2_vop_reclaim(&ap);
}
/*
 * Currently this function synchronizes the front-end inode state to the
 * backend chain topology, then flushes the inode's chain and sub-topology
 * to backend media.  This function does not flush the root topology down to
 * the inode.
 */
static
int
hammer2_vop_fsync(struct vop_fsync_args *ap)
{
#if 0
	hammer2_inode_t *ip;
	struct m_vnode *vp;
	int error1;
	int error2;

	vp = ap->a_vp;
	ip = VTOI(vp);
	error1 = 0;

	hammer2_trans_init(ip->pmp, 0);

	/*
	 * Flush dirty buffers in the file's logical buffer cache.
	 * It is best to wait for the strategy code to commit the
	 * buffers to the device's backing buffer cache before
	 * then trying to flush the inode.
	 *
	 * This should be quick, but certain inode modifications cached
	 * entirely in the hammer2_inode structure may not trigger a
	 * buffer read until the flush so the fsync can wind up also
	 * doing scattered reads.
	 */
	vfsync(vp, ap->a_waitfor, 1, NULL, NULL);
	bio_track_wait(&vp->v_track_write, 0, 0);

	/*
	 * Flush any inode changes
	 */
	hammer2_inode_lock(ip, 0);
	if (ip->flags & (HAMMER2_INODE_RESIZED|HAMMER2_INODE_MODIFIED))
		error1 = hammer2_inode_chain_sync(ip);

	/*
	 * Flush dirty chains related to the inode.
	 *
	 * NOTE! We are not in a flush transaction.  The inode remains on
	 *	 the sideq so the filesystem syncer can synchronize it to
	 *	 the volume root.
	 */
	error2 = hammer2_inode_chain_flush(ip, HAMMER2_XOP_INODE_STOP);
	if (error2)
		error1 = error2;

	/*
	 * We may be able to clear the vnode dirty flag.
	 */
	if ((ip->flags & (HAMMER2_INODE_MODIFIED |
			  HAMMER2_INODE_RESIZED |
			  HAMMER2_INODE_DIRTYDATA)) == 0 &&
	    RB_EMPTY(&vp->v_rbdirty_tree) &&
	    !bio_track_active(&vp->v_track_write)) {
		vclrisdirty(vp);
	}
	hammer2_inode_unlock(ip);
	hammer2_trans_done(ip->pmp, 0);

	return (error1);
#endif
	return (EOPNOTSUPP);
}
/*
 * No lock needed, just handle ip->update
 */
static
int
hammer2_vop_access(struct vop_access_args *ap)
{
#if 0
	hammer2_inode_t *ip = VTOI(ap->a_vp);
	uid_t uid;
	gid_t gid;
	mode_t mode;
	uint32_t uflags;
	int error;
	int update;

retry:
	update = spin_access_start(&ip->cluster_spin);

	/*hammer2_inode_lock(ip, HAMMER2_RESOLVE_SHARED);*/
	uid = hammer2_to_unix_xid(&ip->meta.uid);
	gid = hammer2_to_unix_xid(&ip->meta.gid);
	mode = ip->meta.mode;
	uflags = ip->meta.uflags;
	/*hammer2_inode_unlock(ip);*/

	if (__predict_false(spin_access_end(&ip->cluster_spin, update)))
		goto retry;

	error = vop_helper_access(ap, uid, gid, mode, uflags);

	return (error);
#endif
	return (EOPNOTSUPP);
}
static
int
hammer2_vop_getattr(struct vop_getattr_args *ap)
{
#if 0
	hammer2_pfs_t *pmp;
	hammer2_inode_t *ip;
	struct m_vnode *vp;
	struct vattr *vap;
	int update;

	vp = ap->a_vp;
	vap = ap->a_vap;

	ip = VTOI(vp);
	pmp = ip->pmp;

retry:
	update = spin_access_start(&ip->cluster_spin);

	vap->va_fsid = pmp->mp->mnt_stat.f_fsid.val[0];
	vap->va_fileid = ip->meta.inum;
	vap->va_mode = ip->meta.mode;
	vap->va_nlink = ip->meta.nlinks;
	vap->va_uid = hammer2_to_unix_xid(&ip->meta.uid);
	vap->va_gid = hammer2_to_unix_xid(&ip->meta.gid);
	vap->va_rmajor = 0;
	vap->va_rminor = 0;
	vap->va_size = ip->meta.size;	/* protected by shared lock */
	vap->va_blocksize = HAMMER2_PBUFSIZE;
	vap->va_flags = ip->meta.uflags;
	hammer2_time_to_timespec(ip->meta.ctime, &vap->va_ctime);
	hammer2_time_to_timespec(ip->meta.mtime, &vap->va_mtime);
	hammer2_time_to_timespec(ip->meta.mtime, &vap->va_atime);
	vap->va_gen = 1;
	vap->va_bytes = 0;
	if (ip->meta.type == HAMMER2_OBJTYPE_DIRECTORY) {
		/*
		 * Can't really calculate directory use sans the files under
		 * it, just assume one block for now.
		 */
		vap->va_bytes += HAMMER2_INODE_BYTES;
	} else {
		vap->va_bytes = hammer2_inode_data_count(ip);
	}
	vap->va_type = hammer2_get_vtype(ip->meta.type);
	vap->va_filerev = 0;
	vap->va_uid_uuid = ip->meta.uid;
	vap->va_gid_uuid = ip->meta.gid;
	vap->va_vaflags = VA_UID_UUID_VALID | VA_GID_UUID_VALID |
			  VA_FSID_UUID_VALID;

	if (__predict_false(spin_access_end(&ip->cluster_spin, update)))
		goto retry;

	return (0);
#endif
	return (EOPNOTSUPP);
}
static
int
hammer2_vop_getattr_lite(struct vop_getattr_lite_args *ap)
{
#if 0
	hammer2_pfs_t *pmp;
	hammer2_inode_t *ip;
	struct m_vnode *vp;
	struct vattr_lite *lvap;
	int update;

	vp = ap->a_vp;
	lvap = ap->a_lvap;

	ip = VTOI(vp);
	pmp = ip->pmp;

retry:
	update = spin_access_start(&ip->cluster_spin);

#if 0
	vap->va_fsid = pmp->mp->mnt_stat.f_fsid.val[0];
	vap->va_fileid = ip->meta.inum;
#endif
	lvap->va_mode = ip->meta.mode;
	lvap->va_nlink = ip->meta.nlinks;
	lvap->va_uid = hammer2_to_unix_xid(&ip->meta.uid);
	lvap->va_gid = hammer2_to_unix_xid(&ip->meta.gid);
#if 0
	vap->va_rmajor = 0;
	vap->va_rminor = 0;
#endif
	lvap->va_size = ip->meta.size;
#if 0
	vap->va_blocksize = HAMMER2_PBUFSIZE;
#endif
	lvap->va_flags = ip->meta.uflags;
	lvap->va_type = hammer2_get_vtype(ip->meta.type);
#if 0
	vap->va_filerev = 0;
	vap->va_uid_uuid = ip->meta.uid;
	vap->va_gid_uuid = ip->meta.gid;
	vap->va_vaflags = VA_UID_UUID_VALID | VA_GID_UUID_VALID |
			  VA_FSID_UUID_VALID;
#endif

	if (__predict_false(spin_access_end(&ip->cluster_spin, update)))
		goto retry;

	return (0);
#endif
	return (EOPNOTSUPP);
}
static
int
hammer2_vop_setattr(struct vop_setattr_args *ap)
{
#if 0
	hammer2_inode_t *ip;
	struct m_vnode *vp;
	struct vattr *vap;
	int error;
	int kflags = 0;
	uint64_t ctime;

	vp = ap->a_vp;
	vap = ap->a_vap;
	hammer2_update_time(&ctime);

	ip = VTOI(vp);

	if (ip->pmp->ronly)
		return (EROFS);

	/*
	 * Normally disallow setattr if there is no space, unless we
	 * are in emergency mode (might be needed to chflags -R noschg
	 * files prior to removal).
	 */
	if ((ip->pmp->flags & HAMMER2_PMPF_EMERG) == 0 &&
	    hammer2_vfs_enospace(ip, 0, ap->a_cred) > 1) {
		return (ENOSPC);
	}

	hammer2_trans_init(ip->pmp, 0);
	hammer2_inode_lock(ip, 0);
	error = 0;

	if (vap->va_flags != VNOVAL) {
		uint32_t flags;

		flags = ip->meta.uflags;
		error = vop_helper_setattr_flags(&flags, vap->va_flags,
				     hammer2_to_unix_xid(&ip->meta.uid),
				     ap->a_cred);
		if (error == 0) {
			if (ip->meta.uflags != flags) {
				hammer2_inode_modify(ip);
				hammer2_spin_lock_update(&ip->cluster_spin);
				ip->meta.uflags = flags;
				ip->meta.ctime = ctime;
				hammer2_spin_unlock_update(&ip->cluster_spin);
				kflags |= NOTE_ATTRIB;
			}
			if (ip->meta.uflags & (IMMUTABLE | APPEND)) {
				error = 0;
				goto done;
			}
		}
		goto done;
	}
	if (ip->meta.uflags & (IMMUTABLE | APPEND)) {
		error = EPERM;
		goto done;
	}
	if (vap->va_uid != (uid_t)VNOVAL || vap->va_gid != (gid_t)VNOVAL) {
		mode_t cur_mode = ip->meta.mode;
		uid_t cur_uid = hammer2_to_unix_xid(&ip->meta.uid);
		gid_t cur_gid = hammer2_to_unix_xid(&ip->meta.gid);
		uuid_t uuid_uid;
		uuid_t uuid_gid;

		error = vop_helper_chown(ap->a_vp, vap->va_uid, vap->va_gid,
					 ap->a_cred,
					 &cur_uid, &cur_gid, &cur_mode);
		if (error == 0) {
			hammer2_guid_to_uuid(&uuid_uid, cur_uid);
			hammer2_guid_to_uuid(&uuid_gid, cur_gid);
			if (bcmp(&uuid_uid, &ip->meta.uid, sizeof(uuid_uid)) ||
			    bcmp(&uuid_gid, &ip->meta.gid, sizeof(uuid_gid)) ||
			    ip->meta.mode != cur_mode
			) {
				hammer2_inode_modify(ip);
				hammer2_spin_lock_update(&ip->cluster_spin);
				ip->meta.uid = uuid_uid;
				ip->meta.gid = uuid_gid;
				ip->meta.mode = cur_mode;
				ip->meta.ctime = ctime;
				hammer2_spin_unlock_update(&ip->cluster_spin);
			}
			kflags |= NOTE_ATTRIB;
		}
	}

	/*
	 * Resize the file
	 */
	if (vap->va_size != VNOVAL && ip->meta.size != vap->va_size) {
		switch(vp->v_type) {
		case VREG:
			if (vap->va_size == ip->meta.size)
				break;
			if (vap->va_size < ip->meta.size) {
				hammer2_mtx_ex(&ip->truncate_lock);
				hammer2_truncate_file(ip, vap->va_size);
				hammer2_mtx_unlock(&ip->truncate_lock);
				kflags |= NOTE_WRITE;
			} else {
				hammer2_extend_file(ip, vap->va_size);
				kflags |= NOTE_WRITE | NOTE_EXTEND;
			}
			hammer2_inode_modify(ip);
			ip->meta.mtime = ctime;
			vclrflags(vp, VLASTWRITETS);
			break;
		default:
			error = EINVAL;
			goto done;
		}
	}
#if 0
	/* atime not supported */
	if (vap->va_atime.tv_sec != VNOVAL) {
		hammer2_inode_modify(ip);
		ip->meta.atime = hammer2_timespec_to_time(&vap->va_atime);
		kflags |= NOTE_ATTRIB;
	}
#endif
	if (vap->va_mode != (mode_t)VNOVAL) {
		mode_t cur_mode = ip->meta.mode;
		uid_t cur_uid = hammer2_to_unix_xid(&ip->meta.uid);
		gid_t cur_gid = hammer2_to_unix_xid(&ip->meta.gid);

		error = vop_helper_chmod(ap->a_vp, vap->va_mode, ap->a_cred,
					 cur_uid, cur_gid, &cur_mode);
		if (error == 0) {
			hammer2_inode_modify(ip);
			hammer2_spin_lock_update(&ip->cluster_spin);
			ip->meta.mode = cur_mode;
			ip->meta.ctime = ctime;
			hammer2_spin_unlock_update(&ip->cluster_spin);
			kflags |= NOTE_ATTRIB;
		}
	}

	if (vap->va_mtime.tv_sec != VNOVAL) {
		hammer2_inode_modify(ip);
		ip->meta.mtime = hammer2_timespec_to_time(&vap->va_mtime);
		kflags |= NOTE_ATTRIB;
		vclrflags(vp, VLASTWRITETS);
	}

done:
	/*
	 * If a truncation occurred we must call chain_sync() now in order
	 * to trim the related data chains, otherwise a later expansion can
	 * cause havoc.
	 *
	 * If an extend occurred that changed the DIRECTDATA state, we must
	 * call inode_chain_sync now in order to prepare the inode's indirect
	 * block table.
	 *
	 * WARNING! This means we are making an adjustment to the inode's
	 *	    chain outside of sync/fsync, and not just to inode->meta,
	 *	    which may result in some consistency issues if a crash
	 *	    were to occur at just the wrong time.
	 */
	if (ip->flags & HAMMER2_INODE_RESIZED)
		hammer2_inode_chain_sync(ip);

	/*
	 * Cleanup.
	 */
	hammer2_inode_unlock(ip);
	hammer2_trans_done(ip->pmp, HAMMER2_TRANS_SIDEQ);
	hammer2_knote(ip->vp, kflags);

	return (error);
#endif
	return (EOPNOTSUPP);
}
static
int
hammer2_vop_readdir(struct vop_readdir_args *ap)
{
#if 0
	hammer2_xop_readdir_t *xop;
	hammer2_blockref_t bref;
	hammer2_inode_t *ip;
	hammer2_tid_t inum;
	hammer2_key_t lkey;
	struct uio *uio;
	off_t *cookies;
	off_t saveoff;
	int cookie_index;
	int ncookies;
	int error;
	int eofflag;
	int r;

	ip = VTOI(ap->a_vp);
	uio = ap->a_uio;
	saveoff = uio->uio_offset;
	eofflag = 0;
	error = 0;

	/*
	 * Set up directory entry cookies if requested
	 */
	if (ap->a_ncookies) {
		ncookies = uio->uio_resid / 16 + 1;
		if (ncookies > 1024)
			ncookies = 1024;
		cookies = kmalloc(ncookies * sizeof(off_t), M_TEMP, M_WAITOK);
	} else {
		ncookies = -1;
		cookies = NULL;
	}
	cookie_index = 0;

	hammer2_inode_lock(ip, HAMMER2_RESOLVE_SHARED);

	/*
	 * Handle artificial entries.  To ensure that only positive 64 bit
	 * quantities are returned to userland we always strip off bit 63.
	 * The hash code is designed such that codes 0x0000-0x7FFF are not
	 * used, allowing us to use these codes for artificial entries.
	 *
	 * Entry 0 is used for '.' and entry 1 is used for '..'.  Do not
	 * allow '..' to cross the mount point into (e.g.) the super-root.
	 */
	if (saveoff == 0) {
		inum = ip->meta.inum & HAMMER2_DIRHASH_USERMSK;
		r = vop_write_dirent(&error, uio, inum, DT_DIR, 1, ".");
		if (r)
			goto done;
		if (cookies)
			cookies[cookie_index] = saveoff;
		++saveoff;
		++cookie_index;
		if (cookie_index == ncookies)
			goto done;
	}

	if (saveoff == 1) {
		inum = ip->meta.inum & HAMMER2_DIRHASH_USERMSK;
		if (ip != ip->pmp->iroot)
			inum = ip->meta.iparent & HAMMER2_DIRHASH_USERMSK;
		r = vop_write_dirent(&error, uio, inum, DT_DIR, 2, "..");
		if (r)
			goto done;
		if (cookies)
			cookies[cookie_index] = saveoff;
		++saveoff;
		++cookie_index;
		if (cookie_index == ncookies)
			goto done;
	}

	lkey = saveoff | HAMMER2_DIRHASH_VISIBLE;
	if (hammer2_debug & 0x0020)
		kprintf("readdir: lkey %016jx\n", lkey);
	if (error)
		goto done;

	xop = hammer2_xop_alloc(ip, 0);
	xop->lkey = lkey;
	hammer2_xop_start(&xop->head, &hammer2_readdir_desc);

	for (;;) {
		const hammer2_inode_data_t *ripdata;
		const char *dname;
		int dtype;

		error = hammer2_xop_collect(&xop->head, 0);
		error = hammer2_error_to_errno(error);
		if (error) {
			break;
		}
		if (cookie_index == ncookies)
			break;
		if (hammer2_debug & 0x0020)
			kprintf("cluster chain %p %p\n",
				xop->head.cluster.focus,
				(xop->head.cluster.focus ?
				 xop->head.cluster.focus->data : (void *)-1));
		hammer2_cluster_bref(&xop->head.cluster, &bref);

		if (bref.type == HAMMER2_BREF_TYPE_INODE) {
			ripdata = &hammer2_xop_gdata(&xop->head)->ipdata;
			dtype = hammer2_get_dtype(ripdata->meta.type);
			saveoff = bref.key & HAMMER2_DIRHASH_USERMSK;
			r = vop_write_dirent(&error, uio,
					     ripdata->meta.inum &
					      HAMMER2_DIRHASH_USERMSK,
					     dtype,
					     ripdata->meta.name_len,
					     ripdata->filename);
			hammer2_xop_pdata(&xop->head);
			if (r)
				break;
			if (cookies)
				cookies[cookie_index] = saveoff;
			++cookie_index;
		} else if (bref.type == HAMMER2_BREF_TYPE_DIRENT) {
			uint16_t namlen;

			dtype = hammer2_get_dtype(bref.embed.dirent.type);
			saveoff = bref.key & HAMMER2_DIRHASH_USERMSK;
			namlen = bref.embed.dirent.namlen;
			if (namlen <= sizeof(bref.check.buf)) {
				dname = bref.check.buf;
			} else {
				dname = hammer2_xop_gdata(&xop->head)->buf;
			}
			r = vop_write_dirent(&error, uio,
					     bref.embed.dirent.inum, dtype,
					     namlen, dname);
			if (namlen > sizeof(bref.check.buf))
				hammer2_xop_pdata(&xop->head);
			if (r)
				break;
			if (cookies)
				cookies[cookie_index] = saveoff;
			++cookie_index;
		} else {
			/* XXX chain error */
			kprintf("bad chain type readdir %d\n", bref.type);
		}
	}
	hammer2_xop_retire(&xop->head, HAMMER2_XOPMASK_VOP);
	if (error == ENOENT) {
		error = 0;
		eofflag = 1;
		saveoff = (hammer2_key_t)-1;
	} else {
		saveoff = bref.key & HAMMER2_DIRHASH_USERMSK;
	}
done:
	hammer2_inode_unlock(ip);
	if (ap->a_eofflag)
		*ap->a_eofflag = eofflag;
	if (hammer2_debug & 0x0020)
		kprintf("readdir: done at %016jx\n", saveoff);
	uio->uio_offset = saveoff & ~HAMMER2_DIRHASH_VISIBLE;
	if (error && cookie_index == 0) {
		if (cookies) {
			kfree(cookies, M_TEMP);
			*ap->a_ncookies = 0;
			*ap->a_cookies = NULL;
		}
	} else {
		if (cookies) {
			*ap->a_ncookies = cookie_index;
			*ap->a_cookies = cookies;
		}
	}
	return (error);
#endif
	return (EOPNOTSUPP);
}
/*
 * hammer2_vop_readlink { vp, uio, cred }
 */
static
int
hammer2_vop_readlink(struct vop_readlink_args *ap)
{
#if 0
	struct m_vnode *vp;
	hammer2_inode_t *ip;
	int error;

	vp = ap->a_vp;
	if (vp->v_type != VLNK)
		return (EINVAL);
	ip = VTOI(vp);

	error = hammer2_read_file(ip, ap->a_uio, 0);
	return (error);
#endif
	return (EOPNOTSUPP);
}
static
int
hammer2_vop_read(struct vop_read_args *ap)
{
#if 0
	struct m_vnode *vp;
	hammer2_inode_t *ip;
	struct uio *uio;
	int error;
	int seqcount;

	/*
	 * Read operations supported on this vnode?
	 */
	vp = ap->a_vp;
	if (vp->v_type == VDIR)
		return (EISDIR);
	if (vp->v_type != VREG)
		return (EINVAL);

	/*
	 * Misc
	 */
	ip = VTOI(vp);
	uio = ap->a_uio;
	error = 0;

	seqcount = ap->a_ioflag >> IO_SEQSHIFT;

	error = hammer2_read_file(ip, uio, seqcount);
	return (error);
#endif
	return (EOPNOTSUPP);
}
static
int
hammer2_vop_write(struct vop_write_args *ap)
{
	hammer2_inode_t *ip;
	//thread_t td;
	struct m_vnode *vp;
	struct uio *uio;
	int error;
	int seqcount;
	int ioflag;

	/*
	 * Write operations supported on this vnode?
	 */
	vp = ap->a_vp;
	if (vp->v_type != VREG)
		return (EINVAL);

	/*
	 * Misc
	 */
	ip = VTOI(vp);
	ioflag = ap->a_ioflag;
	uio = ap->a_uio;
	error = 0;
	if (ip->pmp->ronly || (ip->pmp->flags & HAMMER2_PMPF_EMERG))
		return (EROFS);
	switch (hammer2_vfs_enospace(ip, uio->uio_resid, ap->a_cred)) {
	case 2:
		return (ENOSPC);
	case 1:
		ioflag |= IO_DIRECT;	/* semi-synchronous */
		/* fall through */
	default:
		break;
	}

	seqcount = ioflag >> IO_SEQSHIFT;

	/*
	 * Check resource limit
	 */
#if 0
	if (uio->uio_resid > 0 && (td = uio->uio_td) != NULL && td->td_proc &&
	    uio->uio_offset + uio->uio_resid >
	     td->td_proc->p_rlimit[RLIMIT_FSIZE].rlim_cur) {
		lwpsignal(td->td_proc, td->td_lwp, SIGXFSZ);
		return (EFBIG);
	}
#endif

	/*
	 * The transaction interlocks against flush initiations
	 * (note: but will run concurrently with the actual flush).
	 *
	 * To avoid deadlocking against the VM system, we must flag any
	 * transaction related to the buffer cache or other direct
	 * VM page manipulation.
	 */
	if (uio->uio_segflg == UIO_NOCOPY) {
		assert(0); /* no UIO_NOCOPY in makefs */
		hammer2_trans_init(ip->pmp, HAMMER2_TRANS_BUFCACHE);
	} else {
		hammer2_trans_init(ip->pmp, 0);
	}
	error = hammer2_write_file(ip, uio, ioflag, seqcount);
	if (uio->uio_segflg == UIO_NOCOPY) {
		assert(0); /* no UIO_NOCOPY in makefs */
		hammer2_trans_done(ip->pmp, HAMMER2_TRANS_BUFCACHE |
					    HAMMER2_TRANS_SIDEQ);
	} else
		hammer2_trans_done(ip->pmp, HAMMER2_TRANS_SIDEQ);

	return (error);
}
int
hammer2_write(struct m_vnode *vp, void *buf, size_t size, off_t offset)
{
	assert(buf);
	assert(size > 0);
	assert(size <= HAMMER2_PBUFSIZE);

	struct iovec iov = {
		.iov_base = buf,
		.iov_len = size,
	};
	struct uio uio = {
		.uio_iov = &iov,
		.uio_iovcnt = 1,
		.uio_offset = offset,
		.uio_resid = size,
		.uio_segflg = UIO_USERSPACE,
		.uio_rw = UIO_WRITE,
		.uio_td = NULL,
	};
	struct vop_write_args ap = {
		.a_vp = vp,
		.a_uio = &uio,
		.a_ioflag = 0,
		.a_cred = NULL,
	};

	return hammer2_vop_write(&ap);
}
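
/*
 * Illustrative sketch (not part of the driver): how a makefs-side caller
 * might push a host buffer into an image file through hammer2_write(),
 * one HAMMER2_PBUFSIZE-sized chunk at a time, since hammer2_write()
 * asserts 0 < size <= HAMMER2_PBUFSIZE.  The vnode is assumed to have
 * been obtained elsewhere (e.g. via hammer2_nresolve() below); the
 * helper name is hypothetical.  Kept under #if 0 so it is never compiled.
 */
#if 0
static int
example_copy_in(struct m_vnode *vp, void *data, size_t len)
{
	off_t offset = 0;
	int error = 0;

	while (len > 0 && error == 0) {
		size_t n = (len > HAMMER2_PBUFSIZE) ? HAMMER2_PBUFSIZE : len;

		/* each call wraps one hammer2_vop_write() transaction */
		error = hammer2_write(vp, (char *)data + offset, n, offset);
		offset += n;
		len -= n;
	}
	return (error);
}
#endif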
#if 0
/*
 * Perform read operations on a file or symlink given an UNLOCKED
 * inode and uio.
 *
 * The passed ip is not locked.
 */
static
int
hammer2_read_file(hammer2_inode_t *ip, struct uio *uio, int seqcount)
{
	hammer2_off_t size;
	struct m_buf *bp;
	int error;

	error = 0;

	/*
	 * UIO read loop.
	 *
	 * WARNING! Assumes that the kernel interlocks size changes at the
	 *	    vnode level.
	 */
	hammer2_mtx_sh(&ip->lock);
	hammer2_mtx_sh(&ip->truncate_lock);
	size = ip->meta.size;
	hammer2_mtx_unlock(&ip->lock);

	while (uio->uio_resid > 0 && uio->uio_offset < size) {
		hammer2_key_t lbase;
		hammer2_key_t leof;
		int lblksize;
		int loff;
		int n;

		lblksize = hammer2_calc_logical(ip, uio->uio_offset,
						&lbase, &leof);

#if 1
		bp = NULL;
		error = cluster_readx(ip->vp, leof, lbase, lblksize,
				      B_NOTMETA | B_KVABIO,
				      uio->uio_resid,
				      seqcount * MAXBSIZE,
				      &bp);
#else
		if (uio->uio_segflg == UIO_NOCOPY) {
			bp = getblk(ip->vp, lbase, lblksize,
				    GETBLK_BHEAVY | GETBLK_KVABIO, 0);
			if (bp->b_flags & B_CACHE) {
				int i;
				int j = 0;
				if (bp->b_xio.xio_npages != 16)
					kprintf("NPAGES BAD\n");
				for (i = 0; i < bp->b_xio.xio_npages; ++i) {
					vm_page_t m;
					m = bp->b_xio.xio_pages[i];
					if (m == NULL || m->valid == 0) {
						kprintf("bp %016jx %016jx pg %d inv",
							lbase, leof, i);
						if (m)
							kprintf("m->object %p/%p", m->object, ip->vp->v_object);
						kprintf("\n");
						j = 1;
					}
				}
				if (j)
					kprintf("b_flags %08x, b_error %d\n", bp->b_flags, bp->b_error);
			}
			bqrelse(bp);
		}
		error = bread_kvabio(ip->vp, lbase, lblksize, &bp);
#endif
		if (error) {
			brelse(bp);
			break;
		}
		bkvasync(bp);
		loff = (int)(uio->uio_offset - lbase);
		n = lblksize - loff;
		if (n > uio->uio_resid)
			n = uio->uio_resid;
		if (n > size - uio->uio_offset)
			n = (int)(size - uio->uio_offset);
		bp->b_flags |= B_AGE;
		uiomovebp(bp, (char *)bp->b_data + loff, n, uio);
		bqrelse(bp);
	}
	hammer2_mtx_unlock(&ip->truncate_lock);

	return (error);
}
#endif
/*
 * Write to the file represented by the inode via the logical buffer cache.
 * The inode may represent a regular file or a symlink.
 *
 * The inode must not be locked.
 */
static
int
hammer2_write_file(hammer2_inode_t *ip, struct uio *uio,
		   int ioflag, int seqcount)
{
	hammer2_key_t old_eof;
	hammer2_key_t new_eof;
	struct m_buf *bp;
	int kflags;
	int error;
	int modified;

	/*
	 * Setup if append
	 *
	 * WARNING! Assumes that the kernel interlocks size changes at the
	 *	    vnode level.
	 */
	hammer2_mtx_ex(&ip->lock);
	hammer2_mtx_sh(&ip->truncate_lock);
	if (ioflag & IO_APPEND)
		uio->uio_offset = ip->meta.size;
	old_eof = ip->meta.size;

	/*
	 * Extend the file if necessary.  If the write fails at some point
	 * we will truncate it back down to cover as much as we were able
	 * to write.
	 *
	 * Doing this now makes it easier to calculate buffer sizes in
	 * the loop.
	 */
	kflags = 0;
	error = 0;
	modified = 0;

	if (uio->uio_offset + uio->uio_resid > old_eof) {
		new_eof = uio->uio_offset + uio->uio_resid;
		modified = 1;
		hammer2_extend_file(ip, new_eof);
		kflags |= NOTE_EXTEND;
	} else {
		new_eof = old_eof;
	}
	hammer2_mtx_unlock(&ip->lock);

	/*
	 * UIO write loop
	 */
	while (uio->uio_resid > 0) {
		hammer2_key_t lbase;
		int trivial;
		int endofblk;
		int lblksize;
		int loff;
		int n;

		/*
		 * Don't allow the buffer build to blow out the buffer
		 * cache.
		 */
		if ((ioflag & IO_RECURSE) == 0)
			bwillwrite(HAMMER2_PBUFSIZE);

		/*
		 * This nominally tells us how much we can cluster and
		 * what the logical buffer size needs to be.  Currently
		 * we don't try to cluster the write and just handle one
		 * block at a time.
		 */
		lblksize = hammer2_calc_logical(ip, uio->uio_offset,
						&lbase, NULL);
		loff = (int)(uio->uio_offset - lbase);

		KKASSERT(lblksize <= MAXBSIZE);

		/*
		 * Calculate bytes to copy this transfer and whether the
		 * copy completely covers the buffer or not.
		 */
		trivial = 0;
		n = lblksize - loff;
		if (n > uio->uio_resid) {
			n = uio->uio_resid;
			if (loff == lbase && uio->uio_offset + n == new_eof)
				trivial = 1;
			endofblk = 0;
		} else {
			if (loff == 0)
				trivial = 1;
			endofblk = 1;
		}
		if (lbase >= new_eof)
			trivial = 1;
		trivial = 1; /* force trivial for makefs */

		/*
		 * Get the buffer
		 */
		if (uio->uio_segflg == UIO_NOCOPY) {
			assert(0); /* no UIO_NOCOPY in makefs */
			/*
			 * Issuing a write with the same data backing the
			 * buffer.  Instantiate the buffer to collect the
			 * backing vm pages, then read-in any missing bits.
			 *
			 * This case is used by vop_stdputpages().
			 */
			bp = getblkx(ip->vp, lbase, lblksize,
				     GETBLK_BHEAVY | GETBLK_KVABIO, 0);
			if ((bp->b_flags & B_CACHE) == 0) {
				bqrelse(bp);
				error = bread_kvabio(ip->vp, lbase,
						     lblksize, &bp);
			}
		} else if (trivial) {
			/*
			 * Even though we are entirely overwriting the buffer
			 * we may still have to zero it out to avoid a
			 * mmap/write visibility issue.
			 */
			bp = getblkx(ip->vp, lbase, lblksize,
				     GETBLK_BHEAVY | GETBLK_KVABIO, 0);
			if ((bp->b_flags & B_CACHE) == 0)
				vfs_bio_clrbuf(bp);
		} else {
			assert(0); /* no partial write in makefs */
			/*
			 * Partial overwrite, read in any missing bits then
			 * replace the portion being written.
			 *
			 * (The strategy code will detect zero-fill physical
			 * blocks for this case).
			 */
			error = bread_kvabio(ip->vp, lbase, lblksize, &bp);
			if (error == 0)
				bheavy(bp);
		}

		if (error) {
			brelse(bp);
			break;
		}

		/*
		 * Ok, copy the data in
		 */
		bkvasync(bp);
		error = uiomovebp(bp, (char *)bp->b_data + loff, n, uio);
		kflags |= NOTE_WRITE;
		modified = 1;
		if (error) {
			brelse(bp);
			break;
		}

		/*
		 * WARNING: Pageout daemon will issue UIO_NOCOPY writes
		 *	    with IO_SYNC or IO_ASYNC set.  These writes
		 *	    must be handled as the pageout daemon expects.
		 *
		 * NOTE!    H2 relies on cluster_write() here because it
		 *	    cannot preallocate disk blocks at the logical
		 *	    level due to not knowing what the compression
		 *	    size will be at this time.
		 *
		 *	    We must use cluster_write() here and we depend
		 *	    on the write-behind feature to flush buffers
		 *	    appropriately.  If we let the buffer daemons do
		 *	    it the block allocations will be all over the
		 *	    map.
		 */
		if (1) {
			bp->b_cmd = BUF_CMD_WRITE;

			struct bio bio;
			bio.bio_buf = bp;
			bio.bio_offset = lbase;

			struct vop_strategy_args ap;
			ap.a_vp = ip->vp;
			ap.a_bio = &bio;

			error = hammer2_vop_strategy(&ap);
			assert(!error);

			brelse(bp);
		} else if (ioflag & IO_SYNC) {
			assert(0);
			bwrite(bp);
		} else if ((ioflag & IO_DIRECT) && endofblk) {
			assert(0);
			bawrite(bp);
		} else if (ioflag & IO_ASYNC) {
			assert(0);
			bawrite(bp);
		} else if (0 /*ip->vp->v_mount->mnt_flag & MNT_NOCLUSTERW*/) {
			assert(0);
			bdwrite(bp);
		} else {
			assert(0);
#if 0
#if 1
			bp->b_flags |= B_CLUSTEROK;
			cluster_write(bp, new_eof, lblksize, seqcount);
#else
			bp->b_flags |= B_CLUSTEROK;
			bdwrite(bp);
#endif
#endif
		}
	}

	/*
	 * Cleanup.  If we extended the file EOF but failed to write through,
	 * the entire write is a failure and we have to back-up.
	 */
	if (error && new_eof != old_eof) {
		hammer2_mtx_unlock(&ip->truncate_lock);
		hammer2_mtx_ex(&ip->lock);		/* note lock order */
		hammer2_mtx_ex(&ip->truncate_lock);	/* note lock order */
		hammer2_truncate_file(ip, old_eof);
		if (ip->flags & HAMMER2_INODE_MODIFIED)
			hammer2_inode_chain_sync(ip);
		hammer2_mtx_unlock(&ip->lock);
	} else if (modified) {
		struct m_vnode *vp = ip->vp;

		hammer2_mtx_ex(&ip->lock);
		hammer2_inode_modify(ip);
		if (uio->uio_segflg == UIO_NOCOPY) {
			assert(0); /* no UIO_NOCOPY in makefs */
			if (vp->v_flag & VLASTWRITETS) {
				ip->meta.mtime =
				    (unsigned long)vp->v_lastwrite_ts.tv_sec *
				    1000000 +
				    vp->v_lastwrite_ts.tv_nsec / 1000;
			}
		} else {
			hammer2_update_time(&ip->meta.mtime);
			vclrflags(vp, VLASTWRITETS);
		}

#if 0
		/*
		 * REMOVED - handled by hammer2_extend_file().  Do not issue
		 * a chain_sync() outside of a sync/fsync except for DIRECTDATA
		 * state changes.
		 *
		 * Under normal conditions we only issue a chain_sync if
		 * the inode's DIRECTDATA state changed.
		 */
		if (ip->flags & HAMMER2_INODE_RESIZED)
			hammer2_inode_chain_sync(ip);
#endif
		hammer2_mtx_unlock(&ip->lock);
		hammer2_knote(ip->vp, kflags);
	}
	hammer2_trans_assert_strategy(ip->pmp);
	hammer2_mtx_unlock(&ip->truncate_lock);

	return error;
}
/*
 * Truncate the size of a file.  The inode must be locked.
 *
 * We must unconditionally set HAMMER2_INODE_RESIZED to properly
 * ensure that any on-media data beyond the new file EOF has been destroyed.
 *
 * WARNING: nvtruncbuf() can only be safely called without the inode lock
 *	    held due to the way our write thread works.  If the truncation
 *	    occurs in the middle of a buffer, nvtruncbuf() is responsible
 *	    for dirtying that buffer and zeroing out trailing bytes.
 *
 * WARNING! Assumes that the kernel interlocks size changes at the
 *	    vnode level.
 *
 * WARNING! Caller assumes responsibility for removing dead blocks
 *	    if INODE_RESIZED is set.
 */
static
void
hammer2_truncate_file(hammer2_inode_t *ip, hammer2_key_t nsize)
{
	hammer2_key_t lbase;
	int nblksize;

	hammer2_mtx_unlock(&ip->lock);
	if (ip->vp) {
		nblksize = hammer2_calc_logical(ip, nsize, &lbase, NULL);
		nvtruncbuf(ip->vp, nsize,
			   nblksize, (int)nsize & (nblksize - 1),
			   0);
	}
	hammer2_mtx_ex(&ip->lock);
	KKASSERT((ip->flags & HAMMER2_INODE_RESIZED) == 0);
	ip->osize = ip->meta.size;
	ip->meta.size = nsize;
	atomic_set_int(&ip->flags, HAMMER2_INODE_RESIZED);
	hammer2_inode_modify(ip);
}
/*
 * Extend the size of a file.  The inode must be locked.
 *
 * Even though the file size is changing, we do not have to set the
 * INODE_RESIZED bit unless the file size crosses the EMBEDDED_BYTES
 * boundary.  When this occurs a hammer2_inode_chain_sync() is required
 * to prepare the inode cluster's indirect block table, otherwise
 * async execution of the strategy code will implode on us.
 *
 * WARNING! Assumes that the kernel interlocks size changes at the
 *	    vnode level.
 *
 * WARNING! Caller assumes responsibility for transitioning out
 *	    of the inode DIRECTDATA mode if INODE_RESIZED is set.
 */
static
void
hammer2_extend_file(hammer2_inode_t *ip, hammer2_key_t nsize)
{
	hammer2_key_t lbase;
	hammer2_key_t osize;
	int oblksize;
	int nblksize;
	int error;

	KKASSERT((ip->flags & HAMMER2_INODE_RESIZED) == 0);
	hammer2_inode_modify(ip);
	osize = ip->meta.size;
	ip->osize = osize;
	ip->meta.size = nsize;

	/*
	 * We must issue a chain_sync() when the DIRECTDATA state changes
	 * to prevent confusion between the flush code and the in-memory
	 * state.  This is not perfect because we are doing it outside of
	 * a sync/fsync operation, so it might not be fully synchronized
	 * with the meta-data topology flush.
	 *
	 * We must retain and re-dirty the buffer cache buffer containing
	 * the direct data so it can be written to a real block.  It should
	 * not be possible for a bread error to occur since the original data
	 * is extracted from the inode structure directly.
	 */
	if (osize <= HAMMER2_EMBEDDED_BYTES && nsize > HAMMER2_EMBEDDED_BYTES) {
		if (osize) {
			assert(0); /* no such transition in makefs */
			struct m_buf *bp;

			oblksize = hammer2_calc_logical(ip, 0, NULL, NULL);
			error = bread_kvabio(ip->vp, 0, oblksize, &bp);
			atomic_set_int(&ip->flags, HAMMER2_INODE_RESIZED);
			hammer2_inode_chain_sync(ip);
			if (error == 0) {
				bheavy(bp);
				bdwrite(bp);
			} else {
				brelse(bp);
			}
		} else {
			atomic_set_int(&ip->flags, HAMMER2_INODE_RESIZED);
			hammer2_inode_chain_sync(ip);
		}
	}
	hammer2_mtx_unlock(&ip->lock);
	if (ip->vp) {
		oblksize = hammer2_calc_logical(ip, osize, &lbase, NULL);
		nblksize = hammer2_calc_logical(ip, nsize, &lbase, NULL);
		nvextendbuf(ip->vp,
			    osize, nsize,
			    oblksize, nblksize,
			    -1, -1, 0);
	}
	hammer2_mtx_ex(&ip->lock);
}
static
int
hammer2_vop_nresolve(struct vop_nresolve_args *ap)
{
	hammer2_xop_nresolve_t *xop;
	hammer2_inode_t *ip;
	hammer2_inode_t *dip;
	struct namecache *ncp;
	struct m_vnode *vp;
	int error;

	dip = VTOI(ap->a_dvp);
	xop = hammer2_xop_alloc(dip, 0);

	ncp = ap->a_nch->ncp;
	hammer2_xop_setname(&xop->head, ncp->nc_name, ncp->nc_nlen);

	/*
	 * Note: In DragonFly the kernel handles '.' and '..'.
	 */
	hammer2_inode_lock(dip, HAMMER2_RESOLVE_SHARED);
	hammer2_xop_start(&xop->head, &hammer2_nresolve_desc);

	error = hammer2_xop_collect(&xop->head, 0);
	error = hammer2_error_to_errno(error);
	if (error) {
		ip = NULL;
	} else {
		ip = hammer2_inode_get(dip->pmp, &xop->head, -1, -1);
	}
	hammer2_inode_unlock(dip);

	/*
	 * Acquire the related vnode
	 *
	 * NOTE: For error processing, only ENOENT resolves the namecache
	 *	 entry to NULL, otherwise we just return the error and
	 *	 leave the namecache unresolved.
	 *
	 * WARNING: inode structure is locked exclusively via inode_get
	 *	    but chain was locked shared.  inode_unlock()
	 *	    will handle it properly.
	 */
	if (ip) {
		vp = hammer2_igetv(ip, &error);	/* error set to UNIX error */
		if (error == 0) {
			vn_unlock(vp);
			cache_setvp(ap->a_nch, vp);
			*ap->a_vpp = vp;
		} else if (error == ENOENT) {
			cache_setvp(ap->a_nch, NULL);
		}
		hammer2_inode_unlock(ip);

		/*
		 * The vp should not be released until after we've disposed
		 * of our locks, because it might cause vop_inactive() to
		 * be called.
		 */
		if (vp)
			vrele(vp);
	} else {
		error = ENOENT;
		cache_setvp(ap->a_nch, NULL);
	}
	hammer2_xop_retire(&xop->head, HAMMER2_XOPMASK_VOP);

	KASSERT(error || ap->a_nch->ncp->nc_vp != NULL,
		("resolve error %d/%p ap %p\n",
		 error, ap->a_nch->ncp->nc_vp, ap));

	return error;
}
int
hammer2_nresolve(struct m_vnode *dvp, struct m_vnode **vpp, char *name, int nlen)
{
	*vpp = NULL;
	struct namecache nc = {
		.nc_name = name,
		.nc_nlen = nlen,
	};
	struct nchandle nch = {
		.ncp = &nc,
	};
	struct vop_nresolve_args ap = {
		.a_nch = &nch,
		.a_dvp = dvp,
		.a_vpp = vpp,
	};

	return hammer2_vop_nresolve(&ap);
}
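
/*
 * Illustrative sketch (not part of the driver): resolving a two-component
 * path by walking it one name at a time with hammer2_nresolve().  The
 * starting directory vnode is assumed to reference the image root; the
 * helper name is hypothetical.  Kept under #if 0 so it is never compiled.
 */
#if 0
static int
example_lookup_path(struct m_vnode *rootvp, char *dname, char *fname,
		    struct m_vnode **vpp)
{
	struct m_vnode *dvp;
	int error;

	/* first component under the root directory */
	error = hammer2_nresolve(rootvp, &dvp, dname, strlen(dname));
	if (error)
		return (error);

	/* second component under the directory just resolved */
	return hammer2_nresolve(dvp, vpp, fname, strlen(fname));
}
#endif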
static
int
hammer2_vop_nlookupdotdot(struct vop_nlookupdotdot_args *ap)
{
#if 0
	hammer2_inode_t *dip;
	hammer2_tid_t inum;
	int error;

	dip = VTOI(ap->a_dvp);
	inum = dip->meta.iparent;
	*ap->a_vpp = NULL;

	if (inum) {
		error = hammer2_vfs_vget(ap->a_dvp->v_mount, NULL,
					 inum, ap->a_vpp);
	} else {
		error = ENOENT;
	}
	return error;
#endif
	return (EOPNOTSUPP);
}
static
int
hammer2_vop_nmkdir(struct vop_nmkdir_args *ap)
{
	hammer2_inode_t *dip;
	hammer2_inode_t *nip;
	struct namecache *ncp;
	const char *name;
	size_t name_len;
	hammer2_tid_t inum;
	int error;

	dip = VTOI(ap->a_dvp);
	if (dip->pmp->ronly || (dip->pmp->flags & HAMMER2_PMPF_EMERG))
		return (EROFS);
	if (hammer2_vfs_enospace(dip, 0, ap->a_cred) > 1)
		return (ENOSPC);

	ncp = ap->a_nch->ncp;
	name = ncp->nc_name;
	name_len = ncp->nc_nlen;

	hammer2_trans_init(dip->pmp, 0);

	inum = hammer2_trans_newinum(dip->pmp);

	/*
	 * Create the actual inode as a hidden file in the iroot, then
	 * create the directory entry.  The creation of the actual inode
	 * sets its nlinks to 1 which is the value we desire.
	 *
	 * dip must be locked before nip to avoid deadlock.
	 */
	hammer2_inode_lock(dip, 0);
	nip = hammer2_inode_create_normal(dip, ap->a_vap, ap->a_cred,
					  inum, &error);
	if (error) {
		error = hammer2_error_to_errno(error);
	} else {
		error = hammer2_dirent_create(dip, name, name_len,
					      nip->meta.inum, nip->meta.type);
		/* returns UNIX error code */
	}
	if (error) {
		if (nip) {
			hammer2_inode_unlink_finisher(nip, NULL);
			hammer2_inode_unlock(nip);
			nip = NULL;
		}
		*ap->a_vpp = NULL;
	} else {
		/*
		 * inode_depend() must occur before the igetv() because
		 * the igetv() can temporarily release the inode lock.
		 */
		hammer2_inode_depend(dip, nip);	/* before igetv */
		*ap->a_vpp = hammer2_igetv(nip, &error);
		hammer2_inode_unlock(nip);
	}

	/*
	 * Update dip's mtime
	 *
	 * We can use a shared inode lock and allow the meta.mtime update
	 * SMP race.  hammer2_inode_modify() is MPSAFE w/a shared lock.
	 */
	if (error == 0) {
		uint64_t mtime;

		/*hammer2_inode_lock(dip, HAMMER2_RESOLVE_SHARED);*/
		hammer2_update_time(&mtime);
		hammer2_inode_modify(dip);
		dip->meta.mtime = mtime;
		/*hammer2_inode_unlock(dip);*/
	}
	hammer2_inode_unlock(dip);

	hammer2_trans_done(dip->pmp, HAMMER2_TRANS_SIDEQ);

	if (error == 0) {
		cache_setunresolved(ap->a_nch);
		cache_setvp(ap->a_nch, *ap->a_vpp);
		hammer2_knote(ap->a_dvp, NOTE_WRITE | NOTE_LINK);
	}
	return error;
}
int
hammer2_nmkdir(struct m_vnode *dvp, struct m_vnode **vpp, char *name, int nlen)
{
	struct namecache nc = {
		.nc_name = name,
		.nc_nlen = nlen,
	};
	struct nchandle nch = {
		.ncp = &nc,
	};
	uid_t va_uid = VNOVAL; //getuid();
	uid_t va_gid = VNOVAL; //getgid();
	struct vattr va = {
		.va_type = VDIR,
		.va_mode = 0755, /* should be tunable */
		.va_uid = va_uid,
		.va_gid = va_gid,
	};
	struct vop_nmkdir_args ap = {
		.a_nch = &nch,
		.a_dvp = dvp,
		.a_vpp = vpp,
		.a_vap = &va,
	};

	return hammer2_vop_nmkdir(&ap);
}
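
/*
 * Illustrative sketch (not part of the driver): creating the nested tree
 * "a/b" under the image root by chaining hammer2_nmkdir() calls.  Each
 * call returns the vnode of the directory it created, which becomes the
 * parent for the next level; the helper name is hypothetical.  Kept under
 * #if 0 so it is never compiled.
 */
#if 0
static int
example_mkdir_tree(struct m_vnode *rootvp)
{
	struct m_vnode *avp, *bvp;
	int error;

	error = hammer2_nmkdir(rootvp, &avp, "a", 1);
	if (error)
		return (error);
	return hammer2_nmkdir(avp, &bvp, "b", 1);
}
#endif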
static
int
hammer2_vop_open(struct vop_open_args *ap)
{
#if 0
	return vop_stdopen(ap);
#endif
	return (EOPNOTSUPP);
}
/*
 * hammer2_vop_advlock { vp, id, op, fl, flags }
 */
static
int
hammer2_vop_advlock(struct vop_advlock_args *ap)
{
#if 0
	hammer2_inode_t *ip = VTOI(ap->a_vp);
	hammer2_off_t size;

	size = ip->meta.size;
	return (lf_advlock(ap, &ip->advlock, size));
#endif
	return (EOPNOTSUPP);
}
static
int
hammer2_vop_close(struct vop_close_args *ap)
{
#if 0
	return vop_stdclose(ap);
#endif
	return (EOPNOTSUPP);
}
/*
 * hammer2_vop_nlink { nch, dvp, vp, cred }
 *
 * Create a hardlink from (vp) to {dvp, nch}.
 */
static
int
hammer2_vop_nlink(struct vop_nlink_args *ap)
{
	hammer2_inode_t *tdip;	/* target directory to create link in */
	hammer2_inode_t *ip;	/* inode we are hardlinking to */
	struct namecache *ncp;
	const char *name;
	size_t name_len;
	int error;
	uint64_t cmtime;

	/* We know it's the same in makefs */
#if 0
	if (ap->a_dvp->v_mount != ap->a_vp->v_mount)
		return(EXDEV);
#endif

	tdip = VTOI(ap->a_dvp);
	if (tdip->pmp->ronly || (tdip->pmp->flags & HAMMER2_PMPF_EMERG))
		return (EROFS);
	if (hammer2_vfs_enospace(tdip, 0, ap->a_cred) > 1)
		return (ENOSPC);

	ncp = ap->a_nch->ncp;
	name = ncp->nc_name;
	name_len = ncp->nc_nlen;

	/*
	 * ip represents the file being hardlinked.  The file could be a
	 * normal file or a hardlink target if it has already been hardlinked.
	 * (with the new semantics, it will almost always be a hardlink
	 * target).
	 *
	 * Bump nlinks and potentially also create or move the hardlink
	 * target in the parent directory common to (ip) and (tdip).  The
	 * consolidation code can modify ip->cluster.  The returned cluster
	 * is locked.
	 */
	ip = VTOI(ap->a_vp);
	KASSERT(ip->pmp, ("ip->pmp is NULL %p %p", ip, ip->pmp));
	hammer2_trans_init(ip->pmp, 0);

	/*
	 * Target should be an indexed inode or there's no way we will ever
	 * be able to find it!
	 */
	KKASSERT((ip->meta.name_key & HAMMER2_DIRHASH_VISIBLE) == 0);

	error = 0;

	/*
	 * Can return NULL and error == EXDEV if the common parent
	 * crosses a directory with the xlink flag set.
	 */
	hammer2_inode_lock4(tdip, ip, NULL, NULL);

	hammer2_update_time(&cmtime);

	/*
	 * Create the directory entry and bump nlinks.
	 * Also update ip's ctime.
	 */
	if (error == 0) {
		error = hammer2_dirent_create(tdip, name, name_len,
					      ip->meta.inum, ip->meta.type);
		hammer2_inode_modify(ip);
		++ip->meta.nlinks;
		ip->meta.ctime = cmtime;
	}
	if (error == 0) {
		/*
		 * Update dip's [cm]time
		 */
		hammer2_inode_modify(tdip);
		tdip->meta.mtime = cmtime;
		tdip->meta.ctime = cmtime;

		cache_setunresolved(ap->a_nch);
		cache_setvp(ap->a_nch, ap->a_vp);
	}
	hammer2_inode_unlock(ip);
	hammer2_inode_unlock(tdip);

	hammer2_trans_done(ip->pmp, HAMMER2_TRANS_SIDEQ);
	hammer2_knote(ap->a_vp, NOTE_LINK);
	hammer2_knote(ap->a_dvp, NOTE_WRITE);

	return error;
}
int
hammer2_nlink(struct m_vnode *dvp, struct m_vnode *vp, char *name, int nlen)
{
	struct namecache nc = {
		.nc_name = name,
		.nc_nlen = nlen,
	};
	struct nchandle nch = {
		.ncp = &nc,
	};
	struct vop_nlink_args ap = {
		.a_nch = &nch,
		.a_dvp = dvp,
		.a_vp = vp,
	};

	return hammer2_vop_nlink(&ap);
}
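
/*
 * Illustrative sketch (not part of the driver): adding a second directory
 * entry for an already-resolved file vnode, i.e. the makefs-side
 * equivalent of "ln target linkname".  This bumps nlinks on vp's inode
 * and creates the new dirent in dvp; the helper name is hypothetical.
 * Kept under #if 0 so it is never compiled.
 */
#if 0
static int
example_hardlink(struct m_vnode *dvp, struct m_vnode *vp, char *linkname)
{
	return hammer2_nlink(dvp, vp, linkname, strlen(linkname));
}
#endif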
/*
 * hammer2_vop_ncreate { nch, dvp, vpp, cred, vap }
 *
 * The operating system has already ensured that the directory entry
 * does not exist and done all appropriate namespace locking.
 */
static
int
hammer2_vop_ncreate(struct vop_ncreate_args *ap)
{
	hammer2_inode_t *dip;
	hammer2_inode_t *nip;
	struct namecache *ncp;
	const char *name;
	size_t name_len;
	hammer2_tid_t inum;
	int error;

	dip = VTOI(ap->a_dvp);
	if (dip->pmp->ronly || (dip->pmp->flags & HAMMER2_PMPF_EMERG))
		return (EROFS);
	if (hammer2_vfs_enospace(dip, 0, ap->a_cred) > 1)
		return (ENOSPC);

	ncp = ap->a_nch->ncp;
	name = ncp->nc_name;
	name_len = ncp->nc_nlen;
	hammer2_trans_init(dip->pmp, 0);

	inum = hammer2_trans_newinum(dip->pmp);

	/*
	 * Create the actual inode as a hidden file in the iroot, then
	 * create the directory entry.  The creation of the actual inode
	 * sets its nlinks to 1 which is the value we desire.
	 *
	 * dip must be locked before nip to avoid deadlock.
	 */
	hammer2_inode_lock(dip, 0);
	nip = hammer2_inode_create_normal(dip, ap->a_vap, ap->a_cred,
					  inum, &error);

	if (error) {
		error = hammer2_error_to_errno(error);
	} else {
		error = hammer2_dirent_create(dip, name, name_len,
					      nip->meta.inum, nip->meta.type);
	}
	if (error) {
		if (nip) {
			hammer2_inode_unlink_finisher(nip, NULL);
			hammer2_inode_unlock(nip);
			nip = NULL;
		}
		*ap->a_vpp = NULL;
	} else {
		hammer2_inode_depend(dip, nip);	/* before igetv */
		*ap->a_vpp = hammer2_igetv(nip, &error);
		hammer2_inode_unlock(nip);
	}

	/*
	 * Update dip's mtime
	 */
	if (error == 0) {
		uint64_t mtime;

		/*hammer2_inode_lock(dip, HAMMER2_RESOLVE_SHARED);*/
		hammer2_update_time(&mtime);
		hammer2_inode_modify(dip);
		dip->meta.mtime = mtime;
		/*hammer2_inode_unlock(dip);*/
	}
	hammer2_inode_unlock(dip);

	hammer2_trans_done(dip->pmp, HAMMER2_TRANS_SIDEQ);

	if (error == 0) {
		cache_setunresolved(ap->a_nch);
		cache_setvp(ap->a_nch, *ap->a_vpp);
		hammer2_knote(ap->a_dvp, NOTE_WRITE);
	}
	return error;
}
int
hammer2_ncreate(struct m_vnode *dvp, struct m_vnode **vpp, char *name, int nlen)
{
	struct namecache nc = {
		.nc_name = name,
		.nc_nlen = nlen,
	};
	struct nchandle nch = {
		.ncp = &nc,
	};
	uid_t va_uid = VNOVAL; //getuid();
	uid_t va_gid = VNOVAL; //getgid();
	struct vattr va = {
		.va_type = VREG,
		.va_mode = 0644, /* should be tunable */
		.va_uid = va_uid,
		.va_gid = va_gid,
	};
	struct vop_ncreate_args ap = {
		.a_nch = &nch,
		.a_dvp = dvp,
		.a_vpp = vpp,
		.a_vap = &va,
	};

	return hammer2_vop_ncreate(&ap);
}
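
/*
 * Illustrative sketch (not part of the driver): the typical
 * create-then-populate sequence a makefs caller would use, combining
 * hammer2_ncreate() with hammer2_write().  Error handling is abbreviated
 * and the helper name is hypothetical.  Kept under #if 0 so it is never
 * compiled.
 */
#if 0
static int
example_create_file(struct m_vnode *dvp, char *name, void *data, size_t len)
{
	struct m_vnode *vp;
	int error;

	error = hammer2_ncreate(dvp, &vp, name, strlen(name));
	if (error == 0 && len > 0) {
		/* hammer2_write() requires len <= HAMMER2_PBUFSIZE */
		error = hammer2_write(vp, data, len, 0);
	}
	return (error);
}
#endif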
/*
 * Make a device node (typically a fifo)
 */
static
int
hammer2_vop_nmknod(struct vop_nmknod_args *ap)
{
	hammer2_inode_t *dip;
	hammer2_inode_t *nip;
	struct namecache *ncp;
	const char *name;
	size_t name_len;
	hammer2_tid_t inum;
	int error;

	dip = VTOI(ap->a_dvp);
	if (dip->pmp->ronly || (dip->pmp->flags & HAMMER2_PMPF_EMERG))
		return (EROFS);
	if (hammer2_vfs_enospace(dip, 0, ap->a_cred) > 1)
		return (ENOSPC);

	ncp = ap->a_nch->ncp;
	name = ncp->nc_name;
	name_len = ncp->nc_nlen;
	hammer2_trans_init(dip->pmp, 0);

	/*
	 * Create the device inode and then create the directory entry.
	 *
	 * dip must be locked before nip to avoid deadlock.
	 */
	inum = hammer2_trans_newinum(dip->pmp);

	hammer2_inode_lock(dip, 0);
	nip = hammer2_inode_create_normal(dip, ap->a_vap, ap->a_cred,
					  inum, &error);
	if (error == 0) {
		error = hammer2_dirent_create(dip, name, name_len,
					      nip->meta.inum, nip->meta.type);
	}
	if (error) {
		if (nip) {
			hammer2_inode_unlink_finisher(nip, NULL);
			hammer2_inode_unlock(nip);
			nip = NULL;
		}
		*ap->a_vpp = NULL;
	} else {
		hammer2_inode_depend(dip, nip);	/* before igetv */
		*ap->a_vpp = hammer2_igetv(nip, &error);
		hammer2_inode_unlock(nip);
	}

	/*
	 * Update dip's mtime
	 */
	if (error == 0) {
		uint64_t mtime;

		/*hammer2_inode_lock(dip, HAMMER2_RESOLVE_SHARED);*/
		hammer2_update_time(&mtime);
		hammer2_inode_modify(dip);
		dip->meta.mtime = mtime;
		/*hammer2_inode_unlock(dip);*/
	}
	hammer2_inode_unlock(dip);

	hammer2_trans_done(dip->pmp, HAMMER2_TRANS_SIDEQ);

	if (error == 0) {
		cache_setunresolved(ap->a_nch);
		cache_setvp(ap->a_nch, *ap->a_vpp);
		hammer2_knote(ap->a_dvp, NOTE_WRITE);
	}
	return error;
}
int
hammer2_nmknod(struct m_vnode *dvp, struct m_vnode **vpp, char *name, int nlen,
	       int type)
{
	struct namecache nc = {
		.nc_name = name,
		.nc_nlen = nlen,
	};
	struct nchandle nch = {
		.ncp = &nc,
	};
	uid_t va_uid = VNOVAL; //getuid();
	uid_t va_gid = VNOVAL; //getgid();
	struct vattr va = {
		.va_type = type,
		.va_mode = 0644, /* should be tunable */
		.va_uid = va_uid,
		.va_gid = va_gid,
	};
	struct vop_nmknod_args ap = {
		.a_nch = &nch,
		.a_dvp = dvp,
		.a_vpp = vpp,
		.a_vap = &va,
	};

	return hammer2_vop_nmknod(&ap);
}
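
/*
 * Illustrative sketch (not part of the driver): creating a fifo node via
 * hammer2_nmknod().  VFIFO is the vtype a caller would pass for a fifo;
 * the helper name is hypothetical.  Kept under #if 0 so it is never
 * compiled.
 */
#if 0
static int
example_mkfifo(struct m_vnode *dvp, char *name)
{
	struct m_vnode *vp;

	return hammer2_nmknod(dvp, &vp, name, strlen(name), VFIFO);
}
#endif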
/*
 * hammer2_vop_nsymlink { nch, dvp, vpp, cred, vap, target }
 */
static
int
hammer2_vop_nsymlink(struct vop_nsymlink_args *ap)
{
	hammer2_inode_t *dip;
	hammer2_inode_t *nip;
	struct namecache *ncp;
	const char *name;
	size_t name_len;
	hammer2_tid_t inum;
	int error;

	dip = VTOI(ap->a_dvp);
	if (dip->pmp->ronly || (dip->pmp->flags & HAMMER2_PMPF_EMERG))
		return (EROFS);
	if (hammer2_vfs_enospace(dip, 0, ap->a_cred) > 1)
		return (ENOSPC);

	ncp = ap->a_nch->ncp;
	name = ncp->nc_name;
	name_len = ncp->nc_nlen;
	hammer2_trans_init(dip->pmp, 0);

	ap->a_vap->va_type = VLNK;	/* enforce type */

	/*
	 * Create the softlink as an inode and then create the directory
	 * entry.
	 *
	 * dip must be locked before nip to avoid deadlock.
	 */
	inum = hammer2_trans_newinum(dip->pmp);

	hammer2_inode_lock(dip, 0);
	nip = hammer2_inode_create_normal(dip, ap->a_vap, ap->a_cred,
					  inum, &error);
	if (error == 0) {
		error = hammer2_dirent_create(dip, name, name_len,
					      nip->meta.inum, nip->meta.type);
	}
	if (error) {
		if (nip) {
			hammer2_inode_unlink_finisher(nip, NULL);
			hammer2_inode_unlock(nip);
			nip = NULL;
		}
		*ap->a_vpp = NULL;
		hammer2_inode_unlock(dip);
		hammer2_trans_done(dip->pmp, HAMMER2_TRANS_SIDEQ);
		return error;
	}
	hammer2_inode_depend(dip, nip);	/* before igetv */
	*ap->a_vpp = hammer2_igetv(nip, &error);

	/*
	 * Build the softlink (~like file data) and finalize the namecache.
	 */
	if (error == 0) {
		size_t bytes;
		struct uio auio;
		struct iovec aiov;

		bytes = strlen(ap->a_target);

		hammer2_inode_unlock(nip);
		bzero(&auio, sizeof(auio));
		bzero(&aiov, sizeof(aiov));
		auio.uio_iov = &aiov;
		auio.uio_segflg = UIO_SYSSPACE;
		auio.uio_rw = UIO_WRITE;
		auio.uio_resid = bytes;
		auio.uio_iovcnt = 1;
		auio.uio_td = curthread;
		aiov.iov_base = ap->a_target;
		aiov.iov_len = bytes;
		error = hammer2_write_file(nip, &auio, IO_APPEND, 0);
		/* XXX handle error */
		error = 0;
	} else {
		hammer2_inode_unlock(nip);
	}

	/*
	 * Update dip's mtime
	 */
	if (error == 0) {
		uint64_t mtime;

		/*hammer2_inode_lock(dip, HAMMER2_RESOLVE_SHARED);*/
		hammer2_update_time(&mtime);
		hammer2_inode_modify(dip);
		dip->meta.mtime = mtime;
		/*hammer2_inode_unlock(dip);*/
	}
	hammer2_inode_unlock(dip);

	hammer2_trans_done(dip->pmp, HAMMER2_TRANS_SIDEQ);

	/*
	 * Finalize namecache
	 */
	if (error == 0) {
		cache_setunresolved(ap->a_nch);
		cache_setvp(ap->a_nch, *ap->a_vpp);
		hammer2_knote(ap->a_dvp, NOTE_WRITE);
	}
	return error;
}
int
hammer2_nsymlink(struct m_vnode *dvp, struct m_vnode **vpp, char *name, int nlen,
		 char *target)
{
	struct namecache nc = {
		.nc_name = name,
		.nc_nlen = nlen,
	};
	struct nchandle nch = {
		.ncp = &nc,
	};
	uid_t va_uid = VNOVAL; //getuid();
	uid_t va_gid = VNOVAL; //getgid();
	struct vattr va = {
		.va_type = VLNK, /* enforced by hammer2_vop_nsymlink() */
		.va_mode = 0755, /* should be tunable */
		.va_uid = va_uid,
		.va_gid = va_gid,
	};
	struct vop_nsymlink_args ap = {
		.a_nch = &nch,
		.a_dvp = dvp,
		.a_vpp = vpp,
		.a_vap = &va,
		.a_target = target,
	};

	return hammer2_vop_nsymlink(&ap);
}
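
/*
 * Illustrative sketch (not part of the driver): creating a symlink entry,
 * the makefs-side equivalent of "ln -s target name".  The target string
 * becomes the softlink's file data; the helper name is hypothetical.
 * Kept under #if 0 so it is never compiled.
 */
#if 0
static int
example_symlink(struct m_vnode *dvp, char *name, char *target)
{
	struct m_vnode *vp;

	return hammer2_nsymlink(dvp, &vp, name, strlen(name), target);
}
#endif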
/*
 * hammer2_vop_nremove { nch, dvp, cred }
 */
static
int
hammer2_vop_nremove(struct vop_nremove_args *ap)
{
#if 0
	hammer2_xop_unlink_t *xop;
	hammer2_inode_t *dip;
	hammer2_inode_t *ip;
	struct m_vnode *vprecycle;
	struct namecache *ncp;
	int error;

	dip = VTOI(ap->a_dvp);
	if (dip->pmp->ronly)
		return (EROFS);
#if 0
	/* allow removals, except user to also bulkfree */
	if (hammer2_vfs_enospace(dip, 0, ap->a_cred) > 1)
		return (ENOSPC);
#endif

	ncp = ap->a_nch->ncp;

	if (hammer2_debug_inode && dip->meta.inum == hammer2_debug_inode) {
		kprintf("hammer2: attempt to delete inside debug inode: %s\n",
			ncp->nc_name);
		while (hammer2_debug_inode &&
		       dip->meta.inum == hammer2_debug_inode) {
			tsleep(&hammer2_debug_inode, 0, "h2debug", hz*5);
		}
	}

	hammer2_trans_init(dip->pmp, 0);
	hammer2_inode_lock(dip, 0);

	/*
	 * The unlink XOP unlinks the path from the directory and
	 * locates and returns the cluster associated with the real inode.
	 * We have to handle nlinks here on the frontend.
	 */
	xop = hammer2_xop_alloc(dip, HAMMER2_XOP_MODIFYING);
	hammer2_xop_setname(&xop->head, ncp->nc_name, ncp->nc_nlen);

	xop->isdir = 0;
	xop->dopermanent = 0;
	hammer2_xop_start(&xop->head, &hammer2_unlink_desc);

	/*
	 * Collect the real inode and adjust nlinks, destroy the real
	 * inode if nlinks transitions to 0 and it was the real inode
	 * (else it has already been removed).
	 */
	error = hammer2_xop_collect(&xop->head, 0);
	error = hammer2_error_to_errno(error);
	vprecycle = NULL;

	if (error == 0) {
		ip = hammer2_inode_get(dip->pmp, &xop->head, -1, -1);
		hammer2_xop_retire(&xop->head, HAMMER2_XOPMASK_VOP);
		if (ip) {
			if (hammer2_debug_inode &&
			    ip->meta.inum == hammer2_debug_inode) {
				kprintf("hammer2: attempt to delete debug "
					"inode!\n");
				while (hammer2_debug_inode &&
				       ip->meta.inum == hammer2_debug_inode) {
					tsleep(&hammer2_debug_inode, 0,
					       "h2debug", hz*5);
				}
			}
			hammer2_inode_unlink_finisher(ip, &vprecycle);
			hammer2_inode_depend(dip, ip); /* after modified */
			hammer2_inode_unlock(ip);
		}
	} else {
		hammer2_xop_retire(&xop->head, HAMMER2_XOPMASK_VOP);
	}

	/*
	 * Update dip's mtime
	 */
	if (error == 0) {
		uint64_t mtime;

		/*hammer2_inode_lock(dip, HAMMER2_RESOLVE_SHARED);*/
		hammer2_update_time(&mtime);
		hammer2_inode_modify(dip);
		dip->meta.mtime = mtime;
		/*hammer2_inode_unlock(dip);*/
	}
	hammer2_inode_unlock(dip);

	hammer2_trans_done(dip->pmp, HAMMER2_TRANS_SIDEQ);
	if (error == 0) {
		cache_unlink(ap->a_nch);
		hammer2_knote(ap->a_dvp, NOTE_WRITE);
	}
	if (vprecycle)
		hammer2_inode_vprecycle(vprecycle);

	return (error);
#endif
	return (EOPNOTSUPP);
}
/*
 * hammer2_vop_nrmdir { nch, dvp, cred }
 */
static
int
hammer2_vop_nrmdir(struct vop_nrmdir_args *ap)
{
#if 0
	hammer2_xop_unlink_t *xop;
	hammer2_inode_t *dip;
	hammer2_inode_t *ip;
	struct namecache *ncp;
	struct m_vnode *vprecycle;
	int error;

	dip = VTOI(ap->a_dvp);
	if (dip->pmp->ronly)
		return (EROFS);
#if 0
	/* allow removals, except user to also bulkfree */
	if (hammer2_vfs_enospace(dip, 0, ap->a_cred) > 1)
		return (ENOSPC);
#endif

	hammer2_trans_init(dip->pmp, 0);
	hammer2_inode_lock(dip, 0);

	xop = hammer2_xop_alloc(dip, HAMMER2_XOP_MODIFYING);

	ncp = ap->a_nch->ncp;
	hammer2_xop_setname(&xop->head, ncp->nc_name, ncp->nc_nlen);
	xop->isdir = 1;
	xop->dopermanent = 0;
	hammer2_xop_start(&xop->head, &hammer2_unlink_desc);

	/*
	 * Collect the real inode and adjust nlinks, destroy the real
	 * inode if nlinks transitions to 0 and it was the real inode
	 * (else it has already been removed).
	 */
	error = hammer2_xop_collect(&xop->head, 0);
	error = hammer2_error_to_errno(error);
	vprecycle = NULL;

	if (error == 0) {
		ip = hammer2_inode_get(dip->pmp, &xop->head, -1, -1);
		hammer2_xop_retire(&xop->head, HAMMER2_XOPMASK_VOP);
		if (ip) {
			hammer2_inode_unlink_finisher(ip, &vprecycle);
			hammer2_inode_depend(dip, ip); /* after modified */
			hammer2_inode_unlock(ip);
		}
	} else {
		hammer2_xop_retire(&xop->head, HAMMER2_XOPMASK_VOP);
	}

	/*
	 * Update dip's mtime
	 */
	if (error == 0) {
		uint64_t mtime;

		/*hammer2_inode_lock(dip, HAMMER2_RESOLVE_SHARED);*/
		hammer2_update_time(&mtime);
		hammer2_inode_modify(dip);
		dip->meta.mtime = mtime;
		/*hammer2_inode_unlock(dip);*/
	}
	hammer2_inode_unlock(dip);

	hammer2_trans_done(dip->pmp, HAMMER2_TRANS_SIDEQ);
	if (error == 0) {
		cache_unlink(ap->a_nch);
		hammer2_knote(ap->a_dvp, NOTE_WRITE | NOTE_LINK);
	}
	if (vprecycle)
		hammer2_inode_vprecycle(vprecycle);
	return (error);
#endif
	return (EOPNOTSUPP);
}

/*
 * hammer2_vop_nrename { fnch, tnch, fdvp, tdvp, cred }
 */
static
int
hammer2_vop_nrename(struct vop_nrename_args *ap)
{
#if 0
	struct namecache *fncp;
	struct namecache *tncp;
	hammer2_inode_t *fdip;	/* source directory */
	hammer2_inode_t *tdip;	/* target directory */
	hammer2_inode_t *ip;	/* file being renamed */
	hammer2_inode_t *tip;	/* replaced target during rename or NULL */
	struct m_vnode *vprecycle;
	const char *fname;
	size_t fname_len;
	const char *tname;
	size_t tname_len;
	int error;
	int update_tdip;
	int update_fdip;
	hammer2_key_t tlhc;

	if (ap->a_fdvp->v_mount != ap->a_tdvp->v_mount)
		return (EXDEV);
	if (ap->a_fdvp->v_mount != ap->a_fnch->ncp->nc_vp->v_mount)
		return (EXDEV);

	fdip = VTOI(ap->a_fdvp);	/* source directory */
	tdip = VTOI(ap->a_tdvp);	/* target directory */

	if (fdip->pmp->ronly || (fdip->pmp->flags & HAMMER2_PMPF_EMERG))
		return (EROFS);
	if (hammer2_vfs_enospace(fdip, 0, ap->a_cred) > 1)
		return (ENOSPC);

	fncp = ap->a_fnch->ncp;		/* entry name in source */
	fname = fncp->nc_name;
	fname_len = fncp->nc_nlen;

	tncp = ap->a_tnch->ncp;		/* entry name in target */
	tname = tncp->nc_name;
	tname_len = tncp->nc_nlen;

	hammer2_trans_init(tdip->pmp, 0);

	update_tdip = 0;
	update_fdip = 0;

	ip = VTOI(fncp->nc_vp);
	hammer2_inode_ref(ip);		/* extra ref */

	/*
	 * Lookup the target name to determine if a directory entry
	 * is being overwritten.  We only hold related inode locks
	 * temporarily, the operating system is expected to protect
	 * against rename races.
	 */
	tip = tncp->nc_vp ? VTOI(tncp->nc_vp) : NULL;
	if (tip)
		hammer2_inode_ref(tip);	/* extra ref */

	/*
	 * Can return NULL and error == EXDEV if the common parent
	 * crosses a directory with the xlink flag set.
	 *
	 * For now try to avoid deadlocks with a simple pointer address
	 * test.  (tip) can be NULL.
	 */
	error = 0;
	{
		hammer2_inode_t *ip1 = fdip;
		hammer2_inode_t *ip2 = tdip;
		hammer2_inode_t *ip3 = ip;
		hammer2_inode_t *ip4 = tip;	/* may be NULL */

		if (fdip > tdip) {
			ip1 = tdip;
			ip2 = fdip;
		}
		if (tip && ip > tip) {
			ip3 = tip;
			ip4 = ip;
		}
		hammer2_inode_lock4(ip1, ip2, ip3, ip4);
	}
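
	/*
	 * NOTE: After the swaps above, the two directory inodes and the
	 *	 two file inodes are each ordered by ascending address
	 *	 (ip4 may be NULL), so all renames acquire the four locks
	 *	 in one global order and cannot deadlock against each
	 *	 other on these inodes.
	 */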

	/*
	 * Resolve the collision space for (tdip, tname, tname_len)
	 *
	 * tdip must be held exclusively locked to prevent races since
	 * multiple filenames can end up in the same collision space.
	 */
	{
		hammer2_xop_scanlhc_t *sxop;
		hammer2_tid_t lhcbase;

		tlhc = hammer2_dirhash(tname, tname_len);
		lhcbase = tlhc;
		sxop = hammer2_xop_alloc(tdip, HAMMER2_XOP_MODIFYING);
		sxop->lhc = tlhc;
		hammer2_xop_start(&sxop->head, &hammer2_scanlhc_desc);
		while ((error = hammer2_xop_collect(&sxop->head, 0)) == 0) {
			if (tlhc != sxop->head.cluster.focus->bref.key)
				break;
			++tlhc;
		}
		error = hammer2_error_to_errno(error);
		hammer2_xop_retire(&sxop->head, HAMMER2_XOPMASK_VOP);

		if (error) {
			if (error != ENOENT)
				goto done2;
			++tlhc;
			error = 0;
		}
		if ((lhcbase ^ tlhc) & ~HAMMER2_DIRHASH_LOMASK) {
			error = ENOSPC;
			goto done2;
		}
	}
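
	/*
	 * NOTE: The scan above starts at the name's hash and bumps tlhc
	 *	 past every occupied key.  Once (lhcbase ^ tlhc) escapes
	 *	 HAMMER2_DIRHASH_LOMASK the per-hash collision window is
	 *	 exhausted, and the operation fails with ENOSPC rather
	 *	 than encroaching on another hash's key space.
	 */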

	/*
	 * Ready to go, issue the rename to the backend.  Note that meta-data
	 * updates to the related inodes occur separately from the rename
	 * operation.
	 *
	 * NOTE: While it is not necessary to update ip->meta.name*, doing
	 *	 so aids catastrophic recovery and debugging.
	 */
	if (error == 0) {
		hammer2_xop_nrename_t *xop4;

		xop4 = hammer2_xop_alloc(fdip, HAMMER2_XOP_MODIFYING);
		xop4->lhc = tlhc;
		xop4->ip_key = ip->meta.name_key;
		hammer2_xop_setip2(&xop4->head, ip);
		hammer2_xop_setip3(&xop4->head, tdip);
		if (tip && tip->meta.type == HAMMER2_OBJTYPE_DIRECTORY)
			hammer2_xop_setip4(&xop4->head, tip);
		hammer2_xop_setname(&xop4->head, fname, fname_len);
		hammer2_xop_setname2(&xop4->head, tname, tname_len);
		hammer2_xop_start(&xop4->head, &hammer2_nrename_desc);

		error = hammer2_xop_collect(&xop4->head, 0);
		error = hammer2_error_to_errno(error);
		hammer2_xop_retire(&xop4->head, HAMMER2_XOPMASK_VOP);

		if (error == ENOENT)
			error = 0;

		/*
		 * Update inode meta-data.
		 *
		 * WARNING! The in-memory inode (ip) structure does not
		 *	    maintain a copy of the inode's filename buffer.
		 */
		if (error == 0 &&
		    (ip->meta.name_key & HAMMER2_DIRHASH_VISIBLE)) {
			hammer2_inode_modify(ip);
			ip->meta.name_len = tname_len;
			ip->meta.name_key = tlhc;
		}
		if (error == 0) {
			hammer2_inode_modify(ip);
			ip->meta.iparent = tdip->meta.inum;
		}
		update_fdip = 1;
		update_tdip = 1;
	}

done2:
	/*
	 * If no error, the backend has replaced the target directory entry.
	 * We must adjust nlinks on the original replace target if it exists.
	 */
	vprecycle = NULL;
	if (error == 0 && tip) {
		hammer2_inode_unlink_finisher(tip, &vprecycle);
	}

	/*
	 * Update directory mtimes to reflect that something changed.
	 */
	if (update_fdip || update_tdip) {
		uint64_t mtime;

		hammer2_update_time(&mtime);
		if (update_fdip) {
			hammer2_inode_modify(fdip);
			fdip->meta.mtime = mtime;
		}
		if (update_tdip) {
			hammer2_inode_modify(tdip);
			tdip->meta.mtime = mtime;
		}
	}
	if (tip) {
		hammer2_inode_unlock(tip);
		hammer2_inode_drop(tip);
	}
	hammer2_inode_unlock(ip);
	hammer2_inode_unlock(tdip);
	hammer2_inode_unlock(fdip);
	hammer2_inode_drop(ip);
	hammer2_trans_done(tdip->pmp, HAMMER2_TRANS_SIDEQ);

	/*
	 * Issue the namecache update after unlocking all the internal
	 * hammer2 structures, otherwise we might deadlock.
	 *
	 * WARNING! The target namespace must be updated atomically,
	 *	    and we depend on cache_rename() to handle that for
	 *	    us.  Do not do a separate cache_unlink() because
	 *	    that leaves a small window of opportunity for other
	 *	    threads to allocate the target namespace before we
	 *	    manage to complete our rename.
	 *
	 * WARNING! cache_rename() (and cache_unlink()) will properly
	 *	    set VREF_FINALIZE on any attached vnode.  Do not
	 *	    call cache_setunresolved() manually before-hand as
	 *	    this will prevent the flag from being set later via
	 *	    cache_rename().  If VREF_FINALIZE is not properly set
	 *	    and the inode is no longer in the topology, related
	 *	    chains can remain dirty indefinitely.
	 */
	if (error == 0 && tip) {
		/*cache_unlink(ap->a_tnch); see above */
		/*cache_setunresolved(ap->a_tnch); see above */
	}
	if (error == 0) {
		cache_rename(ap->a_fnch, ap->a_tnch);
		hammer2_knote(ap->a_fdvp, NOTE_WRITE);
		hammer2_knote(ap->a_tdvp, NOTE_WRITE);
		hammer2_knote(fncp->nc_vp, NOTE_RENAME);
	}
	if (vprecycle)
		hammer2_inode_vprecycle(vprecycle);

	return (error);
#endif
	return (EOPNOTSUPP);
}

/*
 * hammer2_vop_ioctl { vp, command, data, fflag, cred }
 */
static
int
hammer2_vop_ioctl(struct vop_ioctl_args *ap)
{
#if 0
	hammer2_inode_t *ip;
	int error;

	ip = VTOI(ap->a_vp);

	error = hammer2_ioctl(ip, ap->a_command, (void *)ap->a_data,
			      ap->a_fflag, ap->a_cred);
	return (error);
#endif
	return (EOPNOTSUPP);
}

static
int
hammer2_vop_mountctl(struct vop_mountctl_args *ap)
{
#if 0
	struct mount *mp;
	hammer2_pfs_t *pmp;
	int rc;

	switch (ap->a_op) {
	case (MOUNTCTL_SET_EXPORT):
		mp = ap->a_head.a_ops->head.vv_mount;
		pmp = MPTOPMP(mp);

		if (ap->a_ctllen != sizeof(struct export_args))
			rc = (EINVAL);
		else
			rc = vfs_export(mp, &pmp->export,
					(const struct export_args *)ap->a_ctl);
		break;
	default:
		rc = vop_stdmountctl(ap);
		break;
	}
	return (rc);
#endif
	return (EOPNOTSUPP);
}

/*
 * KQFILTER
 */
static void filt_hammer2detach(struct knote *kn);
static int filt_hammer2read(struct knote *kn, long hint);
static int filt_hammer2write(struct knote *kn, long hint);
static int filt_hammer2vnode(struct knote *kn, long hint);

static struct filterops hammer2read_filtops =
	{ FILTEROP_ISFD | FILTEROP_MPSAFE,
	  NULL, filt_hammer2detach, filt_hammer2read };
static struct filterops hammer2write_filtops =
	{ FILTEROP_ISFD | FILTEROP_MPSAFE,
	  NULL, filt_hammer2detach, filt_hammer2write };
static struct filterops hammer2vnode_filtops =
	{ FILTEROP_ISFD | FILTEROP_MPSAFE,
	  NULL, filt_hammer2detach, filt_hammer2vnode };
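
/*
 * NOTE: The positional initializers above fill in the leading members of
 *	 struct filterops (f_flags, f_attach, f_detach, f_event); the NULL
 *	 attach slot is fine because hammer2_vop_kqfilter() inserts the
 *	 knote itself.
 */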

static
int
hammer2_vop_kqfilter(struct vop_kqfilter_args *ap)
{
#if 0
	struct m_vnode *vp = ap->a_vp;
	struct knote *kn = ap->a_kn;

	switch (kn->kn_filter) {
	case EVFILT_READ:
		kn->kn_fop = &hammer2read_filtops;
		break;
	case EVFILT_WRITE:
		kn->kn_fop = &hammer2write_filtops;
		break;
	case EVFILT_VNODE:
		kn->kn_fop = &hammer2vnode_filtops;
		break;
	default:
		return (EOPNOTSUPP);
	}

	kn->kn_hook = (caddr_t)vp;

	knote_insert(&vp->v_pollinfo.vpi_kqinfo.ki_note, kn);

	return (0);
#endif
	return (EOPNOTSUPP);
}

#if 0
static void
filt_hammer2detach(struct knote *kn)
{
	struct m_vnode *vp = (void *)kn->kn_hook;

	knote_remove(&vp->v_pollinfo.vpi_kqinfo.ki_note, kn);
}

static int
filt_hammer2read(struct knote *kn, long hint)
{
	struct m_vnode *vp = (void *)kn->kn_hook;
	hammer2_inode_t *ip = VTOI(vp);
	off_t off;

	if (hint == NOTE_REVOKE) {
		kn->kn_flags |= (EV_EOF | EV_NODATA | EV_ONESHOT);
		return (1);
	}
	off = ip->meta.size - kn->kn_fp->f_offset;
	kn->kn_data = (off < INTPTR_MAX) ? off : INTPTR_MAX;
	if (kn->kn_sfflags & NOTE_OLDAPI)
		return (1);
	return (kn->kn_data != 0);
}
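
/*
 * NOTE: kn_data reports how many bytes are readable from the knote
 *	 holder's current file offset; the clamp above guards against
 *	 overflow where intptr_t (the type of kn_data) is narrower
 *	 than off_t.
 */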

static int
filt_hammer2write(struct knote *kn, long hint)
{
	if (hint == NOTE_REVOKE)
		kn->kn_flags |= (EV_EOF | EV_NODATA | EV_ONESHOT);
	kn->kn_data = 0;
	return (1);
}

static int
filt_hammer2vnode(struct knote *kn, long hint)
{
	if (kn->kn_sfflags & hint)
		kn->kn_fflags |= hint;
	if (hint == NOTE_REVOKE) {
		kn->kn_flags |= (EV_EOF | EV_NODATA);
		return (1);
	}
	return (kn->kn_fflags != 0);
}
#endif

/*
 * FIFO VOPS
 */
static
int
hammer2_vop_markatime(struct vop_markatime_args *ap)
{
#if 0
	hammer2_inode_t *ip;
	struct m_vnode *vp;

	vp = ap->a_vp;
	ip = VTOI(vp);

	if (ip->pmp->ronly || (ip->pmp->flags & HAMMER2_PMPF_EMERG))
		return (EROFS);
	return (0);
#endif
	return (EOPNOTSUPP);
}

static
int
hammer2_vop_fifokqfilter(struct vop_kqfilter_args *ap)
{
#if 0
	int error;

	error = VOCALL(&fifo_vnode_vops, &ap->a_head);
	if (error)
		error = hammer2_vop_kqfilter(ap);
	return (error);
#endif
	return (EOPNOTSUPP);
}

/*
 * VOPS vector
 */
struct vop_ops hammer2_vnode_vops = {
	.vop_default = vop_defaultop,
	.vop_fsync = hammer2_vop_fsync,
	.vop_getpages = vop_stdgetpages,
	.vop_putpages = vop_stdputpages,
	.vop_access = hammer2_vop_access,
	.vop_advlock = hammer2_vop_advlock,
	.vop_close = hammer2_vop_close,
	.vop_nlink = hammer2_vop_nlink,
	.vop_ncreate = hammer2_vop_ncreate,
	.vop_nsymlink = hammer2_vop_nsymlink,
	.vop_nremove = hammer2_vop_nremove,
	.vop_nrmdir = hammer2_vop_nrmdir,
	.vop_nrename = hammer2_vop_nrename,
	.vop_getattr = hammer2_vop_getattr,
	.vop_getattr_lite = hammer2_vop_getattr_lite,
	.vop_setattr = hammer2_vop_setattr,
	.vop_readdir = hammer2_vop_readdir,
	.vop_readlink = hammer2_vop_readlink,
	.vop_read = hammer2_vop_read,
	.vop_write = hammer2_vop_write,
	.vop_open = hammer2_vop_open,
	.vop_inactive = hammer2_vop_inactive,
	.vop_reclaim = hammer2_vop_reclaim,
	.vop_nresolve = hammer2_vop_nresolve,
	.vop_nlookupdotdot = hammer2_vop_nlookupdotdot,
	.vop_nmkdir = hammer2_vop_nmkdir,
	.vop_nmknod = hammer2_vop_nmknod,
	.vop_ioctl = hammer2_vop_ioctl,
	.vop_mountctl = hammer2_vop_mountctl,
	.vop_bmap = hammer2_vop_bmap,
	.vop_strategy = hammer2_vop_strategy,
	.vop_kqfilter = hammer2_vop_kqfilter
};

struct vop_ops hammer2_spec_vops = {
	.vop_default = vop_defaultop,
	.vop_fsync = hammer2_vop_fsync,
	.vop_read = vop_stdnoread,
	.vop_write = vop_stdnowrite,
	.vop_access = hammer2_vop_access,
	.vop_close = hammer2_vop_close,
	.vop_markatime = hammer2_vop_markatime,
	.vop_getattr = hammer2_vop_getattr,
	.vop_inactive = hammer2_vop_inactive,
	.vop_reclaim = hammer2_vop_reclaim,
	.vop_setattr = hammer2_vop_setattr
};

struct vop_ops hammer2_fifo_vops = {
	.vop_default = fifo_vnoperate,
	.vop_fsync = hammer2_vop_fsync,
#if 0
	.vop_read = hammer2_vop_fiforead,
	.vop_write = hammer2_vop_fifowrite,
#endif
	.vop_access = hammer2_vop_access,
#if 0
	.vop_close = hammer2_vop_fifoclose,
#endif
	.vop_markatime = hammer2_vop_markatime,
	.vop_getattr = hammer2_vop_getattr,
	.vop_inactive = hammer2_vop_inactive,
	.vop_reclaim = hammer2_vop_reclaim,
	.vop_setattr = hammer2_vop_setattr,
	.vop_kqfilter = hammer2_vop_fifokqfilter
};
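
/*
 * NOTE: The three tables above dispatch the vnode types hammer2 exposes:
 *	 hammer2_vnode_vops for regular files and directories,
 *	 hammer2_spec_vops for device special files, and hammer2_fifo_vops
 *	 for fifos, which default to fifo_vnoperate and override only the
 *	 entry points hammer2 must handle itself.
 */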