/*
 * Copyright (c) 2011-2015 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@dragonflybsd.org>
 * by Venkatesh Srinivas <vsrinivas@dragonflybsd.org>
 * by Daniel Flores (GSOC 2013 - mentored by Matthew Dillon, compression)
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
/*
 * Kernel Filesystem interface
 *
 * NOTE! local ipdata pointers must be reloaded on any modifying operation
 *	 to the inode as its underlying chain may have changed.
 */
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/fcntl.h>
#include <sys/buf.h>
#include <sys/proc.h>
#include <sys/namei.h>
#include <sys/mount.h>
#include <sys/vnode.h>
#include <sys/mountctl.h>
#include <sys/dirent.h>
#include <sys/uio.h>
#include <sys/objcache.h>
#include <sys/event.h>
#include <vfs/fifofs/fifo.h>

#include "hammer2.h"
static int hammer2_read_file(hammer2_inode_t *ip, struct uio *uio,
			int seqcount);
static int hammer2_write_file(hammer2_inode_t *ip, struct uio *uio,
			int ioflag, int seqcount);
static void hammer2_extend_file(hammer2_inode_t *ip, hammer2_key_t nsize);
static void hammer2_truncate_file(hammer2_inode_t *ip, hammer2_key_t nsize);

struct objcache *cache_xops;
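/*
 * cache_xops backs the XOP frames allocated via hammer2_xop_alloc()
 * throughout this file; it is assumed to be initialized during VFS
 * setup outside this file.
 */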
static void
hammer2_knote(struct vnode *vp, int flags)
{
	if (flags)
		KNOTE(&vp->v_pollinfo.vpi_kqinfo.ki_note, flags);
}
/*
 * Last reference to a vnode is going away but it is still cached.
 */
static
int
hammer2_vop_inactive(struct vop_inactive_args *ap)
{
	hammer2_inode_t *ip;
	struct vnode *vp;

	vp = ap->a_vp;
	ip = VTOI(vp);

	/*
	 * Check for deleted inodes and recycle immediately on the last
	 * release.  Be sure to destroy any left-over buffer cache buffers
	 * so we do not waste time trying to flush them.
	 *
	 * Note that deleting the file block chains under the inode chain
	 * would just be a waste of energy, so don't do it.
	 *
	 * WARNING: nvtruncbuf() can only be safely called without the inode
	 *	    lock held due to the way our write thread works.
	 */
	if (ip->flags & HAMMER2_INODE_ISUNLINKED) {
		hammer2_key_t lbase;
		int nblksize;

		/*
		 * Detect updates to the embedded data which may be
		 * synchronized by the strategy code.  Simply mark the
		 * inode modified so it gets picked up by our normal flush.
		 */
		nblksize = hammer2_calc_logical(ip, 0, &lbase, NULL);
		nvtruncbuf(vp, 0, nblksize, 0, 0);
		vrecycle(vp);
	}
	return (0);
}
/*
 * Reclaim a vnode so that it can be reused; after the inode is
 * disassociated, the filesystem must manage it alone.
 */
static
int
hammer2_vop_reclaim(struct vop_reclaim_args *ap)
{
	hammer2_inode_t *ip;
	hammer2_pfs_t *pmp;
	struct vnode *vp;

	vp = ap->a_vp;
	ip = VTOI(vp);
	if (ip == NULL)
		return (0);
	pmp = ip->pmp;

	/*
	 * The final close of a deleted file or directory marks it for
	 * destruction.  The DELETED flag allows the flusher to shortcut
	 * any modified blocks still unflushed (that is, just ignore them).
	 *
	 * HAMMER2 usually does not try to optimize the freemap by returning
	 * deleted blocks to it as it does not usually know how many snapshots
	 * might be referencing portions of the file/dir.
	 */
	vp->v_data = NULL;
	ip->vp = NULL;

	/*
	 * NOTE! We do not attempt to flush chains here, flushing is
	 *	 really fragile and could also deadlock.
	 */

	/*
	 * This occurs if the inode was unlinked while open.  Reclamation of
	 * these inodes requires processing we cannot safely do here so add
	 * the inode to the sideq in that situation.
	 *
	 * A modified inode may require chain synchronization which will no
	 * longer be driven by a sync or fsync without the vnode, also use
	 * the sideq for that.
	 *
	 * A reclaim can occur at any time so we cannot safely start a
	 * transaction to handle reclamation of unlinked files.  Instead,
	 * the ip is left with a reference and placed on a linked list and
	 * handled later on.
	 */
	if ((ip->flags & (HAMMER2_INODE_ISUNLINKED |
			  HAMMER2_INODE_MODIFIED |
			  HAMMER2_INODE_RESIZED)) &&
	    (ip->flags & HAMMER2_INODE_ISDELETED) == 0) {
		hammer2_inode_sideq_t *ipul;

		ipul = kmalloc(sizeof(*ipul), pmp->minode, M_WAITOK | M_ZERO);
		ipul->ip = ip;

		hammer2_spin_ex(&pmp->list_spin);
		if ((ip->flags & HAMMER2_INODE_ONSIDEQ) == 0) {
			atomic_set_int(&ip->flags, HAMMER2_INODE_ONSIDEQ);
			TAILQ_INSERT_TAIL(&pmp->sideq, ipul, entry);
			hammer2_spin_unex(&pmp->list_spin);
		} else {
			hammer2_spin_unex(&pmp->list_spin);
			kfree(ipul, pmp->minode);
			hammer2_inode_drop(ip);		/* vp ref */
		}
		/* retain ref from vp for ipul */
	} else {
		hammer2_inode_drop(ip);			/* vp ref */
	}

	/*
	 * XXX handle background sync when ip dirty, kernel will no longer
	 * notify us regarding this inode because there is no longer a
	 * vnode attached to it.
	 */

	return (0);
}
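/*
 * The sideq built up above is drained by hammer2_inode_run_sideq(),
 * which the directory-modifying VOPs below (nremove, nrmdir, nrename)
 * invoke once their transactions wrap up.
 */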
static
int
hammer2_vop_fsync(struct vop_fsync_args *ap)
{
	hammer2_inode_t *ip;
	struct vnode *vp;

	vp = ap->a_vp;
	ip = VTOI(vp);

#if 0
	/* XXX can't do this yet */
	hammer2_trans_init(ip->pmp, HAMMER2_TRANS_ISFLUSH);
	vfsync(vp, ap->a_waitfor, 1, NULL, NULL);
#endif
	hammer2_trans_init(ip->pmp, 0);
	vfsync(vp, ap->a_waitfor, 1, NULL, NULL);

	/*
	 * Calling chain_flush here creates a lot of duplicative
	 * COW operations due to non-optimal vnode ordering.
	 *
	 * Only do it for an actual fsync() syscall.  The other forms
	 * which call this function will eventually call chain_flush
	 * on the volume root as a catch-all, which is far more optimal.
	 */
	hammer2_inode_lock(ip, 0);
	if (ip->flags & HAMMER2_INODE_MODIFIED)
		hammer2_inode_chain_sync(ip);
	hammer2_inode_unlock(ip);
	hammer2_trans_done(ip->pmp);

	return (0);
}
static
int
hammer2_vop_access(struct vop_access_args *ap)
{
	hammer2_inode_t *ip = VTOI(ap->a_vp);
	uid_t uid;
	gid_t gid;
	int error;

	hammer2_inode_lock(ip, HAMMER2_RESOLVE_SHARED);
	uid = hammer2_to_unix_xid(&ip->meta.uid);
	gid = hammer2_to_unix_xid(&ip->meta.gid);
	error = vop_helper_access(ap, uid, gid, ip->meta.mode, ip->meta.uflags);
	hammer2_inode_unlock(ip);

	return (error);
}
static
int
hammer2_vop_getattr(struct vop_getattr_args *ap)
{
	hammer2_pfs_t *pmp;
	hammer2_inode_t *ip;
	struct vnode *vp;
	struct vattr *vap;
	hammer2_chain_t *chain;
	int i;

	vp = ap->a_vp;
	vap = ap->a_vap;

	ip = VTOI(vp);
	pmp = ip->pmp;

	hammer2_inode_lock(ip, HAMMER2_RESOLVE_SHARED);

	vap->va_fsid = pmp->mp->mnt_stat.f_fsid.val[0];
	vap->va_fileid = ip->meta.inum;
	vap->va_mode = ip->meta.mode;
	vap->va_nlink = ip->meta.nlinks;
	vap->va_uid = hammer2_to_unix_xid(&ip->meta.uid);
	vap->va_gid = hammer2_to_unix_xid(&ip->meta.gid);
	vap->va_rmajor = 0;
	vap->va_rminor = 0;
	vap->va_size = ip->meta.size;	/* protected by shared lock */
	vap->va_blocksize = HAMMER2_PBUFSIZE;
	vap->va_flags = ip->meta.uflags;
	hammer2_time_to_timespec(ip->meta.ctime, &vap->va_ctime);
	hammer2_time_to_timespec(ip->meta.mtime, &vap->va_mtime);
	hammer2_time_to_timespec(ip->meta.mtime, &vap->va_atime);
	vap->va_gen = 1;
	vap->va_bytes = 0;
	if (ip->meta.type == HAMMER2_OBJTYPE_DIRECTORY) {
		/*
		 * Can't really calculate directory use sans the files under
		 * it, just assume one block for now.
		 */
		vap->va_bytes += HAMMER2_INODE_BYTES;
	} else {
		for (i = 0; i < ip->cluster.nchains; ++i) {
			if ((chain = ip->cluster.array[i].chain) != NULL) {
				if (vap->va_bytes <
				    chain->bref.embed.stats.data_count) {
					vap->va_bytes =
					   chain->bref.embed.stats.data_count;
				}
			}
		}
	}
	vap->va_type = hammer2_get_vtype(ip->meta.type);
	vap->va_filerev = 0;
	vap->va_uid_uuid = ip->meta.uid;
	vap->va_gid_uuid = ip->meta.gid;
	vap->va_vaflags = VA_UID_UUID_VALID | VA_GID_UUID_VALID |
			  VA_FSID_UUID_VALID;

	hammer2_inode_unlock(ip);

	return (0);
}
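/*
 * Note that getattr reports ip->meta.mtime for va_atime as well:
 * HAMMER2 does not maintain a separate access time (see the disabled
 * atime block in hammer2_vop_setattr() below).
 */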
static
int
hammer2_vop_setattr(struct vop_setattr_args *ap)
{
	hammer2_inode_t *ip;
	struct vnode *vp;
	struct vattr *vap;
	int error;
	int kflags = 0;
	uint64_t ctime;

	vp = ap->a_vp;
	vap = ap->a_vap;
	hammer2_update_time(&ctime);

	ip = VTOI(vp);

	if (ip->pmp->ronly)
		return (EROFS);
	if (hammer2_vfs_enospace(ip, 0, ap->a_cred) > 1)
		return (ENOSPC);

	hammer2_pfs_memory_wait(ip->pmp);
	hammer2_trans_init(ip->pmp, 0);
	hammer2_inode_lock(ip, 0);
	error = 0;

	if (vap->va_flags != VNOVAL) {
		uint32_t flags;

		flags = ip->meta.uflags;
		error = vop_helper_setattr_flags(&flags, vap->va_flags,
				     hammer2_to_unix_xid(&ip->meta.uid),
				     ap->a_cred);
		if (error == 0) {
			if (ip->meta.uflags != flags) {
				hammer2_inode_modify(ip);
				ip->meta.uflags = flags;
				ip->meta.ctime = ctime;
				kflags |= NOTE_ATTRIB;
			}
			if (ip->meta.uflags & (IMMUTABLE | APPEND)) {
				error = 0;
				goto done;
			}
		}
		goto done;
	}
	if (ip->meta.uflags & (IMMUTABLE | APPEND)) {
		error = EPERM;
		goto done;
	}
	if (vap->va_uid != (uid_t)VNOVAL || vap->va_gid != (gid_t)VNOVAL) {
		mode_t cur_mode = ip->meta.mode;
		uid_t cur_uid = hammer2_to_unix_xid(&ip->meta.uid);
		gid_t cur_gid = hammer2_to_unix_xid(&ip->meta.gid);
		uuid_t uuid_uid;
		uuid_t uuid_gid;

		error = vop_helper_chown(ap->a_vp, vap->va_uid, vap->va_gid,
					 ap->a_cred,
					 &cur_uid, &cur_gid, &cur_mode);
		if (error == 0) {
			hammer2_guid_to_uuid(&uuid_uid, cur_uid);
			hammer2_guid_to_uuid(&uuid_gid, cur_gid);
			if (bcmp(&uuid_uid, &ip->meta.uid, sizeof(uuid_uid)) ||
			    bcmp(&uuid_gid, &ip->meta.gid, sizeof(uuid_gid)) ||
			    ip->meta.mode != cur_mode) {
				hammer2_inode_modify(ip);
				ip->meta.uid = uuid_uid;
				ip->meta.gid = uuid_gid;
				ip->meta.mode = cur_mode;
				ip->meta.ctime = ctime;
			}
			kflags |= NOTE_ATTRIB;
		}
	}

	/*
	 * Resize the file
	 */
	if (vap->va_size != VNOVAL && ip->meta.size != vap->va_size) {
		switch (vp->v_type) {
		case VREG:
			if (vap->va_size == ip->meta.size)
				break;
			if (vap->va_size < ip->meta.size) {
				hammer2_mtx_ex(&ip->truncate_lock);
				hammer2_truncate_file(ip, vap->va_size);
				hammer2_mtx_unlock(&ip->truncate_lock);
				kflags |= NOTE_WRITE;
			} else {
				hammer2_extend_file(ip, vap->va_size);
				kflags |= NOTE_WRITE | NOTE_EXTEND;
			}
			hammer2_inode_modify(ip);
			ip->meta.mtime = ctime;
			break;
		default:
			error = EINVAL;
			goto done;
		}
	}
#if 0
	/* atime not supported */
	if (vap->va_atime.tv_sec != VNOVAL) {
		hammer2_inode_modify(ip);
		ip->meta.atime = hammer2_timespec_to_time(&vap->va_atime);
		kflags |= NOTE_ATTRIB;
	}
#endif
	if (vap->va_mode != (mode_t)VNOVAL) {
		mode_t cur_mode = ip->meta.mode;
		uid_t cur_uid = hammer2_to_unix_xid(&ip->meta.uid);
		gid_t cur_gid = hammer2_to_unix_xid(&ip->meta.gid);

		error = vop_helper_chmod(ap->a_vp, vap->va_mode, ap->a_cred,
					 cur_uid, cur_gid, &cur_mode);
		if (error == 0 && ip->meta.mode != cur_mode) {
			hammer2_inode_modify(ip);
			ip->meta.mode = cur_mode;
			ip->meta.ctime = ctime;
			kflags |= NOTE_ATTRIB;
		}
	}

	if (vap->va_mtime.tv_sec != VNOVAL) {
		hammer2_inode_modify(ip);
		ip->meta.mtime = hammer2_timespec_to_time(&vap->va_mtime);
		kflags |= NOTE_ATTRIB;
	}

done:
	/*
	 * If a truncation occurred we must call inode_fsync() now in order
	 * to trim the related data chains, otherwise a later expansion can
	 * cause havoc.
	 *
	 * If an extend occurred that changed the DIRECTDATA state, we must
	 * call inode_fsync now in order to prepare the inode's indirect
	 * block table.
	 */
	if (ip->flags & HAMMER2_INODE_RESIZED)
		hammer2_inode_chain_sync(ip);

	/*
	 * Cleanup.
	 */
	hammer2_inode_unlock(ip);
	hammer2_trans_done(ip->pmp);
	hammer2_knote(ip->vp, kflags);

	return (error);
}
static
int
hammer2_vop_readdir(struct vop_readdir_args *ap)
{
	hammer2_xop_readdir_t *xop;
	hammer2_blockref_t bref;
	hammer2_inode_t *ip;
	hammer2_tid_t inum;
	hammer2_key_t lkey;
	struct uio *uio;
	off_t *cookies;
	hammer2_key_t saveoff;
	int cookie_index;
	int ncookies;
	int error;
	int eofflag;
	int r;

	ip = VTOI(ap->a_vp);
	uio = ap->a_uio;
	saveoff = uio->uio_offset;
	eofflag = 0;
	error = 0;
	cookie_index = 0;

	/*
	 * Set up directory entry cookies if requested
	 */
	if (ap->a_ncookies) {
		ncookies = uio->uio_resid / 16 + 1;
		if (ncookies > 1024)
			ncookies = 1024;
		cookies = kmalloc(ncookies * sizeof(off_t), M_TEMP, M_WAITOK);
	} else {
		ncookies = -1;
		cookies = NULL;
	}

	hammer2_inode_lock(ip, HAMMER2_RESOLVE_SHARED);

	/*
	 * Handle artificial entries.  To ensure that only positive 64 bit
	 * quantities are returned to userland we always strip off bit 63.
	 * The hash code is designed such that codes 0x0000-0x7FFF are not
	 * used, allowing us to use these codes for artificial entries.
	 *
	 * Entry 0 is used for '.' and entry 1 is used for '..'.  Do not
	 * allow '..' to cross the mount point into (e.g.) the super-root.
	 */
	if (saveoff == 0) {
		inum = ip->meta.inum & HAMMER2_DIRHASH_USERMSK;
		r = vop_write_dirent(&error, uio, inum, DT_DIR, 1, ".");
		if (r)
			goto done;
		if (cookies)
			cookies[cookie_index] = saveoff;
		++saveoff;
		++cookie_index;
		if (cookie_index == ncookies)
			goto done;
	}

	if (saveoff == 1) {
		/*
		 * Be careful with lockorder when accessing ".."
		 *
		 * (ip is the current dir. xip is the parent dir).
		 */
		inum = ip->meta.inum & HAMMER2_DIRHASH_USERMSK;
		if (ip != ip->pmp->iroot)
			inum = ip->meta.iparent & HAMMER2_DIRHASH_USERMSK;
		r = vop_write_dirent(&error, uio, inum, DT_DIR, 2, "..");
		if (r)
			goto done;
		if (cookies)
			cookies[cookie_index] = saveoff;
		++saveoff;
		++cookie_index;
		if (cookie_index == ncookies)
			goto done;
	}

	lkey = saveoff | HAMMER2_DIRHASH_VISIBLE;
	if (hammer2_debug & 0x0020)
		kprintf("readdir: lkey %016jx\n", lkey);
	if (error)
		goto done;

	/*
	 * Use XOP for cluster scan.
	 *
	 * parent is the inode cluster, already locked for us.  Don't
	 * double lock shared locks as this will screw up upgrades.
	 */
	xop = hammer2_xop_alloc(ip, 0);
	xop->lkey = lkey;
	hammer2_xop_start(&xop->head, hammer2_xop_readdir);

	for (;;) {
		const hammer2_inode_data_t *ripdata;
		const char *dname;
		int dtype;

		error = hammer2_xop_collect(&xop->head, 0);
		error = hammer2_error_to_errno(error);
		if (error)
			break;
		if (cookie_index == ncookies)
			break;
		if (hammer2_debug & 0x0020)
			kprintf("cluster chain %p %p\n",
				xop->head.cluster.focus,
				(xop->head.cluster.focus ?
				 xop->head.cluster.focus->data : (void *)-1));
		hammer2_cluster_bref(&xop->head.cluster, &bref);

		if (bref.type == HAMMER2_BREF_TYPE_INODE) {
			ripdata =
			   &hammer2_cluster_rdata(&xop->head.cluster)->ipdata;
			dtype = hammer2_get_dtype(ripdata->meta.type);
			saveoff = bref.key & HAMMER2_DIRHASH_USERMSK;
			r = vop_write_dirent(&error, uio,
					     ripdata->meta.inum &
					      HAMMER2_DIRHASH_USERMSK,
					     dtype,
					     ripdata->meta.name_len,
					     ripdata->filename);
			if (r)
				break;
			if (cookies)
				cookies[cookie_index] = saveoff;
			++cookie_index;
		} else if (bref.type == HAMMER2_BREF_TYPE_DIRENT) {
			dtype = hammer2_get_dtype(bref.embed.dirent.type);
			saveoff = bref.key & HAMMER2_DIRHASH_USERMSK;
			if (bref.embed.dirent.namlen <=
			    sizeof(bref.check.buf)) {
				dname = bref.check.buf;
			} else {
				dname = hammer2_cluster_rdata(
					    &xop->head.cluster)->buf;
			}
			r = vop_write_dirent(&error, uio,
					     bref.embed.dirent.inum,
					     dtype,
					     bref.embed.dirent.namlen,
					     dname);
			if (r)
				break;
			if (cookies)
				cookies[cookie_index] = saveoff;
			++cookie_index;
		} else {
			/* XXX chain error */
			kprintf("bad chain type readdir %d\n", bref.type);
		}
	}
	hammer2_xop_retire(&xop->head, HAMMER2_XOPMASK_VOP);
	if (error == ENOENT) {
		error = 0;
		eofflag = 1;
		saveoff = (hammer2_key_t)-1;
	} else {
		saveoff = bref.key & HAMMER2_DIRHASH_USERMSK;
	}
done:
	hammer2_inode_unlock(ip);
	if (ap->a_eofflag)
		*ap->a_eofflag = eofflag;
	if (hammer2_debug & 0x0020)
		kprintf("readdir: done at %016jx\n", saveoff);
	uio->uio_offset = saveoff & ~HAMMER2_DIRHASH_VISIBLE;
	if (error && cookie_index == 0) {
		if (cookies) {
			kfree(cookies, M_TEMP);
			*ap->a_ncookies = 0;
			*ap->a_cookies = NULL;
		}
	} else {
		if (cookies) {
			*ap->a_ncookies = cookie_index;
			*ap->a_cookies = cookies;
		}
	}
	return (error);
}
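/*
 * Cookie layout used by the readdir code above, per the comments in
 * this file: a directory offset/cookie is derived from the 64-bit
 * directory hash key.
 *
 *	bit 63		HAMMER2_DIRHASH_VISIBLE, stripped before the
 *			offset is returned so cookies stay positive.
 *	0x0000-0x7FFF	never produced by the hash; reserved for the
 *			artificial entries ('.' == 0, '..' == 1).
 *
 * saveoff = bref.key & HAMMER2_DIRHASH_USERMSK therefore restarts a
 * scan exactly at the last real entry returned.
 */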
/*
 * hammer2_vop_readlink { vp, uio, cred }
 */
static
int
hammer2_vop_readlink(struct vop_readlink_args *ap)
{
	struct vnode *vp;
	hammer2_inode_t *ip;
	int error;

	vp = ap->a_vp;
	if (vp->v_type != VLNK)
		return (EINVAL);
	ip = VTOI(vp);

	error = hammer2_read_file(ip, ap->a_uio, 0);
	return (error);
}
static
int
hammer2_vop_read(struct vop_read_args *ap)
{
	struct vnode *vp;
	hammer2_inode_t *ip;
	struct uio *uio;
	int error;
	int seqcount;
	int bigread;

	/*
	 * Read operations supported on this vnode?
	 */
	vp = ap->a_vp;
	if (vp->v_type != VREG)
		return (EINVAL);

	ip = VTOI(vp);
	uio = ap->a_uio;
	error = 0;

	seqcount = ap->a_ioflag >> 16;
	bigread = (uio->uio_resid > 100 * 1024 * 1024);

	error = hammer2_read_file(ip, uio, seqcount);
	return (error);
}
static
int
hammer2_vop_write(struct vop_write_args *ap)
{
	hammer2_inode_t *ip;
	thread_t td;
	struct vnode *vp;
	struct uio *uio;
	int error;
	int seqcount;
	int ioflag;

	/*
	 * Write operations supported on this vnode?
	 */
	vp = ap->a_vp;
	if (vp->v_type != VREG)
		return (EINVAL);

	ip = VTOI(vp);
	ioflag = ap->a_ioflag;
	uio = ap->a_uio;
	error = 0;
	if (ip->pmp->ronly)
		return (EROFS);
	switch (hammer2_vfs_enospace(ip, uio->uio_resid, ap->a_cred)) {
	case 2:
		return (ENOSPC);
	case 1:
		ioflag |= IO_DIRECT;	/* semi-synchronous */
		break;
	default:
		break;
	}

	seqcount = ioflag >> 16;

	/*
	 * Check resource limit
	 */
	if (uio->uio_resid > 0 && (td = uio->uio_td) != NULL && td->td_proc &&
	    uio->uio_offset + uio->uio_resid >
	     td->td_proc->p_rlimit[RLIMIT_FSIZE].rlim_cur) {
		lwpsignal(td->td_proc, td->td_lwp, SIGXFSZ);
		return (EFBIG);
	}

	/*
	 * The transaction interlocks against flush initiations
	 * (note: but will run concurrently with the actual flush).
	 *
	 * To avoid deadlocking against the VM system, we must flag any
	 * transaction related to the buffer cache or other direct
	 * VM page manipulation.
	 */
	if (uio->uio_segflg == UIO_NOCOPY)
		hammer2_trans_init(ip->pmp, HAMMER2_TRANS_BUFCACHE);
	else
		hammer2_trans_init(ip->pmp, 0);
	error = hammer2_write_file(ip, uio, ioflag, seqcount);
	hammer2_trans_done(ip->pmp);

	return (error);
}
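/*
 * The "seqcount = ioflag >> 16" extraction in the read and write paths
 * above follows the kernel convention of passing its sequential-access
 * estimate in the upper 16 bits of the ioflag word.
 */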
/*
 * Perform read operations on a file or symlink given an UNLOCKED
 * ip and uio.
 *
 * The passed ip is not locked.
 */
static
int
hammer2_read_file(hammer2_inode_t *ip, struct uio *uio, int seqcount)
{
	hammer2_off_t size;
	struct buf *bp;
	int error;

	error = 0;

	/*
	 * UIO read loop.
	 *
	 * WARNING! Assumes that the kernel interlocks size changes at the
	 *	    vnode level.
	 */
	hammer2_mtx_sh(&ip->lock);
	hammer2_mtx_sh(&ip->truncate_lock);
	size = ip->meta.size;
	hammer2_mtx_unlock(&ip->lock);

	while (uio->uio_resid > 0 && uio->uio_offset < size) {
		hammer2_key_t lbase;
		hammer2_key_t leof;
		int lblksize;
		int loff;
		int n;

		lblksize = hammer2_calc_logical(ip, uio->uio_offset,
						&lbase, &leof);

#if 1
		error = cluster_read(ip->vp, leof, lbase, lblksize,
				     uio->uio_resid, seqcount * MAXBSIZE,
				     &bp);
#else
		if (uio->uio_segflg == UIO_NOCOPY) {
			bp = getblk(ip->vp, lbase, lblksize, GETBLK_BHEAVY, 0);
			if (bp->b_flags & B_CACHE) {
				int i;
				int j = 0;

				if (bp->b_xio.xio_npages != 16)
					kprintf("NPAGES BAD\n");
				for (i = 0; i < bp->b_xio.xio_npages; ++i) {
					vm_page_t m;

					m = bp->b_xio.xio_pages[i];
					if (m == NULL || m->valid == 0) {
						kprintf("bp %016jx %016jx pg %d inv",
							lbase, leof, i);
						kprintf("m->object %p/%p",
							m->object,
							ip->vp->v_object);
						kprintf("\n");
						j = 1;
					}
				}
				if (j)
					kprintf("b_flags %08x, b_error %d\n",
						bp->b_flags, bp->b_error);
			}
			bqrelse(bp);
		}
		error = bread(ip->vp, lbase, lblksize, &bp);
#endif
		if (error) {
			brelse(bp);
			break;
		}
		loff = (int)(uio->uio_offset - lbase);
		n = lblksize - loff;
		if (n > uio->uio_resid)
			n = uio->uio_resid;
		if (n > size - uio->uio_offset)
			n = (int)(size - uio->uio_offset);
		bp->b_flags |= B_AGE;
		uiomovebp(bp, (char *)bp->b_data + loff, n, uio);
		bqrelse(bp);
	}
	hammer2_mtx_unlock(&ip->truncate_lock);

	return (error);
}
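/*
 * Worked example for the loop above (assuming a fully expanded file
 * using the maximum 64KB logical block size): a read at uio_offset
 * 0x1A000 yields lbase = 0x10000 and lblksize = 0x10000 from
 * hammer2_calc_logical(), so loff = 0xA000 and at most 0x6000 bytes
 * are copied out of this buffer before the loop advances to the next
 * logical block.
 */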
/*
 * Write to the file represented by the inode via the logical buffer cache.
 * The inode may represent a regular file or a symlink.
 *
 * The inode must not be locked.
 */
static
int
hammer2_write_file(hammer2_inode_t *ip, struct uio *uio,
		   int ioflag, int seqcount)
{
	hammer2_key_t old_eof;
	hammer2_key_t new_eof;
	struct buf *bp;
	int kflags;
	int error;
	int modified;

	kflags = 0;
	error = 0;
	modified = 0;

	/*
	 * Setup if append
	 *
	 * WARNING! Assumes that the kernel interlocks size changes at the
	 *	    vnode level.
	 */
	hammer2_mtx_ex(&ip->lock);
	hammer2_mtx_sh(&ip->truncate_lock);
	if (ioflag & IO_APPEND)
		uio->uio_offset = ip->meta.size;
	old_eof = ip->meta.size;

	/*
	 * Extend the file if necessary.  If the write fails at some point
	 * we will truncate it back down to cover as much as we were able
	 * to write.
	 *
	 * Doing this now makes it easier to calculate buffer sizes in
	 * the loop.
	 */
	if (uio->uio_offset + uio->uio_resid > old_eof) {
		new_eof = uio->uio_offset + uio->uio_resid;
		modified = 1;
		hammer2_extend_file(ip, new_eof);
		kflags |= NOTE_EXTEND;
	} else {
		new_eof = old_eof;
	}
	hammer2_mtx_unlock(&ip->lock);

	/*
	 * UIO write loop
	 */
	while (uio->uio_resid > 0) {
		hammer2_key_t lbase;
		int trivial;
		int endofblk;
		int lblksize;
		int loff;
		int n;

		/*
		 * Don't allow the buffer build to blow out the buffer
		 * cache.
		 */
		if ((ioflag & IO_RECURSE) == 0)
			bwillwrite(HAMMER2_PBUFSIZE);

		/*
		 * This nominally tells us how much we can cluster and
		 * what the logical buffer size needs to be.  Currently
		 * we don't try to cluster the write and just handle one
		 * block at a time.
		 */
		lblksize = hammer2_calc_logical(ip, uio->uio_offset,
						&lbase, NULL);
		loff = (int)(uio->uio_offset - lbase);

		KKASSERT(lblksize <= 65536);

		/*
		 * Calculate bytes to copy this transfer and whether the
		 * copy completely covers the buffer or not.
		 */
		trivial = 0;
		n = lblksize - loff;
		if (n > uio->uio_resid) {
			n = uio->uio_resid;
			if (loff == lbase && uio->uio_offset + n == new_eof)
				trivial = 1;
			endofblk = 0;
		} else {
			if (loff == 0)
				trivial = 1;
			endofblk = 1;
		}
		if (lbase >= new_eof)
			trivial = 1;

		/*
		 * Get the buffer
		 */
		if (uio->uio_segflg == UIO_NOCOPY) {
			/*
			 * Issuing a write with the same data backing the
			 * buffer.  Instantiate the buffer to collect the
			 * backing vm pages, then read-in any missing bits.
			 *
			 * This case is used by vop_stdputpages().
			 */
			bp = getblk(ip->vp, lbase, lblksize, GETBLK_BHEAVY, 0);
			if ((bp->b_flags & B_CACHE) == 0) {
				bqrelse(bp);
				error = bread(ip->vp, lbase, lblksize, &bp);
			}
		} else if (trivial) {
			/*
			 * Even though we are entirely overwriting the buffer
			 * we may still have to zero it out to avoid a
			 * mmap/write visibility issue.
			 */
			bp = getblk(ip->vp, lbase, lblksize, GETBLK_BHEAVY, 0);
			if ((bp->b_flags & B_CACHE) == 0)
				vfs_bio_clrbuf(bp);
		} else {
			/*
			 * Partial overwrite, read in any missing bits then
			 * replace the portion being written.
			 *
			 * (The strategy code will detect zero-fill physical
			 * blocks for this case).
			 */
			error = bread(ip->vp, lbase, lblksize, &bp);
			if (error == 0)
				bheavy(bp);
		}

		if (error) {
			brelse(bp);
			break;
		}

		/*
		 * Ok, copy the data in
		 */
		error = uiomovebp(bp, bp->b_data + loff, n, uio);
		kflags |= NOTE_WRITE;
		modified = 1;
		if (error) {
			brelse(bp);
			break;
		}

		/*
		 * WARNING: Pageout daemon will issue UIO_NOCOPY writes
		 *	    with IO_SYNC or IO_ASYNC set.  These writes
		 *	    must be handled as the pageout daemon expects.
		 *
		 * NOTE!    H2 relies on cluster_write() here because it
		 *	    cannot preallocate disk blocks at the logical
		 *	    level due to not knowing what the compression
		 *	    size will be at this time.
		 *
		 *	    We must use cluster_write() here and we depend
		 *	    on the write-behind feature to flush buffers
		 *	    appropriately.  If we let the buffer daemons do
		 *	    it the block allocations will be all over the
		 *	    place.
		 */
		if (ioflag & IO_SYNC) {
			bwrite(bp);
		} else if ((ioflag & IO_DIRECT) && endofblk) {
			bawrite(bp);
		} else if (ioflag & IO_ASYNC) {
			bawrite(bp);
		} else if (ip->vp->v_mount->mnt_flag & MNT_NOCLUSTERW) {
			bdwrite(bp);
		} else if (hammer2_cluster_write) {
			bp->b_flags |= B_CLUSTEROK;
			cluster_write(bp, new_eof, lblksize, seqcount);
		} else {
			bp->b_flags |= B_CLUSTEROK;
			bdwrite(bp);
		}
	}

	/*
	 * Cleanup.  If we extended the file EOF but failed to write through
	 * the entire write is a failure and we have to back-up.
	 */
	if (error && new_eof != old_eof) {
		hammer2_mtx_unlock(&ip->truncate_lock);
		hammer2_mtx_ex(&ip->lock);
		hammer2_mtx_ex(&ip->truncate_lock);
		hammer2_truncate_file(ip, old_eof);
		if (ip->flags & HAMMER2_INODE_MODIFIED)
			hammer2_inode_chain_sync(ip);
		hammer2_mtx_unlock(&ip->lock);
	} else if (modified) {
		hammer2_mtx_ex(&ip->lock);
		hammer2_inode_modify(ip);
		hammer2_update_time(&ip->meta.mtime);
		if (ip->flags & HAMMER2_INODE_MODIFIED)
			hammer2_inode_chain_sync(ip);
		hammer2_mtx_unlock(&ip->lock);
		hammer2_knote(ip->vp, kflags);
	}
	hammer2_trans_assert_strategy(ip->pmp);
	hammer2_mtx_unlock(&ip->truncate_lock);

	return (error);
}
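/*
 * Buffer disposition policy in the loop above, summarized:
 *
 *	IO_SYNC				synchronous write
 *	IO_DIRECT at end of block	async write, start I/O immediately
 *	IO_ASYNC			async write
 *	MNT_NOCLUSTERW			delayed write
 *	otherwise			cluster_write() write-behind (or a
 *					delayed write with B_CLUSTEROK),
 *					per the NOTE above regarding
 *					compression-time block allocation.
 */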
/*
 * Truncate the size of a file.  The inode must not be locked.
 *
 * We must unconditionally set HAMMER2_INODE_RESIZED to properly
 * ensure that any on-media data beyond the new file EOF has been destroyed.
 *
 * WARNING: nvtruncbuf() can only be safely called without the inode lock
 *	    held due to the way our write thread works.  If the truncation
 *	    occurs in the middle of a buffer, nvtruncbuf() is responsible
 *	    for dirtying that buffer and zeroing out trailing bytes.
 *
 * WARNING! Assumes that the kernel interlocks size changes at the
 *	    vnode level.
 *
 * WARNING! Caller assumes responsibility for removing dead blocks
 *	    if INODE_RESIZED is set.
 */
static
void
hammer2_truncate_file(hammer2_inode_t *ip, hammer2_key_t nsize)
{
	hammer2_key_t lbase;
	int nblksize;

	hammer2_mtx_unlock(&ip->lock);
	if (ip->vp) {
		nblksize = hammer2_calc_logical(ip, nsize, &lbase, NULL);
		nvtruncbuf(ip->vp, nsize,
			   nblksize, (int)nsize & (nblksize - 1),
			   0);
	}
	hammer2_mtx_ex(&ip->lock);
	KKASSERT((ip->flags & HAMMER2_INODE_RESIZED) == 0);
	ip->osize = ip->meta.size;
	ip->meta.size = nsize;
	atomic_set_int(&ip->flags, HAMMER2_INODE_RESIZED);
	hammer2_inode_modify(ip);
}
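/*
 * Example of the partial-block case handled above (assuming a 16KB
 * logical block for illustration): truncating to nsize = 100 passes
 * boff = 100 & (16384 - 1) = 100 to nvtruncbuf(), which keeps the
 * buffer, zeroes everything past byte 100, and dirties it so the
 * zeroing reaches the media.
 */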
/*
 * Extend the size of a file.  The inode must not be locked.
 *
 * Even though the file size is changing, we do not have to set the
 * INODE_RESIZED bit unless the file size crosses the EMBEDDED_BYTES
 * boundary.  When this occurs a hammer2_inode_chain_sync() is required
 * to prepare the inode cluster's indirect block table, otherwise
 * async execution of the strategy code will implode on us.
 *
 * WARNING! Assumes that the kernel interlocks size changes at the
 *	    vnode level.
 *
 * WARNING! Caller assumes responsibility for transitioning out
 *	    of the inode DIRECTDATA mode if INODE_RESIZED is set.
 */
static
void
hammer2_extend_file(hammer2_inode_t *ip, hammer2_key_t nsize)
{
	hammer2_key_t lbase;
	hammer2_key_t osize;
	int oblksize;
	int nblksize;

	KKASSERT((ip->flags & HAMMER2_INODE_RESIZED) == 0);
	hammer2_inode_modify(ip);
	osize = ip->meta.size;
	ip->osize = osize;
	ip->meta.size = nsize;

	if (osize <= HAMMER2_EMBEDDED_BYTES && nsize > HAMMER2_EMBEDDED_BYTES) {
		atomic_set_int(&ip->flags, HAMMER2_INODE_RESIZED);
		hammer2_inode_chain_sync(ip);
	}

	hammer2_mtx_unlock(&ip->lock);
	if (ip->vp) {
		oblksize = hammer2_calc_logical(ip, osize, &lbase, NULL);
		nblksize = hammer2_calc_logical(ip, nsize, &lbase, NULL);
		nvextendbuf(ip->vp,
			    osize, nsize,
			    oblksize, nblksize,
			    -1, -1, 0);
	}
	hammer2_mtx_ex(&ip->lock);
}
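/*
 * Example of the boundary crossing described above: growing a file
 * from 200 bytes to 4096 bytes crosses HAMMER2_EMBEDDED_BYTES (the
 * small-file data area embedded directly in the inode, 512 bytes),
 * so INODE_RESIZED is set and hammer2_inode_chain_sync() runs here to
 * move the data out of DIRECTDATA mode before any async strategy I/O
 * can trip over it.
 */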
static
int
hammer2_vop_nresolve(struct vop_nresolve_args *ap)
{
	hammer2_xop_nresolve_t *xop;
	hammer2_inode_t *ip;
	hammer2_inode_t *dip;
	struct namecache *ncp;
	struct vnode *vp;
	int error;

	dip = VTOI(ap->a_dvp);
	xop = hammer2_xop_alloc(dip, 0);

	ncp = ap->a_nch->ncp;
	hammer2_xop_setname(&xop->head, ncp->nc_name, ncp->nc_nlen);

	/*
	 * Note: In DragonFly the kernel handles '.' and '..'.
	 */
	hammer2_inode_lock(dip, HAMMER2_RESOLVE_SHARED);
	hammer2_xop_start(&xop->head, hammer2_xop_nresolve);

	error = hammer2_xop_collect(&xop->head, 0);
	error = hammer2_error_to_errno(error);
	if (error)
		ip = NULL;
	else
		ip = hammer2_inode_get(dip->pmp, dip, &xop->head.cluster, -1);
	hammer2_inode_unlock(dip);

	/*
	 * Acquire the related vnode
	 *
	 * NOTE: For error processing, only ENOENT resolves the namecache
	 *	 entry to NULL, otherwise we just return the error and
	 *	 leave the namecache unresolved.
	 *
	 * NOTE: multiple hammer2_inode structures can be aliased to the
	 *	 same chain element, for example for hardlinks.  This
	 *	 use case does not 'reattach' inode associations that
	 *	 might already exist, but always allocates a new one.
	 *
	 * WARNING: inode structure is locked exclusively via inode_get
	 *	    but chain was locked shared.  inode_unlock()
	 *	    will handle it properly.
	 */
	if (ip) {
		vp = hammer2_igetv(ip, &error);	/* error set to UNIX error */
		if (error == 0) {
			vn_unlock(vp);
			cache_setvp(ap->a_nch, vp);
		} else if (error == ENOENT) {
			cache_setvp(ap->a_nch, NULL);
		}
		hammer2_inode_unlock(ip);

		/*
		 * The vp should not be released until after we've disposed
		 * of our locks, because it might cause vop_inactive() to
		 * be called.
		 */
		if (vp)
			vrele(vp);
	} else {
		error = ENOENT;
		cache_setvp(ap->a_nch, NULL);
	}
	hammer2_xop_retire(&xop->head, HAMMER2_XOPMASK_VOP);
	KASSERT(error || ap->a_nch->ncp->nc_vp != NULL,
		("resolve error %d/%p ap %p\n",
		 error, ap->a_nch->ncp->nc_vp, ap));

	return (error);
}
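/*
 * The XOP sequence used by nresolve is the template for most of the
 * namespace VOPs below: allocate a frame with hammer2_xop_alloc(),
 * stage arguments (hammer2_xop_setname() and friends), dispatch the
 * backend function with hammer2_xop_start(), pull results with
 * hammer2_xop_collect() until done or error, then release the frame
 * with hammer2_xop_retire(&xop->head, HAMMER2_XOPMASK_VOP).
 */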
static
int
hammer2_vop_nlookupdotdot(struct vop_nlookupdotdot_args *ap)
{
	hammer2_inode_t *dip;
	hammer2_tid_t inum;
	int error;

	dip = VTOI(ap->a_dvp);
	inum = dip->meta.iparent;
	*ap->a_vpp = NULL;

	if (inum) {
		error = hammer2_vfs_vget(ap->a_dvp->v_mount, NULL,
					 inum, ap->a_vpp);
	} else {
		error = ENOENT;
	}
	return (error);
}
static
int
hammer2_vop_nmkdir(struct vop_nmkdir_args *ap)
{
	hammer2_inode_t *dip;
	hammer2_inode_t *nip;
	struct namecache *ncp;
	const uint8_t *name;
	size_t name_len;
	hammer2_tid_t inum;
	int error;

	dip = VTOI(ap->a_dvp);
	if (dip->pmp->ronly)
		return (EROFS);
	if (hammer2_vfs_enospace(dip, 0, ap->a_cred) > 1)
		return (ENOSPC);

	ncp = ap->a_nch->ncp;
	name = ncp->nc_name;
	name_len = ncp->nc_nlen;

	hammer2_pfs_memory_wait(dip->pmp);
	hammer2_trans_init(dip->pmp, 0);

	inum = hammer2_trans_newinum(dip->pmp);

	/*
	 * Create the actual inode as a hidden file in the iroot, then
	 * create the directory entry.  The creation of the actual inode
	 * sets its nlinks to 1 which is the value we desire.
	 */
	nip = hammer2_inode_create(dip->pmp->iroot, dip, ap->a_vap, ap->a_cred,
				   name, name_len, 0,
				   inum, inum, 0, 0,
				   0, &error);
	if (error) {
		error = hammer2_error_to_errno(error);
	} else {
		error = hammer2_dirent_create(dip, name, name_len,
					      nip->meta.inum, nip->meta.type);
		/* returns UNIX error code */
	}
	if (error) {
		if (nip) {
			hammer2_inode_unlink_finisher(nip, 0);
			hammer2_inode_unlock(nip);
			nip = NULL;
		}
		*ap->a_vpp = NULL;
	} else {
		*ap->a_vpp = hammer2_igetv(nip, &error);
		hammer2_inode_unlock(nip);
	}

	/*
	 * Update dip's mtime
	 */
	if (error == 0) {
		uint64_t mtime;

		hammer2_inode_lock(dip, HAMMER2_RESOLVE_SHARED);
		hammer2_update_time(&mtime);
		hammer2_inode_modify(dip);
		dip->meta.mtime = mtime;
		hammer2_inode_unlock(dip);
	}

	hammer2_trans_done(dip->pmp);

	if (error == 0) {
		cache_setunresolved(ap->a_nch);
		cache_setvp(ap->a_nch, *ap->a_vpp);
		hammer2_knote(ap->a_dvp, NOTE_WRITE | NOTE_LINK);
	}
	return (error);
}
static
int
hammer2_vop_open(struct vop_open_args *ap)
{
	return vop_stdopen(ap);
}
/*
 * hammer2_vop_advlock { vp, id, op, fl, flags }
 */
static
int
hammer2_vop_advlock(struct vop_advlock_args *ap)
{
	hammer2_inode_t *ip = VTOI(ap->a_vp);
	hammer2_off_t size;

	size = ip->meta.size;
	return (lf_advlock(ap, &ip->advlock, size));
}
static
int
hammer2_vop_close(struct vop_close_args *ap)
{
	return vop_stdclose(ap);
}
/*
 * hammer2_vop_nlink { nch, dvp, vp, cred }
 *
 * Create a hardlink from (vp) to {dvp, nch}.
 */
static
int
hammer2_vop_nlink(struct vop_nlink_args *ap)
{
	hammer2_inode_t *tdip;	/* target directory to create link in */
	hammer2_inode_t *ip;	/* inode we are hardlinking to */
	struct namecache *ncp;
	const uint8_t *name;
	size_t name_len;
	int error;

	if (ap->a_dvp->v_mount != ap->a_vp->v_mount)
		return (EXDEV);

	tdip = VTOI(ap->a_dvp);
	if (tdip->pmp->ronly)
		return (EROFS);
	if (hammer2_vfs_enospace(tdip, 0, ap->a_cred) > 1)
		return (ENOSPC);

	ncp = ap->a_nch->ncp;
	name = ncp->nc_name;
	name_len = ncp->nc_nlen;

	/*
	 * ip represents the file being hardlinked.  The file could be a
	 * normal file or a hardlink target if it has already been hardlinked.
	 * (with the new semantics, it will almost always be a hardlink
	 * target).
	 *
	 * Bump nlinks and potentially also create or move the hardlink
	 * target in the parent directory common to (ip) and (tdip).  The
	 * consolidation code can modify ip->cluster.  The returned cluster
	 * is locked.
	 */
	ip = VTOI(ap->a_vp);
	KASSERT(ip->pmp, ("ip->pmp is NULL %p %p", ip, ip->pmp));
	hammer2_pfs_memory_wait(ip->pmp);
	hammer2_trans_init(ip->pmp, 0);

	/*
	 * Target should be an indexed inode or there's no way we will ever
	 * be able to find it!
	 */
	KKASSERT((ip->meta.name_key & HAMMER2_DIRHASH_VISIBLE) == 0);

	error = 0;

	/*
	 * Can return NULL and error == EXDEV if the common parent
	 * crosses a directory with the xlink flag set.
	 */
	hammer2_inode_lock(tdip, 0);
	hammer2_inode_lock(ip, 0);

	/*
	 * Create the directory entry and bump nlinks.
	 */
	if (error == 0) {
		error = hammer2_dirent_create(tdip, name, name_len,
					      ip->meta.inum, ip->meta.type);
		hammer2_inode_modify(ip);
		++ip->meta.nlinks;
	}
	if (error == 0) {
		/*
		 * Update dip's mtime
		 */
		uint64_t mtime;

		hammer2_update_time(&mtime);
		hammer2_inode_modify(tdip);
		tdip->meta.mtime = mtime;

		cache_setunresolved(ap->a_nch);
		cache_setvp(ap->a_nch, ap->a_vp);
	}
	hammer2_inode_unlock(ip);
	hammer2_inode_unlock(tdip);

	hammer2_trans_done(ip->pmp);
	hammer2_knote(ap->a_vp, NOTE_LINK);
	hammer2_knote(ap->a_dvp, NOTE_WRITE);

	return (error);
}
/*
 * hammer2_vop_ncreate { nch, dvp, vpp, cred, vap }
 *
 * The operating system has already ensured that the directory entry
 * does not exist and done all appropriate namespace locking.
 */
static
int
hammer2_vop_ncreate(struct vop_ncreate_args *ap)
{
	hammer2_inode_t *dip;
	hammer2_inode_t *nip;
	struct namecache *ncp;
	const uint8_t *name;
	size_t name_len;
	hammer2_tid_t inum;
	int error;

	dip = VTOI(ap->a_dvp);
	if (dip->pmp->ronly)
		return (EROFS);
	if (hammer2_vfs_enospace(dip, 0, ap->a_cred) > 1)
		return (ENOSPC);

	ncp = ap->a_nch->ncp;
	name = ncp->nc_name;
	name_len = ncp->nc_nlen;
	hammer2_pfs_memory_wait(dip->pmp);
	hammer2_trans_init(dip->pmp, 0);

	inum = hammer2_trans_newinum(dip->pmp);

	/*
	 * Create the actual inode as a hidden file in the iroot, then
	 * create the directory entry.  The creation of the actual inode
	 * sets its nlinks to 1 which is the value we desire.
	 */
	nip = hammer2_inode_create(dip->pmp->iroot, dip, ap->a_vap, ap->a_cred,
				   name, name_len, 0,
				   inum, inum, 0, 0,
				   0, &error);
	if (error) {
		error = hammer2_error_to_errno(error);
	} else {
		error = hammer2_dirent_create(dip, name, name_len,
					      nip->meta.inum, nip->meta.type);
	}
	if (error) {
		if (nip) {
			hammer2_inode_unlink_finisher(nip, 0);
			hammer2_inode_unlock(nip);
			nip = NULL;
		}
		*ap->a_vpp = NULL;
	} else {
		*ap->a_vpp = hammer2_igetv(nip, &error);
		hammer2_inode_unlock(nip);
	}

	/*
	 * Update dip's mtime
	 */
	if (error == 0) {
		uint64_t mtime;

		hammer2_inode_lock(dip, HAMMER2_RESOLVE_SHARED);
		hammer2_update_time(&mtime);
		hammer2_inode_modify(dip);
		dip->meta.mtime = mtime;
		hammer2_inode_unlock(dip);
	}

	hammer2_trans_done(dip->pmp);

	if (error == 0) {
		cache_setunresolved(ap->a_nch);
		cache_setvp(ap->a_nch, *ap->a_vpp);
		hammer2_knote(ap->a_dvp, NOTE_WRITE);
	}
	return (error);
}
/*
 * Make a device node (typically a fifo)
 */
static
int
hammer2_vop_nmknod(struct vop_nmknod_args *ap)
{
	hammer2_inode_t *dip;
	hammer2_inode_t *nip;
	struct namecache *ncp;
	const uint8_t *name;
	size_t name_len;
	hammer2_tid_t inum;
	int error;

	dip = VTOI(ap->a_dvp);
	if (dip->pmp->ronly)
		return (EROFS);
	if (hammer2_vfs_enospace(dip, 0, ap->a_cred) > 1)
		return (ENOSPC);

	ncp = ap->a_nch->ncp;
	name = ncp->nc_name;
	name_len = ncp->nc_nlen;
	hammer2_pfs_memory_wait(dip->pmp);
	hammer2_trans_init(dip->pmp, 0);

	/*
	 * Create the device inode and then create the directory entry.
	 */
	inum = hammer2_trans_newinum(dip->pmp);
	nip = hammer2_inode_create(dip->pmp->iroot, dip, ap->a_vap, ap->a_cred,
				   name, name_len, 0,
				   inum, inum, 0, 0,
				   0, &error);
	if (error) {
		error = hammer2_error_to_errno(error);
	} else {
		error = hammer2_dirent_create(dip, name, name_len,
					      nip->meta.inum, nip->meta.type);
	}
	if (error) {
		if (nip) {
			hammer2_inode_unlink_finisher(nip, 0);
			hammer2_inode_unlock(nip);
			nip = NULL;
		}
		*ap->a_vpp = NULL;
	} else {
		*ap->a_vpp = hammer2_igetv(nip, &error);
		hammer2_inode_unlock(nip);
	}

	/*
	 * Update dip's mtime
	 */
	if (error == 0) {
		uint64_t mtime;

		hammer2_inode_lock(dip, HAMMER2_RESOLVE_SHARED);
		hammer2_update_time(&mtime);
		hammer2_inode_modify(dip);
		dip->meta.mtime = mtime;
		hammer2_inode_unlock(dip);
	}

	hammer2_trans_done(dip->pmp);

	if (error == 0) {
		cache_setunresolved(ap->a_nch);
		cache_setvp(ap->a_nch, *ap->a_vpp);
		hammer2_knote(ap->a_dvp, NOTE_WRITE);
	}
	return (error);
}
/*
 * hammer2_vop_nsymlink { nch, dvp, vpp, cred, vap, target }
 */
static
int
hammer2_vop_nsymlink(struct vop_nsymlink_args *ap)
{
	hammer2_inode_t *dip;
	hammer2_inode_t *nip;
	struct namecache *ncp;
	const uint8_t *name;
	size_t name_len;
	hammer2_tid_t inum;
	int error;

	dip = VTOI(ap->a_dvp);
	if (dip->pmp->ronly)
		return (EROFS);
	if (hammer2_vfs_enospace(dip, 0, ap->a_cred) > 1)
		return (ENOSPC);

	ncp = ap->a_nch->ncp;
	name = ncp->nc_name;
	name_len = ncp->nc_nlen;
	hammer2_pfs_memory_wait(dip->pmp);
	hammer2_trans_init(dip->pmp, 0);

	ap->a_vap->va_type = VLNK;	/* enforce type */

	/*
	 * Create the softlink as an inode and then create the directory
	 * entry.
	 */
	inum = hammer2_trans_newinum(dip->pmp);

	nip = hammer2_inode_create(dip->pmp->iroot, dip, ap->a_vap, ap->a_cred,
				   name, name_len, 0,
				   inum, inum, 0, 0,
				   0, &error);
	if (error) {
		error = hammer2_error_to_errno(error);
	} else {
		error = hammer2_dirent_create(dip, name, name_len,
					      nip->meta.inum, nip->meta.type);
	}
	if (error) {
		if (nip) {
			hammer2_inode_unlink_finisher(nip, 0);
			hammer2_inode_unlock(nip);
			nip = NULL;
		}
		*ap->a_vpp = NULL;
		hammer2_trans_done(dip->pmp);
		return (error);
	}
	*ap->a_vpp = hammer2_igetv(nip, &error);

	/*
	 * Build the softlink (~like file data) and finalize the namecache.
	 */
	if (error == 0) {
		size_t bytes;
		struct uio auio;
		struct iovec aiov;

		bytes = strlen(ap->a_target);

		hammer2_inode_unlock(nip);
		bzero(&auio, sizeof(auio));
		bzero(&aiov, sizeof(aiov));
		auio.uio_iov = &aiov;
		auio.uio_segflg = UIO_SYSSPACE;
		auio.uio_rw = UIO_WRITE;
		auio.uio_resid = bytes;
		auio.uio_iovcnt = 1;
		auio.uio_td = curthread;
		aiov.iov_base = ap->a_target;
		aiov.iov_len = bytes;
		error = hammer2_write_file(nip, &auio, IO_APPEND, 0);
		/* XXX handle error */
		error = 0;
	} else {
		hammer2_inode_unlock(nip);
	}

	/*
	 * Update dip's mtime
	 */
	if (error == 0) {
		uint64_t mtime;

		hammer2_inode_lock(dip, HAMMER2_RESOLVE_SHARED);
		hammer2_update_time(&mtime);
		hammer2_inode_modify(dip);
		dip->meta.mtime = mtime;
		hammer2_inode_unlock(dip);
	}

	hammer2_trans_done(dip->pmp);

	/*
	 * Finalize namecache
	 */
	if (error == 0) {
		cache_setunresolved(ap->a_nch);
		cache_setvp(ap->a_nch, *ap->a_vpp);
		hammer2_knote(ap->a_dvp, NOTE_WRITE);
	}
	return (error);
}
/*
 * hammer2_vop_nremove { nch, dvp, cred }
 */
static
int
hammer2_vop_nremove(struct vop_nremove_args *ap)
{
	hammer2_xop_unlink_t *xop;
	hammer2_inode_t *dip;
	hammer2_inode_t *ip;
	struct namecache *ncp;
	int isopen;
	int error;

	dip = VTOI(ap->a_dvp);
	if (dip->pmp->ronly)
		return (EROFS);
	if (hammer2_vfs_enospace(dip, 0, ap->a_cred) > 1)
		return (ENOSPC);

	ncp = ap->a_nch->ncp;

	hammer2_pfs_memory_wait(dip->pmp);
	hammer2_trans_init(dip->pmp, 0);
	hammer2_inode_lock(dip, 0);

	/*
	 * The unlink XOP unlinks the path from the directory and
	 * locates and returns the cluster associated with the real inode.
	 * We have to handle nlinks here on the frontend.
	 */
	xop = hammer2_xop_alloc(dip, HAMMER2_XOP_MODIFYING);
	hammer2_xop_setname(&xop->head, ncp->nc_name, ncp->nc_nlen);

	/*
	 * The namecache entry is locked so nobody can use this namespace.
	 * Calculate isopen to determine if this namespace has an open vp
	 * associated with it and resolve the vp only if it does.
	 *
	 * We try to avoid resolving the vnode if nobody has it open, but
	 * note that the test is via this namespace only.
	 */
	isopen = cache_isopen(ap->a_nch);
	xop->isdir = 0;
	xop->dopermanent = 0;
	hammer2_xop_start(&xop->head, hammer2_xop_unlink);

	/*
	 * Collect the real inode and adjust nlinks, destroy the real
	 * inode if nlinks transitions to 0 and it was the real inode
	 * (else it has already been removed).
	 */
	error = hammer2_xop_collect(&xop->head, 0);
	error = hammer2_error_to_errno(error);
	hammer2_inode_unlock(dip);

	if (error == 0) {
		ip = hammer2_inode_get(dip->pmp, dip, &xop->head.cluster, -1);
		hammer2_xop_retire(&xop->head, HAMMER2_XOPMASK_VOP);
		if (ip) {
			hammer2_inode_unlink_finisher(ip, isopen);
			hammer2_inode_unlock(ip);
		}
	} else {
		hammer2_xop_retire(&xop->head, HAMMER2_XOPMASK_VOP);
	}

	/*
	 * Update dip's mtime
	 */
	if (error == 0) {
		uint64_t mtime;

		hammer2_inode_lock(dip, HAMMER2_RESOLVE_SHARED);
		hammer2_update_time(&mtime);
		hammer2_inode_modify(dip);
		dip->meta.mtime = mtime;
		hammer2_inode_unlock(dip);
	}

	hammer2_inode_run_sideq(dip->pmp);
	hammer2_trans_done(dip->pmp);

	if (error == 0) {
		cache_unlink(ap->a_nch);
		hammer2_knote(ap->a_dvp, NOTE_WRITE);
	}
	return (error);
}
/*
 * hammer2_vop_nrmdir { nch, dvp, cred }
 */
static
int
hammer2_vop_nrmdir(struct vop_nrmdir_args *ap)
{
	hammer2_xop_unlink_t *xop;
	hammer2_inode_t *dip;
	hammer2_inode_t *ip;
	struct namecache *ncp;
	int isopen;
	int error;

	dip = VTOI(ap->a_dvp);
	if (dip->pmp->ronly)
		return (EROFS);
	if (hammer2_vfs_enospace(dip, 0, ap->a_cred) > 1)
		return (ENOSPC);

	hammer2_pfs_memory_wait(dip->pmp);
	hammer2_trans_init(dip->pmp, 0);
	hammer2_inode_lock(dip, 0);

	xop = hammer2_xop_alloc(dip, HAMMER2_XOP_MODIFYING);

	ncp = ap->a_nch->ncp;
	hammer2_xop_setname(&xop->head, ncp->nc_name, ncp->nc_nlen);
	isopen = cache_isopen(ap->a_nch);
	xop->isdir = 1;
	xop->dopermanent = 0;
	hammer2_xop_start(&xop->head, hammer2_xop_unlink);

	/*
	 * Collect the real inode and adjust nlinks, destroy the real
	 * inode if nlinks transitions to 0 and it was the real inode
	 * (else it has already been removed).
	 */
	error = hammer2_xop_collect(&xop->head, 0);
	error = hammer2_error_to_errno(error);
	hammer2_inode_unlock(dip);

	if (error == 0) {
		ip = hammer2_inode_get(dip->pmp, dip, &xop->head.cluster, -1);
		hammer2_xop_retire(&xop->head, HAMMER2_XOPMASK_VOP);
		if (ip) {
			hammer2_inode_unlink_finisher(ip, isopen);
			hammer2_inode_unlock(ip);
		}
	} else {
		hammer2_xop_retire(&xop->head, HAMMER2_XOPMASK_VOP);
	}

	/*
	 * Update dip's mtime
	 */
	if (error == 0) {
		uint64_t mtime;

		hammer2_inode_lock(dip, HAMMER2_RESOLVE_SHARED);
		hammer2_update_time(&mtime);
		hammer2_inode_modify(dip);
		dip->meta.mtime = mtime;
		hammer2_inode_unlock(dip);
	}

	hammer2_inode_run_sideq(dip->pmp);
	hammer2_trans_done(dip->pmp);

	if (error == 0) {
		cache_unlink(ap->a_nch);
		hammer2_knote(ap->a_dvp, NOTE_WRITE | NOTE_LINK);
	}
	return (error);
}
/*
 * hammer2_vop_nrename { fnch, tnch, fdvp, tdvp, cred }
 */
static
int
hammer2_vop_nrename(struct vop_nrename_args *ap)
{
	struct namecache *fncp;
	struct namecache *tncp;
	hammer2_inode_t *fdip;	/* source directory */
	hammer2_inode_t *tdip;	/* target directory */
	hammer2_inode_t *ip;	/* file being renamed */
	hammer2_inode_t *tip;	/* replaced target during rename or NULL */
	const uint8_t *fname;
	size_t fname_len;
	const uint8_t *tname;
	size_t tname_len;
	hammer2_tid_t tlhc;
	int error;
	int update_tdip;
	int update_fdip;
	int isopen;

	if (ap->a_fdvp->v_mount != ap->a_tdvp->v_mount)
		return (EXDEV);
	if (ap->a_fdvp->v_mount != ap->a_fnch->ncp->nc_vp->v_mount)
		return (EXDEV);

	fdip = VTOI(ap->a_fdvp);	/* source directory */
	tdip = VTOI(ap->a_tdvp);	/* target directory */

	if (fdip->pmp->ronly)
		return (EROFS);
	if (hammer2_vfs_enospace(fdip, 0, ap->a_cred) > 1)
		return (ENOSPC);

	fncp = ap->a_fnch->ncp;		/* entry name in source */
	fname = fncp->nc_name;
	fname_len = fncp->nc_nlen;

	tncp = ap->a_tnch->ncp;		/* entry name in target */
	tname = tncp->nc_name;
	tname_len = tncp->nc_nlen;

	hammer2_pfs_memory_wait(tdip->pmp);
	hammer2_trans_init(tdip->pmp, 0);

	update_tdip = 0;
	update_fdip = 0;

	ip = VTOI(fncp->nc_vp);
	hammer2_inode_ref(ip);		/* extra ref */

	/*
	 * Lookup the target name to determine if a directory entry
	 * is being overwritten.  We only hold related inode locks
	 * temporarily, the operating system is expected to protect
	 * against rename races.
	 */
	tip = tncp->nc_vp ? VTOI(tncp->nc_vp) : NULL;
	if (tip)
		hammer2_inode_ref(tip);	/* extra ref */

	/*
	 * Can return NULL and error == EXDEV if the common parent
	 * crosses a directory with the xlink flag set.
	 *
	 * For now try to avoid deadlocks with a simple pointer address
	 * test.  (tip) can be NULL.
	 */
	error = 0;
	if (fdip <= tdip) {
		hammer2_inode_lock(fdip, 0);
		hammer2_inode_lock(tdip, 0);
	} else {
		hammer2_inode_lock(tdip, 0);
		hammer2_inode_lock(fdip, 0);
	}
	if (tip) {
		if (ip <= tip) {
			hammer2_inode_lock(ip, 0);
			hammer2_inode_lock(tip, 0);
		} else {
			hammer2_inode_lock(tip, 0);
			hammer2_inode_lock(ip, 0);
		}
	} else {
		hammer2_inode_lock(ip, 0);
	}

#if 0
	/*
	 * Delete the target namespace.
	 *
	 * REMOVED - NOW FOLDED INTO XOP_NRENAME OPERATION
	 */
	{
		hammer2_xop_unlink_t *xop2;
		hammer2_inode_t *tip;
		int tnch_error;

		/*
		 * The unlink XOP unlinks the path from the directory and
		 * locates and returns the cluster associated with the real
		 * inode.  We have to handle nlinks here on the frontend.
		 */
		xop2 = hammer2_xop_alloc(tdip, HAMMER2_XOP_MODIFYING);
		hammer2_xop_setname(&xop2->head, tname, tname_len);
		isopen = cache_isopen(ap->a_tnch);
		xop2->isdir = -1;
		xop2->dopermanent = 0;
		hammer2_xop_start(&xop2->head, hammer2_xop_unlink);

		/*
		 * Collect the real inode and adjust nlinks, destroy the real
		 * inode if nlinks transitions to 0 and it was the real inode
		 * (else it has already been removed).
		 */
		tnch_error = hammer2_xop_collect(&xop2->head, 0);
		tnch_error = hammer2_error_to_errno(tnch_error);
		/* hammer2_inode_unlock(tdip); */

		if (tnch_error == 0) {
			tip = hammer2_inode_get(tdip->pmp, NULL,
						&xop2->head.cluster, -1);
			hammer2_xop_retire(&xop2->head, HAMMER2_XOPMASK_VOP);
			if (tip) {
				hammer2_inode_unlink_finisher(tip, isopen);
				hammer2_inode_unlock(tip);
			}
		} else {
			hammer2_xop_retire(&xop2->head, HAMMER2_XOPMASK_VOP);
		}
		/* hammer2_inode_lock(tdip, 0); */

		if (tnch_error && tnch_error != ENOENT) {
			error = tnch_error;
			goto done2;
		}
		update_tdip = 1;
	}
#endif

	/*
	 * Resolve the collision space for (tdip, tname, tname_len)
	 *
	 * tdip must be held exclusively locked to prevent races since
	 * multiple filenames can end up in the same collision space.
	 */
	{
		hammer2_xop_scanlhc_t *sxop;
		hammer2_tid_t lhcbase;

		tlhc = hammer2_dirhash(tname, tname_len);
		lhcbase = tlhc;
		sxop = hammer2_xop_alloc(tdip, HAMMER2_XOP_MODIFYING);
		sxop->lhc = tlhc;
		hammer2_xop_start(&sxop->head, hammer2_xop_scanlhc);
		while ((error = hammer2_xop_collect(&sxop->head, 0)) == 0) {
			if (tlhc != sxop->head.cluster.focus->bref.key)
				break;
			++tlhc;
		}
		error = hammer2_error_to_errno(error);
		hammer2_xop_retire(&sxop->head, HAMMER2_XOPMASK_VOP);

		if (error) {
			if (error != ENOENT)
				goto done2;
			++tlhc;
			error = 0;
		}
		if ((lhcbase ^ tlhc) & ~HAMMER2_DIRHASH_LOMASK) {
			error = ENOSPC;
			goto done2;
		}
	}

	/*
	 * Ready to go, issue the rename to the backend.  Note that meta-data
	 * updates to the related inodes occur separately from the rename
	 * operation.
	 *
	 * NOTE: While it is not necessary to update ip->meta.name*, doing
	 *	 so aids catastrophic recovery and debugging.
	 */
	{
		hammer2_xop_nrename_t *xop4;

		xop4 = hammer2_xop_alloc(fdip, HAMMER2_XOP_MODIFYING);
		xop4->lhc = tlhc;
		xop4->ip_key = ip->meta.name_key;
		hammer2_xop_setip2(&xop4->head, ip);
		hammer2_xop_setip3(&xop4->head, tdip);
		hammer2_xop_setname(&xop4->head, fname, fname_len);
		hammer2_xop_setname2(&xop4->head, tname, tname_len);
		hammer2_xop_start(&xop4->head, hammer2_xop_nrename);

		error = hammer2_xop_collect(&xop4->head, 0);
		error = hammer2_error_to_errno(error);
		hammer2_xop_retire(&xop4->head, HAMMER2_XOPMASK_VOP);

		if (error == ENOENT)
			error = 0;

		/*
		 * Update inode meta-data.
		 *
		 * WARNING! The in-memory inode (ip) structure does not
		 *	    maintain a copy of the inode's filename buffer.
		 */
		if (error == 0 &&
		    (ip->meta.name_key & HAMMER2_DIRHASH_VISIBLE)) {
			hammer2_inode_modify(ip);
			ip->meta.name_len = tname_len;
			ip->meta.name_key = tlhc;
		}
		if (error == 0) {
			hammer2_inode_modify(ip);
			ip->meta.iparent = tdip->meta.inum;
		}
		update_fdip = 1;
		update_tdip = 1;
	}

done2:
	/*
	 * If no error, the backend has replaced the target directory entry.
	 * We must adjust nlinks on the original replace target if it exists.
	 */
	if (error == 0 && tip) {
		isopen = cache_isopen(ap->a_tnch);
		hammer2_inode_unlink_finisher(tip, isopen);
	}

	/*
	 * Update directory mtimes to reflect that something changed.
	 */
	if (update_fdip || update_tdip) {
		uint64_t mtime;

		hammer2_update_time(&mtime);
		if (update_fdip) {
			hammer2_inode_modify(fdip);
			fdip->meta.mtime = mtime;
		}
		if (update_tdip) {
			hammer2_inode_modify(tdip);
			tdip->meta.mtime = mtime;
		}
	}
	if (tip) {
		hammer2_inode_unlock(tip);
		hammer2_inode_drop(tip);
	}
	hammer2_inode_unlock(ip);
	hammer2_inode_unlock(tdip);
	hammer2_inode_unlock(fdip);
	hammer2_inode_drop(ip);
	hammer2_inode_run_sideq(fdip->pmp);

	hammer2_trans_done(tdip->pmp);

	/*
	 * Issue the namecache update after unlocking all the internal
	 * hammer structures, otherwise we might deadlock.
	 */
	if (error == 0 && tip) {
		cache_unlink(ap->a_tnch);
		cache_setunresolved(ap->a_tnch);
	}
	if (error == 0) {
		cache_rename(ap->a_fnch, ap->a_tnch);
		hammer2_knote(ap->a_fdvp, NOTE_WRITE);
		hammer2_knote(ap->a_tdvp, NOTE_WRITE);
		hammer2_knote(fncp->nc_vp, NOTE_RENAME);
	}

	return (error);
}
/*
 * hammer2_vop_ioctl { vp, command, data, fflag, cred }
 */
static
int
hammer2_vop_ioctl(struct vop_ioctl_args *ap)
{
	hammer2_inode_t *ip;
	int error;

	ip = VTOI(ap->a_vp);

	error = hammer2_ioctl(ip, ap->a_command, (void *)ap->a_data,
			      ap->a_fflag, ap->a_cred);
	return (error);
}
static
int
hammer2_vop_mountctl(struct vop_mountctl_args *ap)
{
	hammer2_pfs_t *pmp;
	struct mount *mp;
	int rc;

	switch (ap->a_op) {
	case (MOUNTCTL_SET_EXPORT):
		mp = ap->a_head.a_ops->head.vv_mount;
		pmp = MPTOPMP(mp);

		if (ap->a_ctllen != sizeof(struct export_args))
			rc = (EINVAL);
		else
			rc = vfs_export(mp, &pmp->export,
					(const struct export_args *)ap->a_ctl);
		break;
	default:
		rc = vop_stdmountctl(ap);
		break;
	}
	return (rc);
}
static void filt_hammer2detach(struct knote *kn);
static int filt_hammer2read(struct knote *kn, long hint);
static int filt_hammer2write(struct knote *kn, long hint);
static int filt_hammer2vnode(struct knote *kn, long hint);

static struct filterops hammer2read_filtops =
	{ FILTEROP_ISFD | FILTEROP_MPSAFE,
	  NULL, filt_hammer2detach, filt_hammer2read };
static struct filterops hammer2write_filtops =
	{ FILTEROP_ISFD | FILTEROP_MPSAFE,
	  NULL, filt_hammer2detach, filt_hammer2write };
static struct filterops hammer2vnode_filtops =
	{ FILTEROP_ISFD | FILTEROP_MPSAFE,
	  NULL, filt_hammer2detach, filt_hammer2vnode };
static
int
hammer2_vop_kqfilter(struct vop_kqfilter_args *ap)
{
	struct vnode *vp = ap->a_vp;
	struct knote *kn = ap->a_kn;

	switch (kn->kn_filter) {
	case EVFILT_READ:
		kn->kn_fop = &hammer2read_filtops;
		break;
	case EVFILT_WRITE:
		kn->kn_fop = &hammer2write_filtops;
		break;
	case EVFILT_VNODE:
		kn->kn_fop = &hammer2vnode_filtops;
		break;
	default:
		return (EOPNOTSUPP);
	}

	kn->kn_hook = (caddr_t)vp;

	knote_insert(&vp->v_pollinfo.vpi_kqinfo.ki_note, kn);

	return (0);
}
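/*
 * The filter callbacks below translate vnode events for kqueue: a
 * NOTE_REVOKE hint latches EV_EOF/EV_NODATA, the read filter computes
 * the readable residual from ip->meta.size, and the vnode filter
 * simply accumulates whichever hinted fflags the caller subscribed to.
 */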
static void
filt_hammer2detach(struct knote *kn)
{
	struct vnode *vp = (void *)kn->kn_hook;

	knote_remove(&vp->v_pollinfo.vpi_kqinfo.ki_note, kn);
}
static int
filt_hammer2read(struct knote *kn, long hint)
{
	struct vnode *vp = (void *)kn->kn_hook;
	hammer2_inode_t *ip = VTOI(vp);
	off_t off;

	if (hint == NOTE_REVOKE) {
		kn->kn_flags |= (EV_EOF | EV_NODATA | EV_ONESHOT);
		return (1);
	}
	off = ip->meta.size - kn->kn_fp->f_offset;
	kn->kn_data = (off < INTPTR_MAX) ? off : INTPTR_MAX;
	if (kn->kn_sfflags & NOTE_OLDAPI)
		return (1);
	return (kn->kn_data != 0);
}
static int
filt_hammer2write(struct knote *kn, long hint)
{
	if (hint == NOTE_REVOKE)
		kn->kn_flags |= (EV_EOF | EV_NODATA | EV_ONESHOT);
	kn->kn_data = 0;
	return (1);
}
static int
filt_hammer2vnode(struct knote *kn, long hint)
{
	if (kn->kn_sfflags & hint)
		kn->kn_fflags |= hint;
	if (hint == NOTE_REVOKE) {
		kn->kn_flags |= (EV_EOF | EV_NODATA);
		return (1);
	}
	return (kn->kn_fflags != 0);
}
static
int
hammer2_vop_markatime(struct vop_markatime_args *ap)
{
	hammer2_inode_t *ip;
	struct vnode *vp;

	vp = ap->a_vp;
	ip = VTOI(vp);

	if (ip->pmp->ronly)
		return (EROFS);
	return (0);
}
static
int
hammer2_vop_fifokqfilter(struct vop_kqfilter_args *ap)
{
	int error;

	error = VOCALL(&fifo_vnode_vops, &ap->a_head);
	if (error)
		error = hammer2_vop_kqfilter(ap);
	return (error);
}
struct vop_ops hammer2_vnode_vops = {
	.vop_default	= vop_defaultop,
	.vop_fsync	= hammer2_vop_fsync,
	.vop_getpages	= vop_stdgetpages,
	.vop_putpages	= vop_stdputpages,
	.vop_access	= hammer2_vop_access,
	.vop_advlock	= hammer2_vop_advlock,
	.vop_close	= hammer2_vop_close,
	.vop_nlink	= hammer2_vop_nlink,
	.vop_ncreate	= hammer2_vop_ncreate,
	.vop_nsymlink	= hammer2_vop_nsymlink,
	.vop_nremove	= hammer2_vop_nremove,
	.vop_nrmdir	= hammer2_vop_nrmdir,
	.vop_nrename	= hammer2_vop_nrename,
	.vop_getattr	= hammer2_vop_getattr,
	.vop_setattr	= hammer2_vop_setattr,
	.vop_readdir	= hammer2_vop_readdir,
	.vop_readlink	= hammer2_vop_readlink,
	.vop_read	= hammer2_vop_read,
	.vop_write	= hammer2_vop_write,
	.vop_open	= hammer2_vop_open,
	.vop_inactive	= hammer2_vop_inactive,
	.vop_reclaim	= hammer2_vop_reclaim,
	.vop_nresolve	= hammer2_vop_nresolve,
	.vop_nlookupdotdot = hammer2_vop_nlookupdotdot,
	.vop_nmkdir	= hammer2_vop_nmkdir,
	.vop_nmknod	= hammer2_vop_nmknod,
	.vop_ioctl	= hammer2_vop_ioctl,
	.vop_mountctl	= hammer2_vop_mountctl,
	.vop_bmap	= hammer2_vop_bmap,
	.vop_strategy	= hammer2_vop_strategy,
	.vop_kqfilter	= hammer2_vop_kqfilter
};

struct vop_ops hammer2_spec_vops = {
	.vop_default	= vop_defaultop,
	.vop_fsync	= hammer2_vop_fsync,
	.vop_read	= vop_stdnoread,
	.vop_write	= vop_stdnowrite,
	.vop_access	= hammer2_vop_access,
	.vop_close	= hammer2_vop_close,
	.vop_markatime	= hammer2_vop_markatime,
	.vop_getattr	= hammer2_vop_getattr,
	.vop_inactive	= hammer2_vop_inactive,
	.vop_reclaim	= hammer2_vop_reclaim,
	.vop_setattr	= hammer2_vop_setattr
};

struct vop_ops hammer2_fifo_vops = {
	.vop_default	= fifo_vnoperate,
	.vop_fsync	= hammer2_vop_fsync,
#if 0
	.vop_read	= hammer2_vop_fiforead,
	.vop_write	= hammer2_vop_fifowrite,
#endif
	.vop_access	= hammer2_vop_access,
#if 0
	.vop_close	= hammer2_vop_fifoclose,
#endif
	.vop_markatime	= hammer2_vop_markatime,
	.vop_getattr	= hammer2_vop_getattr,
	.vop_inactive	= hammer2_vop_inactive,
	.vop_reclaim	= hammer2_vop_reclaim,
	.vop_setattr	= hammer2_vop_setattr,
	.vop_kqfilter	= hammer2_vop_fifokqfilter
};