2 * Copyright (c) 2007-2008 The DragonFly Project. All rights reserved.
4 * This code is derived from software contributed to The DragonFly Project
5 * by Matthew Dillon <dillon@backplane.com>
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in
15 * the documentation and/or other materials provided with the
17 * 3. Neither the name of The DragonFly Project nor the names of its
18 * contributors may be used to endorse or promote products derived
19 * from this software without specific, prior written permission.
21 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
24 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
25 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
26 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
27 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
28 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
29 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
30 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
31 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
35 #include <sys/mountctl.h>
36 #include <sys/namecache.h>
38 #include <vfs/fifofs/fifo.h>
45 static int hammer_vop_fsync(struct vop_fsync_args
*);
46 static int hammer_vop_read(struct vop_read_args
*);
47 static int hammer_vop_write(struct vop_write_args
*);
48 static int hammer_vop_access(struct vop_access_args
*);
49 static int hammer_vop_advlock(struct vop_advlock_args
*);
50 static int hammer_vop_close(struct vop_close_args
*);
51 static int hammer_vop_ncreate(struct vop_ncreate_args
*);
52 static int hammer_vop_getattr(struct vop_getattr_args
*);
53 static int hammer_vop_nresolve(struct vop_nresolve_args
*);
54 static int hammer_vop_nlookupdotdot(struct vop_nlookupdotdot_args
*);
55 static int hammer_vop_nlink(struct vop_nlink_args
*);
56 static int hammer_vop_nmkdir(struct vop_nmkdir_args
*);
57 static int hammer_vop_nmknod(struct vop_nmknod_args
*);
58 static int hammer_vop_open(struct vop_open_args
*);
59 static int hammer_vop_print(struct vop_print_args
*);
60 static int hammer_vop_readdir(struct vop_readdir_args
*);
61 static int hammer_vop_readlink(struct vop_readlink_args
*);
62 static int hammer_vop_nremove(struct vop_nremove_args
*);
63 static int hammer_vop_nrename(struct vop_nrename_args
*);
64 static int hammer_vop_nrmdir(struct vop_nrmdir_args
*);
65 static int hammer_vop_markatime(struct vop_markatime_args
*);
66 static int hammer_vop_setattr(struct vop_setattr_args
*);
67 static int hammer_vop_strategy(struct vop_strategy_args
*);
68 static int hammer_vop_bmap(struct vop_bmap_args
*ap
);
69 static int hammer_vop_nsymlink(struct vop_nsymlink_args
*);
70 static int hammer_vop_nwhiteout(struct vop_nwhiteout_args
*);
71 static int hammer_vop_ioctl(struct vop_ioctl_args
*);
72 static int hammer_vop_mountctl(struct vop_mountctl_args
*);
73 static int hammer_vop_kqfilter (struct vop_kqfilter_args
*);
75 static int hammer_vop_fifoclose (struct vop_close_args
*);
76 static int hammer_vop_fiforead (struct vop_read_args
*);
77 static int hammer_vop_fifowrite (struct vop_write_args
*);
78 static int hammer_vop_fifokqfilter (struct vop_kqfilter_args
*);
80 struct vop_ops hammer_vnode_vops
= {
81 .vop_default
= vop_defaultop
,
82 .vop_fsync
= hammer_vop_fsync
,
83 .vop_getpages
= vop_stdgetpages
,
84 .vop_putpages
= vop_stdputpages
,
85 .vop_read
= hammer_vop_read
,
86 .vop_write
= hammer_vop_write
,
87 .vop_access
= hammer_vop_access
,
88 .vop_advlock
= hammer_vop_advlock
,
89 .vop_close
= hammer_vop_close
,
90 .vop_ncreate
= hammer_vop_ncreate
,
91 .vop_getattr
= hammer_vop_getattr
,
92 .vop_inactive
= hammer_vop_inactive
,
93 .vop_reclaim
= hammer_vop_reclaim
,
94 .vop_nresolve
= hammer_vop_nresolve
,
95 .vop_nlookupdotdot
= hammer_vop_nlookupdotdot
,
96 .vop_nlink
= hammer_vop_nlink
,
97 .vop_nmkdir
= hammer_vop_nmkdir
,
98 .vop_nmknod
= hammer_vop_nmknod
,
99 .vop_open
= hammer_vop_open
,
100 .vop_pathconf
= vop_stdpathconf
,
101 .vop_print
= hammer_vop_print
,
102 .vop_readdir
= hammer_vop_readdir
,
103 .vop_readlink
= hammer_vop_readlink
,
104 .vop_nremove
= hammer_vop_nremove
,
105 .vop_nrename
= hammer_vop_nrename
,
106 .vop_nrmdir
= hammer_vop_nrmdir
,
107 .vop_markatime
= hammer_vop_markatime
,
108 .vop_setattr
= hammer_vop_setattr
,
109 .vop_bmap
= hammer_vop_bmap
,
110 .vop_strategy
= hammer_vop_strategy
,
111 .vop_nsymlink
= hammer_vop_nsymlink
,
112 .vop_nwhiteout
= hammer_vop_nwhiteout
,
113 .vop_ioctl
= hammer_vop_ioctl
,
114 .vop_mountctl
= hammer_vop_mountctl
,
115 .vop_kqfilter
= hammer_vop_kqfilter
118 struct vop_ops hammer_spec_vops
= {
119 .vop_default
= vop_defaultop
,
120 .vop_fsync
= hammer_vop_fsync
,
121 .vop_read
= vop_stdnoread
,
122 .vop_write
= vop_stdnowrite
,
123 .vop_access
= hammer_vop_access
,
124 .vop_close
= hammer_vop_close
,
125 .vop_markatime
= hammer_vop_markatime
,
126 .vop_getattr
= hammer_vop_getattr
,
127 .vop_inactive
= hammer_vop_inactive
,
128 .vop_reclaim
= hammer_vop_reclaim
,
129 .vop_setattr
= hammer_vop_setattr
132 struct vop_ops hammer_fifo_vops
= {
133 .vop_default
= fifo_vnoperate
,
134 .vop_fsync
= hammer_vop_fsync
,
135 .vop_read
= hammer_vop_fiforead
,
136 .vop_write
= hammer_vop_fifowrite
,
137 .vop_access
= hammer_vop_access
,
138 .vop_close
= hammer_vop_fifoclose
,
139 .vop_markatime
= hammer_vop_markatime
,
140 .vop_getattr
= hammer_vop_getattr
,
141 .vop_inactive
= hammer_vop_inactive
,
142 .vop_reclaim
= hammer_vop_reclaim
,
143 .vop_setattr
= hammer_vop_setattr
,
144 .vop_kqfilter
= hammer_vop_fifokqfilter
149 hammer_knote(struct vnode
*vp
, int flags
)
152 KNOTE(&vp
->v_pollinfo
.vpi_kqinfo
.ki_note
, flags
);
155 static int hammer_dounlink(hammer_transaction_t trans
, struct nchandle
*nch
,
156 struct vnode
*dvp
, struct ucred
*cred
,
157 int flags
, int isdir
);
158 static int hammer_vop_strategy_read(struct vop_strategy_args
*ap
);
159 static int hammer_vop_strategy_write(struct vop_strategy_args
*ap
);
162 * hammer_vop_fsync { vp, waitfor }
164 * fsync() an inode to disk and wait for it to be completely committed
165 * such that the information would not be undone if a crash occured after
168 * NOTE: HAMMER's fsync()'s are going to remain expensive until we implement
169 * a REDO log. A sysctl is provided to relax HAMMER's fsync()
172 * Ultimately the combination of a REDO log and use of fast storage
173 * to front-end cluster caches will make fsync fast, but it aint
174 * here yet. And, in anycase, we need real transactional
175 * all-or-nothing features which are not restricted to a single file.
179 hammer_vop_fsync(struct vop_fsync_args
*ap
)
181 hammer_inode_t ip
= VTOI(ap
->a_vp
);
182 hammer_mount_t hmp
= ip
->hmp
;
183 int waitfor
= ap
->a_waitfor
;
186 lwkt_gettoken(&hmp
->fs_token
);
189 * Fsync rule relaxation (default is either full synchronous flush
190 * or REDO semantics with synchronous flush).
192 if (ap
->a_flags
& VOP_FSYNC_SYSCALL
) {
193 switch(hammer_fsync_mode
) {
196 /* no REDO, full synchronous flush */
200 /* no REDO, full asynchronous flush */
201 if (waitfor
== MNT_WAIT
)
202 waitfor
= MNT_NOWAIT
;
205 /* REDO semantics, synchronous flush */
206 if (hmp
->version
< HAMMER_VOL_VERSION_FOUR
)
208 mode
= HAMMER_FLUSH_UNDOS_AUTO
;
211 /* REDO semantics, relaxed asynchronous flush */
212 if (hmp
->version
< HAMMER_VOL_VERSION_FOUR
)
214 mode
= HAMMER_FLUSH_UNDOS_RELAXED
;
215 if (waitfor
== MNT_WAIT
)
216 waitfor
= MNT_NOWAIT
;
219 /* ignore the fsync() system call */
220 lwkt_reltoken(&hmp
->fs_token
);
223 /* we have to do something */
224 mode
= HAMMER_FLUSH_UNDOS_RELAXED
;
225 if (waitfor
== MNT_WAIT
)
226 waitfor
= MNT_NOWAIT
;
231 * Fast fsync only needs to flush the UNDO/REDO fifo if
232 * HAMMER_INODE_REDO is non-zero and the only modifications
233 * made to the file are write or write-extends.
235 if ((ip
->flags
& HAMMER_INODE_REDO
) &&
236 (ip
->flags
& HAMMER_INODE_MODMASK_NOREDO
) == 0) {
237 ++hammer_count_fsyncs
;
238 hammer_flusher_flush_undos(hmp
, mode
);
240 if (ip
->vp
&& (ip
->flags
& HAMMER_INODE_MODMASK
) == 0)
242 lwkt_reltoken(&hmp
->fs_token
);
247 * REDO is enabled by fsync(), the idea being we really only
248 * want to lay down REDO records when programs are using
249 * fsync() heavily. The first fsync() on the file starts
250 * the gravy train going and later fsync()s keep it hot by
251 * resetting the redo_count.
253 * We weren't running REDOs before now so we have to fall
254 * through and do a full fsync of what we have.
256 if (hmp
->version
>= HAMMER_VOL_VERSION_FOUR
&&
257 (hmp
->flags
& HAMMER_MOUNT_REDO_RECOVERY_RUN
) == 0) {
258 ip
->flags
|= HAMMER_INODE_REDO
;
265 * Do a full flush sequence.
267 * Attempt to release the vnode while waiting for the inode to
268 * finish flushing. This can really mess up inactive->reclaim
269 * sequences so only do it if the vnode is active.
271 * WARNING! The VX lock functions must be used. vn_lock() will
272 * fail when this is part of a VOP_RECLAIM sequence.
274 ++hammer_count_fsyncs
;
275 vfsync(ap
->a_vp
, waitfor
, 1, NULL
, NULL
);
276 hammer_flush_inode(ip
, HAMMER_FLUSH_SIGNAL
);
277 if (waitfor
== MNT_WAIT
) {
280 if ((ap
->a_vp
->v_flag
& VRECLAIMED
) == 0) {
286 hammer_wait_inode(ip
);
288 vn_relock(ap
->a_vp
, LK_EXCLUSIVE
);
290 if (ip
->vp
&& (ip
->flags
& HAMMER_INODE_MODMASK
) == 0)
292 lwkt_reltoken(&hmp
->fs_token
);
297 * hammer_vop_read { vp, uio, ioflag, cred }
299 * MPSAFE (for the cache safe does not require fs_token)
303 hammer_vop_read(struct vop_read_args
*ap
)
305 struct hammer_transaction trans
;
320 if (ap
->a_vp
->v_type
== VDIR
)
322 if (ap
->a_vp
->v_type
!= VREG
)
331 * Attempt to shortcut directly to the VM object using lwbufs.
332 * This is much faster than instantiating buffer cache buffers.
334 resid
= uio
->uio_resid
;
335 error
= vop_helper_read_shortcut(ap
);
336 hammer_stats_file_read
+= resid
- uio
->uio_resid
;
339 if (uio
->uio_resid
== 0)
343 * Allow the UIO's size to override the sequential heuristic.
345 blksize
= hammer_blocksize(uio
->uio_offset
);
346 seqcount
= howmany(uio
->uio_resid
, MAXBSIZE
);
347 ioseqcount
= ap
->a_ioflag
>> IO_SEQSHIFT
;
348 if (seqcount
< ioseqcount
)
349 seqcount
= ioseqcount
;
352 * If reading or writing a huge amount of data we have to break
353 * atomicy and allow the operation to be interrupted by a signal
354 * or it can DOS the machine.
356 bigread
= (uio
->uio_resid
> 100 * 1024 * 1024);
359 * Access the data typically in HAMMER_BUFSIZE blocks via the
360 * buffer cache, but HAMMER may use a variable block size based
363 * XXX Temporary hack, delay the start transaction while we remain
364 * MPSAFE. NOTE: ino_data.size cannot change while vnode is
367 while (uio
->uio_resid
> 0 && uio
->uio_offset
< ip
->ino_data
.size
) {
371 blksize
= hammer_blocksize(uio
->uio_offset
);
372 offset
= (int)uio
->uio_offset
& (blksize
- 1);
373 base_offset
= uio
->uio_offset
- offset
;
375 if (bigread
&& (error
= hammer_signal_check(ip
->hmp
)) != 0)
381 bp
= getblk(ap
->a_vp
, base_offset
, blksize
, 0, 0);
382 if ((bp
->b_flags
& (B_INVAL
| B_CACHE
| B_RAM
)) == B_CACHE
) {
383 bp
->b_flags
&= ~B_AGE
;
387 if (ap
->a_ioflag
& IO_NRDELAY
) {
389 return (EWOULDBLOCK
);
395 if (got_trans
== 0) {
396 hammer_start_transaction(&trans
, ip
->hmp
);
401 * NOTE: A valid bp has already been acquired, but was not
404 if (hammer_cluster_enable
) {
406 * Use file_limit to prevent cluster_read() from
407 * creating buffers of the wrong block size past
410 file_limit
= ip
->ino_data
.size
;
411 if (base_offset
< HAMMER_XDEMARC
&&
412 file_limit
> HAMMER_XDEMARC
) {
413 file_limit
= HAMMER_XDEMARC
;
415 error
= cluster_readx(ap
->a_vp
,
416 file_limit
, base_offset
,
422 error
= breadnx(ap
->a_vp
, base_offset
,
431 if ((hammer_debug_io
& 0x0001) && (bp
->b_flags
& B_IOISSUED
)) {
432 hdkprintf("zone2_offset %016jx read file %016jx@%016jx\n",
433 (intmax_t)bp
->b_bio2
.bio_offset
,
434 (intmax_t)ip
->obj_id
,
435 (intmax_t)bp
->b_loffset
);
437 bp
->b_flags
&= ~B_IOISSUED
;
438 if (blksize
== HAMMER_XBUFSIZE
)
439 bp
->b_flags
|= B_CLUSTEROK
;
441 n
= blksize
- offset
;
442 if (n
> uio
->uio_resid
)
444 if (n
> ip
->ino_data
.size
- uio
->uio_offset
)
445 n
= (int)(ip
->ino_data
.size
- uio
->uio_offset
);
448 * Set B_AGE, data has a lower priority than meta-data.
450 * Use a hold/unlock/drop sequence to run the uiomove
451 * with the buffer unlocked, avoiding deadlocks against
452 * read()s on mmap()'d spaces.
454 bp
->b_flags
|= B_AGE
;
455 error
= uiomovebp(bp
, (char *)bp
->b_data
+ offset
, n
, uio
);
460 hammer_stats_file_read
+= n
;
466 * Try to update the atime with just the inode lock for maximum
467 * concurrency. If we can't shortcut it we have to get the full
470 if (got_trans
== 0 && hammer_update_atime_quick(ip
) < 0) {
471 hammer_start_transaction(&trans
, ip
->hmp
);
476 if ((ip
->flags
& HAMMER_INODE_RO
) == 0 &&
477 (ip
->hmp
->mp
->mnt_flag
& MNT_NOATIME
) == 0) {
478 lwkt_gettoken(&hmp
->fs_token
);
479 ip
->ino_data
.atime
= trans
.time
;
480 hammer_modify_inode(&trans
, ip
, HAMMER_INODE_ATIME
);
481 hammer_done_transaction(&trans
);
482 lwkt_reltoken(&hmp
->fs_token
);
484 hammer_done_transaction(&trans
);
491 * hammer_vop_write { vp, uio, ioflag, cred }
495 hammer_vop_write(struct vop_write_args
*ap
)
497 struct hammer_transaction trans
;
515 if (vp
->v_type
!= VREG
)
521 seqcount
= ap
->a_ioflag
>> IO_SEQSHIFT
;
523 if (ip
->flags
& HAMMER_INODE_RO
)
527 * Create a transaction to cover the operations we perform.
529 hammer_start_transaction(&trans
, hmp
);
533 * Use v_lastwrite_ts if file not open for writing
534 * (i.e. a late msync)
536 if (uio
->uio_segflg
== UIO_NOCOPY
) {
537 if (vp
->v_flag
& VLASTWRITETS
) {
538 trans
.time
= vp
->v_lastwrite_ts
.tv_sec
* 1000000 +
539 vp
->v_lastwrite_ts
.tv_nsec
/ 1000;
541 trans
.time
= ip
->ino_data
.mtime
;
544 vclrflags(vp
, VLASTWRITETS
);
550 if (ap
->a_ioflag
& IO_APPEND
)
551 uio
->uio_offset
= ip
->ino_data
.size
;
554 * Check for illegal write offsets. Valid range is 0...2^63-1.
556 * NOTE: the base_off assignment is required to work around what
557 * I consider to be a GCC-4 optimization bug.
559 if (uio
->uio_offset
< 0) {
560 hammer_done_transaction(&trans
);
563 base_offset
= uio
->uio_offset
+ uio
->uio_resid
; /* work around gcc-4 */
564 if (uio
->uio_resid
> 0 && base_offset
<= uio
->uio_offset
) {
565 hammer_done_transaction(&trans
);
569 if (uio
->uio_resid
> 0 && (td
= uio
->uio_td
) != NULL
&& td
->td_proc
&&
570 base_offset
> td
->td_proc
->p_rlimit
[RLIMIT_FSIZE
].rlim_cur
) {
571 hammer_done_transaction(&trans
);
572 lwpsignal(td
->td_proc
, td
->td_lwp
, SIGXFSZ
);
577 * If reading or writing a huge amount of data we have to break
578 * atomicy and allow the operation to be interrupted by a signal
579 * or it can DOS the machine.
581 * Preset redo_count so we stop generating REDOs earlier if the
584 * redo_count is heuristical, SMP races are ok
586 bigwrite
= (uio
->uio_resid
> 100 * 1024 * 1024);
587 if ((ip
->flags
& HAMMER_INODE_REDO
) &&
588 ip
->redo_count
< hammer_limit_redo
) {
589 ip
->redo_count
+= uio
->uio_resid
;
593 * Access the data typically in HAMMER_BUFSIZE blocks via the
594 * buffer cache, but HAMMER may use a variable block size based
597 while (uio
->uio_resid
> 0) {
605 if ((error
= hammer_checkspace(hmp
, HAMMER_CHKSPC_WRITE
)) != 0)
607 if (bigwrite
&& (error
= hammer_signal_check(hmp
)) != 0)
610 blksize
= hammer_blocksize(uio
->uio_offset
);
613 * Control the number of pending records associated with
614 * this inode. If too many have accumulated start a
615 * flush. Try to maintain a pipeline with the flusher.
617 * NOTE: It is possible for other sources to grow the
618 * records but not necessarily issue another flush,
619 * so use a timeout and ensure that a re-flush occurs.
621 if (ip
->rsv_recs
>= hammer_limit_inode_recs
) {
622 lwkt_gettoken(&hmp
->fs_token
);
623 hammer_flush_inode(ip
, HAMMER_FLUSH_SIGNAL
);
624 while (ip
->rsv_recs
>= hammer_limit_inode_recs
* 2) {
625 ip
->flags
|= HAMMER_INODE_RECSW
;
626 tsleep(&ip
->rsv_recs
, 0, "hmrwww", hz
);
627 hammer_flush_inode(ip
, HAMMER_FLUSH_SIGNAL
);
629 lwkt_reltoken(&hmp
->fs_token
);
633 * Do not allow HAMMER to blow out the buffer cache. Very
634 * large UIOs can lockout other processes due to bwillwrite()
637 * The hammer inode is not locked during these operations.
638 * The vnode is locked which can interfere with the pageout
639 * daemon for non-UIO_NOCOPY writes but should not interfere
640 * with the buffer cache. Even so, we cannot afford to
641 * allow the pageout daemon to build up too many dirty buffer
644 * Only call this if we aren't being recursively called from
645 * a virtual disk device (vn), else we may deadlock.
647 if ((ap
->a_ioflag
& IO_RECURSE
) == 0)
651 * Calculate the blocksize at the current offset and figure
652 * out how much we can actually write.
654 blkmask
= blksize
- 1;
655 offset
= (int)uio
->uio_offset
& blkmask
;
656 base_offset
= uio
->uio_offset
& ~(int64_t)blkmask
;
657 n
= blksize
- offset
;
658 if (n
> uio
->uio_resid
) {
664 nsize
= uio
->uio_offset
+ n
;
665 if (nsize
> ip
->ino_data
.size
) {
666 if (uio
->uio_offset
> ip
->ino_data
.size
)
669 trivial
= NVEXTF_TRIVIAL
;
670 nvextendbuf(ap
->a_vp
,
673 hammer_blocksize(ip
->ino_data
.size
),
674 hammer_blocksize(nsize
),
675 hammer_blockoff(ip
->ino_data
.size
),
676 hammer_blockoff(nsize
),
679 kflags
|= NOTE_EXTEND
;
682 if (uio
->uio_segflg
== UIO_NOCOPY
) {
684 * Issuing a write with the same data backing the
685 * buffer. Instantiate the buffer to collect the
686 * backing vm pages, then read-in any missing bits.
688 * This case is used by vop_stdputpages().
690 bp
= getblk(ap
->a_vp
, base_offset
,
691 blksize
, GETBLK_BHEAVY
, 0);
692 if ((bp
->b_flags
& B_CACHE
) == 0) {
694 error
= bread(ap
->a_vp
, base_offset
,
697 } else if (offset
== 0 && uio
->uio_resid
>= blksize
) {
699 * Even though we are entirely overwriting the buffer
700 * we may still have to zero it out to avoid a
701 * mmap/write visibility issue.
703 bp
= getblk(ap
->a_vp
, base_offset
, blksize
, GETBLK_BHEAVY
, 0);
704 if ((bp
->b_flags
& B_CACHE
) == 0)
706 } else if (base_offset
>= ip
->ino_data
.size
) {
708 * If the base offset of the buffer is beyond the
709 * file EOF, we don't have to issue a read.
711 bp
= getblk(ap
->a_vp
, base_offset
,
712 blksize
, GETBLK_BHEAVY
, 0);
716 * Partial overwrite, read in any missing bits then
717 * replace the portion being written.
719 error
= bread(ap
->a_vp
, base_offset
, blksize
, &bp
);
724 error
= uiomovebp(bp
, bp
->b_data
+ offset
, n
, uio
);
726 lwkt_gettoken(&hmp
->fs_token
);
729 * Generate REDO records if enabled and redo_count will not
730 * exceeded the limit.
732 * If redo_count exceeds the limit we stop generating records
733 * and clear HAMMER_INODE_REDO. This will cause the next
734 * fsync() to do a full meta-data sync instead of just an
735 * UNDO/REDO fifo update.
737 * When clearing HAMMER_INODE_REDO any pre-existing REDOs
738 * will still be tracked. The tracks will be terminated
739 * when the related meta-data (including possible data
740 * modifications which are not tracked via REDO) is
743 if ((ip
->flags
& HAMMER_INODE_REDO
) && error
== 0) {
744 if (ip
->redo_count
< hammer_limit_redo
) {
745 bp
->b_flags
|= B_VFSFLAG1
;
746 error
= hammer_generate_redo(&trans
, ip
,
747 base_offset
+ offset
,
752 ip
->flags
&= ~HAMMER_INODE_REDO
;
757 * If we screwed up we have to undo any VM size changes we
763 nvtruncbuf(ap
->a_vp
, ip
->ino_data
.size
,
764 hammer_blocksize(ip
->ino_data
.size
),
765 hammer_blockoff(ip
->ino_data
.size
),
768 lwkt_reltoken(&hmp
->fs_token
);
771 kflags
|= NOTE_WRITE
;
772 hammer_stats_file_write
+= n
;
773 if (blksize
== HAMMER_XBUFSIZE
)
774 bp
->b_flags
|= B_CLUSTEROK
;
775 if (ip
->ino_data
.size
< uio
->uio_offset
) {
776 ip
->ino_data
.size
= uio
->uio_offset
;
777 flags
= HAMMER_INODE_SDIRTY
;
781 ip
->ino_data
.mtime
= trans
.time
;
782 flags
|= HAMMER_INODE_MTIME
| HAMMER_INODE_BUFS
;
783 hammer_modify_inode(&trans
, ip
, flags
);
786 * Once we dirty the buffer any cached zone-X offset
787 * becomes invalid. HAMMER NOTE: no-history mode cannot
788 * allow overwriting over the same data sector unless
789 * we provide UNDOs for the old data, which we don't.
791 bp
->b_bio2
.bio_offset
= NOOFFSET
;
793 lwkt_reltoken(&hmp
->fs_token
);
796 * Final buffer disposition.
798 * Because meta-data updates are deferred, HAMMER is
799 * especially sensitive to excessive bdwrite()s because
800 * the I/O stream is not broken up by disk reads. So the
801 * buffer cache simply cannot keep up.
803 * WARNING! blksize is variable. cluster_write() is
804 * expected to not blow up if it encounters
805 * buffers that do not match the passed blksize.
807 * NOTE! Hammer shouldn't need to bawrite()/cluster_write().
808 * The ip->rsv_recs check should burst-flush the data.
809 * If we queue it immediately the buf could be left
810 * locked on the device queue for a very long time.
812 * However, failing to flush a dirty buffer out when
813 * issued from the pageout daemon can result in a low
814 * memory deadlock against bio_page_alloc(), so we
815 * have to bawrite() on IO_ASYNC as well.
817 * NOTE! To avoid degenerate stalls due to mismatched block
818 * sizes we only honor IO_DIRECT on the write which
819 * abuts the end of the buffer. However, we must
820 * honor IO_SYNC in case someone is silly enough to
821 * configure a HAMMER file as swap, or when HAMMER
822 * is serving NFS (for commits). Ick ick.
824 bp
->b_flags
|= B_AGE
;
825 if (blksize
== HAMMER_XBUFSIZE
)
826 bp
->b_flags
|= B_CLUSTEROK
;
828 if (ap
->a_ioflag
& IO_SYNC
) {
830 } else if ((ap
->a_ioflag
& IO_DIRECT
) && endofblk
) {
832 } else if (ap
->a_ioflag
& IO_ASYNC
) {
834 } else if (hammer_cluster_enable
&&
835 !(ap
->a_vp
->v_mount
->mnt_flag
& MNT_NOCLUSTERW
)) {
836 if (base_offset
< HAMMER_XDEMARC
)
837 cluster_eof
= hammer_blockdemarc(base_offset
,
840 cluster_eof
= ip
->ino_data
.size
;
841 cluster_write(bp
, cluster_eof
, blksize
, seqcount
);
846 hammer_done_transaction(&trans
);
847 hammer_knote(ap
->a_vp
, kflags
);
853 * hammer_vop_access { vp, mode, cred }
855 * MPSAFE - does not require fs_token
859 hammer_vop_access(struct vop_access_args
*ap
)
861 hammer_inode_t ip
= VTOI(ap
->a_vp
);
866 uid
= hammer_to_unix_xid(&ip
->ino_data
.uid
);
867 gid
= hammer_to_unix_xid(&ip
->ino_data
.gid
);
869 error
= vop_helper_access(ap
, uid
, gid
, ip
->ino_data
.mode
,
870 ip
->ino_data
.uflags
);
875 * hammer_vop_advlock { vp, id, op, fl, flags }
877 * MPSAFE - does not require fs_token
881 hammer_vop_advlock(struct vop_advlock_args
*ap
)
883 hammer_inode_t ip
= VTOI(ap
->a_vp
);
885 return (lf_advlock(ap
, &ip
->advlock
, ip
->ino_data
.size
));
889 * hammer_vop_close { vp, fflag }
891 * We can only sync-on-close for normal closes. XXX disabled for now.
895 hammer_vop_close(struct vop_close_args
*ap
)
898 struct vnode
*vp
= ap
->a_vp
;
899 hammer_inode_t ip
= VTOI(vp
);
901 if (ip
->flags
& (HAMMER_INODE_CLOSESYNC
|HAMMER_INODE_CLOSEASYNC
)) {
902 if (vn_islocked(vp
) == LK_EXCLUSIVE
&&
903 (vp
->v_flag
& (VINACTIVE
|VRECLAIMED
)) == 0) {
904 if (ip
->flags
& HAMMER_INODE_CLOSESYNC
)
907 waitfor
= MNT_NOWAIT
;
908 ip
->flags
&= ~(HAMMER_INODE_CLOSESYNC
|
909 HAMMER_INODE_CLOSEASYNC
);
910 VOP_FSYNC(vp
, MNT_NOWAIT
, waitfor
);
914 return (vop_stdclose(ap
));
918 * hammer_vop_ncreate { nch, dvp, vpp, cred, vap }
920 * The operating system has already ensured that the directory entry
921 * does not exist and done all appropriate namespace locking.
925 hammer_vop_ncreate(struct vop_ncreate_args
*ap
)
927 struct hammer_transaction trans
;
930 struct nchandle
*nch
;
935 dip
= VTOI(ap
->a_dvp
);
938 if (dip
->flags
& HAMMER_INODE_RO
)
940 if ((error
= hammer_checkspace(hmp
, HAMMER_CHKSPC_CREATE
)) != 0)
944 * Create a transaction to cover the operations we perform.
946 lwkt_gettoken(&hmp
->fs_token
);
947 hammer_start_transaction(&trans
, hmp
);
950 * Create a new filesystem object of the requested type. The
951 * returned inode will be referenced and shared-locked to prevent
952 * it from being moved to the flusher.
954 error
= hammer_create_inode(&trans
, ap
->a_vap
, ap
->a_cred
,
955 dip
, nch
->ncp
->nc_name
, nch
->ncp
->nc_nlen
,
958 hkprintf("hammer_create_inode error %d\n", error
);
959 hammer_done_transaction(&trans
);
961 lwkt_reltoken(&hmp
->fs_token
);
966 * Add the new filesystem object to the directory. This will also
967 * bump the inode's link count.
969 error
= hammer_ip_add_direntry(&trans
, dip
,
970 nch
->ncp
->nc_name
, nch
->ncp
->nc_nlen
,
973 hkprintf("hammer_ip_add_direntry error %d\n", error
);
979 hammer_rel_inode(nip
, 0);
980 hammer_done_transaction(&trans
);
983 error
= hammer_get_vnode(nip
, ap
->a_vpp
);
984 hammer_done_transaction(&trans
);
985 hammer_rel_inode(nip
, 0);
987 cache_setunresolved(ap
->a_nch
);
988 cache_setvp(ap
->a_nch
, *ap
->a_vpp
);
990 hammer_knote(ap
->a_dvp
, NOTE_WRITE
);
992 lwkt_reltoken(&hmp
->fs_token
);
997 * hammer_vop_getattr { vp, vap }
999 * Retrieve an inode's attribute information. When accessing inodes
1000 * historically we fake the atime field to ensure consistent results.
1001 * The atime field is stored in the B-Tree element and allowed to be
1002 * updated without cycling the element.
1004 * MPSAFE - does not require fs_token
1008 hammer_vop_getattr(struct vop_getattr_args
*ap
)
1010 hammer_inode_t ip
= VTOI(ap
->a_vp
);
1011 struct vattr
*vap
= ap
->a_vap
;
1014 * We want the fsid to be different when accessing a filesystem
1015 * with different as-of's so programs like diff don't think
1016 * the files are the same.
1018 * We also want the fsid to be the same when comparing snapshots,
1019 * or when comparing mirrors (which might be backed by different
1020 * physical devices). HAMMER fsids are based on the PFS's
1021 * shared_uuid field.
1023 * XXX there is a chance of collision here. The va_fsid reported
1024 * by stat is different from the more involved fsid used in the
1027 hammer_lock_sh(&ip
->lock
);
1028 vap
->va_fsid
= ip
->pfsm
->fsid_udev
^ (uint32_t)ip
->obj_asof
^
1029 (uint32_t)(ip
->obj_asof
>> 32);
1031 vap
->va_fileid
= ip
->ino_leaf
.base
.obj_id
;
1032 vap
->va_mode
= ip
->ino_data
.mode
;
1033 vap
->va_nlink
= ip
->ino_data
.nlinks
;
1034 vap
->va_uid
= hammer_to_unix_xid(&ip
->ino_data
.uid
);
1035 vap
->va_gid
= hammer_to_unix_xid(&ip
->ino_data
.gid
);
1038 vap
->va_size
= ip
->ino_data
.size
;
1041 * Special case for @@PFS softlinks. The actual size of the
1042 * expanded softlink is "@@0x%016llx:%05d" == 26 bytes.
1043 * or for MAX_TID is "@@-1:%05d" == 10 bytes.
1045 * Note that userspace hammer command does not allow users to
1046 * create a @@PFS softlink under an existing other PFS (id!=0)
1047 * so the ip localization here for @@PFS softlink is always 0.
1049 if (ip
->ino_data
.obj_type
== HAMMER_OBJTYPE_SOFTLINK
&&
1050 ip
->ino_data
.size
== 10 &&
1051 ip
->obj_asof
== HAMMER_MAX_TID
&&
1052 ip
->obj_localization
== HAMMER_DEF_LOCALIZATION
&&
1053 strncmp(ip
->ino_data
.ext
.symlink
, "@@PFS", 5) == 0) {
1054 if (hammer_is_pfs_slave(&ip
->pfsm
->pfsd
))
1061 * We must provide a consistent atime and mtime for snapshots
1062 * so people can do a 'tar cf - ... | md5' on them and get
1063 * consistent results.
1065 if (ip
->flags
& HAMMER_INODE_RO
) {
1066 hammer_time_to_timespec(ip
->ino_data
.ctime
, &vap
->va_atime
);
1067 hammer_time_to_timespec(ip
->ino_data
.ctime
, &vap
->va_mtime
);
1069 hammer_time_to_timespec(ip
->ino_data
.atime
, &vap
->va_atime
);
1070 hammer_time_to_timespec(ip
->ino_data
.mtime
, &vap
->va_mtime
);
1072 hammer_time_to_timespec(ip
->ino_data
.ctime
, &vap
->va_ctime
);
1073 vap
->va_flags
= ip
->ino_data
.uflags
;
1074 vap
->va_gen
= 1; /* hammer inums are unique for all time */
1075 vap
->va_blocksize
= HAMMER_BUFSIZE
;
1076 if (ip
->ino_data
.size
>= HAMMER_XDEMARC
) {
1077 vap
->va_bytes
= HAMMER_XBUFSIZE64_DOALIGN(ip
->ino_data
.size
);
1078 } else if (ip
->ino_data
.size
> HAMMER_HBUFSIZE
) {
1079 vap
->va_bytes
= HAMMER_BUFSIZE64_DOALIGN(ip
->ino_data
.size
);
1081 vap
->va_bytes
= HAMMER_DATA_DOALIGN(ip
->ino_data
.size
);
1084 vap
->va_type
= hammer_get_vnode_type(ip
->ino_data
.obj_type
);
1085 vap
->va_filerev
= 0; /* XXX */
1086 vap
->va_uid_uuid
= ip
->ino_data
.uid
;
1087 vap
->va_gid_uuid
= ip
->ino_data
.gid
;
1088 vap
->va_fsid_uuid
= ip
->hmp
->fsid
;
1089 vap
->va_vaflags
= VA_UID_UUID_VALID
| VA_GID_UUID_VALID
|
1092 switch (ip
->ino_data
.obj_type
) {
1093 case HAMMER_OBJTYPE_CDEV
:
1094 case HAMMER_OBJTYPE_BDEV
:
1095 vap
->va_rmajor
= ip
->ino_data
.rmajor
;
1096 vap
->va_rminor
= ip
->ino_data
.rminor
;
1101 hammer_unlock(&ip
->lock
);
1106 * hammer_vop_nresolve { nch, dvp, cred }
1108 * Locate the requested directory entry.
1112 hammer_vop_nresolve(struct vop_nresolve_args
*ap
)
1114 struct hammer_transaction trans
;
1115 struct namecache
*ncp
;
1120 struct hammer_cursor cursor
;
1129 uint32_t localization
;
1130 uint32_t max_iterations
;
1133 * Misc initialization, plus handle as-of name extensions. Look for
1134 * the '@@' extension. Note that as-of files and directories cannot
1137 dip
= VTOI(ap
->a_dvp
);
1138 ncp
= ap
->a_nch
->ncp
;
1139 asof
= dip
->obj_asof
;
1140 localization
= dip
->obj_localization
; /* for code consistency */
1141 nlen
= ncp
->nc_nlen
;
1142 flags
= dip
->flags
& HAMMER_INODE_RO
;
1146 lwkt_gettoken(&hmp
->fs_token
);
1147 hammer_simple_transaction(&trans
, hmp
);
1149 for (i
= 0; i
< nlen
; ++i
) {
1150 if (ncp
->nc_name
[i
] == '@' && ncp
->nc_name
[i
+1] == '@') {
1151 error
= hammer_str_to_tid(ncp
->nc_name
+ i
+ 2,
1152 &ispfs
, &asof
, &localization
);
1157 if (asof
!= HAMMER_MAX_TID
)
1158 flags
|= HAMMER_INODE_RO
;
1165 * If this is a PFS we dive into the PFS root inode
1167 if (ispfs
&& nlen
== 0) {
1168 ip
= hammer_get_inode(&trans
, dip
, HAMMER_OBJID_ROOT
,
1172 error
= hammer_get_vnode(ip
, &vp
);
1173 hammer_rel_inode(ip
, 0);
1179 cache_setvp(ap
->a_nch
, vp
);
1186 * If there is no path component the time extension is relative to dip.
1187 * e.g. "fubar/@@<snapshot>"
1189 * "." is handled by the kernel, but ".@@<snapshot>" is not.
1190 * e.g. "fubar/.@@<snapshot>"
1192 * ".." is handled by the kernel. We do not currently handle
1195 if (nlen
== 0 || (nlen
== 1 && ncp
->nc_name
[0] == '.')) {
1196 ip
= hammer_get_inode(&trans
, dip
, dip
->obj_id
,
1197 asof
, dip
->obj_localization
,
1200 error
= hammer_get_vnode(ip
, &vp
);
1201 hammer_rel_inode(ip
, 0);
1207 cache_setvp(ap
->a_nch
, vp
);
1214 * Calculate the namekey and setup the key range for the scan. This
1215 * works kinda like a chained hash table where the lower 32 bits
1216 * of the namekey synthesize the chain.
1218 * The key range is inclusive of both key_beg and key_end.
1220 namekey
= hammer_direntry_namekey(dip
, ncp
->nc_name
, nlen
,
1223 error
= hammer_init_cursor(&trans
, &cursor
, &dip
->cache
[1], dip
);
1224 cursor
.key_beg
.localization
= dip
->obj_localization
|
1225 hammer_dir_localization(dip
);
1226 cursor
.key_beg
.obj_id
= dip
->obj_id
;
1227 cursor
.key_beg
.key
= namekey
;
1228 cursor
.key_beg
.create_tid
= 0;
1229 cursor
.key_beg
.delete_tid
= 0;
1230 cursor
.key_beg
.rec_type
= HAMMER_RECTYPE_DIRENTRY
;
1231 cursor
.key_beg
.obj_type
= 0;
1233 cursor
.key_end
= cursor
.key_beg
;
1234 cursor
.key_end
.key
+= max_iterations
;
1236 cursor
.flags
|= HAMMER_CURSOR_END_INCLUSIVE
| HAMMER_CURSOR_ASOF
;
1239 * Scan all matching records (the chain), locate the one matching
1240 * the requested path component.
1242 * The hammer_ip_*() functions merge in-memory records with on-disk
1243 * records for the purposes of the search.
1246 localization
= HAMMER_DEF_LOCALIZATION
;
1249 error
= hammer_ip_first(&cursor
);
1250 while (error
== 0) {
1251 error
= hammer_ip_resolve_data(&cursor
);
1254 if (nlen
== cursor
.leaf
->data_len
- HAMMER_ENTRY_NAME_OFF
&&
1255 bcmp(ncp
->nc_name
, cursor
.data
->entry
.name
, nlen
) == 0) {
1256 obj_id
= cursor
.data
->entry
.obj_id
;
1257 localization
= cursor
.data
->entry
.localization
;
1260 error
= hammer_ip_next(&cursor
);
1263 hammer_done_cursor(&cursor
);
1266 * Lookup the obj_id. This should always succeed. If it does not
1267 * the filesystem may be damaged and we return a dummy inode.
1270 ip
= hammer_get_inode(&trans
, dip
, obj_id
,
1273 if (error
== ENOENT
) {
1274 hkprintf("WARNING: Missing inode for dirent \"%s\"\n"
1275 "\tobj_id = %016jx, asof=%016jx, lo=%08x\n",
1277 (intmax_t)obj_id
, (intmax_t)asof
,
1280 ip
= hammer_get_dummy_inode(&trans
, dip
, obj_id
,
1285 error
= hammer_get_vnode(ip
, &vp
);
1286 hammer_rel_inode(ip
, 0);
1292 cache_setvp(ap
->a_nch
, vp
);
1295 } else if (error
== ENOENT
) {
1296 cache_setvp(ap
->a_nch
, NULL
);
1299 hammer_done_transaction(&trans
);
1300 lwkt_reltoken(&hmp
->fs_token
);
1305 * hammer_vop_nlookupdotdot { dvp, vpp, cred }
1307 * Locate the parent directory of a directory vnode.
1309 * dvp is referenced but not locked. *vpp must be returned referenced and
1310 * locked. A parent_obj_id of 0 indicates that we are at the root.
1312 * NOTE: as-of sequences are not linked into the directory structure. If
1313 * we are at the root with a different asof then the mount point, reload
1314 * the same directory with the mount point's asof. I'm not sure what this
1315 * will do to NFS. We encode ASOF stamps in NFS file handles so it might not
1316 * get confused, but it hasn't been tested.
1320 hammer_vop_nlookupdotdot(struct vop_nlookupdotdot_args
*ap
)
1322 struct hammer_transaction trans
;
1326 int64_t parent_obj_id
;
1327 uint32_t parent_obj_localization
;
1331 dip
= VTOI(ap
->a_dvp
);
1332 asof
= dip
->obj_asof
;
1336 * Whos are parent? This could be the root of a pseudo-filesystem
1337 * whos parent is in another localization domain.
1339 lwkt_gettoken(&hmp
->fs_token
);
1340 parent_obj_id
= dip
->ino_data
.parent_obj_id
;
1341 if (dip
->obj_id
== HAMMER_OBJID_ROOT
)
1342 parent_obj_localization
= HAMMER_DEF_LOCALIZATION
;
1344 parent_obj_localization
= dip
->obj_localization
;
1347 * It's probably a PFS root when dip->ino_data.parent_obj_id is 0.
1349 if (parent_obj_id
== 0) {
1350 if (dip
->obj_id
== HAMMER_OBJID_ROOT
&&
1351 asof
!= hmp
->asof
) {
1352 parent_obj_id
= dip
->obj_id
;
1354 *ap
->a_fakename
= kmalloc(19, M_TEMP
, M_WAITOK
);
1355 ksnprintf(*ap
->a_fakename
, 19, "0x%016jx",
1356 (intmax_t)dip
->obj_asof
);
1359 lwkt_reltoken(&hmp
->fs_token
);
1364 hammer_simple_transaction(&trans
, hmp
);
1366 ip
= hammer_get_inode(&trans
, dip
, parent_obj_id
,
1367 asof
, parent_obj_localization
,
1368 dip
->flags
, &error
);
1370 error
= hammer_get_vnode(ip
, ap
->a_vpp
);
1371 hammer_rel_inode(ip
, 0);
1375 hammer_done_transaction(&trans
);
1376 lwkt_reltoken(&hmp
->fs_token
);
1381 * hammer_vop_nlink { nch, dvp, vp, cred }
1385 hammer_vop_nlink(struct vop_nlink_args
*ap
)
1387 struct hammer_transaction trans
;
1390 struct nchandle
*nch
;
1394 if (ap
->a_dvp
->v_mount
!= ap
->a_vp
->v_mount
)
1398 dip
= VTOI(ap
->a_dvp
);
1399 ip
= VTOI(ap
->a_vp
);
1402 if (dip
->obj_localization
!= ip
->obj_localization
)
1405 if (dip
->flags
& HAMMER_INODE_RO
)
1407 if (ip
->flags
& HAMMER_INODE_RO
)
1409 if ((error
= hammer_checkspace(hmp
, HAMMER_CHKSPC_CREATE
)) != 0)
1413 * Create a transaction to cover the operations we perform.
1415 lwkt_gettoken(&hmp
->fs_token
);
1416 hammer_start_transaction(&trans
, hmp
);
1419 * Add the filesystem object to the directory. Note that neither
1420 * dip nor ip are referenced or locked, but their vnodes are
1421 * referenced. This function will bump the inode's link count.
1423 error
= hammer_ip_add_direntry(&trans
, dip
,
1424 nch
->ncp
->nc_name
, nch
->ncp
->nc_nlen
,
1431 cache_setunresolved(nch
);
1432 cache_setvp(nch
, ap
->a_vp
);
1434 hammer_done_transaction(&trans
);
1435 hammer_knote(ap
->a_vp
, NOTE_LINK
);
1436 hammer_knote(ap
->a_dvp
, NOTE_WRITE
);
1437 lwkt_reltoken(&hmp
->fs_token
);
1442 * hammer_vop_nmkdir { nch, dvp, vpp, cred, vap }
1444 * The operating system has already ensured that the directory entry
1445 * does not exist and done all appropriate namespace locking.
1449 hammer_vop_nmkdir(struct vop_nmkdir_args
*ap
)
1451 struct hammer_transaction trans
;
1454 struct nchandle
*nch
;
1459 dip
= VTOI(ap
->a_dvp
);
1462 if (dip
->flags
& HAMMER_INODE_RO
)
1464 if ((error
= hammer_checkspace(hmp
, HAMMER_CHKSPC_CREATE
)) != 0)
1468 * Create a transaction to cover the operations we perform.
1470 lwkt_gettoken(&hmp
->fs_token
);
1471 hammer_start_transaction(&trans
, hmp
);
1474 * Create a new filesystem object of the requested type. The
1475 * returned inode will be referenced but not locked.
1477 error
= hammer_create_inode(&trans
, ap
->a_vap
, ap
->a_cred
,
1478 dip
, nch
->ncp
->nc_name
, nch
->ncp
->nc_nlen
,
1481 hammer_done_transaction(&trans
);
1483 lwkt_reltoken(&hmp
->fs_token
);
1487 * Add the new filesystem object to the directory. This will also
1488 * bump the inode's link count.
1490 error
= hammer_ip_add_direntry(&trans
, dip
,
1491 nch
->ncp
->nc_name
, nch
->ncp
->nc_nlen
,
1494 hkprintf("hammer_mkdir (add) error %d\n", error
);
1500 hammer_rel_inode(nip
, 0);
1503 error
= hammer_get_vnode(nip
, ap
->a_vpp
);
1504 hammer_rel_inode(nip
, 0);
1506 cache_setunresolved(ap
->a_nch
);
1507 cache_setvp(ap
->a_nch
, *ap
->a_vpp
);
1510 hammer_done_transaction(&trans
);
1512 hammer_knote(ap
->a_dvp
, NOTE_WRITE
| NOTE_LINK
);
1513 lwkt_reltoken(&hmp
->fs_token
);
1518 * hammer_vop_nmknod { nch, dvp, vpp, cred, vap }
1520 * The operating system has already ensured that the directory entry
1521 * does not exist and done all appropriate namespace locking.
1525 hammer_vop_nmknod(struct vop_nmknod_args
*ap
)
1527 struct hammer_transaction trans
;
1530 struct nchandle
*nch
;
1535 dip
= VTOI(ap
->a_dvp
);
1538 if (dip
->flags
& HAMMER_INODE_RO
)
1540 if ((error
= hammer_checkspace(hmp
, HAMMER_CHKSPC_CREATE
)) != 0)
1544 * Create a transaction to cover the operations we perform.
1546 lwkt_gettoken(&hmp
->fs_token
);
1547 hammer_start_transaction(&trans
, hmp
);
1550 * Create a new filesystem object of the requested type. The
1551 * returned inode will be referenced but not locked.
1553 * If mknod specifies a directory a pseudo-fs is created.
1555 error
= hammer_create_inode(&trans
, ap
->a_vap
, ap
->a_cred
,
1556 dip
, nch
->ncp
->nc_name
, nch
->ncp
->nc_nlen
,
1559 hammer_done_transaction(&trans
);
1561 lwkt_reltoken(&hmp
->fs_token
);
1566 * Add the new filesystem object to the directory. This will also
1567 * bump the inode's link count.
1569 error
= hammer_ip_add_direntry(&trans
, dip
,
1570 nch
->ncp
->nc_name
, nch
->ncp
->nc_nlen
,
1577 hammer_rel_inode(nip
, 0);
1580 error
= hammer_get_vnode(nip
, ap
->a_vpp
);
1581 hammer_rel_inode(nip
, 0);
1583 cache_setunresolved(ap
->a_nch
);
1584 cache_setvp(ap
->a_nch
, *ap
->a_vpp
);
1587 hammer_done_transaction(&trans
);
1589 hammer_knote(ap
->a_dvp
, NOTE_WRITE
);
1590 lwkt_reltoken(&hmp
->fs_token
);
1595 * hammer_vop_open { vp, mode, cred, fp }
1597 * MPSAFE (does not require fs_token)
1601 hammer_vop_open(struct vop_open_args
*ap
)
1605 ip
= VTOI(ap
->a_vp
);
1607 if ((ap
->a_mode
& FWRITE
) && (ip
->flags
& HAMMER_INODE_RO
))
1609 return(vop_stdopen(ap
));
1613 * hammer_vop_print { vp }
1617 hammer_vop_print(struct vop_print_args
*ap
)
1623 * hammer_vop_readdir { vp, uio, cred, *eofflag, *ncookies, off_t **cookies }
1627 hammer_vop_readdir(struct vop_readdir_args
*ap
)
1629 struct hammer_transaction trans
;
1630 struct hammer_cursor cursor
;
1634 hammer_base_elm_t base
;
1643 ip
= VTOI(ap
->a_vp
);
1645 saveoff
= uio
->uio_offset
;
1648 if (ap
->a_ncookies
) {
1649 ncookies
= uio
->uio_resid
/ 16 + 1;
1650 if (ncookies
> 1024)
1652 cookies
= kmalloc(ncookies
* sizeof(off_t
), M_TEMP
, M_WAITOK
);
1660 lwkt_gettoken(&hmp
->fs_token
);
1661 hammer_simple_transaction(&trans
, hmp
);
1664 * Handle artificial entries
1666 * It should be noted that the minimum value for a directory
1667 * hash key on-media is 0x0000000100000000, so we can use anything
1668 * less then that to represent our 'special' key space.
1672 r
= vop_write_dirent(&error
, uio
, ip
->obj_id
, DT_DIR
, 1, ".");
1676 cookies
[cookie_index
] = saveoff
;
1679 if (cookie_index
== ncookies
)
1683 if (ip
->ino_data
.parent_obj_id
) {
1684 r
= vop_write_dirent(&error
, uio
,
1685 ip
->ino_data
.parent_obj_id
,
1688 r
= vop_write_dirent(&error
, uio
,
1689 ip
->obj_id
, DT_DIR
, 2, "..");
1694 cookies
[cookie_index
] = saveoff
;
1697 if (cookie_index
== ncookies
)
1702 * Key range (begin and end inclusive) to scan. Directory keys
1703 * directly translate to a 64 bit 'seek' position.
1705 hammer_init_cursor(&trans
, &cursor
, &ip
->cache
[1], ip
);
1706 cursor
.key_beg
.localization
= ip
->obj_localization
|
1707 hammer_dir_localization(ip
);
1708 cursor
.key_beg
.obj_id
= ip
->obj_id
;
1709 cursor
.key_beg
.create_tid
= 0;
1710 cursor
.key_beg
.delete_tid
= 0;
1711 cursor
.key_beg
.rec_type
= HAMMER_RECTYPE_DIRENTRY
;
1712 cursor
.key_beg
.obj_type
= 0;
1713 cursor
.key_beg
.key
= saveoff
;
1715 cursor
.key_end
= cursor
.key_beg
;
1716 cursor
.key_end
.key
= HAMMER_MAX_KEY
;
1717 cursor
.asof
= ip
->obj_asof
;
1718 cursor
.flags
|= HAMMER_CURSOR_END_INCLUSIVE
| HAMMER_CURSOR_ASOF
;
1720 error
= hammer_ip_first(&cursor
);
1722 while (error
== 0) {
1723 error
= hammer_ip_resolve_data(&cursor
);
1726 base
= &cursor
.leaf
->base
;
1727 saveoff
= base
->key
;
1728 KKASSERT(cursor
.leaf
->data_len
> HAMMER_ENTRY_NAME_OFF
);
1730 if (base
->obj_id
!= ip
->obj_id
)
1731 hpanic("bad record at %p", cursor
.node
);
1733 dtype
= hammer_get_dtype(cursor
.leaf
->base
.obj_type
);
1734 r
= vop_write_dirent(
1735 &error
, uio
, cursor
.data
->entry
.obj_id
,
1737 cursor
.leaf
->data_len
- HAMMER_ENTRY_NAME_OFF
,
1738 (void *)cursor
.data
->entry
.name
);
1743 cookies
[cookie_index
] = base
->key
;
1745 if (cookie_index
== ncookies
)
1747 error
= hammer_ip_next(&cursor
);
1749 hammer_done_cursor(&cursor
);
1752 hammer_done_transaction(&trans
);
1755 *ap
->a_eofflag
= (error
== ENOENT
);
1756 uio
->uio_offset
= saveoff
;
1757 if (error
&& cookie_index
== 0) {
1758 if (error
== ENOENT
)
1761 kfree(cookies
, M_TEMP
);
1762 *ap
->a_ncookies
= 0;
1763 *ap
->a_cookies
= NULL
;
1766 if (error
== ENOENT
)
1769 *ap
->a_ncookies
= cookie_index
;
1770 *ap
->a_cookies
= cookies
;
1773 lwkt_reltoken(&hmp
->fs_token
);
1778 * hammer_vop_readlink { vp, uio, cred }
1782 hammer_vop_readlink(struct vop_readlink_args
*ap
)
1784 struct hammer_transaction trans
;
1785 struct hammer_cursor cursor
;
1789 uint32_t localization
;
1790 hammer_pseudofs_inmem_t pfsm
;
1793 ip
= VTOI(ap
->a_vp
);
1796 lwkt_gettoken(&hmp
->fs_token
);
1799 * Shortcut if the symlink data was stuffed into ino_data.
1801 * Also expand special "@@PFS%05d" softlinks (expansion only
1802 * occurs for non-historical (current) accesses made from the
1803 * primary filesystem).
1805 * Note that userspace hammer command does not allow users to
1806 * create a @@PFS softlink under an existing other PFS (id!=0)
1807 * so the ip localization here for @@PFS softlink is always 0.
1809 if (ip
->ino_data
.size
<= HAMMER_INODE_BASESYMLEN
) {
1813 ptr
= ip
->ino_data
.ext
.symlink
;
1814 bytes
= (int)ip
->ino_data
.size
;
1816 ip
->obj_asof
== HAMMER_MAX_TID
&&
1817 ip
->obj_localization
== HAMMER_DEF_LOCALIZATION
&&
1818 strncmp(ptr
, "@@PFS", 5) == 0) {
1819 hammer_simple_transaction(&trans
, hmp
);
1820 bcopy(ptr
+ 5, buf
, 5);
1822 localization
= pfs_to_lo(strtoul(buf
, NULL
, 10));
1823 pfsm
= hammer_load_pseudofs(&trans
, localization
,
1826 if (hammer_is_pfs_slave(&pfsm
->pfsd
)) {
1827 /* vap->va_size == 26 */
1828 ksnprintf(buf
, sizeof(buf
),
1830 (intmax_t)pfsm
->pfsd
.sync_end_tid
,
1831 lo_to_pfs(localization
));
1833 /* vap->va_size == 10 */
1834 ksnprintf(buf
, sizeof(buf
),
1836 lo_to_pfs(localization
));
1839 bytes
= strlen(buf
);
1842 hammer_rel_pseudofs(hmp
, pfsm
);
1843 hammer_done_transaction(&trans
);
1845 error
= uiomove(ptr
, bytes
, ap
->a_uio
);
1846 lwkt_reltoken(&hmp
->fs_token
);
1853 hammer_simple_transaction(&trans
, hmp
);
1854 hammer_init_cursor(&trans
, &cursor
, &ip
->cache
[1], ip
);
1857 * Key range (begin and end inclusive) to scan. Directory keys
1858 * directly translate to a 64 bit 'seek' position.
1860 cursor
.key_beg
.localization
= ip
->obj_localization
|
1861 HAMMER_LOCALIZE_MISC
;
1862 cursor
.key_beg
.obj_id
= ip
->obj_id
;
1863 cursor
.key_beg
.create_tid
= 0;
1864 cursor
.key_beg
.delete_tid
= 0;
1865 cursor
.key_beg
.rec_type
= HAMMER_RECTYPE_FIX
;
1866 cursor
.key_beg
.obj_type
= 0;
1867 cursor
.key_beg
.key
= HAMMER_FIXKEY_SYMLINK
;
1868 cursor
.asof
= ip
->obj_asof
;
1869 cursor
.flags
|= HAMMER_CURSOR_ASOF
;
1871 error
= hammer_ip_lookup(&cursor
);
1873 error
= hammer_ip_resolve_data(&cursor
);
1875 KKASSERT(cursor
.leaf
->data_len
>=
1876 HAMMER_SYMLINK_NAME_OFF
);
1877 error
= uiomove(cursor
.data
->symlink
.name
,
1878 cursor
.leaf
->data_len
-
1879 HAMMER_SYMLINK_NAME_OFF
,
1883 hammer_done_cursor(&cursor
);
1884 hammer_done_transaction(&trans
);
1885 lwkt_reltoken(&hmp
->fs_token
);
1890 * hammer_vop_nremove { nch, dvp, cred }
1894 hammer_vop_nremove(struct vop_nremove_args
*ap
)
1896 struct hammer_transaction trans
;
1901 dip
= VTOI(ap
->a_dvp
);
1904 if (hammer_nohistory(dip
) == 0 &&
1905 (error
= hammer_checkspace(hmp
, HAMMER_CHKSPC_REMOVE
)) != 0) {
1909 lwkt_gettoken(&hmp
->fs_token
);
1910 hammer_start_transaction(&trans
, hmp
);
1911 error
= hammer_dounlink(&trans
, ap
->a_nch
, ap
->a_dvp
, ap
->a_cred
, 0, 0);
1912 hammer_done_transaction(&trans
);
1914 hammer_knote(ap
->a_dvp
, NOTE_WRITE
);
1915 lwkt_reltoken(&hmp
->fs_token
);
1920 * hammer_vop_nrename { fnch, tnch, fdvp, tdvp, cred }
1924 hammer_vop_nrename(struct vop_nrename_args
*ap
)
1926 struct hammer_transaction trans
;
1927 struct namecache
*fncp
;
1928 struct namecache
*tncp
;
1929 hammer_inode_t fdip
;
1930 hammer_inode_t tdip
;
1933 struct hammer_cursor cursor
;
1935 uint32_t max_iterations
;
1938 if (ap
->a_fdvp
->v_mount
!= ap
->a_tdvp
->v_mount
)
1940 if (ap
->a_fdvp
->v_mount
!= ap
->a_fnch
->ncp
->nc_vp
->v_mount
)
1943 fdip
= VTOI(ap
->a_fdvp
);
1944 tdip
= VTOI(ap
->a_tdvp
);
1945 fncp
= ap
->a_fnch
->ncp
;
1946 tncp
= ap
->a_tnch
->ncp
;
1947 ip
= VTOI(fncp
->nc_vp
);
1948 KKASSERT(ip
!= NULL
);
1952 if (fdip
->obj_localization
!= tdip
->obj_localization
)
1954 if (fdip
->obj_localization
!= ip
->obj_localization
)
1957 if (fdip
->flags
& HAMMER_INODE_RO
)
1959 if (tdip
->flags
& HAMMER_INODE_RO
)
1961 if (ip
->flags
& HAMMER_INODE_RO
)
1963 if ((error
= hammer_checkspace(hmp
, HAMMER_CHKSPC_CREATE
)) != 0)
1966 lwkt_gettoken(&hmp
->fs_token
);
1967 hammer_start_transaction(&trans
, hmp
);
1970 * Remove tncp from the target directory and then link ip as
1971 * tncp. XXX pass trans to dounlink
1973 * Force the inode sync-time to match the transaction so it is
1974 * in-sync with the creation of the target directory entry.
1976 error
= hammer_dounlink(&trans
, ap
->a_tnch
, ap
->a_tdvp
,
1978 if (error
== 0 || error
== ENOENT
) {
1979 error
= hammer_ip_add_direntry(&trans
, tdip
,
1980 tncp
->nc_name
, tncp
->nc_nlen
,
1983 ip
->ino_data
.parent_obj_id
= tdip
->obj_id
;
1984 ip
->ino_data
.ctime
= trans
.time
;
1985 hammer_modify_inode(&trans
, ip
, HAMMER_INODE_DDIRTY
);
1989 goto failed
; /* XXX */
1992 * Locate the record in the originating directory and remove it.
1994 * Calculate the namekey and setup the key range for the scan. This
1995 * works kinda like a chained hash table where the lower 32 bits
1996 * of the namekey synthesize the chain.
1998 * The key range is inclusive of both key_beg and key_end.
2000 namekey
= hammer_direntry_namekey(fdip
, fncp
->nc_name
, fncp
->nc_nlen
,
2003 hammer_init_cursor(&trans
, &cursor
, &fdip
->cache
[1], fdip
);
2004 cursor
.key_beg
.localization
= fdip
->obj_localization
|
2005 hammer_dir_localization(fdip
);
2006 cursor
.key_beg
.obj_id
= fdip
->obj_id
;
2007 cursor
.key_beg
.key
= namekey
;
2008 cursor
.key_beg
.create_tid
= 0;
2009 cursor
.key_beg
.delete_tid
= 0;
2010 cursor
.key_beg
.rec_type
= HAMMER_RECTYPE_DIRENTRY
;
2011 cursor
.key_beg
.obj_type
= 0;
2013 cursor
.key_end
= cursor
.key_beg
;
2014 cursor
.key_end
.key
+= max_iterations
;
2015 cursor
.asof
= fdip
->obj_asof
;
2016 cursor
.flags
|= HAMMER_CURSOR_END_INCLUSIVE
| HAMMER_CURSOR_ASOF
;
2019 * Scan all matching records (the chain), locate the one matching
2020 * the requested path component.
2022 * The hammer_ip_*() functions merge in-memory records with on-disk
2023 * records for the purposes of the search.
2025 error
= hammer_ip_first(&cursor
);
2026 while (error
== 0) {
2027 if (hammer_ip_resolve_data(&cursor
) != 0)
2029 nlen
= cursor
.leaf
->data_len
- HAMMER_ENTRY_NAME_OFF
;
2031 if (fncp
->nc_nlen
== nlen
&&
2032 bcmp(fncp
->nc_name
, cursor
.data
->entry
.name
, nlen
) == 0) {
2035 error
= hammer_ip_next(&cursor
);
2039 * If all is ok we have to get the inode so we can adjust nlinks.
2041 * WARNING: hammer_ip_del_direntry() may have to terminate the
2042 * cursor to avoid a recursion. It's ok to call hammer_done_cursor()
2046 error
= hammer_ip_del_direntry(&trans
, &cursor
, fdip
, ip
);
2049 * XXX A deadlock here will break rename's atomicy for the purposes
2050 * of crash recovery.
2052 if (error
== EDEADLK
) {
2053 hammer_done_cursor(&cursor
);
2058 * Cleanup and tell the kernel that the rename succeeded.
2060 * NOTE: ip->vp, if non-NULL, cannot be directly referenced
2061 * without formally acquiring the vp since the vp might
2062 * have zero refs on it, or in the middle of a reclaim,
2065 hammer_done_cursor(&cursor
);
2067 cache_rename(ap
->a_fnch
, ap
->a_tnch
);
2068 hammer_knote(ap
->a_fdvp
, NOTE_WRITE
);
2069 hammer_knote(ap
->a_tdvp
, NOTE_WRITE
);
2073 error
= hammer_get_vnode(ip
, &vp
);
2074 if (error
== 0 && vp
) {
2076 hammer_knote(ip
->vp
, NOTE_RENAME
);
2080 hdkprintf("ip/vp race2 avoided\n");
2085 hammer_done_transaction(&trans
);
2086 lwkt_reltoken(&hmp
->fs_token
);
2091 * hammer_vop_nrmdir { nch, dvp, cred }
2095 hammer_vop_nrmdir(struct vop_nrmdir_args
*ap
)
2097 struct hammer_transaction trans
;
2102 dip
= VTOI(ap
->a_dvp
);
2105 if (hammer_nohistory(dip
) == 0 &&
2106 (error
= hammer_checkspace(hmp
, HAMMER_CHKSPC_REMOVE
)) != 0) {
2110 lwkt_gettoken(&hmp
->fs_token
);
2111 hammer_start_transaction(&trans
, hmp
);
2112 error
= hammer_dounlink(&trans
, ap
->a_nch
, ap
->a_dvp
, ap
->a_cred
, 0, 1);
2113 hammer_done_transaction(&trans
);
2115 hammer_knote(ap
->a_dvp
, NOTE_WRITE
| NOTE_LINK
);
2116 lwkt_reltoken(&hmp
->fs_token
);
2121 * hammer_vop_markatime { vp, cred }
2125 hammer_vop_markatime(struct vop_markatime_args
*ap
)
2127 struct hammer_transaction trans
;
2131 ip
= VTOI(ap
->a_vp
);
2132 if (ap
->a_vp
->v_mount
->mnt_flag
& MNT_RDONLY
)
2134 if (ip
->flags
& HAMMER_INODE_RO
)
2137 if (hmp
->mp
->mnt_flag
& MNT_NOATIME
)
2139 lwkt_gettoken(&hmp
->fs_token
);
2140 hammer_start_transaction(&trans
, hmp
);
2142 ip
->ino_data
.atime
= trans
.time
;
2143 hammer_modify_inode(&trans
, ip
, HAMMER_INODE_ATIME
);
2144 hammer_done_transaction(&trans
);
2145 hammer_knote(ap
->a_vp
, NOTE_ATTRIB
);
2146 lwkt_reltoken(&hmp
->fs_token
);
2151 * hammer_vop_setattr { vp, vap, cred }
2155 hammer_vop_setattr(struct vop_setattr_args
*ap
)
2157 struct hammer_transaction trans
;
2167 int64_t aligned_size
;
2172 ip
= ap
->a_vp
->v_data
;
2177 if (ap
->a_vp
->v_mount
->mnt_flag
& MNT_RDONLY
)
2179 if (ip
->flags
& HAMMER_INODE_RO
)
2181 if (hammer_nohistory(ip
) == 0 &&
2182 (error
= hammer_checkspace(hmp
, HAMMER_CHKSPC_REMOVE
)) != 0) {
2186 lwkt_gettoken(&hmp
->fs_token
);
2187 hammer_start_transaction(&trans
, hmp
);
2190 if (vap
->va_flags
!= VNOVAL
) {
2191 flags
= ip
->ino_data
.uflags
;
2192 error
= vop_helper_setattr_flags(&flags
, vap
->va_flags
,
2193 hammer_to_unix_xid(&ip
->ino_data
.uid
),
2196 if (ip
->ino_data
.uflags
!= flags
) {
2197 ip
->ino_data
.uflags
= flags
;
2198 ip
->ino_data
.ctime
= trans
.time
;
2199 modflags
|= HAMMER_INODE_DDIRTY
;
2200 kflags
|= NOTE_ATTRIB
;
2202 if (ip
->ino_data
.uflags
& (IMMUTABLE
| APPEND
)) {
2209 if (ip
->ino_data
.uflags
& (IMMUTABLE
| APPEND
)) {
2213 if (vap
->va_uid
!= (uid_t
)VNOVAL
|| vap
->va_gid
!= (gid_t
)VNOVAL
) {
2214 mode_t cur_mode
= ip
->ino_data
.mode
;
2215 uid_t cur_uid
= hammer_to_unix_xid(&ip
->ino_data
.uid
);
2216 gid_t cur_gid
= hammer_to_unix_xid(&ip
->ino_data
.gid
);
2217 hammer_uuid_t uuid_uid
;
2218 hammer_uuid_t uuid_gid
;
2220 error
= vop_helper_chown(ap
->a_vp
, vap
->va_uid
, vap
->va_gid
,
2222 &cur_uid
, &cur_gid
, &cur_mode
);
2224 hammer_guid_to_uuid(&uuid_uid
, cur_uid
);
2225 hammer_guid_to_uuid(&uuid_gid
, cur_gid
);
2226 if (kuuid_compare(&uuid_uid
, &ip
->ino_data
.uid
) ||
2227 kuuid_compare(&uuid_gid
, &ip
->ino_data
.gid
) ||
2228 ip
->ino_data
.mode
!= cur_mode
) {
2229 ip
->ino_data
.uid
= uuid_uid
;
2230 ip
->ino_data
.gid
= uuid_gid
;
2231 ip
->ino_data
.mode
= cur_mode
;
2232 ip
->ino_data
.ctime
= trans
.time
;
2233 modflags
|= HAMMER_INODE_DDIRTY
;
2235 kflags
|= NOTE_ATTRIB
;
2238 while (vap
->va_size
!= VNOVAL
&& ip
->ino_data
.size
!= vap
->va_size
) {
2239 switch(ap
->a_vp
->v_type
) {
2241 if (vap
->va_size
== ip
->ino_data
.size
)
2245 * Log the operation if in fast-fsync mode or if
2246 * there are unterminated redo write records present.
2248 * The second check is needed so the recovery code
2249 * properly truncates write redos even if nominal
2250 * REDO operations is turned off due to excessive
2251 * writes, because the related records might be
2252 * destroyed and never lay down a TERM_WRITE.
2254 if ((ip
->flags
& HAMMER_INODE_REDO
) ||
2255 (ip
->flags
& HAMMER_INODE_RDIRTY
)) {
2256 error
= hammer_generate_redo(&trans
, ip
,
2261 blksize
= hammer_blocksize(vap
->va_size
);
2264 * XXX break atomicy, we can deadlock the backend
2265 * if we do not release the lock. Probably not a
2268 if (vap
->va_size
< ip
->ino_data
.size
) {
2269 nvtruncbuf(ap
->a_vp
, vap
->va_size
,
2271 hammer_blockoff(vap
->va_size
),
2274 kflags
|= NOTE_WRITE
;
2276 nvextendbuf(ap
->a_vp
,
2279 hammer_blocksize(ip
->ino_data
.size
),
2280 hammer_blocksize(vap
->va_size
),
2281 hammer_blockoff(ip
->ino_data
.size
),
2282 hammer_blockoff(vap
->va_size
),
2285 kflags
|= NOTE_WRITE
| NOTE_EXTEND
;
2287 ip
->ino_data
.size
= vap
->va_size
;
2288 ip
->ino_data
.mtime
= trans
.time
;
2289 /* XXX safe to use SDIRTY instead of DDIRTY here? */
2290 modflags
|= HAMMER_INODE_MTIME
| HAMMER_INODE_DDIRTY
;
2291 vclrflags(ap
->a_vp
, VLASTWRITETS
);
2294 * On-media truncation is cached in the inode until
2295 * the inode is synchronized. We must immediately
2296 * handle any frontend records.
2299 hammer_ip_frontend_trunc(ip
, vap
->va_size
);
2300 if ((ip
->flags
& HAMMER_INODE_TRUNCATED
) == 0) {
2301 ip
->flags
|= HAMMER_INODE_TRUNCATED
;
2302 ip
->trunc_off
= vap
->va_size
;
2303 hammer_inode_dirty(ip
);
2304 } else if (ip
->trunc_off
> vap
->va_size
) {
2305 ip
->trunc_off
= vap
->va_size
;
2311 * When truncating, nvtruncbuf() may have cleaned out
2312 * a portion of the last block on-disk in the buffer
2313 * cache. We must clean out any frontend records
2314 * for blocks beyond the new last block.
2316 aligned_size
= (vap
->va_size
+ (blksize
- 1)) &
2317 ~(int64_t)(blksize
- 1);
2318 if (truncating
&& vap
->va_size
< aligned_size
) {
2319 aligned_size
-= blksize
;
2320 hammer_ip_frontend_trunc(ip
, aligned_size
);
2325 if ((ip
->flags
& HAMMER_INODE_TRUNCATED
) == 0) {
2326 ip
->flags
|= HAMMER_INODE_TRUNCATED
;
2327 ip
->trunc_off
= vap
->va_size
;
2328 hammer_inode_dirty(ip
);
2329 } else if (ip
->trunc_off
> vap
->va_size
) {
2330 ip
->trunc_off
= vap
->va_size
;
2332 hammer_ip_frontend_trunc(ip
, vap
->va_size
);
2333 ip
->ino_data
.size
= vap
->va_size
;
2334 ip
->ino_data
.mtime
= trans
.time
;
2335 modflags
|= HAMMER_INODE_MTIME
| HAMMER_INODE_DDIRTY
;
2336 vclrflags(ap
->a_vp
, VLASTWRITETS
);
2337 kflags
|= NOTE_ATTRIB
;
	if (vap->va_atime.tv_sec != VNOVAL) {
		ip->ino_data.atime = hammer_timespec_to_time(&vap->va_atime);
		modflags |= HAMMER_INODE_ATIME;
		kflags |= NOTE_ATTRIB;
	}
	if (vap->va_mtime.tv_sec != VNOVAL) {
		ip->ino_data.mtime = hammer_timespec_to_time(&vap->va_mtime);
		modflags |= HAMMER_INODE_MTIME;
		kflags |= NOTE_ATTRIB;
		vclrflags(ap->a_vp, VLASTWRITETS);
	}
	if (vap->va_mode != (mode_t)VNOVAL) {
		mode_t cur_mode = ip->ino_data.mode;
		uid_t cur_uid = hammer_to_unix_xid(&ip->ino_data.uid);
		gid_t cur_gid = hammer_to_unix_xid(&ip->ino_data.gid);

		error = vop_helper_chmod(ap->a_vp, vap->va_mode, ap->a_cred,
					 cur_uid, cur_gid, &cur_mode);
		if (error == 0 && ip->ino_data.mode != cur_mode) {
			ip->ino_data.mode = cur_mode;
			ip->ino_data.ctime = trans.time;
			modflags |= HAMMER_INODE_DDIRTY;
			kflags |= NOTE_ATTRIB;
		}
	}
done:
	if (error == 0)
		hammer_modify_inode(&trans, ip, modflags);
	hammer_done_transaction(&trans);
	hammer_knote(ap->a_vp, kflags);
	lwkt_reltoken(&hmp->fs_token);
	return (error);
}
/*
 * hammer_vop_nsymlink { nch, dvp, vpp, cred, vap, target }
 */
static
int
hammer_vop_nsymlink(struct vop_nsymlink_args *ap)
{
	struct hammer_transaction trans;
	hammer_inode_t dip;
	hammer_inode_t nip;
	hammer_record_t record;
	struct nchandle *nch;
	hammer_mount_t hmp;
	int error;
	int bytes;

	ap->a_vap->va_type = VLNK;

	nch = ap->a_nch;
	dip = VTOI(ap->a_dvp);
	hmp = dip->hmp;

	if (dip->flags & HAMMER_INODE_RO)
		return (EROFS);
	if ((error = hammer_checkspace(hmp, HAMMER_CHKSPC_CREATE)) != 0)
		return (error);

	/*
	 * Create a transaction to cover the operations we perform.
	 */
	lwkt_gettoken(&hmp->fs_token);
	hammer_start_transaction(&trans, hmp);

	/*
	 * Create a new filesystem object of the requested type.  The
	 * returned inode will be referenced but not locked.
	 */
	error = hammer_create_inode(&trans, ap->a_vap, ap->a_cred,
				    dip, nch->ncp->nc_name, nch->ncp->nc_nlen,
				    NULL, &nip);
	if (error) {
		hammer_done_transaction(&trans);
		*ap->a_vpp = NULL;
		lwkt_reltoken(&hmp->fs_token);
		return (error);
	}
	/*
	 * Add a record representing the symlink.  symlink stores the link
	 * as pure data, not a string, and is not \0 terminated.
	 */
	if (error == 0) {
		bytes = strlen(ap->a_target);

		if (bytes <= HAMMER_INODE_BASESYMLEN) {
			bcopy(ap->a_target, nip->ino_data.ext.symlink, bytes);
		} else {
			record = hammer_alloc_mem_record(nip, bytes);
			record->type = HAMMER_MEM_RECORD_GENERAL;

			record->leaf.base.localization = nip->obj_localization |
							 HAMMER_LOCALIZE_MISC;
			record->leaf.base.key = HAMMER_FIXKEY_SYMLINK;
			record->leaf.base.rec_type = HAMMER_RECTYPE_FIX;
			record->leaf.data_len = bytes;
			KKASSERT(HAMMER_SYMLINK_NAME_OFF == 0);
			bcopy(ap->a_target, record->data->symlink.name, bytes);
			error = hammer_ip_add_record(&trans, record);
		}

		/*
		 * Set the file size to the length of the link.
		 */
		if (error == 0) {
			nip->ino_data.size = bytes;
			hammer_modify_inode(&trans, nip, HAMMER_INODE_DDIRTY);
		}
	}
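	/*
	 * Example of the two storage paths above: a short target such as
	 * "a/b" (3 bytes, <= HAMMER_INODE_BASESYMLEN) is copied directly
	 * into the inode's ino_data.ext.symlink area, while a longer
	 * target is stored as a separate HAMMER_RECTYPE_FIX record keyed
	 * by HAMMER_FIXKEY_SYMLINK.
	 */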
	if (error == 0)
		error = hammer_ip_add_direntry(&trans, dip, nch->ncp->nc_name,
					       nch->ncp->nc_nlen, nip);

	/*
	 * Finish up.
	 */
	if (error) {
		hammer_rel_inode(nip, 0);
		*ap->a_vpp = NULL;
	} else {
		error = hammer_get_vnode(nip, ap->a_vpp);
		hammer_rel_inode(nip, 0);
		if (error == 0) {
			cache_setunresolved(ap->a_nch);
			cache_setvp(ap->a_nch, *ap->a_vpp);
			hammer_knote(ap->a_dvp, NOTE_WRITE);
		}
	}
	hammer_done_transaction(&trans);
	lwkt_reltoken(&hmp->fs_token);
	return (error);
}
/*
 * hammer_vop_nwhiteout { nch, dvp, cred, flags }
 */
static
int
hammer_vop_nwhiteout(struct vop_nwhiteout_args *ap)
{
	struct hammer_transaction trans;
	hammer_inode_t dip;
	hammer_mount_t hmp;
	int error;

	dip = VTOI(ap->a_dvp);
	hmp = dip->hmp;

	if (hammer_nohistory(dip) == 0 &&
	    (error = hammer_checkspace(hmp, HAMMER_CHKSPC_CREATE)) != 0) {
		return (error);
	}

	lwkt_gettoken(&hmp->fs_token);
	hammer_start_transaction(&trans, hmp);
	error = hammer_dounlink(&trans, ap->a_nch, ap->a_dvp,
				ap->a_cred, ap->a_flags, -1);
	hammer_done_transaction(&trans);
	lwkt_reltoken(&hmp->fs_token);

	return (error);
}
/*
 * hammer_vop_ioctl { vp, command, data, fflag, cred }
 */
static
int
hammer_vop_ioctl(struct vop_ioctl_args *ap)
{
	hammer_inode_t ip = ap->a_vp->v_data;
	hammer_mount_t hmp = ip->hmp;
	int error;

	lwkt_gettoken(&hmp->fs_token);
	error = hammer_ioctl(ip, ap->a_command, ap->a_data,
			     ap->a_fflag, ap->a_cred);
	lwkt_reltoken(&hmp->fs_token);
	return (error);
}
static
int
hammer_vop_mountctl(struct vop_mountctl_args *ap)
{
	static const struct mountctl_opt extraopt[] = {
		{ HMNT_NOHISTORY,	"nohistory" },
		{ HMNT_MASTERID,	"master" },
		{ HMNT_NOMIRROR,	"nomirror" },
		{ 0, NULL }
	};
	hammer_mount_t hmp;
	struct mount *mp;
	int usedbytes;
	int error;

	error = 0;
	usedbytes = 0;
	mp = ap->a_head.a_ops->head.vv_mount;
	KKASSERT(mp->mnt_data != NULL);
	hmp = (hammer_mount_t)mp->mnt_data;

	lwkt_gettoken(&hmp->fs_token);

	switch(ap->a_op) {
	case MOUNTCTL_SET_EXPORT:
		if (ap->a_ctllen != sizeof(struct export_args))
			error = EINVAL;
		else
			error = hammer_vfs_export(mp, ap->a_op,
				    (const struct export_args *)ap->a_ctl);
		break;
	case MOUNTCTL_MOUNTFLAGS:
		/*
		 * Call standard mountctl VOP function
		 * so we get user mount flags.
		 */
		error = vop_stdmountctl(ap);
		if (error)
			break;

		usedbytes = *ap->a_res;

		if (usedbytes > 0 && usedbytes < ap->a_buflen) {
			usedbytes += vfs_flagstostr(hmp->hflags, extraopt,
						    ap->a_buf,
						    ap->a_buflen - usedbytes,
						    &error);
		}

		*ap->a_res += usedbytes;
		break;
	default:
		error = vop_stdmountctl(ap);
		break;
	}
	lwkt_reltoken(&hmp->fs_token);
	return (error);
}
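/*
 * For example, a MOUNTCTL_MOUNTFLAGS query on a HAMMER mount first obtains
 * the standard VFS flag string via vop_stdmountctl() and then adds any
 * HAMMER-specific flags from the extraopt table above (e.g. "nohistory"
 * when HMNT_NOHISTORY is set), so the caller sees both sets of flags in
 * one result.
 */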
/*
 * hammer_vop_strategy { vp, bio }
 *
 * Strategy call, used for regular file read & write only.  Note that the
 * bp may represent a cluster.
 *
 * To simplify operation and allow better optimizations in the future,
 * this code does not make any assumptions with regards to buffer alignment
 * or size.
 */
static
int
hammer_vop_strategy(struct vop_strategy_args *ap)
{
	struct buf *bp;
	int error;

	bp = ap->a_bio->bio_buf;

	switch(bp->b_cmd) {
	case BUF_CMD_READ:
		error = hammer_vop_strategy_read(ap);
		break;
	case BUF_CMD_WRITE:
		error = hammer_vop_strategy_write(ap);
		break;
	default:
		bp->b_error = error = EINVAL;
		bp->b_flags |= B_ERROR;
		biodone(ap->a_bio);
		break;
	}
	return (error);
}
/*
 * Read from a regular file.  Iterate the related records and fill in the
 * BIO/BUF.  Gaps are zero-filled.
 *
 * The support code in hammer_object.c should be used to deal with mixed
 * in-memory and on-disk records.
 *
 * NOTE: Can be called from the cluster code with an oversized buf.
 */
static
int
hammer_vop_strategy_read(struct vop_strategy_args *ap)
{
	struct hammer_transaction trans;
	hammer_inode_t ip;
	hammer_inode_t dip;
	hammer_mount_t hmp;
	struct hammer_cursor cursor;
	hammer_base_elm_t base;
	hammer_off_t disk_offset;
	struct bio *bio;
	struct bio *nbio;
	struct buf *bp;
	int64_t rec_offset;
	int64_t ran_end;
	int64_t tmp64;
	int error;
	int boff;
	int roff;
	int n;
	int isdedupable;

	bio = ap->a_bio;
	bp = bio->bio_buf;
	ip = ap->a_vp->v_data;
	hmp = ip->hmp;
	/*
	 * The zone-2 disk offset may have been set by the cluster code via
	 * a BMAP operation, or else should be NOOFFSET.
	 *
	 * Checking the high bits for a match against zone-2 should suffice.
	 *
	 * In cases where a lot of data duplication is present it may be
	 * more beneficial to drop through and double-buffer through the
	 * device.
	 */
	nbio = push_bio(bio);
	if (hammer_is_zone_large_data(nbio->bio_offset)) {
		if (hammer_double_buffer == 0) {
			lwkt_gettoken(&hmp->fs_token);
			error = hammer_io_direct_read(hmp, nbio, NULL);
			lwkt_reltoken(&hmp->fs_token);
			return (error);
		}

		/*
		 * Try to shortcut requests for double_buffer mode too.
		 * Since this mode runs through the device buffer cache
		 * only compatible buffer sizes (meaning those generated
		 * by normal filesystem buffers) are legal.
		 */
		if ((bp->b_flags & B_PAGING) == 0) {
			lwkt_gettoken(&hmp->fs_token);
			error = hammer_io_indirect_read(hmp, nbio, NULL);
			lwkt_reltoken(&hmp->fs_token);
			return (error);
		}
	}
	/*
	 * Well, that sucked.  Do it the hard way.  If all the stars are
	 * aligned we may still be able to issue a direct-read.
	 */
	lwkt_gettoken(&hmp->fs_token);
	hammer_simple_transaction(&trans, hmp);
	hammer_init_cursor(&trans, &cursor, &ip->cache[1], ip);
	/*
	 * Key range (begin and end inclusive) to scan.  Note that the keys
	 * stored in the actual records represent BASE+LEN, not BASE.  The
	 * first record containing bio_offset will have a key > bio_offset.
	 */
	cursor.key_beg.localization = ip->obj_localization |
				      HAMMER_LOCALIZE_MISC;
	cursor.key_beg.obj_id = ip->obj_id;
	cursor.key_beg.create_tid = 0;
	cursor.key_beg.delete_tid = 0;
	cursor.key_beg.obj_type = 0;
	cursor.key_beg.key = bio->bio_offset + 1;
	cursor.asof = ip->obj_asof;
	cursor.flags |= HAMMER_CURSOR_ASOF;

	cursor.key_end = cursor.key_beg;
	KKASSERT(ip->ino_data.obj_type == HAMMER_OBJTYPE_REGFILE);

	if (ip->ino_data.obj_type == HAMMER_OBJTYPE_DBFILE) {
		cursor.key_beg.rec_type = HAMMER_RECTYPE_DB;
		cursor.key_end.rec_type = HAMMER_RECTYPE_DB;
		cursor.key_end.key = HAMMER_MAX_KEY;
	} else {
		ran_end = bio->bio_offset + bp->b_bufsize;
		cursor.key_beg.rec_type = HAMMER_RECTYPE_DATA;
		cursor.key_end.rec_type = HAMMER_RECTYPE_DATA;
		tmp64 = ran_end + MAXPHYS + 1;	/* work-around GCC-4 bug */
		if (tmp64 < ran_end)
			cursor.key_end.key = HAMMER_MAX_KEY;
		else
			cursor.key_end.key = ran_end + MAXPHYS + 1;
	}
	cursor.flags |= HAMMER_CURSOR_END_INCLUSIVE;
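	/*
	 * Example of the BASE+LEN key convention used above: a data record
	 * covering file offsets [0, 16384) is keyed at 16384, so a read
	 * starting at bio_offset 0 begins its scan at key 1 and the first
	 * record whose key exceeds bio_offset is the one that actually
	 * contains it.
	 */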
	/*
	 * Set NOSWAPCACHE for cursor data extraction if double buffering
	 * is disabled or (if the file is not marked cacheable via chflags
	 * and vm.swapcache_use_chflags is enabled).
	 */
	if (hammer_double_buffer == 0 ||
	    ((ap->a_vp->v_flag & VSWAPCACHE) == 0 &&
	     vm_swapcache_use_chflags)) {
		cursor.flags |= HAMMER_CURSOR_NOSWAPCACHE;
	}

	error = hammer_ip_first(&cursor);
	boff = 0;
	while (error == 0) {
		/*
		 * Get the base file offset of the record.  The key for
		 * data records is (base + bytes) rather than (base).
		 */
		base = &cursor.leaf->base;
		rec_offset = base->key - cursor.leaf->data_len;

		/*
		 * Calculate the gap, if any, and zero-fill it.
		 *
		 * n is the offset of the start of the record versus our
		 * current seek offset in the bio.
		 */
		n = (int)(rec_offset - (bio->bio_offset + boff));
		if (n > 0) {
			if (n > bp->b_bufsize - boff)
				n = bp->b_bufsize - boff;
			bzero((char *)bp->b_data + boff, n);
			boff += n;
			n = 0;
		}
		/*
		 * Calculate the data offset in the record and the number
		 * of bytes we can copy.
		 *
		 * There are two degenerate cases.  First, boff may already
		 * be at bp->b_bufsize.  Secondly, the data offset within
		 * the record may exceed the record's size.
		 */
		roff = -n;
		rec_offset += roff;
		n = cursor.leaf->data_len - roff;
		if (n <= 0) {
			hdkprintf("bad n=%d roff=%d\n", n, roff);
			n = 0;
		} else if (n > bp->b_bufsize - boff) {
			n = bp->b_bufsize - boff;
		}
		/*
		 * Deal with cached truncations.  This cool bit of code
		 * allows truncate()/ftruncate() to avoid having to sync
		 * the file.
		 *
		 * If the frontend is truncated then all backend records are
		 * subject to the frontend's truncation.
		 *
		 * If the backend is truncated then backend records on-disk
		 * (but not in-memory) are subject to the backend's
		 * truncation.  In-memory records owned by the backend
		 * represent data written after the truncation point on the
		 * backend and must not be truncated.
		 *
		 * Truncate operations deal with frontend buffer cache
		 * buffers and frontend-owned in-memory records synchronously.
		 */
		if (ip->flags & HAMMER_INODE_TRUNCATED) {
			if (hammer_cursor_ondisk(&cursor)/* ||
			    cursor.iprec->flush_state == HAMMER_FST_FLUSH*/) {
				if (ip->trunc_off <= rec_offset)
					n = 0;
				else if (ip->trunc_off < rec_offset + n)
					n = (int)(ip->trunc_off - rec_offset);
			}
		}
		if (ip->sync_flags & HAMMER_INODE_TRUNCATED) {
			if (hammer_cursor_ondisk(&cursor)) {
				if (ip->sync_trunc_off <= rec_offset)
					n = 0;
				else if (ip->sync_trunc_off < rec_offset + n)
					n = (int)(ip->sync_trunc_off - rec_offset);
			}
		}
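		/*
		 * For instance, if trunc_off is 100000 and the current
		 * record covers [98304, 114688), n is clipped to 1696 so
		 * that no bytes at or beyond the cached truncation point
		 * are copied out, even though the on-disk record still
		 * contains them.
		 */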
		/*
		 * Try to issue a direct read into our bio if possible,
		 * otherwise resolve the element data into a hammer_buffer
		 * and copy.
		 *
		 * The buffer on-disk should be zeroed past any real
		 * truncation point, but may not be for any synthesized
		 * truncation point from above.
		 *
		 * NOTE: disk_offset is only valid if the cursor data is
		 *	 on-disk.
		 */
		disk_offset = cursor.leaf->data_offset + roff;
		isdedupable = (boff == 0 && n == bp->b_bufsize &&
			       hammer_cursor_ondisk(&cursor) &&
			       ((int)disk_offset & HAMMER_BUFMASK) == 0);

		if (isdedupable && hammer_double_buffer == 0) {
			/*
			 * Direct read case
			 */
			KKASSERT(hammer_is_zone_large_data(disk_offset));
			nbio->bio_offset = disk_offset;
			error = hammer_io_direct_read(hmp, nbio, cursor.leaf);
			goto done;
		} else if (isdedupable) {
			/*
			 * Async I/O case for reading from backing store
			 * and copying the data to the filesystem buffer.
			 */
			KKASSERT(hammer_is_zone_large_data(disk_offset));
			nbio->bio_offset = disk_offset;
			error = hammer_io_indirect_read(hmp, nbio, cursor.leaf);
			goto done;
		} else if (n) {
			error = hammer_ip_resolve_data(&cursor);
			if (error == 0) {
				bcopy((char *)cursor.data + roff,
				      (char *)bp->b_data + boff, n);
			}
		}
		if (error)
			break;
		/*
		 * Iterate until we have filled the request.
		 */
		boff += n;
		if (boff == bp->b_bufsize)
			break;
		error = hammer_ip_next(&cursor);
	}

	/*
	 * There may have been a gap after the last record
	 */
	if (error == ENOENT)
		error = 0;
	if (error == 0 && boff != bp->b_bufsize) {
		KKASSERT(boff < bp->b_bufsize);
		bzero((char *)bp->b_data + boff, bp->b_bufsize - boff);
		/* boff = bp->b_bufsize; */
	}

	/*
	 * Disallow swapcache operation on the vnode buffer if double
	 * buffering is enabled, the swapcache will get the data via
	 * the block device buffer.
	 */
	if (hammer_double_buffer)
		bp->b_flags |= B_NOTMETA;
	/*
	 * Cleanup
	 */
	bp->b_resid = 0;
	bp->b_error = error;
	if (error)
		bp->b_flags |= B_ERROR;
	biodone(ap->a_bio);
done:
	/*
	 * Cache the b-tree node for the last data read in cache[1].
	 *
	 * If we hit the file EOF then also cache the node in the
	 * governing directory's cache[3], it will be used to initialize
	 * the new inode's cache[1] for any inodes looked up via the directory.
	 *
	 * This doesn't reduce disk accesses since the B-Tree chain is
	 * likely cached, but it does reduce cpu overhead when looking
	 * up file offsets for cpdup/tar/cpio style iterations.
	 */
	if (cursor.node)
		hammer_cache_node(&ip->cache[1], cursor.node);
	if (ran_end >= ip->ino_data.size) {
		dip = hammer_find_inode(&trans, ip->ino_data.parent_obj_id,
					ip->obj_asof, ip->obj_localization);
		if (dip) {
			hammer_cache_node(&dip->cache[3], cursor.node);
			hammer_rel_inode(dip, 0);
		}
	}
	hammer_done_cursor(&cursor);
	hammer_done_transaction(&trans);
	lwkt_reltoken(&hmp->fs_token);
	return (error);
}
/*
 * BMAP operation - used to support cluster_read() only.
 *
 * (struct vnode *vp, off_t loffset, off_t *doffsetp, int *runp, int *runb)
 *
 * This routine may return EOPNOTSUPP if the operation is not supported for
 * the specified offset.  The contents of the pointer arguments do not
 * need to be initialized in that case.
 *
 * If a disk address is available and properly aligned return 0 with
 * *doffsetp set to the zone-2 address, and *runp / *runb set appropriately
 * to the run-length relative to that offset.  Callers may assume that
 * *doffsetp is valid if 0 is returned, even if *runp is not sufficiently
 * large, so return EOPNOTSUPP if it is not sufficiently large.
 */
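/*
 * Illustrative example of the contract: for a loffset in the middle of a
 * contiguous on-media extent, *doffsetp receives the zone-2 address that
 * corresponds to loffset itself, *ap->a_runb receives how many contiguous
 * bytes precede loffset within the extent and *ap->a_runp how many follow
 * it, allowing cluster_read() to build one large I/O around the request.
 */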
static
int
hammer_vop_bmap(struct vop_bmap_args *ap)
{
	struct hammer_transaction trans;
	hammer_inode_t ip;
	hammer_mount_t hmp;
	struct hammer_cursor cursor;
	hammer_base_elm_t base;
	int64_t rec_offset;
	int64_t ran_end;
	int64_t tmp64;
	int64_t base_offset;
	int64_t base_disk_offset;
	int64_t last_offset;
	hammer_off_t last_disk_offset;
	hammer_off_t disk_offset;
	int rec_len;
	int error;
	int blksize;

	ip = ap->a_vp->v_data;
	hmp = ip->hmp;
	/*
	 * We can only BMAP regular files.  We can't BMAP database files,
	 * etc.
	 */
	if (ip->ino_data.obj_type != HAMMER_OBJTYPE_REGFILE)
		return (EOPNOTSUPP);

	/*
	 * bmap is typically called with runp/runb both NULL when used
	 * for writing.  We do not support BMAP for writing atm.
	 */
	if (ap->a_cmd != BUF_CMD_READ)
		return (EOPNOTSUPP);

	/*
	 * Scan the B-Tree to acquire blockmap addresses, then translate
	 * them to zone-2 addresses.
	 */
	lwkt_gettoken(&hmp->fs_token);
	hammer_simple_transaction(&trans, hmp);

	hammer_init_cursor(&trans, &cursor, &ip->cache[1], ip);
	/*
	 * Key range (begin and end inclusive) to scan.  Note that the keys
	 * stored in the actual records represent BASE+LEN, not BASE.  The
	 * first record containing bio_offset will have a key > bio_offset.
	 */
	cursor.key_beg.localization = ip->obj_localization |
				      HAMMER_LOCALIZE_MISC;
	cursor.key_beg.obj_id = ip->obj_id;
	cursor.key_beg.create_tid = 0;
	cursor.key_beg.delete_tid = 0;
	cursor.key_beg.obj_type = 0;
	if (ap->a_runb)
		cursor.key_beg.key = ap->a_loffset - MAXPHYS + 1;
	else
		cursor.key_beg.key = ap->a_loffset + 1;
	if (cursor.key_beg.key < 0)
		cursor.key_beg.key = 0;
	cursor.asof = ip->obj_asof;
	cursor.flags |= HAMMER_CURSOR_ASOF;

	cursor.key_end = cursor.key_beg;
	KKASSERT(ip->ino_data.obj_type == HAMMER_OBJTYPE_REGFILE);

	ran_end = ap->a_loffset + MAXPHYS;
	cursor.key_beg.rec_type = HAMMER_RECTYPE_DATA;
	cursor.key_end.rec_type = HAMMER_RECTYPE_DATA;
	tmp64 = ran_end + MAXPHYS + 1;	/* work-around GCC-4 bug */
	if (tmp64 < ran_end)
		cursor.key_end.key = HAMMER_MAX_KEY;
	else
		cursor.key_end.key = ran_end + MAXPHYS + 1;

	cursor.flags |= HAMMER_CURSOR_END_INCLUSIVE;
	error = hammer_ip_first(&cursor);
	base_offset = last_offset = 0;
	base_disk_offset = last_disk_offset = 0;
	while (error == 0) {
		/*
		 * Get the base file offset of the record.  The key for
		 * data records is (base + bytes) rather than (base).
		 *
		 * NOTE: rec_offset + rec_len may exceed the end-of-file.
		 * The extra bytes should be zero on-disk and the BMAP op
		 * should still be ok.
		 */
		base = &cursor.leaf->base;
		rec_offset = base->key - cursor.leaf->data_len;
		rec_len = cursor.leaf->data_len;

		/*
		 * Incorporate any cached truncation.
		 *
		 * NOTE: Modifications to rec_len based on synthesized
		 * truncation points remove the guarantee that any extended
		 * data on disk is zero (since the truncations may not have
		 * taken place on-media yet).
		 */
		if (ip->flags & HAMMER_INODE_TRUNCATED) {
			if (hammer_cursor_ondisk(&cursor) ||
			    cursor.iprec->flush_state == HAMMER_FST_FLUSH) {
				if (ip->trunc_off <= rec_offset)
					rec_len = 0;
				else if (ip->trunc_off < rec_offset + rec_len)
					rec_len = (int)(ip->trunc_off - rec_offset);
			}
		}
		if (ip->sync_flags & HAMMER_INODE_TRUNCATED) {
			if (hammer_cursor_ondisk(&cursor)) {
				if (ip->sync_trunc_off <= rec_offset)
					rec_len = 0;
				else if (ip->sync_trunc_off < rec_offset + rec_len)
					rec_len = (int)(ip->sync_trunc_off - rec_offset);
			}
		}
		/*
		 * Accumulate information.  If we have hit a discontiguous
		 * block reset base_offset unless we are already beyond the
		 * requested offset.  If we are, that's it, we stop.
		 */
		if (hammer_cursor_ondisk(&cursor)) {
			disk_offset = cursor.leaf->data_offset;
			if (rec_offset != last_offset ||
			    disk_offset != last_disk_offset) {
				if (rec_offset > ap->a_loffset)
					break;
				base_offset = rec_offset;
				base_disk_offset = disk_offset;
			}
			last_offset = rec_offset + rec_len;
			last_disk_offset = disk_offset + rec_len;
		}
		error = hammer_ip_next(&cursor);
	}
	if (cursor.node)
		hammer_cache_node(&ip->cache[1], cursor.node);

	hammer_done_cursor(&cursor);
	hammer_done_transaction(&trans);
	lwkt_reltoken(&hmp->fs_token);
	/*
	 * If we couldn't find any records or the records we did find were
	 * all behind the requested offset, return failure.  A forward
	 * truncation can leave a hole w/ no on-disk records.
	 */
	if (last_offset == 0 || last_offset < ap->a_loffset)
		return (EOPNOTSUPP);
	/*
	 * Figure out the block size at the requested offset and adjust
	 * our limits so the cluster_read() does not create inappropriately
	 * sized buffer cache buffers.
	 */
	blksize = hammer_blocksize(ap->a_loffset);
	if (hammer_blocksize(base_offset) != blksize) {
		base_offset = hammer_blockdemarc(base_offset, ap->a_loffset);
	}
	if (last_offset != ap->a_loffset &&
	    hammer_blocksize(last_offset - 1) != blksize) {
		last_offset = hammer_blockdemarc(ap->a_loffset,
						 last_offset - 1);
	}
	/*
	 * Returning EOPNOTSUPP simply prevents the direct-IO optimization
	 * from being used.
	 */
	disk_offset = base_disk_offset + (ap->a_loffset - base_offset);

	if (!hammer_is_zone_large_data(disk_offset)) {
		/*
		 * Only large-data zones can be direct-IOd
		 */
		error = EOPNOTSUPP;
	} else if ((disk_offset & HAMMER_BUFMASK) ||
		   (last_offset - ap->a_loffset) < blksize) {
		/*
		 * doffsetp is not aligned or the forward run size does
		 * not cover a whole buffer, disallow the direct I/O.
		 */
		error = EOPNOTSUPP;
	} else {
		/*
		 * We are good.
		 */
		*ap->a_doffsetp = disk_offset;
		if (ap->a_runb) {
			*ap->a_runb = ap->a_loffset - base_offset;
			KKASSERT(*ap->a_runb >= 0);
		}
		if (ap->a_runp) {
			*ap->a_runp = last_offset - ap->a_loffset;
			KKASSERT(*ap->a_runp >= 0);
		}
		error = 0;
	}
	return (error);
}
/*
 * Write to a regular file.  Because this is a strategy call the OS is
 * trying to actually get data onto the media.
 */
static
int
hammer_vop_strategy_write(struct vop_strategy_args *ap)
{
	hammer_record_t record;
	hammer_mount_t hmp;
	hammer_inode_t ip;
	struct bio *bio;
	struct buf *bp;
	int blksize __debugvar;
	int bytes;
	int error;

	bio = ap->a_bio;
	bp = bio->bio_buf;
	ip = ap->a_vp->v_data;
	hmp = ip->hmp;

	blksize = hammer_blocksize(bio->bio_offset);
	KKASSERT(bp->b_bufsize == blksize);
	if (ip->flags & HAMMER_INODE_RO) {
		bp->b_error = EROFS;
		bp->b_flags |= B_ERROR;
		biodone(ap->a_bio);
		return (EROFS);
	}

	lwkt_gettoken(&hmp->fs_token);

	/*
	 * Disallow swapcache operation on the vnode buffer if double
	 * buffering is enabled, the swapcache will get the data via
	 * the block device buffer.
	 */
	if (hammer_double_buffer)
		bp->b_flags |= B_NOTMETA;
	/*
	 * Interlock with inode destruction (no in-kernel or directory
	 * topology visibility).  If we queue new IO while trying to
	 * destroy the inode we can deadlock the vtrunc call in
	 * hammer_inode_unloadable_check().
	 *
	 * Besides, there's no point flushing a bp associated with an
	 * inode that is being destroyed on-media and has no kernel
	 * references.
	 */
	if ((ip->flags | ip->sync_flags) &
	    (HAMMER_INODE_DELETING|HAMMER_INODE_DELETED)) {
		bp->b_resid = 0;
		biodone(ap->a_bio);
		lwkt_reltoken(&hmp->fs_token);
		return (0);
	}
	/*
	 * Reserve space and issue a direct-write from the front-end.
	 * NOTE: The direct_io code will hammer_bread/bcopy smaller
	 * at-the-end pieces.
	 *
	 * An in-memory record will be installed to reference the storage
	 * until the flusher can get to it.
	 *
	 * Since we own the high level bio the front-end will not try to
	 * do a direct-read until the write completes.
	 *
	 * NOTE: The only time we do not reserve a full-sized buffer's
	 * worth of data is if the file is small.  We do not try to
	 * allocate a fragment (from the small-data zone) at the end of
	 * an otherwise large file as this can lead to wildly separated
	 * data.
	 */
	KKASSERT((bio->bio_offset & HAMMER_BUFMASK) == 0);
	KKASSERT(bio->bio_offset < ip->ino_data.size);
	if (bio->bio_offset || ip->ino_data.size > HAMMER_HBUFSIZE)
		bytes = bp->b_bufsize;
	else
		bytes = HAMMER_DATA_DOALIGN_WITH(int, ip->ino_data.size);
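	/*
	 * Example: a 1000-byte file written at offset 0 reserves only
	 * ino_data.size rounded up to the HAMMER data alignment rather
	 * than a full buffer, while any write at a non-zero offset (or to
	 * a file larger than HAMMER_HBUFSIZE) reserves the full
	 * bp->b_bufsize.
	 */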
	record = hammer_ip_add_bulk(ip, bio->bio_offset, bp->b_data,
				    bytes, &error);
	if (record) {
		/*
		 * B_VFSFLAG1 indicates that a REDO_WRITE entry was generated
		 * in hammer_vop_write().  We must flag the record so the
		 * proper REDO_TERM_WRITE entry is generated during the flush.
		 */
		if (bp->b_flags & B_VFSFLAG1) {
			record->flags |= HAMMER_RECF_REDO;
			bp->b_flags &= ~B_VFSFLAG1;
		}
		hammer_io_direct_write(hmp, bio, record);
		if (ip->rsv_recs > 1 && hmp->rsv_recs > hammer_limit_recs)
			hammer_flush_inode(ip, 0);
	} else {
		bp->b_bio2.bio_offset = NOOFFSET;
		bp->b_error = error;
		bp->b_flags |= B_ERROR;
		biodone(ap->a_bio);
	}
	lwkt_reltoken(&hmp->fs_token);
	return (error);
}
/*
 * dounlink - disconnect a directory entry
 *
 * XXX whiteout support not really in yet
 */
static int
hammer_dounlink(hammer_transaction_t trans, struct nchandle *nch,
		struct vnode *dvp, struct ucred *cred,
		int flags, int isdir)
{
	struct namecache *ncp;
	hammer_inode_t dip;
	hammer_inode_t ip;
	hammer_mount_t hmp;
	struct hammer_cursor cursor;
	int64_t namekey;
	uint32_t max_iterations;
	int nlen, error;
	/*
	 * Calculate the namekey and setup the key range for the scan.  This
	 * works kinda like a chained hash table where the lower 32 bits
	 * of the namekey synthesize the chain.
	 *
	 * The key range is inclusive of both key_beg and key_end.
	 */
	dip = VTOI(dvp);
	ncp = nch->ncp;
	hmp = dip->hmp;

	if (dip->flags & HAMMER_INODE_RO)
		return (EROFS);

	namekey = hammer_direntry_namekey(dip, ncp->nc_name, ncp->nc_nlen,
					  &max_iterations);
retry:
	hammer_init_cursor(trans, &cursor, &dip->cache[1], dip);
	cursor.key_beg.localization = dip->obj_localization |
				      hammer_dir_localization(dip);
	cursor.key_beg.obj_id = dip->obj_id;
	cursor.key_beg.key = namekey;
	cursor.key_beg.create_tid = 0;
	cursor.key_beg.delete_tid = 0;
	cursor.key_beg.rec_type = HAMMER_RECTYPE_DIRENTRY;
	cursor.key_beg.obj_type = 0;

	cursor.key_end = cursor.key_beg;
	cursor.key_end.key += max_iterations;
	cursor.asof = dip->obj_asof;
	cursor.flags |= HAMMER_CURSOR_END_INCLUSIVE | HAMMER_CURSOR_ASOF;
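	/*
	 * Roughly speaking, the high bits of the 64-bit namekey come from
	 * a hash of the component name and the low bits act as an iterator
	 * for hash collisions, so bounding the scan at
	 * namekey + max_iterations walks exactly one hash chain; two names
	 * that hash identically simply occupy adjacent keys in that range.
	 */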
	/*
	 * Scan all matching records (the chain), locate the one matching
	 * the requested path component.  info->last_error contains the
	 * error code on search termination and could be 0, ENOENT, or
	 * something else.
	 *
	 * The hammer_ip_*() functions merge in-memory records with on-disk
	 * records for the purposes of the search.
	 */
	error = hammer_ip_first(&cursor);

	while (error == 0) {
		error = hammer_ip_resolve_data(&cursor);
		if (error)
			break;
		nlen = cursor.leaf->data_len - HAMMER_ENTRY_NAME_OFF;
		KKASSERT(nlen > 0);
		if (ncp->nc_nlen == nlen &&
		    bcmp(ncp->nc_name, cursor.data->entry.name, nlen) == 0) {
			break;
		}
		error = hammer_ip_next(&cursor);
	}
	/*
	 * If all is ok we have to get the inode so we can adjust nlinks.
	 * To avoid a deadlock with the flusher we must release the inode
	 * lock on the directory when acquiring the inode for the entry.
	 *
	 * If the target is a directory, it must be empty.
	 */
	if (error == 0) {
		hammer_unlock(&cursor.ip->lock);
		ip = hammer_get_inode(trans, dip, cursor.data->entry.obj_id,
				      hmp->asof,
				      cursor.data->entry.localization,
				      0, &error);
		hammer_lock_sh(&cursor.ip->lock);
		if (error == ENOENT) {
			hkprintf("WARNING: Removing dirent w/missing inode "
				 "\"%s\"\n"
				 "\tobj_id = %016jx\n",
				 ncp->nc_name,
				 (intmax_t)cursor.data->entry.obj_id);
			error = 0;
		}
		/*
		 * If isdir >= 0 we validate that the entry is or is not a
		 * directory.  If isdir < 0 we don't care.
		 */
		if (error == 0 && isdir >= 0 && ip) {
			if (isdir &&
			    ip->ino_data.obj_type != HAMMER_OBJTYPE_DIRECTORY) {
				error = ENOTDIR;
			} else if (isdir == 0 &&
			    ip->ino_data.obj_type == HAMMER_OBJTYPE_DIRECTORY) {
				error = EISDIR;
			}
		}
		/*
		 * If we are trying to remove a directory the directory must
		 * be empty.
		 *
		 * The check directory code can loop and deadlock/retry.  Our
		 * own cursor's node locks must be released to avoid a 3-way
		 * deadlock with the flusher if the check directory code
		 * must retry.
		 *
		 * If any changes whatsoever have been made to the cursor
		 * set EDEADLK and retry.
		 *
		 * WARNING: See warnings in hammer_unlock_cursor()
		 *	    function.
		 */
		if (error == 0 && ip && ip->ino_data.obj_type ==
				       HAMMER_OBJTYPE_DIRECTORY) {
			hammer_unlock_cursor(&cursor);
			error = hammer_ip_check_directory_empty(trans, ip);
			hammer_lock_cursor(&cursor);
			if (cursor.flags & HAMMER_CURSOR_RETEST) {
				hkprintf("Warning: avoided deadlock "
					 "on rmdir '%s'\n",
					 ncp->nc_name);
				error = EDEADLK;
			}
		}
		/*
		 * Delete the directory entry.
		 *
		 * WARNING: hammer_ip_del_direntry() may have to terminate
		 * the cursor to avoid a deadlock.  It is ok to call
		 * hammer_done_cursor() twice.
		 */
		if (error == 0) {
			error = hammer_ip_del_direntry(trans, &cursor,
						       dip, ip);
		}
		hammer_done_cursor(&cursor);
		if (error == 0) {
			/*
			 * Tell the namecache that we are now unlinked.
			 */
			cache_unlink(nch);

			/*
			 * NOTE: ip->vp, if non-NULL, cannot be directly
			 *	 referenced without formally acquiring the
			 *	 vp since the vp might have zero refs on it,
			 *	 or in the middle of a reclaim, etc.
			 *
			 * NOTE: The cache_setunresolved() can rip the vp
			 *	 out from under us since the vp may not have
			 *	 any refs, in which case ip->vp will be NULL
			 *	 from the outset.
			 */
			while (ip && ip->vp) {
				struct vnode *vp;

				error = hammer_get_vnode(ip, &vp);
				if (error == 0 && vp) {
					vn_unlock(vp);
					hammer_knote(ip->vp, NOTE_DELETE);
#if 0
					/*
					 * Don't do this, it can deadlock
					 * on concurrent rm's of hardlinks.
					 * Shouldn't be needed any more.
					 */
					cache_inval_vp(ip->vp, CINV_DESTROY);
#endif
					vrele(vp);
					break;
				}
				hdkprintf("ip/vp race1 avoided\n");
			}
		}
		if (ip)
			hammer_rel_inode(ip, 0);
	} else {
		hammer_done_cursor(&cursor);
	}
	if (error == EDEADLK)
		goto retry;

	return (error);
}
/************************************************************************
 *			    FIFO AND SPECFS OPS				*
 ************************************************************************
 *
 */
static int
hammer_vop_fifoclose (struct vop_close_args *ap)
{
	/* XXX update itimes */
	return (VOCALL(&fifo_vnode_vops, &ap->a_head));
}

static int
hammer_vop_fiforead (struct vop_read_args *ap)
{
	int error;

	error = VOCALL(&fifo_vnode_vops, &ap->a_head);
	/* XXX update access time */
	return (error);
}

static int
hammer_vop_fifowrite (struct vop_write_args *ap)
{
	int error;

	error = VOCALL(&fifo_vnode_vops, &ap->a_head);
	/* XXX update access time */
	return (error);
}

static
int
hammer_vop_fifokqfilter(struct vop_kqfilter_args *ap)
{
	int error;

	error = VOCALL(&fifo_vnode_vops, &ap->a_head);
	if (error)
		error = hammer_vop_kqfilter(ap);
	return (error);
}
/************************************************************************
 *			       KQFILTER OPS				*
 ************************************************************************
 *
 */
static void filt_hammerdetach(struct knote *kn);
static int filt_hammerread(struct knote *kn, long hint);
static int filt_hammerwrite(struct knote *kn, long hint);
static int filt_hammervnode(struct knote *kn, long hint);

static struct filterops hammerread_filtops =
	{ FILTEROP_ISFD | FILTEROP_MPSAFE,
	  NULL, filt_hammerdetach, filt_hammerread };
static struct filterops hammerwrite_filtops =
	{ FILTEROP_ISFD | FILTEROP_MPSAFE,
	  NULL, filt_hammerdetach, filt_hammerwrite };
static struct filterops hammervnode_filtops =
	{ FILTEROP_ISFD | FILTEROP_MPSAFE,
	  NULL, filt_hammerdetach, filt_hammervnode };
static
int
hammer_vop_kqfilter(struct vop_kqfilter_args *ap)
{
	struct vnode *vp = ap->a_vp;
	struct knote *kn = ap->a_kn;

	switch (kn->kn_filter) {
	case EVFILT_READ:
		kn->kn_fop = &hammerread_filtops;
		break;
	case EVFILT_WRITE:
		kn->kn_fop = &hammerwrite_filtops;
		break;
	case EVFILT_VNODE:
		kn->kn_fop = &hammervnode_filtops;
		break;
	default:
		return (EOPNOTSUPP);
	}

	kn->kn_hook = (caddr_t)vp;

	knote_insert(&vp->v_pollinfo.vpi_kqinfo.ki_note, kn);

	return (0);
}
static void
filt_hammerdetach(struct knote *kn)
{
	struct vnode *vp = (void *)kn->kn_hook;

	knote_remove(&vp->v_pollinfo.vpi_kqinfo.ki_note, kn);
}
static int
filt_hammerread(struct knote *kn, long hint)
{
	struct vnode *vp = (void *)kn->kn_hook;
	hammer_inode_t ip = VTOI(vp);
	hammer_mount_t hmp = ip->hmp;
	off_t off;

	if (hint == NOTE_REVOKE) {
		kn->kn_flags |= (EV_EOF | EV_NODATA | EV_ONESHOT);
		return (1);
	}
	lwkt_gettoken(&hmp->fs_token);	/* XXX use per-ip-token */
	off = ip->ino_data.size - kn->kn_fp->f_offset;
	kn->kn_data = (off < INTPTR_MAX) ? off : INTPTR_MAX;
	lwkt_reltoken(&hmp->fs_token);
	if (kn->kn_sfflags & NOTE_OLDAPI)
		return (1);
	return (kn->kn_data != 0);
}
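/*
 * For example, an EVFILT_READ knote on a 10000-byte HAMMER file whose
 * descriptor offset sits at 4000 reports kn_data = 6000 above, i.e. the
 * number of bytes readable before EOF (clamped to INTPTR_MAX).
 */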
static int
filt_hammerwrite(struct knote *kn, long hint)
{
	if (hint == NOTE_REVOKE)
		kn->kn_flags |= (EV_EOF | EV_NODATA | EV_ONESHOT);
	kn->kn_data = 0;
	return (1);
}
static int
filt_hammervnode(struct knote *kn, long hint)
{
	if (kn->kn_sfflags & hint)
		kn->kn_fflags |= hint;
	if (hint == NOTE_REVOKE) {
		kn->kn_flags |= (EV_EOF | EV_NODATA);
		return (1);
	}
	return (kn->kn_fflags != 0);
}