/*
 * Copyright (c) 2007-2008 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
#include <sys/mountctl.h>
#include <sys/namecache.h>
#include <vfs/fifofs/fifo.h>
static int hammer_vop_fsync(struct vop_fsync_args *);
static int hammer_vop_read(struct vop_read_args *);
static int hammer_vop_write(struct vop_write_args *);
static int hammer_vop_access(struct vop_access_args *);
static int hammer_vop_advlock(struct vop_advlock_args *);
static int hammer_vop_close(struct vop_close_args *);
static int hammer_vop_ncreate(struct vop_ncreate_args *);
static int hammer_vop_getattr(struct vop_getattr_args *);
static int hammer_vop_nresolve(struct vop_nresolve_args *);
static int hammer_vop_nlookupdotdot(struct vop_nlookupdotdot_args *);
static int hammer_vop_nlink(struct vop_nlink_args *);
static int hammer_vop_nmkdir(struct vop_nmkdir_args *);
static int hammer_vop_nmknod(struct vop_nmknod_args *);
static int hammer_vop_open(struct vop_open_args *);
static int hammer_vop_print(struct vop_print_args *);
static int hammer_vop_readdir(struct vop_readdir_args *);
static int hammer_vop_readlink(struct vop_readlink_args *);
static int hammer_vop_nremove(struct vop_nremove_args *);
static int hammer_vop_nrename(struct vop_nrename_args *);
static int hammer_vop_nrmdir(struct vop_nrmdir_args *);
static int hammer_vop_markatime(struct vop_markatime_args *);
static int hammer_vop_setattr(struct vop_setattr_args *);
static int hammer_vop_strategy(struct vop_strategy_args *);
static int hammer_vop_bmap(struct vop_bmap_args *ap);
static int hammer_vop_nsymlink(struct vop_nsymlink_args *);
static int hammer_vop_nwhiteout(struct vop_nwhiteout_args *);
static int hammer_vop_ioctl(struct vop_ioctl_args *);
static int hammer_vop_mountctl(struct vop_mountctl_args *);
static int hammer_vop_kqfilter (struct vop_kqfilter_args *);

static int hammer_vop_fifoclose (struct vop_close_args *);
static int hammer_vop_fiforead (struct vop_read_args *);
static int hammer_vop_fifowrite (struct vop_write_args *);
static int hammer_vop_fifokqfilter (struct vop_kqfilter_args *);
struct vop_ops hammer_vnode_vops = {
        .vop_default =          vop_defaultop,
        .vop_fsync =            hammer_vop_fsync,
        .vop_getpages =         vop_stdgetpages,
        .vop_putpages =         vop_stdputpages,
        .vop_read =             hammer_vop_read,
        .vop_write =            hammer_vop_write,
        .vop_access =           hammer_vop_access,
        .vop_advlock =          hammer_vop_advlock,
        .vop_close =            hammer_vop_close,
        .vop_ncreate =          hammer_vop_ncreate,
        .vop_getattr =          hammer_vop_getattr,
        .vop_inactive =         hammer_vop_inactive,
        .vop_reclaim =          hammer_vop_reclaim,
        .vop_nresolve =         hammer_vop_nresolve,
        .vop_nlookupdotdot =    hammer_vop_nlookupdotdot,
        .vop_nlink =            hammer_vop_nlink,
        .vop_nmkdir =           hammer_vop_nmkdir,
        .vop_nmknod =           hammer_vop_nmknod,
        .vop_open =             hammer_vop_open,
        .vop_pathconf =         vop_stdpathconf,
        .vop_print =            hammer_vop_print,
        .vop_readdir =          hammer_vop_readdir,
        .vop_readlink =         hammer_vop_readlink,
        .vop_nremove =          hammer_vop_nremove,
        .vop_nrename =          hammer_vop_nrename,
        .vop_nrmdir =           hammer_vop_nrmdir,
        .vop_markatime =        hammer_vop_markatime,
        .vop_setattr =          hammer_vop_setattr,
        .vop_bmap =             hammer_vop_bmap,
        .vop_strategy =         hammer_vop_strategy,
        .vop_nsymlink =         hammer_vop_nsymlink,
        .vop_nwhiteout =        hammer_vop_nwhiteout,
        .vop_ioctl =            hammer_vop_ioctl,
        .vop_mountctl =         hammer_vop_mountctl,
        .vop_kqfilter =         hammer_vop_kqfilter
};
struct vop_ops hammer_spec_vops = {
        .vop_default =          vop_defaultop,
        .vop_fsync =            hammer_vop_fsync,
        .vop_read =             vop_stdnoread,
        .vop_write =            vop_stdnowrite,
        .vop_access =           hammer_vop_access,
        .vop_close =            hammer_vop_close,
        .vop_markatime =        hammer_vop_markatime,
        .vop_getattr =          hammer_vop_getattr,
        .vop_inactive =         hammer_vop_inactive,
        .vop_reclaim =          hammer_vop_reclaim,
        .vop_setattr =          hammer_vop_setattr
};
struct vop_ops hammer_fifo_vops = {
        .vop_default =          fifo_vnoperate,
        .vop_fsync =            hammer_vop_fsync,
        .vop_read =             hammer_vop_fiforead,
        .vop_write =            hammer_vop_fifowrite,
        .vop_access =           hammer_vop_access,
        .vop_close =            hammer_vop_fifoclose,
        .vop_markatime =        hammer_vop_markatime,
        .vop_getattr =          hammer_vop_getattr,
        .vop_inactive =         hammer_vop_inactive,
        .vop_reclaim =          hammer_vop_reclaim,
        .vop_setattr =          hammer_vop_setattr,
        .vop_kqfilter =         hammer_vop_fifokqfilter
};
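/*
 * Illustrative note (an inference from the three tables above, not extra
 * kernel logic): HAMMER installs one of these vop_ops tables on a vnode
 * according to its type.  Regular files and directories get
 * hammer_vnode_vops; device special files get hammer_spec_vops, which
 * punts read/write to vop_stdnoread/vop_stdnowrite; fifos get
 * hammer_fifo_vops, whose .vop_default routes everything else through
 * fifo_vnoperate.  All three share the HAMMER-specific attribute, fsync,
 * inactive and reclaim paths.
 */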
static void
hammer_knote(struct vnode *vp, int flags)
{
        if (flags)
                KNOTE(&vp->v_pollinfo.vpi_kqinfo.ki_note, flags);
}
static int hammer_dounlink(hammer_transaction_t trans, struct nchandle *nch,
                        struct vnode *dvp, struct ucred *cred,
                        int flags, int isdir);
static int hammer_vop_strategy_read(struct vop_strategy_args *ap);
static int hammer_vop_strategy_write(struct vop_strategy_args *ap);
/*
 * hammer_vop_fsync { vp, waitfor }
 *
 * fsync() an inode to disk and wait for it to be completely committed
 * such that the information would not be undone if a crash occurred after
 * return.
 *
 * NOTE: HAMMER's fsync()'s are going to remain expensive until we implement
 *       a REDO log.  A sysctl is provided to relax HAMMER's fsync()
 *       operation.
 *
 *       Ultimately the combination of a REDO log and use of fast storage
 *       to front-end cluster caches will make fsync fast, but it ain't
 *       here yet.  And, in any case, we need real transactional
 *       all-or-nothing features which are not restricted to a single file.
 */
static
int
hammer_vop_fsync(struct vop_fsync_args *ap)
{
        hammer_inode_t ip = VTOI(ap->a_vp);
        hammer_mount_t hmp = ip->hmp;
        int waitfor = ap->a_waitfor;
        int mode;

        lwkt_gettoken(&hmp->fs_token);

        /*
         * Fsync rule relaxation (default is either full synchronous flush
         * or REDO semantics with synchronous flush).
         */
        if (ap->a_flags & VOP_FSYNC_SYSCALL) {
                switch(hammer_fsync_mode) {
                case 0:
mode0:
                        /* no REDO, full synchronous flush */
                        goto skip;
                case 1:
mode1:
                        /* no REDO, full asynchronous flush */
                        if (waitfor == MNT_WAIT)
                                waitfor = MNT_NOWAIT;
                        goto skip;
                case 2:
                        /* REDO semantics, synchronous flush */
                        if (hmp->version < HAMMER_VOL_VERSION_FOUR)
                                goto mode0;
                        mode = HAMMER_FLUSH_UNDOS_AUTO;
                        break;
                case 3:
                        /* REDO semantics, relaxed asynchronous flush */
                        if (hmp->version < HAMMER_VOL_VERSION_FOUR)
                                goto mode1;
                        mode = HAMMER_FLUSH_UNDOS_RELAXED;
                        if (waitfor == MNT_WAIT)
                                waitfor = MNT_NOWAIT;
                        break;
                case 4:
                        /* ignore the fsync() system call */
                        lwkt_reltoken(&hmp->fs_token);
                        return(0);
                default:
                        /* we have to do something */
                        mode = HAMMER_FLUSH_UNDOS_RELAXED;
                        if (waitfor == MNT_WAIT)
                                waitfor = MNT_NOWAIT;
                        break;
                }

                /*
                 * Fast fsync only needs to flush the UNDO/REDO fifo if
                 * HAMMER_INODE_REDO is non-zero and the only modifications
                 * made to the file are write or write-extends.
                 */
                if ((ip->flags & HAMMER_INODE_REDO) &&
                    (ip->flags & HAMMER_INODE_MODMASK_NOREDO) == 0) {
                        ++hammer_count_fsyncs;
                        hammer_flusher_flush_undos(hmp, mode);
                        if (ip->vp && (ip->flags & HAMMER_INODE_MODMASK) == 0)
                                vclrisdirty(ip->vp);
                        lwkt_reltoken(&hmp->fs_token);
                        return(0);
                }

                /*
                 * REDO is enabled by fsync(), the idea being we really only
                 * want to lay down REDO records when programs are using
                 * fsync() heavily.  The first fsync() on the file starts
                 * the gravy train going and later fsync()s keep it hot by
                 * resetting the redo_count.
                 *
                 * We weren't running REDOs before now so we have to fall
                 * through and do a full fsync of what we have.
                 */
                if (hmp->version >= HAMMER_VOL_VERSION_FOUR &&
                    (hmp->flags & HAMMER_MOUNT_REDO_RECOVERY_RUN) == 0) {
                        ip->flags |= HAMMER_INODE_REDO;
                        ip->redo_count = 0;
                }
        }
skip:

        /*
         * Do a full flush sequence.
         *
         * Attempt to release the vnode while waiting for the inode to
         * finish flushing.  This can really mess up inactive->reclaim
         * sequences so only do it if the vnode is active.
         *
         * WARNING! The VX lock functions must be used.  vn_lock() will
         *          fail when this is part of a VOP_RECLAIM sequence.
         */
        ++hammer_count_fsyncs;
        vfsync(ap->a_vp, waitfor, 1, NULL, NULL);
        hammer_flush_inode(ip, HAMMER_FLUSH_SIGNAL);
        if (waitfor == MNT_WAIT) {
                int dorelock;

                if ((ap->a_vp->v_flag & VRECLAIMED) == 0) {
                        vx_unlock(ap->a_vp);
                        dorelock = 1;
                } else {
                        dorelock = 0;
                }
                hammer_wait_inode(ip);
                if (dorelock)
                        vx_lock(ap->a_vp);
        }
        if (ip->vp && (ip->flags & HAMMER_INODE_MODMASK) == 0)
                vclrisdirty(ip->vp);
        lwkt_reltoken(&hmp->fs_token);
        return (ip->error);
}
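/*
 * Summary of the fsync relaxation modes handled above (taken from the
 * case comments; the sysctl name is assumed to be vfs.hammer.fsync_mode):
 *
 *      0 - no REDO, full synchronous flush
 *      1 - no REDO, full asynchronous flush
 *      2 - REDO semantics, synchronous flush
 *      3 - REDO semantics, relaxed asynchronous flush
 *      4 - ignore the fsync() system call entirely
 *
 * e.g. 'sysctl vfs.hammer.fsync_mode=3' trades fsync() durability for
 * throughput on version-4+ volumes, falling back to modes 0/1 on older
 * volumes that lack the REDO log.
 */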
/*
 * hammer_vop_read { vp, uio, ioflag, cred }
 *
 * MPSAFE (reads serviced from the cache do not require fs_token)
 */
static
int
hammer_vop_read(struct vop_read_args *ap)
{
        struct hammer_transaction trans;
        hammer_inode_t ip;
        hammer_mount_t hmp;
        off_t offset;
        struct buf *bp;
        struct uio *uio;
        int error;
        int n;
        int seqcount;
        int ioseqcount;
        int blksize;
        int bigread;
        int got_trans;
        size_t resid;

        if (ap->a_vp->v_type != VREG)
                return (EINVAL);

        ip = VTOI(ap->a_vp);
        hmp = ip->hmp;
        error = 0;
        got_trans = 0;
        uio = ap->a_uio;

        /*
         * Attempt to shortcut directly to the VM object using lwbufs.
         * This is much faster than instantiating buffer cache buffers.
         */
        resid = uio->uio_resid;
        error = vop_helper_read_shortcut(ap);
        hammer_stats_file_read += resid - uio->uio_resid;
        if (error)
                return (error);
        if (uio->uio_resid == 0)
                goto finished;

        /*
         * Allow the UIO's size to override the sequential heuristic.
         */
        blksize = hammer_blocksize(uio->uio_offset);
        seqcount = (uio->uio_resid + (MAXBSIZE - 1)) / MAXBSIZE;
        ioseqcount = (ap->a_ioflag >> 16);
        if (seqcount < ioseqcount)
                seqcount = ioseqcount;

        /*
         * If reading or writing a huge amount of data we have to break
         * atomicity and allow the operation to be interrupted by a signal
         * or it can DOS the machine.
         */
        bigread = (uio->uio_resid > 100 * 1024 * 1024);

        /*
         * Access the data typically in HAMMER_BUFSIZE blocks via the
         * buffer cache, but HAMMER may use a variable block size based
         * on the offset.
         *
         * XXX Temporary hack, delay the start transaction while we remain
         *     MPSAFE.  NOTE: ino_data.size cannot change while vnode is
         *     locked.
         */
        while (uio->uio_resid > 0 && uio->uio_offset < ip->ino_data.size) {
                int64_t base_offset;
                int64_t file_limit;

                blksize = hammer_blocksize(uio->uio_offset);
                offset = (int)uio->uio_offset & (blksize - 1);
                base_offset = uio->uio_offset - offset;

                if (bigread && (error = hammer_signal_check(ip->hmp)) != 0)
                        break;

                bp = getblk(ap->a_vp, base_offset, blksize, 0, 0);
                if ((bp->b_flags & (B_INVAL | B_CACHE | B_RAM)) == B_CACHE) {
                        bp->b_flags &= ~B_AGE;
                        error = 0;
                        goto skip;
                }
                if (ap->a_ioflag & IO_NRDELAY) {
                        bqrelse(bp);
                        return (EWOULDBLOCK);
                }

                if (got_trans == 0) {
                        hammer_start_transaction(&trans, ip->hmp);
                        got_trans = 1;
                }

                /*
                 * NOTE: A valid bp has already been acquired, but was not
                 *       B_CACHE.
                 */
                if (hammer_cluster_enable) {
                        /*
                         * Use file_limit to prevent cluster_read() from
                         * creating buffers of the wrong block size past
                         * the demarc.
                         */
                        file_limit = ip->ino_data.size;
                        if (base_offset < HAMMER_XDEMARC &&
                            file_limit > HAMMER_XDEMARC) {
                                file_limit = HAMMER_XDEMARC;
                        }
                        error = cluster_readx(ap->a_vp,
                                              file_limit, base_offset,
                                              blksize, uio->uio_resid,
                                              seqcount * MAXBSIZE, &bp);
                } else {
                        error = breadnx(ap->a_vp, base_offset, blksize,
                                        NULL, NULL, 0, &bp);
                }
                if (error) {
                        brelse(bp);
                        break;
                }
skip:
                if ((hammer_debug_io & 0x0001) && (bp->b_flags & B_IOISSUED)) {
                        hdkprintf("zone2_offset %016jx read file %016jx@%016jx\n",
                                (intmax_t)bp->b_bio2.bio_offset,
                                (intmax_t)ip->obj_id,
                                (intmax_t)bp->b_loffset);
                }
                bp->b_flags &= ~B_IOISSUED;
                if (blksize == HAMMER_XBUFSIZE)
                        bp->b_flags |= B_CLUSTEROK;

                n = blksize - offset;
                if (n > uio->uio_resid)
                        n = uio->uio_resid;
                if (n > ip->ino_data.size - uio->uio_offset)
                        n = (int)(ip->ino_data.size - uio->uio_offset);

                /*
                 * Set B_AGE, data has a lower priority than meta-data.
                 *
                 * Use a hold/unlock/drop sequence to run the uiomove
                 * with the buffer unlocked, avoiding deadlocks against
                 * read()s on mmap()'d spaces.
                 */
                bp->b_flags |= B_AGE;
                error = uiomovebp(bp, (char *)bp->b_data + offset, n, uio);
                bqrelse(bp);

                if (error)
                        break;
                hammer_stats_file_read += n;
        }

finished:
        /*
         * Try to update the atime with just the inode lock for maximum
         * concurrency.  If we can't shortcut it we have to get the full
         * blown transaction.
         */
        if (got_trans == 0 && hammer_update_atime_quick(ip) < 0) {
                hammer_start_transaction(&trans, ip->hmp);
                got_trans = 1;
        }

        if (got_trans) {
                if ((ip->flags & HAMMER_INODE_RO) == 0 &&
                    (ip->hmp->mp->mnt_flag & MNT_NOATIME) == 0) {
                        lwkt_gettoken(&hmp->fs_token);
                        ip->ino_data.atime = trans.time;
                        hammer_modify_inode(&trans, ip, HAMMER_INODE_ATIME);
                        hammer_done_transaction(&trans);
                        lwkt_reltoken(&hmp->fs_token);
                } else {
                        hammer_done_transaction(&trans);
                }
        }
        return (error);
}
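/*
 * Worked example of the block math in the read loop above, assuming the
 * common 16KB small-buffer size returned by hammer_blocksize() for low
 * offsets: for uio_offset == 40000, offset = 40000 & 16383 = 7232,
 * base_offset = 32768, and n = 16384 - 7232 = 9152 bytes are copied out
 * of the buffer (further clipped by uio_resid and by ino_data.size).
 */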
/*
 * hammer_vop_write { vp, uio, ioflag, cred }
 */
static
int
hammer_vop_write(struct vop_write_args *ap)
{
        struct hammer_transaction trans;
        hammer_inode_t ip;
        hammer_mount_t hmp;
        thread_t td;
        struct uio *uio;
        struct buf *bp;
        int offset;
        off_t base_offset;
        int64_t cluster_eof;
        int error;
        int flags;
        int kflags;
        int n;
        int seqcount;
        int bigwrite;
        int blksize;
        int blkmask;

        if (ap->a_vp->v_type != VREG)
                return (EINVAL);

        ip = VTOI(ap->a_vp);
        hmp = ip->hmp;
        error = 0;
        kflags = 0;
        seqcount = ap->a_ioflag >> 16;

        if (ip->flags & HAMMER_INODE_RO)
                return (EROFS);

        /*
         * Create a transaction to cover the operations we perform.
         */
        hammer_start_transaction(&trans, hmp);
        uio = ap->a_uio;

        /*
         * Check append mode
         */
        if (ap->a_ioflag & IO_APPEND)
                uio->uio_offset = ip->ino_data.size;

        /*
         * Check for illegal write offsets.  Valid range is 0...2^63-1.
         *
         * NOTE: the base_off assignment is required to work around what
         * I consider to be a GCC-4 optimization bug.
         */
        if (uio->uio_offset < 0) {
                hammer_done_transaction(&trans);
                return (EFBIG);
        }
        base_offset = uio->uio_offset + uio->uio_resid; /* work around gcc-4 */
        if (uio->uio_resid > 0 && base_offset <= uio->uio_offset) {
                hammer_done_transaction(&trans);
                return (EFBIG);
        }

        if (uio->uio_resid > 0 && (td = uio->uio_td) != NULL && td->td_proc &&
            base_offset > td->td_proc->p_rlimit[RLIMIT_FSIZE].rlim_cur) {
                hammer_done_transaction(&trans);
                lwpsignal(td->td_proc, td->td_lwp, SIGXFSZ);
                return (EFBIG);
        }

        /*
         * If reading or writing a huge amount of data we have to break
         * atomicity and allow the operation to be interrupted by a signal
         * or it can DOS the machine.
         *
         * Preset redo_count so we stop generating REDOs earlier if the
         * limit is exceeded.
         *
         * redo_count is heuristical, SMP races are ok
         */
        bigwrite = (uio->uio_resid > 100 * 1024 * 1024);
        if ((ip->flags & HAMMER_INODE_REDO) &&
            ip->redo_count < hammer_limit_redo) {
                ip->redo_count += uio->uio_resid;
        }

        /*
         * Access the data typically in HAMMER_BUFSIZE blocks via the
         * buffer cache, but HAMMER may use a variable block size based
         * on the offset.
         */
        while (uio->uio_resid > 0) {
                int fixsize = 0;
                int endofblk;
                int trivial;
                int64_t nsize;

                if ((error = hammer_checkspace(hmp, HAMMER_CHKSPC_WRITE)) != 0)
                        break;
                if (bigwrite && (error = hammer_signal_check(hmp)) != 0)
                        break;

                blksize = hammer_blocksize(uio->uio_offset);

                /*
                 * Control the number of pending records associated with
                 * this inode.  If too many have accumulated start a
                 * flush.  Try to maintain a pipeline with the flusher.
                 *
                 * NOTE: It is possible for other sources to grow the
                 *       records but not necessarily issue another flush,
                 *       so use a timeout and ensure that a re-flush occurs.
                 */
                if (ip->rsv_recs >= hammer_limit_inode_recs) {
                        lwkt_gettoken(&hmp->fs_token);
                        hammer_flush_inode(ip, HAMMER_FLUSH_SIGNAL);
                        while (ip->rsv_recs >= hammer_limit_inode_recs * 2) {
                                ip->flags |= HAMMER_INODE_RECSW;
                                tsleep(&ip->rsv_recs, 0, "hmrwww", hz);
                                hammer_flush_inode(ip, HAMMER_FLUSH_SIGNAL);
                        }
                        lwkt_reltoken(&hmp->fs_token);
                }

                /*
                 * Do not allow HAMMER to blow out the buffer cache.  Very
                 * large UIOs can lockout other processes due to bwillwrite()
                 * mechanics.
                 *
                 * The hammer inode is not locked during these operations.
                 * The vnode is locked which can interfere with the pageout
                 * daemon for non-UIO_NOCOPY writes but should not interfere
                 * with the buffer cache.  Even so, we cannot afford to
                 * allow the pageout daemon to build up too many dirty buffer
                 * cache buffers.
                 *
                 * Only call this if we aren't being recursively called from
                 * a virtual disk device (vn), else we may deadlock.
                 */
                if ((ap->a_ioflag & IO_RECURSE) == 0)
                        bwillwrite(blksize);

                /*
                 * Calculate the blocksize at the current offset and figure
                 * out how much we can actually write.
                 */
                blkmask = blksize - 1;
                offset = (int)uio->uio_offset & blkmask;
                base_offset = uio->uio_offset & ~(int64_t)blkmask;
                n = blksize - offset;
                if (n > uio->uio_resid) {
                        n = uio->uio_resid;
                        endofblk = 0;
                } else {
                        endofblk = 1;
                }
                nsize = uio->uio_offset + n;
                if (nsize > ip->ino_data.size) {
                        if (uio->uio_offset > ip->ino_data.size)
                                trivial = 0;
                        else
                                trivial = 1;
                        nvextendbuf(ap->a_vp,
                                    ip->ino_data.size,
                                    nsize,
                                    hammer_blocksize(ip->ino_data.size),
                                    hammer_blocksize(nsize),
                                    hammer_blockoff(ip->ino_data.size),
                                    hammer_blockoff(nsize),
                                    trivial);
                        fixsize = 1;
                        kflags |= NOTE_EXTEND;
                }

                if (uio->uio_segflg == UIO_NOCOPY) {
                        /*
                         * Issuing a write with the same data backing the
                         * buffer.  Instantiate the buffer to collect the
                         * backing vm pages, then read-in any missing bits.
                         *
                         * This case is used by vop_stdputpages().
                         */
                        bp = getblk(ap->a_vp, base_offset,
                                    blksize, GETBLK_BHEAVY, 0);
                        if ((bp->b_flags & B_CACHE) == 0) {
                                bqrelse(bp);
                                error = bread(ap->a_vp, base_offset,
                                              blksize, &bp);
                        }
                } else if (offset == 0 && uio->uio_resid >= blksize) {
                        /*
                         * Even though we are entirely overwriting the buffer
                         * we may still have to zero it out to avoid a
                         * mmap/write visibility issue.
                         */
                        bp = getblk(ap->a_vp, base_offset, blksize, GETBLK_BHEAVY, 0);
                        if ((bp->b_flags & B_CACHE) == 0)
                                vfs_bio_clrbuf(bp);
                } else if (base_offset >= ip->ino_data.size) {
                        /*
                         * If the base offset of the buffer is beyond the
                         * file EOF, we don't have to issue a read.
                         */
                        bp = getblk(ap->a_vp, base_offset,
                                    blksize, GETBLK_BHEAVY, 0);
                        vfs_bio_clrbuf(bp);
                } else {
                        /*
                         * Partial overwrite, read in any missing bits then
                         * replace the portion being written.
                         */
                        error = bread(ap->a_vp, base_offset, blksize, &bp);
                        if (error == 0)
                                bheavy(bp);
                }
                if (error == 0)
                        error = uiomovebp(bp, bp->b_data + offset, n, uio);

                lwkt_gettoken(&hmp->fs_token);

                /*
                 * Generate REDO records if enabled and redo_count will not
                 * exceed the limit.
                 *
                 * If redo_count exceeds the limit we stop generating records
                 * and clear HAMMER_INODE_REDO.  This will cause the next
                 * fsync() to do a full meta-data sync instead of just an
                 * UNDO/REDO fifo update.
                 *
                 * When clearing HAMMER_INODE_REDO any pre-existing REDOs
                 * will still be tracked.  The tracks will be terminated
                 * when the related meta-data (including possible data
                 * modifications which are not tracked via REDO) is
                 * flushed.
                 */
                if ((ip->flags & HAMMER_INODE_REDO) && error == 0) {
                        if (ip->redo_count < hammer_limit_redo) {
                                bp->b_flags |= B_VFSFLAG1;
                                error = hammer_generate_redo(&trans, ip,
                                                     base_offset + offset,
                                                     HAMMER_REDO_WRITE,
                                                     bp->b_data + offset,
                                                     (size_t)n);
                        } else {
                                ip->flags &= ~HAMMER_INODE_REDO;
                        }
                }

                /*
                 * If we screwed up we have to undo any VM size changes we
                 * made.
                 */
                if (error) {
                        brelse(bp);
                        if (fixsize) {
                                nvtruncbuf(ap->a_vp, ip->ino_data.size,
                                          hammer_blocksize(ip->ino_data.size),
                                          hammer_blockoff(ip->ino_data.size),
                                          0);
                        }
                        lwkt_reltoken(&hmp->fs_token);
                        break;
                }
                kflags |= NOTE_WRITE;
                hammer_stats_file_write += n;
                if (blksize == HAMMER_XBUFSIZE)
                        bp->b_flags |= B_CLUSTEROK;
                if (ip->ino_data.size < uio->uio_offset) {
                        ip->ino_data.size = uio->uio_offset;
                        flags = HAMMER_INODE_SDIRTY;
                } else {
                        flags = 0;
                }
                ip->ino_data.mtime = trans.time;
                flags |= HAMMER_INODE_MTIME | HAMMER_INODE_BUFS;
                hammer_modify_inode(&trans, ip, flags);

                /*
                 * Once we dirty the buffer any cached zone-X offset
                 * becomes invalid.  HAMMER NOTE: no-history mode cannot
                 * allow overwriting over the same data sector unless
                 * we provide UNDOs for the old data, which we don't.
                 */
                bp->b_bio2.bio_offset = NOOFFSET;

                lwkt_reltoken(&hmp->fs_token);

                /*
                 * Final buffer disposition.
                 *
                 * Because meta-data updates are deferred, HAMMER is
                 * especially sensitive to excessive bdwrite()s because
                 * the I/O stream is not broken up by disk reads.  So the
                 * buffer cache simply cannot keep up.
                 *
                 * WARNING!  blksize is variable.  cluster_write() is
                 *           expected to not blow up if it encounters
                 *           buffers that do not match the passed blksize.
                 *
                 * NOTE!  Hammer shouldn't need to bawrite()/cluster_write().
                 *        The ip->rsv_recs check should burst-flush the data.
                 *        If we queue it immediately the buf could be left
                 *        locked on the device queue for a very long time.
                 *
                 *        However, failing to flush a dirty buffer out when
                 *        issued from the pageout daemon can result in a low
                 *        memory deadlock against bio_page_alloc(), so we
                 *        have to bawrite() on IO_ASYNC as well.
                 *
                 * NOTE!  To avoid degenerate stalls due to mismatched block
                 *        sizes we only honor IO_DIRECT on the write which
                 *        abuts the end of the buffer.  However, we must
                 *        honor IO_SYNC in case someone is silly enough to
                 *        configure a HAMMER file as swap, or when HAMMER
                 *        is serving NFS (for commits).  Ick ick.
                 */
                bp->b_flags |= B_AGE;
                if (blksize == HAMMER_XBUFSIZE)
                        bp->b_flags |= B_CLUSTEROK;

                if (ap->a_ioflag & IO_SYNC) {
                        bwrite(bp);
                } else if ((ap->a_ioflag & IO_DIRECT) && endofblk) {
                        bawrite(bp);
                } else if (ap->a_ioflag & IO_ASYNC) {
                        bawrite(bp);
                } else if (hammer_cluster_enable &&
                    !(ap->a_vp->v_mount->mnt_flag & MNT_NOCLUSTERW)) {
                        if (base_offset < HAMMER_XDEMARC)
                                cluster_eof = hammer_blockdemarc(base_offset,
                                                         ip->ino_data.size);
                        else
                                cluster_eof = ip->ino_data.size;
                        cluster_write(bp, cluster_eof, blksize, seqcount);
                } else {
                        bdwrite(bp);
                }
        }
        hammer_done_transaction(&trans);
        hammer_knote(ap->a_vp, kflags);

        return (error);
}
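/*
 * The disposition ladder above, in order of priority (a reading of the
 * flag tests; the elided branches are assumed to issue the usual
 * bwrite/bawrite/bdwrite buffer-cache calls):
 *
 *      IO_SYNC                   -> synchronous bwrite()
 *      IO_DIRECT at end-of-block -> bawrite()
 *      IO_ASYNC                  -> bawrite()
 *      clustering enabled        -> cluster_write() up to cluster_eof
 *      otherwise                 -> delayed write (bdwrite())
 *
 * e.g. an NFS commit (IO_SYNC) always reaches stable storage before
 * returning, while a normal write() typically takes the cluster_write()
 * path.
 */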
/*
 * hammer_vop_access { vp, mode, cred }
 *
 * MPSAFE - does not require fs_token
 */
static
int
hammer_vop_access(struct vop_access_args *ap)
{
        hammer_inode_t ip = VTOI(ap->a_vp);
        uid_t uid;
        gid_t gid;
        int error;

        ++hammer_stats_file_iopsr;
        uid = hammer_to_unix_xid(&ip->ino_data.uid);
        gid = hammer_to_unix_xid(&ip->ino_data.gid);

        error = vop_helper_access(ap, uid, gid, ip->ino_data.mode,
                                  ip->ino_data.uflags);
        return (error);
}
/*
 * hammer_vop_advlock { vp, id, op, fl, flags }
 *
 * MPSAFE - does not require fs_token
 */
static
int
hammer_vop_advlock(struct vop_advlock_args *ap)
{
        hammer_inode_t ip = VTOI(ap->a_vp);

        return (lf_advlock(ap, &ip->advlock, ip->ino_data.size));
}
/*
 * hammer_vop_close { vp, fflag }
 *
 * We can only sync-on-close for normal closes.  XXX disabled for now.
 */
static
int
hammer_vop_close(struct vop_close_args *ap)
{
#if 0
        struct vnode *vp = ap->a_vp;
        hammer_inode_t ip = VTOI(vp);
        int waitfor;

        if (ip->flags & (HAMMER_INODE_CLOSESYNC|HAMMER_INODE_CLOSEASYNC)) {
                if (vn_islocked(vp) == LK_EXCLUSIVE &&
                    (vp->v_flag & (VINACTIVE|VRECLAIMED)) == 0) {
                        if (ip->flags & HAMMER_INODE_CLOSESYNC)
                                waitfor = MNT_WAIT;
                        else
                                waitfor = MNT_NOWAIT;
                        ip->flags &= ~(HAMMER_INODE_CLOSESYNC |
                                       HAMMER_INODE_CLOSEASYNC);
                        VOP_FSYNC(vp, MNT_NOWAIT, waitfor);
                }
        }
#endif
        return (vop_stdclose(ap));
}
/*
 * hammer_vop_ncreate { nch, dvp, vpp, cred, vap }
 *
 * The operating system has already ensured that the directory entry
 * does not exist and done all appropriate namespace locking.
 */
static
int
hammer_vop_ncreate(struct vop_ncreate_args *ap)
{
        struct hammer_transaction trans;
        hammer_inode_t dip;
        hammer_inode_t nip;
        struct nchandle *nch;
        hammer_mount_t hmp;
        int error;

        nch = ap->a_nch;
        dip = VTOI(ap->a_dvp);
        hmp = dip->hmp;

        if (dip->flags & HAMMER_INODE_RO)
                return (EROFS);
        if ((error = hammer_checkspace(hmp, HAMMER_CHKSPC_CREATE)) != 0)
                return (error);

        /*
         * Create a transaction to cover the operations we perform.
         */
        lwkt_gettoken(&hmp->fs_token);
        hammer_start_transaction(&trans, hmp);
        ++hammer_stats_file_iopsw;

        /*
         * Create a new filesystem object of the requested type.  The
         * returned inode will be referenced and shared-locked to prevent
         * it from being moved to the flusher.
         */
        error = hammer_create_inode(&trans, ap->a_vap, ap->a_cred,
                                    dip, nch->ncp->nc_name, nch->ncp->nc_nlen,
                                    NULL, &nip);
        if (error) {
                hkprintf("hammer_create_inode error %d\n", error);
                hammer_done_transaction(&trans);
                *ap->a_vpp = NULL;
                lwkt_reltoken(&hmp->fs_token);
                return (error);
        }

        /*
         * Add the new filesystem object to the directory.  This will also
         * bump the inode's link count.
         */
        error = hammer_ip_add_direntry(&trans, dip,
                                       nch->ncp->nc_name, nch->ncp->nc_nlen,
                                       nip);
        if (error)
                hkprintf("hammer_ip_add_direntry error %d\n", error);

        /*
         * Finish up.
         */
        if (error) {
                hammer_rel_inode(nip, 0);
                hammer_done_transaction(&trans);
                *ap->a_vpp = NULL;
        } else {
                error = hammer_get_vnode(nip, ap->a_vpp);
                hammer_done_transaction(&trans);
                hammer_rel_inode(nip, 0);
                if (error == 0) {
                        cache_setunresolved(ap->a_nch);
                        cache_setvp(ap->a_nch, *ap->a_vpp);
                }
                hammer_knote(ap->a_dvp, NOTE_WRITE);
        }
        lwkt_reltoken(&hmp->fs_token);
        return (error);
}
/*
 * hammer_vop_getattr { vp, vap }
 *
 * Retrieve an inode's attribute information.  When accessing inodes
 * historically we fake the atime field to ensure consistent results.
 * The atime field is stored in the B-Tree element and allowed to be
 * updated without cycling the element.
 *
 * MPSAFE - does not require fs_token
 */
static
int
hammer_vop_getattr(struct vop_getattr_args *ap)
{
        hammer_inode_t ip = VTOI(ap->a_vp);
        struct vattr *vap = ap->a_vap;

        /*
         * We want the fsid to be different when accessing a filesystem
         * with different as-of's so programs like diff don't think
         * the files are the same.
         *
         * We also want the fsid to be the same when comparing snapshots,
         * or when comparing mirrors (which might be backed by different
         * physical devices).  HAMMER fsids are based on the PFS's
         * shared_uuid field.
         *
         * XXX there is a chance of collision here.  The va_fsid reported
         * by stat is different from the more involved fsid used in the
         * mount structure.
         */
        ++hammer_stats_file_iopsr;
        hammer_lock_sh(&ip->lock);
        vap->va_fsid = ip->pfsm->fsid_udev ^ (uint32_t)ip->obj_asof ^
                       (uint32_t)(ip->obj_asof >> 32);

        vap->va_fileid = ip->ino_leaf.base.obj_id;
        vap->va_mode = ip->ino_data.mode;
        vap->va_nlink = ip->ino_data.nlinks;
        vap->va_uid = hammer_to_unix_xid(&ip->ino_data.uid);
        vap->va_gid = hammer_to_unix_xid(&ip->ino_data.gid);
        vap->va_rmajor = 0;
        vap->va_rminor = 0;
        vap->va_size = ip->ino_data.size;

        /*
         * Special case for @@PFS softlinks.  The actual size of the
         * expanded softlink is "@@0x%016llx:%05d" == 26 bytes.
         * or for MAX_TID is "@@-1:%05d" == 10 bytes.
         *
         * Note that userspace hammer command does not allow users to
         * create a @@PFS softlink under an existing other PFS (id!=0)
         * so the ip localization here for @@PFS softlink is always 0.
         */
        if (ip->ino_data.obj_type == HAMMER_OBJTYPE_SOFTLINK &&
            ip->ino_data.size == 10 &&
            ip->obj_asof == HAMMER_MAX_TID &&
            ip->obj_localization == HAMMER_DEF_LOCALIZATION &&
            strncmp(ip->ino_data.ext.symlink, "@@PFS", 5) == 0) {
                if (hammer_is_pfs_slave(&ip->pfsm->pfsd))
                        vap->va_size = 26;
                else
                        vap->va_size = 10;
        }

        /*
         * We must provide a consistent atime and mtime for snapshots
         * so people can do a 'tar cf - ... | md5' on them and get
         * consistent results.
         */
        if (ip->flags & HAMMER_INODE_RO) {
                hammer_time_to_timespec(ip->ino_data.ctime, &vap->va_atime);
                hammer_time_to_timespec(ip->ino_data.ctime, &vap->va_mtime);
        } else {
                hammer_time_to_timespec(ip->ino_data.atime, &vap->va_atime);
                hammer_time_to_timespec(ip->ino_data.mtime, &vap->va_mtime);
        }
        hammer_time_to_timespec(ip->ino_data.ctime, &vap->va_ctime);
        vap->va_flags = ip->ino_data.uflags;
        vap->va_gen = 1;        /* hammer inums are unique for all time */
        vap->va_blocksize = HAMMER_BUFSIZE;
        if (ip->ino_data.size >= HAMMER_XDEMARC) {
                vap->va_bytes = HAMMER_XBUFSIZE64_DOALIGN(ip->ino_data.size);
        } else if (ip->ino_data.size > HAMMER_HBUFSIZE) {
                vap->va_bytes = HAMMER_BUFSIZE64_DOALIGN(ip->ino_data.size);
        } else {
                vap->va_bytes = HAMMER_DATA_DOALIGN(ip->ino_data.size);
        }

        vap->va_type = hammer_get_vnode_type(ip->ino_data.obj_type);
        vap->va_filerev = 0;    /* XXX */
        vap->va_uid_uuid = ip->ino_data.uid;
        vap->va_gid_uuid = ip->ino_data.gid;
        vap->va_fsid_uuid = ip->hmp->fsid;
        vap->va_vaflags = VA_UID_UUID_VALID | VA_GID_UUID_VALID |
                          VA_FSID_UUID_VALID;

        switch (ip->ino_data.obj_type) {
        case HAMMER_OBJTYPE_CDEV:
        case HAMMER_OBJTYPE_BDEV:
                vap->va_rmajor = ip->ino_data.rmajor;
                vap->va_rminor = ip->ino_data.rminor;
                break;
        default:
                break;
        }
        hammer_unlock(&ip->lock);
        return (0);
}
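/*
 * Worked example for the va_bytes calculation above, assuming 16KB
 * (HAMMER_BUFSIZE) and 64KB (HAMMER_XBUFSIZE) buffer sizes: a 100000
 * byte file below the demarc aligns to the 16KB boundary, so va_bytes =
 * 114688 (7 * 16384); a tiny 100 byte file only aligns to the small
 * HAMMER_DATA_DOALIGN granularity instead of consuming a full buffer.
 */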
/*
 * hammer_vop_nresolve { nch, dvp, cred }
 *
 * Locate the requested directory entry.
 */
static
int
hammer_vop_nresolve(struct vop_nresolve_args *ap)
{
        struct hammer_transaction trans;
        struct namecache *ncp;
        hammer_mount_t hmp;
        hammer_inode_t dip;
        hammer_inode_t ip;
        hammer_tid_t asof;
        struct hammer_cursor cursor;
        struct vnode *vp;
        int64_t namekey;
        int64_t obj_id;
        int error;
        int i;
        int nlen;
        int flags;
        int ispfs;
        uint32_t localization;
        uint32_t max_iterations;

        /*
         * Misc initialization, plus handle as-of name extensions.  Look for
         * the '@@' extension.  Note that as-of files and directories cannot
         * be modified.
         */
        dip = VTOI(ap->a_dvp);
        ncp = ap->a_nch->ncp;
        asof = dip->obj_asof;
        localization = dip->obj_localization;   /* for code consistency */
        nlen = ncp->nc_nlen;
        flags = dip->flags & HAMMER_INODE_RO;
        ispfs = 0;
        hmp = dip->hmp;

        lwkt_gettoken(&hmp->fs_token);
        hammer_simple_transaction(&trans, hmp);
        ++hammer_stats_file_iopsr;

        for (i = 0; i < nlen; ++i) {
                if (ncp->nc_name[i] == '@' && ncp->nc_name[i+1] == '@') {
                        error = hammer_str_to_tid(ncp->nc_name + i + 2,
                                                  &ispfs, &asof, &localization);
                        if (error != 0) {
                                i = nlen;
                                break;
                        }
                        if (asof != HAMMER_MAX_TID)
                                flags |= HAMMER_INODE_RO;
                        break;
                }
        }
        nlen = i;

        /*
         * If this is a PFS softlink we dive into the PFS
         */
        if (ispfs && nlen == 0) {
                ip = hammer_get_inode(&trans, dip, HAMMER_OBJID_ROOT,
                                      asof, localization,
                                      flags, &error);
                if (error == 0) {
                        error = hammer_get_vnode(ip, &vp);
                        hammer_rel_inode(ip, 0);
                } else {
                        vp = NULL;
                }
                if (error == 0) {
                        vn_unlock(vp);
                        cache_setvp(ap->a_nch, vp);
                        vrele(vp);
                }
                goto done;
        }

        /*
         * If there is no path component the time extension is relative to dip.
         * e.g. "fubar/@@<snapshot>"
         *
         * "." is handled by the kernel, but ".@@<snapshot>" is not.
         * e.g. "fubar/.@@<snapshot>"
         *
         * ".." is handled by the kernel.  We do not currently handle
         * "..@@<snapshot>".
         */
        if (nlen == 0 || (nlen == 1 && ncp->nc_name[0] == '.')) {
                ip = hammer_get_inode(&trans, dip, dip->obj_id,
                                      asof, dip->obj_localization,
                                      flags, &error);
                if (error == 0) {
                        error = hammer_get_vnode(ip, &vp);
                        hammer_rel_inode(ip, 0);
                } else {
                        vp = NULL;
                }
                if (error == 0) {
                        vn_unlock(vp);
                        cache_setvp(ap->a_nch, vp);
                        vrele(vp);
                }
                goto done;
        }

        /*
         * Calculate the namekey and setup the key range for the scan.  This
         * works kinda like a chained hash table where the lower 32 bits
         * of the namekey synthesize the chain.
         *
         * The key range is inclusive of both key_beg and key_end.
         */
        namekey = hammer_direntry_namekey(dip, ncp->nc_name, nlen,
                                          &max_iterations);

        error = hammer_init_cursor(&trans, &cursor, &dip->cache[1], dip);
        cursor.key_beg.localization = dip->obj_localization |
                                      hammer_dir_localization(dip);
        cursor.key_beg.obj_id = dip->obj_id;
        cursor.key_beg.key = namekey;
        cursor.key_beg.create_tid = 0;
        cursor.key_beg.delete_tid = 0;
        cursor.key_beg.rec_type = HAMMER_RECTYPE_DIRENTRY;
        cursor.key_beg.obj_type = 0;

        cursor.key_end = cursor.key_beg;
        cursor.key_end.key += max_iterations;
        cursor.asof = asof;
        cursor.flags |= HAMMER_CURSOR_END_INCLUSIVE | HAMMER_CURSOR_ASOF;

        /*
         * Scan all matching records (the chain), locate the one matching
         * the requested path component.
         *
         * The hammer_ip_*() functions merge in-memory records with on-disk
         * records for the purposes of the search.
         */
        obj_id = 0;
        localization = HAMMER_DEF_LOCALIZATION;

        if (error == 0) {
                error = hammer_ip_first(&cursor);
                while (error == 0) {
                        error = hammer_ip_resolve_data(&cursor);
                        if (error)
                                break;
                        if (nlen == cursor.leaf->data_len - HAMMER_ENTRY_NAME_OFF &&
                            bcmp(ncp->nc_name, cursor.data->entry.name, nlen) == 0) {
                                obj_id = cursor.data->entry.obj_id;
                                localization = cursor.data->entry.localization;
                                break;
                        }
                        error = hammer_ip_next(&cursor);
                }
        }
        hammer_done_cursor(&cursor);

        /*
         * Lookup the obj_id.  This should always succeed.  If it does not
         * the filesystem may be damaged and we return a dummy inode.
         */
        if (error == 0) {
                ip = hammer_get_inode(&trans, dip, obj_id,
                                      asof, localization,
                                      flags, &error);
                if (error == ENOENT) {
                        hkprintf("WARNING: Missing inode for dirent \"%s\"\n"
                                "\tobj_id = %016jx, asof=%016jx, lo=%08x\n",
                                ncp->nc_name,
                                (intmax_t)obj_id, (intmax_t)asof,
                                localization);
                        error = 0;
                        ip = hammer_get_dummy_inode(&trans, dip, obj_id,
                                                    asof, localization,
                                                    flags, &error);
                }
                if (error == 0) {
                        error = hammer_get_vnode(ip, &vp);
                        hammer_rel_inode(ip, 0);
                } else {
                        vp = NULL;
                }
                if (error == 0) {
                        vn_unlock(vp);
                        cache_setvp(ap->a_nch, vp);
                        vrele(vp);
                }
        } else if (error == ENOENT) {
                cache_setvp(ap->a_nch, NULL);
        }
done:
        hammer_done_transaction(&trans);
        lwkt_reltoken(&hmp->fs_token);
        return (error);
}
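/*
 * Example of the chained-hash lookup performed above: a component name
 * such as "fubar" hashes via hammer_direntry_namekey() to a 64 bit key
 * whose low bits act as the hash chain.  The cursor then iterates the
 * inclusive range [namekey, namekey + max_iterations] and compares each
 * candidate's stored name, so colliding names simply occupy adjacent
 * keys in the chain.  A trailing "@@0x<tid>" extension instead redirects
 * the lookup to an as-of (historical, read-only) view of the inode.
 */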
/*
 * hammer_vop_nlookupdotdot { dvp, vpp, cred }
 *
 * Locate the parent directory of a directory vnode.
 *
 * dvp is referenced but not locked.  *vpp must be returned referenced and
 * locked.  A parent_obj_id of 0 indicates that we are at the root.
 *
 * NOTE: as-of sequences are not linked into the directory structure.  If
 * we are at the root with a different asof than the mount point, reload
 * the same directory with the mount point's asof.  I'm not sure what this
 * will do to NFS.  We encode ASOF stamps in NFS file handles so it might not
 * get confused, but it hasn't been tested.
 */
static
int
hammer_vop_nlookupdotdot(struct vop_nlookupdotdot_args *ap)
{
        struct hammer_transaction trans;
        hammer_inode_t dip;
        hammer_inode_t ip;
        hammer_mount_t hmp;
        int64_t parent_obj_id;
        uint32_t parent_obj_localization;
        hammer_tid_t asof;
        int error;

        dip = VTOI(ap->a_dvp);
        asof = dip->obj_asof;
        hmp = dip->hmp;

        /*
         * Who is our parent?  This could be the root of a pseudo-filesystem
         * whose parent is in another localization domain.
         */
        lwkt_gettoken(&hmp->fs_token);
        parent_obj_id = dip->ino_data.parent_obj_id;
        if (dip->obj_id == HAMMER_OBJID_ROOT)
                parent_obj_localization = HAMMER_DEF_LOCALIZATION;
        else
                parent_obj_localization = dip->obj_localization;

        /*
         * It's probably a PFS root when dip->ino_data.parent_obj_id is 0.
         */
        if (parent_obj_id == 0) {
                if (dip->obj_id == HAMMER_OBJID_ROOT &&
                    asof != hmp->asof) {
                        parent_obj_id = dip->obj_id;
                        asof = hmp->asof;
                        *ap->a_fakename = kmalloc(19, M_TEMP, M_WAITOK);
                        ksnprintf(*ap->a_fakename, 19, "0x%016jx",
                                  (intmax_t)dip->obj_asof);
                } else {
                        *ap->a_vpp = NULL;
                        lwkt_reltoken(&hmp->fs_token);
                        return (ENOENT);
                }
        }

        hammer_simple_transaction(&trans, hmp);
        ++hammer_stats_file_iopsr;

        ip = hammer_get_inode(&trans, dip, parent_obj_id,
                              asof, parent_obj_localization,
                              dip->flags, &error);
        if (ip) {
                error = hammer_get_vnode(ip, ap->a_vpp);
                hammer_rel_inode(ip, 0);
        } else {
                *ap->a_vpp = NULL;
        }
        hammer_done_transaction(&trans);
        lwkt_reltoken(&hmp->fs_token);
        return (error);
}
/*
 * hammer_vop_nlink { nch, dvp, vp, cred }
 */
static
int
hammer_vop_nlink(struct vop_nlink_args *ap)
{
        struct hammer_transaction trans;
        hammer_inode_t dip;
        hammer_inode_t ip;
        struct nchandle *nch;
        hammer_mount_t hmp;
        int error;

        if (ap->a_dvp->v_mount != ap->a_vp->v_mount)
                return(EXDEV);

        nch = ap->a_nch;
        dip = VTOI(ap->a_dvp);
        ip = VTOI(ap->a_vp);
        hmp = dip->hmp;

        if (dip->obj_localization != ip->obj_localization)
                return(EXDEV);

        if (dip->flags & HAMMER_INODE_RO)
                return (EROFS);
        if (ip->flags & HAMMER_INODE_RO)
                return (EROFS);
        if ((error = hammer_checkspace(hmp, HAMMER_CHKSPC_CREATE)) != 0)
                return (error);

        /*
         * Create a transaction to cover the operations we perform.
         */
        lwkt_gettoken(&hmp->fs_token);
        hammer_start_transaction(&trans, hmp);
        ++hammer_stats_file_iopsw;

        /*
         * Add the filesystem object to the directory.  Note that neither
         * dip nor ip are referenced or locked, but their vnodes are
         * referenced.  This function will bump the inode's link count.
         */
        error = hammer_ip_add_direntry(&trans, dip,
                                       nch->ncp->nc_name, nch->ncp->nc_nlen,
                                       ip);

        /*
         * Finish up.
         */
        if (error == 0) {
                cache_setunresolved(nch);
                cache_setvp(nch, ap->a_vp);
        }
        hammer_done_transaction(&trans);
        hammer_knote(ap->a_vp, NOTE_LINK);
        hammer_knote(ap->a_dvp, NOTE_WRITE);
        lwkt_reltoken(&hmp->fs_token);
        return (error);
}
/*
 * hammer_vop_nmkdir { nch, dvp, vpp, cred, vap }
 *
 * The operating system has already ensured that the directory entry
 * does not exist and done all appropriate namespace locking.
 */
static
int
hammer_vop_nmkdir(struct vop_nmkdir_args *ap)
{
        struct hammer_transaction trans;
        hammer_inode_t dip;
        hammer_inode_t nip;
        struct nchandle *nch;
        hammer_mount_t hmp;
        int error;

        nch = ap->a_nch;
        dip = VTOI(ap->a_dvp);
        hmp = dip->hmp;

        if (dip->flags & HAMMER_INODE_RO)
                return (EROFS);
        if ((error = hammer_checkspace(hmp, HAMMER_CHKSPC_CREATE)) != 0)
                return (error);

        /*
         * Create a transaction to cover the operations we perform.
         */
        lwkt_gettoken(&hmp->fs_token);
        hammer_start_transaction(&trans, hmp);
        ++hammer_stats_file_iopsw;

        /*
         * Create a new filesystem object of the requested type.  The
         * returned inode will be referenced but not locked.
         */
        error = hammer_create_inode(&trans, ap->a_vap, ap->a_cred,
                                    dip, nch->ncp->nc_name, nch->ncp->nc_nlen,
                                    NULL, &nip);
        if (error) {
                hammer_done_transaction(&trans);
                *ap->a_vpp = NULL;
                lwkt_reltoken(&hmp->fs_token);
                return (error);
        }

        /*
         * Add the new filesystem object to the directory.  This will also
         * bump the inode's link count.
         */
        error = hammer_ip_add_direntry(&trans, dip,
                                       nch->ncp->nc_name, nch->ncp->nc_nlen,
                                       nip);
        if (error)
                hkprintf("hammer_mkdir (add) error %d\n", error);

        /*
         * Finish up.
         */
        if (error) {
                hammer_rel_inode(nip, 0);
                *ap->a_vpp = NULL;
        } else {
                error = hammer_get_vnode(nip, ap->a_vpp);
                hammer_rel_inode(nip, 0);
                if (error == 0) {
                        cache_setunresolved(ap->a_nch);
                        cache_setvp(ap->a_nch, *ap->a_vpp);
                }
        }
        hammer_done_transaction(&trans);
        if (error == 0)
                hammer_knote(ap->a_dvp, NOTE_WRITE | NOTE_LINK);
        lwkt_reltoken(&hmp->fs_token);
        return (error);
}
/*
 * hammer_vop_nmknod { nch, dvp, vpp, cred, vap }
 *
 * The operating system has already ensured that the directory entry
 * does not exist and done all appropriate namespace locking.
 */
static
int
hammer_vop_nmknod(struct vop_nmknod_args *ap)
{
        struct hammer_transaction trans;
        hammer_inode_t dip;
        hammer_inode_t nip;
        struct nchandle *nch;
        hammer_mount_t hmp;
        int error;

        nch = ap->a_nch;
        dip = VTOI(ap->a_dvp);
        hmp = dip->hmp;

        if (dip->flags & HAMMER_INODE_RO)
                return (EROFS);
        if ((error = hammer_checkspace(hmp, HAMMER_CHKSPC_CREATE)) != 0)
                return (error);

        /*
         * Create a transaction to cover the operations we perform.
         */
        lwkt_gettoken(&hmp->fs_token);
        hammer_start_transaction(&trans, hmp);
        ++hammer_stats_file_iopsw;

        /*
         * Create a new filesystem object of the requested type.  The
         * returned inode will be referenced but not locked.
         *
         * If mknod specifies a directory a pseudo-fs is created.
         */
        error = hammer_create_inode(&trans, ap->a_vap, ap->a_cred,
                                    dip, nch->ncp->nc_name, nch->ncp->nc_nlen,
                                    NULL, &nip);
        if (error) {
                hammer_done_transaction(&trans);
                *ap->a_vpp = NULL;
                lwkt_reltoken(&hmp->fs_token);
                return (error);
        }

        /*
         * Add the new filesystem object to the directory.  This will also
         * bump the inode's link count.
         */
        error = hammer_ip_add_direntry(&trans, dip,
                                       nch->ncp->nc_name, nch->ncp->nc_nlen,
                                       nip);

        /*
         * Finish up.
         */
        if (error) {
                hammer_rel_inode(nip, 0);
                *ap->a_vpp = NULL;
        } else {
                error = hammer_get_vnode(nip, ap->a_vpp);
                hammer_rel_inode(nip, 0);
                if (error == 0) {
                        cache_setunresolved(ap->a_nch);
                        cache_setvp(ap->a_nch, *ap->a_vpp);
                }
        }
        hammer_done_transaction(&trans);
        if (error == 0)
                hammer_knote(ap->a_dvp, NOTE_WRITE);
        lwkt_reltoken(&hmp->fs_token);
        return (error);
}
/*
 * hammer_vop_open { vp, mode, cred, fp }
 *
 * MPSAFE (does not require fs_token)
 */
static
int
hammer_vop_open(struct vop_open_args *ap)
{
        hammer_inode_t ip;

        ++hammer_stats_file_iopsr;
        ip = VTOI(ap->a_vp);

        if ((ap->a_mode & FWRITE) && (ip->flags & HAMMER_INODE_RO))
                return (EROFS);
        return(vop_stdopen(ap));
}
/*
 * hammer_vop_print { vp }
 */
static
int
hammer_vop_print(struct vop_print_args *ap)
{
        return EOPNOTSUPP;
}
/*
 * hammer_vop_readdir { vp, uio, cred, *eofflag, *ncookies, off_t **cookies }
 */
static
int
hammer_vop_readdir(struct vop_readdir_args *ap)
{
        struct hammer_transaction trans;
        struct hammer_cursor cursor;
        hammer_inode_t ip;
        hammer_mount_t hmp;
        struct uio *uio;
        hammer_base_elm_t base;
        int error;
        int cookie_index;
        int ncookies;
        off_t *cookies;
        off_t saveoff;
        int r;
        int dtype;

        ++hammer_stats_file_iopsr;
        ip = VTOI(ap->a_vp);
        uio = ap->a_uio;
        saveoff = uio->uio_offset;
        hmp = ip->hmp;

        if (ap->a_ncookies) {
                ncookies = uio->uio_resid / 16 + 1;
                if (ncookies > 1024)
                        ncookies = 1024;
                cookies = kmalloc(ncookies * sizeof(off_t), M_TEMP, M_WAITOK);
                cookie_index = 0;
        } else {
                ncookies = -1;
                cookies = NULL;
                cookie_index = 0;
        }

        lwkt_gettoken(&hmp->fs_token);
        hammer_simple_transaction(&trans, hmp);

        /*
         * Handle artificial entries
         *
         * It should be noted that the minimum value for a directory
         * hash key on-media is 0x0000000100000000, so we can use anything
         * less than that to represent our 'special' key space.
         */
        error = 0;
        if (saveoff == 0) {
                r = vop_write_dirent(&error, uio, ip->obj_id, DT_DIR, 1, ".");
                if (r)
                        goto done;
                if (cookies)
                        cookies[cookie_index] = saveoff;
                ++saveoff;
                ++cookie_index;
                if (cookie_index == ncookies)
                        goto done;
        }
        if (saveoff == 1) {
                if (ip->ino_data.parent_obj_id) {
                        r = vop_write_dirent(&error, uio,
                                             ip->ino_data.parent_obj_id,
                                             DT_DIR, 2, "..");
                } else {
                        r = vop_write_dirent(&error, uio,
                                             ip->obj_id, DT_DIR, 2, "..");
                }
                if (r)
                        goto done;
                if (cookies)
                        cookies[cookie_index] = saveoff;
                ++saveoff;
                ++cookie_index;
                if (cookie_index == ncookies)
                        goto done;
        }

        /*
         * Key range (begin and end inclusive) to scan.  Directory keys
         * directly translate to a 64 bit 'seek' position.
         */
        hammer_init_cursor(&trans, &cursor, &ip->cache[1], ip);
        cursor.key_beg.localization = ip->obj_localization |
                                      hammer_dir_localization(ip);
        cursor.key_beg.obj_id = ip->obj_id;
        cursor.key_beg.create_tid = 0;
        cursor.key_beg.delete_tid = 0;
        cursor.key_beg.rec_type = HAMMER_RECTYPE_DIRENTRY;
        cursor.key_beg.obj_type = 0;
        cursor.key_beg.key = saveoff;

        cursor.key_end = cursor.key_beg;
        cursor.key_end.key = HAMMER_MAX_KEY;
        cursor.asof = ip->obj_asof;
        cursor.flags |= HAMMER_CURSOR_END_INCLUSIVE | HAMMER_CURSOR_ASOF;

        error = hammer_ip_first(&cursor);

        while (error == 0) {
                error = hammer_ip_resolve_data(&cursor);
                if (error)
                        break;
                base = &cursor.leaf->base;
                saveoff = base->key;
                KKASSERT(cursor.leaf->data_len > HAMMER_ENTRY_NAME_OFF);

                if (base->obj_id != ip->obj_id)
                        hpanic("bad record at %p", cursor.node);

                dtype = hammer_get_dtype(cursor.leaf->base.obj_type);
                r = vop_write_dirent(
                             &error, uio, cursor.data->entry.obj_id,
                             dtype,
                             cursor.leaf->data_len - HAMMER_ENTRY_NAME_OFF,
                             (void *)cursor.data->entry.name);
                if (r)
                        break;
                ++saveoff;
                if (cookies)
                        cookies[cookie_index] = base->key;
                ++cookie_index;
                if (cookie_index == ncookies)
                        break;
                error = hammer_ip_next(&cursor);
        }
        hammer_done_cursor(&cursor);

done:
        hammer_done_transaction(&trans);

        if (ap->a_eofflag)
                *ap->a_eofflag = (error == ENOENT);
        uio->uio_offset = saveoff;
        if (error && cookie_index == 0) {
                if (error == ENOENT)
                        error = 0;
                if (cookies) {
                        kfree(cookies, M_TEMP);
                        *ap->a_ncookies = 0;
                        *ap->a_cookies = NULL;
                }
        } else {
                if (error == ENOENT)
                        error = 0;
                if (cookies) {
                        *ap->a_ncookies = cookie_index;
                        *ap->a_cookies = cookies;
                }
        }
        lwkt_reltoken(&hmp->fs_token);
        return (error);
}
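/*
 * Cookie/seek mapping used above: directory offsets 0 and 1 are the
 * synthetic "." and ".." entries (on-media hash keys start at
 * 0x0000000100000000, so small offsets are safe to reserve), and every
 * real entry's cookie is simply its B-Tree key.  A userland telldir()/
 * seekdir() pair therefore resumes the scan exactly at key_beg.key =
 * saveoff with no per-directory state kept in the kernel.
 */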
/*
 * hammer_vop_readlink { vp, uio, cred }
 */
static
int
hammer_vop_readlink(struct vop_readlink_args *ap)
{
        struct hammer_transaction trans;
        struct hammer_cursor cursor;
        hammer_inode_t ip;
        hammer_mount_t hmp;
        char buf[32];
        uint32_t localization;
        hammer_pseudofs_inmem_t pfsm;
        int error;

        ip = VTOI(ap->a_vp);
        hmp = ip->hmp;

        lwkt_gettoken(&hmp->fs_token);

        /*
         * Shortcut if the symlink data was stuffed into ino_data.
         *
         * Also expand special "@@PFS%05d" softlinks (expansion only
         * occurs for non-historical (current) accesses made from the
         * primary filesystem).
         *
         * Note that userspace hammer command does not allow users to
         * create a @@PFS softlink under an existing other PFS (id!=0)
         * so the ip localization here for @@PFS softlink is always 0.
         */
        if (ip->ino_data.size <= HAMMER_INODE_BASESYMLEN) {
                char *ptr;
                int bytes;

                ptr = ip->ino_data.ext.symlink;
                bytes = (int)ip->ino_data.size;
                if (bytes == 10 &&
                    ip->obj_asof == HAMMER_MAX_TID &&
                    ip->obj_localization == HAMMER_DEF_LOCALIZATION &&
                    strncmp(ptr, "@@PFS", 5) == 0) {
                        hammer_simple_transaction(&trans, hmp);
                        bcopy(ptr + 5, buf, 5);
                        buf[5] = 0;
                        localization = pfs_to_lo(strtoul(buf, NULL, 10));
                        pfsm = hammer_load_pseudofs(&trans, localization,
                                                    &error);
                        if (error == 0) {
                                if (hammer_is_pfs_slave(&pfsm->pfsd)) {
                                        /* vap->va_size == 26 */
                                        ksnprintf(buf, sizeof(buf),
                                                  "@@0x%016jx:%05d",
                                                  (intmax_t)pfsm->pfsd.sync_end_tid,
                                                  lo_to_pfs(localization));
                                } else {
                                        /* vap->va_size == 10 */
                                        ksnprintf(buf, sizeof(buf),
                                                  "@@-1:%05d",
                                                  lo_to_pfs(localization));
                                }
                                ptr = buf;
                                bytes = strlen(buf);
                        }
                        if (pfsm)
                                hammer_rel_pseudofs(hmp, pfsm);
                        hammer_done_transaction(&trans);
                }
                error = uiomove(ptr, bytes, ap->a_uio);
                lwkt_reltoken(&hmp->fs_token);
                return (error);
        }

        /*
         * Long version
         */
        hammer_simple_transaction(&trans, hmp);
        ++hammer_stats_file_iopsr;
        hammer_init_cursor(&trans, &cursor, &ip->cache[1], ip);

        /*
         * Key range (begin and end inclusive) to scan.  Directory keys
         * directly translate to a 64 bit 'seek' position.
         */
        cursor.key_beg.localization = ip->obj_localization |
                                      HAMMER_LOCALIZE_MISC;
        cursor.key_beg.obj_id = ip->obj_id;
        cursor.key_beg.create_tid = 0;
        cursor.key_beg.delete_tid = 0;
        cursor.key_beg.rec_type = HAMMER_RECTYPE_FIX;
        cursor.key_beg.obj_type = 0;
        cursor.key_beg.key = HAMMER_FIXKEY_SYMLINK;
        cursor.asof = ip->obj_asof;
        cursor.flags |= HAMMER_CURSOR_ASOF;

        error = hammer_ip_lookup(&cursor);
        if (error == 0) {
                error = hammer_ip_resolve_data(&cursor);
                if (error == 0) {
                        KKASSERT(cursor.leaf->data_len >=
                                 HAMMER_SYMLINK_NAME_OFF);
                        error = uiomove(cursor.data->symlink.name,
                                        cursor.leaf->data_len -
                                                HAMMER_SYMLINK_NAME_OFF,
                                        ap->a_uio);
                }
        }
        hammer_done_cursor(&cursor);
        hammer_done_transaction(&trans);
        lwkt_reltoken(&hmp->fs_token);
        return (error);
}
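/*
 * Expansion example for the @@PFS shortcut above: the stored 10 byte
 * body "@@PFS00005" is rewritten for PFS #5 as "@@-1:00005" (10 bytes,
 * master, current view) or as "@@0x<sync_end_tid in %016jx>:00005"
 * (26 bytes) when the PFS is a slave, matching the va_size special case
 * in hammer_vop_getattr().
 */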
/*
 * hammer_vop_nremove { nch, dvp, cred }
 */
static
int
hammer_vop_nremove(struct vop_nremove_args *ap)
{
        struct hammer_transaction trans;
        hammer_inode_t dip;
        hammer_mount_t hmp;
        int error;

        dip = VTOI(ap->a_dvp);
        hmp = dip->hmp;

        if (hammer_nohistory(dip) == 0 &&
            (error = hammer_checkspace(hmp, HAMMER_CHKSPC_REMOVE)) != 0) {
                return (error);
        }

        lwkt_gettoken(&hmp->fs_token);
        hammer_start_transaction(&trans, hmp);
        ++hammer_stats_file_iopsw;
        error = hammer_dounlink(&trans, ap->a_nch, ap->a_dvp, ap->a_cred, 0, 0);
        hammer_done_transaction(&trans);
        if (error == 0)
                hammer_knote(ap->a_dvp, NOTE_WRITE);
        lwkt_reltoken(&hmp->fs_token);
        return (error);
}
/*
 * hammer_vop_nrename { fnch, tnch, fdvp, tdvp, cred }
 */
static
int
hammer_vop_nrename(struct vop_nrename_args *ap)
{
        struct hammer_transaction trans;
        struct namecache *fncp;
        struct namecache *tncp;
        hammer_inode_t fdip;
        hammer_inode_t tdip;
        hammer_inode_t ip;
        hammer_mount_t hmp;
        struct hammer_cursor cursor;
        int64_t namekey;
        uint32_t max_iterations;
        int nlen, error;

        if (ap->a_fdvp->v_mount != ap->a_tdvp->v_mount)
                return(EXDEV);
        if (ap->a_fdvp->v_mount != ap->a_fnch->ncp->nc_vp->v_mount)
                return(EXDEV);

        fdip = VTOI(ap->a_fdvp);
        tdip = VTOI(ap->a_tdvp);
        fncp = ap->a_fnch->ncp;
        tncp = ap->a_tnch->ncp;
        ip = VTOI(fncp->nc_vp);
        KKASSERT(ip != NULL);

        hmp = ip->hmp;

        if (fdip->obj_localization != tdip->obj_localization)
                return(EXDEV);
        if (fdip->obj_localization != ip->obj_localization)
                return(EXDEV);

        if (fdip->flags & HAMMER_INODE_RO)
                return (EROFS);
        if (tdip->flags & HAMMER_INODE_RO)
                return (EROFS);
        if (ip->flags & HAMMER_INODE_RO)
                return (EROFS);
        if ((error = hammer_checkspace(hmp, HAMMER_CHKSPC_CREATE)) != 0)
                return (error);

        lwkt_gettoken(&hmp->fs_token);
        hammer_start_transaction(&trans, hmp);
        ++hammer_stats_file_iopsw;

        /*
         * Remove tncp from the target directory and then link ip as
         * tncp. XXX pass trans to dounlink
         *
         * Force the inode sync-time to match the transaction so it is
         * in-sync with the creation of the target directory entry.
         */
        error = hammer_dounlink(&trans, ap->a_tnch, ap->a_tdvp,
                                ap->a_cred, 0, -1);
        if (error == 0 || error == ENOENT) {
                error = hammer_ip_add_direntry(&trans, tdip,
                                               tncp->nc_name, tncp->nc_nlen,
                                               ip);
                if (error == 0) {
                        ip->ino_data.parent_obj_id = tdip->obj_id;
                        ip->ino_data.ctime = trans.time;
                        hammer_modify_inode(&trans, ip, HAMMER_INODE_DDIRTY);
                }
        }
        if (error)
                goto failed; /* XXX */

        /*
         * Locate the record in the originating directory and remove it.
         *
         * Calculate the namekey and setup the key range for the scan.  This
         * works kinda like a chained hash table where the lower 32 bits
         * of the namekey synthesize the chain.
         *
         * The key range is inclusive of both key_beg and key_end.
         */
        namekey = hammer_direntry_namekey(fdip, fncp->nc_name, fncp->nc_nlen,
                                          &max_iterations);
retry:
        hammer_init_cursor(&trans, &cursor, &fdip->cache[1], fdip);
        cursor.key_beg.localization = fdip->obj_localization |
                                      hammer_dir_localization(fdip);
        cursor.key_beg.obj_id = fdip->obj_id;
        cursor.key_beg.key = namekey;
        cursor.key_beg.create_tid = 0;
        cursor.key_beg.delete_tid = 0;
        cursor.key_beg.rec_type = HAMMER_RECTYPE_DIRENTRY;
        cursor.key_beg.obj_type = 0;

        cursor.key_end = cursor.key_beg;
        cursor.key_end.key += max_iterations;
        cursor.asof = fdip->obj_asof;
        cursor.flags |= HAMMER_CURSOR_END_INCLUSIVE | HAMMER_CURSOR_ASOF;

        /*
         * Scan all matching records (the chain), locate the one matching
         * the requested path component.
         *
         * The hammer_ip_*() functions merge in-memory records with on-disk
         * records for the purposes of the search.
         */
        error = hammer_ip_first(&cursor);
        while (error == 0) {
                if (hammer_ip_resolve_data(&cursor) != 0)
                        break;
                nlen = cursor.leaf->data_len - HAMMER_ENTRY_NAME_OFF;
                KKASSERT(nlen > 0);
                if (fncp->nc_nlen == nlen &&
                    bcmp(fncp->nc_name, cursor.data->entry.name, nlen) == 0) {
                        break;
                }
                error = hammer_ip_next(&cursor);
        }

        /*
         * If all is ok we have to get the inode so we can adjust nlinks.
         *
         * WARNING: hammer_ip_del_direntry() may have to terminate the
         * cursor to avoid a recursion.  It's ok to call hammer_done_cursor()
         * twice.
         */
        if (error == 0)
                error = hammer_ip_del_direntry(&trans, &cursor, fdip, ip);

        /*
         * XXX A deadlock here will break rename's atomicity for the
         * purposes of crash recovery.
         */
        if (error == EDEADLK) {
                hammer_done_cursor(&cursor);
                goto retry;
        }

        /*
         * Cleanup and tell the kernel that the rename succeeded.
         *
         * NOTE: ip->vp, if non-NULL, cannot be directly referenced
         *       without formally acquiring the vp since the vp might
         *       have zero refs on it, or in the middle of a reclaim,
         *       etc.
         */
        hammer_done_cursor(&cursor);
        if (error == 0) {
                cache_rename(ap->a_fnch, ap->a_tnch);
                hammer_knote(ap->a_fdvp, NOTE_WRITE);
                hammer_knote(ap->a_tdvp, NOTE_WRITE);
                if (ip->vp) {
                        struct vnode *vp;

                        error = hammer_get_vnode(ip, &vp);
                        if (error == 0 && vp) {
                                vn_unlock(vp);
                                hammer_knote(ip->vp, NOTE_RENAME);
                                vrele(vp);
                        } else {
                                hdkprintf("ip/vp race2 avoided\n");
                        }
                }
        }
failed:
        hammer_done_transaction(&trans);
        lwkt_reltoken(&hmp->fs_token);
        return (error);
}
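/*
 * Rename above is therefore three sub-operations in one transaction:
 * unlink any existing target name, add the new directory entry pointing
 * at ip, then delete the old entry found by the namekey scan.  e.g.
 * rename("a/x", "b/y") adds "y" under b before removing "x" from a; a
 * crash in between can leave both names visible, which is why the
 * EDEADLK retry and the atomicity caveat above matter.
 */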
/*
 * hammer_vop_nrmdir { nch, dvp, cred }
 */
static
int
hammer_vop_nrmdir(struct vop_nrmdir_args *ap)
{
        struct hammer_transaction trans;
        hammer_inode_t dip;
        hammer_mount_t hmp;
        int error;

        dip = VTOI(ap->a_dvp);
        hmp = dip->hmp;

        if (hammer_nohistory(dip) == 0 &&
            (error = hammer_checkspace(hmp, HAMMER_CHKSPC_REMOVE)) != 0) {
                return (error);
        }

        lwkt_gettoken(&hmp->fs_token);
        hammer_start_transaction(&trans, hmp);
        ++hammer_stats_file_iopsw;
        error = hammer_dounlink(&trans, ap->a_nch, ap->a_dvp, ap->a_cred, 0, 1);
        hammer_done_transaction(&trans);
        if (error == 0)
                hammer_knote(ap->a_dvp, NOTE_WRITE | NOTE_LINK);
        lwkt_reltoken(&hmp->fs_token);
        return (error);
}
/*
 * hammer_vop_markatime { vp, cred }
 */
static
int
hammer_vop_markatime(struct vop_markatime_args *ap)
{
        struct hammer_transaction trans;
        hammer_inode_t ip;
        hammer_mount_t hmp;

        ip = VTOI(ap->a_vp);
        if (ap->a_vp->v_mount->mnt_flag & MNT_RDONLY)
                return (EROFS);
        if (ip->flags & HAMMER_INODE_RO)
                return (EROFS);
        hmp = ip->hmp;
        if (hmp->mp->mnt_flag & MNT_NOATIME)
                return (0);
        lwkt_gettoken(&hmp->fs_token);
        hammer_start_transaction(&trans, hmp);
        ++hammer_stats_file_iopsw;

        ip->ino_data.atime = trans.time;
        hammer_modify_inode(&trans, ip, HAMMER_INODE_ATIME);
        hammer_done_transaction(&trans);
        hammer_knote(ap->a_vp, NOTE_ATTRIB);
        lwkt_reltoken(&hmp->fs_token);
        return (0);
}
2144 * hammer_vop_setattr { vp, vap, cred }
2148 hammer_vop_setattr(struct vop_setattr_args
*ap
)
2150 struct hammer_transaction trans
;
2160 int64_t aligned_size
;
2165 ip
= ap
->a_vp
->v_data
;
2170 if (ap
->a_vp
->v_mount
->mnt_flag
& MNT_RDONLY
)
2172 if (ip
->flags
& HAMMER_INODE_RO
)
2174 if (hammer_nohistory(ip
) == 0 &&
2175 (error
= hammer_checkspace(hmp
, HAMMER_CHKSPC_REMOVE
)) != 0) {
2179 lwkt_gettoken(&hmp
->fs_token
);
2180 hammer_start_transaction(&trans
, hmp
);
2181 ++hammer_stats_file_iopsw
;
2184 if (vap
->va_flags
!= VNOVAL
) {
2185 flags
= ip
->ino_data
.uflags
;
2186 error
= vop_helper_setattr_flags(&flags
, vap
->va_flags
,
2187 hammer_to_unix_xid(&ip
->ino_data
.uid
),
2190 if (ip
->ino_data
.uflags
!= flags
) {
2191 ip
->ino_data
.uflags
= flags
;
2192 ip
->ino_data
.ctime
= trans
.time
;
2193 modflags
|= HAMMER_INODE_DDIRTY
;
2194 kflags
|= NOTE_ATTRIB
;
2196 if (ip
->ino_data
.uflags
& (IMMUTABLE
| APPEND
)) {
2203 if (ip
->ino_data
.uflags
& (IMMUTABLE
| APPEND
)) {
2207 if (vap
->va_uid
!= (uid_t
)VNOVAL
|| vap
->va_gid
!= (gid_t
)VNOVAL
) {
2208 mode_t cur_mode
= ip
->ino_data
.mode
;
2209 uid_t cur_uid
= hammer_to_unix_xid(&ip
->ino_data
.uid
);
2210 gid_t cur_gid
= hammer_to_unix_xid(&ip
->ino_data
.gid
);
2214 error
= vop_helper_chown(ap
->a_vp
, vap
->va_uid
, vap
->va_gid
,
2216 &cur_uid
, &cur_gid
, &cur_mode
);
2218 hammer_guid_to_uuid(&uuid_uid
, cur_uid
);
2219 hammer_guid_to_uuid(&uuid_gid
, cur_gid
);
2220 if (bcmp(&uuid_uid
, &ip
->ino_data
.uid
,
2221 sizeof(uuid_uid
)) ||
2222 bcmp(&uuid_gid
, &ip
->ino_data
.gid
,
2223 sizeof(uuid_gid
)) ||
2224 ip
->ino_data
.mode
!= cur_mode
) {
2225 ip
->ino_data
.uid
= uuid_uid
;
2226 ip
->ino_data
.gid
= uuid_gid
;
2227 ip
->ino_data
.mode
= cur_mode
;
2228 ip
->ino_data
.ctime
= trans
.time
;
2229 modflags
|= HAMMER_INODE_DDIRTY
;
2231 kflags
|= NOTE_ATTRIB
;
2234 while (vap
->va_size
!= VNOVAL
&& ip
->ino_data
.size
!= vap
->va_size
) {
2235 switch(ap
->a_vp
->v_type
) {
2237 if (vap
->va_size
== ip
->ino_data
.size
)
2241 * Log the operation if in fast-fsync mode or if
2242 * there are unterminated redo write records present.
2244 * The second check is needed so the recovery code
2245 * properly truncates write redos even if nominal
2246 * REDO operations is turned off due to excessive
2247 * writes, because the related records might be
2248 * destroyed and never lay down a TERM_WRITE.
2250 if ((ip
->flags
& HAMMER_INODE_REDO
) ||
2251 (ip
->flags
& HAMMER_INODE_RDIRTY
)) {
2252 error
= hammer_generate_redo(&trans
, ip
,
2257 blksize
= hammer_blocksize(vap
->va_size
);
2260 * XXX break atomicy, we can deadlock the backend
2261 * if we do not release the lock. Probably not a
2264 if (vap
->va_size
< ip
->ino_data
.size
) {
2265 nvtruncbuf(ap
->a_vp
, vap
->va_size
,
2267 hammer_blockoff(vap
->va_size
),
2270 kflags
|= NOTE_WRITE
;
2272 nvextendbuf(ap
->a_vp
,
2275 hammer_blocksize(ip
->ino_data
.size
),
2276 hammer_blocksize(vap
->va_size
),
2277 hammer_blockoff(ip
->ino_data
.size
),
2278 hammer_blockoff(vap
->va_size
),
2281 kflags
|= NOTE_WRITE
| NOTE_EXTEND
;
2283 ip
->ino_data
.size
= vap
->va_size
;
2284 ip
->ino_data
.mtime
= trans
.time
;
2285 /* XXX safe to use SDIRTY instead of DDIRTY here? */
2286 modflags
|= HAMMER_INODE_MTIME
| HAMMER_INODE_DDIRTY
;
2289 * On-media truncation is cached in the inode until
2290 * the inode is synchronized. We must immediately
2291 * handle any frontend records.
2294 hammer_ip_frontend_trunc(ip
, vap
->va_size
);
2295 if ((ip
->flags
& HAMMER_INODE_TRUNCATED
) == 0) {
2296 ip
->flags
|= HAMMER_INODE_TRUNCATED
;
2297 ip
->trunc_off
= vap
->va_size
;
2298 hammer_inode_dirty(ip
);
2299 } else if (ip
->trunc_off
> vap
->va_size
) {
2300 ip
->trunc_off
= vap
->va_size
;
2306 * When truncating, nvtruncbuf() may have cleaned out
2307 * a portion of the last block on-disk in the buffer
2308 * cache. We must clean out any frontend records
2309 * for blocks beyond the new last block.
2311 aligned_size
= (vap
->va_size
+ (blksize
- 1)) &
2312 ~(int64_t)(blksize
- 1);
2313 if (truncating
&& vap
->va_size
< aligned_size
) {
2314 aligned_size
-= blksize
;
2315 hammer_ip_frontend_trunc(ip
, aligned_size
);
2320 if ((ip
->flags
& HAMMER_INODE_TRUNCATED
) == 0) {
2321 ip
->flags
|= HAMMER_INODE_TRUNCATED
;
2322 ip
->trunc_off
= vap
->va_size
;
2323 hammer_inode_dirty(ip
);
2324 } else if (ip
->trunc_off
> vap
->va_size
) {
2325 ip
->trunc_off
= vap
->va_size
;
2327 hammer_ip_frontend_trunc(ip
, vap
->va_size
);
2328 ip
->ino_data
.size
= vap
->va_size
;
2329 ip
->ino_data
.mtime
= trans
.time
;
2330 modflags
|= HAMMER_INODE_MTIME
| HAMMER_INODE_DDIRTY
;
2331 kflags
|= NOTE_ATTRIB
;

        if (vap->va_atime.tv_sec != VNOVAL) {
                ip->ino_data.atime = hammer_timespec_to_time(&vap->va_atime);
                modflags |= HAMMER_INODE_ATIME;
                kflags |= NOTE_ATTRIB;
        }
        if (vap->va_mtime.tv_sec != VNOVAL) {
                ip->ino_data.mtime = hammer_timespec_to_time(&vap->va_mtime);
                modflags |= HAMMER_INODE_MTIME;
                kflags |= NOTE_ATTRIB;
        }
        if (vap->va_mode != (mode_t)VNOVAL) {
                mode_t cur_mode = ip->ino_data.mode;
                uid_t cur_uid = hammer_to_unix_xid(&ip->ino_data.uid);
                gid_t cur_gid = hammer_to_unix_xid(&ip->ino_data.gid);

                error = vop_helper_chmod(ap->a_vp, vap->va_mode, ap->a_cred,
                                         cur_uid, cur_gid, &cur_mode);
                if (error == 0 && ip->ino_data.mode != cur_mode) {
                        ip->ino_data.mode = cur_mode;
                        ip->ino_data.ctime = trans.time;
                        modflags |= HAMMER_INODE_DDIRTY;
                        kflags |= NOTE_ATTRIB;
                }
        }
done:
        if (error == 0)
                hammer_modify_inode(&trans, ip, modflags);
        hammer_done_transaction(&trans);
        hammer_knote(ap->a_vp, kflags);
        lwkt_reltoken(&hmp->fs_token);
        return (error);
}

/*
 * hammer_vop_nsymlink { nch, dvp, vpp, cred, vap, target }
 */
static
int
hammer_vop_nsymlink(struct vop_nsymlink_args *ap)
{
        struct hammer_transaction trans;
        hammer_inode_t dip;
        hammer_inode_t nip;
        hammer_record_t record;
        struct nchandle *nch;
        hammer_mount_t hmp;
        int error;
        int bytes;

        ap->a_vap->va_type = VLNK;

        nch = ap->a_nch;
        dip = VTOI(ap->a_dvp);
        hmp = dip->hmp;

        if (dip->flags & HAMMER_INODE_RO)
                return (EROFS);
        if ((error = hammer_checkspace(hmp, HAMMER_CHKSPC_CREATE)) != 0)
                return (error);

        /*
         * Create a transaction to cover the operations we perform.
         */
        lwkt_gettoken(&hmp->fs_token);
        hammer_start_transaction(&trans, hmp);
        ++hammer_stats_file_iopsw;

        /*
         * Create a new filesystem object of the requested type.  The
         * returned inode will be referenced but not locked.
         */
        error = hammer_create_inode(&trans, ap->a_vap, ap->a_cred,
                                    dip, nch->ncp->nc_name, nch->ncp->nc_nlen,
                                    NULL, &nip);
        if (error) {
                hammer_done_transaction(&trans);
                *ap->a_vpp = NULL;
                lwkt_reltoken(&hmp->fs_token);
                return (error);
        }

        /*
         * Add a record representing the symlink.  symlink stores the link
         * as pure data, not a string, and is not \0 terminated.
         */
        if (error == 0) {
                bytes = strlen(ap->a_target);

                if (bytes <= HAMMER_INODE_BASESYMLEN) {
                        bcopy(ap->a_target, nip->ino_data.ext.symlink, bytes);
                } else {
                        record = hammer_alloc_mem_record(nip, bytes);
                        record->type = HAMMER_MEM_RECORD_GENERAL;

                        record->leaf.base.localization = nip->obj_localization |
                                                         HAMMER_LOCALIZE_MISC;
                        record->leaf.base.key = HAMMER_FIXKEY_SYMLINK;
                        record->leaf.base.rec_type = HAMMER_RECTYPE_FIX;
                        record->leaf.data_len = bytes;
                        KKASSERT(HAMMER_SYMLINK_NAME_OFF == 0);
                        bcopy(ap->a_target, record->data->symlink.name, bytes);
                        error = hammer_ip_add_record(&trans, record);
                }

                /*
                 * Set the file size to the length of the link.
                 */
                if (error == 0) {
                        nip->ino_data.size = bytes;
                        hammer_modify_inode(&trans, nip, HAMMER_INODE_DDIRTY);
                }
        }
        if (error == 0)
                error = hammer_ip_add_direntry(&trans, dip, nch->ncp->nc_name,
                                               nch->ncp->nc_nlen, nip);

        /*
         * Finish up.
         */
        if (error) {
                hammer_rel_inode(nip, 0);
                *ap->a_vpp = NULL;
        } else {
                error = hammer_get_vnode(nip, ap->a_vpp);
                hammer_rel_inode(nip, 0);
                if (error == 0) {
                        cache_setunresolved(ap->a_nch);
                        cache_setvp(ap->a_nch, *ap->a_vpp);
                        hammer_knote(ap->a_dvp, NOTE_WRITE);
                }
        }
        hammer_done_transaction(&trans);
        lwkt_reltoken(&hmp->fs_token);
        return (error);
}
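
#if 0
/*
 * Illustrative sketch (not compiled) of the storage decision made in
 * hammer_vop_nsymlink() above; 'target' and 'nbytes' are hypothetical.
 */
static void
symlink_storage_example(hammer_inode_t nip, const char *target)
{
        int nbytes = strlen(target);

        if (nbytes <= HAMMER_INODE_BASESYMLEN) {
                /* short target: inlined in nip->ino_data.ext.symlink */
        } else {
                /*
                 * long target: stored out-of-line as a HAMMER_RECTYPE_FIX
                 * record keyed by HAMMER_FIXKEY_SYMLINK, data_len = nbytes
                 */
        }
        /* either way ino_data.size = nbytes (no \0 terminator stored) */
}
#endif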

/*
 * hammer_vop_nwhiteout { nch, dvp, cred, flags }
 */
static
int
hammer_vop_nwhiteout(struct vop_nwhiteout_args *ap)
{
        struct hammer_transaction trans;
        hammer_inode_t dip;
        hammer_mount_t hmp;
        int error;

        dip = VTOI(ap->a_dvp);
        hmp = dip->hmp;

        if (hammer_nohistory(dip) == 0 &&
            (error = hammer_checkspace(hmp, HAMMER_CHKSPC_CREATE)) != 0) {
                return (error);
        }

        lwkt_gettoken(&hmp->fs_token);
        hammer_start_transaction(&trans, hmp);
        ++hammer_stats_file_iopsw;
        error = hammer_dounlink(&trans, ap->a_nch, ap->a_dvp,
                                ap->a_cred, ap->a_flags, -1);
        hammer_done_transaction(&trans);
        lwkt_reltoken(&hmp->fs_token);

        return (error);
}

/*
 * hammer_vop_ioctl { vp, command, data, fflag, cred }
 */
static
int
hammer_vop_ioctl(struct vop_ioctl_args *ap)
{
        hammer_inode_t ip = ap->a_vp->v_data;
        hammer_mount_t hmp = ip->hmp;
        int error;

        ++hammer_stats_file_iopsr;
        lwkt_gettoken(&hmp->fs_token);
        error = hammer_ioctl(ip, ap->a_command, ap->a_data,
                             ap->a_fflag, ap->a_cred);
        lwkt_reltoken(&hmp->fs_token);
        return (error);
}

static
int
hammer_vop_mountctl(struct vop_mountctl_args *ap)
{
        static const struct mountctl_opt extraopt[] = {
                { HMNT_NOHISTORY,       "nohistory" },
                { HMNT_MASTERID,        "master" },
                { HMNT_NOMIRROR,        "nomirror" },
                { 0, NULL }
        };
        hammer_mount_t hmp;
        struct mount *mp;
        int usedbytes;
        int error;

        error = 0;
        usedbytes = 0;
        mp = ap->a_head.a_ops->head.vv_mount;
        KKASSERT(mp->mnt_data != NULL);
        hmp = (hammer_mount_t)mp->mnt_data;

        lwkt_gettoken(&hmp->fs_token);

        switch(ap->a_op) {
        case MOUNTCTL_SET_EXPORT:
                if (ap->a_ctllen != sizeof(struct export_args))
                        error = EINVAL;
                else
                        error = hammer_vfs_export(mp, ap->a_op,
                                    (const struct export_args *)ap->a_ctl);
                break;
        case MOUNTCTL_MOUNTFLAGS:
                /*
                 * Call standard mountctl VOP function
                 * so we get user mount flags.
                 */
                error = vop_stdmountctl(ap);
                if (error)
                        break;

                usedbytes = *ap->a_res;

                if (usedbytes > 0 && usedbytes < ap->a_buflen) {
                        usedbytes += vfs_flagstostr(hmp->hflags, extraopt,
                                                    ap->a_buf,
                                                    ap->a_buflen - usedbytes,
                                                    &error);
                }

                *ap->a_res += usedbytes;
                break;
        default:
                error = vop_stdmountctl(ap);
                break;
        }
        lwkt_reltoken(&hmp->fs_token);
        return (error);
}
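
#if 0
/*
 * Illustrative example (not compiled): with hflags containing
 * HMNT_NOHISTORY and HMNT_MASTERID, the MOUNTCTL_MOUNTFLAGS case above
 * first lets vop_stdmountctl() emit the standard flag names and then
 * appends the HAMMER-specific names from the extraopt table via
 * vfs_flagstostr(), yielding a result along the lines of
 *
 *      "<standard flags>,nohistory,master"
 *
 * usedbytes tracks how much of a_buf is already consumed so the extra
 * names land after the standard ones.
 */
#endif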

/*
 * hammer_vop_strategy { vp, bio }
 *
 * Strategy call, used for regular file read & write only.  Note that the
 * bp may represent a cluster.
 *
 * To simplify operation and allow better optimizations in the future,
 * this code does not make any assumptions with regards to buffer alignment
 * or size.
 */
static
int
hammer_vop_strategy(struct vop_strategy_args *ap)
{
        struct buf *bp;
        int error;

        bp = ap->a_bio->bio_buf;

        switch(bp->b_cmd) {
        case BUF_CMD_READ:
                error = hammer_vop_strategy_read(ap);
                break;
        case BUF_CMD_WRITE:
                error = hammer_vop_strategy_write(ap);
                break;
        default:
                bp->b_error = error = EINVAL;
                bp->b_flags |= B_ERROR;
                biodone(ap->a_bio);
                break;
        }

        /* hammer_dump_dedup_cache(((hammer_inode_t)ap->a_vp->v_data)->hmp); */

        return (error);
}

/*
 * Read from a regular file.  Iterate the related records and fill in the
 * BIO/BUF.  Gaps are zero-filled.
 *
 * The support code in hammer_object.c should be used to deal with mixed
 * in-memory and on-disk records.
 *
 * NOTE: Can be called from the cluster code with an oversized buf.
 */
static
int
hammer_vop_strategy_read(struct vop_strategy_args *ap)
{
        struct hammer_transaction trans;
        hammer_inode_t ip;
        hammer_inode_t dip;
        hammer_mount_t hmp;
        struct hammer_cursor cursor;
        hammer_base_elm_t base;
        hammer_off_t disk_offset;
        struct bio *bio;
        struct bio *nbio;
        struct buf *bp;
        int64_t rec_offset;
        int64_t ran_end;
        int64_t tmp64;
        int error;
        int boff;
        int roff;
        int n;
        int isdedupable;

        bio = ap->a_bio;
        bp = bio->bio_buf;
        ip = ap->a_vp->v_data;
        hmp = ip->hmp;

        /*
         * The zone-2 disk offset may have been set by the cluster code via
         * a BMAP operation, or else should be NOOFFSET.
         *
         * Checking the high bits for a match against zone-2 should suffice.
         *
         * In cases where a lot of data duplication is present it may be
         * more beneficial to drop through and double-buffer through the
         * device.
         */
        nbio = push_bio(bio);
        if (hammer_is_zone_large_data(nbio->bio_offset)) {
                if (hammer_double_buffer == 0) {
                        lwkt_gettoken(&hmp->fs_token);
                        error = hammer_io_direct_read(hmp, nbio, NULL);
                        lwkt_reltoken(&hmp->fs_token);
                        return (error);
                }

                /*
                 * Try to shortcut requests for double_buffer mode too.
                 * Since this mode runs through the device buffer cache
                 * only compatible buffer sizes (meaning those generated
                 * by normal filesystem buffers) are legal.
                 */
                if (hammer_live_dedup == 0 && (bp->b_flags & B_PAGING) == 0) {
                        lwkt_gettoken(&hmp->fs_token);
                        error = hammer_io_indirect_read(hmp, nbio, NULL);
                        lwkt_reltoken(&hmp->fs_token);
                        return (error);
                }
        }

        /*
         * Well, that sucked.  Do it the hard way.  If all the stars are
         * aligned we may still be able to issue a direct-read.
         */
        lwkt_gettoken(&hmp->fs_token);
        hammer_simple_transaction(&trans, hmp);
        hammer_init_cursor(&trans, &cursor, &ip->cache[1], ip);

        /*
         * Key range (begin and end inclusive) to scan.  Note that the keys
         * stored in the actual records represent BASE+LEN, not BASE.  The
         * first record containing bio_offset will have a key > bio_offset.
         */
        cursor.key_beg.localization = ip->obj_localization |
                                      HAMMER_LOCALIZE_MISC;
        cursor.key_beg.obj_id = ip->obj_id;
        cursor.key_beg.create_tid = 0;
        cursor.key_beg.delete_tid = 0;
        cursor.key_beg.obj_type = 0;
        cursor.key_beg.key = bio->bio_offset + 1;
        cursor.asof = ip->obj_asof;
        cursor.flags |= HAMMER_CURSOR_ASOF;

        cursor.key_end = cursor.key_beg;
        KKASSERT(ip->ino_data.obj_type == HAMMER_OBJTYPE_REGFILE);
#if 0
        if (ip->ino_data.obj_type == HAMMER_OBJTYPE_DBFILE) {
                cursor.key_beg.rec_type = HAMMER_RECTYPE_DB;
                cursor.key_end.rec_type = HAMMER_RECTYPE_DB;
                cursor.key_end.key = HAMMER_MAX_KEY;
        } else
#endif
        {
                ran_end = bio->bio_offset + bp->b_bufsize;
                cursor.key_beg.rec_type = HAMMER_RECTYPE_DATA;
                cursor.key_end.rec_type = HAMMER_RECTYPE_DATA;
                tmp64 = ran_end + MAXPHYS + 1;  /* work-around GCC-4 bug */
                if (tmp64 < ran_end)
                        cursor.key_end.key = HAMMER_MAX_KEY;
                else
                        cursor.key_end.key = ran_end + MAXPHYS + 1;
        }
        cursor.flags |= HAMMER_CURSOR_END_INCLUSIVE;

        /*
         * Set NOSWAPCACHE for cursor data extraction if double buffering
         * is disabled or (if the file is not marked cacheable via chflags
         * and vm.swapcache_use_chflags is enabled).
         */
        if (hammer_double_buffer == 0 ||
            ((ap->a_vp->v_flag & VSWAPCACHE) == 0 &&
             vm_swapcache_use_chflags)) {
                cursor.flags |= HAMMER_CURSOR_NOSWAPCACHE;
        }
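
#if 0
        /*
         * Worked example (illustrative only) of the BASE+LEN keying
         * described above: a 16384-byte data record covering file bytes
         * [0,16384) is stored under key 16384.  For a read at
         * bio_offset 0:
         *
         *      key_beg.key = 0 + 1 = 1         (<= 16384, record found)
         *      rec_offset  = 16384 - 16384 = 0 (key - data_len)
         *
         * Scanning from bio_offset + 1 rather than bio_offset skips
         * records that end exactly at the requested offset.
         */
#endif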

        error = hammer_ip_first(&cursor);
        boff = 0;

        while (error == 0) {
                /*
                 * Get the base file offset of the record.  The key for
                 * data records is (base + bytes) rather than (base).
                 */
                base = &cursor.leaf->base;
                rec_offset = base->key - cursor.leaf->data_len;

                /*
                 * Calculate the gap, if any, and zero-fill it.
                 *
                 * n is the offset of the start of the record versus our
                 * current seek offset in the bio.
                 */
                n = (int)(rec_offset - (bio->bio_offset + boff));
                if (n > 0) {
                        if (n > bp->b_bufsize - boff)
                                n = bp->b_bufsize - boff;
                        bzero((char *)bp->b_data + boff, n);
                        boff += n;
                        n = 0;
                }

                /*
                 * Calculate the data offset in the record and the number
                 * of bytes we can copy.
                 *
                 * There are two degenerate cases.  First, boff may already
                 * be at bp->b_bufsize.  Secondly, the data offset within
                 * the record may exceed the record's size.
                 */
                roff = -n;
                rec_offset += roff;
                n = cursor.leaf->data_len - roff;
                if (n <= 0) {
                        hdkprintf("bad n=%d roff=%d\n", n, roff);
                        n = 0;
                } else if (n > bp->b_bufsize - boff) {
                        n = bp->b_bufsize - boff;
                }

                /*
                 * Deal with cached truncations.  This cool bit of code
                 * allows truncate()/ftruncate() to avoid having to sync
                 * the file.
                 *
                 * If the frontend is truncated then all backend records are
                 * subject to the frontend's truncation.
                 *
                 * If the backend is truncated then backend records on-disk
                 * (but not in-memory) are subject to the backend's
                 * truncation.  In-memory records owned by the backend
                 * represent data written after the truncation point on the
                 * backend and must not be truncated.
                 *
                 * Truncate operations deal with frontend buffer cache
                 * buffers and frontend-owned in-memory records synchronously.
                 */
                if (ip->flags & HAMMER_INODE_TRUNCATED) {
                        if (hammer_cursor_ondisk(&cursor)/* ||
                            cursor.iprec->flush_state == HAMMER_FST_FLUSH*/) {
                                if (ip->trunc_off <= rec_offset)
                                        n = 0;
                                else if (ip->trunc_off < rec_offset + n)
                                        n = (int)(ip->trunc_off - rec_offset);
                        }
                }
                if (ip->sync_flags & HAMMER_INODE_TRUNCATED) {
                        if (hammer_cursor_ondisk(&cursor)) {
                                if (ip->sync_trunc_off <= rec_offset)
                                        n = 0;
                                else if (ip->sync_trunc_off < rec_offset + n)
                                        n = (int)(ip->sync_trunc_off -
                                                  rec_offset);
                        }
                }

                /*
                 * Try to issue a direct read into our bio if possible,
                 * otherwise resolve the element data into a hammer_buffer
                 * and copy.
                 *
                 * The buffer on-disk should be zeroed past any real
                 * truncation point, but may not be for any synthesized
                 * truncation point from above.
                 *
                 * NOTE: disk_offset is only valid if the cursor data is
                 *       on-disk.
                 */
                disk_offset = cursor.leaf->data_offset + roff;
                isdedupable = (boff == 0 && n == bp->b_bufsize &&
                               hammer_cursor_ondisk(&cursor) &&
                               ((int)disk_offset & HAMMER_BUFMASK) == 0);

                if (isdedupable && hammer_double_buffer == 0) {
                        /*
                         * Direct read case
                         */
                        KKASSERT(hammer_is_zone_large_data(disk_offset));
                        nbio->bio_offset = disk_offset;
                        error = hammer_io_direct_read(hmp, nbio, cursor.leaf);
                        if (hammer_live_dedup && error == 0)
                                hammer_dedup_cache_add(ip, cursor.leaf);
                        goto done;
                } else if (isdedupable) {
                        /*
                         * Async I/O case for reading from backing store
                         * and copying the data to the filesystem buffer.
                         * live-dedup has to verify the data anyway if it
                         * gets a hit later so we can just add the entry
                         * now.
                         */
                        KKASSERT(hammer_is_zone_large_data(disk_offset));
                        nbio->bio_offset = disk_offset;
                        if (hammer_live_dedup)
                                hammer_dedup_cache_add(ip, cursor.leaf);
                        error = hammer_io_indirect_read(hmp, nbio,
                                                        cursor.leaf);
                        goto done;
                } else if (n) {
                        error = hammer_ip_resolve_data(&cursor);
                        if (error == 0) {
                                if (hammer_live_dedup && isdedupable)
                                        hammer_dedup_cache_add(ip,
                                                               cursor.leaf);
                                bcopy((char *)cursor.data + roff,
                                      (char *)bp->b_data + boff, n);
                        }
                }
                if (error)
                        break;

                /*
                 * We have to be sure that the only elements added to the
                 * dedup cache are those which are already on-media.
                 */
                if (hammer_live_dedup && hammer_cursor_ondisk(&cursor))
                        hammer_dedup_cache_add(ip, cursor.leaf);

                /*
                 * Iterate until we have filled the request.
                 */
                boff += n;
                if (boff == bp->b_bufsize)
                        break;
                error = hammer_ip_next(&cursor);
        }

        /*
         * There may have been a gap after the last record
         */
        if (error == ENOENT)
                error = 0;
        if (error == 0 && boff != bp->b_bufsize) {
                KKASSERT(boff < bp->b_bufsize);
                bzero((char *)bp->b_data + boff, bp->b_bufsize - boff);
                /* boff = bp->b_bufsize; */
        }

        /*
         * Disallow swapcache operation on the vnode buffer if double
         * buffering is enabled, the swapcache will get the data via
         * the block device buffer.
         */
        if (hammer_double_buffer)
                bp->b_flags |= B_NOTMETA;

        /*
         * Cleanup
         */
        bp->b_resid = 0;
        if (error) {
                bp->b_error = error;
                bp->b_flags |= B_ERROR;
        }
        biodone(ap->a_bio);

done:
        /*
         * Cache the b-tree node for the last data read in cache[1].
         *
         * If we hit the file EOF then also cache the node in the
         * governing directory's cache[3], it will be used to initialize
         * the new inode's cache[1] for any inodes looked up via the
         * directory.
         *
         * This doesn't reduce disk accesses since the B-Tree chain is
         * likely cached, but it does reduce cpu overhead when looking
         * up file offsets for cpdup/tar/cpio style iterations.
         */
        if (cursor.node)
                hammer_cache_node(&ip->cache[1], cursor.node);
        if (ran_end >= ip->ino_data.size) {
                dip = hammer_find_inode(&trans, ip->ino_data.parent_obj_id,
                                        ip->obj_asof, ip->obj_localization);
                if (dip) {
                        hammer_cache_node(&dip->cache[3], cursor.node);
                        hammer_rel_inode(dip, 0);
                }
        }
        hammer_done_cursor(&cursor);
        hammer_done_transaction(&trans);
        lwkt_reltoken(&hmp->fs_token);
        return (error);
}
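
#if 0
/*
 * Illustrative sketch (not compiled): the conditions under which the read
 * path above can bypass the buffer-cache copy.  'leaf_on_disk' stands in
 * for hammer_cursor_ondisk(&cursor); this mirrors the isdedupable test
 * combined with the hammer_double_buffer check.
 */
static __inline int
strategy_read_can_direct(int boff, int n, struct buf *bp,
                         int leaf_on_disk, hammer_off_t disk_offset)
{
        return (boff == 0 &&                    /* record starts the buffer */
                n == bp->b_bufsize &&           /* and covers all of it */
                leaf_on_disk &&                 /* data is on-media */
                ((int)disk_offset & HAMMER_BUFMASK) == 0 && /* aligned */
                hammer_double_buffer == 0);     /* double buffering off */
}
#endif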

/*
 * BMAP operation - used to support cluster_read() only.
 *
 * (struct vnode *vp, off_t loffset, off_t *doffsetp, int *runp, int *runb)
 *
 * This routine may return EOPNOTSUPP if the operation is not supported for
 * the specified offset.  The contents of the pointer arguments do not
 * need to be initialized in that case.
 *
 * If a disk address is available and properly aligned return 0 with
 * *doffsetp set to the zone-2 address, and *runp / *runb set appropriately
 * to the run-length relative to that offset.  Callers may assume that
 * *doffsetp is valid if 0 is returned, even if *runp is not sufficiently
 * large, so return EOPNOTSUPP if it is not sufficiently large.
 */
static
int
hammer_vop_bmap(struct vop_bmap_args *ap)
{
        struct hammer_transaction trans;
        hammer_inode_t ip;
        hammer_mount_t hmp;
        struct hammer_cursor cursor;
        hammer_base_elm_t base;
        int64_t rec_offset;
        int64_t ran_end;
        int64_t tmp64;
        int64_t base_offset;
        int64_t base_disk_offset;
        int64_t last_offset;
        hammer_off_t last_disk_offset;
        hammer_off_t disk_offset;
        int rec_len;
        int error;
        int blksize;

        ++hammer_stats_file_iopsr;
        ip = ap->a_vp->v_data;
        hmp = ip->hmp;

        /*
         * We can only BMAP regular files.  We can't BMAP database files,
         * etc.
         */
        if (ip->ino_data.obj_type != HAMMER_OBJTYPE_REGFILE)
                return(EOPNOTSUPP);

        /*
         * bmap is typically called with runp/runb both NULL when used
         * for writing.  We do not support BMAP for writing atm.
         */
        if (ap->a_cmd != BUF_CMD_READ)
                return(EOPNOTSUPP);

        /*
         * Scan the B-Tree to acquire blockmap addresses, then translate
         * to raw addresses.
         */
        lwkt_gettoken(&hmp->fs_token);
        hammer_simple_transaction(&trans, hmp);

        hammer_init_cursor(&trans, &cursor, &ip->cache[1], ip);

        /*
         * Key range (begin and end inclusive) to scan.  Note that the keys
         * stored in the actual records represent BASE+LEN, not BASE.  The
         * first record containing bio_offset will have a key > bio_offset.
         */
        cursor.key_beg.localization = ip->obj_localization |
                                      HAMMER_LOCALIZE_MISC;
        cursor.key_beg.obj_id = ip->obj_id;
        cursor.key_beg.create_tid = 0;
        cursor.key_beg.delete_tid = 0;
        cursor.key_beg.obj_type = 0;
        if (ap->a_runb)
                cursor.key_beg.key = ap->a_loffset - MAXPHYS + 1;
        else
                cursor.key_beg.key = ap->a_loffset + 1;
        if (cursor.key_beg.key < 0)
                cursor.key_beg.key = 0;
        cursor.asof = ip->obj_asof;
        cursor.flags |= HAMMER_CURSOR_ASOF;

        cursor.key_end = cursor.key_beg;
        KKASSERT(ip->ino_data.obj_type == HAMMER_OBJTYPE_REGFILE);

        ran_end = ap->a_loffset + MAXPHYS;
        cursor.key_beg.rec_type = HAMMER_RECTYPE_DATA;
        cursor.key_end.rec_type = HAMMER_RECTYPE_DATA;
        tmp64 = ran_end + MAXPHYS + 1;  /* work-around GCC-4 bug */
        if (tmp64 < ran_end)
                cursor.key_end.key = HAMMER_MAX_KEY;
        else
                cursor.key_end.key = ran_end + MAXPHYS + 1;

        cursor.flags |= HAMMER_CURSOR_END_INCLUSIVE;

        error = hammer_ip_first(&cursor);
        base_offset = last_offset = 0;
        base_disk_offset = last_disk_offset = 0;

        while (error == 0) {
                /*
                 * Get the base file offset of the record.  The key for
                 * data records is (base + bytes) rather than (base).
                 *
                 * NOTE: rec_offset + rec_len may exceed the end-of-file.
                 *       The extra bytes should be zero on-disk and the
                 *       BMAP op should still be ok.
                 */
                base = &cursor.leaf->base;
                rec_offset = base->key - cursor.leaf->data_len;
                rec_len    = cursor.leaf->data_len;

                /*
                 * Incorporate any cached truncation.
                 *
                 * NOTE: Modifications to rec_len based on synthesized
                 *       truncation points remove the guarantee that any
                 *       extended data on disk is zero (since the truncations
                 *       may not have taken place on-media yet).
                 */
                if (ip->flags & HAMMER_INODE_TRUNCATED) {
                        if (hammer_cursor_ondisk(&cursor) ||
                            cursor.iprec->flush_state == HAMMER_FST_FLUSH) {
                                if (ip->trunc_off <= rec_offset)
                                        rec_len = 0;
                                else if (ip->trunc_off < rec_offset + rec_len)
                                        rec_len = (int)(ip->trunc_off -
                                                        rec_offset);
                        }
                }
                if (ip->sync_flags & HAMMER_INODE_TRUNCATED) {
                        if (hammer_cursor_ondisk(&cursor)) {
                                if (ip->sync_trunc_off <= rec_offset) {
                                        rec_len = 0;
                                } else if (ip->sync_trunc_off <
                                           rec_offset + rec_len) {
                                        rec_len = (int)(ip->sync_trunc_off -
                                                        rec_offset);
                                }
                        }
                }

                /*
                 * Accumulate information.  If we have hit a discontiguous
                 * block reset base_offset unless we are already beyond the
                 * requested offset.  If we are, that's it, we stop.
                 */
                if (hammer_cursor_ondisk(&cursor)) {
                        disk_offset = cursor.leaf->data_offset;
                        if (rec_offset != last_offset ||
                            disk_offset != last_disk_offset) {
                                if (rec_offset > ap->a_loffset)
                                        break;
                                base_offset = rec_offset;
                                base_disk_offset = disk_offset;
                        }
                        last_offset = rec_offset + rec_len;
                        last_disk_offset = disk_offset + rec_len;

                        if (hammer_live_dedup)
                                hammer_dedup_cache_add(ip, cursor.leaf);
                }

                error = hammer_ip_next(&cursor);
        }

        if (cursor.node)
                hammer_cache_node(&ip->cache[1], cursor.node);

        hammer_done_cursor(&cursor);
        hammer_done_transaction(&trans);
        lwkt_reltoken(&hmp->fs_token);

        /*
         * If we couldn't find any records or the records we did find were
         * all behind the requested offset, return failure.  A forward
         * truncation can leave a hole w/ no on-disk records.
         */
        if (last_offset == 0 || last_offset < ap->a_loffset)
                return (EOPNOTSUPP);

        /*
         * Figure out the block size at the requested offset and adjust
         * our limits so the cluster_read() does not create inappropriately
         * sized buffer cache buffers.
         */
        blksize = hammer_blocksize(ap->a_loffset);
        if (hammer_blocksize(base_offset) != blksize) {
                base_offset = hammer_blockdemarc(base_offset, ap->a_loffset);
        }
        if (last_offset != ap->a_loffset &&
            hammer_blocksize(last_offset - 1) != blksize) {
                last_offset = hammer_blockdemarc(ap->a_loffset,
                                                 last_offset - 1);
        }

        /*
         * Returning EOPNOTSUPP simply prevents the direct-IO optimization
         * from being used.
         */
        disk_offset = base_disk_offset + (ap->a_loffset - base_offset);

        if (!hammer_is_zone_large_data(disk_offset)) {
                /*
                 * Only large-data zones can be direct-IOd
                 */
                error = EOPNOTSUPP;
        } else if ((disk_offset & HAMMER_BUFMASK) ||
                   (last_offset - ap->a_loffset) < blksize) {
                /*
                 * doffsetp is not aligned or the forward run size does
                 * not cover a whole buffer, disallow the direct I/O.
                 */
                error = EOPNOTSUPP;
        } else {
                /*
                 * We're good.
                 */
                *ap->a_doffsetp = disk_offset;
                if (ap->a_runb) {
                        *ap->a_runb = ap->a_loffset - base_offset;
                        KKASSERT(*ap->a_runb >= 0);
                }
                if (ap->a_runp) {
                        *ap->a_runp = last_offset - ap->a_loffset;
                        KKASSERT(*ap->a_runp >= 0);
                }
                error = 0;
        }
        return (error);
}
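
#if 0
/*
 * Worked example (illustrative only): suppose the scan above found one
 * contiguous extent covering file offsets [65536, 131072) at zone-2
 * offset Z, and cluster_read() asked about loffset 81920:
 *
 *      base_offset = 65536, last_offset = 131072
 *      *a_doffsetp = Z + (81920 - 65536) = Z + 16384
 *      *a_runb     = 81920 - 65536  = 16384    (run before the offset)
 *      *a_runp     = 131072 - 81920 = 49152    (run after the offset)
 *
 * If Z + 16384 were not HAMMER_BUFMASK-aligned, or the forward run were
 * smaller than the block size at loffset, EOPNOTSUPP would be returned
 * and the caller would fall back to a normal strategy read.
 */
#endif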

/*
 * Write to a regular file.   Because this is a strategy call the OS is
 * trying to actually get data onto the media.
 */
static
int
hammer_vop_strategy_write(struct vop_strategy_args *ap)
{
        hammer_record_t record;
        hammer_mount_t hmp;
        hammer_inode_t ip;
        struct bio *bio;
        struct buf *bp;
        int blksize __debugvar;
        int bytes;
        int error;

        bio = ap->a_bio;
        bp = bio->bio_buf;
        ip = ap->a_vp->v_data;
        hmp = ip->hmp;

        blksize = hammer_blocksize(bio->bio_offset);
        KKASSERT(bp->b_bufsize == blksize);

        if (ip->flags & HAMMER_INODE_RO) {
                bp->b_error = EROFS;
                bp->b_flags |= B_ERROR;
                biodone(ap->a_bio);
                return(EROFS);
        }

        lwkt_gettoken(&hmp->fs_token);

        /*
         * Disallow swapcache operation on the vnode buffer if double
         * buffering is enabled, the swapcache will get the data via
         * the block device buffer.
         */
        if (hammer_double_buffer)
                bp->b_flags |= B_NOTMETA;

        /*
         * Interlock with inode destruction (no in-kernel or directory
         * topology visibility).  If we queue new IO while trying to
         * destroy the inode we can deadlock the vtrunc call in
         * hammer_inode_unloadable_check().
         *
         * Besides, there's no point flushing a bp associated with an
         * inode that is being destroyed on-media and has no kernel
         * references.
         */
        if ((ip->flags | ip->sync_flags) &
            (HAMMER_INODE_DELETING|HAMMER_INODE_DELETED)) {
                bp->b_resid = 0;
                biodone(ap->a_bio);
                lwkt_reltoken(&hmp->fs_token);
                return(0);
        }

        /*
         * Reserve space and issue a direct-write from the front-end.
         * NOTE: The direct_io code will hammer_bread/bcopy smaller
         * bits as needed.
         *
         * An in-memory record will be installed to reference the storage
         * until the flusher can get to it.
         *
         * Since we own the high level bio the front-end will not try to
         * do a direct-read until the write completes.
         *
         * NOTE: The only time we do not reserve a full-sized buffers
         * worth of data is if the file is small.  We do not try to
         * allocate a fragment (from the small-data zone) at the end of
         * an otherwise large file as this can lead to wildly separated
         * data.
         */
        KKASSERT((bio->bio_offset & HAMMER_BUFMASK) == 0);
        KKASSERT(bio->bio_offset < ip->ino_data.size);
        if (bio->bio_offset || ip->ino_data.size > HAMMER_HBUFSIZE)
                bytes = bp->b_bufsize;
        else
                bytes = HAMMER_DATA_DOALIGN_WITH(int, ip->ino_data.size);

        record = hammer_ip_add_bulk(ip, bio->bio_offset, bp->b_data,
                                    bytes, &error);
        if (record) {
                /*
                 * B_VFSFLAG1 indicates that a REDO_WRITE entry was
                 * generated in hammer_vop_write().  We must flag the
                 * record so the proper REDO_TERM_WRITE entry is generated
                 * during the flush.
                 */
                if (bp->b_flags & B_VFSFLAG1) {
                        record->flags |= HAMMER_RECF_REDO;
                        bp->b_flags &= ~B_VFSFLAG1;
                }
                if (record->flags & HAMMER_RECF_DEDUPED) {
                        bp->b_resid = 0;
                        hammer_ip_replace_bulk(hmp, record);
                        biodone(ap->a_bio);
                } else {
                        hammer_io_direct_write(hmp, bio, record);
                }
                if (ip->rsv_recs > 1 && hmp->rsv_recs > hammer_limit_recs)
                        hammer_flush_inode(ip, 0);
        } else {
                bp->b_bio2.bio_offset = NOOFFSET;
                bp->b_error = error;
                bp->b_flags |= B_ERROR;
                biodone(ap->a_bio);
        }
        lwkt_reltoken(&hmp->fs_token);
        return(error);
}
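
#if 0
/*
 * Illustrative example (not compiled): the reservation sizing above.  For
 * a buffer at offset 0 of a file whose size is still <= HAMMER_HBUFSIZE,
 * only
 *
 *      bytes = HAMMER_DATA_DOALIGN_WITH(int, ip->ino_data.size)
 *
 * is reserved (the file size rounded up to the data alignment) so small
 * files can live in the small-data zone.  A buffer at a non-zero offset,
 * or one belonging to a larger file, always reserves the full
 * bp->b_bufsize to avoid wildly separated fragments at the end of large
 * files.
 */
#endif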

/*
 * dounlink - disconnect a directory entry
 *
 * XXX whiteout support not really in yet
 */
static int
hammer_dounlink(hammer_transaction_t trans, struct nchandle *nch,
                struct vnode *dvp, struct ucred *cred,
                int flags, int isdir)
{
        struct namecache *ncp;
        hammer_inode_t dip;
        hammer_inode_t ip;
        hammer_mount_t hmp;
        struct hammer_cursor cursor;
        int64_t namekey;
        uint32_t max_iterations;
        int nlen, error;

        /*
         * Calculate the namekey and setup the key range for the scan.  This
         * works kinda like a chained hash table where the lower 32 bits
         * of the namekey synthesize the chain.
         *
         * The key range is inclusive of both key_beg and key_end.
         */
        dip = VTOI(dvp);
        ncp = nch->ncp;
        hmp = dip->hmp;

        if (dip->flags & HAMMER_INODE_RO)
                return (EROFS);

        namekey = hammer_direntry_namekey(dip, ncp->nc_name, ncp->nc_nlen,
                                          &max_iterations);
retry:
        hammer_init_cursor(trans, &cursor, &dip->cache[1], dip);
        cursor.key_beg.localization = dip->obj_localization |
                                      hammer_dir_localization(dip);
        cursor.key_beg.obj_id = dip->obj_id;
        cursor.key_beg.key = namekey;
        cursor.key_beg.create_tid = 0;
        cursor.key_beg.delete_tid = 0;
        cursor.key_beg.rec_type = HAMMER_RECTYPE_DIRENTRY;
        cursor.key_beg.obj_type = 0;

        cursor.key_end = cursor.key_beg;
        cursor.key_end.key += max_iterations;
        cursor.asof = dip->obj_asof;
        cursor.flags |= HAMMER_CURSOR_END_INCLUSIVE | HAMMER_CURSOR_ASOF;
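
#if 0
        /*
         * Worked example (illustrative only) of the chained-hash keying
         * set up above: hammer_direntry_namekey() hashes the component
         * name into namekey and returns the size of the collision chain
         * in max_iterations, so the scan window is
         *
         *      key_beg.key = namekey
         *      key_end.key = namekey + max_iterations  (inclusive)
         *
         * Every entry whose name hashes to the same upper bits lands in
         * this window; the bcmp() in the loop below selects the exact
         * match.
         */
#endif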

        /*
         * Scan all matching records (the chain), locate the one matching
         * the requested path component.  info->last_error contains the
         * error code on search termination and could be 0, ENOENT, or
         * something else.
         *
         * The hammer_ip_*() functions merge in-memory records with on-disk
         * records for the purposes of the search.
         */
        error = hammer_ip_first(&cursor);

        while (error == 0) {
                error = hammer_ip_resolve_data(&cursor);
                if (error)
                        break;
                nlen = cursor.leaf->data_len - HAMMER_ENTRY_NAME_OFF;
                KKASSERT(nlen > 0);
                if (ncp->nc_nlen == nlen &&
                    bcmp(ncp->nc_name, cursor.data->entry.name, nlen) == 0) {
                        break;
                }
                error = hammer_ip_next(&cursor);
        }

        /*
         * If all is ok we have to get the inode so we can adjust nlinks.
         * To avoid a deadlock with the flusher we must release the inode
         * lock on the directory when acquiring the inode for the entry.
         *
         * If the target is a directory, it must be empty.
         */
        if (error == 0) {
                hammer_unlock(&cursor.ip->lock);
                ip = hammer_get_inode(trans, dip, cursor.data->entry.obj_id,
                                      hmp->asof,
                                      cursor.data->entry.localization,
                                      0, &error);
                hammer_lock_sh(&cursor.ip->lock);
                if (error == ENOENT) {
                        hkprintf("WARNING: Removing dirent w/missing inode "
                                "\"%s\"\n"
                                "\tobj_id = %016jx\n",
                                ncp->nc_name,
                                (intmax_t)cursor.data->entry.obj_id);
                        error = 0;
                }

                /*
                 * If isdir >= 0 we validate that the entry is or is not a
                 * directory.  If isdir < 0 we don't care.
                 */
                if (error == 0 && isdir >= 0 && ip) {
                        if (isdir &&
                            ip->ino_data.obj_type !=
                            HAMMER_OBJTYPE_DIRECTORY) {
                                error = ENOTDIR;
                        } else if (isdir == 0 &&
                            ip->ino_data.obj_type ==
                            HAMMER_OBJTYPE_DIRECTORY) {
                                error = EISDIR;
                        }
                }

                /*
                 * If we are trying to remove a directory the directory must
                 * be empty.
                 *
                 * The check directory code can loop and deadlock/retry.  Our
                 * own cursor's node locks must be released to avoid a 3-way
                 * deadlock with the flusher if the check directory code
                 * blocks.
                 *
                 * If any changes whatsoever have been made to the cursor
                 * set EDEADLK and retry.
                 *
                 * WARNING: See warnings in hammer_unlock_cursor()
                 *          function.
                 */
                if (error == 0 && ip && ip->ino_data.obj_type ==
                                        HAMMER_OBJTYPE_DIRECTORY) {
                        hammer_unlock_cursor(&cursor);
                        error = hammer_ip_check_directory_empty(trans, ip);
                        hammer_lock_cursor(&cursor);
                        if (cursor.flags & HAMMER_CURSOR_RETEST) {
                                hkprintf("Warning: avoided deadlock "
                                        "on rmdir '%s'\n",
                                        ncp->nc_name);
                                error = EDEADLK;
                        }
                }

                /*
                 * Delete the directory entry.
                 *
                 * WARNING: hammer_ip_del_direntry() may have to terminate
                 * the cursor to avoid a deadlock.  It is ok to call
                 * hammer_done_cursor() twice.
                 */
                if (error == 0) {
                        error = hammer_ip_del_direntry(trans, &cursor,
                                                       dip, ip);
                }
                hammer_done_cursor(&cursor);
                if (error == 0) {
                        /*
                         * Tell the namecache that we are now unlinked.
                         */
                        cache_unlink(nch);

                        /*
                         * NOTE: ip->vp, if non-NULL, cannot be directly
                         *       referenced without formally acquiring the
                         *       vp since the vp might have zero refs on it,
                         *       or in the middle of a reclaim, etc.
                         *
                         * NOTE: The cache_setunresolved() can rip the vp
                         *       out from under us since the vp may not have
                         *       any refs, in which case ip->vp will be NULL
                         *       from the outset.
                         */
                        while (ip && ip->vp) {
                                struct vnode *vp;

                                error = hammer_get_vnode(ip, &vp);
                                if (error == 0 && vp) {
                                        vn_unlock(vp);
                                        hammer_knote(ip->vp, NOTE_DELETE);
#if 0
                                        /*
                                         * Don't do this, it can deadlock
                                         * on concurrent rm's of hardlinks.
                                         * Shouldn't be needed any more.
                                         */
                                        cache_inval_vp(ip->vp, CINV_DESTROY);
#endif
                                        vrele(vp);
                                        break;
                                }
                                hdkprintf("ip/vp race1 avoided\n");
                        }
                }
                if (ip)
                        hammer_rel_inode(ip, 0);
        } else {
                hammer_done_cursor(&cursor);
        }
        if (error == EDEADLK)
                goto retry;

        return (error);
}

/************************************************************************
 *                          FIFO AND SPECFS OPS                         *
 ************************************************************************/

static int
hammer_vop_fifoclose (struct vop_close_args *ap)
{
        /* XXX update itimes */
        return (VOCALL(&fifo_vnode_vops, &ap->a_head));
}

static int
hammer_vop_fiforead (struct vop_read_args *ap)
{
        int error;

        error = VOCALL(&fifo_vnode_vops, &ap->a_head);
        /* XXX update access time */
        return (error);
}

static int
hammer_vop_fifowrite (struct vop_write_args *ap)
{
        int error;

        error = VOCALL(&fifo_vnode_vops, &ap->a_head);
        /* XXX update access time */
        return (error);
}

static
int
hammer_vop_fifokqfilter(struct vop_kqfilter_args *ap)
{
        int error;

        error = VOCALL(&fifo_vnode_vops, &ap->a_head);
        if (error)
                error = hammer_vop_kqfilter(ap);
        return(error);
}

/************************************************************************
 *                          KQFILTER OPS                                *
 ************************************************************************/

static void filt_hammerdetach(struct knote *kn);
static int filt_hammerread(struct knote *kn, long hint);
static int filt_hammerwrite(struct knote *kn, long hint);
static int filt_hammervnode(struct knote *kn, long hint);

static struct filterops hammerread_filtops =
        { FILTEROP_ISFD | FILTEROP_MPSAFE,
          NULL, filt_hammerdetach, filt_hammerread };
static struct filterops hammerwrite_filtops =
        { FILTEROP_ISFD | FILTEROP_MPSAFE,
          NULL, filt_hammerdetach, filt_hammerwrite };
static struct filterops hammervnode_filtops =
        { FILTEROP_ISFD | FILTEROP_MPSAFE,
          NULL, filt_hammerdetach, filt_hammervnode };

static
int
hammer_vop_kqfilter(struct vop_kqfilter_args *ap)
{
        struct vnode *vp = ap->a_vp;
        struct knote *kn = ap->a_kn;

        switch (kn->kn_filter) {
        case EVFILT_READ:
                kn->kn_fop = &hammerread_filtops;
                break;
        case EVFILT_WRITE:
                kn->kn_fop = &hammerwrite_filtops;
                break;
        case EVFILT_VNODE:
                kn->kn_fop = &hammervnode_filtops;
                break;
        default:
                return (EOPNOTSUPP);
        }

        kn->kn_hook = (caddr_t)vp;

        knote_insert(&vp->v_pollinfo.vpi_kqinfo.ki_note, kn);

        return(0);
}

static void
filt_hammerdetach(struct knote *kn)
{
        struct vnode *vp = (void *)kn->kn_hook;

        knote_remove(&vp->v_pollinfo.vpi_kqinfo.ki_note, kn);
}

static int
filt_hammerread(struct knote *kn, long hint)
{
        struct vnode *vp = (void *)kn->kn_hook;
        hammer_inode_t ip = VTOI(vp);
        hammer_mount_t hmp = ip->hmp;
        int64_t off;

        if (hint == NOTE_REVOKE) {
                kn->kn_flags |= (EV_EOF | EV_NODATA | EV_ONESHOT);
                return(1);
        }
        lwkt_gettoken(&hmp->fs_token);  /* XXX use per-ip-token */
        off = ip->ino_data.size - kn->kn_fp->f_offset;
        kn->kn_data = (off < INTPTR_MAX) ? off : INTPTR_MAX;
        lwkt_reltoken(&hmp->fs_token);
        if (kn->kn_sfflags & NOTE_OLDAPI)
                return(1);
        return (kn->kn_data != 0);
}

static int
filt_hammerwrite(struct knote *kn, long hint)
{
        if (hint == NOTE_REVOKE)
                kn->kn_flags |= (EV_EOF | EV_NODATA | EV_ONESHOT);
        kn->kn_data = 0;
        return (1);
}

static int
filt_hammervnode(struct knote *kn, long hint)
{
        if (kn->kn_sfflags & hint)
                kn->kn_fflags |= hint;
        if (hint == NOTE_REVOKE) {
                kn->kn_flags |= (EV_EOF | EV_NODATA);
                return (1);
        }
        return (kn->kn_fflags != 0);
}
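
#if 0
/*
 * Illustrative example (not compiled): for a 10000-byte file whose
 * descriptor f_offset sits at 4096, filt_hammerread() above reports
 *
 *      kn->kn_data = 10000 - 4096 = 5904
 *
 * i.e. the number of bytes a read would return, clamped to INTPTR_MAX
 * for very large files.
 */
#endif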