/*
 * Copyright (c) 2007-2008 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $DragonFly: src/sys/vfs/hammer/hammer_vnops.c,v 1.102 2008/10/16 17:24:16 dillon Exp $
 */
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/fcntl.h>
#include <sys/namecache.h>
#include <sys/vnode.h>
#include <sys/lockf.h>
#include <sys/event.h>
#include <sys/dirent.h>
#include <vm/vm_extern.h>
#include <vfs/fifofs/fifo.h>

#include <sys/mplock2.h>

#include "hammer.h"
/*static int hammer_vop_vnoperate(struct vop_generic_args *);*/
static int hammer_vop_fsync(struct vop_fsync_args *);
static int hammer_vop_read(struct vop_read_args *);
static int hammer_vop_write(struct vop_write_args *);
static int hammer_vop_access(struct vop_access_args *);
static int hammer_vop_advlock(struct vop_advlock_args *);
static int hammer_vop_close(struct vop_close_args *);
static int hammer_vop_ncreate(struct vop_ncreate_args *);
static int hammer_vop_getattr(struct vop_getattr_args *);
static int hammer_vop_nresolve(struct vop_nresolve_args *);
static int hammer_vop_nlookupdotdot(struct vop_nlookupdotdot_args *);
static int hammer_vop_nlink(struct vop_nlink_args *);
static int hammer_vop_nmkdir(struct vop_nmkdir_args *);
static int hammer_vop_nmknod(struct vop_nmknod_args *);
static int hammer_vop_open(struct vop_open_args *);
static int hammer_vop_print(struct vop_print_args *);
static int hammer_vop_readdir(struct vop_readdir_args *);
static int hammer_vop_readlink(struct vop_readlink_args *);
static int hammer_vop_nremove(struct vop_nremove_args *);
static int hammer_vop_nrename(struct vop_nrename_args *);
static int hammer_vop_nrmdir(struct vop_nrmdir_args *);
static int hammer_vop_markatime(struct vop_markatime_args *);
static int hammer_vop_setattr(struct vop_setattr_args *);
static int hammer_vop_strategy(struct vop_strategy_args *);
static int hammer_vop_bmap(struct vop_bmap_args *ap);
static int hammer_vop_nsymlink(struct vop_nsymlink_args *);
static int hammer_vop_nwhiteout(struct vop_nwhiteout_args *);
static int hammer_vop_ioctl(struct vop_ioctl_args *);
static int hammer_vop_mountctl(struct vop_mountctl_args *);
static int hammer_vop_kqfilter (struct vop_kqfilter_args *);

static int hammer_vop_fifoclose (struct vop_close_args *);
static int hammer_vop_fiforead (struct vop_read_args *);
static int hammer_vop_fifowrite (struct vop_write_args *);
static int hammer_vop_fifokqfilter (struct vop_kqfilter_args *);
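
/*
 * Vnode operations vectors.  hammer_vnode_vops covers regular files and
 * directories, hammer_spec_vops covers device special files, and
 * hammer_fifo_vops layers HAMMER's attribute handling on top of the
 * generic fifofs operations.  Entries not listed here fall through to
 * the .vop_default handler.
 */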
struct vop_ops hammer_vnode_vops = {
        .vop_default =          vop_defaultop,
        .vop_fsync =            hammer_vop_fsync,
        .vop_getpages =         vop_stdgetpages,
        .vop_putpages =         vop_stdputpages,
        .vop_read =             hammer_vop_read,
        .vop_write =            hammer_vop_write,
        .vop_access =           hammer_vop_access,
        .vop_advlock =          hammer_vop_advlock,
        .vop_close =            hammer_vop_close,
        .vop_ncreate =          hammer_vop_ncreate,
        .vop_getattr =          hammer_vop_getattr,
        .vop_inactive =         hammer_vop_inactive,
        .vop_reclaim =          hammer_vop_reclaim,
        .vop_nresolve =         hammer_vop_nresolve,
        .vop_nlookupdotdot =    hammer_vop_nlookupdotdot,
        .vop_nlink =            hammer_vop_nlink,
        .vop_nmkdir =           hammer_vop_nmkdir,
        .vop_nmknod =           hammer_vop_nmknod,
        .vop_open =             hammer_vop_open,
        .vop_pathconf =         vop_stdpathconf,
        .vop_print =            hammer_vop_print,
        .vop_readdir =          hammer_vop_readdir,
        .vop_readlink =         hammer_vop_readlink,
        .vop_nremove =          hammer_vop_nremove,
        .vop_nrename =          hammer_vop_nrename,
        .vop_nrmdir =           hammer_vop_nrmdir,
        .vop_markatime =        hammer_vop_markatime,
        .vop_setattr =          hammer_vop_setattr,
        .vop_bmap =             hammer_vop_bmap,
        .vop_strategy =         hammer_vop_strategy,
        .vop_nsymlink =         hammer_vop_nsymlink,
        .vop_nwhiteout =        hammer_vop_nwhiteout,
        .vop_ioctl =            hammer_vop_ioctl,
        .vop_mountctl =         hammer_vop_mountctl,
        .vop_kqfilter =         hammer_vop_kqfilter
};
struct vop_ops hammer_spec_vops = {
        .vop_default =          vop_defaultop,
        .vop_fsync =            hammer_vop_fsync,
        .vop_read =             vop_stdnoread,
        .vop_write =            vop_stdnowrite,
        .vop_access =           hammer_vop_access,
        .vop_close =            hammer_vop_close,
        .vop_markatime =        hammer_vop_markatime,
        .vop_getattr =          hammer_vop_getattr,
        .vop_inactive =         hammer_vop_inactive,
        .vop_reclaim =          hammer_vop_reclaim,
        .vop_setattr =          hammer_vop_setattr
};
struct vop_ops hammer_fifo_vops = {
        .vop_default =          fifo_vnoperate,
        .vop_fsync =            hammer_vop_fsync,
        .vop_read =             hammer_vop_fiforead,
        .vop_write =            hammer_vop_fifowrite,
        .vop_access =           hammer_vop_access,
        .vop_close =            hammer_vop_fifoclose,
        .vop_markatime =        hammer_vop_markatime,
        .vop_getattr =          hammer_vop_getattr,
        .vop_inactive =         hammer_vop_inactive,
        .vop_reclaim =          hammer_vop_reclaim,
        .vop_setattr =          hammer_vop_setattr,
        .vop_kqfilter =         hammer_vop_fifokqfilter
};
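
/*
 * Deliver a kqueue notification (NOTE_WRITE, NOTE_ATTRIB, NOTE_EXTEND,
 * etc) to any kevent watchers registered on the vnode's klist.
 */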
static __inline
void
hammer_knote(struct vnode *vp, int flags)
{
        if (flags)
                KNOTE(&vp->v_pollinfo.vpi_selinfo.si_note, flags);
}
#ifdef DEBUG_TRUNCATE
struct hammer_inode *HammerTruncIp;
#endif
static int hammer_dounlink(hammer_transaction_t trans, struct nchandle *nch,
                        struct vnode *dvp, struct ucred *cred,
                        int flags, int isdir);
static int hammer_vop_strategy_read(struct vop_strategy_args *ap);
static int hammer_vop_strategy_write(struct vop_strategy_args *ap);
#if 0
static
int
hammer_vop_vnoperate(struct vop_generic_args *ap)
{
        return (VOCALL(&hammer_vnode_vops, ap));
}
#endif
/*
 * hammer_vop_fsync { vp, waitfor }
 *
 * fsync() an inode to disk and wait for it to be completely committed
 * such that the information would not be undone if a crash occurred after
 * return.
 *
 * NOTE: HAMMER's fsync()'s are going to remain expensive until we implement
 *       a REDO log.  A sysctl is provided to relax HAMMER's fsync()
 *       operation.
 *
 *       Ultimately the combination of a REDO log and use of fast storage
 *       to front-end cluster caches will make fsync fast, but it isn't
 *       here yet.  And, in any case, we need real transactional
 *       all-or-nothing features which are not restricted to a single file.
 */
static
int
hammer_vop_fsync(struct vop_fsync_args *ap)
{
        hammer_inode_t ip = VTOI(ap->a_vp);
        hammer_mount_t hmp = ip->hmp;
        int waitfor = ap->a_waitfor;
        int mode;

        /*
         * Fsync rule relaxation (default is either full synchronous flush
         * or REDO semantics with synchronous flush).
         */
        if (ap->a_flags & VOP_FSYNC_SYSCALL) {
                switch(hammer_fsync_mode) {
                case 0:
mode0:
                        /* no REDO, full synchronous flush */
                        goto skip;
                case 1:
mode1:
                        /* no REDO, full asynchronous flush */
                        if (waitfor == MNT_WAIT)
                                waitfor = MNT_NOWAIT;
                        goto skip;
                case 2:
                        /* REDO semantics, synchronous flush */
                        if (hmp->version < HAMMER_VOL_VERSION_FOUR)
                                goto mode0;
                        mode = HAMMER_FLUSH_UNDOS_AUTO;
                        break;
                case 3:
                        /* REDO semantics, relaxed asynchronous flush */
                        if (hmp->version < HAMMER_VOL_VERSION_FOUR)
                                goto mode1;
                        mode = HAMMER_FLUSH_UNDOS_RELAXED;
                        if (waitfor == MNT_WAIT)
                                waitfor = MNT_NOWAIT;
                        break;
                case 4:
                        /* ignore the fsync() system call */
                        return(0);
                default:
                        /* we have to do something */
                        mode = HAMMER_FLUSH_UNDOS_RELAXED;
                        if (waitfor == MNT_WAIT)
                                waitfor = MNT_NOWAIT;
                        break;
                }

                /*
                 * Fast fsync only needs to flush the UNDO/REDO fifo if
                 * HAMMER_INODE_REDO is non-zero and the only modifications
                 * made to the file are write or write-extends.
                 */
                if ((ip->flags & HAMMER_INODE_REDO) &&
                    (ip->flags & HAMMER_INODE_MODMASK_NOREDO) == 0) {
                        ++hammer_count_fsyncs;
                        hammer_flusher_flush_undos(hmp, mode);
                        return(0);
                }

                /*
                 * REDO is enabled by fsync(), the idea being we really only
                 * want to lay down REDO records when programs are using
                 * fsync() heavily.  The first fsync() on the file starts
                 * the gravy train going and later fsync()s keep it hot by
                 * resetting the redo_count.
                 *
                 * We weren't running REDOs before now so we have to fall
                 * through and do a full fsync of what we have.
                 */
                if (hmp->version >= HAMMER_VOL_VERSION_FOUR) {
                        ip->flags |= HAMMER_INODE_REDO;
                        ip->redo_count = 0;
                }
        }
skip:

        /*
         * Do a full flush sequence.
         */
        ++hammer_count_fsyncs;
        vfsync(ap->a_vp, waitfor, 1, NULL, NULL);
        hammer_flush_inode(ip, HAMMER_FLUSH_SIGNAL);
        if (waitfor == MNT_WAIT) {
                vn_unlock(ap->a_vp);
                hammer_wait_inode(ip);
                vn_lock(ap->a_vp, LK_EXCLUSIVE | LK_RETRY);
        }
        return (ip->error);
}
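
/*
 * For example, on a version-4 (or later) volume an administrator who can
 * tolerate losing the last few seconds of data on a crash might relax
 * fsync() semantics with something like (assuming the usual vfs.hammer
 * sysctl naming for hammer_fsync_mode above):
 *
 *      sysctl vfs.hammer.fsync_mode=3
 *
 * which selects REDO semantics with a relaxed asynchronous flush.
 */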
/*
 * hammer_vop_read { vp, uio, ioflag, cred }
 */
static
int
hammer_vop_read(struct vop_read_args *ap)
{
        struct hammer_transaction trans;
        hammer_inode_t ip;
        off_t offset;
        struct buf *bp;
        struct uio *uio;
        int error;
        int n;
        int seqcount;
        int ioseqcount;
        int blksize;
        int got_mplock;
        int bigread;

        if (ap->a_vp->v_type != VREG)
                return (EINVAL);
        ip = VTOI(ap->a_vp);
        error = 0;
        uio = ap->a_uio;

        /*
         * Allow the UIO's size to override the sequential heuristic.
         */
        blksize = hammer_blocksize(uio->uio_offset);
        seqcount = (uio->uio_resid + (blksize - 1)) / blksize;
        ioseqcount = ap->a_ioflag >> 16;
        if (seqcount < ioseqcount)
                seqcount = ioseqcount;

        /*
         * Temporary hack until more of HAMMER can be made MPSAFE.
         */
#ifdef SMP
        if (curthread->td_mpcount) {
                got_mplock = -1;
                hammer_start_transaction(&trans, ip->hmp);
        } else {
                got_mplock = 0;
        }
#else
        hammer_start_transaction(&trans, ip->hmp);
        got_mplock = -1;
#endif

        /*
         * If reading or writing a huge amount of data we have to break
         * atomicity and allow the operation to be interrupted by a signal
         * or it can DOS the machine.
         */
        bigread = (uio->uio_resid > 100 * 1024 * 1024);

        /*
         * Access the data typically in HAMMER_BUFSIZE blocks via the
         * buffer cache, but HAMMER may use a variable block size based
         * on the offset.
         *
         * XXX Temporary hack, delay the start transaction while we remain
         *     MPSAFE.  NOTE: ino_data.size cannot change while vnode is
         *     locked.
         */
        while (uio->uio_resid > 0 && uio->uio_offset < ip->ino_data.size) {
                int64_t base_offset;
                int64_t file_limit;

                blksize = hammer_blocksize(uio->uio_offset);
                offset = (int)uio->uio_offset & (blksize - 1);
                base_offset = uio->uio_offset - offset;

                if (bigread && (error = hammer_signal_check(ip->hmp)) != 0)
                        break;

                /*
                 * MPSAFE
                 */
                bp = getcacheblk(ap->a_vp, base_offset);
                if (bp) {
                        error = 0;
                        goto skip;
                }

                /*
                 * MPUNSAFE
                 */
                if (got_mplock == 0) {
                        got_mplock = 1;
                        get_mplock();
                        hammer_start_transaction(&trans, ip->hmp);
                }

                if (hammer_cluster_enable) {
                        /*
                         * Use file_limit to prevent cluster_read() from
                         * creating buffers of the wrong block size past
                         * the demarc.
                         */
                        file_limit = ip->ino_data.size;
                        if (base_offset < HAMMER_XDEMARC &&
                            file_limit > HAMMER_XDEMARC) {
                                file_limit = HAMMER_XDEMARC;
                        }
                        error = cluster_read(ap->a_vp,
                                             file_limit, base_offset,
                                             blksize, uio->uio_resid,
                                             seqcount * BKVASIZE, &bp);
                } else {
                        error = bread(ap->a_vp, base_offset, blksize, &bp);
                }
                if (error) {
                        brelse(bp);
                        break;
                }
skip:

                /* bp->b_flags |= B_CLUSTEROK; temporarily disabled */
                n = blksize - offset;
                if (n > uio->uio_resid)
                        n = uio->uio_resid;
                if (n > ip->ino_data.size - uio->uio_offset)
                        n = (int)(ip->ino_data.size - uio->uio_offset);
                error = uiomove((char *)bp->b_data + offset, n, uio);

                /* data has a lower priority than meta-data */
                bp->b_flags |= B_AGE;
                bqrelse(bp);
                if (error)
                        break;
                hammer_stats_file_read += n;
        }

        /*
         * XXX only update the atime if we had to get the MP lock.
         * XXX hack hack hack, fixme.
         */
        if (got_mplock) {
                ip = VTOI(ap->a_vp);
                if ((ip->flags & HAMMER_INODE_RO) == 0 &&
                    (ip->hmp->mp->mnt_flag & MNT_NOATIME) == 0) {
                        ip->ino_data.atime = trans.time;
                        hammer_modify_inode(ip, HAMMER_INODE_ATIME);
                }
                hammer_done_transaction(&trans);
                if (got_mplock > 0)
                        rel_mplock();
        }
        return (error);
}
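
/*
 * Rough illustration of the variable block size handling above: offsets
 * below HAMMER_XDEMARC use small (HAMMER_BUFSIZE, 16KB) buffers while
 * offsets at or beyond the demarc use large (HAMMER_XBUFSIZE, 64KB)
 * buffers, so a read spanning the demarc is deliberately clipped via
 * file_limit to keep cluster_read() from instantiating wrong-sized
 * buffers on the far side of the boundary.
 */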
/*
 * hammer_vop_write { vp, uio, ioflag, cred }
 */
static
int
hammer_vop_write(struct vop_write_args *ap)
{
        struct hammer_transaction trans;
        struct hammer_inode *ip;
        hammer_mount_t hmp;
        struct uio *uio;
        int offset;
        off_t base_offset;
        struct buf *bp;
        int kflags;
        int error;
        int n;
        int flags;
        int seqcount;
        int bigwrite;

        if (ap->a_vp->v_type != VREG)
                return (EINVAL);
        ip = VTOI(ap->a_vp);
        hmp = ip->hmp;
        uio = ap->a_uio;
        error = 0;
        kflags = 0;
        seqcount = ap->a_ioflag >> 16;

        if (ip->flags & HAMMER_INODE_RO)
                return (EROFS);

        /*
         * Create a transaction to cover the operations we perform.
         */
        hammer_start_transaction(&trans, hmp);

        /*
         * Check append mode
         */
        if (ap->a_ioflag & IO_APPEND)
                uio->uio_offset = ip->ino_data.size;

        /*
         * Check for illegal write offsets.  Valid range is 0...2^63-1.
         *
         * NOTE: the base_off assignment is required to work around what
         * I consider to be a GCC-4 optimization bug.
         */
        if (uio->uio_offset < 0) {
                hammer_done_transaction(&trans);
                return (EFBIG);
        }
        base_offset = uio->uio_offset + uio->uio_resid; /* work around gcc-4 */
        if (uio->uio_resid > 0 && base_offset <= uio->uio_offset) {
                hammer_done_transaction(&trans);
                return (EFBIG);
        }

        /*
         * If reading or writing a huge amount of data we have to break
         * atomicity and allow the operation to be interrupted by a signal
         * or it can DOS the machine.
         *
         * Preset redo_count so we stop generating REDOs earlier if the
         * limit is exceeded.
         */
        bigwrite = (uio->uio_resid > 100 * 1024 * 1024);
        if ((ip->flags & HAMMER_INODE_REDO) &&
            ip->redo_count < hammer_limit_redo) {
                ip->redo_count += uio->uio_resid;
        }

        /*
         * Access the data typically in HAMMER_BUFSIZE blocks via the
         * buffer cache, but HAMMER may use a variable block size based
         * on the offset.
         */
        while (uio->uio_resid > 0) {
                int fixsize = 0;
                int blksize;
                int blkmask;
                int trivial;
                off_t nsize;

                if ((error = hammer_checkspace(hmp, HAMMER_CHKSPC_WRITE)) != 0)
                        break;
                if (bigwrite && (error = hammer_signal_check(hmp)) != 0)
                        break;

                blksize = hammer_blocksize(uio->uio_offset);

                /*
                 * Do not allow HAMMER to blow out the buffer cache.  Very
                 * large UIOs can lockout other processes due to bwillwrite()
                 * mechanics.
                 *
                 * The hammer inode is not locked during these operations.
                 * The vnode is locked which can interfere with the pageout
                 * daemon for non-UIO_NOCOPY writes but should not interfere
                 * with the buffer cache.  Even so, we cannot afford to
                 * allow the pageout daemon to build up too many dirty buffer
                 * cache buffers.
                 *
                 * Only call this if we aren't being recursively called from
                 * a virtual disk device (vn), else we may deadlock.
                 */
                if ((ap->a_ioflag & IO_RECURSE) == 0)
                        bwillwrite(blksize);

                /*
                 * Control the number of pending records associated with
                 * this inode.  If too many have accumulated start a
                 * flush.  Try to maintain a pipeline with the flusher.
                 */
                if (ip->rsv_recs >= hammer_limit_inode_recs) {
                        hammer_flush_inode(ip, HAMMER_FLUSH_SIGNAL);
                }
                if (ip->rsv_recs >= hammer_limit_inode_recs * 2) {
                        while (ip->rsv_recs >= hammer_limit_inode_recs) {
                                tsleep(&ip->rsv_recs, 0, "hmrwww", hz);
                        }
                        hammer_flush_inode(ip, HAMMER_FLUSH_SIGNAL);
                }

#if 0
                /*
                 * Do not allow HAMMER to blow out system memory by
                 * accumulating too many records.   Records are so well
                 * decoupled from the buffer cache that it is possible
                 * for userland to push data out to the media via
                 * direct-write, but build up the records queued to the
                 * backend faster than the backend can flush them out.
                 * HAMMER has hit its write limit but the frontend has
                 * no pushback to slow it down.
                 */
                if (hmp->rsv_recs > hammer_limit_recs / 2) {
                        int delta;

                        /*
                         * Get the inode on the flush list
                         */
                        if (ip->rsv_recs >= 64)
                                hammer_flush_inode(ip, HAMMER_FLUSH_SIGNAL);
                        else if (ip->rsv_recs >= 16)
                                hammer_flush_inode(ip, 0);

                        /*
                         * Keep the flusher going if the system keeps
                         * queueing records.
                         */
                        delta = hmp->count_newrecords -
                                hmp->last_newrecords;
                        if (delta < 0 || delta > hammer_limit_recs / 2) {
                                hmp->last_newrecords = hmp->count_newrecords;
                                hammer_sync_hmp(hmp, MNT_NOWAIT);
                        }

                        /*
                         * If we have gotten behind start slowing
                         * down the writers.
                         */
                        delta = (hmp->rsv_recs - hammer_limit_recs) *
                                hz / hammer_limit_recs;
                        if (delta > 0)
                                tsleep(&trans, 0, "hmrslo", delta);
                }
#endif

                /*
                 * Calculate the blocksize at the current offset and figure
                 * out how much we can actually write.
                 */
                blkmask = blksize - 1;
                offset = (int)uio->uio_offset & blkmask;
                base_offset = uio->uio_offset & ~(int64_t)blkmask;
                n = blksize - offset;
                if (n > uio->uio_resid)
                        n = uio->uio_resid;
                nsize = uio->uio_offset + n;
                if (nsize > ip->ino_data.size) {
                        if (uio->uio_offset > ip->ino_data.size)
                                trivial = 0;
                        else
                                trivial = 1;
                        nvextendbuf(ap->a_vp,
                                    ip->ino_data.size,
                                    nsize,
                                    hammer_blocksize(ip->ino_data.size),
                                    hammer_blocksize(nsize),
                                    hammer_blockoff(ip->ino_data.size),
                                    hammer_blockoff(nsize),
                                    trivial);
                        fixsize = 1;
                        kflags |= NOTE_EXTEND;
                }

                if (uio->uio_segflg == UIO_NOCOPY) {
                        /*
                         * Issuing a write with the same data backing the
                         * buffer.  Instantiate the buffer to collect the
                         * backing vm pages, then read-in any missing bits.
                         *
                         * This case is used by vop_stdputpages().
                         */
                        bp = getblk(ap->a_vp, base_offset,
                                    blksize, GETBLK_BHEAVY, 0);
                        if ((bp->b_flags & B_CACHE) == 0) {
                                bqrelse(bp);
                                error = bread(ap->a_vp, base_offset,
                                              blksize, &bp);
                        }
                } else if (offset == 0 && uio->uio_resid >= blksize) {
                        /*
                         * Even though we are entirely overwriting the buffer
                         * we may still have to zero it out to avoid a
                         * mmap/write visibility issue.
                         */
                        bp = getblk(ap->a_vp, base_offset, blksize, GETBLK_BHEAVY, 0);
                        if ((bp->b_flags & B_CACHE) == 0)
                                vfs_bio_clrbuf(bp);
                } else if (base_offset >= ip->ino_data.size) {
                        /*
                         * If the base offset of the buffer is beyond the
                         * file EOF, we don't have to issue a read.
                         */
                        bp = getblk(ap->a_vp, base_offset,
                                    blksize, GETBLK_BHEAVY, 0);
                        vfs_bio_clrbuf(bp);
                } else {
                        /*
                         * Partial overwrite, read in any missing bits then
                         * replace the portion being written.
                         */
                        error = bread(ap->a_vp, base_offset, blksize, &bp);
                        if (error == 0)
                                bheavy(bp);
                }
                if (error == 0)
                        error = uiomove(bp->b_data + offset, n, uio);

                /*
                 * Generate REDO records if enabled and redo_count will not
                 * exceed the limit.
                 *
                 * If redo_count exceeds the limit we stop generating records
                 * and clear HAMMER_INODE_REDO.  This will cause the next
                 * fsync() to do a full meta-data sync instead of just an
                 * UNDO/REDO fifo update.
                 *
                 * When clearing HAMMER_INODE_REDO any pre-existing REDOs
                 * will still be tracked.  The tracks will be terminated
                 * when the related meta-data (including possible data
                 * modifications which are not tracked via REDO) is
                 * flushed.
                 */
                if ((ip->flags & HAMMER_INODE_REDO) && error == 0) {
                        if (ip->redo_count < hammer_limit_redo) {
                                bp->b_flags |= B_VFSFLAG1;
                                error = hammer_generate_redo(&trans, ip,
                                                     base_offset + offset,
                                                     HAMMER_REDO_WRITE,
                                                     bp->b_data + offset,
                                                     (size_t)n);
                        } else {
                                ip->flags &= ~HAMMER_INODE_REDO;
                        }
                }

                /*
                 * If we screwed up we have to undo any VM size changes we
                 * made.
                 */
                if (error) {
                        brelse(bp);
                        if (fixsize) {
                                nvtruncbuf(ap->a_vp, ip->ino_data.size,
                                          hammer_blocksize(ip->ino_data.size),
                                          hammer_blockoff(ip->ino_data.size));
                        }
                        break;
                }
                kflags |= NOTE_WRITE;
                hammer_stats_file_write += n;
                /* bp->b_flags |= B_CLUSTEROK; temporarily disabled */
                if (ip->ino_data.size < uio->uio_offset) {
                        ip->ino_data.size = uio->uio_offset;
                        flags = HAMMER_INODE_SDIRTY;
                } else {
                        flags = 0;
                }
                ip->ino_data.mtime = trans.time;
                flags |= HAMMER_INODE_MTIME | HAMMER_INODE_BUFS;
                hammer_modify_inode(ip, flags);

                /*
                 * Once we dirty the buffer any cached zone-X offset
                 * becomes invalid.  HAMMER NOTE: no-history mode cannot
                 * allow overwriting over the same data sector unless
                 * we provide UNDOs for the old data, which we don't.
                 */
                bp->b_bio2.bio_offset = NOOFFSET;

                /*
                 * Final buffer disposition.
                 *
                 * Because meta-data updates are deferred, HAMMER is
                 * especially sensitive to excessive bdwrite()s because
                 * the I/O stream is not broken up by disk reads.  So the
                 * buffer cache simply cannot keep up.
                 *
                 * WARNING!  blksize is variable.  cluster_write() is
                 *           expected to not blow up if it encounters
                 *           buffers that do not match the passed blksize.
                 *
                 * NOTE!  Hammer shouldn't need to bawrite()/cluster_write().
                 *        The ip->rsv_recs check should burst-flush the data.
                 *        If we queue it immediately the buf could be left
                 *        locked on the device queue for a very long time.
                 */
                bp->b_flags |= B_AGE;
                if (ap->a_ioflag & IO_SYNC) {
                        bwrite(bp);
                } else if (ap->a_ioflag & IO_DIRECT) {
                        bawrite(bp);
                } else if (offset + n == blksize) {
                        if (hammer_cluster_enable == 0 ||
                            (ap->a_vp->v_mount->mnt_flag & MNT_NOCLUSTERW)) {
                                bawrite(bp);
                        } else {
                                cluster_write(bp, ip->ino_data.size,
                                              blksize, seqcount);
                        }
                } else {
                        bdwrite(bp);
                }
        }
        hammer_done_transaction(&trans);
        hammer_knote(ap->a_vp, kflags);
        return (error);
}
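
/*
 * The REDO path above turns a small overwrite into an intent record in
 * the UNDO/REDO FIFO.  Conceptually, a 512 byte write at file offset
 * 8192 logged with HAMMER_REDO_WRITE can be replayed after a crash
 * without requiring the full meta-data sync that fsync() would
 * otherwise have to perform.
 */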
/*
 * hammer_vop_access { vp, mode, cred }
 */
static
int
hammer_vop_access(struct vop_access_args *ap)
{
        struct hammer_inode *ip = VTOI(ap->a_vp);
        uid_t uid;
        gid_t gid;
        int error;

        ++hammer_stats_file_iopsr;
        uid = hammer_to_unix_xid(&ip->ino_data.uid);
        gid = hammer_to_unix_xid(&ip->ino_data.gid);

        error = vop_helper_access(ap, uid, gid, ip->ino_data.mode,
                                  ip->ino_data.uflags);
        return (error);
}
/*
 * hammer_vop_advlock { vp, id, op, fl, flags }
 */
static
int
hammer_vop_advlock(struct vop_advlock_args *ap)
{
        hammer_inode_t ip = VTOI(ap->a_vp);

        return (lf_advlock(ap, &ip->advlock, ip->ino_data.size));
}
/*
 * hammer_vop_close { vp, fflag }
 *
 * We can only sync-on-close for normal closes.
 */
static
int
hammer_vop_close(struct vop_close_args *ap)
{
#if 0
        struct vnode *vp = ap->a_vp;
        hammer_inode_t ip = VTOI(vp);
        int waitfor;

        if (ip->flags & (HAMMER_INODE_CLOSESYNC|HAMMER_INODE_CLOSEASYNC)) {
                if (vn_islocked(vp) == LK_EXCLUSIVE &&
                    (vp->v_flag & (VINACTIVE|VRECLAIMED)) == 0) {
                        if (ip->flags & HAMMER_INODE_CLOSESYNC)
                                waitfor = MNT_WAIT;
                        else
                                waitfor = MNT_NOWAIT;
                        ip->flags &= ~(HAMMER_INODE_CLOSESYNC |
                                       HAMMER_INODE_CLOSEASYNC);
                        VOP_FSYNC(vp, MNT_NOWAIT, waitfor);
                }
        }
#endif
        return (vop_stdclose(ap));
}
/*
 * hammer_vop_ncreate { nch, dvp, vpp, cred, vap }
 *
 * The operating system has already ensured that the directory entry
 * does not exist and done all appropriate namespace locking.
 */
static
int
hammer_vop_ncreate(struct vop_ncreate_args *ap)
{
        struct hammer_transaction trans;
        struct hammer_inode *dip;
        struct hammer_inode *nip;
        struct nchandle *nch;
        int error;

        nch = ap->a_nch;
        dip = VTOI(ap->a_dvp);

        if (dip->flags & HAMMER_INODE_RO)
                return (EROFS);
        if ((error = hammer_checkspace(dip->hmp, HAMMER_CHKSPC_CREATE)) != 0)
                return (error);

        /*
         * Create a transaction to cover the operations we perform.
         */
        hammer_start_transaction(&trans, dip->hmp);
        ++hammer_stats_file_iopsw;

        /*
         * Create a new filesystem object of the requested type.  The
         * returned inode will be referenced and shared-locked to prevent
         * it from being moved to the flusher.
         */
        error = hammer_create_inode(&trans, ap->a_vap, ap->a_cred,
                                    dip, nch->ncp->nc_name, nch->ncp->nc_nlen,
                                    NULL, &nip);
        if (error) {
                hkprintf("hammer_create_inode error %d\n", error);
                hammer_done_transaction(&trans);
                *ap->a_vpp = NULL;
                return (error);
        }

        /*
         * Add the new filesystem object to the directory.  This will also
         * bump the inode's link count.
         */
        error = hammer_ip_add_directory(&trans, dip,
                                        nch->ncp->nc_name, nch->ncp->nc_nlen,
                                        nip);
        if (error)
                hkprintf("hammer_ip_add_directory error %d\n", error);

        /*
         * Finish up.
         */
        if (error) {
                hammer_rel_inode(nip, 0);
                hammer_done_transaction(&trans);
                *ap->a_vpp = NULL;
        } else {
                error = hammer_get_vnode(nip, ap->a_vpp);
                hammer_done_transaction(&trans);
                hammer_rel_inode(nip, 0);
                if (error == 0) {
                        cache_setunresolved(ap->a_nch);
                        cache_setvp(ap->a_nch, *ap->a_vpp);
                }
                hammer_knote(ap->a_dvp, NOTE_WRITE);
        }
        return (error);
}
/*
 * hammer_vop_getattr { vp, vap }
 *
 * Retrieve an inode's attribute information.  When accessing inodes
 * historically we fake the atime field to ensure consistent results.
 * The atime field is stored in the B-Tree element and allowed to be
 * updated without cycling the element.
 */
static
int
hammer_vop_getattr(struct vop_getattr_args *ap)
{
        struct hammer_inode *ip = VTOI(ap->a_vp);
        struct vattr *vap = ap->a_vap;

        /*
         * We want the fsid to be different when accessing a filesystem
         * with different as-of's so programs like diff don't think
         * the files are the same.
         *
         * We also want the fsid to be the same when comparing snapshots,
         * or when comparing mirrors (which might be backed by different
         * physical devices).  HAMMER fsids are based on the PFS's
         * shared_uuid field.
         *
         * XXX there is a chance of collision here.  The va_fsid reported
         * by stat is different from the more involved fsid used in the
         * mount structure.
         */
        ++hammer_stats_file_iopsr;
        hammer_lock_sh(&ip->lock);
        vap->va_fsid = ip->pfsm->fsid_udev ^ (u_int32_t)ip->obj_asof ^
                       (u_int32_t)(ip->obj_asof >> 32);

        vap->va_fileid = ip->ino_leaf.base.obj_id;
        vap->va_mode = ip->ino_data.mode;
        vap->va_nlink = ip->ino_data.nlinks;
        vap->va_uid = hammer_to_unix_xid(&ip->ino_data.uid);
        vap->va_gid = hammer_to_unix_xid(&ip->ino_data.gid);
        vap->va_rmajor = 0;
        vap->va_rminor = 0;
        vap->va_size = ip->ino_data.size;

        /*
         * Special case for @@PFS softlinks.  The actual size of the
         * expanded softlink is "@@0x%016llx:%05d" == 26 bytes.
         * or for MAX_TID is    "@@-1:%05d"        == 10 bytes.
         */
        if (ip->ino_data.obj_type == HAMMER_OBJTYPE_SOFTLINK &&
            ip->ino_data.size == 10 &&
            ip->obj_asof == HAMMER_MAX_TID &&
            ip->obj_localization == 0 &&
            strncmp(ip->ino_data.ext.symlink, "@@PFS", 5) == 0) {
                    if (ip->pfsm->pfsd.mirror_flags & HAMMER_PFSD_SLAVE)
                            vap->va_size = 26;
                    else
                            vap->va_size = 10;
        }

        /*
         * We must provide a consistent atime and mtime for snapshots
         * so people can do a 'tar cf - ... | md5' on them and get
         * consistent results.
         */
        if (ip->flags & HAMMER_INODE_RO) {
                hammer_time_to_timespec(ip->ino_data.ctime, &vap->va_atime);
                hammer_time_to_timespec(ip->ino_data.ctime, &vap->va_mtime);
        } else {
                hammer_time_to_timespec(ip->ino_data.atime, &vap->va_atime);
                hammer_time_to_timespec(ip->ino_data.mtime, &vap->va_mtime);
        }
        hammer_time_to_timespec(ip->ino_data.ctime, &vap->va_ctime);
        vap->va_flags = ip->ino_data.uflags;
        vap->va_gen = 1;        /* hammer inums are unique for all time */
        vap->va_blocksize = HAMMER_BUFSIZE;
        if (ip->ino_data.size >= HAMMER_XDEMARC) {
                vap->va_bytes = (ip->ino_data.size + HAMMER_XBUFMASK64) &
                                ~HAMMER_XBUFMASK64;
        } else if (ip->ino_data.size > HAMMER_BUFSIZE / 2) {
                vap->va_bytes = (ip->ino_data.size + HAMMER_BUFMASK64) &
                                ~HAMMER_BUFMASK64;
        } else {
                vap->va_bytes = (ip->ino_data.size + 15) & ~15;
        }

        vap->va_type = hammer_get_vnode_type(ip->ino_data.obj_type);
        vap->va_filerev = 0;    /* XXX */
        vap->va_uid_uuid = ip->ino_data.uid;
        vap->va_gid_uuid = ip->ino_data.gid;
        vap->va_fsid_uuid = ip->hmp->fsid;
        vap->va_vaflags = VA_UID_UUID_VALID | VA_GID_UUID_VALID |
                          VA_FSID_UUID_VALID;

        switch (ip->ino_data.obj_type) {
        case HAMMER_OBJTYPE_CDEV:
        case HAMMER_OBJTYPE_BDEV:
                vap->va_rmajor = ip->ino_data.rmajor;
                vap->va_rminor = ip->ino_data.rminor;
                break;
        default:
                break;
        }
        hammer_unlock(&ip->lock);
        return (0);
}
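
/*
 * Examples of the va_bytes rounding above (assuming HAMMER_BUFSIZE is
 * 16KB and HAMMER_XBUFSIZE is 64KB): a 100 byte file reports
 * (100 + 15) & ~15 = 112 bytes; a 10000 byte file rounds up to the
 * full 16KB buffer size; and a file at or past HAMMER_XDEMARC rounds
 * up to a multiple of the 64KB extended buffer size.
 */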
/*
 * hammer_vop_nresolve { nch, dvp, cred }
 *
 * Locate the requested directory entry.
 */
static
int
hammer_vop_nresolve(struct vop_nresolve_args *ap)
{
        struct hammer_transaction trans;
        struct namecache *ncp;
        hammer_inode_t dip;
        hammer_inode_t ip;
        hammer_tid_t asof;
        struct hammer_cursor cursor;
        struct vnode *vp;
        int64_t namekey;
        int64_t obj_id;
        int nlen;
        int flags;
        int ispfs;
        int error;
        int i;
        u_int32_t localization;
        u_int32_t max_iterations;

        /*
         * Misc initialization, plus handle as-of name extensions.  Look for
         * the '@@' extension.  Note that as-of files and directories cannot
         * be modified.
         */
        dip = VTOI(ap->a_dvp);
        ncp = ap->a_nch->ncp;
        asof = dip->obj_asof;
        localization = dip->obj_localization;   /* for code consistency */
        nlen = ncp->nc_nlen;
        flags = dip->flags & HAMMER_INODE_RO;
        ispfs = 0;

        hammer_simple_transaction(&trans, dip->hmp);
        ++hammer_stats_file_iopsr;

        for (i = 0; i < nlen; ++i) {
                if (ncp->nc_name[i] == '@' && ncp->nc_name[i+1] == '@') {
                        error = hammer_str_to_tid(ncp->nc_name + i + 2,
                                                  &ispfs, &asof, &localization);
                        if (error != 0) {
                                error = ENOENT;
                                goto done;
                        }
                        if (asof != HAMMER_MAX_TID)
                                flags |= HAMMER_INODE_RO;
                        break;
                }
        }
        nlen = i;

        /*
         * If this is a PFS softlink we dive into the PFS
         */
        if (ispfs && nlen == 0) {
                ip = hammer_get_inode(&trans, dip, HAMMER_OBJID_ROOT,
                                      asof, localization,
                                      flags, &error);
                if (error == 0) {
                        error = hammer_get_vnode(ip, &vp);
                        hammer_rel_inode(ip, 0);
                } else {
                        vp = NULL;
                }
                if (error == 0) {
                        vn_unlock(vp);
                        cache_setvp(ap->a_nch, vp);
                        vrele(vp);
                }
                goto done;
        }

        /*
         * If there is no path component the time extension is relative to dip.
         * e.g. "fubar/@@<snapshot>"
         *
         * "." is handled by the kernel, but ".@@<snapshot>" is not.
         * e.g. "fubar/.@@<snapshot>"
         *
         * ".." is handled by the kernel.  We do not currently handle
         * "..@<snapshot>".
         */
        if (nlen == 0 || (nlen == 1 && ncp->nc_name[0] == '.')) {
                ip = hammer_get_inode(&trans, dip, dip->obj_id,
                                      asof, dip->obj_localization,
                                      flags, &error);
                if (error == 0) {
                        error = hammer_get_vnode(ip, &vp);
                        hammer_rel_inode(ip, 0);
                } else {
                        vp = NULL;
                }
                if (error == 0) {
                        vn_unlock(vp);
                        cache_setvp(ap->a_nch, vp);
                        vrele(vp);
                }
                goto done;
        }

        /*
         * Calculate the namekey and setup the key range for the scan.  This
         * works kinda like a chained hash table where the lower 32 bits
         * of the namekey synthesize the chain.
         *
         * The key range is inclusive of both key_beg and key_end.
         */
        namekey = hammer_directory_namekey(dip, ncp->nc_name, nlen,
                                           &max_iterations);

        error = hammer_init_cursor(&trans, &cursor, &dip->cache[1], dip);
        cursor.key_beg.localization = dip->obj_localization +
                                      hammer_dir_localization(dip);
        cursor.key_beg.obj_id = dip->obj_id;
        cursor.key_beg.key = namekey;
        cursor.key_beg.create_tid = 0;
        cursor.key_beg.delete_tid = 0;
        cursor.key_beg.rec_type = HAMMER_RECTYPE_DIRENTRY;
        cursor.key_beg.obj_type = 0;

        cursor.key_end = cursor.key_beg;
        cursor.key_end.key += max_iterations;
        cursor.asof = asof;
        cursor.flags |= HAMMER_CURSOR_END_INCLUSIVE | HAMMER_CURSOR_ASOF;

        /*
         * Scan all matching records (the chain), locate the one matching
         * the requested path component.
         *
         * The hammer_ip_*() functions merge in-memory records with on-disk
         * records for the purposes of the search.
         */
        obj_id = 0;
        localization = HAMMER_DEF_LOCALIZATION;

        if (error == 0) {
                error = hammer_ip_first(&cursor);
                while (error == 0) {
                        error = hammer_ip_resolve_data(&cursor);
                        if (error)
                                break;
                        if (nlen == cursor.leaf->data_len - HAMMER_ENTRY_NAME_OFF &&
                            bcmp(ncp->nc_name, cursor.data->entry.name, nlen) == 0) {
                                obj_id = cursor.data->entry.obj_id;
                                localization = cursor.data->entry.localization;
                                break;
                        }
                        error = hammer_ip_next(&cursor);
                }
        }
        hammer_done_cursor(&cursor);

        /*
         * Lookup the obj_id.  This should always succeed.  If it does not
         * the filesystem may be damaged and we return a dummy inode.
         */
        if (error == 0) {
                ip = hammer_get_inode(&trans, dip, obj_id,
                                      asof, localization,
                                      flags, &error);
                if (error == ENOENT) {
                        kprintf("HAMMER: WARNING: Missing "
                                "inode for dirent \"%s\"\n"
                                "\tobj_id = %016llx, asof=%016llx, lo=%08x\n",
                                ncp->nc_name,
                                (long long)obj_id, (long long)asof,
                                localization);
                        error = 0;
                        ip = hammer_get_dummy_inode(&trans, dip, obj_id,
                                                    asof, localization,
                                                    flags, &error);
                }
                if (error == 0) {
                        error = hammer_get_vnode(ip, &vp);
                        hammer_rel_inode(ip, 0);
                } else {
                        vp = NULL;
                }
                if (error == 0) {
                        vn_unlock(vp);
                        cache_setvp(ap->a_nch, vp);
                        vrele(vp);
                }
        } else if (error == ENOENT) {
                cache_setvp(ap->a_nch, NULL);
        }
done:
        hammer_done_transaction(&trans);
        return (error);
}
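
/*
 * Conceptually the directory hash above works like a chained hash table:
 * hammer_directory_namekey() places the name hash in the upper bits of
 * the 64 bit key and collisions simply occupy adjacent keys, so the scan
 * visits at most max_iterations B-Tree keys for any single name before
 * concluding the entry does not exist.
 */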
/*
 * hammer_vop_nlookupdotdot { dvp, vpp, cred }
 *
 * Locate the parent directory of a directory vnode.
 *
 * dvp is referenced but not locked.  *vpp must be returned referenced and
 * locked.  A parent_obj_id of 0 does not necessarily indicate that we are
 * at the root, instead it could indicate that the directory we were in was
 * removed.
 *
 * NOTE: as-of sequences are not linked into the directory structure.  If
 * we are at the root with a different asof than the mount point, reload
 * the same directory with the mount point's asof.   I'm not sure what this
 * will do to NFS.  We encode ASOF stamps in NFS file handles so it might not
 * get confused, but it hasn't been tested.
 */
static
int
hammer_vop_nlookupdotdot(struct vop_nlookupdotdot_args *ap)
{
        struct hammer_transaction trans;
        struct hammer_inode *dip;
        struct hammer_inode *ip;
        int64_t parent_obj_id;
        u_int32_t parent_obj_localization;
        hammer_tid_t asof;
        int error;

        dip = VTOI(ap->a_dvp);
        asof = dip->obj_asof;

        /*
         * Who's our parent?  This could be the root of a pseudo-filesystem
         * whose parent is in another localization domain.
         */
        parent_obj_id = dip->ino_data.parent_obj_id;
        if (dip->obj_id == HAMMER_OBJID_ROOT)
                parent_obj_localization = dip->ino_data.ext.obj.parent_obj_localization;
        else
                parent_obj_localization = dip->obj_localization;

        if (parent_obj_id == 0) {
                if (dip->obj_id == HAMMER_OBJID_ROOT &&
                    asof != dip->hmp->asof) {
                        parent_obj_id = dip->obj_id;
                        asof = dip->hmp->asof;
                        *ap->a_fakename = kmalloc(19, M_TEMP, M_WAITOK);
                        ksnprintf(*ap->a_fakename, 19, "0x%016llx",
                                  (long long)dip->obj_asof);
                } else {
                        *ap->a_vpp = NULL;
                        return (ENOENT);
                }
        }

        hammer_simple_transaction(&trans, dip->hmp);
        ++hammer_stats_file_iopsr;

        ip = hammer_get_inode(&trans, dip, parent_obj_id,
                              asof, parent_obj_localization,
                              dip->flags, &error);
        if (ip) {
                error = hammer_get_vnode(ip, ap->a_vpp);
                hammer_rel_inode(ip, 0);
        } else {
                *ap->a_vpp = NULL;
        }
        hammer_done_transaction(&trans);
        return (error);
}
/*
 * hammer_vop_nlink { nch, dvp, vp, cred }
 */
static
int
hammer_vop_nlink(struct vop_nlink_args *ap)
{
        struct hammer_transaction trans;
        struct hammer_inode *dip;
        struct hammer_inode *ip;
        struct nchandle *nch;
        int error;

        if (ap->a_dvp->v_mount != ap->a_vp->v_mount)
                return (EXDEV);

        nch = ap->a_nch;
        dip = VTOI(ap->a_dvp);
        ip = VTOI(ap->a_vp);

        if (dip->obj_localization != ip->obj_localization)
                return (EXDEV);

        if (dip->flags & HAMMER_INODE_RO)
                return (EROFS);
        if (ip->flags & HAMMER_INODE_RO)
                return (EROFS);
        if ((error = hammer_checkspace(dip->hmp, HAMMER_CHKSPC_CREATE)) != 0)
                return (error);

        /*
         * Create a transaction to cover the operations we perform.
         */
        hammer_start_transaction(&trans, dip->hmp);
        ++hammer_stats_file_iopsw;

        /*
         * Add the filesystem object to the directory.  Note that neither
         * dip nor ip are referenced or locked, but their vnodes are
         * referenced.  This function will bump the inode's link count.
         */
        error = hammer_ip_add_directory(&trans, dip,
                                        nch->ncp->nc_name, nch->ncp->nc_nlen,
                                        ip);

        /*
         * Finish up.
         */
        if (error == 0) {
                cache_setunresolved(nch);
                cache_setvp(nch, ap->a_vp);
        }
        hammer_done_transaction(&trans);
        hammer_knote(ap->a_vp, NOTE_LINK);
        hammer_knote(ap->a_dvp, NOTE_WRITE);
        return (error);
}
/*
 * hammer_vop_nmkdir { nch, dvp, vpp, cred, vap }
 *
 * The operating system has already ensured that the directory entry
 * does not exist and done all appropriate namespace locking.
 */
static
int
hammer_vop_nmkdir(struct vop_nmkdir_args *ap)
{
        struct hammer_transaction trans;
        struct hammer_inode *dip;
        struct hammer_inode *nip;
        struct nchandle *nch;
        int error;

        nch = ap->a_nch;
        dip = VTOI(ap->a_dvp);

        if (dip->flags & HAMMER_INODE_RO)
                return (EROFS);
        if ((error = hammer_checkspace(dip->hmp, HAMMER_CHKSPC_CREATE)) != 0)
                return (error);

        /*
         * Create a transaction to cover the operations we perform.
         */
        hammer_start_transaction(&trans, dip->hmp);
        ++hammer_stats_file_iopsw;

        /*
         * Create a new filesystem object of the requested type.  The
         * returned inode will be referenced but not locked.
         */
        error = hammer_create_inode(&trans, ap->a_vap, ap->a_cred,
                                    dip, nch->ncp->nc_name, nch->ncp->nc_nlen,
                                    NULL, &nip);
        if (error) {
                hkprintf("hammer_mkdir error %d\n", error);
                hammer_done_transaction(&trans);
                *ap->a_vpp = NULL;
                return (error);
        }

        /*
         * Add the new filesystem object to the directory.  This will also
         * bump the inode's link count.
         */
        error = hammer_ip_add_directory(&trans, dip,
                                        nch->ncp->nc_name, nch->ncp->nc_nlen,
                                        nip);
        if (error)
                hkprintf("hammer_mkdir (add) error %d\n", error);

        /*
         * Finish up.
         */
        if (error) {
                hammer_rel_inode(nip, 0);
                *ap->a_vpp = NULL;
        } else {
                error = hammer_get_vnode(nip, ap->a_vpp);
                hammer_rel_inode(nip, 0);
                if (error == 0) {
                        cache_setunresolved(ap->a_nch);
                        cache_setvp(ap->a_nch, *ap->a_vpp);
                }
        }
        hammer_done_transaction(&trans);
        if (error == 0)
                hammer_knote(ap->a_dvp, NOTE_WRITE | NOTE_LINK);
        return (error);
}
/*
 * hammer_vop_nmknod { nch, dvp, vpp, cred, vap }
 *
 * The operating system has already ensured that the directory entry
 * does not exist and done all appropriate namespace locking.
 */
static
int
hammer_vop_nmknod(struct vop_nmknod_args *ap)
{
        struct hammer_transaction trans;
        struct hammer_inode *dip;
        struct hammer_inode *nip;
        struct nchandle *nch;
        int error;

        nch = ap->a_nch;
        dip = VTOI(ap->a_dvp);

        if (dip->flags & HAMMER_INODE_RO)
                return (EROFS);
        if ((error = hammer_checkspace(dip->hmp, HAMMER_CHKSPC_CREATE)) != 0)
                return (error);

        /*
         * Create a transaction to cover the operations we perform.
         */
        hammer_start_transaction(&trans, dip->hmp);
        ++hammer_stats_file_iopsw;

        /*
         * Create a new filesystem object of the requested type.  The
         * returned inode will be referenced but not locked.
         *
         * If mknod specifies a directory a pseudo-fs is created.
         */
        error = hammer_create_inode(&trans, ap->a_vap, ap->a_cred,
                                    dip, nch->ncp->nc_name, nch->ncp->nc_nlen,
                                    NULL, &nip);
        if (error) {
                hammer_done_transaction(&trans);
                *ap->a_vpp = NULL;
                return (error);
        }

        /*
         * Add the new filesystem object to the directory.  This will also
         * bump the inode's link count.
         */
        error = hammer_ip_add_directory(&trans, dip,
                                        nch->ncp->nc_name, nch->ncp->nc_nlen,
                                        nip);

        /*
         * Finish up.
         */
        if (error) {
                hammer_rel_inode(nip, 0);
                *ap->a_vpp = NULL;
        } else {
                error = hammer_get_vnode(nip, ap->a_vpp);
                hammer_rel_inode(nip, 0);
                if (error == 0) {
                        cache_setunresolved(ap->a_nch);
                        cache_setvp(ap->a_nch, *ap->a_vpp);
                }
        }
        hammer_done_transaction(&trans);
        if (error == 0)
                hammer_knote(ap->a_dvp, NOTE_WRITE);
        return (error);
}
/*
 * hammer_vop_open { vp, mode, cred, fp }
 */
static
int
hammer_vop_open(struct vop_open_args *ap)
{
        hammer_inode_t ip;

        ++hammer_stats_file_iopsr;
        ip = VTOI(ap->a_vp);

        if ((ap->a_mode & FWRITE) && (ip->flags & HAMMER_INODE_RO))
                return (EROFS);
        return(vop_stdopen(ap));
}
/*
 * hammer_vop_print { vp }
 */
static
int
hammer_vop_print(struct vop_print_args *ap)
{
        return EOPNOTSUPP;
}
/*
 * hammer_vop_readdir { vp, uio, cred, *eofflag, *ncookies, off_t **cookies }
 */
static
int
hammer_vop_readdir(struct vop_readdir_args *ap)
{
        struct hammer_transaction trans;
        struct hammer_cursor cursor;
        struct hammer_inode *ip;
        struct uio *uio;
        hammer_base_elm_t base;
        int error;
        int cookie_index;
        int ncookies;
        off_t *cookies;
        off_t saveoff;
        int r;
        int dtype;

        ++hammer_stats_file_iopsr;
        ip = VTOI(ap->a_vp);
        uio = ap->a_uio;
        saveoff = uio->uio_offset;

        if (ap->a_ncookies) {
                ncookies = uio->uio_resid / 16 + 1;
                if (ncookies > 1024)
                        ncookies = 1024;
                cookies = kmalloc(ncookies * sizeof(off_t), M_TEMP, M_WAITOK);
        } else {
                ncookies = -1;
                cookies = NULL;
        }
        cookie_index = 0;

        hammer_simple_transaction(&trans, ip->hmp);

        /*
         * Handle artificial entries
         *
         * It should be noted that the minimum value for a directory
         * hash key on-media is 0x0000000100000000, so we can use anything
         * less than that to represent our 'special' key space.
         */
        error = 0;
        if (saveoff == 0) {
                r = vop_write_dirent(&error, uio, ip->obj_id, DT_DIR, 1, ".");
                if (r)
                        goto done;
                if (cookies)
                        cookies[cookie_index] = saveoff;
                ++saveoff;
                ++cookie_index;
                if (cookie_index == ncookies)
                        goto done;
        }
        if (saveoff == 1) {
                if (ip->ino_data.parent_obj_id) {
                        r = vop_write_dirent(&error, uio,
                                             ip->ino_data.parent_obj_id,
                                             DT_DIR, 2, "..");
                } else {
                        r = vop_write_dirent(&error, uio,
                                             ip->obj_id, DT_DIR, 2, "..");
                }
                if (r)
                        goto done;
                if (cookies)
                        cookies[cookie_index] = saveoff;
                ++saveoff;
                ++cookie_index;
                if (cookie_index == ncookies)
                        goto done;
        }

        /*
         * Key range (begin and end inclusive) to scan.  Directory keys
         * directly translate to a 64 bit 'seek' position.
         */
        hammer_init_cursor(&trans, &cursor, &ip->cache[1], ip);
        cursor.key_beg.localization = ip->obj_localization +
                                      hammer_dir_localization(ip);
        cursor.key_beg.obj_id = ip->obj_id;
        cursor.key_beg.create_tid = 0;
        cursor.key_beg.delete_tid = 0;
        cursor.key_beg.rec_type = HAMMER_RECTYPE_DIRENTRY;
        cursor.key_beg.obj_type = 0;
        cursor.key_beg.key = saveoff;

        cursor.key_end = cursor.key_beg;
        cursor.key_end.key = HAMMER_MAX_KEY;
        cursor.asof = ip->obj_asof;
        cursor.flags |= HAMMER_CURSOR_END_INCLUSIVE | HAMMER_CURSOR_ASOF;

        error = hammer_ip_first(&cursor);

        while (error == 0) {
                error = hammer_ip_resolve_data(&cursor);
                if (error)
                        break;
                base = &cursor.leaf->base;
                saveoff = base->key;
                KKASSERT(cursor.leaf->data_len > HAMMER_ENTRY_NAME_OFF);

                if (base->obj_id != ip->obj_id)
                        panic("readdir: bad record at %p", cursor.node);

                /*
                 * Convert pseudo-filesystems into softlinks
                 */
                dtype = hammer_get_dtype(cursor.leaf->base.obj_type);
                r = vop_write_dirent(
                             &error, uio, cursor.data->entry.obj_id,
                             dtype,
                             cursor.leaf->data_len - HAMMER_ENTRY_NAME_OFF ,
                             (void *)cursor.data->entry.name);
                if (r)
                        break;
                ++saveoff;
                if (cookies)
                        cookies[cookie_index] = base->key;
                ++cookie_index;
                if (cookie_index == ncookies)
                        break;
                error = hammer_ip_next(&cursor);
        }
        hammer_done_cursor(&cursor);

done:
        hammer_done_transaction(&trans);

        if (ap->a_eofflag)
                *ap->a_eofflag = (error == ENOENT);
        uio->uio_offset = saveoff;
        if (error && cookie_index == 0) {
                if (error == ENOENT)
                        error = 0;
                if (cookies) {
                        kfree(cookies, M_TEMP);
                        *ap->a_ncookies = 0;
                        *ap->a_cookies = NULL;
                }
        } else {
                if (error == ENOENT)
                        error = 0;
                if (cookies) {
                        *ap->a_ncookies = cookie_index;
                        *ap->a_cookies = cookies;
                }
        }
        return (error);
}
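
/*
 * Note that directory cookies are simply the 64 bit B-Tree keys of the
 * entries, so a userland seekdir()/telldir() position survives entry
 * insertions and deletions; "." and ".." live in the artificial key
 * space below the on-media minimum of 0x0000000100000000.
 */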
/*
 * hammer_vop_readlink { vp, uio, cred }
 */
static
int
hammer_vop_readlink(struct vop_readlink_args *ap)
{
        struct hammer_transaction trans;
        struct hammer_cursor cursor;
        struct hammer_inode *ip;
        char buf[32];
        u_int32_t localization;
        hammer_pseudofs_inmem_t pfsm;
        int error;

        ip = VTOI(ap->a_vp);

        /*
         * Shortcut if the symlink data was stuffed into ino_data.
         *
         * Also expand special "@@PFS%05d" softlinks (expansion only
         * occurs for non-historical (current) accesses made from the
         * primary filesystem).
         */
        if (ip->ino_data.size <= HAMMER_INODE_BASESYMLEN) {
                char *ptr;
                int bytes;

                ptr = ip->ino_data.ext.symlink;
                bytes = (int)ip->ino_data.size;
                if (bytes == 10 &&
                    ip->obj_asof == HAMMER_MAX_TID &&
                    ip->obj_localization == 0 &&
                    strncmp(ptr, "@@PFS", 5) == 0) {
                        hammer_simple_transaction(&trans, ip->hmp);
                        bcopy(ptr + 5, buf, 5);
                        buf[5] = 0;
                        localization = strtoul(buf, NULL, 10) << 16;
                        pfsm = hammer_load_pseudofs(&trans, localization,
                                                    &error);
                        if (error == 0) {
                                if (pfsm->pfsd.mirror_flags &
                                    HAMMER_PFSD_SLAVE) {
                                        /* vap->va_size == 26 */
                                        ksnprintf(buf, sizeof(buf),
                                                  "@@0x%016llx:%05d",
                                                  (long long)pfsm->pfsd.sync_end_tid,
                                                  localization >> 16);
                                } else {
                                        /* vap->va_size == 10 */
                                        ksnprintf(buf, sizeof(buf),
                                                  "@@-1:%05d",
                                                  localization >> 16);
                                }
                        } else {
                                ksnprintf(buf, sizeof(buf),
                                          "@@0x%016llx:%05d",
                                          (long long)HAMMER_MAX_TID,
                                          localization >> 16);
                        }
                        ptr = buf;
                        bytes = strlen(buf);
                        if (pfsm)
                                hammer_rel_pseudofs(trans.hmp, pfsm);
                        hammer_done_transaction(&trans);
                }
                error = uiomove(ptr, bytes, ap->a_uio);
                return (error);
        }

        /*
         * Long version
         */
        hammer_simple_transaction(&trans, ip->hmp);
        ++hammer_stats_file_iopsr;
        hammer_init_cursor(&trans, &cursor, &ip->cache[1], ip);

        /*
         * Key range (begin and end inclusive) to scan.  Directory keys
         * directly translate to a 64 bit 'seek' position.
         */
        cursor.key_beg.localization = ip->obj_localization +
                                      HAMMER_LOCALIZE_MISC;
        cursor.key_beg.obj_id = ip->obj_id;
        cursor.key_beg.create_tid = 0;
        cursor.key_beg.delete_tid = 0;
        cursor.key_beg.rec_type = HAMMER_RECTYPE_FIX;
        cursor.key_beg.obj_type = 0;
        cursor.key_beg.key = HAMMER_FIXKEY_SYMLINK;
        cursor.asof = ip->obj_asof;
        cursor.flags |= HAMMER_CURSOR_ASOF;

        error = hammer_ip_lookup(&cursor);
        if (error == 0) {
                error = hammer_ip_resolve_data(&cursor);
                if (error == 0) {
                        KKASSERT(cursor.leaf->data_len >=
                                 HAMMER_SYMLINK_NAME_OFF);
                        error = uiomove(cursor.data->symlink.name,
                                        cursor.leaf->data_len -
                                                HAMMER_SYMLINK_NAME_OFF,
                                        ap->a_uio);
                }
        }
        hammer_done_cursor(&cursor);
        hammer_done_transaction(&trans);
        return (error);
}
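
/*
 * Example of the expansion above: a softlink whose stored body is
 * "@@PFS00003" resolves on a master to "@@-1:00003" (always-current),
 * and on a slave to something like "@@0x00000001061a8c10:00003" (the
 * TID shown here is only illustrative) so that accesses are pinned to
 * the last fully synchronized transaction id.
 */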
/*
 * hammer_vop_nremove { nch, dvp, cred }
 */
static
int
hammer_vop_nremove(struct vop_nremove_args *ap)
{
        struct hammer_transaction trans;
        struct hammer_inode *dip;
        int error;

        dip = VTOI(ap->a_dvp);

        if (hammer_nohistory(dip) == 0 &&
            (error = hammer_checkspace(dip->hmp, HAMMER_CHKSPC_REMOVE)) != 0) {
                return (error);
        }

        hammer_start_transaction(&trans, dip->hmp);
        ++hammer_stats_file_iopsw;
        error = hammer_dounlink(&trans, ap->a_nch, ap->a_dvp, ap->a_cred, 0, 0);
        hammer_done_transaction(&trans);
        if (error == 0)
                hammer_knote(ap->a_dvp, NOTE_WRITE);
        return (error);
}
/*
 * hammer_vop_nrename { fnch, tnch, fdvp, tdvp, cred }
 */
static
int
hammer_vop_nrename(struct vop_nrename_args *ap)
{
        struct hammer_transaction trans;
        struct namecache *fncp;
        struct namecache *tncp;
        struct hammer_inode *fdip;
        struct hammer_inode *tdip;
        struct hammer_inode *ip;
        struct hammer_cursor cursor;
        int64_t namekey;
        u_int32_t max_iterations;
        int nlen;
        int error;

        if (ap->a_fdvp->v_mount != ap->a_tdvp->v_mount)
                return (EXDEV);
        if (ap->a_fdvp->v_mount != ap->a_fnch->ncp->nc_vp->v_mount)
                return (EXDEV);

        fdip = VTOI(ap->a_fdvp);
        tdip = VTOI(ap->a_tdvp);
        fncp = ap->a_fnch->ncp;
        tncp = ap->a_tnch->ncp;
        ip = VTOI(fncp->nc_vp);
        KKASSERT(ip != NULL);

        if (fdip->obj_localization != tdip->obj_localization)
                return (EXDEV);
        if (fdip->obj_localization != ip->obj_localization)
                return (EXDEV);

        if (fdip->flags & HAMMER_INODE_RO)
                return (EROFS);
        if (tdip->flags & HAMMER_INODE_RO)
                return (EROFS);
        if (ip->flags & HAMMER_INODE_RO)
                return (EROFS);
        if ((error = hammer_checkspace(fdip->hmp, HAMMER_CHKSPC_CREATE)) != 0)
                return (error);

        hammer_start_transaction(&trans, fdip->hmp);
        ++hammer_stats_file_iopsw;

        /*
         * Remove tncp from the target directory and then link ip as
         * tncp. XXX pass trans to dounlink
         *
         * Force the inode sync-time to match the transaction so it is
         * in-sync with the creation of the target directory entry.
         */
        error = hammer_dounlink(&trans, ap->a_tnch, ap->a_tdvp,
                                ap->a_cred, 0, -1);
        if (error == 0 || error == ENOENT) {
                error = hammer_ip_add_directory(&trans, tdip,
                                                tncp->nc_name, tncp->nc_nlen,
                                                ip);
                if (error == 0) {
                        ip->ino_data.parent_obj_id = tdip->obj_id;
                        ip->ino_data.ctime = trans.time;
                        hammer_modify_inode(ip, HAMMER_INODE_DDIRTY);
                }
        }
        if (error)
                goto failed; /* XXX */

        /*
         * Locate the record in the originating directory and remove it.
         *
         * Calculate the namekey and setup the key range for the scan.  This
         * works kinda like a chained hash table where the lower 32 bits
         * of the namekey synthesize the chain.
         *
         * The key range is inclusive of both key_beg and key_end.
         */
        namekey = hammer_directory_namekey(fdip, fncp->nc_name, fncp->nc_nlen,
                                           &max_iterations);
retry:
        hammer_init_cursor(&trans, &cursor, &fdip->cache[1], fdip);
        cursor.key_beg.localization = fdip->obj_localization +
                                      hammer_dir_localization(fdip);
        cursor.key_beg.obj_id = fdip->obj_id;
        cursor.key_beg.key = namekey;
        cursor.key_beg.create_tid = 0;
        cursor.key_beg.delete_tid = 0;
        cursor.key_beg.rec_type = HAMMER_RECTYPE_DIRENTRY;
        cursor.key_beg.obj_type = 0;

        cursor.key_end = cursor.key_beg;
        cursor.key_end.key += max_iterations;
        cursor.asof = fdip->obj_asof;
        cursor.flags |= HAMMER_CURSOR_END_INCLUSIVE | HAMMER_CURSOR_ASOF;

        /*
         * Scan all matching records (the chain), locate the one matching
         * the requested path component.
         *
         * The hammer_ip_*() functions merge in-memory records with on-disk
         * records for the purposes of the search.
         */
        error = hammer_ip_first(&cursor);
        while (error == 0) {
                if (hammer_ip_resolve_data(&cursor) != 0)
                        break;
                nlen = cursor.leaf->data_len - HAMMER_ENTRY_NAME_OFF;
                KKASSERT(nlen > 0);
                if (fncp->nc_nlen == nlen &&
                    bcmp(fncp->nc_name, cursor.data->entry.name, nlen) == 0) {
                        break;
                }
                error = hammer_ip_next(&cursor);
        }

        /*
         * If all is ok we have to get the inode so we can adjust nlinks.
         *
         * WARNING: hammer_ip_del_directory() may have to terminate the
         * cursor to avoid a recursion.  It's ok to call hammer_done_cursor()
         * twice.
         */
        if (error == 0)
                error = hammer_ip_del_directory(&trans, &cursor, fdip, ip);

        /*
         * XXX A deadlock here will break rename's atomicity for the purposes
         * of crash recovery.
         */
        if (error == EDEADLK) {
                hammer_done_cursor(&cursor);
                goto retry;
        }

        /*
         * Cleanup and tell the kernel that the rename succeeded.
         */
        hammer_done_cursor(&cursor);
        if (error == 0) {
                cache_rename(ap->a_fnch, ap->a_tnch);
                hammer_knote(ap->a_fdvp, NOTE_WRITE);
                hammer_knote(ap->a_tdvp, NOTE_WRITE);
                if (ip->vp)
                        hammer_knote(ip->vp, NOTE_RENAME);
        }

failed:
        hammer_done_transaction(&trans);
        return (error);
}
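
/*
 * The EDEADLK retry above re-runs only the source-directory scan; the
 * new target-directory entry has already been created at that point,
 * which is why a deadlock here can break rename's atomicity for crash
 * recovery purposes, as noted in the XXX comment.
 */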
/*
 * hammer_vop_nrmdir { nch, dvp, cred }
 */
static
int
hammer_vop_nrmdir(struct vop_nrmdir_args *ap)
{
        struct hammer_transaction trans;
        struct hammer_inode *dip;
        int error;

        dip = VTOI(ap->a_dvp);

        if (hammer_nohistory(dip) == 0 &&
            (error = hammer_checkspace(dip->hmp, HAMMER_CHKSPC_REMOVE)) != 0) {
                return (error);
        }

        hammer_start_transaction(&trans, dip->hmp);
        ++hammer_stats_file_iopsw;
        error = hammer_dounlink(&trans, ap->a_nch, ap->a_dvp, ap->a_cred, 0, 1);
        hammer_done_transaction(&trans);
        if (error == 0)
                hammer_knote(ap->a_dvp, NOTE_WRITE | NOTE_LINK);
        return (error);
}
/*
 * hammer_vop_markatime { vp, cred }
 */
static
int
hammer_vop_markatime(struct vop_markatime_args *ap)
{
        struct hammer_transaction trans;
        struct hammer_inode *ip;

        ip = VTOI(ap->a_vp);
        if (ap->a_vp->v_mount->mnt_flag & MNT_RDONLY)
                return (EROFS);
        if (ip->flags & HAMMER_INODE_RO)
                return (EROFS);
        if (ip->hmp->mp->mnt_flag & MNT_NOATIME)
                return (0);
        hammer_start_transaction(&trans, ip->hmp);
        ++hammer_stats_file_iopsw;

        ip->ino_data.atime = trans.time;
        hammer_modify_inode(ip, HAMMER_INODE_ATIME);
        hammer_done_transaction(&trans);
        hammer_knote(ap->a_vp, NOTE_ATTRIB);
        return (0);
}
/*
 * hammer_vop_setattr { vp, vap, cred }
 */
static
int
hammer_vop_setattr(struct vop_setattr_args *ap)
{
        struct hammer_transaction trans;
        struct hammer_inode *ip;
        struct vattr *vap;
        int modflags;
        int error;
        int truncating;
        int blksize;
        int kflags;
        int64_t aligned_size;
        u_int32_t flags;

        vap = ap->a_vap;
        ip = ap->a_vp->v_data;
        modflags = 0;
        kflags = 0;

        if (ap->a_vp->v_mount->mnt_flag & MNT_RDONLY)
                return (EROFS);
        if (ip->flags & HAMMER_INODE_RO)
                return (EROFS);
        if (hammer_nohistory(ip) == 0 &&
            (error = hammer_checkspace(ip->hmp, HAMMER_CHKSPC_REMOVE)) != 0) {
                return (error);
        }

        hammer_start_transaction(&trans, ip->hmp);
        ++hammer_stats_file_iopsw;
        error = 0;

        if (vap->va_flags != VNOVAL) {
                flags = ip->ino_data.uflags;
                error = vop_helper_setattr_flags(&flags, vap->va_flags,
                                         hammer_to_unix_xid(&ip->ino_data.uid),
                                         ap->a_cred);
                if (error == 0) {
                        if (ip->ino_data.uflags != flags) {
                                ip->ino_data.uflags = flags;
                                ip->ino_data.ctime = trans.time;
                                modflags |= HAMMER_INODE_DDIRTY;
                                kflags |= NOTE_ATTRIB;
                        }
                        if (ip->ino_data.uflags & (IMMUTABLE | APPEND)) {
                                error = 0;
                                goto done;
                        }
                }
                goto done;
        }
        if (ip->ino_data.uflags & (IMMUTABLE | APPEND)) {
                error = EPERM;
                goto done;
        }
        if (vap->va_uid != (uid_t)VNOVAL || vap->va_gid != (gid_t)VNOVAL) {
                mode_t cur_mode = ip->ino_data.mode;
                uid_t cur_uid = hammer_to_unix_xid(&ip->ino_data.uid);
                gid_t cur_gid = hammer_to_unix_xid(&ip->ino_data.gid);
                uuid_t uuid_uid;
                uuid_t uuid_gid;

                error = vop_helper_chown(ap->a_vp, vap->va_uid, vap->va_gid,
                                         ap->a_cred,
                                         &cur_uid, &cur_gid, &cur_mode);
                if (error == 0) {
                        hammer_guid_to_uuid(&uuid_uid, cur_uid);
                        hammer_guid_to_uuid(&uuid_gid, cur_gid);
                        if (bcmp(&uuid_uid, &ip->ino_data.uid,
                                 sizeof(uuid_uid)) ||
                            bcmp(&uuid_gid, &ip->ino_data.gid,
                                 sizeof(uuid_gid)) ||
                            ip->ino_data.mode != cur_mode
                        ) {
                                ip->ino_data.uid = uuid_uid;
                                ip->ino_data.gid = uuid_gid;
                                ip->ino_data.mode = cur_mode;
                                ip->ino_data.ctime = trans.time;
                                modflags |= HAMMER_INODE_DDIRTY;
                        }
                        kflags |= NOTE_ATTRIB;
                }
        }
        while (vap->va_size != VNOVAL && ip->ino_data.size != vap->va_size) {
                switch(ap->a_vp->v_type) {
                case VREG:
                        if (vap->va_size == ip->ino_data.size)
                                break;

                        /*
                         * Log the operation if in fast-fsync mode.
                         */
                        if (ip->flags & HAMMER_INODE_REDO) {
                                error = hammer_generate_redo(&trans, ip,
                                                             vap->va_size,
                                                             HAMMER_REDO_TRUNC,
                                                             NULL, 0);
                        }
                        blksize = hammer_blocksize(vap->va_size);

                        /*
                         * XXX break atomicity, we can deadlock the backend
                         * if we do not release the lock.  Probably not a
                         * big deal here.
                         */
                        if (vap->va_size < ip->ino_data.size) {
                                nvtruncbuf(ap->a_vp, vap->va_size,
                                           blksize,
                                           hammer_blockoff(vap->va_size));
                                truncating = 1;
                                kflags |= NOTE_WRITE;
                        } else {
                                nvextendbuf(ap->a_vp,
                                            ip->ino_data.size,
                                            vap->va_size,
                                            hammer_blocksize(ip->ino_data.size),
                                            hammer_blocksize(vap->va_size),
                                            hammer_blockoff(ip->ino_data.size),
                                            hammer_blockoff(vap->va_size),
                                            0);
                                truncating = 0;
                                kflags |= NOTE_WRITE | NOTE_EXTEND;
                        }
                        ip->ino_data.size = vap->va_size;
                        ip->ino_data.mtime = trans.time;
                        /* XXX safe to use SDIRTY instead of DDIRTY here? */
                        modflags |= HAMMER_INODE_MTIME | HAMMER_INODE_DDIRTY;

                        /*
                         * On-media truncation is cached in the inode until
                         * the inode is synchronized.  We must immediately
                         * handle any frontend records.
                         */
                        if (truncating) {
                                hammer_ip_frontend_trunc(ip, vap->va_size);
#ifdef DEBUG_TRUNCATE
                                if (HammerTruncIp == NULL)
                                        HammerTruncIp = ip;
#endif
                                if ((ip->flags & HAMMER_INODE_TRUNCATED) == 0) {
                                        ip->flags |= HAMMER_INODE_TRUNCATED;
                                        ip->trunc_off = vap->va_size;
#ifdef DEBUG_TRUNCATE
                                        if (ip == HammerTruncIp)
                                                kprintf("truncate1 %016llx\n",
                                                        (long long)ip->trunc_off);
#endif
                                } else if (ip->trunc_off > vap->va_size) {
                                        ip->trunc_off = vap->va_size;
#ifdef DEBUG_TRUNCATE
                                        if (ip == HammerTruncIp)
                                                kprintf("truncate2 %016llx\n",
                                                        (long long)ip->trunc_off);
#endif
                                } else {
#ifdef DEBUG_TRUNCATE
                                        if (ip == HammerTruncIp)
                                                kprintf("truncate3 %016llx (ignored)\n",
                                                        (long long)vap->va_size);
#endif
                                }
                        }

                        /*
                         * When truncating, nvtruncbuf() may have cleaned out
                         * a portion of the last block on-disk in the buffer
                         * cache.  We must clean out any frontend records
                         * for blocks beyond the new last block.
                         */
                        aligned_size = (vap->va_size + (blksize - 1)) &
                                       ~(int64_t)(blksize - 1);
                        if (truncating && vap->va_size < aligned_size) {
                                aligned_size -= blksize;
                                hammer_ip_frontend_trunc(ip, aligned_size);
                        }
                        break;
                case VDATABASE:
                        if ((ip->flags & HAMMER_INODE_TRUNCATED) == 0) {
                                ip->flags |= HAMMER_INODE_TRUNCATED;
                                ip->trunc_off = vap->va_size;
                        } else if (ip->trunc_off > vap->va_size) {
                                ip->trunc_off = vap->va_size;
                        }
                        hammer_ip_frontend_trunc(ip, vap->va_size);
                        ip->ino_data.size = vap->va_size;
                        ip->ino_data.mtime = trans.time;
                        modflags |= HAMMER_INODE_MTIME | HAMMER_INODE_DDIRTY;
                        kflags |= NOTE_ATTRIB;
                        break;
                default:
                        error = EINVAL;
                        goto done;
                }
                break;
        }
        if (vap->va_atime.tv_sec != VNOVAL) {
                ip->ino_data.atime = hammer_timespec_to_time(&vap->va_atime);
                modflags |= HAMMER_INODE_ATIME;
                kflags |= NOTE_ATTRIB;
        }
        if (vap->va_mtime.tv_sec != VNOVAL) {
                ip->ino_data.mtime = hammer_timespec_to_time(&vap->va_mtime);
                modflags |= HAMMER_INODE_MTIME;
                kflags |= NOTE_ATTRIB;
        }
        if (vap->va_mode != (mode_t)VNOVAL) {
                mode_t cur_mode = ip->ino_data.mode;
                uid_t cur_uid = hammer_to_unix_xid(&ip->ino_data.uid);
                gid_t cur_gid = hammer_to_unix_xid(&ip->ino_data.gid);

                error = vop_helper_chmod(ap->a_vp, vap->va_mode, ap->a_cred,
                                         cur_uid, cur_gid, &cur_mode);
                if (error == 0 && ip->ino_data.mode != cur_mode) {
                        ip->ino_data.mode = cur_mode;
                        ip->ino_data.ctime = trans.time;
                        modflags |= HAMMER_INODE_DDIRTY;
                        kflags |= NOTE_ATTRIB;
                }
        }
done:
        if (error == 0)
                hammer_modify_inode(ip, modflags);
        hammer_done_transaction(&trans);
        hammer_knote(ap->a_vp, kflags);
        return (error);
}
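
/*
 * A quick sketch of the truncation caching above: truncating a 1MB file
 * to 100 bytes sets ip->trunc_off = 100 and HAMMER_INODE_TRUNCATED.
 * The B-Tree records past the truncation point are not pruned until the
 * inode is synchronized, but hammer_ip_frontend_trunc() immediately
 * invalidates the in-memory records so new frontend operations cannot
 * see stale data.
 */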
/*
 * hammer_vop_nsymlink { nch, dvp, vpp, cred, vap, target }
 */
static
int
hammer_vop_nsymlink(struct vop_nsymlink_args *ap)
{
        struct hammer_transaction trans;
        struct hammer_inode *dip;
        struct hammer_inode *nip;
        struct nchandle *nch;
        hammer_record_t record;
        int error;
        int bytes;

        ap->a_vap->va_type = VLNK;

        nch = ap->a_nch;
        dip = VTOI(ap->a_dvp);

        if (dip->flags & HAMMER_INODE_RO)
                return (EROFS);
        if ((error = hammer_checkspace(dip->hmp, HAMMER_CHKSPC_CREATE)) != 0)
                return (error);

        /*
         * Create a transaction to cover the operations we perform.
         */
        hammer_start_transaction(&trans, dip->hmp);
        ++hammer_stats_file_iopsw;

        /*
         * Create a new filesystem object of the requested type.  The
         * returned inode will be referenced but not locked.
         */
        error = hammer_create_inode(&trans, ap->a_vap, ap->a_cred,
                                    dip, nch->ncp->nc_name, nch->ncp->nc_nlen,
                                    NULL, &nip);
        if (error) {
                hammer_done_transaction(&trans);
                *ap->a_vpp = NULL;
                return (error);
        }

        /*
         * Add a record representing the symlink.  symlink stores the link
         * as pure data, not a string, and is not \0 terminated.
         */
        if (error == 0) {
                bytes = strlen(ap->a_target);

                if (bytes <= HAMMER_INODE_BASESYMLEN) {
                        bcopy(ap->a_target, nip->ino_data.ext.symlink, bytes);
                } else {
                        record = hammer_alloc_mem_record(nip, bytes);
                        record->type = HAMMER_MEM_RECORD_GENERAL;

                        record->leaf.base.localization = nip->obj_localization +
                                                         HAMMER_LOCALIZE_MISC;
                        record->leaf.base.key = HAMMER_FIXKEY_SYMLINK;
                        record->leaf.base.rec_type = HAMMER_RECTYPE_FIX;
                        record->leaf.data_len = bytes;
                        KKASSERT(HAMMER_SYMLINK_NAME_OFF == 0);
                        bcopy(ap->a_target, record->data->symlink.name, bytes);
                        error = hammer_ip_add_record(&trans, record);
                }

                /*
                 * Set the file size to the length of the link.
                 */
                if (error == 0) {
                        nip->ino_data.size = bytes;
                        hammer_modify_inode(nip, HAMMER_INODE_DDIRTY);
                }
        }
        if (error == 0)
                error = hammer_ip_add_directory(&trans, dip, nch->ncp->nc_name,
                                                nch->ncp->nc_nlen, nip);

        /*
         * Finish up.
         */
        if (error) {
                hammer_rel_inode(nip, 0);
                *ap->a_vpp = NULL;
        } else {
                error = hammer_get_vnode(nip, ap->a_vpp);
                hammer_rel_inode(nip, 0);
                if (error == 0) {
                        cache_setunresolved(ap->a_nch);
                        cache_setvp(ap->a_nch, *ap->a_vpp);
                        hammer_knote(ap->a_dvp, NOTE_WRITE);
                }
        }
        hammer_done_transaction(&trans);
        return (error);
}

/*
 * hammer_vop_nwhiteout { nch, dvp, cred, flags }
 */
static
int
hammer_vop_nwhiteout(struct vop_nwhiteout_args *ap)
{
	struct hammer_transaction trans;
	struct hammer_inode *dip;
	int error;

	dip = VTOI(ap->a_dvp);

	if (hammer_nohistory(dip) == 0 &&
	    (error = hammer_checkspace(dip->hmp, HAMMER_CHKSPC_CREATE)) != 0) {
		return (error);
	}

	hammer_start_transaction(&trans, dip->hmp);
	++hammer_stats_file_iopsw;
	error = hammer_dounlink(&trans, ap->a_nch, ap->a_dvp,
				ap->a_cred, ap->a_flags, -1);
	hammer_done_transaction(&trans);

	return (error);
}

/*
 * hammer_vop_ioctl { vp, command, data, fflag, cred }
 */
static
int
hammer_vop_ioctl(struct vop_ioctl_args *ap)
{
	struct hammer_inode *ip = ap->a_vp->v_data;

	++hammer_stats_file_iopsr;
	return(hammer_ioctl(ip, ap->a_command, ap->a_data,
			    ap->a_fflag, ap->a_cred));
}

static
int
hammer_vop_mountctl(struct vop_mountctl_args *ap)
{
	static const struct mountctl_opt extraopt[] = {
		{ HMNT_NOHISTORY,	"nohistory" },
		{ HMNT_MASTERID,	"master" },
		{ 0, NULL }
	};
	struct hammer_mount *hmp;
	struct mount *mp;
	int usedbytes;
	int error;

	error = 0;
	usedbytes = 0;
	mp = ap->a_head.a_ops->head.vv_mount;
	KKASSERT(mp->mnt_data != NULL);
	hmp = (struct hammer_mount *)mp->mnt_data;

	switch(ap->a_op) {
	case MOUNTCTL_SET_EXPORT:
		if (ap->a_ctllen != sizeof(struct export_args))
			error = EINVAL;
		else
			error = hammer_vfs_export(mp, ap->a_op,
				      (const struct export_args *)ap->a_ctl);
		break;
	case MOUNTCTL_MOUNTFLAGS:
		/*
		 * Call standard mountctl VOP function
		 * so we get user mount flags.
		 */
		error = vop_stdmountctl(ap);
		if (error)
			break;

		usedbytes = *ap->a_res;

		if (usedbytes > 0 && usedbytes < ap->a_buflen) {
			usedbytes += vfs_flagstostr(hmp->hflags, extraopt,
						    ap->a_buf,
						    ap->a_buflen - usedbytes,
						    &error);
		}

		*ap->a_res += usedbytes;
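
		/*
		 * e.g. if the standard flags produced "noatime" and
		 * both HAMMER bits are set, the caller's buffer would
		 * end up with something like "noatime,nohistory,master"
		 * (assuming it is large enough; the exact separator
		 * formatting is up to vfs_flagstostr()).
		 */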
		break;
	default:
		error = vop_stdmountctl(ap);
		break;
	}
	return (error);
}

/*
 * hammer_vop_strategy { vp, bio }
 *
 * Strategy call, used for regular file read & write only.  Note that the
 * bp may represent a cluster.
 *
 * To simplify operation and allow better optimizations in the future,
 * this code does not make any assumptions with regards to buffer alignment
 * or size.
 */
static
int
hammer_vop_strategy(struct vop_strategy_args *ap)
{
	struct buf *bp;
	int error;

	bp = ap->a_bio->bio_buf;

	switch(bp->b_cmd) {
	case BUF_CMD_READ:
		error = hammer_vop_strategy_read(ap);
		break;
	case BUF_CMD_WRITE:
		error = hammer_vop_strategy_write(ap);
		break;
	default:
		bp->b_error = error = EINVAL;
		bp->b_flags |= B_ERROR;
		biodone(ap->a_bio);
		break;
	}
	return (error);
}

/*
 * Read from a regular file.  Iterate the related records and fill in the
 * BIO/BUF.  Gaps are zero-filled.
 *
 * The support code in hammer_object.c should be used to deal with mixed
 * in-memory and on-disk records.
 *
 * NOTE: Can be called from the cluster code with an oversized buf.
 */
static
int
hammer_vop_strategy_read(struct vop_strategy_args *ap)
{
	struct hammer_transaction trans;
	struct hammer_inode *ip;
	struct hammer_inode *dip;
	struct hammer_cursor cursor;
	hammer_base_elm_t base;
	hammer_off_t disk_offset;
	struct bio *bio;
	struct bio *nbio;
	struct buf *bp;
	int64_t rec_offset;
	int64_t ran_end;
	int64_t tmp64;
	int error;
	int boff;
	int roff;
	int n;

	bio = ap->a_bio;
	bp = bio->bio_buf;
	ip = ap->a_vp->v_data;

	/*
	 * The zone-2 disk offset may have been set by the cluster code via
	 * a BMAP operation, or else should be NOOFFSET.
	 *
	 * Checking the high bits for a match against zone-2 should suffice.
	 */
	nbio = push_bio(bio);
	if ((nbio->bio_offset & HAMMER_OFF_ZONE_MASK) ==
	    HAMMER_ZONE_LARGE_DATA) {
		error = hammer_io_direct_read(ip->hmp, nbio, NULL);
		return (error);
	}

	/*
	 * Well, that sucked.  Do it the hard way.  If all the stars are
	 * aligned we may still be able to issue a direct-read.
	 */
	hammer_simple_transaction(&trans, ip->hmp);
	hammer_init_cursor(&trans, &cursor, &ip->cache[1], ip);

	/*
	 * Key range (begin and end inclusive) to scan.  Note that the keys
	 * stored in the actual records represent BASE+LEN, not BASE.  The
	 * first record containing bio_offset will have a key > bio_offset.
	 */
	cursor.key_beg.localization = ip->obj_localization +
				      HAMMER_LOCALIZE_MISC;
	cursor.key_beg.obj_id = ip->obj_id;
	cursor.key_beg.create_tid = 0;
	cursor.key_beg.delete_tid = 0;
	cursor.key_beg.obj_type = 0;
	cursor.key_beg.key = bio->bio_offset + 1;
	cursor.asof = ip->obj_asof;
	cursor.flags |= HAMMER_CURSOR_ASOF;
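
	/*
	 * The as-of TID makes the scan see the B-Tree as it existed at
	 * the inode's transaction id, so historical (snapshot) vnodes
	 * reuse exactly the same lookup code as live ones.
	 */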

	cursor.key_end = cursor.key_beg;
	KKASSERT(ip->ino_data.obj_type == HAMMER_OBJTYPE_REGFILE);
#if 0
	if (ip->ino_data.obj_type == HAMMER_OBJTYPE_DBFILE) {
		cursor.key_beg.rec_type = HAMMER_RECTYPE_DB;
		cursor.key_end.rec_type = HAMMER_RECTYPE_DB;
		cursor.key_end.key = 0x7FFFFFFFFFFFFFFFLL;
	} else
#endif
	{
		ran_end = bio->bio_offset + bp->b_bufsize;
		cursor.key_beg.rec_type = HAMMER_RECTYPE_DATA;
		cursor.key_end.rec_type = HAMMER_RECTYPE_DATA;
		tmp64 = ran_end + MAXPHYS + 1;	/* work-around GCC-4 bug */
		if (tmp64 < ran_end)
			cursor.key_end.key = 0x7FFFFFFFFFFFFFFFLL;
		else
			cursor.key_end.key = ran_end + MAXPHYS + 1;
	}
	cursor.flags |= HAMMER_CURSOR_END_INCLUSIVE;

	error = hammer_ip_first(&cursor);
	boff = 0;

	while (error == 0) {
		/*
		 * Get the base file offset of the record.  The key for
		 * data records is (base + bytes) rather than (base).
		 */
		base = &cursor.leaf->base;
		rec_offset = base->key - cursor.leaf->data_len;

		/*
		 * Calculate the gap, if any, and zero-fill it.
		 *
		 * n is the offset of the start of the record versus our
		 * current seek offset in the bio.
		 */
		n = (int)(rec_offset - (bio->bio_offset + boff));
		if (n > 0) {
			if (n > bp->b_bufsize - boff)
				n = bp->b_bufsize - boff;
			bzero((char *)bp->b_data + boff, n);
			boff += n;
			n = 0;
		}
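
		/*
		 * e.g. reading a sparse file: with boff 0 at bio_offset
		 * 0 and the first record starting at file offset 4096,
		 * n is 4096 and the leading hole is returned as zeros
		 * without touching the media.
		 */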

		/*
		 * Calculate the data offset in the record and the number
		 * of bytes we can copy.
		 *
		 * There are two degenerate cases.  First, boff may already
		 * be at bp->b_bufsize.  Secondly, the data offset within
		 * the record may exceed the record's size.
		 */
		roff = -n;
		rec_offset += roff;
		n = cursor.leaf->data_len - roff;
		if (n <= 0) {
			kprintf("strategy_read: bad n=%d roff=%d\n", n, roff);
			n = 0;
		} else if (n > bp->b_bufsize - boff) {
			n = bp->b_bufsize - boff;
		}

		/*
		 * Deal with cached truncations.  This cool bit of code
		 * allows truncate()/ftruncate() to avoid having to sync
		 * the file.
		 *
		 * If the frontend is truncated then all backend records are
		 * subject to the frontend's truncation.
		 *
		 * If the backend is truncated then backend records on-disk
		 * (but not in-memory) are subject to the backend's
		 * truncation.  In-memory records owned by the backend
		 * represent data written after the truncation point on the
		 * backend and must not be truncated.
		 *
		 * Truncate operations deal with frontend buffer cache
		 * buffers and frontend-owned in-memory records synchronously.
		 */
		if (ip->flags & HAMMER_INODE_TRUNCATED) {
			if (hammer_cursor_ondisk(&cursor)/* ||
			    cursor.iprec->flush_state == HAMMER_FST_FLUSH*/) {
				if (ip->trunc_off <= rec_offset)
					n = 0;
				else if (ip->trunc_off < rec_offset + n)
					n = (int)(ip->trunc_off - rec_offset);
			}
		}
		if (ip->sync_flags & HAMMER_INODE_TRUNCATED) {
			if (hammer_cursor_ondisk(&cursor)) {
				if (ip->sync_trunc_off <= rec_offset)
					n = 0;
				else if (ip->sync_trunc_off < rec_offset + n)
					n = (int)(ip->sync_trunc_off -
						  rec_offset);
			}
		}

		/*
		 * Try to issue a direct read into our bio if possible,
		 * otherwise resolve the element data into a hammer_buffer
		 * and copy.
		 *
		 * The buffer on-disk should be zeroed past any real
		 * truncation point, but may not be for any synthesized
		 * truncation point from above.
		 */
		disk_offset = cursor.leaf->data_offset + roff;
		if (boff == 0 && n == bp->b_bufsize &&
		    hammer_cursor_ondisk(&cursor) &&
		    (disk_offset & HAMMER_BUFMASK) == 0) {
			KKASSERT((disk_offset & HAMMER_OFF_ZONE_MASK) ==
				 HAMMER_ZONE_LARGE_DATA);
			nbio->bio_offset = disk_offset;
			error = hammer_io_direct_read(trans.hmp, nbio,
						      cursor.leaf);
			goto done;
		} else if (n) {
			error = hammer_ip_resolve_data(&cursor);
			if (error == 0) {
				bcopy((char *)cursor.data + roff,
				      (char *)bp->b_data + boff, n);
			}
		}
		if (error)
			break;
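
		/*
		 * The direct-read path above requires a full-buffer,
		 * buffer-aligned request: boff == 0, n spanning the
		 * whole bp, the record on-media, and disk_offset
		 * aligned on HAMMER_BUFMASK (the 16KB buffer size).
		 * Anything else is staged through a hammer_buffer and
		 * copied.
		 */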

		/*
		 * Iterate until we have filled the request.
		 */
		boff += n;
		if (boff == bp->b_bufsize)
			break;
		error = hammer_ip_next(&cursor);
	}

	/*
	 * There may have been a gap after the last record.
	 */
	if (error == ENOENT)
		error = 0;
	if (error == 0 && boff != bp->b_bufsize) {
		KKASSERT(boff < bp->b_bufsize);
		bzero((char *)bp->b_data + boff, bp->b_bufsize - boff);
		/* boff = bp->b_bufsize; */
	}
	bp->b_error = error;
	if (error)
		bp->b_flags |= B_ERROR;
	biodone(ap->a_bio);

done:
	/*
	 * Cache the b-tree node for the last data read in cache[1].
	 *
	 * If we hit the file EOF then also cache the node in the
	 * governing directory's cache[3], it will be used to initialize
	 * the inode's cache[1] for any inodes looked up via the directory.
	 *
	 * This doesn't reduce disk accesses since the B-Tree chain is
	 * likely cached, but it does reduce cpu overhead when looking
	 * up file offsets for cpdup/tar/cpio style iterations.
	 */
	if (cursor.node)
		hammer_cache_node(&ip->cache[1], cursor.node);
	if (ran_end >= ip->ino_data.size) {
		dip = hammer_find_inode(&trans, ip->ino_data.parent_obj_id,
					ip->obj_asof, ip->obj_localization);
		if (dip) {
			hammer_cache_node(&dip->cache[3], cursor.node);
			hammer_rel_inode(dip, 0);
		}
	}
	hammer_done_cursor(&cursor);
	hammer_done_transaction(&trans);
	return (error);
}

/*
 * BMAP operation - used to support cluster_read() only.
 *
 * (struct vnode *vp, off_t loffset, off_t *doffsetp, int *runp, int *runb)
 *
 * This routine may return EOPNOTSUPP if the operation is not supported for
 * the specified offset.  The contents of the pointer arguments do not
 * need to be initialized in that case.
 *
 * If a disk address is available and properly aligned return 0 with
 * *doffsetp set to the zone-2 address, and *runp / *runb set appropriately
 * to the run-length relative to that offset.  Callers may assume that
 * *doffsetp is valid if 0 is returned, even if *runp is not sufficiently
 * large, so return EOPNOTSUPP if it is not sufficiently large.
 */
static
int
hammer_vop_bmap(struct vop_bmap_args *ap)
{
	struct hammer_transaction trans;
	struct hammer_inode *ip;
	struct hammer_cursor cursor;
	hammer_base_elm_t base;
	int64_t rec_offset;
	int64_t ran_end;
	int64_t tmp64;
	int64_t base_offset;
	int64_t base_disk_offset;
	int64_t last_offset;
	hammer_off_t last_disk_offset;
	hammer_off_t disk_offset;
	int rec_len;
	int error;
	int blksize;

	++hammer_stats_file_iopsr;
	ip = ap->a_vp->v_data;

	/*
	 * We can only BMAP regular files.  We can't BMAP database files.
	 */
	if (ip->ino_data.obj_type != HAMMER_OBJTYPE_REGFILE)
		return (EOPNOTSUPP);

	/*
	 * bmap is typically called with runp/runb both NULL when used
	 * for writing.  We do not support BMAP for writing atm.
	 */
	if (ap->a_cmd != BUF_CMD_READ)
		return (EOPNOTSUPP);

	/*
	 * Scan the B-Tree to acquire blockmap addresses, then translate
	 * them to zone-2 addresses.
	 */
	hammer_simple_transaction(&trans, ip->hmp);

	kprintf("bmap_beg %016llx ip->cache %p\n",
		(long long)ap->a_loffset, ip->cache[1]);

	hammer_init_cursor(&trans, &cursor, &ip->cache[1], ip);

	/*
	 * Key range (begin and end inclusive) to scan.  Note that the keys
	 * stored in the actual records represent BASE+LEN, not BASE.  The
	 * first record containing bio_offset will have a key > bio_offset.
	 */
	cursor.key_beg.localization = ip->obj_localization +
				      HAMMER_LOCALIZE_MISC;
	cursor.key_beg.obj_id = ip->obj_id;
	cursor.key_beg.create_tid = 0;
	cursor.key_beg.delete_tid = 0;
	cursor.key_beg.obj_type = 0;
	if (ap->a_runb)
		cursor.key_beg.key = ap->a_loffset - MAXPHYS + 1;
	else
		cursor.key_beg.key = ap->a_loffset + 1;
	if (cursor.key_beg.key < 0)
		cursor.key_beg.key = 0;
	cursor.asof = ip->obj_asof;
	cursor.flags |= HAMMER_CURSOR_ASOF;

	cursor.key_end = cursor.key_beg;
	KKASSERT(ip->ino_data.obj_type == HAMMER_OBJTYPE_REGFILE);

	ran_end = ap->a_loffset + MAXPHYS;
	cursor.key_beg.rec_type = HAMMER_RECTYPE_DATA;
	cursor.key_end.rec_type = HAMMER_RECTYPE_DATA;
	tmp64 = ran_end + MAXPHYS + 1;	/* work-around GCC-4 bug */
	if (tmp64 < ran_end)
		cursor.key_end.key = 0x7FFFFFFFFFFFFFFFLL;
	else
		cursor.key_end.key = ran_end + MAXPHYS + 1;
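
	/*
	 * Overflow illustration (hypothetical numbers): if ran_end sat
	 * within MAXPHYS+1 of the top of the signed 64-bit range,
	 * ran_end + MAXPHYS + 1 would wrap negative; the tmp64 test
	 * catches the wrap and clamps the end key to the maximum
	 * instead.
	 */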

	cursor.flags |= HAMMER_CURSOR_END_INCLUSIVE;

	error = hammer_ip_first(&cursor);
	base_offset = last_offset = 0;
	base_disk_offset = last_disk_offset = 0;

	while (error == 0) {
		/*
		 * Get the base file offset of the record.  The key for
		 * data records is (base + bytes) rather than (base).
		 *
		 * NOTE: rec_offset + rec_len may exceed the end-of-file.
		 *	 The extra bytes should be zero on-disk and the
		 *	 BMAP op should still be ok.
		 */
		base = &cursor.leaf->base;
		rec_offset = base->key - cursor.leaf->data_len;
		rec_len = cursor.leaf->data_len;

		/*
		 * Incorporate any cached truncation.
		 *
		 * NOTE: Modifications to rec_len based on synthesized
		 *	 truncation points remove the guarantee that any
		 *	 extended data on disk is zero (since the truncations
		 *	 may not have taken place on-media yet).
		 */
		if (ip->flags & HAMMER_INODE_TRUNCATED) {
			if (hammer_cursor_ondisk(&cursor) ||
			    cursor.iprec->flush_state == HAMMER_FST_FLUSH) {
				if (ip->trunc_off <= rec_offset)
					rec_len = 0;
				else if (ip->trunc_off < rec_offset + rec_len)
					rec_len = (int)(ip->trunc_off -
							rec_offset);
			}
		}
		if (ip->sync_flags & HAMMER_INODE_TRUNCATED) {
			if (hammer_cursor_ondisk(&cursor)) {
				if (ip->sync_trunc_off <= rec_offset)
					rec_len = 0;
				else if (ip->sync_trunc_off < rec_offset +
							      rec_len)
					rec_len = (int)(ip->sync_trunc_off -
							rec_offset);
			}
		}

		/*
		 * Accumulate information.  If we have hit a discontiguous
		 * block reset base_offset unless we are already beyond the
		 * requested offset.  If we are, that's it, we stop.
		 */
		if (error)
			break;
		if (hammer_cursor_ondisk(&cursor)) {
			disk_offset = cursor.leaf->data_offset;
			if (rec_offset != last_offset ||
			    disk_offset != last_disk_offset) {
				if (rec_offset > ap->a_loffset)
					break;
				base_offset = rec_offset;
				base_disk_offset = disk_offset;
			}
			last_offset = rec_offset + rec_len;
			last_disk_offset = disk_offset + rec_len;
		}
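
		/*
		 * Contiguity example (hypothetical numbers): a record
		 * covering file offsets [0,16384) at disk offset D
		 * followed by one at file offset 16384 and disk offset
		 * D+16384 extends the run; a mismatch in either the
		 * logical or the disk offset restarts the run at the
		 * current record.
		 */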
		error = hammer_ip_next(&cursor);
	}

	kprintf("BMAP %016llx: %016llx - %016llx\n",
		(long long)ap->a_loffset,
		(long long)base_offset,
		(long long)last_offset);
	kprintf("BMAP %16s: %016llx - %016llx\n", "",
		(long long)base_disk_offset,
		(long long)last_disk_offset);

	if (cursor.node)
		hammer_cache_node(&ip->cache[1], cursor.node);

	kprintf("bmap_end2 %016llx ip->cache %p\n",
		(long long)ap->a_loffset, ip->cache[1]);

	hammer_done_cursor(&cursor);
	hammer_done_transaction(&trans);

	/*
	 * If we couldn't find any records or the records we did find were
	 * all behind the requested offset, return failure.  A forward
	 * truncation can leave a hole w/ no on-disk records.
	 */
	if (last_offset == 0 || last_offset < ap->a_loffset)
		return (EOPNOTSUPP);

	/*
	 * Figure out the block size at the requested offset and adjust
	 * our limits so the cluster_read() does not create inappropriately
	 * sized buffer cache buffers.
	 */
	blksize = hammer_blocksize(ap->a_loffset);
	if (hammer_blocksize(base_offset) != blksize) {
		base_offset = hammer_blockdemarc(base_offset, ap->a_loffset);
	}
	if (last_offset != ap->a_loffset &&
	    hammer_blocksize(last_offset - 1) != blksize) {
		last_offset = hammer_blockdemarc(ap->a_loffset,
						 last_offset);
	}

	/*
	 * Returning EOPNOTSUPP simply prevents the direct-IO optimization
	 * from being performed.
	 */
	disk_offset = base_disk_offset + (ap->a_loffset - base_offset);
	if ((disk_offset & HAMMER_OFF_ZONE_MASK) != HAMMER_ZONE_LARGE_DATA) {
		/*
		 * Only large-data zones can be direct-IOd
		 */
		error = EOPNOTSUPP;
	} else if ((disk_offset & HAMMER_BUFMASK) ||
		   (last_offset - ap->a_loffset) < blksize) {
		/*
		 * doffsetp is not aligned or the forward run size does
		 * not cover a whole buffer, disallow the direct I/O.
		 */
		error = EOPNOTSUPP;
	} else {
		*ap->a_doffsetp = disk_offset;
		if (ap->a_runb) {
			*ap->a_runb = ap->a_loffset - base_offset;
			KKASSERT(*ap->a_runb >= 0);
		}
		if (ap->a_runp) {
			*ap->a_runp = last_offset - ap->a_loffset;
			KKASSERT(*ap->a_runp >= 0);
		}
		error = 0;
	}
	return (error);
}

/*
 * Write to a regular file.  Because this is a strategy call the OS is
 * trying to actually get data onto the media.
 */
static
int
hammer_vop_strategy_write(struct vop_strategy_args *ap)
{
	hammer_record_t record;
	hammer_mount_t hmp;
	hammer_inode_t ip;
	struct bio *bio;
	struct buf *bp;
	int blksize;
	int bytes;
	int error;

	bio = ap->a_bio;
	bp = bio->bio_buf;
	ip = ap->a_vp->v_data;
	hmp = ip->hmp;

	blksize = hammer_blocksize(bio->bio_offset);
	KKASSERT(bp->b_bufsize == blksize);

	if (ip->flags & HAMMER_INODE_RO) {
		bp->b_error = EROFS;
		bp->b_flags |= B_ERROR;
		biodone(ap->a_bio);
		return (EROFS);
	}

	/*
	 * Interlock with inode destruction (no in-kernel or directory
	 * topology visibility).  If we queue new IO while trying to
	 * destroy the inode we can deadlock the vtrunc call in
	 * hammer_inode_unloadable_check().
	 *
	 * Besides, there's no point flushing a bp associated with an
	 * inode that is being destroyed on-media and has no kernel
	 * references.
	 */
	if ((ip->flags | ip->sync_flags) &
	    (HAMMER_INODE_DELETING|HAMMER_INODE_DELETED)) {
		bp->b_resid = 0;
		biodone(ap->a_bio);
		return (0);
	}

	/*
	 * Reserve space and issue a direct-write from the front-end.
	 * NOTE: The direct_io code will hammer_bread/bcopy smaller
	 * missing chunks.
	 *
	 * An in-memory record will be installed to reference the storage
	 * until the flusher can get to it.
	 *
	 * Since we own the high level bio the front-end will not try to
	 * do a direct-read until the write completes.
	 *
	 * NOTE: The only time we do not reserve a full-sized buffer's
	 *	 worth of data is if the file is small.  We do not try to
	 *	 allocate a fragment (from the small-data zone) at the end
	 *	 of an otherwise large file as this can lead to wildly
	 *	 separated data.
	 */
	KKASSERT((bio->bio_offset & HAMMER_BUFMASK) == 0);
	KKASSERT(bio->bio_offset < ip->ino_data.size);
	if (bio->bio_offset || ip->ino_data.size > HAMMER_BUFSIZE / 2)
		bytes = bp->b_bufsize;
	else
		bytes = ((int)ip->ino_data.size + 15) & ~15;
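
	/*
	 * e.g. a brand-new 100-byte file: bio_offset is 0 and the file
	 * fits in half a buffer, so only ((100 + 15) & ~15) = 112 bytes
	 * are reserved at 16-byte granularity instead of a full
	 * buffer's worth.
	 */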

	record = hammer_ip_add_bulk(ip, bio->bio_offset, bp->b_data,
				    bytes, &error);

	/*
	 * B_VFSFLAG1 indicates that a REDO_WRITE entry was generated
	 * in hammer_vop_write().  We must flag the record so the proper
	 * REDO_TERM_WRITE entry is generated during the flush.
	 */
	if (record) {
		if (bp->b_flags & B_VFSFLAG1) {
			record->flags |= HAMMER_RECF_REDO;
			bp->b_flags &= ~B_VFSFLAG1;
		}
		hammer_io_direct_write(hmp, bio, record);
		if (ip->rsv_recs > 1 && hmp->rsv_recs > hammer_limit_recs)
			hammer_flush_inode(ip, 0);
	} else {
		bp->b_bio2.bio_offset = NOOFFSET;
		bp->b_error = error;
		bp->b_flags |= B_ERROR;
		biodone(ap->a_bio);
	}
	return (error);
}

/*
 * dounlink - disconnect a directory entry
 *
 * XXX whiteout support not really in yet
 */
static int
hammer_dounlink(hammer_transaction_t trans, struct nchandle *nch,
		struct vnode *dvp, struct ucred *cred,
		int flags, int isdir)
{
	struct namecache *ncp;
	hammer_inode_t dip;
	hammer_inode_t ip;
	struct hammer_cursor cursor;
	int64_t namekey;
	u_int32_t max_iterations;
	int nlen, error;

	/*
	 * Calculate the namekey and setup the key range for the scan.  This
	 * works kinda like a chained hash table where the lower 32 bits
	 * of the namekey synthesize the chain.
	 *
	 * The key range is inclusive of both key_beg and key_end.
	 */
	ncp = nch->ncp;
	dip = VTOI(dvp);

	if (dip->flags & HAMMER_INODE_RO)
		return (EROFS);

	namekey = hammer_directory_namekey(dip, ncp->nc_name, ncp->nc_nlen,
					   &max_iterations);
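
	/*
	 * Different names can hash to the same chain, so the scan below
	 * covers [namekey, namekey + max_iterations]; max_iterations,
	 * returned by the namekey function, bounds how many consecutive
	 * keys a collision chain may occupy.
	 */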

retry:
	hammer_init_cursor(trans, &cursor, &dip->cache[1], dip);
	cursor.key_beg.localization = dip->obj_localization +
				      hammer_dir_localization(dip);
	cursor.key_beg.obj_id = dip->obj_id;
	cursor.key_beg.key = namekey;
	cursor.key_beg.create_tid = 0;
	cursor.key_beg.delete_tid = 0;
	cursor.key_beg.rec_type = HAMMER_RECTYPE_DIRENTRY;
	cursor.key_beg.obj_type = 0;

	cursor.key_end = cursor.key_beg;
	cursor.key_end.key += max_iterations;
	cursor.asof = dip->obj_asof;
	cursor.flags |= HAMMER_CURSOR_END_INCLUSIVE | HAMMER_CURSOR_ASOF;

	/*
	 * Scan all matching records (the chain), locate the one matching
	 * the requested path component.  info->last_error contains the
	 * error code on search termination and could be 0, ENOENT, or
	 * something else.
	 *
	 * The hammer_ip_*() functions merge in-memory records with on-disk
	 * records for the purposes of the search.
	 */
	error = hammer_ip_first(&cursor);

	while (error == 0) {
		error = hammer_ip_resolve_data(&cursor);
		if (error)
			break;
		nlen = cursor.leaf->data_len - HAMMER_ENTRY_NAME_OFF;
		if (ncp->nc_nlen == nlen &&
		    bcmp(ncp->nc_name, cursor.data->entry.name, nlen) == 0) {
			break;
		}
		error = hammer_ip_next(&cursor);
	}

	/*
	 * If all is ok we have to get the inode so we can adjust nlinks.
	 * To avoid a deadlock with the flusher we must release the inode
	 * lock on the directory when acquiring the inode for the entry.
	 *
	 * If the target is a directory, it must be empty.
	 */
	if (error == 0) {
		hammer_unlock(&cursor.ip->lock);
		ip = hammer_get_inode(trans, dip, cursor.data->entry.obj_id,
				      dip->hmp->asof,
				      cursor.data->entry.localization,
				      0, &error);
		hammer_lock_sh(&cursor.ip->lock);
		if (error == ENOENT) {
			kprintf("HAMMER: WARNING: Removing "
				"dirent w/missing inode \"%s\"\n"
				"\tobj_id = %016llx\n",
				ncp->nc_name,
				(long long)cursor.data->entry.obj_id);
			error = 0;
		}

		/*
		 * If isdir >= 0 we validate that the entry is or is not a
		 * directory.  If isdir < 0 we don't care.
		 */
		if (error == 0 && isdir >= 0 && ip) {
			if (isdir &&
			    ip->ino_data.obj_type != HAMMER_OBJTYPE_DIRECTORY) {
				error = ENOTDIR;
			} else if (isdir == 0 &&
			    ip->ino_data.obj_type == HAMMER_OBJTYPE_DIRECTORY) {
				error = EISDIR;
			}
		}

		/*
		 * If we are trying to remove a directory the directory must
		 * be empty.
		 *
		 * The check directory code can loop and deadlock/retry.  Our
		 * own cursor's node locks must be released to avoid a 3-way
		 * deadlock with the flusher if the check directory code
		 * blocks.
		 *
		 * If any changes whatsoever have been made to the cursor
		 * set EDEADLK and retry.
		 *
		 * WARNING: See warnings in hammer_unlock_cursor().
		 */
		if (error == 0 && ip && ip->ino_data.obj_type ==
				       HAMMER_OBJTYPE_DIRECTORY) {
			hammer_unlock_cursor(&cursor);
			error = hammer_ip_check_directory_empty(trans, ip);
			hammer_lock_cursor(&cursor);
			if (cursor.flags & HAMMER_CURSOR_RETEST) {
				kprintf("HAMMER: Warning: avoided deadlock "
					"on rmdir '%s'\n",
					ncp->nc_name);
				error = EDEADLK;
			}
		}

		/*
		 * Delete the directory entry.
		 *
		 * WARNING: hammer_ip_del_directory() may have to terminate
		 * the cursor to avoid a deadlock.  It is ok to call
		 * hammer_done_cursor() twice.
		 */
		if (error == 0) {
			error = hammer_ip_del_directory(trans, &cursor,
							dip, ip);
		}
		hammer_done_cursor(&cursor);
		if (error == 0) {
			cache_setunresolved(nch);
			cache_setvp(nch, NULL);

			/*
			 * XXX locking.  Note: ip->vp might get ripped out
			 * when we setunresolved() the nch since we had
			 * no other reference to it.  In that case ip->vp
			 * will be NULL.
			 */
			if (ip && ip->vp) {
				hammer_knote(ip->vp, NOTE_DELETE);
				cache_inval_vp(ip->vp, CINV_DESTROY);
			}
		}
		if (ip)
			hammer_rel_inode(ip, 0);
	} else {
		hammer_done_cursor(&cursor);
	}
	if (error == EDEADLK)
		goto retry;

	return (error);
}

/************************************************************************
 *			  FIFO AND SPECFS OPS				*
 ************************************************************************/

static int
hammer_vop_fifoclose (struct vop_close_args *ap)
{
	/* XXX update itimes */
	return (VOCALL(&fifo_vnode_vops, &ap->a_head));
}

static int
hammer_vop_fiforead (struct vop_read_args *ap)
{
	int error;

	error = VOCALL(&fifo_vnode_vops, &ap->a_head);
	/* XXX update access time */
	return (error);
}

static int
hammer_vop_fifowrite (struct vop_write_args *ap)
{
	int error;

	error = VOCALL(&fifo_vnode_vops, &ap->a_head);
	/* XXX update access time */
	return (error);
}

static
int
hammer_vop_fifokqfilter(struct vop_kqfilter_args *ap)
{
	int error;

	error = VOCALL(&fifo_vnode_vops, &ap->a_head);
	if (error)
		error = hammer_vop_kqfilter(ap);
	return (error);
}

/************************************************************************
 *			      KQFILTER OPS				*
 ************************************************************************/

static void filt_hammerdetach(struct knote *kn);
static int filt_hammerread(struct knote *kn, long hint);
static int filt_hammerwrite(struct knote *kn, long hint);
static int filt_hammervnode(struct knote *kn, long hint);

static struct filterops hammerread_filtops =
	{ 1, NULL, filt_hammerdetach, filt_hammerread };
static struct filterops hammerwrite_filtops =
	{ 1, NULL, filt_hammerdetach, filt_hammerwrite };
static struct filterops hammervnode_filtops =
	{ 1, NULL, filt_hammerdetach, filt_hammervnode };

static
int
hammer_vop_kqfilter(struct vop_kqfilter_args *ap)
{
	struct vnode *vp = ap->a_vp;
	struct knote *kn = ap->a_kn;
	lwkt_tokref vlock;

	switch (kn->kn_filter) {
	case EVFILT_READ:
		kn->kn_fop = &hammerread_filtops;
		break;
	case EVFILT_WRITE:
		kn->kn_fop = &hammerwrite_filtops;
		break;
	case EVFILT_VNODE:
		kn->kn_fop = &hammervnode_filtops;
		break;
	default:
		return (EOPNOTSUPP);
	}

	kn->kn_hook = (caddr_t)vp;

	lwkt_gettoken(&vlock, &vp->v_token);
	SLIST_INSERT_HEAD(&vp->v_pollinfo.vpi_selinfo.si_note, kn, kn_selnext);
	lwkt_reltoken(&vlock);

	return (0);
}

static void
filt_hammerdetach(struct knote *kn)
{
	struct vnode *vp = (void *)kn->kn_hook;
	lwkt_tokref vlock;

	lwkt_gettoken(&vlock, &vp->v_token);
	SLIST_REMOVE(&vp->v_pollinfo.vpi_selinfo.si_note,
		     kn, knote, kn_selnext);
	lwkt_reltoken(&vlock);
}

static int
filt_hammerread(struct knote *kn, long hint)
{
	struct vnode *vp = (void *)kn->kn_hook;
	hammer_inode_t ip = VTOI(vp);

	if (hint == NOTE_REVOKE) {
		kn->kn_flags |= (EV_EOF | EV_ONESHOT);
		return (1);
	}
	kn->kn_data = ip->ino_data.size - kn->kn_fp->f_offset;
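
	/*
	 * e.g. a 1000-byte file with the descriptor offset at 400
	 * yields kn_data = 600; the EVFILT_READ event stays raised as
	 * long as unread bytes remain.
	 */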
	return (kn->kn_data != 0);
}

static int
filt_hammerwrite(struct knote *kn, long hint)
{
	if (hint == NOTE_REVOKE)
		kn->kn_flags |= (EV_EOF | EV_ONESHOT);
	kn->kn_data = 0;
	return (1);
}

static int
filt_hammervnode(struct knote *kn, long hint)
{
	if (kn->kn_sfflags & hint)
		kn->kn_fflags |= hint;
	if (hint == NOTE_REVOKE) {
		kn->kn_flags |= EV_EOF;
		return (1);
	}
	return (kn->kn_fflags != 0);
}