/*
 * Copyright (c) 2007 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $DragonFly: src/sys/vfs/hammer/hammer_vnops.c,v 1.30 2008/02/10 09:51:01 dillon Exp $
 */
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/fcntl.h>
#include <sys/namecache.h>
#include <sys/vnode.h>
#include <sys/lockf.h>
#include <sys/event.h>
#include <sys/dirent.h>
#include <vm/vm_extern.h>
#include <vfs/fifofs/fifo.h>

#include "hammer.h"
/*static int hammer_vop_vnoperate(struct vop_generic_args *);*/
static int hammer_vop_fsync(struct vop_fsync_args *);
static int hammer_vop_read(struct vop_read_args *);
static int hammer_vop_write(struct vop_write_args *);
static int hammer_vop_access(struct vop_access_args *);
static int hammer_vop_advlock(struct vop_advlock_args *);
static int hammer_vop_close(struct vop_close_args *);
static int hammer_vop_ncreate(struct vop_ncreate_args *);
static int hammer_vop_getattr(struct vop_getattr_args *);
static int hammer_vop_nresolve(struct vop_nresolve_args *);
static int hammer_vop_nlookupdotdot(struct vop_nlookupdotdot_args *);
static int hammer_vop_nlink(struct vop_nlink_args *);
static int hammer_vop_nmkdir(struct vop_nmkdir_args *);
static int hammer_vop_nmknod(struct vop_nmknod_args *);
static int hammer_vop_open(struct vop_open_args *);
static int hammer_vop_pathconf(struct vop_pathconf_args *);
static int hammer_vop_print(struct vop_print_args *);
static int hammer_vop_readdir(struct vop_readdir_args *);
static int hammer_vop_readlink(struct vop_readlink_args *);
static int hammer_vop_nremove(struct vop_nremove_args *);
static int hammer_vop_nrename(struct vop_nrename_args *);
static int hammer_vop_nrmdir(struct vop_nrmdir_args *);
static int hammer_vop_setattr(struct vop_setattr_args *);
static int hammer_vop_strategy(struct vop_strategy_args *);
static int hammer_vop_nsymlink(struct vop_nsymlink_args *);
static int hammer_vop_nwhiteout(struct vop_nwhiteout_args *);
static int hammer_vop_ioctl(struct vop_ioctl_args *);
static int hammer_vop_mountctl(struct vop_mountctl_args *);

static int hammer_vop_fifoclose (struct vop_close_args *);
static int hammer_vop_fiforead (struct vop_read_args *);
static int hammer_vop_fifowrite (struct vop_write_args *);

static int hammer_vop_specclose (struct vop_close_args *);
static int hammer_vop_specread (struct vop_read_args *);
static int hammer_vop_specwrite (struct vop_write_args *);
struct vop_ops hammer_vnode_vops = {
	.vop_default =		vop_defaultop,
	.vop_fsync =		hammer_vop_fsync,
	.vop_getpages =		vop_stdgetpages,
	.vop_putpages =		vop_stdputpages,
	.vop_read =		hammer_vop_read,
	.vop_write =		hammer_vop_write,
	.vop_access =		hammer_vop_access,
	.vop_advlock =		hammer_vop_advlock,
	.vop_close =		hammer_vop_close,
	.vop_ncreate =		hammer_vop_ncreate,
	.vop_getattr =		hammer_vop_getattr,
	.vop_inactive =		hammer_vop_inactive,
	.vop_reclaim =		hammer_vop_reclaim,
	.vop_nresolve =		hammer_vop_nresolve,
	.vop_nlookupdotdot =	hammer_vop_nlookupdotdot,
	.vop_nlink =		hammer_vop_nlink,
	.vop_nmkdir =		hammer_vop_nmkdir,
	.vop_nmknod =		hammer_vop_nmknod,
	.vop_open =		hammer_vop_open,
	.vop_pathconf =		hammer_vop_pathconf,
	.vop_print =		hammer_vop_print,
	.vop_readdir =		hammer_vop_readdir,
	.vop_readlink =		hammer_vop_readlink,
	.vop_nremove =		hammer_vop_nremove,
	.vop_nrename =		hammer_vop_nrename,
	.vop_nrmdir =		hammer_vop_nrmdir,
	.vop_setattr =		hammer_vop_setattr,
	.vop_strategy =		hammer_vop_strategy,
	.vop_nsymlink =		hammer_vop_nsymlink,
	.vop_nwhiteout =	hammer_vop_nwhiteout,
	.vop_ioctl =		hammer_vop_ioctl,
	.vop_mountctl =		hammer_vop_mountctl
};
struct vop_ops hammer_spec_vops = {
	.vop_default =		spec_vnoperate,
	.vop_fsync =		hammer_vop_fsync,
	.vop_read =		hammer_vop_specread,
	.vop_write =		hammer_vop_specwrite,
	.vop_access =		hammer_vop_access,
	.vop_close =		hammer_vop_specclose,
	.vop_getattr =		hammer_vop_getattr,
	.vop_inactive =		hammer_vop_inactive,
	.vop_reclaim =		hammer_vop_reclaim,
	.vop_setattr =		hammer_vop_setattr
};
struct vop_ops hammer_fifo_vops = {
	.vop_default =		fifo_vnoperate,
	.vop_fsync =		hammer_vop_fsync,
	.vop_read =		hammer_vop_fiforead,
	.vop_write =		hammer_vop_fifowrite,
	.vop_access =		hammer_vop_access,
	.vop_close =		hammer_vop_fifoclose,
	.vop_getattr =		hammer_vop_getattr,
	.vop_inactive =		hammer_vop_inactive,
	.vop_reclaim =		hammer_vop_reclaim,
	.vop_setattr =		hammer_vop_setattr
};
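/*
 * Note on the three tables above: any operation without an explicit entry
 * falls through to the .vop_default handler (vop_defaultop for regular
 * HAMMER vnodes, spec_vnoperate for devices, fifo_vnoperate for fifos),
 * so only HAMMER-specific behavior needs to be listed here.
 */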
static int hammer_dounlink(struct nchandle *nch, struct vnode *dvp,
			struct ucred *cred, int flags);
static int hammer_vop_strategy_read(struct vop_strategy_args *ap);
static int hammer_vop_strategy_write(struct vop_strategy_args *ap);
static
int
hammer_vop_vnoperate(struct vop_generic_args *ap)
{
	return (VOCALL(&hammer_vnode_vops, ap));
}
/*
 * hammer_vop_fsync { vp, waitfor }
 */
static
int
hammer_vop_fsync(struct vop_fsync_args *ap)
{
	hammer_inode_t ip = VTOI(ap->a_vp);
	int error;

	error = hammer_sync_inode(ip, ap->a_waitfor, 0);
	return (error);
}
/*
 * hammer_vop_read { vp, uio, ioflag, cred }
 */
hammer_vop_read(struct vop_read_args *ap)
	struct hammer_transaction trans;

	if (ap->a_vp->v_type != VREG)
		return (EINVAL);
	seqcount = ap->a_ioflag >> 16;

	hammer_start_transaction(&trans, ip->hmp);

	/*
	 * Access the data in HAMMER_BUFSIZE blocks via the buffer cache.
	 */
	while (uio->uio_resid > 0 && uio->uio_offset < ip->ino_rec.ino_size) {
		offset = uio->uio_offset & HAMMER_BUFMASK;
		error = cluster_read(ap->a_vp, ip->ino_rec.ino_size,
				     uio->uio_offset - offset, HAMMER_BUFSIZE,
				     MAXBSIZE, seqcount, &bp);
		error = bread(ap->a_vp, uio->uio_offset - offset,
			      HAMMER_BUFSIZE, &bp);
		/* bp->b_flags |= B_CLUSTEROK; temporarily disabled */
		n = HAMMER_BUFSIZE - offset;
		if (n > uio->uio_resid)
			n = uio->uio_resid;
		if (n > ip->ino_rec.ino_size - uio->uio_offset)
			n = (int)(ip->ino_rec.ino_size - uio->uio_offset);
		error = uiomove((char *)bp->b_data + offset, n, uio);
	}
	if ((ip->flags & HAMMER_INODE_RO) == 0 &&
	    (ip->hmp->mp->mnt_flag & MNT_NOATIME) == 0) {
		ip->ino_rec.ino_atime = trans.tid;
		hammer_modify_inode(&trans, ip, HAMMER_INODE_ITIMES);
	}
	hammer_commit_transaction(&trans);
/*
 * hammer_vop_write { vp, uio, ioflag, cred }
 */
hammer_vop_write(struct vop_write_args *ap)
	struct hammer_transaction trans;
	struct hammer_inode *ip;

	if (ap->a_vp->v_type != VREG)
		return (EINVAL);
	if (ip->flags & HAMMER_INODE_RO)
		return (EROFS);

	/*
	 * Create a transaction to cover the operations we perform.
	 */
	hammer_start_transaction(&trans, ip->hmp);

	if (ap->a_ioflag & IO_APPEND)
		uio->uio_offset = ip->ino_rec.ino_size;

	/*
	 * Check for illegal write offsets.  Valid range is 0...2^63-1
	 */
	if (uio->uio_offset < 0 || uio->uio_offset + uio->uio_resid <= 0) {
		hammer_commit_transaction(&trans);
	}

	/*
	 * Access the data in HAMMER_BUFSIZE blocks via the buffer cache.
	 */
	while (uio->uio_resid > 0) {
		offset = uio->uio_offset & HAMMER_BUFMASK;
		n = HAMMER_BUFSIZE - offset;
		if (n > uio->uio_resid)
			n = uio->uio_resid;
		if (uio->uio_offset + n > ip->ino_rec.ino_size) {
			vnode_pager_setsize(ap->a_vp, uio->uio_offset + n);
		}

		if (uio->uio_segflg == UIO_NOCOPY) {
			/*
			 * Issuing a write with the same data backing the
			 * buffer.  Instantiate the buffer to collect the
			 * backing vm pages, then read-in any missing bits.
			 *
			 * This case is used by vop_stdputpages().
			 */
			bp = getblk(ap->a_vp, uio->uio_offset - offset,
				    HAMMER_BUFSIZE, GETBLK_BHEAVY, 0);
			if ((bp->b_flags & B_CACHE) == 0) {
				error = bread(ap->a_vp,
					      uio->uio_offset - offset,
					      HAMMER_BUFSIZE, &bp);
			}
		} else if (offset == 0 && uio->uio_resid >= HAMMER_BUFSIZE) {
			/*
			 * entirely overwrite the buffer
			 */
			bp = getblk(ap->a_vp, uio->uio_offset - offset,
				    HAMMER_BUFSIZE, GETBLK_BHEAVY, 0);
		} else if (offset == 0 && uio->uio_offset >= ip->ino_rec.ino_size) {
			bp = getblk(ap->a_vp, uio->uio_offset - offset,
				    HAMMER_BUFSIZE, GETBLK_BHEAVY, 0);
		} else {
			/*
			 * Partial overwrite, read in any missing bits then
			 * replace the portion being written.
			 */
			error = bread(ap->a_vp, uio->uio_offset - offset,
				      HAMMER_BUFSIZE, &bp);
		}
		error = uiomove((char *)bp->b_data + offset, n, uio);

		/*
		 * If we screwed up we have to undo any VM size changes we
		 * made.
		 */
		vtruncbuf(ap->a_vp, ip->ino_rec.ino_size,
			  HAMMER_BUFSIZE);

		/* bp->b_flags |= B_CLUSTEROK; temporarily disabled */
		if (ip->ino_rec.ino_size < uio->uio_offset) {
			ip->ino_rec.ino_size = uio->uio_offset;
			flags = HAMMER_INODE_RDIRTY;
			vnode_pager_setsize(ap->a_vp, ip->ino_rec.ino_size);
		}
		ip->ino_rec.ino_mtime = trans.tid;
		flags |= HAMMER_INODE_ITIMES | HAMMER_INODE_BUFS;
		hammer_modify_inode(&trans, ip, flags);

		/*
		 * The file write must be tagged with the same TID as the
		 * inode, for consistency in case the inode changed size.
		 * This guarantees the on-disk data records will have a
		 * TID <= the inode TID representing the size change.
		 *
		 * If a prior write has not yet flushed, retain its TID.
		 */
		bp->b_tid = ip->last_tid;

		if (ap->a_ioflag & IO_SYNC) {
		} else if (ap->a_ioflag & IO_DIRECT) {
	}
	hammer_abort_transaction(&trans);
	hammer_commit_transaction(&trans);
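/*
 * Two details of the write path above worth spelling out:
 *
 * - The range check (uio_offset < 0 || uio_offset + uio_resid <= 0)
 *   rejects negative offsets and also catches writes whose end would
 *   wrap past 2^63-1, because the signed 64 bit sum goes non-positive
 *   on overflow.
 *
 * - Tagging bp->b_tid with ip->last_tid ties the dirty buffer to the
 *   transaction that grew the inode, so the data records flushed later
 *   by the strategy path carry a TID <= the inode's size-change TID
 *   (see hammer_vop_strategy_write() below).
 */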
/*
 * hammer_vop_access { vp, mode, cred }
 */
static
int
hammer_vop_access(struct vop_access_args *ap)
{
	struct hammer_inode *ip = VTOI(ap->a_vp);
	uid_t uid;
	gid_t gid;
	int error;

	uid = hammer_to_unix_xid(&ip->ino_data.uid);
	gid = hammer_to_unix_xid(&ip->ino_data.gid);

	error = vop_helper_access(ap, uid, gid, ip->ino_data.mode,
				  ip->ino_data.uflags);
	return (error);
}
/*
 * hammer_vop_advlock { vp, id, op, fl, flags }
 */
static
int
hammer_vop_advlock(struct vop_advlock_args *ap)
{
	struct hammer_inode *ip = VTOI(ap->a_vp);

	return (lf_advlock(ap, &ip->advlock, ip->ino_rec.ino_size));
}
/*
 * hammer_vop_close { vp, fflag }
 */
static
int
hammer_vop_close(struct vop_close_args *ap)
{
	return (vop_stdclose(ap));
}
/*
 * hammer_vop_ncreate { nch, dvp, vpp, cred, vap }
 *
 * The operating system has already ensured that the directory entry
 * does not exist and done all appropriate namespace locking.
 */
hammer_vop_ncreate(struct vop_ncreate_args *ap)
	struct hammer_transaction trans;
	struct hammer_inode *dip;
	struct hammer_inode *nip;
	struct nchandle *nch;

	dip = VTOI(ap->a_dvp);

	if (dip->flags & HAMMER_INODE_RO)
		return (EROFS);

	/*
	 * Create a transaction to cover the operations we perform.
	 */
	hammer_start_transaction(&trans, dip->hmp);

	/*
	 * Create a new filesystem object of the requested type.  The
	 * returned inode will be referenced but not locked.
	 */
	error = hammer_create_inode(&trans, ap->a_vap, ap->a_cred, dip, &nip);
	if (error) {
		kprintf("hammer_create_inode error %d\n", error);
		hammer_abort_transaction(&trans);
	}

	/*
	 * Add the new filesystem object to the directory.  This will also
	 * bump the inode's link count.
	 */
	error = hammer_ip_add_directory(&trans, dip, nch->ncp, nip);
	if (error) {
		kprintf("hammer_ip_add_directory error %d\n", error);
		hammer_rel_inode(nip, 0);
		hammer_abort_transaction(&trans);
	}

	hammer_commit_transaction(&trans);
	error = hammer_get_vnode(nip, LK_EXCLUSIVE, ap->a_vpp);
	hammer_rel_inode(nip, 0);

	cache_setunresolved(ap->a_nch);
	cache_setvp(ap->a_nch, *ap->a_vpp);
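/*
 * The create-style operations in this file (ncreate above, nmkdir,
 * nmknod, nsymlink) all follow the same pattern: start a transaction,
 * call hammer_create_inode() (which returns a referenced but unlocked
 * inode), add it to the directory with hammer_ip_add_directory()
 * (which bumps nlinks), and on failure drop the inode reference and
 * abort the transaction.  On success the transaction is committed, the
 * vnode is obtained with hammer_get_vnode() and the namecache entry is
 * resolved via cache_setunresolved()/cache_setvp().
 */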
/*
 * hammer_vop_getattr { vp, vap }
 */
hammer_vop_getattr(struct vop_getattr_args *ap)
	struct hammer_inode *ip = VTOI(ap->a_vp);
	struct vattr *vap = ap->a_vap;

	if (cache_check_fsmid_vp(ap->a_vp, &ip->fsmid) &&
	    (vp->v_mount->mnt_flag & MNT_RDONLY) == 0 &&

	hammer_itimes(ap->a_vp);

	vap->va_fsid = ip->hmp->fsid_udev;
	vap->va_fileid = ip->ino_rec.base.base.obj_id;
	vap->va_mode = ip->ino_data.mode;
	vap->va_nlink = ip->ino_rec.ino_nlinks;
	vap->va_uid = hammer_to_unix_xid(&ip->ino_data.uid);
	vap->va_gid = hammer_to_unix_xid(&ip->ino_data.gid);
	vap->va_size = ip->ino_rec.ino_size;
	hammer_to_timespec(ip->ino_rec.ino_atime, &vap->va_atime);
	hammer_to_timespec(ip->ino_rec.ino_mtime, &vap->va_mtime);
	hammer_to_timespec(ip->ino_data.ctime, &vap->va_ctime);
	vap->va_flags = ip->ino_data.uflags;
	vap->va_gen = 1;	/* hammer inums are unique for all time */
	vap->va_blocksize = 32768; /* XXX - extract from root volume */
	vap->va_bytes = ip->ino_rec.ino_size;
	vap->va_type = hammer_get_vnode_type(ip->ino_rec.base.base.obj_type);
	vap->va_filerev = 0;	/* XXX */
	/* mtime uniquely identifies any adjustments made to the file */
	vap->va_fsmid = ip->ino_rec.ino_mtime;
	vap->va_uid_uuid = ip->ino_data.uid;
	vap->va_gid_uuid = ip->ino_data.gid;
	vap->va_fsid_uuid = ip->hmp->fsid;
	vap->va_vaflags = VA_UID_UUID_VALID | VA_GID_UUID_VALID |
			  VA_FSID_UUID_VALID;

	switch (ip->ino_rec.base.base.obj_type) {
	case HAMMER_OBJTYPE_CDEV:
	case HAMMER_OBJTYPE_BDEV:
		vap->va_rmajor = ip->ino_data.rmajor;
		vap->va_rminor = ip->ino_data.rminor;
	}
/*
 * hammer_vop_nresolve { nch, dvp, cred }
 *
 * Locate the requested directory entry.
 */
hammer_vop_nresolve(struct vop_nresolve_args *ap)
	struct namecache *ncp;
	struct hammer_cursor cursor;
	union hammer_record_ondisk *rec;

	/*
	 * Misc initialization, plus handle as-of name extensions.  Look for
	 * the '@@' extension.  Note that as-of files and directories cannot
	 * be modified.
	 */
	dip = VTOI(ap->a_dvp);
	ncp = ap->a_nch->ncp;
	asof = dip->obj_asof;

	for (i = 0; i < nlen; ++i) {
		if (ncp->nc_name[i] == '@' && ncp->nc_name[i+1] == '@') {
			asof = hammer_str_to_tid(ncp->nc_name + i + 2);
			flags |= HAMMER_INODE_RO;
		}
	}

	/*
	 * If there is no path component the time extension is relative to
	 */
	ip = hammer_get_inode(dip->hmp, &dip->cache[1], dip->obj_id,
			      asof, flags, &error);
	error = hammer_get_vnode(ip, LK_EXCLUSIVE, &vp);
	hammer_rel_inode(ip, 0);
	cache_setvp(ap->a_nch, vp);

	/*
	 * Calculate the namekey and setup the key range for the scan.  This
	 * works kinda like a chained hash table where the lower 32 bits
	 * of the namekey synthesize the chain.
	 *
	 * The key range is inclusive of both key_beg and key_end.
	 */
	namekey = hammer_directory_namekey(ncp->nc_name, nlen);

	error = hammer_init_cursor_hmp(&cursor, &dip->cache[0], dip->hmp);
	cursor.key_beg.obj_id = dip->obj_id;
	cursor.key_beg.key = namekey;
	cursor.key_beg.create_tid = 0;
	cursor.key_beg.delete_tid = 0;
	cursor.key_beg.rec_type = HAMMER_RECTYPE_DIRENTRY;
	cursor.key_beg.obj_type = 0;

	cursor.key_end = cursor.key_beg;
	cursor.key_end.key |= 0xFFFFFFFFULL;

	cursor.flags |= HAMMER_CURSOR_END_INCLUSIVE | HAMMER_CURSOR_ASOF;
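	/*
	 * Sketch of the directory-key "chained hash" set up above.  The
	 * exact construction of the namekey lives in
	 * hammer_directory_namekey() and is not shown here; all the cursor
	 * setup relies on is that entries hashing to the same chain share
	 * the upper 32 bits of the 64 bit key.  Setting
	 *
	 *	cursor.key_beg.key = namekey;
	 *	cursor.key_end.key = namekey | 0xFFFFFFFFULL;
	 *
	 * therefore scans the whole chain (inclusive on both ends), and the
	 * scan loop below resolves collisions by comparing each record's
	 * stored name against ncp->nc_name.
	 */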
	/*
	 * Scan all matching records (the chain), locate the one matching
	 * the requested path component.
	 *
	 * The hammer_ip_*() functions merge in-memory records with on-disk
	 * records for the purposes of the search.
	 */
	error = hammer_ip_first(&cursor, dip);
	error = hammer_ip_resolve_data(&cursor);
	if (nlen == rec->entry.base.data_len &&
	    bcmp(ncp->nc_name, cursor.data, nlen) == 0) {
		obj_id = rec->entry.obj_id;
	}
	error = hammer_ip_next(&cursor);
	hammer_done_cursor(&cursor);

	ip = hammer_get_inode(dip->hmp, &dip->cache[1],
			      obj_id, asof, flags, &error);
	error = hammer_get_vnode(ip, LK_EXCLUSIVE, &vp);
	hammer_rel_inode(ip, 0);
	cache_setvp(ap->a_nch, vp);
	} else if (error == ENOENT) {
		cache_setvp(ap->a_nch, NULL);
/*
 * hammer_vop_nlookupdotdot { dvp, vpp, cred }
 *
 * Locate the parent directory of a directory vnode.
 *
 * dvp is referenced but not locked.  *vpp must be returned referenced and
 * locked.  A parent_obj_id of 0 does not necessarily indicate that we are
 * at the root, instead it could indicate that the directory we were in was
 *
 * NOTE: as-of sequences are not linked into the directory structure.  If
 * we are at the root with a different asof than the mount point, reload
 * the same directory with the mount point's asof.  I'm not sure what this
 * will do to NFS.  We encode ASOF stamps in NFS file handles so it might not
 * get confused, but it hasn't been tested.
 */
hammer_vop_nlookupdotdot(struct vop_nlookupdotdot_args *ap)
	struct hammer_inode *dip;
	struct hammer_inode *ip;
	int64_t parent_obj_id;

	dip = VTOI(ap->a_dvp);
	asof = dip->obj_asof;
	parent_obj_id = dip->ino_data.parent_obj_id;

	if (parent_obj_id == 0) {
		if (dip->obj_id == HAMMER_OBJID_ROOT &&
		    asof != dip->hmp->asof) {
			parent_obj_id = dip->obj_id;
			asof = dip->hmp->asof;
			*ap->a_fakename = kmalloc(19, M_TEMP, M_WAITOK);
			ksnprintf(*ap->a_fakename, 19, "0x%016llx",

	ip = hammer_get_inode(dip->hmp, &dip->cache[1], parent_obj_id,
			      asof, dip->flags, &error);
	error = hammer_get_vnode(ip, LK_EXCLUSIVE, ap->a_vpp);
	hammer_rel_inode(ip, 0);
/*
 * hammer_vop_nlink { nch, dvp, vp, cred }
 */
hammer_vop_nlink(struct vop_nlink_args *ap)
	struct hammer_transaction trans;
	struct hammer_inode *dip;
	struct hammer_inode *ip;
	struct nchandle *nch;

	dip = VTOI(ap->a_dvp);

	if (dip->flags & HAMMER_INODE_RO)
		return (EROFS);
	if (ip->flags & HAMMER_INODE_RO)
		return (EROFS);

	/*
	 * Create a transaction to cover the operations we perform.
	 */
	hammer_start_transaction(&trans, dip->hmp);

	/*
	 * Add the filesystem object to the directory.  Note that neither
	 * dip nor ip are referenced or locked, but their vnodes are
	 * referenced.  This function will bump the inode's link count.
	 */
	error = hammer_ip_add_directory(&trans, dip, nch->ncp, ip);
	if (error) {
		hammer_abort_transaction(&trans);
	} else {
		cache_setunresolved(nch);
		cache_setvp(nch, ap->a_vp);
		hammer_commit_transaction(&trans);
	}
/*
 * hammer_vop_nmkdir { nch, dvp, vpp, cred, vap }
 *
 * The operating system has already ensured that the directory entry
 * does not exist and done all appropriate namespace locking.
 */
hammer_vop_nmkdir(struct vop_nmkdir_args *ap)
	struct hammer_transaction trans;
	struct hammer_inode *dip;
	struct hammer_inode *nip;
	struct nchandle *nch;

	dip = VTOI(ap->a_dvp);

	if (dip->flags & HAMMER_INODE_RO)
		return (EROFS);

	/*
	 * Create a transaction to cover the operations we perform.
	 */
	hammer_start_transaction(&trans, dip->hmp);

	/*
	 * Create a new filesystem object of the requested type.  The
	 * returned inode will be referenced but not locked.
	 */
	error = hammer_create_inode(&trans, ap->a_vap, ap->a_cred, dip, &nip);
	if (error) {
		kprintf("hammer_mkdir error %d\n", error);
		hammer_abort_transaction(&trans);
	}

	/*
	 * Add the new filesystem object to the directory.  This will also
	 * bump the inode's link count.
	 */
	error = hammer_ip_add_directory(&trans, dip, nch->ncp, nip);
	if (error) {
		kprintf("hammer_mkdir (add) error %d\n", error);
		hammer_rel_inode(nip, 0);
		hammer_abort_transaction(&trans);
	}

	hammer_commit_transaction(&trans);
	error = hammer_get_vnode(nip, LK_EXCLUSIVE, ap->a_vpp);
	hammer_rel_inode(nip, 0);

	cache_setunresolved(ap->a_nch);
	cache_setvp(ap->a_nch, *ap->a_vpp);
/*
 * hammer_vop_nmknod { nch, dvp, vpp, cred, vap }
 *
 * The operating system has already ensured that the directory entry
 * does not exist and done all appropriate namespace locking.
 */
hammer_vop_nmknod(struct vop_nmknod_args *ap)
	struct hammer_transaction trans;
	struct hammer_inode *dip;
	struct hammer_inode *nip;
	struct nchandle *nch;

	dip = VTOI(ap->a_dvp);

	if (dip->flags & HAMMER_INODE_RO)
		return (EROFS);

	/*
	 * Create a transaction to cover the operations we perform.
	 */
	hammer_start_transaction(&trans, dip->hmp);

	/*
	 * Create a new filesystem object of the requested type.  The
	 * returned inode will be referenced but not locked.
	 */
	error = hammer_create_inode(&trans, ap->a_vap, ap->a_cred, dip, &nip);
	if (error) {
		hammer_abort_transaction(&trans);
	}

	/*
	 * Add the new filesystem object to the directory.  This will also
	 * bump the inode's link count.
	 */
	error = hammer_ip_add_directory(&trans, dip, nch->ncp, nip);
	if (error) {
		hammer_rel_inode(nip, 0);
		hammer_abort_transaction(&trans);
	}

	hammer_commit_transaction(&trans);
	error = hammer_get_vnode(nip, LK_EXCLUSIVE, ap->a_vpp);
	hammer_rel_inode(nip, 0);

	cache_setunresolved(ap->a_nch);
	cache_setvp(ap->a_nch, *ap->a_vpp);
/*
 * hammer_vop_open { vp, mode, cred, fp }
 */
static
int
hammer_vop_open(struct vop_open_args *ap)
{
	if ((ap->a_mode & FWRITE) && (VTOI(ap->a_vp)->flags & HAMMER_INODE_RO))
		return (EROFS);

	return(vop_stdopen(ap));
}
/*
 * hammer_vop_pathconf { vp, name, retval }
 */
hammer_vop_pathconf(struct vop_pathconf_args *ap)

/*
 * hammer_vop_print { vp }
 */
hammer_vop_print(struct vop_print_args *ap)
/*
 * hammer_vop_readdir { vp, uio, cred, *eofflag, *ncookies, off_t **cookies }
 */
hammer_vop_readdir(struct vop_readdir_args *ap)
	struct hammer_cursor cursor;
	struct hammer_inode *ip;
	hammer_record_ondisk_t rec;
	hammer_base_elm_t base;

	saveoff = uio->uio_offset;

	if (ap->a_ncookies) {
		ncookies = uio->uio_resid / 16 + 1;
		cookies = kmalloc(ncookies * sizeof(off_t), M_TEMP, M_WAITOK);
	}

	/*
	 * Handle artificial entries
	 */
	r = vop_write_dirent(&error, uio, ip->obj_id, DT_DIR, 1, ".");
	cookies[cookie_index] = saveoff;
	if (cookie_index == ncookies)

	if (ip->ino_data.parent_obj_id) {
		r = vop_write_dirent(&error, uio,
				     ip->ino_data.parent_obj_id,
		r = vop_write_dirent(&error, uio,
				     ip->obj_id, DT_DIR, 2, "..");
	cookies[cookie_index] = saveoff;
	if (cookie_index == ncookies)

	/*
	 * Key range (begin and end inclusive) to scan.  Directory keys
	 * directly translate to a 64 bit 'seek' position.
	 */
	hammer_init_cursor_hmp(&cursor, &ip->cache[0], ip->hmp);
	cursor.key_beg.obj_id = ip->obj_id;
	cursor.key_beg.create_tid = 0;
	cursor.key_beg.delete_tid = 0;
	cursor.key_beg.rec_type = HAMMER_RECTYPE_DIRENTRY;
	cursor.key_beg.obj_type = 0;
	cursor.key_beg.key = saveoff;

	cursor.key_end = cursor.key_beg;
	cursor.key_end.key = HAMMER_MAX_KEY;
	cursor.asof = ip->obj_asof;
	cursor.flags |= HAMMER_CURSOR_END_INCLUSIVE | HAMMER_CURSOR_ASOF;

	error = hammer_ip_first(&cursor, ip);

	while (error == 0) {
		error = hammer_ip_resolve_record_and_data(&cursor);
		rec = cursor.record;
		base = &rec->base.base;
		saveoff = base->key;

		if (base->obj_id != ip->obj_id)
			panic("readdir: bad record at %p", cursor.node);

		r = vop_write_dirent(
			&error, uio, rec->entry.obj_id,
			hammer_get_dtype(rec->entry.base.base.obj_type),
			rec->entry.base.data_len,
			(void *)cursor.data);
		cookies[cookie_index] = base->key;
		if (cookie_index == ncookies)
			break;
		error = hammer_ip_next(&cursor);
	}
	hammer_done_cursor(&cursor);

	*ap->a_eofflag = (error == ENOENT);
	uio->uio_offset = saveoff;
	if (error && cookie_index == 0) {
		if (error == ENOENT)
			error = 0;
		kfree(cookies, M_TEMP);
		*ap->a_ncookies = 0;
		*ap->a_cookies = NULL;
	} else {
		if (error == ENOENT)
			error = 0;
		*ap->a_ncookies = cookie_index;
		*ap->a_cookies = cookies;
	}
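/*
 * Note on the readdir implementation above: directory keys double as the
 * uio/seek offset (saveoff), so resuming a partially read directory just
 * means restarting the cursor scan at key_beg.key == saveoff.  The cookie
 * array is sized as uio_resid / 16 + 1 on the assumption that each
 * returned dirent consumes at least 16 bytes of the caller's buffer,
 * which bounds how many cookies can possibly be emitted in one call.
 */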
/*
 * hammer_vop_readlink { vp, uio, cred }
 */
hammer_vop_readlink(struct vop_readlink_args *ap)
	struct hammer_cursor cursor;
	struct hammer_inode *ip;

	ip = VTOI(ap->a_vp);
	hammer_init_cursor_hmp(&cursor, &ip->cache[0], ip->hmp);

	/*
	 * Key range (begin and end inclusive) to scan.  Directory keys
	 * directly translate to a 64 bit 'seek' position.
	 */
	cursor.key_beg.obj_id = ip->obj_id;
	cursor.key_beg.create_tid = 0;
	cursor.key_beg.delete_tid = 0;
	cursor.key_beg.rec_type = HAMMER_RECTYPE_FIX;
	cursor.key_beg.obj_type = 0;
	cursor.key_beg.key = HAMMER_FIXKEY_SYMLINK;
	cursor.asof = ip->obj_asof;
	cursor.flags |= HAMMER_CURSOR_ASOF;

	error = hammer_ip_lookup(&cursor, ip);
	error = hammer_ip_resolve_data(&cursor);
	error = uiomove((char *)cursor.data,
			cursor.record->base.data_len,
			ap->a_uio);
	hammer_done_cursor(&cursor);
/*
 * hammer_vop_nremove { nch, dvp, cred }
 */
static
int
hammer_vop_nremove(struct vop_nremove_args *ap)
{
	return(hammer_dounlink(ap->a_nch, ap->a_dvp, ap->a_cred, 0));
}
/*
 * hammer_vop_nrename { fnch, tnch, fdvp, tdvp, cred }
 */
hammer_vop_nrename(struct vop_nrename_args *ap)
	struct hammer_transaction trans;
	struct namecache *fncp;
	struct namecache *tncp;
	struct hammer_inode *fdip;
	struct hammer_inode *tdip;
	struct hammer_inode *ip;
	struct hammer_cursor cursor;
	union hammer_record_ondisk *rec;

	fdip = VTOI(ap->a_fdvp);
	tdip = VTOI(ap->a_tdvp);
	fncp = ap->a_fnch->ncp;
	tncp = ap->a_tnch->ncp;
	ip = VTOI(fncp->nc_vp);
	KKASSERT(ip != NULL);

	if (fdip->flags & HAMMER_INODE_RO)
		return (EROFS);
	if (tdip->flags & HAMMER_INODE_RO)
		return (EROFS);
	if (ip->flags & HAMMER_INODE_RO)
		return (EROFS);

	hammer_start_transaction(&trans, fdip->hmp);

	/*
	 * Remove tncp from the target directory and then link ip as
	 * tncp. XXX pass trans to dounlink
	 *
	 * Force the inode sync-time to match the transaction so it is
	 * in-sync with the creation of the target directory entry.
	 */
	error = hammer_dounlink(ap->a_tnch, ap->a_tdvp, ap->a_cred, 0);
	if (error == 0 || error == ENOENT) {
		error = hammer_ip_add_directory(&trans, tdip, tncp, ip);
		ip->ino_data.parent_obj_id = tdip->obj_id;
		hammer_modify_inode(&trans, ip,
				    HAMMER_INODE_DDIRTY | HAMMER_INODE_TIDLOCKED);
	}
	goto failed; /* XXX */

	/*
	 * Locate the record in the originating directory and remove it.
	 *
	 * Calculate the namekey and setup the key range for the scan.  This
	 * works kinda like a chained hash table where the lower 32 bits
	 * of the namekey synthesize the chain.
	 *
	 * The key range is inclusive of both key_beg and key_end.
	 */
	namekey = hammer_directory_namekey(fncp->nc_name, fncp->nc_nlen);

	hammer_init_cursor_hmp(&cursor, &fdip->cache[0], fdip->hmp);
	cursor.key_beg.obj_id = fdip->obj_id;
	cursor.key_beg.key = namekey;
	cursor.key_beg.create_tid = 0;
	cursor.key_beg.delete_tid = 0;
	cursor.key_beg.rec_type = HAMMER_RECTYPE_DIRENTRY;
	cursor.key_beg.obj_type = 0;

	cursor.key_end = cursor.key_beg;
	cursor.key_end.key |= 0xFFFFFFFFULL;
	cursor.asof = fdip->obj_asof;
	cursor.flags |= HAMMER_CURSOR_END_INCLUSIVE | HAMMER_CURSOR_ASOF;

	/*
	 * Scan all matching records (the chain), locate the one matching
	 * the requested path component.
	 *
	 * The hammer_ip_*() functions merge in-memory records with on-disk
	 * records for the purposes of the search.
	 */
	error = hammer_ip_first(&cursor, fdip);
	while (error == 0) {
		if (hammer_ip_resolve_data(&cursor) != 0)
			break;
		rec = cursor.record;
		if (fncp->nc_nlen == rec->entry.base.data_len &&
		    bcmp(fncp->nc_name, cursor.data, fncp->nc_nlen) == 0) {
			break;
		}
		error = hammer_ip_next(&cursor);
	}

	/*
	 * If all is ok we have to get the inode so we can adjust nlinks.
	 *
	 * WARNING: hammer_ip_del_directory() may have to terminate the
	 * cursor to avoid a recursion.  It's ok to call hammer_done_cursor()
	 * twice.
	 */
	error = hammer_ip_del_directory(&trans, &cursor, fdip, ip);
	hammer_done_cursor(&cursor);
	cache_rename(ap->a_fnch, ap->a_tnch);
	if (error == EDEADLK)
	hammer_commit_transaction(&trans);
	hammer_abort_transaction(&trans);
/*
 * hammer_vop_nrmdir { nch, dvp, cred }
 */
static
int
hammer_vop_nrmdir(struct vop_nrmdir_args *ap)
{
	return(hammer_dounlink(ap->a_nch, ap->a_dvp, ap->a_cred, 0));
}
/*
 * hammer_vop_setattr { vp, vap, cred }
 */
hammer_vop_setattr(struct vop_setattr_args *ap)
	struct hammer_transaction trans;
	struct hammer_inode *ip;
	int64_t aligned_size;

	ip = ap->a_vp->v_data;

	if (ap->a_vp->v_mount->mnt_flag & MNT_RDONLY)
	if (ip->flags & HAMMER_INODE_RO)

	hammer_start_transaction(&trans, ip->hmp);

	if (vap->va_flags != VNOVAL) {
		flags = ip->ino_data.uflags;
		error = vop_helper_setattr_flags(&flags, vap->va_flags,
					hammer_to_unix_xid(&ip->ino_data.uid),
		if (ip->ino_data.uflags != flags) {
			ip->ino_data.uflags = flags;
			modflags |= HAMMER_INODE_DDIRTY;
		}
		if (ip->ino_data.uflags & (IMMUTABLE | APPEND)) {
	if (ip->ino_data.uflags & (IMMUTABLE | APPEND)) {
	if (vap->va_uid != (uid_t)VNOVAL) {
		hammer_guid_to_uuid(&uuid, vap->va_uid);
		if (bcmp(&uuid, &ip->ino_data.uid, sizeof(uuid)) != 0) {
			ip->ino_data.uid = uuid;
			modflags |= HAMMER_INODE_DDIRTY;
		}
	}
	if (vap->va_gid != (uid_t)VNOVAL) {
		hammer_guid_to_uuid(&uuid, vap->va_gid);
		if (bcmp(&uuid, &ip->ino_data.gid, sizeof(uuid)) != 0) {
			ip->ino_data.gid = uuid;
			modflags |= HAMMER_INODE_DDIRTY;
		}
	}
	while (vap->va_size != VNOVAL && ip->ino_rec.ino_size != vap->va_size) {
		switch(ap->a_vp->v_type) {
		if (vap->va_size == ip->ino_rec.ino_size)
		if (vap->va_size < ip->ino_rec.ino_size) {
			vtruncbuf(ap->a_vp, vap->va_size,
			vnode_pager_setsize(ap->a_vp, vap->va_size);
		}
		ip->ino_rec.ino_size = vap->va_size;
		modflags |= HAMMER_INODE_RDIRTY;
		aligned_size = (vap->va_size + HAMMER_BUFMASK) &
			       ~(int64_t)HAMMER_BUFMASK;
		error = hammer_ip_delete_range(&trans, ip,
					       0x7FFFFFFFFFFFFFFFLL);
		/*
		 * If truncating we have to clean out a portion of
		 * the last block on-disk.
		 */
		if (truncating && error == 0 &&
		    vap->va_size < aligned_size) {
			offset = vap->va_size & HAMMER_BUFMASK;
			error = bread(ap->a_vp,
				      aligned_size - HAMMER_BUFSIZE,
				      HAMMER_BUFSIZE, &bp);
			bzero(bp->b_data + offset,
			      HAMMER_BUFSIZE - offset);
		}
		error = hammer_ip_delete_range(&trans, ip,
					       0x7FFFFFFFFFFFFFFFLL);
		ip->ino_rec.ino_size = vap->va_size;
		modflags |= HAMMER_INODE_RDIRTY;
	}
	if (vap->va_atime.tv_sec != VNOVAL) {
		ip->ino_rec.ino_atime =
			hammer_timespec_to_transid(&vap->va_atime);
		modflags |= HAMMER_INODE_ITIMES;
	}
	if (vap->va_mtime.tv_sec != VNOVAL) {
		ip->ino_rec.ino_mtime =
			hammer_timespec_to_transid(&vap->va_mtime);
		modflags |= HAMMER_INODE_ITIMES;
	}
	if (vap->va_mode != (mode_t)VNOVAL) {
		if (ip->ino_data.mode != vap->va_mode) {
			ip->ino_data.mode = vap->va_mode;
			modflags |= HAMMER_INODE_DDIRTY;
		}
	}
	hammer_abort_transaction(&trans);
	hammer_modify_inode(&trans, ip, modflags);
	hammer_commit_transaction(&trans);
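/*
 * Truncation sketch for the setattr path above, assuming the usual 16KB
 * HAMMER_BUFSIZE: shrinking a file to va_size first rounds the size up to
 * aligned_size = (va_size + HAMMER_BUFMASK) & ~HAMMER_BUFMASK, deletes
 * all data records from aligned_size onward, and then bzero()s the tail
 * of the last remaining buffer from (va_size & HAMMER_BUFMASK) to the
 * end of the buffer so stale bytes past the new EOF are not exposed.
 */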
/*
 * hammer_vop_nsymlink { nch, dvp, vpp, cred, vap, target }
 */
hammer_vop_nsymlink(struct vop_nsymlink_args *ap)
	struct hammer_transaction trans;
	struct hammer_inode *dip;
	struct hammer_inode *nip;
	struct nchandle *nch;
	hammer_record_t record;

	ap->a_vap->va_type = VLNK;

	dip = VTOI(ap->a_dvp);

	if (dip->flags & HAMMER_INODE_RO)
		return (EROFS);

	/*
	 * Create a transaction to cover the operations we perform.
	 */
	hammer_start_transaction(&trans, dip->hmp);

	/*
	 * Create a new filesystem object of the requested type.  The
	 * returned inode will be referenced but not locked.
	 */
	error = hammer_create_inode(&trans, ap->a_vap, ap->a_cred, dip, &nip);
	if (error) {
		hammer_abort_transaction(&trans);
	}

	/*
	 * Add the new filesystem object to the directory.  This will also
	 * bump the inode's link count.
	 */
	error = hammer_ip_add_directory(&trans, dip, nch->ncp, nip);

	/*
	 * Add a record representing the symlink.  symlink stores the link
	 * as pure data, not a string, and is not \0 terminated.
	 */
	record = hammer_alloc_mem_record(nip);
	bytes = strlen(ap->a_target);

	record->rec.base.base.key = HAMMER_FIXKEY_SYMLINK;
	record->rec.base.base.rec_type = HAMMER_RECTYPE_FIX;
	record->rec.base.data_len = bytes;
	record->data = (void *)ap->a_target;
	/* will be reallocated by routine below */
	error = hammer_ip_add_record(&trans, record);

	/*
	 * Set the file size to the length of the link.
	 */
	nip->ino_rec.ino_size = bytes;
	hammer_modify_inode(&trans, nip, HAMMER_INODE_RDIRTY);

	if (error) {
		hammer_rel_inode(nip, 0);
		hammer_abort_transaction(&trans);
	}

	hammer_commit_transaction(&trans);
	error = hammer_get_vnode(nip, LK_EXCLUSIVE, ap->a_vpp);
	hammer_rel_inode(nip, 0);

	cache_setunresolved(ap->a_nch);
	cache_setvp(ap->a_nch, *ap->a_vpp);
/*
 * hammer_vop_nwhiteout { nch, dvp, cred, flags }
 */
static
int
hammer_vop_nwhiteout(struct vop_nwhiteout_args *ap)
{
	return(hammer_dounlink(ap->a_nch, ap->a_dvp, ap->a_cred, ap->a_flags));
}
/*
 * hammer_vop_ioctl { vp, command, data, fflag, cred }
 */
static
int
hammer_vop_ioctl(struct vop_ioctl_args *ap)
{
	struct hammer_inode *ip = ap->a_vp->v_data;

	return(hammer_ioctl(ip, ap->a_command, ap->a_data,
			    ap->a_fflag, ap->a_cred));
}
hammer_vop_mountctl(struct vop_mountctl_args *ap)
	mp = ap->a_head.a_ops->head.vv_mount;

	case MOUNTCTL_SET_EXPORT:
		if (ap->a_ctllen != sizeof(struct export_args))
		error = hammer_vfs_export(mp, ap->a_op,
				(const struct export_args *)ap->a_ctl);
	error = journal_mountctl(ap);
/*
 * hammer_vop_strategy { vp, bio }
 *
 * Strategy call, used for regular file read & write only.  Note that the
 * bp may represent a cluster.
 *
 * To simplify operation and allow better optimizations in the future,
 * this code does not make any assumptions with regards to buffer alignment
 */
hammer_vop_strategy(struct vop_strategy_args *ap)
	bp = ap->a_bio->bio_buf;

	error = hammer_vop_strategy_read(ap);
	error = hammer_vop_strategy_write(ap);
	bp->b_error = error;
	bp->b_flags |= B_ERROR;
/*
 * Read from a regular file.  Iterate the related records and fill in the
 * BIO/BUF.  Gaps are zero-filled.
 *
 * The support code in hammer_object.c should be used to deal with mixed
 * in-memory and on-disk records.
 */
hammer_vop_strategy_read(struct vop_strategy_args *ap)
	struct hammer_inode *ip = ap->a_vp->v_data;
	struct hammer_cursor cursor;
	hammer_record_ondisk_t rec;
	hammer_base_elm_t base;

	hammer_init_cursor_hmp(&cursor, &ip->cache[0], ip->hmp);

	/*
	 * Key range (begin and end inclusive) to scan.  Note that the keys
	 * stored in the actual records represent BASE+LEN, not BASE.  The
	 * first record containing bio_offset will have a key > bio_offset.
	 */
	cursor.key_beg.obj_id = ip->obj_id;
	cursor.key_beg.create_tid = 0;
	cursor.key_beg.delete_tid = 0;
	cursor.key_beg.obj_type = 0;
	cursor.key_beg.key = bio->bio_offset + 1;
	cursor.asof = ip->obj_asof;
	cursor.flags |= HAMMER_CURSOR_ASOF | HAMMER_CURSOR_DATAEXTOK;

	cursor.key_end = cursor.key_beg;
	if (ip->ino_rec.base.base.obj_type == HAMMER_OBJTYPE_DBFILE) {
		cursor.key_beg.rec_type = HAMMER_RECTYPE_DB;
		cursor.key_end.rec_type = HAMMER_RECTYPE_DB;
		cursor.key_end.key = 0x7FFFFFFFFFFFFFFFLL;
	} else {
		ran_end = bio->bio_offset + bp->b_bufsize;
		cursor.key_beg.rec_type = HAMMER_RECTYPE_DATA;
		cursor.key_end.rec_type = HAMMER_RECTYPE_DATA;
		tmp64 = ran_end + MAXPHYS + 1;	/* work-around GCC-4 bug */
		if (tmp64 < ran_end)
			cursor.key_end.key = 0x7FFFFFFFFFFFFFFFLL;
		else
			cursor.key_end.key = ran_end + MAXPHYS + 1;
	}
	cursor.flags |= HAMMER_CURSOR_END_INCLUSIVE;

	error = hammer_ip_first(&cursor, ip);

	while (error == 0) {
		error = hammer_ip_resolve_data(&cursor);
		rec = cursor.record;
		base = &rec->base.base;

		rec_offset = base->key - rec->data.base.data_len;

		/*
		 * Calculate the gap, if any, and zero-fill it.
		 */
		n = (int)(rec_offset - (bio->bio_offset + boff));
		if (n > bp->b_bufsize - boff)
			n = bp->b_bufsize - boff;
		bzero((char *)bp->b_data + boff, n);

		/*
		 * Calculate the data offset in the record and the number
		 * of bytes we can copy.
		 *
		 * Note there is a degenerate case here where boff may
		 * already be at bp->b_bufsize.
		 */
		n = rec->data.base.data_len - roff;
		if (n > bp->b_bufsize - boff)
			n = bp->b_bufsize - boff;
		bcopy((char *)cursor.data + roff,
		      (char *)bp->b_data + boff, n);
		if (boff == bp->b_bufsize)
			break;
		error = hammer_ip_next(&cursor);
	}
	hammer_done_cursor(&cursor);

	/*
	 * There may have been a gap after the last record
	 */
	if (error == ENOENT)
		error = 0;
	if (error == 0 && boff != bp->b_bufsize) {
		KKASSERT(boff < bp->b_bufsize);
		bzero((char *)bp->b_data + boff, bp->b_bufsize - boff);
		/* boff = bp->b_bufsize; */
	}
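/*
 * Worked example of the BASE+LEN keying used by the read strategy above
 * (illustrative numbers only): a data record whose key is 32768 with
 * data_len 16384 covers file offsets [16384, 32768).  Searching with
 * key_beg.key = bio_offset + 1 therefore finds the first record whose
 * range can still contain bio_offset, rec_offset = key - data_len
 * recovers the record's starting offset, and any distance between
 * rec_offset and the current buffer position (boff) is zero-filled as a
 * gap before the record data is copied in.
 */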
/*
 * Write to a regular file.  Iterate the related records and mark for
 * deletion.  If existing edge records (left and right side) overlap our
 * write they have to be marked deleted and new records created, usually
 * referencing a portion of the original data.  Then add a record to
 * represent the buffer.
 *
 * The support code in hammer_object.c should be used to deal with mixed
 * in-memory and on-disk records.
 */
hammer_vop_strategy_write(struct vop_strategy_args *ap)
	struct hammer_transaction trans;

	ip = ap->a_vp->v_data;

	if (ip->flags & HAMMER_INODE_RO)
		return (EROFS);

	/*
	 * Start a transaction using the TID stored with the bp.
	 */
	KKASSERT(bp->b_tid != 0);
	hammer_start_transaction_tid(&trans, ip->hmp, bp->b_tid);

	/*
	 * Delete any records overlapping our range.  This function will
	 * (eventually) properly truncate partial overlaps.
	 */
	if (ip->ino_rec.base.base.obj_type == HAMMER_OBJTYPE_DBFILE) {
		error = hammer_ip_delete_range(&trans, ip, bio->bio_offset,
	} else {
		error = hammer_ip_delete_range(&trans, ip, bio->bio_offset,
	}

	/*
	 * Add a single record to cover the write
	 */
	error = hammer_ip_sync_data(&trans, ip, bio->bio_offset,
				    bp->b_data, bp->b_bufsize);

	/*
	 * If an error occurred abort the transaction
	 */
	if (error) {
		/* XXX undo deletion */
		hammer_abort_transaction(&trans);
		bp->b_resid = bp->b_bufsize;
	} else {
		hammer_commit_transaction(&trans);
	}
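/*
 * In short, the write strategy above replaces rather than updates in
 * place: hammer_ip_delete_range() marks any records overlapping the
 * buffer's file range as deleted under the buffer's transaction id
 * (bp->b_tid), and hammer_ip_sync_data() then lays down a single new
 * data record covering bp->b_data for the full bp->b_bufsize.
 */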
/*
 * dounlink - disconnect a directory entry
 *
 * XXX whiteout support not really in yet
 */
static int
hammer_dounlink(struct nchandle *nch, struct vnode *dvp, struct ucred *cred,
		int flags)
	struct hammer_transaction trans;
	struct namecache *ncp;
	hammer_record_ondisk_t rec;
	struct hammer_cursor cursor;

	/*
	 * Calculate the namekey and setup the key range for the scan.  This
	 * works kinda like a chained hash table where the lower 32 bits
	 * of the namekey synthesize the chain.
	 *
	 * The key range is inclusive of both key_beg and key_end.
	 */
	if (dip->flags & HAMMER_INODE_RO)
		return (EROFS);

	hammer_start_transaction(&trans, dip->hmp);

	namekey = hammer_directory_namekey(ncp->nc_name, ncp->nc_nlen);

	hammer_init_cursor_hmp(&cursor, &dip->cache[0], dip->hmp);
	cursor.key_beg.obj_id = dip->obj_id;
	cursor.key_beg.key = namekey;
	cursor.key_beg.create_tid = 0;
	cursor.key_beg.delete_tid = 0;
	cursor.key_beg.rec_type = HAMMER_RECTYPE_DIRENTRY;
	cursor.key_beg.obj_type = 0;

	cursor.key_end = cursor.key_beg;
	cursor.key_end.key |= 0xFFFFFFFFULL;
	cursor.asof = dip->obj_asof;
	cursor.flags |= HAMMER_CURSOR_END_INCLUSIVE | HAMMER_CURSOR_ASOF;

	/*
	 * Scan all matching records (the chain), locate the one matching
	 * the requested path component.  info->last_error contains the
	 * error code on search termination and could be 0, ENOENT, or
	 *
	 * The hammer_ip_*() functions merge in-memory records with on-disk
	 * records for the purposes of the search.
	 */
	error = hammer_ip_first(&cursor, dip);
	while (error == 0) {
		error = hammer_ip_resolve_data(&cursor);
		rec = cursor.record;
		if (ncp->nc_nlen == rec->entry.base.data_len &&
		    bcmp(ncp->nc_name, cursor.data, ncp->nc_nlen) == 0) {
			break;
		}
		error = hammer_ip_next(&cursor);
	}

	/*
	 * If all is ok we have to get the inode so we can adjust nlinks.
	 *
	 * If the target is a directory, it must be empty.
	 */
	ip = hammer_get_inode(dip->hmp, &dip->cache[1],
			      rec->entry.obj_id,
			      dip->hmp->asof, 0, &error);
	if (error == ENOENT) {
		kprintf("obj_id %016llx\n", rec->entry.obj_id);
		Debugger("ENOENT unlinking object that should exist, cont to sync");
		hammer_sync_hmp(dip->hmp, MNT_NOWAIT);
		Debugger("ENOENT - sync done");
	}
	if (error == 0 && ip->ino_rec.base.base.obj_type ==
			  HAMMER_OBJTYPE_DIRECTORY) {
		error = hammer_ip_check_directory_empty(&trans, ip);
	}

	/*
	 * WARNING: hammer_ip_del_directory() may have to terminate
	 * the cursor to avoid a lock recursion.  It's ok to call
	 * hammer_done_cursor() twice.
	 */
	error = hammer_ip_del_directory(&trans, &cursor, dip, ip);
	cache_setunresolved(nch);
	cache_setvp(nch, NULL);
	cache_inval_vp(ip->vp, CINV_DESTROY);
	hammer_rel_inode(ip, 0);

	hammer_done_cursor(&cursor);
	if (error == EDEADLK)
	hammer_commit_transaction(&trans);
	hammer_abort_transaction(&trans);
/************************************************************************
 *			    FIFO AND SPECFS OPS				*
 ************************************************************************/

static int
hammer_vop_fifoclose (struct vop_close_args *ap)
{
	/* XXX update itimes */
	return (VOCALL(&fifo_vnode_vops, &ap->a_head));
}

static int
hammer_vop_fiforead (struct vop_read_args *ap)
{
	int error;

	error = VOCALL(&fifo_vnode_vops, &ap->a_head);
	/* XXX update access time */
	return (error);
}

static int
hammer_vop_fifowrite (struct vop_write_args *ap)
{
	int error;

	error = VOCALL(&fifo_vnode_vops, &ap->a_head);
	/* XXX update access time */
	return (error);
}

static int
hammer_vop_specclose (struct vop_close_args *ap)
{
	/* XXX update itimes */
	return (VOCALL(&spec_vnode_vops, &ap->a_head));
}

static int
hammer_vop_specread (struct vop_read_args *ap)
{
	/* XXX update access time */
	return (VOCALL(&spec_vnode_vops, &ap->a_head));
}

static int
hammer_vop_specwrite (struct vop_write_args *ap)
{
	/* XXX update last change time */
	return (VOCALL(&spec_vnode_vops, &ap->a_head));
}