/*
 * Copyright (c) 2007-2008 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $DragonFly: src/sys/vfs/hammer/hammer_vnops.c,v 1.91.2.3 2008/08/02 21:24:28 dillon Exp $
 */
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/fcntl.h>
#include <sys/namecache.h>
#include <sys/vnode.h>
#include <sys/lockf.h>
#include <sys/event.h>
#include <sys/stat.h>
#include <sys/dirent.h>
#include <vm/vm_extern.h>
#include <vfs/fifofs/fifo.h>
#include "hammer.h"

/*
 * USERFS VNOPS
 */
/*static int hammer_vop_vnoperate(struct vop_generic_args *);*/
static int hammer_vop_fsync(struct vop_fsync_args *);
static int hammer_vop_read(struct vop_read_args *);
static int hammer_vop_write(struct vop_write_args *);
static int hammer_vop_access(struct vop_access_args *);
static int hammer_vop_advlock(struct vop_advlock_args *);
static int hammer_vop_close(struct vop_close_args *);
static int hammer_vop_ncreate(struct vop_ncreate_args *);
static int hammer_vop_getattr(struct vop_getattr_args *);
static int hammer_vop_nresolve(struct vop_nresolve_args *);
static int hammer_vop_nlookupdotdot(struct vop_nlookupdotdot_args *);
static int hammer_vop_nlink(struct vop_nlink_args *);
static int hammer_vop_nmkdir(struct vop_nmkdir_args *);
static int hammer_vop_nmknod(struct vop_nmknod_args *);
static int hammer_vop_open(struct vop_open_args *);
static int hammer_vop_pathconf(struct vop_pathconf_args *);
static int hammer_vop_print(struct vop_print_args *);
static int hammer_vop_readdir(struct vop_readdir_args *);
static int hammer_vop_readlink(struct vop_readlink_args *);
static int hammer_vop_nremove(struct vop_nremove_args *);
static int hammer_vop_nrename(struct vop_nrename_args *);
static int hammer_vop_nrmdir(struct vop_nrmdir_args *);
static int hammer_vop_setattr(struct vop_setattr_args *);
static int hammer_vop_strategy(struct vop_strategy_args *);
static int hammer_vop_bmap(struct vop_bmap_args *ap);
static int hammer_vop_nsymlink(struct vop_nsymlink_args *);
static int hammer_vop_nwhiteout(struct vop_nwhiteout_args *);
static int hammer_vop_ioctl(struct vop_ioctl_args *);
static int hammer_vop_mountctl(struct vop_mountctl_args *);

static int hammer_vop_fifoclose (struct vop_close_args *);
static int hammer_vop_fiforead (struct vop_read_args *);
static int hammer_vop_fifowrite (struct vop_write_args *);

static int hammer_vop_specclose (struct vop_close_args *);
static int hammer_vop_specread (struct vop_read_args *);
static int hammer_vop_specwrite (struct vop_write_args *);
struct vop_ops hammer_vnode_vops = {
        .vop_default = vop_defaultop,
        .vop_fsync = hammer_vop_fsync,
        .vop_getpages = vop_stdgetpages,
        .vop_putpages = vop_stdputpages,
        .vop_read = hammer_vop_read,
        .vop_write = hammer_vop_write,
        .vop_access = hammer_vop_access,
        .vop_advlock = hammer_vop_advlock,
        .vop_close = hammer_vop_close,
        .vop_ncreate = hammer_vop_ncreate,
        .vop_getattr = hammer_vop_getattr,
        .vop_inactive = hammer_vop_inactive,
        .vop_reclaim = hammer_vop_reclaim,
        .vop_nresolve = hammer_vop_nresolve,
        .vop_nlookupdotdot = hammer_vop_nlookupdotdot,
        .vop_nlink = hammer_vop_nlink,
        .vop_nmkdir = hammer_vop_nmkdir,
        .vop_nmknod = hammer_vop_nmknod,
        .vop_open = hammer_vop_open,
        .vop_pathconf = hammer_vop_pathconf,
        .vop_print = hammer_vop_print,
        .vop_readdir = hammer_vop_readdir,
        .vop_readlink = hammer_vop_readlink,
        .vop_nremove = hammer_vop_nremove,
        .vop_nrename = hammer_vop_nrename,
        .vop_nrmdir = hammer_vop_nrmdir,
        .vop_setattr = hammer_vop_setattr,
        .vop_bmap = hammer_vop_bmap,
        .vop_strategy = hammer_vop_strategy,
        .vop_nsymlink = hammer_vop_nsymlink,
        .vop_nwhiteout = hammer_vop_nwhiteout,
        .vop_ioctl = hammer_vop_ioctl,
        .vop_mountctl = hammer_vop_mountctl
};
struct vop_ops hammer_spec_vops = {
        .vop_default = spec_vnoperate,
        .vop_fsync = hammer_vop_fsync,
        .vop_read = hammer_vop_specread,
        .vop_write = hammer_vop_specwrite,
        .vop_access = hammer_vop_access,
        .vop_close = hammer_vop_specclose,
        .vop_getattr = hammer_vop_getattr,
        .vop_inactive = hammer_vop_inactive,
        .vop_reclaim = hammer_vop_reclaim,
        .vop_setattr = hammer_vop_setattr
};
struct vop_ops hammer_fifo_vops = {
        .vop_default = fifo_vnoperate,
        .vop_fsync = hammer_vop_fsync,
        .vop_read = hammer_vop_fiforead,
        .vop_write = hammer_vop_fifowrite,
        .vop_access = hammer_vop_access,
        .vop_close = hammer_vop_fifoclose,
        .vop_getattr = hammer_vop_getattr,
        .vop_inactive = hammer_vop_inactive,
        .vop_reclaim = hammer_vop_reclaim,
        .vop_setattr = hammer_vop_setattr
};
#ifdef DEBUG_TRUNCATE
struct hammer_inode *HammerTruncIp;
#endif

static int hammer_dounlink(hammer_transaction_t trans, struct nchandle *nch,
                           struct vnode *dvp, struct ucred *cred, int flags);
static int hammer_vop_strategy_read(struct vop_strategy_args *ap);
static int hammer_vop_strategy_write(struct vop_strategy_args *ap);
#if 0
static
int
hammer_vop_vnoperate(struct vop_generic_args *ap)
{
        return (VOCALL(&hammer_vnode_vops, ap));
}
#endif
/*
 * hammer_vop_fsync { vp, waitfor }
 *
 * fsync() an inode to disk and wait for it to be completely committed
 * such that the information would not be undone if a crash occurred after
 * return.
 */
static
int
hammer_vop_fsync(struct vop_fsync_args *ap)
{
        hammer_inode_t ip = VTOI(ap->a_vp);

        ++hammer_count_fsyncs;
        vfsync(ap->a_vp, ap->a_waitfor, 1, NULL, NULL);
        hammer_flush_inode(ip, HAMMER_FLUSH_SIGNAL);
        if (ap->a_waitfor == MNT_WAIT)
                hammer_wait_inode(ip);
        return (ip->error);
}
/*
 * hammer_vop_read { vp, uio, ioflag, cred }
 */
static
int
hammer_vop_read(struct vop_read_args *ap)
{
        struct hammer_transaction trans;
        hammer_inode_t ip;
        off_t offset;
        struct buf *bp;
        struct uio *uio;
        int error;
        int n;
        int seqcount;
        int ioseqcount;
        int blksize;

        if (ap->a_vp->v_type != VREG)
                return (EINVAL);
        ip = VTOI(ap->a_vp);
        error = 0;
        uio = ap->a_uio;

        /*
         * Allow the UIO's size to override the sequential heuristic.
         */
        blksize = hammer_blocksize(uio->uio_offset);
        seqcount = (uio->uio_resid + (blksize - 1)) / blksize;
        ioseqcount = ap->a_ioflag >> 16;
        if (seqcount < ioseqcount)
                seqcount = ioseqcount;
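
        /*
         * (Illustrative math, not from the original source: a 128KB uio
         * over 16KB buffers gives seqcount 8, so a single big read primes
         * read-ahead as strongly as eight sequential buffer-sized reads
         * would have.  The ioflag argument carries the heuristic from
         * prior reads in its upper 16 bits.)
         */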

        hammer_start_transaction(&trans, ip->hmp);

        /*
         * Access the data typically in HAMMER_BUFSIZE blocks via the
         * buffer cache, but HAMMER may use a variable block size based
         * on the offset.
         */
        while (uio->uio_resid > 0 && uio->uio_offset < ip->ino_data.size) {
                int64_t base_offset;
                int64_t file_limit;

                blksize = hammer_blocksize(uio->uio_offset);
                offset = (int)uio->uio_offset & (blksize - 1);
                base_offset = uio->uio_offset - offset;

                if (hammer_cluster_enable) {
                        /*
                         * Use file_limit to prevent cluster_read() from
                         * creating buffers of the wrong block size past
                         * the demarc.
                         */
                        file_limit = ip->ino_data.size;
                        if (base_offset < HAMMER_XDEMARC &&
                            file_limit > HAMMER_XDEMARC) {
                                file_limit = HAMMER_XDEMARC;
                        }
                        error = cluster_read(ap->a_vp,
                                             file_limit, base_offset,
                                             blksize, MAXPHYS,
                                             seqcount, &bp);
                } else {
                        error = bread(ap->a_vp, base_offset, blksize, &bp);
                }
                if (error) {
                        kprintf("error %d\n", error);
                        brelse(bp);
                        break;
                }

                /* bp->b_flags |= B_CLUSTEROK; temporarily disabled */
                n = blksize - offset;
                if (n > uio->uio_resid)
                        n = uio->uio_resid;
                if (n > ip->ino_data.size - uio->uio_offset)
                        n = (int)(ip->ino_data.size - uio->uio_offset);
                error = uiomove((char *)bp->b_data + offset, n, uio);

                /* data has a lower priority than meta-data */
                bp->b_flags |= B_AGE;
                bqrelse(bp);
                if (error)
                        break;
                hammer_stats_file_read += n;
        }
        if ((ip->flags & HAMMER_INODE_RO) == 0 &&
            (ip->hmp->mp->mnt_flag & MNT_NOATIME) == 0) {
                ip->ino_data.atime = trans.time;
                hammer_modify_inode(ip, HAMMER_INODE_ATIME);
        }
        hammer_done_transaction(&trans);
        return (error);
}
/*
 * hammer_vop_write { vp, uio, ioflag, cred }
 */
static
int
hammer_vop_write(struct vop_write_args *ap)
{
        struct hammer_transaction trans;
        struct hammer_inode *ip;
        hammer_mount_t hmp;
        struct uio *uio;
        int offset;
        off_t base_offset;
        struct buf *bp;
        int error;
        int n;
        int flags;
        int delta;
        int seqcount;

        if (ap->a_vp->v_type != VREG)
                return (EINVAL);
        ip = VTOI(ap->a_vp);
        hmp = ip->hmp;
        error = 0;
        seqcount = ap->a_ioflag >> 16;

        if (ip->flags & HAMMER_INODE_RO)
                return (EROFS);

        /*
         * Create a transaction to cover the operations we perform.
         */
        hammer_start_transaction(&trans, hmp);
        uio = ap->a_uio;

        /*
         * Check append mode
         */
        if (ap->a_ioflag & IO_APPEND)
                uio->uio_offset = ip->ino_data.size;

        /*
         * Check for illegal write offsets.  Valid range is 0...2^63-1.
         *
         * NOTE: the base_off assignment is required to work around what
         * I consider to be a GCC-4 optimization bug.
         */
        if (uio->uio_offset < 0) {
                hammer_done_transaction(&trans);
                return (EFBIG);
        }
        base_offset = uio->uio_offset + uio->uio_resid; /* work around gcc-4 */
        if (uio->uio_resid > 0 && base_offset <= 0) {
                hammer_done_transaction(&trans);
                return (EFBIG);
        }
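
        /*
         * (Illustration, not from the original source: if uio_offset sits
         * within uio_resid bytes of 2^63-1, the signed 64 bit sum computed
         * above wraps negative, so the base_offset <= 0 test rejects
         * writes that would run past the maximum representable offset.)
         */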

        /*
         * Access the data typically in HAMMER_BUFSIZE blocks via the
         * buffer cache, but HAMMER may use a variable block size based
         * on the offset.
         */
        while (uio->uio_resid > 0) {
                int fixsize = 0;
                int blksize;
                int blkmask;

                if ((error = hammer_checkspace(hmp, HAMMER_CHKSPC_WRITE)) != 0)
                        break;

                blksize = hammer_blocksize(uio->uio_offset);

                /*
                 * Do not allow HAMMER to blow out the buffer cache.  Very
                 * large UIOs can lock out other processes due to
                 * bwillwrite() mechanics.
                 *
                 * The hammer inode is not locked during these operations.
                 * The vnode is locked which can interfere with the pageout
                 * daemon for non-UIO_NOCOPY writes but should not interfere
                 * with the buffer cache.  Even so, we cannot afford to
                 * allow the pageout daemon to build up too many dirty buffer
                 * cache buffers.
                 */
                /*if (((int)uio->uio_offset & (blksize - 1)) == 0)*/
                bwillwrite(blksize);

                /*
                 * Do not allow HAMMER to blow out system memory by
                 * accumulating too many records.  Records are so well
                 * decoupled from the buffer cache that it is possible
                 * for userland to push data out to the media via
                 * direct-write, but build up the records queued to the
                 * backend faster than the backend can flush them out.
                 * HAMMER has hit its write limit but the frontend has
                 * no pushback to slow it down.
                 */
                if (hmp->rsv_recs > hammer_limit_recs / 2) {
                        /*
                         * Get the inode on the flush list
                         */
                        if (ip->rsv_recs >= 64)
                                hammer_flush_inode(ip, HAMMER_FLUSH_SIGNAL);
                        else if (ip->rsv_recs >= 16)
                                hammer_flush_inode(ip, 0);

                        /*
                         * Keep the flusher going if the system keeps
                         * queueing records.
                         */
                        delta = hmp->count_newrecords -
                                hmp->last_newrecords;
                        if (delta < 0 || delta > hammer_limit_recs / 2) {
                                hmp->last_newrecords = hmp->count_newrecords;
                                hammer_sync_hmp(hmp, MNT_NOWAIT);
                        }

                        /*
                         * If we have gotten behind start slowing
                         * down the writers.
                         */
                        delta = (hmp->rsv_recs - hammer_limit_recs) *
                                hz / hammer_limit_recs;
                        if (delta > 0)
                                tsleep(&trans, 0, "hmrslo", delta);
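
                        /*
                         * (e.g. with rsv_recs at 1.5x the record limit
                         * the formula above yields hz/2 ticks, i.e. a
                         * half-second nap; the penalty grows linearly
                         * with how far past hammer_limit_recs we are.)
                         */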
                }

                /*
                 * Calculate the blocksize at the current offset and figure
                 * out how much we can actually write.
                 */
                blkmask = blksize - 1;
                offset = (int)uio->uio_offset & blkmask;
                base_offset = uio->uio_offset & ~(int64_t)blkmask;
                n = blksize - offset;
                if (n > uio->uio_resid)
                        n = uio->uio_resid;
                if (uio->uio_offset + n > ip->ino_data.size) {
                        vnode_pager_setsize(ap->a_vp, uio->uio_offset + n);
                        fixsize = 1;
                }

                if (uio->uio_segflg == UIO_NOCOPY) {
                        /*
                         * Issuing a write with the same data backing the
                         * buffer.  Instantiate the buffer to collect the
                         * backing vm pages, then read-in any missing bits.
                         *
                         * This case is used by vop_stdputpages().
                         */
                        bp = getblk(ap->a_vp, base_offset,
                                    blksize, GETBLK_BHEAVY, 0);
                        if ((bp->b_flags & B_CACHE) == 0) {
                                bqrelse(bp);
                                error = bread(ap->a_vp, base_offset,
                                              blksize, &bp);
                        }
                } else if (offset == 0 && uio->uio_resid >= blksize) {
                        /*
                         * Even though we are entirely overwriting the buffer
                         * we may still have to zero it out to avoid a
                         * mmap/write visibility issue.
                         */
                        bp = getblk(ap->a_vp, base_offset, blksize,
                                    GETBLK_BHEAVY, 0);
                        if ((bp->b_flags & B_CACHE) == 0)
                                vfs_bio_clrbuf(bp);
                } else if (base_offset >= ip->ino_data.size) {
                        /*
                         * If the base offset of the buffer is beyond the
                         * file EOF, we don't have to issue a read.
                         */
                        bp = getblk(ap->a_vp, base_offset,
                                    blksize, GETBLK_BHEAVY, 0);
                        vfs_bio_clrbuf(bp);
                } else {
                        /*
                         * Partial overwrite, read in any missing bits then
                         * replace the portion being written.
                         */
                        error = bread(ap->a_vp, base_offset, blksize, &bp);
                        if (error == 0)
                                bheavy(bp);
                }
                if (error == 0) {
                        error = uiomove((char *)bp->b_data + offset,
                                        n, uio);
                }

                /*
                 * If we screwed up we have to undo any VM size changes we
                 * made.
                 */
                if (error) {
                        brelse(bp);
                        if (fixsize) {
                                vtruncbuf(ap->a_vp, ip->ino_data.size,
                                          hammer_blocksize(ip->ino_data.size));
                        }
                        break;
                }
                hammer_stats_file_write += n;
                /* bp->b_flags |= B_CLUSTEROK; temporarily disabled */
                if (ip->ino_data.size < uio->uio_offset) {
                        ip->ino_data.size = uio->uio_offset;
                        flags = HAMMER_INODE_DDIRTY;
                        vnode_pager_setsize(ap->a_vp, ip->ino_data.size);
                } else {
                        flags = 0;
                }
                ip->ino_data.mtime = trans.time;
                flags |= HAMMER_INODE_MTIME | HAMMER_INODE_BUFS;
                hammer_modify_inode(ip, flags);

                /*
                 * Once we dirty the buffer any cached zone-X offset
                 * becomes invalid.  HAMMER NOTE: no-history mode cannot
                 * allow overwriting over the same data sector unless
                 * we provide UNDOs for the old data, which we don't.
                 */
                bp->b_bio2.bio_offset = NOOFFSET;

                /*
                 * Final buffer disposition.
                 */
                bp->b_flags |= B_AGE;
                if (ap->a_ioflag & IO_SYNC) {
                        bwrite(bp);
                } else if (ap->a_ioflag & IO_DIRECT) {
                        bawrite(bp);
                } else {
                        bdwrite(bp);
                }
        }
        hammer_done_transaction(&trans);
        return (error);
}
/*
 * hammer_vop_access { vp, mode, cred }
 */
static
int
hammer_vop_access(struct vop_access_args *ap)
{
        struct hammer_inode *ip = VTOI(ap->a_vp);
        uid_t uid;
        gid_t gid;
        int error;

        ++hammer_stats_file_iopsr;
        uid = hammer_to_unix_xid(&ip->ino_data.uid);
        gid = hammer_to_unix_xid(&ip->ino_data.gid);

        error = vop_helper_access(ap, uid, gid, ip->ino_data.mode,
                                  ip->ino_data.uflags);
        return (error);
}
/*
 * hammer_vop_advlock { vp, id, op, fl, flags }
 */
static
int
hammer_vop_advlock(struct vop_advlock_args *ap)
{
        hammer_inode_t ip = VTOI(ap->a_vp);

        return (lf_advlock(ap, &ip->advlock, ip->ino_data.size));
}

/*
 * hammer_vop_close { vp, fflag }
 */
static
int
hammer_vop_close(struct vop_close_args *ap)
{
        hammer_inode_t ip = VTOI(ap->a_vp);

        if ((ip->flags | ip->sync_flags) & HAMMER_INODE_MODMASK)
                hammer_inode_waitreclaims(ip->hmp);
        return (vop_stdclose(ap));
}
/*
 * hammer_vop_ncreate { nch, dvp, vpp, cred, vap }
 *
 * The operating system has already ensured that the directory entry
 * does not exist and done all appropriate namespace locking.
 */
static
int
hammer_vop_ncreate(struct vop_ncreate_args *ap)
{
        struct hammer_transaction trans;
        struct hammer_inode *dip;
        struct hammer_inode *nip;
        struct nchandle *nch;
        int error;

        nch = ap->a_nch;
        dip = VTOI(ap->a_dvp);

        if (dip->flags & HAMMER_INODE_RO)
                return (EROFS);
        if ((error = hammer_checkspace(dip->hmp, HAMMER_CHKSPC_CREATE)) != 0)
                return (error);

        /*
         * Create a transaction to cover the operations we perform.
         */
        hammer_start_transaction(&trans, dip->hmp);
        ++hammer_stats_file_iopsw;

        /*
         * Create a new filesystem object of the requested type.  The
         * returned inode will be referenced and shared-locked to prevent
         * it from being moved to the flusher.
         */
        error = hammer_create_inode(&trans, ap->a_vap, ap->a_cred,
                                    dip, NULL, &nip);
        if (error) {
                hkprintf("hammer_create_inode error %d\n", error);
                hammer_done_transaction(&trans);
                *ap->a_vpp = NULL;
                return (error);
        }

        /*
         * Add the new filesystem object to the directory.  This will also
         * bump the inode's link count.
         */
        error = hammer_ip_add_directory(&trans, dip,
                                        nch->ncp->nc_name, nch->ncp->nc_nlen,
                                        nip);
        if (error)
                hkprintf("hammer_ip_add_directory error %d\n", error);

        /*
         * Finish up.
         */
        if (error) {
                hammer_rel_inode(nip, 0);
                hammer_done_transaction(&trans);
                *ap->a_vpp = NULL;
        } else {
                error = hammer_get_vnode(nip, ap->a_vpp);
                hammer_done_transaction(&trans);
                hammer_rel_inode(nip, 0);
                if (error == 0) {
                        cache_setunresolved(ap->a_nch);
                        cache_setvp(ap->a_nch, *ap->a_vpp);
                }
        }
        return (error);
}
/*
 * hammer_vop_getattr { vp, vap }
 *
 * Retrieve an inode's attribute information.  When accessing inodes
 * historically we fake the atime field to ensure consistent results.
 * The atime field is stored in the B-Tree element and allowed to be
 * updated without cycling the element.
 */
static
int
hammer_vop_getattr(struct vop_getattr_args *ap)
{
        struct hammer_inode *ip = VTOI(ap->a_vp);
        struct vattr *vap = ap->a_vap;

        /*
         * We want the fsid to be different when accessing a filesystem
         * with different as-of's so programs like diff don't think
         * the files are the same.
         *
         * We also want the fsid to be the same when comparing snapshots,
         * or when comparing mirrors (which might be backed by different
         * physical devices).  HAMMER fsids are based on the PFS's
         * shared_uuid field.
         *
         * XXX there is a chance of collision here.  The va_fsid reported
         * by stat is different from the more involved fsid used in the
         * mount structure.
         */
        ++hammer_stats_file_iopsr;
        vap->va_fsid = ip->pfsm->fsid_udev ^ (u_int32_t)ip->obj_asof ^
                       (u_int32_t)(ip->obj_asof >> 32);

        vap->va_fileid = ip->ino_leaf.base.obj_id;
        vap->va_mode = ip->ino_data.mode;
        vap->va_nlink = ip->ino_data.nlinks;
        vap->va_uid = hammer_to_unix_xid(&ip->ino_data.uid);
        vap->va_gid = hammer_to_unix_xid(&ip->ino_data.gid);
        vap->va_rmajor = 0;
        vap->va_rminor = 0;
        vap->va_size = ip->ino_data.size;

        /*
         * Special case for @@PFS softlinks.  The actual size of the
         * expanded softlink is "@@0x%016llx:%05d" == 26 bytes.
         */
        if (ip->ino_data.obj_type == HAMMER_OBJTYPE_SOFTLINK &&
            ip->ino_data.size == 10 &&
            ip->obj_asof == HAMMER_MAX_TID &&
            ip->obj_localization == 0 &&
            strncmp(ip->ino_data.ext.symlink, "@@PFS", 5) == 0) {
                vap->va_size = 26;
        }
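
        /*
         * (Breakdown of the 26: "@@" + "0x" + 16 hex digits + ":" +
         * 5 decimal digits = 2 + 2 + 16 + 1 + 5 = 26 bytes.)
         */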

        /*
         * We must provide a consistent atime and mtime for snapshots
         * so people can do a 'tar cf - ... | md5' on them and get
         * consistent results.
         */
        if (ip->flags & HAMMER_INODE_RO) {
                hammer_time_to_timespec(ip->ino_data.ctime, &vap->va_atime);
                hammer_time_to_timespec(ip->ino_data.ctime, &vap->va_mtime);
        } else {
                hammer_time_to_timespec(ip->ino_data.atime, &vap->va_atime);
                hammer_time_to_timespec(ip->ino_data.mtime, &vap->va_mtime);
        }
        hammer_time_to_timespec(ip->ino_data.ctime, &vap->va_ctime);
        vap->va_flags = ip->ino_data.uflags;
        vap->va_gen = 1;        /* hammer inums are unique for all time */
        vap->va_blocksize = HAMMER_BUFSIZE;
        if (ip->ino_data.size >= HAMMER_XDEMARC) {
                vap->va_bytes = (ip->ino_data.size + HAMMER_XBUFMASK64) &
                                ~HAMMER_XBUFMASK64;
        } else if (ip->ino_data.size > HAMMER_BUFSIZE / 2) {
                vap->va_bytes = (ip->ino_data.size + HAMMER_BUFMASK64) &
                                ~HAMMER_BUFMASK64;
        } else {
                vap->va_bytes = (ip->ino_data.size + 15) & ~15;
        }
        vap->va_type = hammer_get_vnode_type(ip->ino_data.obj_type);
        vap->va_filerev = 0;    /* XXX */
        /* mtime uniquely identifies any adjustments made to the file XXX */
        vap->va_fsmid = ip->ino_data.mtime;
        vap->va_uid_uuid = ip->ino_data.uid;
        vap->va_gid_uuid = ip->ino_data.gid;
        vap->va_fsid_uuid = ip->hmp->fsid;
        vap->va_vaflags = VA_UID_UUID_VALID | VA_GID_UUID_VALID |
                          VA_FSID_UUID_VALID;

        switch (ip->ino_data.obj_type) {
        case HAMMER_OBJTYPE_CDEV:
        case HAMMER_OBJTYPE_BDEV:
                vap->va_rmajor = ip->ino_data.rmajor;
                vap->va_rminor = ip->ino_data.rminor;
                break;
        default:
                break;
        }
        return(0);
}
/*
 * hammer_vop_nresolve { nch, dvp, cred }
 *
 * Locate the requested directory entry.
 */
static
int
hammer_vop_nresolve(struct vop_nresolve_args *ap)
{
        struct hammer_transaction trans;
        struct namecache *ncp;
        hammer_inode_t dip;
        hammer_inode_t ip;
        hammer_tid_t asof;
        struct hammer_cursor cursor;
        struct vnode *vp;
        int64_t namekey;
        int error;
        int i;
        int nlen;
        int flags;
        int ispfs;
        int64_t obj_id;
        u_int32_t localization;

        /*
         * Misc initialization, plus handle as-of name extensions.  Look for
         * the '@@' extension.  Note that as-of files and directories cannot
         * be modified.
         */
        dip = VTOI(ap->a_dvp);
        ncp = ap->a_nch->ncp;
        asof = dip->obj_asof;
        nlen = ncp->nc_nlen;
        flags = dip->flags & HAMMER_INODE_RO;
        ispfs = 0;

        hammer_simple_transaction(&trans, dip->hmp);
        ++hammer_stats_file_iopsr;

        for (i = 0; i < nlen; ++i) {
                if (ncp->nc_name[i] == '@' && ncp->nc_name[i+1] == '@') {
                        asof = hammer_str_to_tid(ncp->nc_name + i + 2,
                                                 &ispfs, &localization);
                        if (asof != HAMMER_MAX_TID)
                                flags |= HAMMER_INODE_RO;
                        break;
                }
        }
        nlen = i;
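
        /*
         * A lookup such as "src@@0x00000001061a8ba0" (hypothetical name)
         * has now been split: nlen covers just "src" while asof holds the
         * parsed transaction id, so the rest of the resolve runs against
         * the historical view under the bare name.
         */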

        /*
         * If this is a PFS softlink we dive into the PFS
         */
        if (ispfs && nlen == 0) {
                ip = hammer_get_inode(&trans, dip, HAMMER_OBJID_ROOT,
                                      asof, localization,
                                      flags, &error);
                if (error == 0) {
                        error = hammer_get_vnode(ip, &vp);
                        hammer_rel_inode(ip, 0);
                } else {
                        vp = NULL;
                }
                if (error == 0) {
                        vn_unlock(vp);
                        cache_setvp(ap->a_nch, vp);
                        vrele(vp);
                }
                goto done;
        }

        /*
         * If there is no path component the time extension is relative to
         * dip.
         */
        if (nlen == 0) {
                ip = hammer_get_inode(&trans, dip, dip->obj_id,
                                      asof, dip->obj_localization,
                                      flags, &error);
                if (error == 0) {
                        error = hammer_get_vnode(ip, &vp);
                        hammer_rel_inode(ip, 0);
                } else {
                        vp = NULL;
                }
                if (error == 0) {
                        vn_unlock(vp);
                        cache_setvp(ap->a_nch, vp);
                        vrele(vp);
                }
                goto done;
        }

        /*
         * Calculate the namekey and setup the key range for the scan.  This
         * works kinda like a chained hash table where the lower 32 bits
         * of the namekey synthesize the chain.
         *
         * The key range is inclusive of both key_beg and key_end.
         */
        namekey = hammer_directory_namekey(ncp->nc_name, nlen);

        error = hammer_init_cursor(&trans, &cursor, &dip->cache[1], dip);
        cursor.key_beg.localization = dip->obj_localization +
                                      HAMMER_LOCALIZE_MISC;
        cursor.key_beg.obj_id = dip->obj_id;
        cursor.key_beg.key = namekey;
        cursor.key_beg.create_tid = 0;
        cursor.key_beg.delete_tid = 0;
        cursor.key_beg.rec_type = HAMMER_RECTYPE_DIRENTRY;
        cursor.key_beg.obj_type = 0;

        cursor.key_end = cursor.key_beg;
        cursor.key_end.key |= 0xFFFFFFFFULL;
        cursor.asof = asof;
        cursor.flags |= HAMMER_CURSOR_END_INCLUSIVE | HAMMER_CURSOR_ASOF;
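
        /*
         * The resulting scan range is [namekey, namekey | 0xFFFFFFFF]:
         * the upper 32 bits select the hash chain and the low 32 bits
         * sweep every collision slot on it, so all entries hashing to
         * this chain are visited in a single pass.
         */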

        /*
         * Scan all matching records (the chain), locate the one matching
         * the requested path component.
         *
         * The hammer_ip_*() functions merge in-memory records with on-disk
         * records for the purposes of the search.
         */
        obj_id = 0;
        localization = HAMMER_DEF_LOCALIZATION;

        if (error == 0) {
                error = hammer_ip_first(&cursor);
                while (error == 0) {
                        error = hammer_ip_resolve_data(&cursor);
                        if (error)
                                break;
                        if (nlen == cursor.leaf->data_len - HAMMER_ENTRY_NAME_OFF &&
                            bcmp(ncp->nc_name, cursor.data->entry.name, nlen) == 0) {
                                obj_id = cursor.data->entry.obj_id;
                                localization = cursor.data->entry.localization;
                                break;
                        }
                        error = hammer_ip_next(&cursor);
                }
        }
        hammer_done_cursor(&cursor);
        if (error == 0) {
                ip = hammer_get_inode(&trans, dip, obj_id,
                                      asof, localization,
                                      flags, &error);
                if (error == 0) {
                        error = hammer_get_vnode(ip, &vp);
                        hammer_rel_inode(ip, 0);
                } else {
                        vp = NULL;
                }
                if (error == 0) {
                        vn_unlock(vp);
                        cache_setvp(ap->a_nch, vp);
                        vrele(vp);
                }
        } else if (error == ENOENT) {
                cache_setvp(ap->a_nch, NULL);
        }
done:
        hammer_done_transaction(&trans);
        return (error);
}
/*
 * hammer_vop_nlookupdotdot { dvp, vpp, cred }
 *
 * Locate the parent directory of a directory vnode.
 *
 * dvp is referenced but not locked.  *vpp must be returned referenced and
 * locked.  A parent_obj_id of 0 does not necessarily indicate that we are
 * at the root, instead it could indicate that the directory we were in was
 * removed.
 *
 * NOTE: as-of sequences are not linked into the directory structure.  If
 * we are at the root with a different asof than the mount point, reload
 * the same directory with the mount point's asof.  I'm not sure what this
 * will do to NFS.  We encode ASOF stamps in NFS file handles so it might not
 * get confused, but it hasn't been tested.
 */
static
int
hammer_vop_nlookupdotdot(struct vop_nlookupdotdot_args *ap)
{
        struct hammer_transaction trans;
        struct hammer_inode *dip;
        struct hammer_inode *ip;
        int64_t parent_obj_id;
        u_int32_t parent_obj_localization;
        hammer_tid_t asof;
        int error;

        dip = VTOI(ap->a_dvp);
        asof = dip->obj_asof;

        /*
         * Who is our parent?  This could be the root of a pseudo-filesystem
         * whose parent is in another localization domain.
         */
        parent_obj_id = dip->ino_data.parent_obj_id;
        if (dip->obj_id == HAMMER_OBJID_ROOT)
                parent_obj_localization = dip->ino_data.ext.obj.parent_obj_localization;
        else
                parent_obj_localization = dip->obj_localization;

        if (parent_obj_id == 0) {
                if (dip->obj_id == HAMMER_OBJID_ROOT &&
                    asof != dip->hmp->asof) {
                        parent_obj_id = dip->obj_id;
                        asof = dip->hmp->asof;
                        *ap->a_fakename = kmalloc(19, M_TEMP, M_WAITOK);
                        ksnprintf(*ap->a_fakename, 19, "0x%016llx",
                                  dip->obj_asof);
                } else {
                        *ap->a_vpp = NULL;
                        return ENOENT;
                }
        }

        hammer_simple_transaction(&trans, dip->hmp);
        ++hammer_stats_file_iopsr;

        ip = hammer_get_inode(&trans, dip, parent_obj_id,
                              asof, parent_obj_localization,
                              dip->flags, &error);
        if (ip) {
                error = hammer_get_vnode(ip, ap->a_vpp);
                hammer_rel_inode(ip, 0);
        } else {
                *ap->a_vpp = NULL;
        }
        hammer_done_transaction(&trans);
        return (error);
}
/*
 * hammer_vop_nlink { nch, dvp, vp, cred }
 */
static
int
hammer_vop_nlink(struct vop_nlink_args *ap)
{
        struct hammer_transaction trans;
        struct hammer_inode *dip;
        struct hammer_inode *ip;
        struct nchandle *nch;
        int error;

        if (ap->a_dvp->v_mount != ap->a_vp->v_mount)
                return(EXDEV);

        nch = ap->a_nch;
        dip = VTOI(ap->a_dvp);
        ip = VTOI(ap->a_vp);

        if (dip->obj_localization != ip->obj_localization)
                return(EXDEV);

        if (dip->flags & HAMMER_INODE_RO)
                return (EROFS);
        if (ip->flags & HAMMER_INODE_RO)
                return (EROFS);
        if ((error = hammer_checkspace(dip->hmp, HAMMER_CHKSPC_CREATE)) != 0)
                return (error);

        /*
         * Create a transaction to cover the operations we perform.
         */
        hammer_start_transaction(&trans, dip->hmp);
        ++hammer_stats_file_iopsw;

        /*
         * Add the filesystem object to the directory.  Note that neither
         * dip nor ip are referenced or locked, but their vnodes are
         * referenced.  This function will bump the inode's link count.
         */
        error = hammer_ip_add_directory(&trans, dip,
                                        nch->ncp->nc_name, nch->ncp->nc_nlen,
                                        ip);

        /*
         * Finish up.
         */
        if (error == 0) {
                cache_setunresolved(nch);
                cache_setvp(nch, ap->a_vp);
        }
        hammer_done_transaction(&trans);
        return (error);
}
/*
 * hammer_vop_nmkdir { nch, dvp, vpp, cred, vap }
 *
 * The operating system has already ensured that the directory entry
 * does not exist and done all appropriate namespace locking.
 */
static
int
hammer_vop_nmkdir(struct vop_nmkdir_args *ap)
{
        struct hammer_transaction trans;
        struct hammer_inode *dip;
        struct hammer_inode *nip;
        struct nchandle *nch;
        int error;

        nch = ap->a_nch;
        dip = VTOI(ap->a_dvp);

        if (dip->flags & HAMMER_INODE_RO)
                return (EROFS);
        if ((error = hammer_checkspace(dip->hmp, HAMMER_CHKSPC_CREATE)) != 0)
                return (error);

        /*
         * Create a transaction to cover the operations we perform.
         */
        hammer_start_transaction(&trans, dip->hmp);
        ++hammer_stats_file_iopsw;

        /*
         * Create a new filesystem object of the requested type.  The
         * returned inode will be referenced but not locked.
         */
        error = hammer_create_inode(&trans, ap->a_vap, ap->a_cred,
                                    dip, NULL, &nip);
        if (error) {
                hkprintf("hammer_mkdir error %d\n", error);
                hammer_done_transaction(&trans);
                *ap->a_vpp = NULL;
                return (error);
        }

        /*
         * Add the new filesystem object to the directory.  This will also
         * bump the inode's link count.
         */
        error = hammer_ip_add_directory(&trans, dip,
                                        nch->ncp->nc_name, nch->ncp->nc_nlen,
                                        nip);
        if (error)
                hkprintf("hammer_mkdir (add) error %d\n", error);

        /*
         * Finish up.
         */
        if (error) {
                hammer_rel_inode(nip, 0);
                *ap->a_vpp = NULL;
        } else {
                error = hammer_get_vnode(nip, ap->a_vpp);
                hammer_rel_inode(nip, 0);
                if (error == 0) {
                        cache_setunresolved(ap->a_nch);
                        cache_setvp(ap->a_nch, *ap->a_vpp);
                }
        }
        hammer_done_transaction(&trans);
        return (error);
}
/*
 * hammer_vop_nmknod { nch, dvp, vpp, cred, vap }
 *
 * The operating system has already ensured that the directory entry
 * does not exist and done all appropriate namespace locking.
 */
static
int
hammer_vop_nmknod(struct vop_nmknod_args *ap)
{
        struct hammer_transaction trans;
        struct hammer_inode *dip;
        struct hammer_inode *nip;
        struct nchandle *nch;
        int error;

        nch = ap->a_nch;
        dip = VTOI(ap->a_dvp);

        if (dip->flags & HAMMER_INODE_RO)
                return (EROFS);
        if ((error = hammer_checkspace(dip->hmp, HAMMER_CHKSPC_CREATE)) != 0)
                return (error);

        /*
         * Create a transaction to cover the operations we perform.
         */
        hammer_start_transaction(&trans, dip->hmp);
        ++hammer_stats_file_iopsw;

        /*
         * Create a new filesystem object of the requested type.  The
         * returned inode will be referenced but not locked.
         *
         * If mknod specifies a directory a pseudo-fs is created.
         */
        error = hammer_create_inode(&trans, ap->a_vap, ap->a_cred,
                                    dip, NULL, &nip);
        if (error) {
                hammer_done_transaction(&trans);
                *ap->a_vpp = NULL;
                return (error);
        }

        /*
         * Add the new filesystem object to the directory.  This will also
         * bump the inode's link count.
         */
        error = hammer_ip_add_directory(&trans, dip,
                                        nch->ncp->nc_name, nch->ncp->nc_nlen,
                                        nip);

        /*
         * Finish up.
         */
        if (error) {
                hammer_rel_inode(nip, 0);
                *ap->a_vpp = NULL;
        } else {
                error = hammer_get_vnode(nip, ap->a_vpp);
                hammer_rel_inode(nip, 0);
                if (error == 0) {
                        cache_setunresolved(ap->a_nch);
                        cache_setvp(ap->a_nch, *ap->a_vpp);
                }
        }
        hammer_done_transaction(&trans);
        return (error);
}
/*
 * hammer_vop_open { vp, mode, cred, fp }
 */
static
int
hammer_vop_open(struct vop_open_args *ap)
{
        hammer_inode_t ip;

        ++hammer_stats_file_iopsr;
        ip = VTOI(ap->a_vp);

        if ((ap->a_mode & FWRITE) && (ip->flags & HAMMER_INODE_RO))
                return (EROFS);
        return(vop_stdopen(ap));
}

/*
 * hammer_vop_pathconf { vp, name, retval }
 */
static
int
hammer_vop_pathconf(struct vop_pathconf_args *ap)
{
        return EOPNOTSUPP;
}

/*
 * hammer_vop_print { vp }
 */
static
int
hammer_vop_print(struct vop_print_args *ap)
{
        return EOPNOTSUPP;
}
/*
 * hammer_vop_readdir { vp, uio, cred, *eofflag, *ncookies, off_t **cookies }
 */
static
int
hammer_vop_readdir(struct vop_readdir_args *ap)
{
        struct hammer_transaction trans;
        struct hammer_cursor cursor;
        struct hammer_inode *ip;
        struct uio *uio;
        hammer_base_elm_t base;
        int error;
        int cookie_index;
        int ncookies;
        off_t *cookies;
        off_t saveoff;
        int r;
        int dtype;

        ++hammer_stats_file_iopsr;
        ip = VTOI(ap->a_vp);
        uio = ap->a_uio;
        saveoff = uio->uio_offset;

        if (ap->a_ncookies) {
                ncookies = uio->uio_resid / 16 + 1;
                if (ncookies > 1024)
                        ncookies = 1024;
                cookies = kmalloc(ncookies * sizeof(off_t), M_TEMP, M_WAITOK);
                cookie_index = 0;
        } else {
                ncookies = -1;
                cookies = NULL;
                cookie_index = 0;
        }
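
        /*
         * (The uio_resid / 16 estimate above is only a sizing guess;
         * 16 bytes is roughly the smallest encoded dirent, so the array
         * errs toward a few spare cookie slots, and the 1024 cap bounds
         * the kmalloc.)
         */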

        hammer_simple_transaction(&trans, ip->hmp);

        /*
         * Handle artificial entries
         */
        error = 0;
        if (saveoff == 0) {
                r = vop_write_dirent(&error, uio, ip->obj_id, DT_DIR, 1, ".");
                if (r)
                        goto done;
                if (cookies)
                        cookies[cookie_index] = saveoff;
                ++saveoff;
                ++cookie_index;
                if (cookie_index == ncookies)
                        goto done;
        }
        if (saveoff == 1) {
                if (ip->ino_data.parent_obj_id) {
                        r = vop_write_dirent(&error, uio,
                                             ip->ino_data.parent_obj_id,
                                             DT_DIR, 2, "..");
                } else {
                        r = vop_write_dirent(&error, uio,
                                             ip->obj_id, DT_DIR, 2, "..");
                }
                if (r)
                        goto done;
                if (cookies)
                        cookies[cookie_index] = saveoff;
                ++saveoff;
                ++cookie_index;
                if (cookie_index == ncookies)
                        goto done;
        }

        /*
         * Key range (begin and end inclusive) to scan.  Directory keys
         * directly translate to a 64 bit 'seek' position.
         */
        hammer_init_cursor(&trans, &cursor, &ip->cache[1], ip);
        cursor.key_beg.localization = ip->obj_localization +
                                      HAMMER_LOCALIZE_MISC;
        cursor.key_beg.obj_id = ip->obj_id;
        cursor.key_beg.create_tid = 0;
        cursor.key_beg.delete_tid = 0;
        cursor.key_beg.rec_type = HAMMER_RECTYPE_DIRENTRY;
        cursor.key_beg.obj_type = 0;
        cursor.key_beg.key = saveoff;

        cursor.key_end = cursor.key_beg;
        cursor.key_end.key = HAMMER_MAX_KEY;
        cursor.asof = ip->obj_asof;
        cursor.flags |= HAMMER_CURSOR_END_INCLUSIVE | HAMMER_CURSOR_ASOF;

        error = hammer_ip_first(&cursor);

        while (error == 0) {
                error = hammer_ip_resolve_data(&cursor);
                if (error)
                        break;
                base = &cursor.leaf->base;
                saveoff = base->key;
                KKASSERT(cursor.leaf->data_len > HAMMER_ENTRY_NAME_OFF);

                if (base->obj_id != ip->obj_id)
                        panic("readdir: bad record at %p", cursor.node);

                /*
                 * Convert pseudo-filesystems into softlinks
                 */
                dtype = hammer_get_dtype(cursor.leaf->base.obj_type);
                r = vop_write_dirent(
                             &error, uio, cursor.data->entry.obj_id,
                             dtype,
                             cursor.leaf->data_len - HAMMER_ENTRY_NAME_OFF,
                             (void *)cursor.data->entry.name);
                if (r)
                        break;
                ++saveoff;
                if (cookies)
                        cookies[cookie_index] = base->key;
                ++cookie_index;
                if (cookie_index == ncookies)
                        break;
                error = hammer_ip_next(&cursor);
        }
        hammer_done_cursor(&cursor);

done:
        hammer_done_transaction(&trans);

        if (ap->a_eofflag)
                *ap->a_eofflag = (error == ENOENT);
        uio->uio_offset = saveoff;
        if (error && cookie_index == 0) {
                if (error == ENOENT)
                        error = 0;
                if (cookies) {
                        kfree(cookies, M_TEMP);
                        *ap->a_ncookies = 0;
                        *ap->a_cookies = NULL;
                }
        } else {
                if (error == ENOENT)
                        error = 0;
                if (cookies) {
                        *ap->a_ncookies = cookie_index;
                        *ap->a_cookies = cookies;
                }
        }
        return(error);
}
/*
 * hammer_vop_readlink { vp, uio, cred }
 */
static
int
hammer_vop_readlink(struct vop_readlink_args *ap)
{
        struct hammer_transaction trans;
        struct hammer_cursor cursor;
        struct hammer_inode *ip;
        char buf[32];
        u_int32_t localization;
        hammer_pseudofs_inmem_t pfsm;
        int error;

        ip = VTOI(ap->a_vp);

        /*
         * Shortcut if the symlink data was stuffed into ino_data.
         *
         * Also expand special "@@PFS%05d" softlinks (expansion only
         * occurs for non-historical (current) accesses made from the
         * primary filesystem).
         */
        if (ip->ino_data.size <= HAMMER_INODE_BASESYMLEN) {
                char *ptr;
                int bytes;

                ptr = ip->ino_data.ext.symlink;
                bytes = (int)ip->ino_data.size;
                if (bytes == 10 &&
                    ip->obj_asof == HAMMER_MAX_TID &&
                    ip->obj_localization == 0 &&
                    strncmp(ptr, "@@PFS", 5) == 0) {
                        hammer_simple_transaction(&trans, ip->hmp);
                        bcopy(ptr + 5, buf, 5);
                        buf[5] = 0;
                        localization = strtoul(buf, NULL, 10) << 16;
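
                        /*
                         * e.g. a "@@PFS00003" softlink isolates the five
                         * digit id "00003" and shifts it into the upper
                         * 16 bits, the position HAMMER uses for PFS ids
                         * within a localization value.
                         */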
                        pfsm = hammer_load_pseudofs(&trans, localization,
                                                    &error);
                        if (error == 0) {
                                if (pfsm->pfsd.mirror_flags &
                                    HAMMER_PFSD_SLAVE) {
                                        ksnprintf(buf, sizeof(buf),
                                                  "@@0x%016llx:%05d",
                                                  pfsm->pfsd.sync_end_tid,
                                                  localization >> 16);
                                } else {
                                        ksnprintf(buf, sizeof(buf),
                                                  "@@0x%016llx:%05d",
                                                  HAMMER_MAX_TID,
                                                  localization >> 16);
                                }
                                ptr = buf;
                                bytes = strlen(buf);
                        }
                        if (pfsm)
                                hammer_rel_pseudofs(trans.hmp, pfsm);
                        hammer_done_transaction(&trans);
                }
                error = uiomove(ptr, bytes, ap->a_uio);
                return(error);
        }

        /*
         * Long version
         */
        hammer_simple_transaction(&trans, ip->hmp);
        ++hammer_stats_file_iopsr;
        hammer_init_cursor(&trans, &cursor, &ip->cache[1], ip);

        /*
         * Key range (begin and end inclusive) to scan.  Directory keys
         * directly translate to a 64 bit 'seek' position.
         */
        cursor.key_beg.localization = ip->obj_localization +
                                      HAMMER_LOCALIZE_MISC;
        cursor.key_beg.obj_id = ip->obj_id;
        cursor.key_beg.create_tid = 0;
        cursor.key_beg.delete_tid = 0;
        cursor.key_beg.rec_type = HAMMER_RECTYPE_FIX;
        cursor.key_beg.obj_type = 0;
        cursor.key_beg.key = HAMMER_FIXKEY_SYMLINK;
        cursor.asof = ip->obj_asof;
        cursor.flags |= HAMMER_CURSOR_ASOF;

        error = hammer_ip_lookup(&cursor);
        if (error == 0) {
                error = hammer_ip_resolve_data(&cursor);
                if (error == 0) {
                        KKASSERT(cursor.leaf->data_len >=
                                 HAMMER_SYMLINK_NAME_OFF);
                        error = uiomove(cursor.data->symlink.name,
                                        cursor.leaf->data_len -
                                                HAMMER_SYMLINK_NAME_OFF,
                                        ap->a_uio);
                }
        }
        hammer_done_cursor(&cursor);
        hammer_done_transaction(&trans);
        return(error);
}
/*
 * hammer_vop_nremove { nch, dvp, cred }
 */
static
int
hammer_vop_nremove(struct vop_nremove_args *ap)
{
        struct hammer_transaction trans;
        struct hammer_inode *dip;
        int error;

        dip = VTOI(ap->a_dvp);

        if (hammer_nohistory(dip) == 0 &&
            (error = hammer_checkspace(dip->hmp, HAMMER_CHKSPC_REMOVE)) != 0) {
                return (error);
        }

        hammer_start_transaction(&trans, dip->hmp);
        ++hammer_stats_file_iopsw;
        error = hammer_dounlink(&trans, ap->a_nch, ap->a_dvp, ap->a_cred, 0);
        hammer_done_transaction(&trans);

        return (error);
}
/*
 * hammer_vop_nrename { fnch, tnch, fdvp, tdvp, cred }
 */
static
int
hammer_vop_nrename(struct vop_nrename_args *ap)
{
        struct hammer_transaction trans;
        struct namecache *fncp;
        struct namecache *tncp;
        struct hammer_inode *fdip;
        struct hammer_inode *tdip;
        struct hammer_inode *ip;
        struct hammer_cursor cursor;
        int64_t namekey;
        int nlen, error;

        if (ap->a_fdvp->v_mount != ap->a_tdvp->v_mount)
                return(EXDEV);
        if (ap->a_fdvp->v_mount != ap->a_fnch->ncp->nc_vp->v_mount)
                return(EXDEV);

        fdip = VTOI(ap->a_fdvp);
        tdip = VTOI(ap->a_tdvp);
        fncp = ap->a_fnch->ncp;
        tncp = ap->a_tnch->ncp;
        ip = VTOI(fncp->nc_vp);
        KKASSERT(ip != NULL);

        if (fdip->obj_localization != tdip->obj_localization)
                return(EXDEV);
        if (fdip->obj_localization != ip->obj_localization)
                return(EXDEV);

        if (fdip->flags & HAMMER_INODE_RO)
                return (EROFS);
        if (tdip->flags & HAMMER_INODE_RO)
                return (EROFS);
        if (ip->flags & HAMMER_INODE_RO)
                return (EROFS);
        if ((error = hammer_checkspace(fdip->hmp, HAMMER_CHKSPC_CREATE)) != 0)
                return (error);

        hammer_start_transaction(&trans, fdip->hmp);
        ++hammer_stats_file_iopsw;

        /*
         * Remove tncp from the target directory and then link ip as
         * tncp. XXX pass trans to dounlink
         *
         * Force the inode sync-time to match the transaction so it is
         * in-sync with the creation of the target directory entry.
         */
        error = hammer_dounlink(&trans, ap->a_tnch, ap->a_tdvp, ap->a_cred, 0);
        if (error == 0 || error == ENOENT) {
                error = hammer_ip_add_directory(&trans, tdip,
                                                tncp->nc_name, tncp->nc_nlen,
                                                ip);
                if (error == 0) {
                        ip->ino_data.parent_obj_id = tdip->obj_id;
                        hammer_modify_inode(ip, HAMMER_INODE_DDIRTY);
                }
        }
        if (error)
                goto failed; /* XXX */

        /*
         * Locate the record in the originating directory and remove it.
         *
         * Calculate the namekey and setup the key range for the scan.  This
         * works kinda like a chained hash table where the lower 32 bits
         * of the namekey synthesize the chain.
         *
         * The key range is inclusive of both key_beg and key_end.
         */
        namekey = hammer_directory_namekey(fncp->nc_name, fncp->nc_nlen);
retry:
        hammer_init_cursor(&trans, &cursor, &fdip->cache[1], fdip);
        cursor.key_beg.localization = fdip->obj_localization +
                                      HAMMER_LOCALIZE_MISC;
        cursor.key_beg.obj_id = fdip->obj_id;
        cursor.key_beg.key = namekey;
        cursor.key_beg.create_tid = 0;
        cursor.key_beg.delete_tid = 0;
        cursor.key_beg.rec_type = HAMMER_RECTYPE_DIRENTRY;
        cursor.key_beg.obj_type = 0;

        cursor.key_end = cursor.key_beg;
        cursor.key_end.key |= 0xFFFFFFFFULL;
        cursor.asof = fdip->obj_asof;
        cursor.flags |= HAMMER_CURSOR_END_INCLUSIVE | HAMMER_CURSOR_ASOF;

        /*
         * Scan all matching records (the chain), locate the one matching
         * the requested path component.
         *
         * The hammer_ip_*() functions merge in-memory records with on-disk
         * records for the purposes of the search.
         */
        error = hammer_ip_first(&cursor);
        while (error == 0) {
                if (hammer_ip_resolve_data(&cursor) != 0)
                        break;
                nlen = cursor.leaf->data_len - HAMMER_ENTRY_NAME_OFF;
                KKASSERT(nlen > 0);
                if (fncp->nc_nlen == nlen &&
                    bcmp(fncp->nc_name, cursor.data->entry.name, nlen) == 0) {
                        break;
                }
                error = hammer_ip_next(&cursor);
        }

        /*
         * If all is ok we have to get the inode so we can adjust nlinks.
         *
         * WARNING: hammer_ip_del_directory() may have to terminate the
         * cursor to avoid a recursion.  It's ok to call hammer_done_cursor()
         * twice.
         */
        if (error == 0)
                error = hammer_ip_del_directory(&trans, &cursor, fdip, ip);

        /*
         * XXX A deadlock here will break rename's atomicity for the
         * purposes of crash recovery.
         */
        if (error == EDEADLK) {
                hammer_done_cursor(&cursor);
                goto retry;
        }

        /*
         * Cleanup and tell the kernel that the rename succeeded.
         */
        hammer_done_cursor(&cursor);
        if (error == 0)
                cache_rename(ap->a_fnch, ap->a_tnch);

failed:
        hammer_done_transaction(&trans);
        return (error);
}
/*
 * hammer_vop_nrmdir { nch, dvp, cred }
 */
static
int
hammer_vop_nrmdir(struct vop_nrmdir_args *ap)
{
        struct hammer_transaction trans;
        struct hammer_inode *dip;
        int error;

        dip = VTOI(ap->a_dvp);

        if (hammer_nohistory(dip) == 0 &&
            (error = hammer_checkspace(dip->hmp, HAMMER_CHKSPC_REMOVE)) != 0) {
                return (error);
        }

        hammer_start_transaction(&trans, dip->hmp);
        ++hammer_stats_file_iopsw;
        error = hammer_dounlink(&trans, ap->a_nch, ap->a_dvp, ap->a_cred, 0);
        hammer_done_transaction(&trans);

        return (error);
}
/*
 * hammer_vop_setattr { vp, vap, cred }
 */
static
int
hammer_vop_setattr(struct vop_setattr_args *ap)
{
        struct hammer_transaction trans;
        struct vattr *vap;
        struct hammer_inode *ip;
        int modflags;
        int error;
        int truncating;
        int blksize;
        int64_t aligned_size;
        u_int32_t flags;

        vap = ap->a_vap;
        ip = ap->a_vp->v_data;
        modflags = 0;

        if (ap->a_vp->v_mount->mnt_flag & MNT_RDONLY)
                return(EROFS);
        if (ip->flags & HAMMER_INODE_RO)
                return (EROFS);
        if (hammer_nohistory(ip) == 0 &&
            (error = hammer_checkspace(ip->hmp, HAMMER_CHKSPC_REMOVE)) != 0) {
                return (error);
        }

        hammer_start_transaction(&trans, ip->hmp);
        ++hammer_stats_file_iopsw;
        error = 0;

        if (vap->va_flags != VNOVAL) {
                flags = ip->ino_data.uflags;
                error = vop_helper_setattr_flags(&flags, vap->va_flags,
                                hammer_to_unix_xid(&ip->ino_data.uid),
                                ap->a_cred);
                if (error == 0) {
                        if (ip->ino_data.uflags != flags) {
                                ip->ino_data.uflags = flags;
                                modflags |= HAMMER_INODE_DDIRTY;
                        }
                        if (ip->ino_data.uflags & (IMMUTABLE | APPEND)) {
                                error = 0;
                                goto done;
                        }
                }
                goto done;
        }
        if (ip->ino_data.uflags & (IMMUTABLE | APPEND)) {
                error = EPERM;
                goto done;
        }
        if (vap->va_uid != (uid_t)VNOVAL || vap->va_gid != (gid_t)VNOVAL) {
                mode_t cur_mode = ip->ino_data.mode;
                uid_t cur_uid = hammer_to_unix_xid(&ip->ino_data.uid);
                gid_t cur_gid = hammer_to_unix_xid(&ip->ino_data.gid);
                uuid_t uuid_uid;
                uuid_t uuid_gid;

                error = vop_helper_chown(ap->a_vp, vap->va_uid, vap->va_gid,
                                         ap->a_cred,
                                         &cur_uid, &cur_gid, &cur_mode);
                if (error == 0) {
                        hammer_guid_to_uuid(&uuid_uid, cur_uid);
                        hammer_guid_to_uuid(&uuid_gid, cur_gid);
                        if (bcmp(&uuid_uid, &ip->ino_data.uid,
                                 sizeof(uuid_uid)) ||
                            bcmp(&uuid_gid, &ip->ino_data.gid,
                                 sizeof(uuid_gid)) ||
                            ip->ino_data.mode != cur_mode
                        ) {
                                ip->ino_data.uid = uuid_uid;
                                ip->ino_data.gid = uuid_gid;
                                ip->ino_data.mode = cur_mode;
                        }
                        modflags |= HAMMER_INODE_DDIRTY;
                }
        }
        while (vap->va_size != VNOVAL && ip->ino_data.size != vap->va_size) {
                switch(ap->a_vp->v_type) {
                case VREG:
                        if (vap->va_size == ip->ino_data.size)
                                break;
                        /*
                         * XXX break atomicity, we can deadlock the backend
                         * if we do not release the lock.  Probably not a
                         * big deal here.
                         */
                        blksize = hammer_blocksize(vap->va_size);
                        if (vap->va_size < ip->ino_data.size) {
                                vtruncbuf(ap->a_vp, vap->va_size, blksize);
                                truncating = 1;
                        } else {
                                vnode_pager_setsize(ap->a_vp, vap->va_size);
                                truncating = 0;
                        }
                        ip->ino_data.size = vap->va_size;
                        modflags |= HAMMER_INODE_DDIRTY;

                        /*
                         * on-media truncation is cached in the inode until
                         * the inode is synchronized.
                         */
                        if (truncating) {
                                hammer_ip_frontend_trunc(ip, vap->va_size);
#ifdef DEBUG_TRUNCATE
                                if (HammerTruncIp == NULL)
                                        HammerTruncIp = ip;
#endif
                                if ((ip->flags & HAMMER_INODE_TRUNCATED) == 0) {
                                        ip->flags |= HAMMER_INODE_TRUNCATED;
                                        ip->trunc_off = vap->va_size;
#ifdef DEBUG_TRUNCATE
                                        if (ip == HammerTruncIp)
                                                kprintf("truncate1 %016llx\n", ip->trunc_off);
#endif
                                } else if (ip->trunc_off > vap->va_size) {
                                        ip->trunc_off = vap->va_size;
#ifdef DEBUG_TRUNCATE
                                        if (ip == HammerTruncIp)
                                                kprintf("truncate2 %016llx\n", ip->trunc_off);
#endif
                                } else {
#ifdef DEBUG_TRUNCATE
                                        if (ip == HammerTruncIp)
                                                kprintf("truncate3 %016llx (ignored)\n", vap->va_size);
#endif
                                }
                        }

                        /*
                         * If truncating we have to clean out a portion of
                         * the last block on-disk.  We do this in the
                         * front-end buffer cache.
                         */
                        aligned_size = (vap->va_size + (blksize - 1)) &
                                       ~(int64_t)(blksize - 1);
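
                        /*
                         * (Worked example: truncating to 100000 bytes
                         * with a 16384 byte block rounds aligned_size
                         * up to 114688, so the partial block cleaned
                         * below is the one spanning 98304..114687.)
                         */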
                        if (truncating && vap->va_size < aligned_size) {
                                struct buf *bp;
                                int offset;

                                aligned_size -= blksize;

                                offset = (int)vap->va_size & (blksize - 1);
                                error = bread(ap->a_vp, aligned_size,
                                              blksize, &bp);
                                hammer_ip_frontend_trunc(ip, aligned_size);
                                if (error == 0) {
                                        bzero(bp->b_data + offset,
                                              blksize - offset);
                                        /* must de-cache direct-io offset */
                                        bp->b_bio2.bio_offset = NOOFFSET;
                                        bdwrite(bp);
                                } else {
                                        kprintf("ERROR %d\n", error);
                                        brelse(bp);
                                }
                        }
                        break;
                case VDATABASE:
                        if ((ip->flags & HAMMER_INODE_TRUNCATED) == 0) {
                                ip->flags |= HAMMER_INODE_TRUNCATED;
                                ip->trunc_off = vap->va_size;
                        } else if (ip->trunc_off > vap->va_size) {
                                ip->trunc_off = vap->va_size;
                        }
                        hammer_ip_frontend_trunc(ip, vap->va_size);
                        ip->ino_data.size = vap->va_size;
                        modflags |= HAMMER_INODE_DDIRTY;
                        break;
                default:
                        error = EINVAL;
                        goto done;
                }
                break;
        }
        if (vap->va_atime.tv_sec != VNOVAL) {
                ip->ino_data.atime =
                        hammer_timespec_to_time(&vap->va_atime);
                modflags |= HAMMER_INODE_ATIME;
        }
        if (vap->va_mtime.tv_sec != VNOVAL) {
                ip->ino_data.mtime =
                        hammer_timespec_to_time(&vap->va_mtime);
                modflags |= HAMMER_INODE_MTIME;
        }
        if (vap->va_mode != (mode_t)VNOVAL) {
                mode_t cur_mode = ip->ino_data.mode;
                uid_t cur_uid = hammer_to_unix_xid(&ip->ino_data.uid);
                gid_t cur_gid = hammer_to_unix_xid(&ip->ino_data.gid);

                error = vop_helper_chmod(ap->a_vp, vap->va_mode, ap->a_cred,
                                         cur_uid, cur_gid, &cur_mode);
                if (error == 0 && ip->ino_data.mode != cur_mode) {
                        ip->ino_data.mode = cur_mode;
                        modflags |= HAMMER_INODE_DDIRTY;
                }
        }
done:
        if (error == 0)
                hammer_modify_inode(ip, modflags);
        hammer_done_transaction(&trans);
        return (error);
}
/*
 * hammer_vop_nsymlink { nch, dvp, vpp, cred, vap, target }
 */
static
int
hammer_vop_nsymlink(struct vop_nsymlink_args *ap)
{
        struct hammer_transaction trans;
        struct hammer_inode *dip;
        struct hammer_inode *nip;
        struct nchandle *nch;
        hammer_record_t record;
        int error;
        int bytes;

        ap->a_vap->va_type = VLNK;

        nch = ap->a_nch;
        dip = VTOI(ap->a_dvp);

        if (dip->flags & HAMMER_INODE_RO)
                return (EROFS);
        if ((error = hammer_checkspace(dip->hmp, HAMMER_CHKSPC_CREATE)) != 0)
                return (error);

        /*
         * Create a transaction to cover the operations we perform.
         */
        hammer_start_transaction(&trans, dip->hmp);
        ++hammer_stats_file_iopsw;

        /*
         * Create a new filesystem object of the requested type.  The
         * returned inode will be referenced but not locked.
         */
        error = hammer_create_inode(&trans, ap->a_vap, ap->a_cred,
                                    dip, NULL, &nip);
        if (error) {
                hammer_done_transaction(&trans);
                *ap->a_vpp = NULL;
                return (error);
        }

        /*
         * Add a record representing the symlink.  symlink stores the link
         * as pure data, not a string, and is not \0 terminated.
         */
        if (error == 0) {
                bytes = strlen(ap->a_target);

                if (bytes <= HAMMER_INODE_BASESYMLEN) {
                        bcopy(ap->a_target, nip->ino_data.ext.symlink, bytes);
                } else {
                        record = hammer_alloc_mem_record(nip, bytes);
                        record->type = HAMMER_MEM_RECORD_GENERAL;

                        record->leaf.base.localization = nip->obj_localization +
                                                         HAMMER_LOCALIZE_MISC;
                        record->leaf.base.key = HAMMER_FIXKEY_SYMLINK;
                        record->leaf.base.rec_type = HAMMER_RECTYPE_FIX;
                        record->leaf.data_len = bytes;
                        KKASSERT(HAMMER_SYMLINK_NAME_OFF == 0);
                        bcopy(ap->a_target, record->data->symlink.name, bytes);
                        error = hammer_ip_add_record(&trans, record);
                }

                /*
                 * Set the file size to the length of the link.
                 */
                if (error == 0) {
                        nip->ino_data.size = bytes;
                        hammer_modify_inode(nip, HAMMER_INODE_DDIRTY);
                }
        }
        if (error == 0)
                error = hammer_ip_add_directory(&trans, dip, nch->ncp->nc_name,
                                                nch->ncp->nc_nlen, nip);

        /*
         * Finish up.
         */
        if (error) {
                hammer_rel_inode(nip, 0);
                *ap->a_vpp = NULL;
        } else {
                error = hammer_get_vnode(nip, ap->a_vpp);
                hammer_rel_inode(nip, 0);
                if (error == 0) {
                        cache_setunresolved(ap->a_nch);
                        cache_setvp(ap->a_nch, *ap->a_vpp);
                }
        }
        hammer_done_transaction(&trans);
        return (error);
}
/*
 * hammer_vop_nwhiteout { nch, dvp, cred, flags }
 */
static
int
hammer_vop_nwhiteout(struct vop_nwhiteout_args *ap)
{
        struct hammer_transaction trans;
        struct hammer_inode *dip;
        int error;

        dip = VTOI(ap->a_dvp);

        if (hammer_nohistory(dip) == 0 &&
            (error = hammer_checkspace(dip->hmp, HAMMER_CHKSPC_CREATE)) != 0) {
                return (error);
        }

        hammer_start_transaction(&trans, dip->hmp);
        ++hammer_stats_file_iopsw;
        error = hammer_dounlink(&trans, ap->a_nch, ap->a_dvp,
                                ap->a_cred, ap->a_flags);
        hammer_done_transaction(&trans);

        return (error);
}
/*
 * hammer_vop_ioctl { vp, command, data, fflag, cred }
 */
static
int
hammer_vop_ioctl(struct vop_ioctl_args *ap)
{
        struct hammer_inode *ip = ap->a_vp->v_data;

        ++hammer_stats_file_iopsr;
        return(hammer_ioctl(ip, ap->a_command, ap->a_data,
                            ap->a_fflag, ap->a_cred));
}

static
int
hammer_vop_mountctl(struct vop_mountctl_args *ap)
{
        struct mount *mp;
        int error;

        mp = ap->a_head.a_ops->head.vv_mount;

        switch(ap->a_op) {
        case MOUNTCTL_SET_EXPORT:
                if (ap->a_ctllen != sizeof(struct export_args))
                        error = EINVAL;
                else
                        error = hammer_vfs_export(mp, ap->a_op,
                                    (const struct export_args *)ap->a_ctl);
                break;
        default:
                error = journal_mountctl(ap);
                break;
        }
        return(error);
}
/*
 * hammer_vop_strategy { vp, bio }
 *
 * Strategy call, used for regular file read & write only.  Note that the
 * bp may represent a cluster.
 *
 * To simplify operation and allow better optimizations in the future,
 * this code does not make any assumptions with regards to buffer alignment
 * or size.
 */
static
int
hammer_vop_strategy(struct vop_strategy_args *ap)
{
        struct buf *bp;
        int error;

        bp = ap->a_bio->bio_buf;

        switch(bp->b_cmd) {
        case BUF_CMD_READ:
                error = hammer_vop_strategy_read(ap);
                break;
        case BUF_CMD_WRITE:
                error = hammer_vop_strategy_write(ap);
                break;
        default:
                bp->b_error = error = EINVAL;
                bp->b_flags |= B_ERROR;
                biodone(ap->a_bio);
                break;
        }
        return (error);
}
/*
 * Read from a regular file.  Iterate the related records and fill in the
 * BIO/BUF.  Gaps are zero-filled.
 *
 * The support code in hammer_object.c should be used to deal with mixed
 * in-memory and on-disk records.
 *
 * NOTE: Can be called from the cluster code with an oversized buf.
 *
 * XXX atime update
 */
static
int
hammer_vop_strategy_read(struct vop_strategy_args *ap)
{
        struct hammer_transaction trans;
        struct hammer_inode *ip;
        struct hammer_cursor cursor;
        hammer_base_elm_t base;
        hammer_off_t disk_offset;
        struct bio *bio;
        struct bio *nbio;
        struct buf *bp;
        int64_t rec_offset;
        int64_t ran_end;
        int64_t tmp64;
        int error;
        int boff;
        int roff;
        int n;

        bio = ap->a_bio;
        bp = bio->bio_buf;
        ip = ap->a_vp->v_data;

        /*
         * The zone-2 disk offset may have been set by the cluster code via
         * a BMAP operation, or else should be NOOFFSET.
         *
         * Checking the high bits for a match against zone-2 should suffice.
         */
        nbio = push_bio(bio);
        if ((nbio->bio_offset & HAMMER_OFF_ZONE_MASK) ==
            HAMMER_ZONE_LARGE_DATA) {
                error = hammer_io_direct_read(ip->hmp, nbio, NULL);
                return (error);
        }
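
        /*
         * (When the zone bits already identify large-data zone-2 storage,
         * the B-Tree scan below is skipped entirely and the BIO is issued
         * at its media offset; this is what makes cluster/BMAP-primed
         * reads cheap.)
         */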

	/*
	 * Well, that sucked.  Do it the hard way.  If all the stars are
	 * aligned we may still be able to issue a direct-read.
	 */
	hammer_simple_transaction(&trans, ip->hmp);
	hammer_init_cursor(&trans, &cursor, &ip->cache[1], ip);

	/*
	 * Key range (begin and end inclusive) to scan.  Note that the keys
	 * stored in the actual records represent BASE+LEN, not BASE.  The
	 * first record containing bio_offset will have a key > bio_offset.
	 */
	cursor.key_beg.localization = ip->obj_localization +
				      HAMMER_LOCALIZE_MISC;
	cursor.key_beg.obj_id = ip->obj_id;
	cursor.key_beg.create_tid = 0;
	cursor.key_beg.delete_tid = 0;
	cursor.key_beg.obj_type = 0;
	cursor.key_beg.key = bio->bio_offset + 1;
	cursor.asof = ip->obj_asof;
	cursor.flags |= HAMMER_CURSOR_ASOF;

	cursor.key_end = cursor.key_beg;
	KKASSERT(ip->ino_data.obj_type == HAMMER_OBJTYPE_REGFILE);
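	/*
	 * Illustration of the BASE+LEN keying: a 16K data record covering
	 * file offsets 0..16383 is keyed at 16384, so a scan starting at
	 * key (bio_offset + 1) finds the record overlapping bio_offset,
	 * if one exists.
	 */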
#if 0
	if (ip->ino_data.obj_type == HAMMER_OBJTYPE_DBFILE) {
		cursor.key_beg.rec_type = HAMMER_RECTYPE_DB;
		cursor.key_end.rec_type = HAMMER_RECTYPE_DB;
		cursor.key_end.key = 0x7FFFFFFFFFFFFFFFLL;
	} else
#endif
	{
		ran_end = bio->bio_offset + bp->b_bufsize;
		cursor.key_beg.rec_type = HAMMER_RECTYPE_DATA;
		cursor.key_end.rec_type = HAMMER_RECTYPE_DATA;
		tmp64 = ran_end + MAXPHYS + 1;	/* work-around GCC-4 bug */
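		/*
		 * If the addition above wrapped, tmp64 is now less than
		 * ran_end and we clamp the scan to the maximum positive
		 * key instead.
		 */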
		if (tmp64 < ran_end)
			cursor.key_end.key = 0x7FFFFFFFFFFFFFFFLL;
		else
			cursor.key_end.key = ran_end + MAXPHYS + 1;
	}
	cursor.flags |= HAMMER_CURSOR_END_INCLUSIVE;

	error = hammer_ip_first(&cursor);
	boff = 0;

	while (error == 0) {
		/*
		 * Get the base file offset of the record.  The key for
		 * data records is (base + bytes) rather than (base).
		 */
		base = &cursor.leaf->base;
		rec_offset = base->key - cursor.leaf->data_len;

		/*
		 * Calculate the gap, if any, and zero-fill it.
		 *
		 * n is the offset of the start of the record versus our
		 * current seek offset in the bio.
		 */
		n = (int)(rec_offset - (bio->bio_offset + boff));
		if (n > 0) {
			if (n > bp->b_bufsize - boff)
				n = bp->b_bufsize - boff;
			bzero((char *)bp->b_data + boff, n);
			boff += n;
			n = 0;
		}
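		/*
		 * At this point n <= 0.  A negative n means the record
		 * begins before our current position in the bio; -n
		 * becomes roff below, the offset into the record's data
		 * at which copying starts.
		 */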

		/*
		 * Calculate the data offset in the record and the number
		 * of bytes we can copy.
		 *
		 * There are two degenerate cases.  First, boff may already
		 * be at bp->b_bufsize.  Secondly, the data offset within
		 * the record may exceed the record's size.
		 */
		roff = -n;
		rec_offset += roff;
		n = cursor.leaf->data_len - roff;
		if (n <= 0) {
			kprintf("strategy_read: bad n=%d roff=%d\n", n, roff);
			n = 0;
		} else if (n > bp->b_bufsize - boff) {
			n = bp->b_bufsize - boff;
		}

		/*
		 * Deal with cached truncations.  This cool bit of code
		 * allows truncate()/ftruncate() to avoid having to sync
		 * the file.
		 *
		 * If the frontend is truncated then all backend records are
		 * subject to the frontend's truncation.
		 *
		 * If the backend is truncated then backend records on-disk
		 * (but not in-memory) are subject to the backend's
		 * truncation.  In-memory records owned by the backend
		 * represent data written after the truncation point on the
		 * backend and must not be truncated.
		 *
		 * Truncate operations deal with frontend buffer cache
		 * buffers and frontend-owned in-memory records synchronously.
		 */
		if (ip->flags & HAMMER_INODE_TRUNCATED) {
			if (hammer_cursor_ondisk(&cursor) ||
			    cursor.iprec->flush_state == HAMMER_FST_FLUSH) {
				if (ip->trunc_off <= rec_offset)
					n = 0;
				else if (ip->trunc_off < rec_offset + n)
					n = (int)(ip->trunc_off - rec_offset);
			}
		}
		if (ip->sync_flags & HAMMER_INODE_TRUNCATED) {
			if (hammer_cursor_ondisk(&cursor)) {
				if (ip->sync_trunc_off <= rec_offset)
					n = 0;
				else if (ip->sync_trunc_off < rec_offset + n)
					n = (int)(ip->sync_trunc_off - rec_offset);
			}
		}
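		/*
		 * Example of the clamp above: a cached truncation point
		 * 100 bytes into this record leaves only the first 100
		 * bytes visible, while a truncation at or before
		 * rec_offset hides the record entirely (n = 0).
		 */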

		/*
		 * Try to issue a direct read into our bio if possible,
		 * otherwise resolve the element data into a hammer_buffer
		 * and copy.
		 *
		 * The buffer on-disk should be zeroed past any real
		 * truncation point, but may not be for any synthesized
		 * truncation point from above.
		 */
		disk_offset = cursor.leaf->data_offset + roff;
		if (boff == 0 && n == bp->b_bufsize &&
		    hammer_cursor_ondisk(&cursor) &&
		    (disk_offset & HAMMER_BUFMASK) == 0) {
			KKASSERT((disk_offset & HAMMER_OFF_ZONE_MASK) ==
				 HAMMER_ZONE_LARGE_DATA);
			nbio->bio_offset = disk_offset;
			error = hammer_io_direct_read(trans.hmp, nbio,
						      cursor.leaf);
			goto done;
		} else if (n) {
			error = hammer_ip_resolve_data(&cursor);
			if (error == 0) {
				bcopy((char *)cursor.data + roff,
				      (char *)bp->b_data + boff, n);
			}
		}
		if (error)
			break;

		/*
		 * Iterate until we have filled the request.
		 */
		boff += n;
		if (boff == bp->b_bufsize)
			break;
		error = hammer_ip_next(&cursor);
	}

	/*
	 * There may have been a gap after the last record
	 */
	if (error == ENOENT)
		error = 0;
	if (error == 0 && boff != bp->b_bufsize) {
		KKASSERT(boff < bp->b_bufsize);
		bzero((char *)bp->b_data + boff, bp->b_bufsize - boff);
		/* boff = bp->b_bufsize; */
	}
	bp->b_resid = 0;
	bp->b_error = error;
	if (error)
		bp->b_flags |= B_ERROR;
	biodone(ap->a_bio);

done:
	if (cursor.node)
		hammer_cache_node(&ip->cache[1], cursor.node);
	hammer_done_cursor(&cursor);
	hammer_done_transaction(&trans);
	return(error);
}

/*
 * BMAP operation - used to support cluster_read() only.
 *
 * (struct vnode *vp, off_t loffset, off_t *doffsetp, int *runp, int *runb)
 *
 * This routine may return EOPNOTSUPP if the operation is not supported for
 * the specified offset.  The contents of the pointer arguments do not
 * need to be initialized in that case.
 *
 * If a disk address is available and properly aligned return 0 with
 * *doffsetp set to the zone-2 address, and *runp / *runb set appropriately
 * to the run-length relative to that offset.  Callers may assume that
 * *doffsetp is valid if 0 is returned, even if *runp is not sufficiently
 * large, so return EOPNOTSUPP if it is not sufficiently large.
 */
static
int
hammer_vop_bmap(struct vop_bmap_args *ap)
{
	struct hammer_transaction trans;
	struct hammer_inode *ip;
	struct hammer_cursor cursor;
	hammer_base_elm_t base;
	int64_t rec_offset;
	int64_t ran_end;
	int64_t tmp64;
	int64_t base_offset;
	int64_t base_disk_offset;
	int64_t last_offset;
	hammer_off_t last_disk_offset;
	hammer_off_t disk_offset;
	int rec_len;
	int error;
	int blksize;

	++hammer_stats_file_iopsr;
	ip = ap->a_vp->v_data;

	/*
	 * We can only BMAP regular files.  We can't BMAP database files,
	 * directories, etc.
	 */
	if (ip->ino_data.obj_type != HAMMER_OBJTYPE_REGFILE)
		return(EOPNOTSUPP);

	/*
	 * bmap is typically called with runp/runb both NULL when used
	 * for writing.  We do not support BMAP for writing at the moment.
	 */
	if (ap->a_cmd != BUF_CMD_READ)
		return(EOPNOTSUPP);

	/*
	 * Scan the B-Tree to acquire blockmap addresses, then translate
	 * to raw addresses.
	 */
	hammer_simple_transaction(&trans, ip->hmp);
#if 0
	kprintf("bmap_beg %016llx ip->cache %p\n", ap->a_loffset, ip->cache[1]);
#endif
	hammer_init_cursor(&trans, &cursor, &ip->cache[1], ip);

	/*
	 * Key range (begin and end inclusive) to scan.  Note that the keys
	 * stored in the actual records represent BASE+LEN, not BASE.  The
	 * first record containing bio_offset will have a key > bio_offset.
	 */
	cursor.key_beg.localization = ip->obj_localization +
				      HAMMER_LOCALIZE_MISC;
	cursor.key_beg.obj_id = ip->obj_id;
	cursor.key_beg.create_tid = 0;
	cursor.key_beg.delete_tid = 0;
	cursor.key_beg.obj_type = 0;
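	/*
	 * If the caller wants a backward run (a_runb) begin the scan up
	 * to MAXPHYS bytes before the requested offset so that records
	 * behind a_loffset can contribute to the run, otherwise begin
	 * with the record covering a_loffset itself.
	 */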
	if (ap->a_runb)
		cursor.key_beg.key = ap->a_loffset - MAXPHYS + 1;
	else
		cursor.key_beg.key = ap->a_loffset + 1;
	if (cursor.key_beg.key < 0)
		cursor.key_beg.key = 0;
	cursor.asof = ip->obj_asof;
	cursor.flags |= HAMMER_CURSOR_ASOF;

	cursor.key_end = cursor.key_beg;
	KKASSERT(ip->ino_data.obj_type == HAMMER_OBJTYPE_REGFILE);

	ran_end = ap->a_loffset + MAXPHYS;
	cursor.key_beg.rec_type = HAMMER_RECTYPE_DATA;
	cursor.key_end.rec_type = HAMMER_RECTYPE_DATA;
	tmp64 = ran_end + MAXPHYS + 1;	/* work-around GCC-4 bug */
	if (tmp64 < ran_end)
		cursor.key_end.key = 0x7FFFFFFFFFFFFFFFLL;
	else
		cursor.key_end.key = ran_end + MAXPHYS + 1;

	cursor.flags |= HAMMER_CURSOR_END_INCLUSIVE;

	error = hammer_ip_first(&cursor);
	base_offset = last_offset = 0;
	base_disk_offset = last_disk_offset = 0;

	while (error == 0) {
		/*
		 * Get the base file offset of the record.  The key for
		 * data records is (base + bytes) rather than (base).
		 *
		 * NOTE: rec_offset + rec_len may exceed the end-of-file.
		 *	 The extra bytes should be zero on-disk and the
		 *	 BMAP op should still be ok.
		 */
		base = &cursor.leaf->base;
		rec_offset = base->key - cursor.leaf->data_len;
		rec_len = cursor.leaf->data_len;

		/*
		 * Incorporate any cached truncation.
		 *
		 * NOTE: Modifications to rec_len based on synthesized
		 *	 truncation points remove the guarantee that any
		 *	 extended data on disk is zero (since the truncations
		 *	 may not have taken place on-media yet).
		 */
		if (ip->flags & HAMMER_INODE_TRUNCATED) {
			if (hammer_cursor_ondisk(&cursor) ||
			    cursor.iprec->flush_state == HAMMER_FST_FLUSH) {
				if (ip->trunc_off <= rec_offset)
					rec_len = 0;
				else if (ip->trunc_off < rec_offset + rec_len)
					rec_len = (int)(ip->trunc_off - rec_offset);
			}
		}
		if (ip->sync_flags & HAMMER_INODE_TRUNCATED) {
			if (hammer_cursor_ondisk(&cursor)) {
				if (ip->sync_trunc_off <= rec_offset)
					rec_len = 0;
				else if (ip->sync_trunc_off < rec_offset + rec_len)
					rec_len = (int)(ip->sync_trunc_off - rec_offset);
			}
		}

		/*
		 * Accumulate information.  If we have hit a discontiguous
		 * block reset base_offset unless we are already beyond the
		 * requested offset.  If we are, that's it, we stop.
		 */
		if (error)
			break;
		if (hammer_cursor_ondisk(&cursor)) {
			disk_offset = cursor.leaf->data_offset;
			if (rec_offset != last_offset ||
			    disk_offset != last_disk_offset) {
				if (rec_offset > ap->a_loffset)
					break;
				base_offset = rec_offset;
				base_disk_offset = disk_offset;
			}
			last_offset = rec_offset + rec_len;
			last_disk_offset = disk_offset + rec_len;
		}
		error = hammer_ip_next(&cursor);
	}

#if 0
	kprintf("BMAP %016llx: %016llx - %016llx\n",
		ap->a_loffset, base_offset, last_offset);
	kprintf("BMAP %16s: %016llx - %016llx\n",
		"", base_disk_offset, last_disk_offset);
#endif

	if (cursor.node) {
		hammer_cache_node(&ip->cache[1], cursor.node);
#if 0
		kprintf("bmap_end2 %016llx ip->cache %p\n", ap->a_loffset, ip->cache[1]);
#endif
	}
	hammer_done_cursor(&cursor);
	hammer_done_transaction(&trans);

	/*
	 * If we couldn't find any records or the records we did find were
	 * all behind the requested offset, return failure.  A forward
	 * truncation can leave a hole w/ no on-disk records.
	 */
	if (last_offset == 0 || last_offset < ap->a_loffset)
		return (EOPNOTSUPP);

	/*
	 * Figure out the block size at the requested offset and adjust
	 * our limits so the cluster_read() does not create inappropriately
	 * sized buffer cache buffers.
	 */
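	/*
	 * (HAMMER can use a smaller buffer size near the beginning of a
	 * file than it does for the bulk of it, so a run can straddle a
	 * block size boundary; hammer_blockdemarc() trims the run down
	 * to the portion whose block size matches the requested offset.)
	 */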
	blksize = hammer_blocksize(ap->a_loffset);
	if (hammer_blocksize(base_offset) != blksize) {
		base_offset = hammer_blockdemarc(base_offset, ap->a_loffset);
	}
	if (last_offset != ap->a_loffset &&
	    hammer_blocksize(last_offset - 1) != blksize) {
		last_offset = hammer_blockdemarc(ap->a_loffset,
						 last_offset - 1);
	}

	/*
	 * Returning EOPNOTSUPP simply prevents the direct-IO optimization
	 * from occurring.
	 */
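	/*
	 * Translate the requested logical offset into zone-2 space by
	 * adding its distance from the run's base file offset to the
	 * run's base disk offset.
	 */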
	disk_offset = base_disk_offset + (ap->a_loffset - base_offset);

	if ((disk_offset & HAMMER_OFF_ZONE_MASK) != HAMMER_ZONE_LARGE_DATA) {
		/*
		 * Only large-data zones can be direct-IOd
		 */
		error = EOPNOTSUPP;
	} else if ((disk_offset & HAMMER_BUFMASK) ||
		   (last_offset - ap->a_loffset) < blksize) {
		/*
		 * doffsetp is not aligned or the forward run size does
		 * not cover a whole buffer, disallow the direct I/O.
		 */
		error = EOPNOTSUPP;
	} else {
		/*
		 * We're good.
		 */
		*ap->a_doffsetp = disk_offset;
		if (ap->a_runb) {
			*ap->a_runb = ap->a_loffset - base_offset;
			KKASSERT(*ap->a_runb >= 0);
		}
		if (ap->a_runp) {
			*ap->a_runp = last_offset - ap->a_loffset;
			KKASSERT(*ap->a_runp >= 0);
		}
		error = 0;
	}
	return(error);
}

/*
 * Write to a regular file.  Because this is a strategy call the OS is
 * trying to actually get data onto the media.
 */
static
int
hammer_vop_strategy_write(struct vop_strategy_args *ap)
{
	hammer_record_t record;
	hammer_mount_t hmp;
	hammer_inode_t ip;
	struct bio *bio;
	struct buf *bp;
	int blksize;
	int bytes;
	int error;

	bio = ap->a_bio;
	bp = bio->bio_buf;
	ip = ap->a_vp->v_data;
	hmp = ip->hmp;

	blksize = hammer_blocksize(bio->bio_offset);
	KKASSERT(bp->b_bufsize == blksize);

	if (ip->flags & HAMMER_INODE_RO) {
		bp->b_error = EROFS;
		bp->b_flags |= B_ERROR;
		biodone(ap->a_bio);
		return(EROFS);
	}

	/*
	 * Interlock with inode destruction (no in-kernel or directory
	 * topology visibility).  If we queue new IO while trying to
	 * destroy the inode we can deadlock the vtrunc call in
	 * hammer_inode_unloadable_check().
	 */
	if (ip->flags & (HAMMER_INODE_DELETING|HAMMER_INODE_DELETED)) {
		bp->b_resid = 0;
		biodone(ap->a_bio);
		return(0);
	}

	/*
	 * Reserve space and issue a direct-write from the front-end.
	 * NOTE: The direct_io code will hammer_bread/bcopy smaller
	 * allocations.
	 *
	 * An in-memory record will be installed to reference the storage
	 * until the flusher can get to it.
	 *
	 * Since we own the high level bio the front-end will not try to
	 * do a direct-read until the write completes.
	 *
	 * NOTE: The only time we do not reserve a full-sized buffer's
	 * worth of data is if the file is small.  We do not try to
	 * allocate a fragment (from the small-data zone) at the end of
	 * an otherwise large file as this can lead to wildly separated
	 * data.
	 */
	KKASSERT((bio->bio_offset & HAMMER_BUFMASK) == 0);
	KKASSERT(bio->bio_offset < ip->ino_data.size);
	if (bio->bio_offset || ip->ino_data.size > HAMMER_BUFSIZE / 2)
		bytes = bp->b_bufsize;
	else
		bytes = ((int)ip->ino_data.size + 15) & ~15;
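	/*
	 * The small-file case rounds the reservation up to the next
	 * 16-byte boundary: (size + 15) & ~15, e.g. a 100 byte file
	 * reserves 112 bytes.
	 */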

	record = hammer_ip_add_bulk(ip, bio->bio_offset, bp->b_data,
				    bytes, &error);
	if (record) {
		hammer_io_direct_write(hmp, record, bio);
		if (ip->rsv_recs > 1 && hmp->rsv_recs > hammer_limit_recs)
			hammer_flush_inode(ip, 0);
	} else {
		bp->b_bio2.bio_offset = NOOFFSET;
		bp->b_error = error;
		bp->b_flags |= B_ERROR;
		biodone(ap->a_bio);
	}
	return(error);
}

/*
 * dounlink - disconnect a directory entry
 *
 * XXX whiteout support not really in yet
 */
static int
hammer_dounlink(hammer_transaction_t trans, struct nchandle *nch,
		struct vnode *dvp, struct ucred *cred, int flags)
{
	struct namecache *ncp;
	hammer_inode_t dip;
	hammer_inode_t ip;
	struct hammer_cursor cursor;
	int64_t namekey;
	int nlen, error;

	/*
	 * Calculate the namekey and set up the key range for the scan.
	 * This works kinda like a chained hash table where the lower
	 * 32 bits of the namekey synthesize the chain.
	 *
	 * The key range is inclusive of both key_beg and key_end.
	 */
	dip = VTOI(dvp);
	ncp = nch->ncp;

	if (dip->flags & HAMMER_INODE_RO)
		return (EROFS);

	namekey = hammer_directory_namekey(ncp->nc_name, ncp->nc_nlen);
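	/*
	 * The name hash lands in the upper bits of the 64 bit directory
	 * key and leaves the low 32 bits zero; colliding names are
	 * disambiguated by an iterator in those low bits, which is why
	 * key_end below ORs in 0xFFFFFFFF to sweep the entire chain.
	 */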
retry:
	hammer_init_cursor(trans, &cursor, &dip->cache[1], dip);
	cursor.key_beg.localization = dip->obj_localization +
				      HAMMER_LOCALIZE_MISC;
	cursor.key_beg.obj_id = dip->obj_id;
	cursor.key_beg.key = namekey;
	cursor.key_beg.create_tid = 0;
	cursor.key_beg.delete_tid = 0;
	cursor.key_beg.rec_type = HAMMER_RECTYPE_DIRENTRY;
	cursor.key_beg.obj_type = 0;

	cursor.key_end = cursor.key_beg;
	cursor.key_end.key |= 0xFFFFFFFFULL;
	cursor.asof = dip->obj_asof;
	cursor.flags |= HAMMER_CURSOR_END_INCLUSIVE | HAMMER_CURSOR_ASOF;

	/*
	 * Scan all matching records (the chain), locate the one matching
	 * the requested path component.  info->last_error contains the
	 * error code on search termination and could be 0, ENOENT, or
	 * something else.
	 *
	 * The hammer_ip_*() functions merge in-memory records with on-disk
	 * records for the purposes of the search.
	 */
	error = hammer_ip_first(&cursor);

	while (error == 0) {
		error = hammer_ip_resolve_data(&cursor);
		if (error)
			break;
		nlen = cursor.leaf->data_len - HAMMER_ENTRY_NAME_OFF;
		KKASSERT(nlen > 0);
		if (ncp->nc_nlen == nlen &&
		    bcmp(ncp->nc_name, cursor.data->entry.name, nlen) == 0) {
			break;
		}
		error = hammer_ip_next(&cursor);
	}

	/*
	 * If all is ok we have to get the inode so we can adjust nlinks.
	 * To avoid a deadlock with the flusher we must release the inode
	 * lock on the directory when acquiring the inode for the entry.
	 *
	 * If the target is a directory, it must be empty.
	 */
	if (error == 0) {
		hammer_unlock(&cursor.ip->lock);
		ip = hammer_get_inode(trans, dip, cursor.data->entry.obj_id,
				      dip->hmp->asof,
				      cursor.data->entry.localization,
				      0, &error);
		hammer_lock_sh(&cursor.ip->lock);
		if (error == ENOENT) {
			kprintf("obj_id %016llx\n", cursor.data->entry.obj_id);
			Debugger("ENOENT unlinking object that should exist");
		}

		/*
		 * If we are trying to remove a directory the directory must
		 * be empty.
		 *
		 * WARNING: hammer_ip_check_directory_empty() may have to
		 * terminate the cursor to avoid a deadlock.  It is ok to
		 * call hammer_done_cursor() twice.
		 */
		if (error == 0 && ip->ino_data.obj_type ==
				  HAMMER_OBJTYPE_DIRECTORY) {
			error = hammer_ip_check_directory_empty(trans, ip);
		}

		/*
		 * Delete the directory entry.
		 *
		 * WARNING: hammer_ip_del_directory() may have to terminate
		 * the cursor to avoid a deadlock.  It is ok to call
		 * hammer_done_cursor() twice.
		 */
		if (error == 0) {
			error = hammer_ip_del_directory(trans, &cursor,
							dip, ip);
		}
		hammer_done_cursor(&cursor);
		if (error == 0) {
			cache_setunresolved(nch);
			cache_setvp(nch, NULL);
			/* XXX locking */
			if (ip->vp)
				cache_inval_vp(ip->vp, CINV_DESTROY);
		}
		if (ip)
			hammer_rel_inode(ip, 0);
	} else {
		hammer_done_cursor(&cursor);
	}
	hammer_inode_waitreclaims(dip->hmp);
	if (error == EDEADLK)
		goto retry;

	return (error);
}

/************************************************************************
 *			    FIFO AND SPECFS OPS				*
 ************************************************************************
 *
 */

static int
hammer_vop_fifoclose (struct vop_close_args *ap)
{
	/* XXX update itimes */
	return (VOCALL(&fifo_vnode_vops, &ap->a_head));
}

static int
hammer_vop_fiforead (struct vop_read_args *ap)
{
	int error;

	error = VOCALL(&fifo_vnode_vops, &ap->a_head);
	/* XXX update access time */
	return (error);
}

static int
hammer_vop_fifowrite (struct vop_write_args *ap)
{
	int error;

	error = VOCALL(&fifo_vnode_vops, &ap->a_head);
	/* XXX update access time */
	return (error);
}

static int
hammer_vop_specclose (struct vop_close_args *ap)
{
	/* XXX update itimes */
	return (VOCALL(&spec_vnode_vops, &ap->a_head));
}

static int
hammer_vop_specread (struct vop_read_args *ap)
{
	/* XXX update access time */
	return (VOCALL(&spec_vnode_vops, &ap->a_head));
}

static int
hammer_vop_specwrite (struct vop_write_args *ap)
{
	/* XXX update last change time */
	return (VOCALL(&spec_vnode_vops, &ap->a_head));
}