/*
 * Copyright (c) 2007-2008 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $DragonFly: src/sys/vfs/hammer/hammer_vnops.c,v 1.101 2008/10/15 22:38:37 dillon Exp $
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/fcntl.h>
#include <sys/namecache.h>
#include <sys/vnode.h>
#include <sys/lockf.h>
#include <sys/event.h>
#include <sys/stat.h>
#include <sys/dirent.h>
#include <vm/vm_extern.h>
#include <vfs/fifofs/fifo.h>
#include "hammer.h"

/*
 * USERFS VNOPS
 */
/*static int hammer_vop_vnoperate(struct vop_generic_args *);*/
static int hammer_vop_fsync(struct vop_fsync_args *);
static int hammer_vop_read(struct vop_read_args *);
static int hammer_vop_write(struct vop_write_args *);
static int hammer_vop_access(struct vop_access_args *);
static int hammer_vop_advlock(struct vop_advlock_args *);
static int hammer_vop_close(struct vop_close_args *);
static int hammer_vop_ncreate(struct vop_ncreate_args *);
static int hammer_vop_getattr(struct vop_getattr_args *);
static int hammer_vop_nresolve(struct vop_nresolve_args *);
static int hammer_vop_nlookupdotdot(struct vop_nlookupdotdot_args *);
static int hammer_vop_nlink(struct vop_nlink_args *);
static int hammer_vop_nmkdir(struct vop_nmkdir_args *);
static int hammer_vop_nmknod(struct vop_nmknod_args *);
static int hammer_vop_open(struct vop_open_args *);
static int hammer_vop_print(struct vop_print_args *);
static int hammer_vop_readdir(struct vop_readdir_args *);
static int hammer_vop_readlink(struct vop_readlink_args *);
static int hammer_vop_nremove(struct vop_nremove_args *);
static int hammer_vop_nrename(struct vop_nrename_args *);
static int hammer_vop_nrmdir(struct vop_nrmdir_args *);
static int hammer_vop_setattr(struct vop_setattr_args *);
static int hammer_vop_strategy(struct vop_strategy_args *);
static int hammer_vop_bmap(struct vop_bmap_args *ap);
static int hammer_vop_nsymlink(struct vop_nsymlink_args *);
static int hammer_vop_nwhiteout(struct vop_nwhiteout_args *);
static int hammer_vop_ioctl(struct vop_ioctl_args *);
static int hammer_vop_mountctl(struct vop_mountctl_args *);

static int hammer_vop_fifoclose (struct vop_close_args *);
static int hammer_vop_fiforead (struct vop_read_args *);
static int hammer_vop_fifowrite (struct vop_write_args *);

static int hammer_vop_specclose (struct vop_close_args *);
static int hammer_vop_specread (struct vop_read_args *);
static int hammer_vop_specwrite (struct vop_write_args *);

struct vop_ops hammer_vnode_vops = {
        .vop_default =          vop_defaultop,
        .vop_fsync =            hammer_vop_fsync,
        .vop_getpages =         vop_stdgetpages,
        .vop_putpages =         vop_stdputpages,
        .vop_read =             hammer_vop_read,
        .vop_write =            hammer_vop_write,
        .vop_access =           hammer_vop_access,
        .vop_advlock =          hammer_vop_advlock,
        .vop_close =            hammer_vop_close,
        .vop_ncreate =          hammer_vop_ncreate,
        .vop_getattr =          hammer_vop_getattr,
        .vop_inactive =         hammer_vop_inactive,
        .vop_reclaim =          hammer_vop_reclaim,
        .vop_nresolve =         hammer_vop_nresolve,
        .vop_nlookupdotdot =    hammer_vop_nlookupdotdot,
        .vop_nlink =            hammer_vop_nlink,
        .vop_nmkdir =           hammer_vop_nmkdir,
        .vop_nmknod =           hammer_vop_nmknod,
        .vop_open =             hammer_vop_open,
        .vop_pathconf =         vop_stdpathconf,
        .vop_print =            hammer_vop_print,
        .vop_readdir =          hammer_vop_readdir,
        .vop_readlink =         hammer_vop_readlink,
        .vop_nremove =          hammer_vop_nremove,
        .vop_nrename =          hammer_vop_nrename,
        .vop_nrmdir =           hammer_vop_nrmdir,
        .vop_setattr =          hammer_vop_setattr,
        .vop_bmap =             hammer_vop_bmap,
        .vop_strategy =         hammer_vop_strategy,
        .vop_nsymlink =         hammer_vop_nsymlink,
        .vop_nwhiteout =        hammer_vop_nwhiteout,
        .vop_ioctl =            hammer_vop_ioctl,
        .vop_mountctl =         hammer_vop_mountctl
};

struct vop_ops hammer_spec_vops = {
        .vop_default =          spec_vnoperate,
        .vop_fsync =            hammer_vop_fsync,
        .vop_read =             hammer_vop_specread,
        .vop_write =            hammer_vop_specwrite,
        .vop_access =           hammer_vop_access,
        .vop_close =            hammer_vop_specclose,
        .vop_getattr =          hammer_vop_getattr,
        .vop_inactive =         hammer_vop_inactive,
        .vop_reclaim =          hammer_vop_reclaim,
        .vop_setattr =          hammer_vop_setattr
};

struct vop_ops hammer_fifo_vops = {
        .vop_default =          fifo_vnoperate,
        .vop_fsync =            hammer_vop_fsync,
        .vop_read =             hammer_vop_fiforead,
        .vop_write =            hammer_vop_fifowrite,
        .vop_access =           hammer_vop_access,
        .vop_close =            hammer_vop_fifoclose,
        .vop_getattr =          hammer_vop_getattr,
        .vop_inactive =         hammer_vop_inactive,
        .vop_reclaim =          hammer_vop_reclaim,
        .vop_setattr =          hammer_vop_setattr
};

#ifdef DEBUG_TRUNCATE
struct hammer_inode *HammerTruncIp;
#endif

static int hammer_dounlink(hammer_transaction_t trans, struct nchandle *nch,
                        struct vnode *dvp, struct ucred *cred,
                        int flags, int isdir);
static int hammer_vop_strategy_read(struct vop_strategy_args *ap);
static int hammer_vop_strategy_write(struct vop_strategy_args *ap);

#if 0
static
int
hammer_vop_vnoperate(struct vop_generic_args *)
{
        return (VOCALL(&hammer_vnode_vops, ap));
}
#endif

/*
 * hammer_vop_fsync { vp, waitfor }
 *
 * fsync() an inode to disk and wait for it to be completely committed
 * such that the information would not be undone if a crash occurred after
 * return.
 */
static
int
hammer_vop_fsync(struct vop_fsync_args *ap)
{
        hammer_inode_t ip = VTOI(ap->a_vp);

        ++hammer_count_fsyncs;
        vfsync(ap->a_vp, ap->a_waitfor, 1, NULL, NULL);
        hammer_flush_inode(ip, HAMMER_FLUSH_SIGNAL);
        if (ap->a_waitfor == MNT_WAIT) {
                vn_unlock(ap->a_vp);
                hammer_wait_inode(ip);
                vn_lock(ap->a_vp, LK_EXCLUSIVE | LK_RETRY);
        }
        return (ip->error);
}

/*
 * hammer_vop_read { vp, uio, ioflag, cred }
 */
static
int
hammer_vop_read(struct vop_read_args *ap)
{
        struct hammer_transaction trans;
        hammer_inode_t ip;
        off_t offset;
        struct buf *bp;
        struct uio *uio;
        int error;
        int n;
        int seqcount;
        int ioseqcount;
        int blksize;

        if (ap->a_vp->v_type != VREG)
                return (EINVAL);
        ip = VTOI(ap->a_vp);
        error = 0;
        uio = ap->a_uio;

        /*
         * Allow the UIO's size to override the sequential heuristic.
         */
        blksize = hammer_blocksize(uio->uio_offset);
        seqcount = (uio->uio_resid + (blksize - 1)) / blksize;
        ioseqcount = ap->a_ioflag >> 16;
        if (seqcount < ioseqcount)
                seqcount = ioseqcount;
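        /*
         * (e.g. a 64KB uio against 16KB buffers yields seqcount 4;
         * if the caller encoded a larger sequential hint in the upper
         * 16 bits of a_ioflag, that hint wins instead.)
         */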

        hammer_start_transaction(&trans, ip->hmp);

        /*
         * Access the data typically in HAMMER_BUFSIZE blocks via the
         * buffer cache, but HAMMER may use a variable block size based
         * on the offset.
         */
        while (uio->uio_resid > 0 && uio->uio_offset < ip->ino_data.size) {
                int64_t base_offset;
                int64_t file_limit;

                blksize = hammer_blocksize(uio->uio_offset);
                offset = (int)uio->uio_offset & (blksize - 1);
                base_offset = uio->uio_offset - offset;

                if (hammer_cluster_enable) {
                        /*
                         * Use file_limit to prevent cluster_read() from
                         * creating buffers of the wrong block size past
                         * the demarc.
                         */
                        file_limit = ip->ino_data.size;
                        if (base_offset < HAMMER_XDEMARC &&
                            file_limit > HAMMER_XDEMARC) {
                                file_limit = HAMMER_XDEMARC;
                        }
                        error = cluster_read(ap->a_vp,
                                             file_limit, base_offset,
                                             blksize, MAXPHYS,
                                             seqcount, &bp);
                } else {
                        error = bread(ap->a_vp, base_offset, blksize, &bp);
                }
                if (error) {
                        kprintf("error %d\n", error);
                        brelse(bp);
                        break;
                }

                /* bp->b_flags |= B_CLUSTEROK; temporarily disabled */
                n = blksize - offset;
                if (n > uio->uio_resid)
                        n = uio->uio_resid;
                if (n > ip->ino_data.size - uio->uio_offset)
                        n = (int)(ip->ino_data.size - uio->uio_offset);
                error = uiomove((char *)bp->b_data + offset, n, uio);

                /* data has a lower priority than meta-data */
                bp->b_flags |= B_AGE;
                bqrelse(bp);
                if (error)
                        break;
                hammer_stats_file_read += n;
        }
        if ((ip->flags & HAMMER_INODE_RO) == 0 &&
            (ip->hmp->mp->mnt_flag & MNT_NOATIME) == 0) {
                ip->ino_data.atime = trans.time;
                hammer_modify_inode(ip, HAMMER_INODE_ATIME);
        }
        hammer_done_transaction(&trans);
        return (error);
}

/*
 * hammer_vop_write { vp, uio, ioflag, cred }
 */
static
int
hammer_vop_write(struct vop_write_args *ap)
{
        struct hammer_transaction trans;
        struct hammer_inode *ip;
        hammer_mount_t hmp;
        struct uio *uio;
        int offset;
        off_t base_offset;
        struct buf *bp;
        int error;
        int n;
        int flags;
        int delta;
        int seqcount;

        if (ap->a_vp->v_type != VREG)
                return (EINVAL);
        ip = VTOI(ap->a_vp);
        hmp = ip->hmp;
        error = 0;
        seqcount = ap->a_ioflag >> 16;

        if (ip->flags & HAMMER_INODE_RO)
                return (EROFS);

        /*
         * Create a transaction to cover the operations we perform.
         */
        hammer_start_transaction(&trans, hmp);
        uio = ap->a_uio;

        /*
         * Check append mode
         */
        if (ap->a_ioflag & IO_APPEND)
                uio->uio_offset = ip->ino_data.size;

        /*
         * Check for illegal write offsets.  Valid range is 0...2^63-1.
         *
         * NOTE: the base_off assignment is required to work around what
         * I consider to be a GCC-4 optimization bug.
         */
        if (uio->uio_offset < 0) {
                hammer_done_transaction(&trans);
                return (EFBIG);
        }
        base_offset = uio->uio_offset + uio->uio_resid; /* work around gcc-4 */
        if (uio->uio_resid > 0 && base_offset <= 0) {
                hammer_done_transaction(&trans);
                return (EFBIG);
        }

        /*
         * Access the data typically in HAMMER_BUFSIZE blocks via the
         * buffer cache, but HAMMER may use a variable block size based
         * on the offset.
         */
        while (uio->uio_resid > 0) {
                int fixsize = 0;
                int blksize;
                int blkmask;

                if ((error = hammer_checkspace(hmp, HAMMER_CHKSPC_WRITE)) != 0)
                        break;

                blksize = hammer_blocksize(uio->uio_offset);

                /*
                 * Do not allow HAMMER to blow out the buffer cache.  Very
                 * large UIOs can lock out other processes due to
                 * bwillwrite() mechanics.
                 *
                 * The hammer inode is not locked during these operations.
                 * The vnode is locked which can interfere with the pageout
                 * daemon for non-UIO_NOCOPY writes but should not interfere
                 * with the buffer cache.  Even so, we cannot afford to
                 * allow the pageout daemon to build up too many dirty buffer
                 * cache buffers.
                 */
                /*if (((int)uio->uio_offset & (blksize - 1)) == 0)*/
                bwillwrite(blksize);

                /*
                 * Do not allow HAMMER to blow out system memory by
                 * accumulating too many records.  Records are so well
                 * decoupled from the buffer cache that it is possible
                 * for userland to push data out to the media via
                 * direct-write, but build up the records queued to the
                 * backend faster than the backend can flush them out.
                 * HAMMER has hit its write limit but the frontend has
                 * no pushback to slow it down.
                 */
                if (hmp->rsv_recs > hammer_limit_recs / 2) {
                        /*
                         * Get the inode on the flush list
                         */
                        if (ip->rsv_recs >= 64)
                                hammer_flush_inode(ip, HAMMER_FLUSH_SIGNAL);
                        else if (ip->rsv_recs >= 16)
                                hammer_flush_inode(ip, 0);

                        /*
                         * Keep the flusher going if the system keeps
                         * queueing records.
                         */
                        delta = hmp->count_newrecords -
                                hmp->last_newrecords;
                        if (delta < 0 || delta > hammer_limit_recs / 2) {
                                hmp->last_newrecords = hmp->count_newrecords;
                                hammer_sync_hmp(hmp, MNT_NOWAIT);
                        }

                        /*
                         * If we have gotten behind, start slowing
                         * down the writers.
                         */
                        delta = (hmp->rsv_recs - hammer_limit_recs) *
                                hz / hammer_limit_recs;
                        if (delta > 0)
                                tsleep(&trans, 0, "hmrslo", delta);
                }
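                /*
                 * (The tsleep above scales linearly with the overshoot:
                 * e.g. rsv_recs 10% over hammer_limit_recs yields a delay
                 * of roughly hz/10 ticks, ~100ms at hz=100.)
                 */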

                /*
                 * Calculate the blocksize at the current offset and figure
                 * out how much we can actually write.
                 */
                blkmask = blksize - 1;
                offset = (int)uio->uio_offset & blkmask;
                base_offset = uio->uio_offset & ~(int64_t)blkmask;
                n = blksize - offset;
                if (n > uio->uio_resid)
                        n = uio->uio_resid;
                if (uio->uio_offset + n > ip->ino_data.size) {
                        vnode_pager_setsize(ap->a_vp, uio->uio_offset + n);
                        fixsize = 1;
                }

                if (uio->uio_segflg == UIO_NOCOPY) {
                        /*
                         * Issuing a write with the same data backing the
                         * buffer.  Instantiate the buffer to collect the
                         * backing vm pages, then read-in any missing bits.
                         *
                         * This case is used by vop_stdputpages().
                         */
                        bp = getblk(ap->a_vp, base_offset,
                                    blksize, GETBLK_BHEAVY, 0);
                        if ((bp->b_flags & B_CACHE) == 0) {
                                bqrelse(bp);
                                error = bread(ap->a_vp, base_offset,
                                              blksize, &bp);
                        }
                } else if (offset == 0 && uio->uio_resid >= blksize) {
                        /*
                         * Even though we are entirely overwriting the buffer
                         * we may still have to zero it out to avoid a
                         * mmap/write visibility issue.
                         */
                        bp = getblk(ap->a_vp, base_offset, blksize, GETBLK_BHEAVY, 0);
                        if ((bp->b_flags & B_CACHE) == 0)
                                vfs_bio_clrbuf(bp);
                } else if (base_offset >= ip->ino_data.size) {
                        /*
                         * If the base offset of the buffer is beyond the
                         * file EOF, we don't have to issue a read.
                         */
                        bp = getblk(ap->a_vp, base_offset,
                                    blksize, GETBLK_BHEAVY, 0);
                        vfs_bio_clrbuf(bp);
                } else {
                        /*
                         * Partial overwrite, read in any missing bits then
                         * replace the portion being written.
                         */
                        error = bread(ap->a_vp, base_offset, blksize, &bp);
                        if (error == 0)
                                bheavy(bp);
                }
                if (error == 0) {
                        error = uiomove((char *)bp->b_data + offset,
                                        n, uio);
                }

                /*
                 * If we screwed up we have to undo any VM size changes we
                 * made.
                 */
                if (error) {
                        brelse(bp);
                        if (fixsize) {
                                vtruncbuf(ap->a_vp, ip->ino_data.size,
                                          hammer_blocksize(ip->ino_data.size));
                        }
                        break;
                }
                hammer_stats_file_write += n;
                /* bp->b_flags |= B_CLUSTEROK; temporarily disabled */
                if (ip->ino_data.size < uio->uio_offset) {
                        ip->ino_data.size = uio->uio_offset;
                        flags = HAMMER_INODE_DDIRTY;
                        vnode_pager_setsize(ap->a_vp, ip->ino_data.size);
                } else {
                        flags = 0;
                }
                ip->ino_data.mtime = trans.time;
                flags |= HAMMER_INODE_MTIME | HAMMER_INODE_BUFS;
                hammer_modify_inode(ip, flags);

                /*
                 * Once we dirty the buffer any cached zone-X offset
                 * becomes invalid.  HAMMER NOTE: no-history mode cannot
                 * allow overwriting over the same data sector unless
                 * we provide UNDOs for the old data, which we don't.
                 */
                bp->b_bio2.bio_offset = NOOFFSET;

                /*
                 * Final buffer disposition.
                 */
                bp->b_flags |= B_AGE;
                if (ap->a_ioflag & IO_SYNC) {
                        bwrite(bp);
                } else if (ap->a_ioflag & IO_DIRECT) {
                        bawrite(bp);
                } else {
                        bdwrite(bp);
                }
        }
        hammer_done_transaction(&trans);
        return (error);
}

/*
 * hammer_vop_access { vp, mode, cred }
 */
static
int
hammer_vop_access(struct vop_access_args *ap)
{
        struct hammer_inode *ip = VTOI(ap->a_vp);
        uid_t uid;
        gid_t gid;
        int error;

        ++hammer_stats_file_iopsr;
        uid = hammer_to_unix_xid(&ip->ino_data.uid);
        gid = hammer_to_unix_xid(&ip->ino_data.gid);

        error = vop_helper_access(ap, uid, gid, ip->ino_data.mode,
                                  ip->ino_data.uflags);
        return (error);
}

/*
 * hammer_vop_advlock { vp, id, op, fl, flags }
 */
static
int
hammer_vop_advlock(struct vop_advlock_args *ap)
{
        hammer_inode_t ip = VTOI(ap->a_vp);

        return (lf_advlock(ap, &ip->advlock, ip->ino_data.size));
}

/*
 * hammer_vop_close { vp, fflag }
 */
static
int
hammer_vop_close(struct vop_close_args *ap)
{
        /*hammer_inode_t ip = VTOI(ap->a_vp);*/
        return (vop_stdclose(ap));
}

/*
 * hammer_vop_ncreate { nch, dvp, vpp, cred, vap }
 *
 * The operating system has already ensured that the directory entry
 * does not exist and done all appropriate namespace locking.
 */
static
int
hammer_vop_ncreate(struct vop_ncreate_args *ap)
{
        struct hammer_transaction trans;
        struct hammer_inode *dip;
        struct hammer_inode *nip;
        struct nchandle *nch;
        int error;

        nch = ap->a_nch;
        dip = VTOI(ap->a_dvp);

        if (dip->flags & HAMMER_INODE_RO)
                return (EROFS);
        if ((error = hammer_checkspace(dip->hmp, HAMMER_CHKSPC_CREATE)) != 0)
                return (error);

        /*
         * Create a transaction to cover the operations we perform.
         */
        hammer_start_transaction(&trans, dip->hmp);
        ++hammer_stats_file_iopsw;

        /*
         * Create a new filesystem object of the requested type.  The
         * returned inode will be referenced and shared-locked to prevent
         * it from being moved to the flusher.
         */

        error = hammer_create_inode(&trans, ap->a_vap, ap->a_cred,
                                    dip, NULL, &nip);
        if (error) {
                hkprintf("hammer_create_inode error %d\n", error);
                hammer_done_transaction(&trans);
                *ap->a_vpp = NULL;
                return (error);
        }

        /*
         * Add the new filesystem object to the directory.  This will also
         * bump the inode's link count.
         */
        error = hammer_ip_add_directory(&trans, dip,
                                        nch->ncp->nc_name, nch->ncp->nc_nlen,
                                        nip);
        if (error)
                hkprintf("hammer_ip_add_directory error %d\n", error);

        /*
         * Finish up.
         */
        if (error) {
                hammer_rel_inode(nip, 0);
                hammer_done_transaction(&trans);
                *ap->a_vpp = NULL;
        } else {
                error = hammer_get_vnode(nip, ap->a_vpp);
                hammer_done_transaction(&trans);
                hammer_rel_inode(nip, 0);
                if (error == 0) {
                        cache_setunresolved(ap->a_nch);
                        cache_setvp(ap->a_nch, *ap->a_vpp);
                }
        }
        return (error);
}

/*
 * hammer_vop_getattr { vp, vap }
 *
 * Retrieve an inode's attribute information.  When accessing inodes
 * historically we fake the atime field to ensure consistent results.
 * The atime field is stored in the B-Tree element and allowed to be
 * updated without cycling the element.
 */
static
int
hammer_vop_getattr(struct vop_getattr_args *ap)
{
        struct hammer_inode *ip = VTOI(ap->a_vp);
        struct vattr *vap = ap->a_vap;

        /*
         * We want the fsid to be different when accessing a filesystem
         * with different as-of's so programs like diff don't think
         * the files are the same.
         *
         * We also want the fsid to be the same when comparing snapshots,
         * or when comparing mirrors (which might be backed by different
         * physical devices).  HAMMER fsids are based on the PFS's
         * shared_uuid field.
         *
         * XXX there is a chance of collision here.  The va_fsid reported
         * by stat is different from the more involved fsid used in the
         * mount structure.
         */
        ++hammer_stats_file_iopsr;
        vap->va_fsid = ip->pfsm->fsid_udev ^ (u_int32_t)ip->obj_asof ^
                       (u_int32_t)(ip->obj_asof >> 32);

        vap->va_fileid = ip->ino_leaf.base.obj_id;
        vap->va_mode = ip->ino_data.mode;
        vap->va_nlink = ip->ino_data.nlinks;
        vap->va_uid = hammer_to_unix_xid(&ip->ino_data.uid);
        vap->va_gid = hammer_to_unix_xid(&ip->ino_data.gid);
        vap->va_rmajor = 0;
        vap->va_rminor = 0;
        vap->va_size = ip->ino_data.size;

        /*
         * Special case for @@PFS softlinks.  The actual size of the
         * expanded softlink is "@@0x%016llx:%05d" == 26 bytes.
         */
        if (ip->ino_data.obj_type == HAMMER_OBJTYPE_SOFTLINK &&
            ip->ino_data.size == 10 &&
            ip->obj_asof == HAMMER_MAX_TID &&
            ip->obj_localization == 0 &&
            strncmp(ip->ino_data.ext.symlink, "@@PFS", 5) == 0) {
                vap->va_size = 26;
        }
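        /*
         * (The 26 follows from the format string: 2 bytes for "@@",
         * 2 for "0x", 16 TID hex digits, 1 for ':' and 5 decimal
         * localization digits.)
         */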

        /*
         * We must provide a consistent atime and mtime for snapshots
         * so people can do a 'tar cf - ... | md5' on them and get
         * consistent results.
         */
        if (ip->flags & HAMMER_INODE_RO) {
                hammer_time_to_timespec(ip->ino_data.ctime, &vap->va_atime);
                hammer_time_to_timespec(ip->ino_data.ctime, &vap->va_mtime);
        } else {
                hammer_time_to_timespec(ip->ino_data.atime, &vap->va_atime);
                hammer_time_to_timespec(ip->ino_data.mtime, &vap->va_mtime);
        }
        hammer_time_to_timespec(ip->ino_data.ctime, &vap->va_ctime);
        vap->va_flags = ip->ino_data.uflags;
        vap->va_gen = 1;        /* hammer inums are unique for all time */
        vap->va_blocksize = HAMMER_BUFSIZE;
        if (ip->ino_data.size >= HAMMER_XDEMARC) {
                vap->va_bytes = (ip->ino_data.size + HAMMER_XBUFMASK64) &
                                ~HAMMER_XBUFMASK64;
        } else if (ip->ino_data.size > HAMMER_BUFSIZE / 2) {
                vap->va_bytes = (ip->ino_data.size + HAMMER_BUFMASK64) &
                                ~HAMMER_BUFMASK64;
        } else {
                vap->va_bytes = (ip->ino_data.size + 15) & ~15;
        }
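        /*
         * (Illustration, assuming the usual 16KB/64KB HAMMER buffer
         * sizes: a 100-byte file reports va_bytes 112 (16-byte roundup),
         * a 20000-byte file reports 32768 (one 16KB buffer), and files
         * at or past the demarc round up to 64KB multiples.)
         */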

        vap->va_type = hammer_get_vnode_type(ip->ino_data.obj_type);
        vap->va_filerev = 0;    /* XXX */
        /* mtime uniquely identifies any adjustments made to the file XXX */
        vap->va_fsmid = ip->ino_data.mtime;
        vap->va_uid_uuid = ip->ino_data.uid;
        vap->va_gid_uuid = ip->ino_data.gid;
        vap->va_fsid_uuid = ip->hmp->fsid;
        vap->va_vaflags = VA_UID_UUID_VALID | VA_GID_UUID_VALID |
                          VA_FSID_UUID_VALID;

        switch (ip->ino_data.obj_type) {
        case HAMMER_OBJTYPE_CDEV:
        case HAMMER_OBJTYPE_BDEV:
                vap->va_rmajor = ip->ino_data.rmajor;
                vap->va_rminor = ip->ino_data.rminor;
                break;
        default:
                break;
        }
        return(0);
}

/*
 * hammer_vop_nresolve { nch, dvp, cred }
 *
 * Locate the requested directory entry.
 */
static
int
hammer_vop_nresolve(struct vop_nresolve_args *ap)
{
        struct hammer_transaction trans;
        struct namecache *ncp;
        hammer_inode_t dip;
        hammer_inode_t ip;
        hammer_tid_t asof;
        struct hammer_cursor cursor;
        struct vnode *vp;
        int64_t namekey;
        int error;
        int i;
        int nlen;
        int flags;
        int ispfs;
        int64_t obj_id;
        u_int32_t localization;

        /*
         * Misc initialization, plus handle as-of name extensions.  Look for
         * the '@@' extension.  Note that as-of files and directories cannot
         * be modified.
         */
        dip = VTOI(ap->a_dvp);
        ncp = ap->a_nch->ncp;
        asof = dip->obj_asof;
        localization = dip->obj_localization;   /* for code consistency */
        nlen = ncp->nc_nlen;
        flags = dip->flags & HAMMER_INODE_RO;
        ispfs = 0;

        hammer_simple_transaction(&trans, dip->hmp);
        ++hammer_stats_file_iopsr;

        for (i = 0; i < nlen; ++i) {
                if (ncp->nc_name[i] == '@' && ncp->nc_name[i+1] == '@') {
                        error = hammer_str_to_tid(ncp->nc_name + i + 2,
                                                  &ispfs, &asof, &localization);
                        if (error != 0) {
                                i = nlen;
                                break;
                        }
                        if (asof != HAMMER_MAX_TID)
                                flags |= HAMMER_INODE_RO;
                        break;
                }
        }
        nlen = i;

        /*
         * If this is a PFS softlink we dive into the PFS
         */
        if (ispfs && nlen == 0) {
                ip = hammer_get_inode(&trans, dip, HAMMER_OBJID_ROOT,
                                      asof, localization,
                                      flags, &error);
                if (error == 0) {
                        error = hammer_get_vnode(ip, &vp);
                        hammer_rel_inode(ip, 0);
                } else {
                        vp = NULL;
                }
                if (error == 0) {
                        vn_unlock(vp);
                        cache_setvp(ap->a_nch, vp);
                        vrele(vp);
                }
                goto done;
        }

        /*
         * If there is no path component the time extension is relative to
         * dip.
         */
        if (nlen == 0) {
                ip = hammer_get_inode(&trans, dip, dip->obj_id,
                                      asof, dip->obj_localization,
                                      flags, &error);
                if (error == 0) {
                        error = hammer_get_vnode(ip, &vp);
                        hammer_rel_inode(ip, 0);
                } else {
                        vp = NULL;
                }
                if (error == 0) {
                        vn_unlock(vp);
                        cache_setvp(ap->a_nch, vp);
                        vrele(vp);
                }
                goto done;
        }

        /*
         * Calculate the namekey and setup the key range for the scan.  This
         * works kinda like a chained hash table where the lower 32 bits
         * of the namekey synthesize the chain.
         *
         * The key range is inclusive of both key_beg and key_end.
         */
        namekey = hammer_directory_namekey(ncp->nc_name, nlen);

        error = hammer_init_cursor(&trans, &cursor, &dip->cache[1], dip);
        cursor.key_beg.localization = dip->obj_localization +
                                      HAMMER_LOCALIZE_MISC;
        cursor.key_beg.obj_id = dip->obj_id;
        cursor.key_beg.key = namekey;
        cursor.key_beg.create_tid = 0;
        cursor.key_beg.delete_tid = 0;
        cursor.key_beg.rec_type = HAMMER_RECTYPE_DIRENTRY;
        cursor.key_beg.obj_type = 0;

        cursor.key_end = cursor.key_beg;
        cursor.key_end.key |= 0xFFFFFFFFULL;
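        /*
         * (The name hash lives in the upper bits of the key and the low
         * 32 bits seed the chain, so ORing 0xFFFFFFFF into key_end spans
         * every entry hashing to the same chain.)
         */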
        cursor.asof = asof;
        cursor.flags |= HAMMER_CURSOR_END_INCLUSIVE | HAMMER_CURSOR_ASOF;

        /*
         * Scan all matching records (the chain), locate the one matching
         * the requested path component.
         *
         * The hammer_ip_*() functions merge in-memory records with on-disk
         * records for the purposes of the search.
         */
        obj_id = 0;
        localization = HAMMER_DEF_LOCALIZATION;

        if (error == 0) {
                error = hammer_ip_first(&cursor);
                while (error == 0) {
                        error = hammer_ip_resolve_data(&cursor);
                        if (error)
                                break;
                        if (nlen == cursor.leaf->data_len - HAMMER_ENTRY_NAME_OFF &&
                            bcmp(ncp->nc_name, cursor.data->entry.name, nlen) == 0) {
                                obj_id = cursor.data->entry.obj_id;
                                localization = cursor.data->entry.localization;
                                break;
                        }
                        error = hammer_ip_next(&cursor);
                }
        }
        hammer_done_cursor(&cursor);
        if (error == 0) {
                ip = hammer_get_inode(&trans, dip, obj_id,
                                      asof, localization,
                                      flags, &error);
                if (error == 0) {
                        error = hammer_get_vnode(ip, &vp);
                        hammer_rel_inode(ip, 0);
                } else {
                        vp = NULL;
                }
                if (error == 0) {
                        vn_unlock(vp);
                        cache_setvp(ap->a_nch, vp);
                        vrele(vp);
                }
        } else if (error == ENOENT) {
                cache_setvp(ap->a_nch, NULL);
        }
done:
        hammer_done_transaction(&trans);
        return (error);
}

/*
 * hammer_vop_nlookupdotdot { dvp, vpp, cred }
 *
 * Locate the parent directory of a directory vnode.
 *
 * dvp is referenced but not locked.  *vpp must be returned referenced and
 * locked.  A parent_obj_id of 0 does not necessarily indicate that we are
 * at the root, instead it could indicate that the directory we were in was
 * removed.
 *
 * NOTE: as-of sequences are not linked into the directory structure.  If
 * we are at the root with a different asof than the mount point, reload
 * the same directory with the mount point's asof.  I'm not sure what this
 * will do to NFS.  We encode ASOF stamps in NFS file handles so it might not
 * get confused, but it hasn't been tested.
 */
static
int
hammer_vop_nlookupdotdot(struct vop_nlookupdotdot_args *ap)
{
        struct hammer_transaction trans;
        struct hammer_inode *dip;
        struct hammer_inode *ip;
        int64_t parent_obj_id;
        u_int32_t parent_obj_localization;
        hammer_tid_t asof;
        int error;

        dip = VTOI(ap->a_dvp);
        asof = dip->obj_asof;

        /*
         * Who is our parent?  This could be the root of a pseudo-filesystem
         * whose parent is in another localization domain.
         */
        parent_obj_id = dip->ino_data.parent_obj_id;
        if (dip->obj_id == HAMMER_OBJID_ROOT)
                parent_obj_localization = dip->ino_data.ext.obj.parent_obj_localization;
        else
                parent_obj_localization = dip->obj_localization;

        if (parent_obj_id == 0) {
                if (dip->obj_id == HAMMER_OBJID_ROOT &&
                    asof != dip->hmp->asof) {
                        parent_obj_id = dip->obj_id;
                        asof = dip->hmp->asof;
                        *ap->a_fakename = kmalloc(19, M_TEMP, M_WAITOK);
                        ksnprintf(*ap->a_fakename, 19, "0x%016llx",
                                  dip->obj_asof);
                } else {
                        *ap->a_vpp = NULL;
                        return ENOENT;
                }
        }

        hammer_simple_transaction(&trans, dip->hmp);
        ++hammer_stats_file_iopsr;

        ip = hammer_get_inode(&trans, dip, parent_obj_id,
                              asof, parent_obj_localization,
                              dip->flags, &error);
        if (ip) {
                error = hammer_get_vnode(ip, ap->a_vpp);
                hammer_rel_inode(ip, 0);
        } else {
                *ap->a_vpp = NULL;
        }
        hammer_done_transaction(&trans);
        return (error);
}

/*
 * hammer_vop_nlink { nch, dvp, vp, cred }
 */
static
int
hammer_vop_nlink(struct vop_nlink_args *ap)
{
        struct hammer_transaction trans;
        struct hammer_inode *dip;
        struct hammer_inode *ip;
        struct nchandle *nch;
        int error;

        if (ap->a_dvp->v_mount != ap->a_vp->v_mount)
                return(EXDEV);

        nch = ap->a_nch;
        dip = VTOI(ap->a_dvp);
        ip = VTOI(ap->a_vp);

        if (dip->obj_localization != ip->obj_localization)
                return(EXDEV);

        if (dip->flags & HAMMER_INODE_RO)
                return (EROFS);
        if (ip->flags & HAMMER_INODE_RO)
                return (EROFS);
        if ((error = hammer_checkspace(dip->hmp, HAMMER_CHKSPC_CREATE)) != 0)
                return (error);

        /*
         * Create a transaction to cover the operations we perform.
         */
        hammer_start_transaction(&trans, dip->hmp);
        ++hammer_stats_file_iopsw;

        /*
         * Add the filesystem object to the directory.  Note that neither
         * dip nor ip are referenced or locked, but their vnodes are
         * referenced.  This function will bump the inode's link count.
         */
        error = hammer_ip_add_directory(&trans, dip,
                                        nch->ncp->nc_name, nch->ncp->nc_nlen,
                                        ip);

        /*
         * Finish up.
         */
        if (error == 0) {
                cache_setunresolved(nch);
                cache_setvp(nch, ap->a_vp);
        }
        hammer_done_transaction(&trans);
        return (error);
}

/*
 * hammer_vop_nmkdir { nch, dvp, vpp, cred, vap }
 *
 * The operating system has already ensured that the directory entry
 * does not exist and done all appropriate namespace locking.
 */
static
int
hammer_vop_nmkdir(struct vop_nmkdir_args *ap)
{
        struct hammer_transaction trans;
        struct hammer_inode *dip;
        struct hammer_inode *nip;
        struct nchandle *nch;
        int error;

        nch = ap->a_nch;
        dip = VTOI(ap->a_dvp);

        if (dip->flags & HAMMER_INODE_RO)
                return (EROFS);
        if ((error = hammer_checkspace(dip->hmp, HAMMER_CHKSPC_CREATE)) != 0)
                return (error);

        /*
         * Create a transaction to cover the operations we perform.
         */
        hammer_start_transaction(&trans, dip->hmp);
        ++hammer_stats_file_iopsw;

        /*
         * Create a new filesystem object of the requested type.  The
         * returned inode will be referenced but not locked.
         */
        error = hammer_create_inode(&trans, ap->a_vap, ap->a_cred,
                                    dip, NULL, &nip);
        if (error) {
                hkprintf("hammer_mkdir error %d\n", error);
                hammer_done_transaction(&trans);
                *ap->a_vpp = NULL;
                return (error);
        }
        /*
         * Add the new filesystem object to the directory.  This will also
         * bump the inode's link count.
         */
        error = hammer_ip_add_directory(&trans, dip,
                                        nch->ncp->nc_name, nch->ncp->nc_nlen,
                                        nip);
        if (error)
                hkprintf("hammer_mkdir (add) error %d\n", error);

        /*
         * Finish up.
         */
        if (error) {
                hammer_rel_inode(nip, 0);
                *ap->a_vpp = NULL;
        } else {
                error = hammer_get_vnode(nip, ap->a_vpp);
                hammer_rel_inode(nip, 0);
                if (error == 0) {
                        cache_setunresolved(ap->a_nch);
                        cache_setvp(ap->a_nch, *ap->a_vpp);
                }
        }
        hammer_done_transaction(&trans);
        return (error);
}

/*
 * hammer_vop_nmknod { nch, dvp, vpp, cred, vap }
 *
 * The operating system has already ensured that the directory entry
 * does not exist and done all appropriate namespace locking.
 */
static
int
hammer_vop_nmknod(struct vop_nmknod_args *ap)
{
        struct hammer_transaction trans;
        struct hammer_inode *dip;
        struct hammer_inode *nip;
        struct nchandle *nch;
        int error;

        nch = ap->a_nch;
        dip = VTOI(ap->a_dvp);

        if (dip->flags & HAMMER_INODE_RO)
                return (EROFS);
        if ((error = hammer_checkspace(dip->hmp, HAMMER_CHKSPC_CREATE)) != 0)
                return (error);

        /*
         * Create a transaction to cover the operations we perform.
         */
        hammer_start_transaction(&trans, dip->hmp);
        ++hammer_stats_file_iopsw;

        /*
         * Create a new filesystem object of the requested type.  The
         * returned inode will be referenced but not locked.
         *
         * If mknod specifies a directory a pseudo-fs is created.
         */
        error = hammer_create_inode(&trans, ap->a_vap, ap->a_cred,
                                    dip, NULL, &nip);
        if (error) {
                hammer_done_transaction(&trans);
                *ap->a_vpp = NULL;
                return (error);
        }

        /*
         * Add the new filesystem object to the directory.  This will also
         * bump the inode's link count.
         */
        error = hammer_ip_add_directory(&trans, dip,
                                        nch->ncp->nc_name, nch->ncp->nc_nlen,
                                        nip);

        /*
         * Finish up.
         */
        if (error) {
                hammer_rel_inode(nip, 0);
                *ap->a_vpp = NULL;
        } else {
                error = hammer_get_vnode(nip, ap->a_vpp);
                hammer_rel_inode(nip, 0);
                if (error == 0) {
                        cache_setunresolved(ap->a_nch);
                        cache_setvp(ap->a_nch, *ap->a_vpp);
                }
        }
        hammer_done_transaction(&trans);
        return (error);
}

/*
 * hammer_vop_open { vp, mode, cred, fp }
 */
static
int
hammer_vop_open(struct vop_open_args *ap)
{
        hammer_inode_t ip;

        ++hammer_stats_file_iopsr;
        ip = VTOI(ap->a_vp);

        if ((ap->a_mode & FWRITE) && (ip->flags & HAMMER_INODE_RO))
                return (EROFS);
        return(vop_stdopen(ap));
}

/*
 * hammer_vop_print { vp }
 */
static
int
hammer_vop_print(struct vop_print_args *ap)
{
        return EOPNOTSUPP;
}

/*
 * hammer_vop_readdir { vp, uio, cred, *eofflag, *ncookies, off_t **cookies }
 */
static
int
hammer_vop_readdir(struct vop_readdir_args *ap)
{
        struct hammer_transaction trans;
        struct hammer_cursor cursor;
        struct hammer_inode *ip;
        struct uio *uio;
        hammer_base_elm_t base;
        int error;
        int cookie_index;
        int ncookies;
        off_t *cookies;
        off_t saveoff;
        int r;
        int dtype;

        ++hammer_stats_file_iopsr;
        ip = VTOI(ap->a_vp);
        uio = ap->a_uio;
        saveoff = uio->uio_offset;

        if (ap->a_ncookies) {
                ncookies = uio->uio_resid / 16 + 1;
                if (ncookies > 1024)
                        ncookies = 1024;
                cookies = kmalloc(ncookies * sizeof(off_t), M_TEMP, M_WAITOK);
                cookie_index = 0;
        } else {
                ncookies = -1;
                cookies = NULL;
                cookie_index = 0;
        }

        hammer_simple_transaction(&trans, ip->hmp);

        /*
         * Handle artificial entries
         */
        error = 0;
        if (saveoff == 0) {
                r = vop_write_dirent(&error, uio, ip->obj_id, DT_DIR, 1, ".");
                if (r)
                        goto done;
                if (cookies)
                        cookies[cookie_index] = saveoff;
                ++saveoff;
                ++cookie_index;
                if (cookie_index == ncookies)
                        goto done;
        }
        if (saveoff == 1) {
                if (ip->ino_data.parent_obj_id) {
                        r = vop_write_dirent(&error, uio,
                                             ip->ino_data.parent_obj_id,
                                             DT_DIR, 2, "..");
                } else {
                        r = vop_write_dirent(&error, uio,
                                             ip->obj_id, DT_DIR, 2, "..");
                }
                if (r)
                        goto done;
                if (cookies)
                        cookies[cookie_index] = saveoff;
                ++saveoff;
                ++cookie_index;
                if (cookie_index == ncookies)
                        goto done;
        }

        /*
         * Key range (begin and end inclusive) to scan.  Directory keys
         * directly translate to a 64 bit 'seek' position.
         */
        hammer_init_cursor(&trans, &cursor, &ip->cache[1], ip);
        cursor.key_beg.localization = ip->obj_localization +
                                      HAMMER_LOCALIZE_MISC;
        cursor.key_beg.obj_id = ip->obj_id;
        cursor.key_beg.create_tid = 0;
        cursor.key_beg.delete_tid = 0;
        cursor.key_beg.rec_type = HAMMER_RECTYPE_DIRENTRY;
        cursor.key_beg.obj_type = 0;
        cursor.key_beg.key = saveoff;

        cursor.key_end = cursor.key_beg;
        cursor.key_end.key = HAMMER_MAX_KEY;
        cursor.asof = ip->obj_asof;
        cursor.flags |= HAMMER_CURSOR_END_INCLUSIVE | HAMMER_CURSOR_ASOF;

        error = hammer_ip_first(&cursor);

        while (error == 0) {
                error = hammer_ip_resolve_data(&cursor);
                if (error)
                        break;
                base = &cursor.leaf->base;
                saveoff = base->key;
                KKASSERT(cursor.leaf->data_len > HAMMER_ENTRY_NAME_OFF);

                if (base->obj_id != ip->obj_id)
                        panic("readdir: bad record at %p", cursor.node);

                /*
                 * Convert pseudo-filesystems into softlinks
                 */
                dtype = hammer_get_dtype(cursor.leaf->base.obj_type);
                r = vop_write_dirent(
                             &error, uio, cursor.data->entry.obj_id,
                             dtype,
                             cursor.leaf->data_len - HAMMER_ENTRY_NAME_OFF,
                             (void *)cursor.data->entry.name);
                if (r)
                        break;
                ++saveoff;
                if (cookies)
                        cookies[cookie_index] = base->key;
                ++cookie_index;
                if (cookie_index == ncookies)
                        break;
                error = hammer_ip_next(&cursor);
        }
        hammer_done_cursor(&cursor);

done:
        hammer_done_transaction(&trans);

        if (ap->a_eofflag)
                *ap->a_eofflag = (error == ENOENT);
        uio->uio_offset = saveoff;
        if (error && cookie_index == 0) {
                if (error == ENOENT)
                        error = 0;
                if (cookies) {
                        kfree(cookies, M_TEMP);
                        *ap->a_ncookies = 0;
                        *ap->a_cookies = NULL;
                }
        } else {
                if (error == ENOENT)
                        error = 0;
                if (cookies) {
                        *ap->a_ncookies = cookie_index;
                        *ap->a_cookies = cookies;
                }
        }
        return(error);
}

/*
 * hammer_vop_readlink { vp, uio, cred }
 */
static
int
hammer_vop_readlink(struct vop_readlink_args *ap)
{
        struct hammer_transaction trans;
        struct hammer_cursor cursor;
        struct hammer_inode *ip;
        char buf[32];
        u_int32_t localization;
        hammer_pseudofs_inmem_t pfsm;
        int error;

        ip = VTOI(ap->a_vp);

        /*
         * Shortcut if the symlink data was stuffed into ino_data.
         *
         * Also expand special "@@PFS%05d" softlinks (expansion only
         * occurs for non-historical (current) accesses made from the
         * primary filesystem).
         */
        if (ip->ino_data.size <= HAMMER_INODE_BASESYMLEN) {
                char *ptr;
                int bytes;

                ptr = ip->ino_data.ext.symlink;
                bytes = (int)ip->ino_data.size;
                if (bytes == 10 &&
                    ip->obj_asof == HAMMER_MAX_TID &&
                    ip->obj_localization == 0 &&
                    strncmp(ptr, "@@PFS", 5) == 0) {
                        hammer_simple_transaction(&trans, ip->hmp);
                        bcopy(ptr + 5, buf, 5);
                        buf[5] = 0;
                        localization = strtoul(buf, NULL, 10) << 16;
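                        /*
                         * (e.g. a "@@PFS00003" softlink parses to
                         * localization 3 << 16 == 0x00030000.)
                         */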
                        pfsm = hammer_load_pseudofs(&trans, localization,
                                                    &error);
                        if (error == 0) {
                                if (pfsm->pfsd.mirror_flags &
                                    HAMMER_PFSD_SLAVE) {
                                        ksnprintf(buf, sizeof(buf),
                                                  "@@0x%016llx:%05d",
                                                  pfsm->pfsd.sync_end_tid,
                                                  localization >> 16);
                                } else {
                                        ksnprintf(buf, sizeof(buf),
                                                  "@@0x%016llx:%05d",
                                                  HAMMER_MAX_TID,
                                                  localization >> 16);
                                }
                                ptr = buf;
                                bytes = strlen(buf);
                        }
                        if (pfsm)
                                hammer_rel_pseudofs(trans.hmp, pfsm);
                        hammer_done_transaction(&trans);
                }
                error = uiomove(ptr, bytes, ap->a_uio);
                return(error);
        }

        /*
         * Long version
         */
        hammer_simple_transaction(&trans, ip->hmp);
        ++hammer_stats_file_iopsr;
        hammer_init_cursor(&trans, &cursor, &ip->cache[1], ip);

        /*
         * Key range (begin and end inclusive) to scan.  Directory keys
         * directly translate to a 64 bit 'seek' position.
         */
        cursor.key_beg.localization = ip->obj_localization +
                                      HAMMER_LOCALIZE_MISC;
        cursor.key_beg.obj_id = ip->obj_id;
        cursor.key_beg.create_tid = 0;
        cursor.key_beg.delete_tid = 0;
        cursor.key_beg.rec_type = HAMMER_RECTYPE_FIX;
        cursor.key_beg.obj_type = 0;
        cursor.key_beg.key = HAMMER_FIXKEY_SYMLINK;
        cursor.asof = ip->obj_asof;
        cursor.flags |= HAMMER_CURSOR_ASOF;

        error = hammer_ip_lookup(&cursor);
        if (error == 0) {
                error = hammer_ip_resolve_data(&cursor);
                if (error == 0) {
                        KKASSERT(cursor.leaf->data_len >=
                                 HAMMER_SYMLINK_NAME_OFF);
                        error = uiomove(cursor.data->symlink.name,
                                        cursor.leaf->data_len -
                                                HAMMER_SYMLINK_NAME_OFF,
                                        ap->a_uio);
                }
        }
        hammer_done_cursor(&cursor);
        hammer_done_transaction(&trans);
        return(error);
}

/*
 * hammer_vop_nremove { nch, dvp, cred }
 */
static
int
hammer_vop_nremove(struct vop_nremove_args *ap)
{
        struct hammer_transaction trans;
        struct hammer_inode *dip;
        int error;

        dip = VTOI(ap->a_dvp);

        if (hammer_nohistory(dip) == 0 &&
            (error = hammer_checkspace(dip->hmp, HAMMER_CHKSPC_REMOVE)) != 0) {
                return (error);
        }

        hammer_start_transaction(&trans, dip->hmp);
        ++hammer_stats_file_iopsw;
        error = hammer_dounlink(&trans, ap->a_nch, ap->a_dvp, ap->a_cred, 0, 0);
        hammer_done_transaction(&trans);

        return (error);
}

/*
 * hammer_vop_nrename { fnch, tnch, fdvp, tdvp, cred }
 */
static
int
hammer_vop_nrename(struct vop_nrename_args *ap)
{
        struct hammer_transaction trans;
        struct namecache *fncp;
        struct namecache *tncp;
        struct hammer_inode *fdip;
        struct hammer_inode *tdip;
        struct hammer_inode *ip;
        struct hammer_cursor cursor;
        int64_t namekey;
        int nlen, error;

        if (ap->a_fdvp->v_mount != ap->a_tdvp->v_mount)
                return(EXDEV);
        if (ap->a_fdvp->v_mount != ap->a_fnch->ncp->nc_vp->v_mount)
                return(EXDEV);

        fdip = VTOI(ap->a_fdvp);
        tdip = VTOI(ap->a_tdvp);
        fncp = ap->a_fnch->ncp;
        tncp = ap->a_tnch->ncp;
        ip = VTOI(fncp->nc_vp);
        KKASSERT(ip != NULL);

        if (fdip->obj_localization != tdip->obj_localization)
                return(EXDEV);
        if (fdip->obj_localization != ip->obj_localization)
                return(EXDEV);

        if (fdip->flags & HAMMER_INODE_RO)
                return (EROFS);
        if (tdip->flags & HAMMER_INODE_RO)
                return (EROFS);
        if (ip->flags & HAMMER_INODE_RO)
                return (EROFS);
        if ((error = hammer_checkspace(fdip->hmp, HAMMER_CHKSPC_CREATE)) != 0)
                return (error);

        hammer_start_transaction(&trans, fdip->hmp);
        ++hammer_stats_file_iopsw;

        /*
         * Remove tncp from the target directory and then link ip as
         * tncp.  XXX pass trans to dounlink
         *
         * Force the inode sync-time to match the transaction so it is
         * in-sync with the creation of the target directory entry.
         */
        error = hammer_dounlink(&trans, ap->a_tnch, ap->a_tdvp,
                                ap->a_cred, 0, -1);
        if (error == 0 || error == ENOENT) {
                error = hammer_ip_add_directory(&trans, tdip,
                                                tncp->nc_name, tncp->nc_nlen,
                                                ip);
                if (error == 0) {
                        ip->ino_data.parent_obj_id = tdip->obj_id;
                        hammer_modify_inode(ip, HAMMER_INODE_DDIRTY);
                }
        }
        if (error)
                goto failed; /* XXX */

        /*
         * Locate the record in the originating directory and remove it.
         *
         * Calculate the namekey and setup the key range for the scan.  This
         * works kinda like a chained hash table where the lower 32 bits
         * of the namekey synthesize the chain.
         *
         * The key range is inclusive of both key_beg and key_end.
         */
        namekey = hammer_directory_namekey(fncp->nc_name, fncp->nc_nlen);
retry:
        hammer_init_cursor(&trans, &cursor, &fdip->cache[1], fdip);
        cursor.key_beg.localization = fdip->obj_localization +
                                      HAMMER_LOCALIZE_MISC;
        cursor.key_beg.obj_id = fdip->obj_id;
        cursor.key_beg.key = namekey;
        cursor.key_beg.create_tid = 0;
        cursor.key_beg.delete_tid = 0;
        cursor.key_beg.rec_type = HAMMER_RECTYPE_DIRENTRY;
        cursor.key_beg.obj_type = 0;

        cursor.key_end = cursor.key_beg;
        cursor.key_end.key |= 0xFFFFFFFFULL;
        cursor.asof = fdip->obj_asof;
        cursor.flags |= HAMMER_CURSOR_END_INCLUSIVE | HAMMER_CURSOR_ASOF;

        /*
         * Scan all matching records (the chain), locate the one matching
         * the requested path component.
         *
         * The hammer_ip_*() functions merge in-memory records with on-disk
         * records for the purposes of the search.
         */
        error = hammer_ip_first(&cursor);
        while (error == 0) {
                if (hammer_ip_resolve_data(&cursor) != 0)
                        break;
                nlen = cursor.leaf->data_len - HAMMER_ENTRY_NAME_OFF;
                KKASSERT(nlen > 0);
                if (fncp->nc_nlen == nlen &&
                    bcmp(fncp->nc_name, cursor.data->entry.name, nlen) == 0) {
                        break;
                }
                error = hammer_ip_next(&cursor);
        }

        /*
         * If all is ok we have to get the inode so we can adjust nlinks.
         *
         * WARNING: hammer_ip_del_directory() may have to terminate the
         * cursor to avoid a recursion.  It's ok to call hammer_done_cursor()
         * twice.
         */
        if (error == 0)
                error = hammer_ip_del_directory(&trans, &cursor, fdip, ip);

        /*
         * XXX A deadlock here will break rename's atomicity for the
         * purposes of crash recovery.
         */
        if (error == EDEADLK) {
                hammer_done_cursor(&cursor);
                goto retry;
        }

        /*
         * Cleanup and tell the kernel that the rename succeeded.
         */
        hammer_done_cursor(&cursor);
        if (error == 0)
                cache_rename(ap->a_fnch, ap->a_tnch);

failed:
        hammer_done_transaction(&trans);
        return (error);
}

/*
 * hammer_vop_nrmdir { nch, dvp, cred }
 */
static
int
hammer_vop_nrmdir(struct vop_nrmdir_args *ap)
{
        struct hammer_transaction trans;
        struct hammer_inode *dip;
        int error;

        dip = VTOI(ap->a_dvp);

        if (hammer_nohistory(dip) == 0 &&
            (error = hammer_checkspace(dip->hmp, HAMMER_CHKSPC_REMOVE)) != 0) {
                return (error);
        }

        hammer_start_transaction(&trans, dip->hmp);
        ++hammer_stats_file_iopsw;
        error = hammer_dounlink(&trans, ap->a_nch, ap->a_dvp, ap->a_cred, 0, 1);
        hammer_done_transaction(&trans);

        return (error);
}

/*
 * hammer_vop_setattr { vp, vap, cred }
 */
static
int
hammer_vop_setattr(struct vop_setattr_args *ap)
{
        struct hammer_transaction trans;
        struct vattr *vap;
        struct hammer_inode *ip;
        int modflags;
        int error;
        int truncating;
        int blksize;
        int64_t aligned_size;
        u_int32_t flags;

        vap = ap->a_vap;
        ip = ap->a_vp->v_data;
        modflags = 0;

        if (ap->a_vp->v_mount->mnt_flag & MNT_RDONLY)
                return(EROFS);
        if (ip->flags & HAMMER_INODE_RO)
                return (EROFS);
        if (hammer_nohistory(ip) == 0 &&
            (error = hammer_checkspace(ip->hmp, HAMMER_CHKSPC_REMOVE)) != 0) {
                return (error);
        }

        hammer_start_transaction(&trans, ip->hmp);
        ++hammer_stats_file_iopsw;
        error = 0;

        if (vap->va_flags != VNOVAL) {
                flags = ip->ino_data.uflags;
                error = vop_helper_setattr_flags(&flags, vap->va_flags,
                                hammer_to_unix_xid(&ip->ino_data.uid),
                                ap->a_cred);
                if (error == 0) {
                        if (ip->ino_data.uflags != flags) {
                                ip->ino_data.uflags = flags;
                                modflags |= HAMMER_INODE_DDIRTY;
                        }
                        if (ip->ino_data.uflags & (IMMUTABLE | APPEND)) {
                                error = 0;
                                goto done;
                        }
                }
                goto done;
        }
        if (ip->ino_data.uflags & (IMMUTABLE | APPEND)) {
                error = EPERM;
                goto done;
        }
        if (vap->va_uid != (uid_t)VNOVAL || vap->va_gid != (gid_t)VNOVAL) {
                mode_t cur_mode = ip->ino_data.mode;
                uid_t cur_uid = hammer_to_unix_xid(&ip->ino_data.uid);
                gid_t cur_gid = hammer_to_unix_xid(&ip->ino_data.gid);
                uuid_t uuid_uid;
                uuid_t uuid_gid;

                error = vop_helper_chown(ap->a_vp, vap->va_uid, vap->va_gid,
                                         ap->a_cred,
                                         &cur_uid, &cur_gid, &cur_mode);
                if (error == 0) {
                        hammer_guid_to_uuid(&uuid_uid, cur_uid);
                        hammer_guid_to_uuid(&uuid_gid, cur_gid);
                        if (bcmp(&uuid_uid, &ip->ino_data.uid,
                                 sizeof(uuid_uid)) ||
                            bcmp(&uuid_gid, &ip->ino_data.gid,
                                 sizeof(uuid_gid)) ||
                            ip->ino_data.mode != cur_mode
                        ) {
                                ip->ino_data.uid = uuid_uid;
                                ip->ino_data.gid = uuid_gid;
                                ip->ino_data.mode = cur_mode;
                        }
                        modflags |= HAMMER_INODE_DDIRTY;
                }
        }
        while (vap->va_size != VNOVAL && ip->ino_data.size != vap->va_size) {
                switch(ap->a_vp->v_type) {
                case VREG:
                        if (vap->va_size == ip->ino_data.size)
                                break;
                        /*
                         * XXX break atomicity, we can deadlock the backend
                         * if we do not release the lock.  Probably not a
                         * big deal here.
                         */
                        blksize = hammer_blocksize(vap->va_size);
                        if (vap->va_size < ip->ino_data.size) {
                                vtruncbuf(ap->a_vp, vap->va_size, blksize);
                                truncating = 1;
                        } else {
                                vnode_pager_setsize(ap->a_vp, vap->va_size);
                                truncating = 0;
                        }
                        ip->ino_data.size = vap->va_size;
                        modflags |= HAMMER_INODE_DDIRTY;

                        /*
                         * on-media truncation is cached in the inode until
                         * the inode is synchronized.
                         */
                        if (truncating) {
                                hammer_ip_frontend_trunc(ip, vap->va_size);
#ifdef DEBUG_TRUNCATE
                                if (HammerTruncIp == NULL)
                                        HammerTruncIp = ip;
#endif
                                if ((ip->flags & HAMMER_INODE_TRUNCATED) == 0) {
                                        ip->flags |= HAMMER_INODE_TRUNCATED;
                                        ip->trunc_off = vap->va_size;
#ifdef DEBUG_TRUNCATE
                                        if (ip == HammerTruncIp)
                                                kprintf("truncate1 %016llx\n", ip->trunc_off);
#endif
                                } else if (ip->trunc_off > vap->va_size) {
                                        ip->trunc_off = vap->va_size;
#ifdef DEBUG_TRUNCATE
                                        if (ip == HammerTruncIp)
                                                kprintf("truncate2 %016llx\n", ip->trunc_off);
#endif
                                } else {
#ifdef DEBUG_TRUNCATE
                                        if (ip == HammerTruncIp)
                                                kprintf("truncate3 %016llx (ignored)\n", vap->va_size);
#endif
                                }
                        }

                        /*
                         * If truncating we have to clean out a portion of
                         * the last block on-disk.  We do this in the
                         * front-end buffer cache.
                         */
                        aligned_size = (vap->va_size + (blksize - 1)) &
                                       ~(int64_t)(blksize - 1);
                        if (truncating && vap->va_size < aligned_size) {
                                struct buf *bp;
                                int offset;

                                aligned_size -= blksize;

                                offset = (int)vap->va_size & (blksize - 1);
                                error = bread(ap->a_vp, aligned_size,
                                              blksize, &bp);
                                hammer_ip_frontend_trunc(ip, aligned_size);
                                if (error == 0) {
                                        bzero(bp->b_data + offset,
                                              blksize - offset);
                                        /* must de-cache direct-io offset */
                                        bp->b_bio2.bio_offset = NOOFFSET;
                                        bdwrite(bp);
                                } else {
                                        kprintf("ERROR %d\n", error);
                                        brelse(bp);
                                }
                        }
                        break;
                case VDATABASE:
                        if ((ip->flags & HAMMER_INODE_TRUNCATED) == 0) {
                                ip->flags |= HAMMER_INODE_TRUNCATED;
                                ip->trunc_off = vap->va_size;
                        } else if (ip->trunc_off > vap->va_size) {
                                ip->trunc_off = vap->va_size;
                        }
                        hammer_ip_frontend_trunc(ip, vap->va_size);
                        ip->ino_data.size = vap->va_size;
                        modflags |= HAMMER_INODE_DDIRTY;
                        break;
                default:
                        error = EINVAL;
                        goto done;
                }
                break;
        }
        if (vap->va_atime.tv_sec != VNOVAL) {
                ip->ino_data.atime =
                        hammer_timespec_to_time(&vap->va_atime);
                modflags |= HAMMER_INODE_ATIME;
        }
        if (vap->va_mtime.tv_sec != VNOVAL) {
                ip->ino_data.mtime =
                        hammer_timespec_to_time(&vap->va_mtime);
                modflags |= HAMMER_INODE_MTIME;
        }
        if (vap->va_mode != (mode_t)VNOVAL) {
                mode_t cur_mode = ip->ino_data.mode;
                uid_t cur_uid = hammer_to_unix_xid(&ip->ino_data.uid);
                gid_t cur_gid = hammer_to_unix_xid(&ip->ino_data.gid);

                error = vop_helper_chmod(ap->a_vp, vap->va_mode, ap->a_cred,
                                         cur_uid, cur_gid, &cur_mode);
                if (error == 0 && ip->ino_data.mode != cur_mode) {
                        ip->ino_data.mode = cur_mode;
                        modflags |= HAMMER_INODE_DDIRTY;
                }
        }
done:
        if (error == 0)
                hammer_modify_inode(ip, modflags);
        hammer_done_transaction(&trans);
        return (error);
}

/*
 * hammer_vop_nsymlink { nch, dvp, vpp, cred, vap, target }
 */
static
int
hammer_vop_nsymlink(struct vop_nsymlink_args *ap)
{
        struct hammer_transaction trans;
        struct hammer_inode *dip;
        struct hammer_inode *nip;
        struct nchandle *nch;
        hammer_record_t record;
        int error;
        int bytes;

        ap->a_vap->va_type = VLNK;

        nch = ap->a_nch;
        dip = VTOI(ap->a_dvp);

        if (dip->flags & HAMMER_INODE_RO)
                return (EROFS);
        if ((error = hammer_checkspace(dip->hmp, HAMMER_CHKSPC_CREATE)) != 0)
                return (error);

        /*
         * Create a transaction to cover the operations we perform.
         */
        hammer_start_transaction(&trans, dip->hmp);
        ++hammer_stats_file_iopsw;

        /*
         * Create a new filesystem object of the requested type.  The
         * returned inode will be referenced but not locked.
         */

        error = hammer_create_inode(&trans, ap->a_vap, ap->a_cred,
                                    dip, NULL, &nip);
        if (error) {
                hammer_done_transaction(&trans);
                *ap->a_vpp = NULL;
                return (error);
        }

        /*
         * Add a record representing the symlink.  symlink stores the link
         * as pure data, not a string, and is not \0-terminated.
         */
        if (error == 0) {
                bytes = strlen(ap->a_target);

                if (bytes <= HAMMER_INODE_BASESYMLEN) {
                        bcopy(ap->a_target, nip->ino_data.ext.symlink, bytes);
                } else {
                        record = hammer_alloc_mem_record(nip, bytes);
                        record->type = HAMMER_MEM_RECORD_GENERAL;

                        record->leaf.base.localization = nip->obj_localization +
                                                         HAMMER_LOCALIZE_MISC;
                        record->leaf.base.key = HAMMER_FIXKEY_SYMLINK;
                        record->leaf.base.rec_type = HAMMER_RECTYPE_FIX;
                        record->leaf.data_len = bytes;
                        KKASSERT(HAMMER_SYMLINK_NAME_OFF == 0);
                        bcopy(ap->a_target, record->data->symlink.name, bytes);
                        error = hammer_ip_add_record(&trans, record);
                }

                /*
                 * Set the file size to the length of the link.
                 */
                if (error == 0) {
                        nip->ino_data.size = bytes;
                        hammer_modify_inode(nip, HAMMER_INODE_DDIRTY);
                }
        }
        if (error == 0)
                error = hammer_ip_add_directory(&trans, dip, nch->ncp->nc_name,
                                                nch->ncp->nc_nlen, nip);

        /*
         * Finish up.
         */
        if (error) {
                hammer_rel_inode(nip, 0);
                *ap->a_vpp = NULL;
        } else {
                error = hammer_get_vnode(nip, ap->a_vpp);
                hammer_rel_inode(nip, 0);
                if (error == 0) {
                        cache_setunresolved(ap->a_nch);
                        cache_setvp(ap->a_nch, *ap->a_vpp);
                }
        }
        hammer_done_transaction(&trans);
        return (error);
}

/*
 * hammer_vop_nwhiteout { nch, dvp, cred, flags }
 */
static
int
hammer_vop_nwhiteout(struct vop_nwhiteout_args *ap)
{
        struct hammer_transaction trans;
        struct hammer_inode *dip;
        int error;

        dip = VTOI(ap->a_dvp);

        if (hammer_nohistory(dip) == 0 &&
            (error = hammer_checkspace(dip->hmp, HAMMER_CHKSPC_CREATE)) != 0) {
                return (error);
        }

        hammer_start_transaction(&trans, dip->hmp);
        ++hammer_stats_file_iopsw;
        error = hammer_dounlink(&trans, ap->a_nch, ap->a_dvp,
                                ap->a_cred, ap->a_flags, -1);
        hammer_done_transaction(&trans);

        return (error);
}

/*
 * hammer_vop_ioctl { vp, command, data, fflag, cred }
 */
static
int
hammer_vop_ioctl(struct vop_ioctl_args *ap)
{
        struct hammer_inode *ip = ap->a_vp->v_data;

        ++hammer_stats_file_iopsr;
        return(hammer_ioctl(ip, ap->a_command, ap->a_data,
                            ap->a_fflag, ap->a_cred));
}

static
int
hammer_vop_mountctl(struct vop_mountctl_args *ap)
{
        struct mount *mp;
        int error;

        mp = ap->a_head.a_ops->head.vv_mount;

        switch(ap->a_op) {
        case MOUNTCTL_SET_EXPORT:
                if (ap->a_ctllen != sizeof(struct export_args))
                        error = EINVAL;
                else
                        error = hammer_vfs_export(mp, ap->a_op,
                                (const struct export_args *)ap->a_ctl);
                break;
        default:
                error = journal_mountctl(ap);
                break;
        }
        return(error);
}

/*
 * hammer_vop_strategy { vp, bio }
 *
 * Strategy call, used for regular file read & write only.  Note that the
 * bp may represent a cluster.
 *
 * To simplify operation and allow better optimizations in the future,
 * this code does not make any assumptions with regards to buffer alignment
 * or size.
 */
static
int
hammer_vop_strategy(struct vop_strategy_args *ap)
{
        struct buf *bp;
        int error;

        bp = ap->a_bio->bio_buf;

        switch(bp->b_cmd) {
        case BUF_CMD_READ:
                error = hammer_vop_strategy_read(ap);
                break;
        case BUF_CMD_WRITE:
                error = hammer_vop_strategy_write(ap);
                break;
        default:
                bp->b_error = error = EINVAL;
                bp->b_flags |= B_ERROR;
                biodone(ap->a_bio);
                break;
        }
        return (error);
}
2072 * Read from a regular file. Iterate the related records and fill in the
2073 * BIO/BUF. Gaps are zero-filled.
2075 * The support code in hammer_object.c should be used to deal with mixed
2076 * in-memory and on-disk records.
2078 * NOTE: Can be called from the cluster code with an oversized buf.
2080 * XXX atime update
2082 static
2084 hammer_vop_strategy_read(struct vop_strategy_args *ap)
2086 struct hammer_transaction trans;
2087 struct hammer_inode *ip;
2088 struct hammer_cursor cursor;
2089 hammer_base_elm_t base;
2090 hammer_off_t disk_offset;
2091 struct bio *bio;
2092 struct bio *nbio;
2093 struct buf *bp;
2094 int64_t rec_offset;
2095 int64_t ran_end;
2096 int64_t tmp64;
2097 int error;
2098 int boff;
2099 int roff;
2100 int n;
2102 bio = ap->a_bio;
2103 bp = bio->bio_buf;
2104 ip = ap->a_vp->v_data;
2107 * The zone-2 disk offset may have been set by the cluster code via
2108 * a BMAP operation, or else should be NOOFFSET.
2110 * Checking the high bits for a match against zone-2 should suffice.
	nbio = push_bio(bio);
	if ((nbio->bio_offset & HAMMER_OFF_ZONE_MASK) ==
	    HAMMER_ZONE_LARGE_DATA) {
		error = hammer_io_direct_read(ip->hmp, nbio, NULL);
		return (error);
	}

	/*
	 * Well, that sucked.  Do it the hard way.  If all the stars are
	 * aligned we may still be able to issue a direct-read.
	 */
	hammer_simple_transaction(&trans, ip->hmp);
	hammer_init_cursor(&trans, &cursor, &ip->cache[1], ip);
	/*
	 * Key range (begin and end inclusive) to scan.  Note that the keys
	 * stored in the actual records represent BASE+LEN, not BASE.  The
	 * first record containing bio_offset will have a key > bio_offset.
	 */
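	/*
	 * (Illustrative example: a 16384-byte record covering file
	 * offsets 0-16383 is keyed at 16384, so a read starting at
	 * bio_offset 0 begins its scan at key 1 and locates that
	 * record.)
	 */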
	cursor.key_beg.localization = ip->obj_localization +
				      HAMMER_LOCALIZE_MISC;
	cursor.key_beg.obj_id = ip->obj_id;
	cursor.key_beg.create_tid = 0;
	cursor.key_beg.delete_tid = 0;
	cursor.key_beg.obj_type = 0;
	cursor.key_beg.key = bio->bio_offset + 1;
	cursor.asof = ip->obj_asof;
	cursor.flags |= HAMMER_CURSOR_ASOF;

	cursor.key_end = cursor.key_beg;
	KKASSERT(ip->ino_data.obj_type == HAMMER_OBJTYPE_REGFILE);
#if 0
	if (ip->ino_data.obj_type == HAMMER_OBJTYPE_DBFILE) {
		cursor.key_beg.rec_type = HAMMER_RECTYPE_DB;
		cursor.key_end.rec_type = HAMMER_RECTYPE_DB;
		cursor.key_end.key = 0x7FFFFFFFFFFFFFFFLL;
	} else
#endif
	{
		ran_end = bio->bio_offset + bp->b_bufsize;
		cursor.key_beg.rec_type = HAMMER_RECTYPE_DATA;
		cursor.key_end.rec_type = HAMMER_RECTYPE_DATA;
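		/*
		 * (Note: the end key is computed into tmp64 first so the
		 * wrap-around test below is not elided; testing
		 * "ran_end + MAXPHYS + 1 < ran_end" directly relies on
		 * signed overflow, which the compiler may assume cannot
		 * happen.)
		 */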
		tmp64 = ran_end + MAXPHYS + 1;	/* work-around GCC-4 bug */
		if (tmp64 < ran_end)
			cursor.key_end.key = 0x7FFFFFFFFFFFFFFFLL;
		else
			cursor.key_end.key = ran_end + MAXPHYS + 1;
	}
	cursor.flags |= HAMMER_CURSOR_END_INCLUSIVE;

	error = hammer_ip_first(&cursor);
	boff = 0;
	while (error == 0) {
		/*
		 * Get the base file offset of the record.  The key for
		 * data records is (base + bytes) rather than (base).
		 */
		base = &cursor.leaf->base;
		rec_offset = base->key - cursor.leaf->data_len;

		/*
		 * Calculate the gap, if any, and zero-fill it.
		 *
		 * n is the offset of the start of the record versus our
		 * current seek offset in the bio.
		 */
		n = (int)(rec_offset - (bio->bio_offset + boff));
		if (n > 0) {
			if (n > bp->b_bufsize - boff)
				n = bp->b_bufsize - boff;
			bzero((char *)bp->b_data + boff, n);
			boff += n;
			n = 0;
		}
		/*
		 * Calculate the data offset in the record and the number
		 * of bytes we can copy.
		 *
		 * There are two degenerate cases.  First, boff may already
		 * be at bp->b_bufsize.  Secondly, the data offset within
		 * the record may exceed the record's size.
		 */
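		/*
		 * (Illustrative note: if the record starts before our
		 * current position, n is negative at this point, so
		 * roff = -n is the offset into the record's data where
		 * copying must begin; e.g. a record based at 8192 read
		 * from offset 12288 gives roff = 4096.)
		 */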
		roff = -n;
		rec_offset += roff;
		n = cursor.leaf->data_len - roff;
		if (n <= 0) {
			kprintf("strategy_read: bad n=%d roff=%d\n", n, roff);
			n = 0;
		} else if (n > bp->b_bufsize - boff) {
			n = bp->b_bufsize - boff;
		}
		/*
		 * Deal with cached truncations.  This cool bit of code
		 * allows truncate()/ftruncate() to avoid having to sync
		 * the file.
		 *
		 * If the frontend is truncated then all backend records are
		 * subject to the frontend's truncation.
		 *
		 * If the backend is truncated then backend records on-disk
		 * (but not in-memory) are subject to the backend's
		 * truncation.  In-memory records owned by the backend
		 * represent data written after the truncation point on the
		 * backend and must not be truncated.
		 *
		 * Truncate operations deal with frontend buffer cache
		 * buffers and frontend-owned in-memory records synchronously.
		 */
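		/*
		 * (Illustrative example: for a record based at 32768
		 * with a cached trunc_off of 40960, n is clipped to at
		 * most 8192 bytes; a trunc_off at or below 32768
		 * suppresses the record's data entirely.)
		 */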
		if (ip->flags & HAMMER_INODE_TRUNCATED) {
			if (hammer_cursor_ondisk(&cursor) ||
			    cursor.iprec->flush_state == HAMMER_FST_FLUSH) {
				if (ip->trunc_off <= rec_offset)
					n = 0;
				else if (ip->trunc_off < rec_offset + n)
					n = (int)(ip->trunc_off - rec_offset);
			}
		}
		if (ip->sync_flags & HAMMER_INODE_TRUNCATED) {
			if (hammer_cursor_ondisk(&cursor)) {
				if (ip->sync_trunc_off <= rec_offset)
					n = 0;
				else if (ip->sync_trunc_off < rec_offset + n)
					n = (int)(ip->sync_trunc_off - rec_offset);
			}
		}
		/*
		 * Try to issue a direct read into our bio if possible,
		 * otherwise resolve the element data into a hammer_buffer
		 * and copy.
		 *
		 * The buffer on-disk should be zeroed past any real
		 * truncation point, but may not be for any synthesized
		 * truncation point from above.
		 */
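		/*
		 * (Note: the direct read below requires the record to
		 * fill the whole buffer (boff == 0, n == b_bufsize), to
		 * reside on-disk rather than in-memory, and to start on
		 * a HAMMER buffer boundary.)
		 */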
		disk_offset = cursor.leaf->data_offset + roff;
		if (boff == 0 && n == bp->b_bufsize &&
		    hammer_cursor_ondisk(&cursor) &&
		    (disk_offset & HAMMER_BUFMASK) == 0) {
			KKASSERT((disk_offset & HAMMER_OFF_ZONE_MASK) ==
				 HAMMER_ZONE_LARGE_DATA);
			nbio->bio_offset = disk_offset;
			error = hammer_io_direct_read(trans.hmp, nbio,
						      cursor.leaf);
			goto done;
		} else if (n) {
			error = hammer_ip_resolve_data(&cursor);
			if (error == 0) {
				bcopy((char *)cursor.data + roff,
				      (char *)bp->b_data + boff, n);
			}
		}
		if (error)
			break;
		/*
		 * Iterate until we have filled the request.
		 */
		boff += n;
		if (boff == bp->b_bufsize)
			break;
		error = hammer_ip_next(&cursor);
	}

	/*
	 * There may have been a gap after the last record
	 */
	if (error == ENOENT)
		error = 0;
	if (error == 0 && boff != bp->b_bufsize) {
		KKASSERT(boff < bp->b_bufsize);
		bzero((char *)bp->b_data + boff, bp->b_bufsize - boff);
		/* boff = bp->b_bufsize; */
	}
	bp->b_resid = 0;
	bp->b_error = error;
	if (error)
		bp->b_flags |= B_ERROR;
	biodone(ap->a_bio);

done:
	if (cursor.node)
		hammer_cache_node(&ip->cache[1], cursor.node);
	hammer_done_cursor(&cursor);
	hammer_done_transaction(&trans);
	return(error);
}
/*
 * BMAP operation - used to support cluster_read() only.
 *
 * (struct vnode *vp, off_t loffset, off_t *doffsetp, int *runp, int *runb)
 *
 * This routine may return EOPNOTSUPP if the operation is not supported for
 * the specified offset.  The contents of the pointer arguments do not
 * need to be initialized in that case.
 *
 * If a disk address is available and properly aligned return 0 with
 * *doffsetp set to the zone-2 address, and *runp / *runb set appropriately
 * to the run-length relative to that offset.  Callers may assume that
 * *doffsetp is valid if 0 is returned, even if *runp is not sufficiently
 * large, so return EOPNOTSUPP if it is not sufficiently large.
 */
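/*
 * (Illustrative note: on success cluster_read() treats *doffsetp as the
 * media address of loffset, with *runb contiguous bytes known to precede
 * it and *runp contiguous bytes known to follow it.)
 */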
static
int
hammer_vop_bmap(struct vop_bmap_args *ap)
{
	struct hammer_transaction trans;
	struct hammer_inode *ip;
	struct hammer_cursor cursor;
	hammer_base_elm_t base;
	int64_t rec_offset;
	int64_t ran_end;
	int64_t tmp64;
	int64_t base_offset;
	int64_t base_disk_offset;
	int64_t last_offset;
	hammer_off_t last_disk_offset;
	hammer_off_t disk_offset;
	int rec_len;
	int error;
	int blksize;

	++hammer_stats_file_iopsr;
	ip = ap->a_vp->v_data;

	/*
	 * We can only BMAP regular files.  We can't BMAP database files,
	 * directories, etc.
	 */
	if (ip->ino_data.obj_type != HAMMER_OBJTYPE_REGFILE)
		return(EOPNOTSUPP);

	/*
	 * bmap is typically called with runp/runb both NULL when used
	 * for writing.  We do not support BMAP for writing atm.
	 */
	if (ap->a_cmd != BUF_CMD_READ)
		return(EOPNOTSUPP);

	/*
	 * Scan the B-Tree to acquire blockmap addresses, then translate
	 * to raw addresses.
	 */
	hammer_simple_transaction(&trans, ip->hmp);
#if 0
	kprintf("bmap_beg %016llx ip->cache %p\n", ap->a_loffset, ip->cache[1]);
#endif
	hammer_init_cursor(&trans, &cursor, &ip->cache[1], ip);
	/*
	 * Key range (begin and end inclusive) to scan.  Note that the keys
	 * stored in the actual records represent BASE+LEN, not BASE.  The
	 * first record containing bio_offset will have a key > bio_offset.
	 */
	cursor.key_beg.localization = ip->obj_localization +
				      HAMMER_LOCALIZE_MISC;
	cursor.key_beg.obj_id = ip->obj_id;
	cursor.key_beg.create_tid = 0;
	cursor.key_beg.delete_tid = 0;
	cursor.key_beg.obj_type = 0;
	if (ap->a_runb)
		cursor.key_beg.key = ap->a_loffset - MAXPHYS + 1;
	else
		cursor.key_beg.key = ap->a_loffset + 1;
	if (cursor.key_beg.key < 0)
		cursor.key_beg.key = 0;
	cursor.asof = ip->obj_asof;
	cursor.flags |= HAMMER_CURSOR_ASOF;

	cursor.key_end = cursor.key_beg;
	KKASSERT(ip->ino_data.obj_type == HAMMER_OBJTYPE_REGFILE);

	ran_end = ap->a_loffset + MAXPHYS;
	cursor.key_beg.rec_type = HAMMER_RECTYPE_DATA;
	cursor.key_end.rec_type = HAMMER_RECTYPE_DATA;
	tmp64 = ran_end + MAXPHYS + 1;	/* work-around GCC-4 bug */
	if (tmp64 < ran_end)
		cursor.key_end.key = 0x7FFFFFFFFFFFFFFFLL;
	else
		cursor.key_end.key = ran_end + MAXPHYS + 1;

	cursor.flags |= HAMMER_CURSOR_END_INCLUSIVE;

	error = hammer_ip_first(&cursor);
	base_offset = last_offset = 0;
	base_disk_offset = last_disk_offset = 0;
	while (error == 0) {
		/*
		 * Get the base file offset of the record.  The key for
		 * data records is (base + bytes) rather than (base).
		 *
		 * NOTE: rec_offset + rec_len may exceed the end-of-file.
		 * The extra bytes should be zero on-disk and the BMAP op
		 * should still be ok.
		 */
		base = &cursor.leaf->base;
		rec_offset = base->key - cursor.leaf->data_len;
		rec_len = cursor.leaf->data_len;

		/*
		 * Incorporate any cached truncation.
		 *
		 * NOTE: Modifications to rec_len based on synthesized
		 * truncation points remove the guarantee that any extended
		 * data on disk is zero (since the truncations may not have
		 * taken place on-media yet).
		 */
		if (ip->flags & HAMMER_INODE_TRUNCATED) {
			if (hammer_cursor_ondisk(&cursor) ||
			    cursor.iprec->flush_state == HAMMER_FST_FLUSH) {
				if (ip->trunc_off <= rec_offset)
					rec_len = 0;
				else if (ip->trunc_off < rec_offset + rec_len)
					rec_len = (int)(ip->trunc_off - rec_offset);
			}
		}
		if (ip->sync_flags & HAMMER_INODE_TRUNCATED) {
			if (hammer_cursor_ondisk(&cursor)) {
				if (ip->sync_trunc_off <= rec_offset)
					rec_len = 0;
				else if (ip->sync_trunc_off < rec_offset + rec_len)
					rec_len = (int)(ip->sync_trunc_off - rec_offset);
			}
		}
		/*
		 * Accumulate information.  If we have hit a discontiguous
		 * block reset base_offset unless we are already beyond the
		 * requested offset.  If we are, that's it, we stop.
		 */
		if (error)
			break;
		if (hammer_cursor_ondisk(&cursor)) {
			disk_offset = cursor.leaf->data_offset;
			if (rec_offset != last_offset ||
			    disk_offset != last_disk_offset) {
				if (rec_offset > ap->a_loffset)
					break;
				base_offset = rec_offset;
				base_disk_offset = disk_offset;
			}
			last_offset = rec_offset + rec_len;
			last_disk_offset = disk_offset + rec_len;
		}
		error = hammer_ip_next(&cursor);
	}
#if 0
	kprintf("BMAP %016llx: %016llx - %016llx\n",
		ap->a_loffset, base_offset, last_offset);
	kprintf("BMAP %16s: %016llx - %016llx\n",
		"", base_disk_offset, last_disk_offset);
#endif

	if (cursor.node) {
		hammer_cache_node(&ip->cache[1], cursor.node);
#if 0
		kprintf("bmap_end2 %016llx ip->cache %p\n", ap->a_loffset, ip->cache[1]);
#endif
	}
	hammer_done_cursor(&cursor);
	hammer_done_transaction(&trans);
	/*
	 * If we couldn't find any records or the records we did find were
	 * all behind the requested offset, return failure.  A forward
	 * truncation can leave a hole w/ no on-disk records.
	 */
	if (last_offset == 0 || last_offset < ap->a_loffset)
		return (EOPNOTSUPP);
	/*
	 * Figure out the block size at the requested offset and adjust
	 * our limits so the cluster_read() does not create inappropriately
	 * sized buffer cache buffers.
	 */
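	/*
	 * (Note: HAMMER mixes buffer sizes within a single file, with
	 * smaller buffers near the front and larger ones past a
	 * demarcation offset, so a run straddling the demarcation must
	 * be trimmed to the side containing ap->a_loffset before it is
	 * reported.)
	 */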
	blksize = hammer_blocksize(ap->a_loffset);
	if (hammer_blocksize(base_offset) != blksize) {
		base_offset = hammer_blockdemarc(base_offset, ap->a_loffset);
	}
	if (last_offset != ap->a_loffset &&
	    hammer_blocksize(last_offset - 1) != blksize) {
		last_offset = hammer_blockdemarc(ap->a_loffset,
						 last_offset - 1);
	}
	/*
	 * Returning EOPNOTSUPP simply prevents the direct-IO optimization
	 * from occurring.
	 */
	disk_offset = base_disk_offset + (ap->a_loffset - base_offset);

	if ((disk_offset & HAMMER_OFF_ZONE_MASK) != HAMMER_ZONE_LARGE_DATA) {
		/*
		 * Only large-data zones can be direct-IOd
		 */
		error = EOPNOTSUPP;
	} else if ((disk_offset & HAMMER_BUFMASK) ||
		   (last_offset - ap->a_loffset) < blksize) {
		/*
		 * doffsetp is not aligned or the forward run size does
		 * not cover a whole buffer, disallow the direct I/O.
		 */
		error = EOPNOTSUPP;
	} else {
		/*
		 * We're good.
		 */
		*ap->a_doffsetp = disk_offset;
		if (ap->a_runb) {
			*ap->a_runb = ap->a_loffset - base_offset;
			KKASSERT(*ap->a_runb >= 0);
		}
		if (ap->a_runp) {
			*ap->a_runp = last_offset - ap->a_loffset;
			KKASSERT(*ap->a_runp >= 0);
		}
		error = 0;
	}
	return(error);
}
/*
 * Write to a regular file.  Because this is a strategy call the OS is
 * trying to actually get data onto the media.
 */
static
int
hammer_vop_strategy_write(struct vop_strategy_args *ap)
{
	hammer_record_t record;
	hammer_mount_t hmp;
	hammer_inode_t ip;
	struct bio *bio;
	struct buf *bp;
	int blksize;
	int bytes;
	int error;

	bio = ap->a_bio;
	bp = bio->bio_buf;
	ip = ap->a_vp->v_data;
	hmp = ip->hmp;

	blksize = hammer_blocksize(bio->bio_offset);
	KKASSERT(bp->b_bufsize == blksize);
	if (ip->flags & HAMMER_INODE_RO) {
		bp->b_error = EROFS;
		bp->b_flags |= B_ERROR;
		biodone(ap->a_bio);
		return(EROFS);
	}

	/*
	 * Interlock with inode destruction (no in-kernel or directory
	 * topology visibility).  If we queue new IO while trying to
	 * destroy the inode we can deadlock the vtrunc call in
	 * hammer_inode_unloadable_check().
	 *
	 * Besides, there's no point flushing a bp associated with an
	 * inode that is being destroyed on-media and has no kernel
	 * references.
	 */
	if ((ip->flags | ip->sync_flags) &
	    (HAMMER_INODE_DELETING|HAMMER_INODE_DELETED)) {
		bp->b_resid = 0;
		biodone(ap->a_bio);
		return(0);
	}
	/*
	 * Reserve space and issue a direct-write from the front-end.
	 * NOTE: The direct_io code will hammer_bread/bcopy smaller
	 * allocations.
	 *
	 * An in-memory record will be installed to reference the storage
	 * until the flusher can get to it.
	 *
	 * Since we own the high level bio the front-end will not try to
	 * do a direct-read until the write completes.
	 *
	 * NOTE: The only time we do not reserve a full-sized buffer's
	 * worth of data is if the file is small.  We do not try to
	 * allocate a fragment (from the small-data zone) at the end of
	 * an otherwise large file as this can lead to wildly separated
	 * data.
	 */
	KKASSERT((bio->bio_offset & HAMMER_BUFMASK) == 0);
	KKASSERT(bio->bio_offset < ip->ino_data.size);
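	/*
	 * (Illustrative example: a 100-byte file stored at offset 0
	 * reserves (100 + 15) & ~15 = 112 bytes; any file larger than
	 * half a buffer, or any non-zero offset, reserves the full
	 * buffer.)
	 */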
	if (bio->bio_offset || ip->ino_data.size > HAMMER_BUFSIZE / 2)
		bytes = bp->b_bufsize;
	else
		bytes = ((int)ip->ino_data.size + 15) & ~15;

	record = hammer_ip_add_bulk(ip, bio->bio_offset, bp->b_data,
				    bytes, &error);
	if (record) {
		hammer_io_direct_write(hmp, record, bio);
		if (ip->rsv_recs > 1 && hmp->rsv_recs > hammer_limit_recs)
			hammer_flush_inode(ip, 0);
	} else {
		bp->b_bio2.bio_offset = NOOFFSET;
		bp->b_error = error;
		bp->b_flags |= B_ERROR;
		biodone(ap->a_bio);
	}
	return(error);
}
/*
 * dounlink - disconnect a directory entry
 *
 * XXX whiteout support not really in yet
 */
static int
hammer_dounlink(hammer_transaction_t trans, struct nchandle *nch,
		struct vnode *dvp, struct ucred *cred,
		int flags, int isdir)
{
	struct namecache *ncp;
	hammer_inode_t dip;
	hammer_inode_t ip;
	struct hammer_cursor cursor;
	int64_t namekey;
	int nlen, error;

	/*
	 * Calculate the namekey and setup the key range for the scan.  This
	 * works kinda like a chained hash table where the lower 32 bits
	 * of the namekey synthesize the chain.
	 *
	 * The key range is inclusive of both key_beg and key_end.
	 */
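	/*
	 * (Illustrative note: the namekey carries a hash of the name in
	 * its upper bits with the low 32 bits zero; ORing 0xFFFFFFFF
	 * into key_end below makes the scan cover every entry whose
	 * name hashes to the same value, i.e. the entire chain.)
	 */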
	dip = VTOI(dvp);
	ncp = nch->ncp;

	if (dip->flags & HAMMER_INODE_RO)
		return (EROFS);

	namekey = hammer_directory_namekey(ncp->nc_name, ncp->nc_nlen);
retry:
	hammer_init_cursor(trans, &cursor, &dip->cache[1], dip);
	cursor.key_beg.localization = dip->obj_localization +
				      HAMMER_LOCALIZE_MISC;
	cursor.key_beg.obj_id = dip->obj_id;
	cursor.key_beg.key = namekey;
	cursor.key_beg.create_tid = 0;
	cursor.key_beg.delete_tid = 0;
	cursor.key_beg.rec_type = HAMMER_RECTYPE_DIRENTRY;
	cursor.key_beg.obj_type = 0;

	cursor.key_end = cursor.key_beg;
	cursor.key_end.key |= 0xFFFFFFFFULL;
	cursor.asof = dip->obj_asof;
	cursor.flags |= HAMMER_CURSOR_END_INCLUSIVE | HAMMER_CURSOR_ASOF;
	/*
	 * Scan all matching records (the chain), locate the one matching
	 * the requested path component.  info->last_error contains the
	 * error code on search termination and could be 0, ENOENT, or
	 * something else.
	 *
	 * The hammer_ip_*() functions merge in-memory records with on-disk
	 * records for the purposes of the search.
	 */
	error = hammer_ip_first(&cursor);

	while (error == 0) {
		error = hammer_ip_resolve_data(&cursor);
		if (error)
			break;
		nlen = cursor.leaf->data_len - HAMMER_ENTRY_NAME_OFF;
		KKASSERT(nlen > 0);
		if (ncp->nc_nlen == nlen &&
		    bcmp(ncp->nc_name, cursor.data->entry.name, nlen) == 0) {
			break;
		}
		error = hammer_ip_next(&cursor);
	}
	/*
	 * If all is ok we have to get the inode so we can adjust nlinks.
	 * To avoid a deadlock with the flusher we must release the inode
	 * lock on the directory when acquiring the inode for the entry.
	 *
	 * If the target is a directory, it must be empty.
	 */
	if (error == 0) {
		hammer_unlock(&cursor.ip->lock);
		ip = hammer_get_inode(trans, dip, cursor.data->entry.obj_id,
				      dip->hmp->asof,
				      cursor.data->entry.localization,
				      0, &error);
		hammer_lock_sh(&cursor.ip->lock);
		if (error == ENOENT) {
			kprintf("obj_id %016llx\n", cursor.data->entry.obj_id);
			Debugger("ENOENT unlinking object that should exist");
		}

		/*
		 * If isdir >= 0 we validate that the entry is or is not a
		 * directory.  If isdir < 0 we don't care.
		 */
		if (error == 0 && isdir >= 0) {
			if (isdir &&
			    ip->ino_data.obj_type != HAMMER_OBJTYPE_DIRECTORY) {
				error = ENOTDIR;
			} else if (isdir == 0 &&
			    ip->ino_data.obj_type == HAMMER_OBJTYPE_DIRECTORY) {
				error = EISDIR;
			}
		}

		/*
		 * If we are trying to remove a directory the directory must
		 * be empty.
		 *
		 * WARNING: hammer_ip_check_directory_empty() may have to
		 * terminate the cursor to avoid a deadlock.  It is ok to
		 * call hammer_done_cursor() twice.
		 */
		if (error == 0 && ip->ino_data.obj_type ==
				  HAMMER_OBJTYPE_DIRECTORY) {
			error = hammer_ip_check_directory_empty(trans, ip);
		}

		/*
		 * Delete the directory entry.
		 *
		 * WARNING: hammer_ip_del_directory() may have to terminate
		 * the cursor to avoid a deadlock.  It is ok to call
		 * hammer_done_cursor() twice.
		 */
		if (error == 0) {
			error = hammer_ip_del_directory(trans, &cursor,
							dip, ip);
		}
		hammer_done_cursor(&cursor);
		if (error == 0) {
			cache_setunresolved(nch);
			cache_setvp(nch, NULL);
			/* XXX locking */
			if (ip->vp)
				cache_inval_vp(ip->vp, CINV_DESTROY);
		}
		if (ip)
			hammer_rel_inode(ip, 0);
	} else {
		hammer_done_cursor(&cursor);
	}
	if (error == EDEADLK)
		goto retry;

	return (error);
}
/************************************************************************
 *			    FIFO AND SPECFS OPS				*
 ************************************************************************
 *
 */

static int
hammer_vop_fifoclose (struct vop_close_args *ap)
{
	/* XXX update itimes */
	return (VOCALL(&fifo_vnode_vops, &ap->a_head));
}

static int
hammer_vop_fiforead (struct vop_read_args *ap)
{
	int error;

	error = VOCALL(&fifo_vnode_vops, &ap->a_head);
	/* XXX update access time */
	return (error);
}

static int
hammer_vop_fifowrite (struct vop_write_args *ap)
{
	int error;

	error = VOCALL(&fifo_vnode_vops, &ap->a_head);
	/* XXX update access time */
	return (error);
}

static int
hammer_vop_specclose (struct vop_close_args *ap)
{
	/* XXX update itimes */
	return (VOCALL(&spec_vnode_vops, &ap->a_head));
}

static int
hammer_vop_specread (struct vop_read_args *ap)
{
	/* XXX update access time */
	return (VOCALL(&spec_vnode_vops, &ap->a_head));
}

static int
hammer_vop_specwrite (struct vop_write_args *ap)
{
	/* XXX update last change time */
	return (VOCALL(&spec_vnode_vops, &ap->a_head));
}