HAMMER - Rework write pipelining
[dragonfly.git] / sys / vfs / hammer / hammer_vnops.c
blob f7d32096481bd2c99f93d0bcde4ef5f2a63fb291
/*
 * Copyright (c) 2007-2008 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $DragonFly: src/sys/vfs/hammer/hammer_vnops.c,v 1.102 2008/10/16 17:24:16 dillon Exp $
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/fcntl.h>
#include <sys/namecache.h>
#include <sys/vnode.h>
#include <sys/lockf.h>
#include <sys/event.h>
#include <sys/stat.h>
#include <sys/dirent.h>
#include <sys/file.h>
#include <vm/vm_extern.h>
#include <vfs/fifofs/fifo.h>
#include "hammer.h"

/*
 * USERFS VNOPS
 */
/*static int hammer_vop_vnoperate(struct vop_generic_args *);*/
static int hammer_vop_fsync(struct vop_fsync_args *);
static int hammer_vop_read(struct vop_read_args *);
static int hammer_vop_write(struct vop_write_args *);
static int hammer_vop_access(struct vop_access_args *);
static int hammer_vop_advlock(struct vop_advlock_args *);
static int hammer_vop_close(struct vop_close_args *);
static int hammer_vop_ncreate(struct vop_ncreate_args *);
static int hammer_vop_getattr(struct vop_getattr_args *);
static int hammer_vop_nresolve(struct vop_nresolve_args *);
static int hammer_vop_nlookupdotdot(struct vop_nlookupdotdot_args *);
static int hammer_vop_nlink(struct vop_nlink_args *);
static int hammer_vop_nmkdir(struct vop_nmkdir_args *);
static int hammer_vop_nmknod(struct vop_nmknod_args *);
static int hammer_vop_open(struct vop_open_args *);
static int hammer_vop_print(struct vop_print_args *);
static int hammer_vop_readdir(struct vop_readdir_args *);
static int hammer_vop_readlink(struct vop_readlink_args *);
static int hammer_vop_nremove(struct vop_nremove_args *);
static int hammer_vop_nrename(struct vop_nrename_args *);
static int hammer_vop_nrmdir(struct vop_nrmdir_args *);
static int hammer_vop_markatime(struct vop_markatime_args *);
static int hammer_vop_setattr(struct vop_setattr_args *);
static int hammer_vop_strategy(struct vop_strategy_args *);
static int hammer_vop_bmap(struct vop_bmap_args *ap);
static int hammer_vop_nsymlink(struct vop_nsymlink_args *);
static int hammer_vop_nwhiteout(struct vop_nwhiteout_args *);
static int hammer_vop_ioctl(struct vop_ioctl_args *);
static int hammer_vop_mountctl(struct vop_mountctl_args *);
static int hammer_vop_kqfilter (struct vop_kqfilter_args *);

static int hammer_vop_fifoclose (struct vop_close_args *);
static int hammer_vop_fiforead (struct vop_read_args *);
static int hammer_vop_fifowrite (struct vop_write_args *);
static int hammer_vop_fifokqfilter (struct vop_kqfilter_args *);

struct vop_ops hammer_vnode_vops = {
	.vop_default = vop_defaultop,
	.vop_fsync = hammer_vop_fsync,
	.vop_getpages = vop_stdgetpages,
	.vop_putpages = vop_stdputpages,
	.vop_read = hammer_vop_read,
	.vop_write = hammer_vop_write,
	.vop_access = hammer_vop_access,
	.vop_advlock = hammer_vop_advlock,
	.vop_close = hammer_vop_close,
	.vop_ncreate = hammer_vop_ncreate,
	.vop_getattr = hammer_vop_getattr,
	.vop_inactive = hammer_vop_inactive,
	.vop_reclaim = hammer_vop_reclaim,
	.vop_nresolve = hammer_vop_nresolve,
	.vop_nlookupdotdot = hammer_vop_nlookupdotdot,
	.vop_nlink = hammer_vop_nlink,
	.vop_nmkdir = hammer_vop_nmkdir,
	.vop_nmknod = hammer_vop_nmknod,
	.vop_open = hammer_vop_open,
	.vop_pathconf = vop_stdpathconf,
	.vop_print = hammer_vop_print,
	.vop_readdir = hammer_vop_readdir,
	.vop_readlink = hammer_vop_readlink,
	.vop_nremove = hammer_vop_nremove,
	.vop_nrename = hammer_vop_nrename,
	.vop_nrmdir = hammer_vop_nrmdir,
	.vop_markatime = hammer_vop_markatime,
	.vop_setattr = hammer_vop_setattr,
	.vop_bmap = hammer_vop_bmap,
	.vop_strategy = hammer_vop_strategy,
	.vop_nsymlink = hammer_vop_nsymlink,
	.vop_nwhiteout = hammer_vop_nwhiteout,
	.vop_ioctl = hammer_vop_ioctl,
	.vop_mountctl = hammer_vop_mountctl,
	.vop_kqfilter = hammer_vop_kqfilter
};

struct vop_ops hammer_spec_vops = {
	.vop_default = vop_defaultop,
	.vop_fsync = hammer_vop_fsync,
	.vop_read = vop_stdnoread,
	.vop_write = vop_stdnowrite,
	.vop_access = hammer_vop_access,
	.vop_close = hammer_vop_close,
	.vop_markatime = hammer_vop_markatime,
	.vop_getattr = hammer_vop_getattr,
	.vop_inactive = hammer_vop_inactive,
	.vop_reclaim = hammer_vop_reclaim,
	.vop_setattr = hammer_vop_setattr
};

struct vop_ops hammer_fifo_vops = {
	.vop_default = fifo_vnoperate,
	.vop_fsync = hammer_vop_fsync,
	.vop_read = hammer_vop_fiforead,
	.vop_write = hammer_vop_fifowrite,
	.vop_access = hammer_vop_access,
	.vop_close = hammer_vop_fifoclose,
	.vop_markatime = hammer_vop_markatime,
	.vop_getattr = hammer_vop_getattr,
	.vop_inactive = hammer_vop_inactive,
	.vop_reclaim = hammer_vop_reclaim,
	.vop_setattr = hammer_vop_setattr,
	.vop_kqfilter = hammer_vop_fifokqfilter
};
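
/*
 * Post kqueue events against a vnode.  Callers accumulate NOTE_* flags;
 * the helper avoids calling KNOTE when no flags are set.
 */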
static __inline
void
hammer_knote(struct vnode *vp, int flags)
{
	if (flags)
		KNOTE(&vp->v_pollinfo.vpi_selinfo.si_note, flags);
}

#ifdef DEBUG_TRUNCATE
struct hammer_inode *HammerTruncIp;
#endif

static int hammer_dounlink(hammer_transaction_t trans, struct nchandle *nch,
			struct vnode *dvp, struct ucred *cred,
			int flags, int isdir);
static int hammer_vop_strategy_read(struct vop_strategy_args *ap);
static int hammer_vop_strategy_write(struct vop_strategy_args *ap);

#if 0
static
int
hammer_vop_vnoperate(struct vop_generic_args *ap)
{
	return (VOCALL(&hammer_vnode_vops, ap));
}
#endif

/*
 * hammer_vop_fsync { vp, waitfor }
 *
 * fsync() an inode to disk and wait for it to be completely committed
 * such that the information would not be undone if a crash occurred after
 * return.
 */
static
int
hammer_vop_fsync(struct vop_fsync_args *ap)
{
	hammer_inode_t ip = VTOI(ap->a_vp);

	++hammer_count_fsyncs;
	vfsync(ap->a_vp, ap->a_waitfor, 1, NULL, NULL);
	hammer_flush_inode(ip, HAMMER_FLUSH_SIGNAL);
	if (ap->a_waitfor == MNT_WAIT) {
		vn_unlock(ap->a_vp);
		hammer_wait_inode(ip);
		vn_lock(ap->a_vp, LK_EXCLUSIVE | LK_RETRY);
	}
	return (ip->error);
}

/*
 * hammer_vop_read { vp, uio, ioflag, cred }
 *
 * MPALMOSTSAFE
 */
static
int
hammer_vop_read(struct vop_read_args *ap)
{
	struct hammer_transaction trans;
	hammer_inode_t ip;
	off_t offset;
	struct buf *bp;
	struct uio *uio;
	int error;
	int n;
	int seqcount;
	int ioseqcount;
	int blksize;
	int got_mplock;
	int bigread;

	if (ap->a_vp->v_type != VREG)
		return (EINVAL);
	ip = VTOI(ap->a_vp);
	error = 0;
	uio = ap->a_uio;

	/*
	 * Allow the UIO's size to override the sequential heuristic.
	 */
	blksize = hammer_blocksize(uio->uio_offset);
	seqcount = (uio->uio_resid + (blksize - 1)) / blksize;
	ioseqcount = ap->a_ioflag >> 16;
	if (seqcount < ioseqcount)
		seqcount = ioseqcount;

	/*
	 * Temporary hack until more of HAMMER can be made MPSAFE.
	 */
#ifdef SMP
	if (curthread->td_mpcount) {
		got_mplock = -1;
		hammer_start_transaction(&trans, ip->hmp);
	} else {
		got_mplock = 0;
	}
#else
	hammer_start_transaction(&trans, ip->hmp);
	got_mplock = -1;
#endif
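
	/*
	 * got_mplock convention (as used below): -1 means the MP lock was
	 * already held (or this is a non-SMP build) and the transaction is
	 * already open, 0 means neither has happened yet, and 1 means we
	 * acquired the lock ourselves and must rel_mplock() on the way out.
	 */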

	/*
	 * If reading or writing a huge amount of data we have to break
	 * atomicity and allow the operation to be interrupted by a signal
	 * or it can DOS the machine.
	 */
	bigread = (uio->uio_resid > 100 * 1024 * 1024);

	/*
	 * Access the data typically in HAMMER_BUFSIZE blocks via the
	 * buffer cache, but HAMMER may use a variable block size based
	 * on the offset.
	 *
	 * XXX Temporary hack, delay the start transaction while we remain
	 *     MPSAFE.  NOTE: ino_data.size cannot change while vnode is
	 *     locked-shared.
	 */
	while (uio->uio_resid > 0 && uio->uio_offset < ip->ino_data.size) {
		int64_t base_offset;
		int64_t file_limit;

		blksize = hammer_blocksize(uio->uio_offset);
		offset = (int)uio->uio_offset & (blksize - 1);
		base_offset = uio->uio_offset - offset;

		if (bigread && (error = hammer_signal_check(ip->hmp)) != 0)
			break;

		/*
		 * MPSAFE
		 */
		bp = getcacheblk(ap->a_vp, base_offset);
		if (bp) {
			error = 0;
			goto skip;
		}

		/*
		 * MPUNSAFE
		 */
		if (got_mplock == 0) {
			got_mplock = 1;
			get_mplock();
			hammer_start_transaction(&trans, ip->hmp);
		}

		if (hammer_cluster_enable) {
			/*
			 * Use file_limit to prevent cluster_read() from
			 * creating buffers of the wrong block size past
			 * the demarc.
			 */
			file_limit = ip->ino_data.size;
			if (base_offset < HAMMER_XDEMARC &&
			    file_limit > HAMMER_XDEMARC) {
				file_limit = HAMMER_XDEMARC;
			}
			error = cluster_read(ap->a_vp,
					     file_limit, base_offset,
					     blksize, MAXPHYS,
					     seqcount, &bp);
		} else {
			error = bread(ap->a_vp, base_offset, blksize, &bp);
		}
		if (error) {
			kprintf("error %d\n", error);
			brelse(bp);
			break;
		}
skip:
		/* bp->b_flags |= B_CLUSTEROK; temporarily disabled */
		n = blksize - offset;
		if (n > uio->uio_resid)
			n = uio->uio_resid;
		if (n > ip->ino_data.size - uio->uio_offset)
			n = (int)(ip->ino_data.size - uio->uio_offset);
		error = uiomove((char *)bp->b_data + offset, n, uio);

		/* data has a lower priority than meta-data */
		bp->b_flags |= B_AGE;
		bqrelse(bp);
		if (error)
			break;
		hammer_stats_file_read += n;
	}

	/*
	 * XXX only update the atime if we had to get the MP lock.
	 * XXX hack hack hack, fixme.
	 */
	if (got_mplock) {
		if ((ip->flags & HAMMER_INODE_RO) == 0 &&
		    (ip->hmp->mp->mnt_flag & MNT_NOATIME) == 0) {
			ip->ino_data.atime = trans.time;
			hammer_modify_inode(ip, HAMMER_INODE_ATIME);
		}
		hammer_done_transaction(&trans);
		if (got_mplock > 0)
			rel_mplock();
	}
	return (error);
}

/*
 * hammer_vop_write { vp, uio, ioflag, cred }
 */
static
int
hammer_vop_write(struct vop_write_args *ap)
{
	struct hammer_transaction trans;
	struct hammer_inode *ip;
	hammer_mount_t hmp;
	struct uio *uio;
	int offset;
	off_t base_offset;
	struct buf *bp;
	int kflags;
	int error;
	int n;
	int flags;
	int seqcount;
	int bigwrite;

	if (ap->a_vp->v_type != VREG)
		return (EINVAL);
	ip = VTOI(ap->a_vp);
	hmp = ip->hmp;
	error = 0;
	kflags = 0;
	seqcount = ap->a_ioflag >> 16;

	if (ip->flags & HAMMER_INODE_RO)
		return (EROFS);

	/*
	 * Create a transaction to cover the operations we perform.
	 */
	hammer_start_transaction(&trans, hmp);
	uio = ap->a_uio;

	/*
	 * Check append mode
	 */
	if (ap->a_ioflag & IO_APPEND)
		uio->uio_offset = ip->ino_data.size;

	/*
	 * Check for illegal write offsets.  Valid range is 0...2^63-1.
	 *
	 * NOTE: the base_offset assignment is required to work around what
	 * I consider to be a GCC-4 optimization bug.
	 */
	if (uio->uio_offset < 0) {
		hammer_done_transaction(&trans);
		return (EFBIG);
	}
	base_offset = uio->uio_offset + uio->uio_resid; /* work around gcc-4 */
	if (uio->uio_resid > 0 && base_offset <= uio->uio_offset) {
		hammer_done_transaction(&trans);
		return (EFBIG);
	}

	/*
	 * If reading or writing a huge amount of data we have to break
	 * atomicity and allow the operation to be interrupted by a signal
	 * or it can DOS the machine.
	 */
	bigwrite = (uio->uio_resid > 100 * 1024 * 1024);

	/*
	 * Access the data typically in HAMMER_BUFSIZE blocks via the
	 * buffer cache, but HAMMER may use a variable block size based
	 * on the offset.
	 */
	while (uio->uio_resid > 0) {
		int fixsize = 0;
		int blksize;
		int blkmask;

		if ((error = hammer_checkspace(hmp, HAMMER_CHKSPC_WRITE)) != 0)
			break;
		if (bigwrite && (error = hammer_signal_check(hmp)) != 0)
			break;

		blksize = hammer_blocksize(uio->uio_offset);

		/*
		 * Do not allow HAMMER to blow out the buffer cache.  Very
		 * large UIOs can lock out other processes due to bwillwrite()
		 * mechanics.
		 *
		 * The hammer inode is not locked during these operations.
		 * The vnode is locked which can interfere with the pageout
		 * daemon for non-UIO_NOCOPY writes but should not interfere
		 * with the buffer cache.  Even so, we cannot afford to
		 * allow the pageout daemon to build up too many dirty buffer
		 * cache buffers.
		 *
		 * Only call this if we aren't being recursively called from
		 * a virtual disk device (vn), else we may deadlock.
		 */
		if ((ap->a_ioflag & IO_RECURSE) == 0)
			bwillwrite(blksize);

		/*
		 * Control the number of pending records associated with
		 * this inode.  If too many have accumulated start a
		 * flush.  Try to maintain a pipeline with the flusher.
		 */
		if (ip->rsv_recs >= hammer_limit_inode_recs) {
			hammer_flush_inode(ip, HAMMER_FLUSH_SIGNAL);
		}
		if (ip->rsv_recs >= hammer_limit_inode_recs * 2) {
			while (ip->rsv_recs >= hammer_limit_inode_recs) {
				tsleep(&ip->rsv_recs, 0, "hmrwww", hz);
			}
			hammer_flush_inode(ip, HAMMER_FLUSH_SIGNAL);
		}
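
		/*
		 * Pipelining note: at hammer_limit_inode_recs the flusher
		 * is merely signalled and the writer keeps going; at twice
		 * the limit the writer sleeps ("hmrwww") until the backlog
		 * drains back below the limit, then kicks off another flush.
		 */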

#if 0
		/*
		 * Do not allow HAMMER to blow out system memory by
		 * accumulating too many records.  Records are so well
		 * decoupled from the buffer cache that it is possible
		 * for userland to push data out to the media via
		 * direct-write, but build up the records queued to the
		 * backend faster than the backend can flush them out.
		 * HAMMER has hit its write limit but the frontend has
		 * no pushback to slow it down.
		 */
		if (hmp->rsv_recs > hammer_limit_recs / 2) {
			/*
			 * Get the inode on the flush list
			 */
			if (ip->rsv_recs >= 64)
				hammer_flush_inode(ip, HAMMER_FLUSH_SIGNAL);
			else if (ip->rsv_recs >= 16)
				hammer_flush_inode(ip, 0);

			/*
			 * Keep the flusher going if the system keeps
			 * queueing records.
			 */
			delta = hmp->count_newrecords -
				hmp->last_newrecords;
			if (delta < 0 || delta > hammer_limit_recs / 2) {
				hmp->last_newrecords = hmp->count_newrecords;
				hammer_sync_hmp(hmp, MNT_NOWAIT);
			}

			/*
			 * If we have gotten behind start slowing
			 * down the writers.
			 */
			delta = (hmp->rsv_recs - hammer_limit_recs) *
				hz / hammer_limit_recs;
			if (delta > 0)
				tsleep(&trans, 0, "hmrslo", delta);
		}
#endif

		/*
		 * Calculate the blocksize at the current offset and figure
		 * out how much we can actually write.
		 */
		blkmask = blksize - 1;
		offset = (int)uio->uio_offset & blkmask;
		base_offset = uio->uio_offset & ~(int64_t)blkmask;
		n = blksize - offset;
		if (n > uio->uio_resid)
			n = uio->uio_resid;
		if (uio->uio_offset + n > ip->ino_data.size) {
			vnode_pager_setsize(ap->a_vp, uio->uio_offset + n);
			fixsize = 1;
			kflags |= NOTE_EXTEND;
		}

		if (uio->uio_segflg == UIO_NOCOPY) {
			/*
			 * Issuing a write with the same data backing the
			 * buffer.  Instantiate the buffer to collect the
			 * backing vm pages, then read-in any missing bits.
			 *
			 * This case is used by vop_stdputpages().
			 */
			bp = getblk(ap->a_vp, base_offset,
				    blksize, GETBLK_BHEAVY, 0);
			if ((bp->b_flags & B_CACHE) == 0) {
				bqrelse(bp);
				error = bread(ap->a_vp, base_offset,
					      blksize, &bp);
			}
		} else if (offset == 0 && uio->uio_resid >= blksize) {
			/*
			 * Even though we are entirely overwriting the buffer
			 * we may still have to zero it out to avoid a
			 * mmap/write visibility issue.
			 */
			bp = getblk(ap->a_vp, base_offset, blksize, GETBLK_BHEAVY, 0);
			if ((bp->b_flags & B_CACHE) == 0)
				vfs_bio_clrbuf(bp);
		} else if (base_offset >= ip->ino_data.size) {
			/*
			 * If the base offset of the buffer is beyond the
			 * file EOF, we don't have to issue a read.
			 */
			bp = getblk(ap->a_vp, base_offset,
				    blksize, GETBLK_BHEAVY, 0);
			vfs_bio_clrbuf(bp);
		} else {
			/*
			 * Partial overwrite, read in any missing bits then
			 * replace the portion being written.
			 */
			error = bread(ap->a_vp, base_offset, blksize, &bp);
			if (error == 0)
				bheavy(bp);
		}
		if (error == 0) {
			error = uiomove((char *)bp->b_data + offset,
					n, uio);
		}

		/*
		 * If we screwed up we have to undo any VM size changes we
		 * made.
		 */
		if (error) {
			brelse(bp);
			if (fixsize) {
				vtruncbuf(ap->a_vp, ip->ino_data.size,
					  hammer_blocksize(ip->ino_data.size));
			}
			break;
		}
		kflags |= NOTE_WRITE;
		hammer_stats_file_write += n;
		/* bp->b_flags |= B_CLUSTEROK; temporarily disabled */
		if (ip->ino_data.size < uio->uio_offset) {
			ip->ino_data.size = uio->uio_offset;
			flags = HAMMER_INODE_DDIRTY;
			vnode_pager_setsize(ap->a_vp, ip->ino_data.size);
		} else {
			flags = 0;
		}
		ip->ino_data.mtime = trans.time;
		flags |= HAMMER_INODE_MTIME | HAMMER_INODE_BUFS;
		hammer_modify_inode(ip, flags);

		/*
		 * Once we dirty the buffer any cached zone-X offset
		 * becomes invalid.  HAMMER NOTE: no-history mode cannot
		 * allow overwriting over the same data sector unless
		 * we provide UNDOs for the old data, which we don't.
		 */
		bp->b_bio2.bio_offset = NOOFFSET;
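		/*
		 * (Setting NOOFFSET should force the strategy code to
		 * re-translate the logical offset on the next I/O.)
		 */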

		/*
		 * Final buffer disposition.
		 *
		 * Because meta-data updates are deferred, HAMMER is
		 * especially sensitive to excessive bdwrite()s because
		 * the I/O stream is not broken up by disk reads.  So the
		 * buffer cache simply cannot keep up.
		 *
		 * WARNING!  blksize is variable.  cluster_write() is
		 * expected to not blow up if it encounters buffers that
		 * do not match the passed blksize.
		 */
		bp->b_flags |= B_AGE;
		if (ap->a_ioflag & IO_SYNC) {
			bwrite(bp);
		} else if (ap->a_ioflag & IO_DIRECT) {
			bawrite(bp);
		} else if (offset + n == blksize) {
			if (hammer_cluster_enable == 0 ||
			    (ap->a_vp->v_mount->mnt_flag & MNT_NOCLUSTERW)) {
				bawrite(bp);
			} else {
				cluster_write(bp, ip->ino_data.size,
					      blksize, seqcount);
			}
		} else {
			bdwrite(bp);
		}
	}
	hammer_done_transaction(&trans);
	hammer_knote(ap->a_vp, kflags);
	return (error);
}

/*
 * hammer_vop_access { vp, mode, cred }
 */
static
int
hammer_vop_access(struct vop_access_args *ap)
{
	struct hammer_inode *ip = VTOI(ap->a_vp);
	uid_t uid;
	gid_t gid;
	int error;

	++hammer_stats_file_iopsr;
	uid = hammer_to_unix_xid(&ip->ino_data.uid);
	gid = hammer_to_unix_xid(&ip->ino_data.gid);
	error = vop_helper_access(ap, uid, gid, ip->ino_data.mode,
				  ip->ino_data.uflags);
	return (error);
}

/*
 * hammer_vop_advlock { vp, id, op, fl, flags }
 */
static
int
hammer_vop_advlock(struct vop_advlock_args *ap)
{
	hammer_inode_t ip = VTOI(ap->a_vp);

	return (lf_advlock(ap, &ip->advlock, ip->ino_data.size));
}

/*
 * hammer_vop_close { vp, fflag }
 */
static
int
hammer_vop_close(struct vop_close_args *ap)
{
	/*hammer_inode_t ip = VTOI(ap->a_vp);*/
	return (vop_stdclose(ap));
}

/*
 * hammer_vop_ncreate { nch, dvp, vpp, cred, vap }
 *
 * The operating system has already ensured that the directory entry
 * does not exist and done all appropriate namespace locking.
 */
static
int
hammer_vop_ncreate(struct vop_ncreate_args *ap)
{
	struct hammer_transaction trans;
	struct hammer_inode *dip;
	struct hammer_inode *nip;
	struct nchandle *nch;
	int error;

	nch = ap->a_nch;
	dip = VTOI(ap->a_dvp);

	if (dip->flags & HAMMER_INODE_RO)
		return (EROFS);
	if ((error = hammer_checkspace(dip->hmp, HAMMER_CHKSPC_CREATE)) != 0)
		return (error);

	/*
	 * Create a transaction to cover the operations we perform.
	 */
	hammer_start_transaction(&trans, dip->hmp);
	++hammer_stats_file_iopsw;

	/*
	 * Create a new filesystem object of the requested type.  The
	 * returned inode will be referenced and shared-locked to prevent
	 * it from being moved to the flusher.
	 */
	error = hammer_create_inode(&trans, ap->a_vap, ap->a_cred,
				    dip, nch->ncp->nc_name, nch->ncp->nc_nlen,
				    NULL, &nip);
	if (error) {
		hkprintf("hammer_create_inode error %d\n", error);
		hammer_done_transaction(&trans);
		*ap->a_vpp = NULL;
		return (error);
	}

	/*
	 * Add the new filesystem object to the directory.  This will also
	 * bump the inode's link count.
	 */
	error = hammer_ip_add_directory(&trans, dip,
					nch->ncp->nc_name, nch->ncp->nc_nlen,
					nip);
	if (error)
		hkprintf("hammer_ip_add_directory error %d\n", error);

	/*
	 * Finish up.
	 */
	if (error) {
		hammer_rel_inode(nip, 0);
		hammer_done_transaction(&trans);
		*ap->a_vpp = NULL;
	} else {
		error = hammer_get_vnode(nip, ap->a_vpp);
		hammer_done_transaction(&trans);
		hammer_rel_inode(nip, 0);
		if (error == 0) {
			cache_setunresolved(ap->a_nch);
			cache_setvp(ap->a_nch, *ap->a_vpp);
		}
		hammer_knote(ap->a_dvp, NOTE_WRITE);
	}
	return (error);
}

/*
 * hammer_vop_getattr { vp, vap }
 *
 * Retrieve an inode's attribute information.  When accessing inodes
 * historically we fake the atime field to ensure consistent results.
 * The atime field is stored in the B-Tree element and allowed to be
 * updated without cycling the element.
 *
 * MPSAFE
 */
static
int
hammer_vop_getattr(struct vop_getattr_args *ap)
{
	struct hammer_inode *ip = VTOI(ap->a_vp);
	struct vattr *vap = ap->a_vap;

	/*
	 * We want the fsid to be different when accessing a filesystem
	 * with different as-of's so programs like diff don't think
	 * the files are the same.
	 *
	 * We also want the fsid to be the same when comparing snapshots,
	 * or when comparing mirrors (which might be backed by different
	 * physical devices).  HAMMER fsids are based on the PFS's
	 * shared_uuid field.
	 *
	 * XXX there is a chance of collision here.  The va_fsid reported
	 * by stat is different from the more involved fsid used in the
	 * mount structure.
	 */
	++hammer_stats_file_iopsr;
	hammer_lock_sh(&ip->lock);
	vap->va_fsid = ip->pfsm->fsid_udev ^ (u_int32_t)ip->obj_asof ^
		       (u_int32_t)(ip->obj_asof >> 32);

	vap->va_fileid = ip->ino_leaf.base.obj_id;
	vap->va_mode = ip->ino_data.mode;
	vap->va_nlink = ip->ino_data.nlinks;
	vap->va_uid = hammer_to_unix_xid(&ip->ino_data.uid);
	vap->va_gid = hammer_to_unix_xid(&ip->ino_data.gid);
	vap->va_rmajor = 0;
	vap->va_rminor = 0;
	vap->va_size = ip->ino_data.size;

	/*
	 * Special case for @@PFS softlinks.  The actual size of the
	 * expanded softlink is "@@0x%016llx:%05d" == 26 bytes,
	 * or for MAX_TID is "@@-1:%05d" == 10 bytes.
	 */
	if (ip->ino_data.obj_type == HAMMER_OBJTYPE_SOFTLINK &&
	    ip->ino_data.size == 10 &&
	    ip->obj_asof == HAMMER_MAX_TID &&
	    ip->obj_localization == 0 &&
	    strncmp(ip->ino_data.ext.symlink, "@@PFS", 5) == 0) {
		if (ip->pfsm->pfsd.mirror_flags & HAMMER_PFSD_SLAVE)
			vap->va_size = 26;
		else
			vap->va_size = 10;
	}

	/*
	 * We must provide a consistent atime and mtime for snapshots
	 * so people can do a 'tar cf - ... | md5' on them and get
	 * consistent results.
	 */
	if (ip->flags & HAMMER_INODE_RO) {
		hammer_time_to_timespec(ip->ino_data.ctime, &vap->va_atime);
		hammer_time_to_timespec(ip->ino_data.ctime, &vap->va_mtime);
	} else {
		hammer_time_to_timespec(ip->ino_data.atime, &vap->va_atime);
		hammer_time_to_timespec(ip->ino_data.mtime, &vap->va_mtime);
	}
	hammer_time_to_timespec(ip->ino_data.ctime, &vap->va_ctime);
	vap->va_flags = ip->ino_data.uflags;
	vap->va_gen = 1;	/* hammer inums are unique for all time */
	vap->va_blocksize = HAMMER_BUFSIZE;
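	/*
	 * va_bytes below is rounded to the block size in effect for the
	 * file: the large-block size past HAMMER_XDEMARC, the normal
	 * buffer size otherwise, and (presumably because small files can
	 * be stored in-band with the inode record) 16-byte granularity
	 * for files of half a buffer or less.
	 */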
	if (ip->ino_data.size >= HAMMER_XDEMARC) {
		vap->va_bytes = (ip->ino_data.size + HAMMER_XBUFMASK64) &
				~HAMMER_XBUFMASK64;
	} else if (ip->ino_data.size > HAMMER_BUFSIZE / 2) {
		vap->va_bytes = (ip->ino_data.size + HAMMER_BUFMASK64) &
				~HAMMER_BUFMASK64;
	} else {
		vap->va_bytes = (ip->ino_data.size + 15) & ~15;
	}

	vap->va_type = hammer_get_vnode_type(ip->ino_data.obj_type);
	vap->va_filerev = 0;	/* XXX */
	/* mtime uniquely identifies any adjustments made to the file XXX */
	vap->va_fsmid = ip->ino_data.mtime;
	vap->va_uid_uuid = ip->ino_data.uid;
	vap->va_gid_uuid = ip->ino_data.gid;
	vap->va_fsid_uuid = ip->hmp->fsid;
	vap->va_vaflags = VA_UID_UUID_VALID | VA_GID_UUID_VALID |
			  VA_FSID_UUID_VALID;

	switch (ip->ino_data.obj_type) {
	case HAMMER_OBJTYPE_CDEV:
	case HAMMER_OBJTYPE_BDEV:
		vap->va_rmajor = ip->ino_data.rmajor;
		vap->va_rminor = ip->ino_data.rminor;
		break;
	default:
		break;
	}
	hammer_unlock(&ip->lock);
	return(0);
}

/*
 * hammer_vop_nresolve { nch, dvp, cred }
 *
 * Locate the requested directory entry.
 */
static
int
hammer_vop_nresolve(struct vop_nresolve_args *ap)
{
	struct hammer_transaction trans;
	struct namecache *ncp;
	hammer_inode_t dip;
	hammer_inode_t ip;
	hammer_tid_t asof;
	struct hammer_cursor cursor;
	struct vnode *vp;
	int64_t namekey;
	int error;
	int i;
	int nlen;
	int flags;
	int ispfs;
	int64_t obj_id;
	u_int32_t localization;
	u_int32_t max_iterations;

	/*
	 * Misc initialization, plus handle as-of name extensions.  Look for
	 * the '@@' extension.  Note that as-of files and directories cannot
	 * be modified.
	 */
	dip = VTOI(ap->a_dvp);
	ncp = ap->a_nch->ncp;
	asof = dip->obj_asof;
	localization = dip->obj_localization;	/* for code consistency */
	nlen = ncp->nc_nlen;
	flags = dip->flags & HAMMER_INODE_RO;
	ispfs = 0;

	hammer_simple_transaction(&trans, dip->hmp);
	++hammer_stats_file_iopsr;

	for (i = 0; i < nlen; ++i) {
		if (ncp->nc_name[i] == '@' && ncp->nc_name[i+1] == '@') {
			error = hammer_str_to_tid(ncp->nc_name + i + 2,
						  &ispfs, &asof, &localization);
			if (error != 0) {
				i = nlen;
				break;
			}
			if (asof != HAMMER_MAX_TID)
				flags |= HAMMER_INODE_RO;
			break;
		}
	}
	nlen = i;
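
	/*
	 * nlen now covers only the name in front of any "@@" extension;
	 * the asof/localization parsed from the extension governs the
	 * lookup below.
	 */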

	/*
	 * If this is a PFS softlink we dive into the PFS
	 */
	if (ispfs && nlen == 0) {
		ip = hammer_get_inode(&trans, dip, HAMMER_OBJID_ROOT,
				      asof, localization,
				      flags, &error);
		if (error == 0) {
			error = hammer_get_vnode(ip, &vp);
			hammer_rel_inode(ip, 0);
		} else {
			vp = NULL;
		}
		if (error == 0) {
			vn_unlock(vp);
			cache_setvp(ap->a_nch, vp);
			vrele(vp);
		}
		goto done;
	}

	/*
	 * If there is no path component the time extension is relative to dip.
	 * e.g. "fubar/@@<snapshot>"
	 *
	 * "." is handled by the kernel, but ".@@<snapshot>" is not.
	 * e.g. "fubar/.@@<snapshot>"
	 *
	 * ".." is handled by the kernel.  We do not currently handle
	 * "..@<snapshot>".
	 */
	if (nlen == 0 || (nlen == 1 && ncp->nc_name[0] == '.')) {
		ip = hammer_get_inode(&trans, dip, dip->obj_id,
				      asof, dip->obj_localization,
				      flags, &error);
		if (error == 0) {
			error = hammer_get_vnode(ip, &vp);
			hammer_rel_inode(ip, 0);
		} else {
			vp = NULL;
		}
		if (error == 0) {
			vn_unlock(vp);
			cache_setvp(ap->a_nch, vp);
			vrele(vp);
		}
		goto done;
	}

	/*
	 * Calculate the namekey and setup the key range for the scan.  This
	 * works kinda like a chained hash table where the lower 32 bits
	 * of the namekey synthesize the chain.
	 *
	 * The key range is inclusive of both key_beg and key_end.
	 */
	namekey = hammer_directory_namekey(dip, ncp->nc_name, nlen,
					   &max_iterations);

	error = hammer_init_cursor(&trans, &cursor, &dip->cache[1], dip);
	cursor.key_beg.localization = dip->obj_localization +
				      hammer_dir_localization(dip);
	cursor.key_beg.obj_id = dip->obj_id;
	cursor.key_beg.key = namekey;
	cursor.key_beg.create_tid = 0;
	cursor.key_beg.delete_tid = 0;
	cursor.key_beg.rec_type = HAMMER_RECTYPE_DIRENTRY;
	cursor.key_beg.obj_type = 0;

	cursor.key_end = cursor.key_beg;
	cursor.key_end.key += max_iterations;
	cursor.asof = asof;
	cursor.flags |= HAMMER_CURSOR_END_INCLUSIVE | HAMMER_CURSOR_ASOF;

	/*
	 * Scan all matching records (the chain), locate the one matching
	 * the requested path component.
	 *
	 * The hammer_ip_*() functions merge in-memory records with on-disk
	 * records for the purposes of the search.
	 */
	obj_id = 0;
	localization = HAMMER_DEF_LOCALIZATION;

	if (error == 0) {
		error = hammer_ip_first(&cursor);
		while (error == 0) {
			error = hammer_ip_resolve_data(&cursor);
			if (error)
				break;
			if (nlen == cursor.leaf->data_len - HAMMER_ENTRY_NAME_OFF &&
			    bcmp(ncp->nc_name, cursor.data->entry.name, nlen) == 0) {
				obj_id = cursor.data->entry.obj_id;
				localization = cursor.data->entry.localization;
				break;
			}
			error = hammer_ip_next(&cursor);
		}
	}
	hammer_done_cursor(&cursor);

	/*
	 * Lookup the obj_id.  This should always succeed.  If it does not
	 * the filesystem may be damaged and we return a dummy inode.
	 */
	if (error == 0) {
		ip = hammer_get_inode(&trans, dip, obj_id,
				      asof, localization,
				      flags, &error);
		if (error == ENOENT) {
			kprintf("HAMMER: WARNING: Missing "
				"inode for dirent \"%s\"\n"
				"\tobj_id = %016llx, asof=%016llx, lo=%08x\n",
				ncp->nc_name,
				(long long)obj_id, (long long)asof,
				localization);
			error = 0;
			ip = hammer_get_dummy_inode(&trans, dip, obj_id,
						    asof, localization,
						    flags, &error);
		}
		if (error == 0) {
			error = hammer_get_vnode(ip, &vp);
			hammer_rel_inode(ip, 0);
		} else {
			vp = NULL;
		}
		if (error == 0) {
			vn_unlock(vp);
			cache_setvp(ap->a_nch, vp);
			vrele(vp);
		}
	} else if (error == ENOENT) {
		cache_setvp(ap->a_nch, NULL);
	}
done:
	hammer_done_transaction(&trans);
	return (error);
}

/*
 * hammer_vop_nlookupdotdot { dvp, vpp, cred }
 *
 * Locate the parent directory of a directory vnode.
 *
 * dvp is referenced but not locked.  *vpp must be returned referenced and
 * locked.  A parent_obj_id of 0 does not necessarily indicate that we are
 * at the root, instead it could indicate that the directory we were in was
 * removed.
 *
 * NOTE: as-of sequences are not linked into the directory structure.  If
 * we are at the root with a different asof than the mount point, reload
 * the same directory with the mount point's asof.  I'm not sure what this
 * will do to NFS.  We encode ASOF stamps in NFS file handles so it might not
 * get confused, but it hasn't been tested.
 */
static
int
hammer_vop_nlookupdotdot(struct vop_nlookupdotdot_args *ap)
{
	struct hammer_transaction trans;
	struct hammer_inode *dip;
	struct hammer_inode *ip;
	int64_t parent_obj_id;
	u_int32_t parent_obj_localization;
	hammer_tid_t asof;
	int error;

	dip = VTOI(ap->a_dvp);
	asof = dip->obj_asof;

	/*
	 * Who is our parent?  This could be the root of a pseudo-filesystem
	 * whose parent is in another localization domain.
	 */
	parent_obj_id = dip->ino_data.parent_obj_id;
	if (dip->obj_id == HAMMER_OBJID_ROOT)
		parent_obj_localization = dip->ino_data.ext.obj.parent_obj_localization;
	else
		parent_obj_localization = dip->obj_localization;

	if (parent_obj_id == 0) {
		if (dip->obj_id == HAMMER_OBJID_ROOT &&
		    asof != dip->hmp->asof) {
			parent_obj_id = dip->obj_id;
			asof = dip->hmp->asof;
			*ap->a_fakename = kmalloc(19, M_TEMP, M_WAITOK);
			ksnprintf(*ap->a_fakename, 19, "0x%016llx",
				  (long long)dip->obj_asof);
		} else {
			*ap->a_vpp = NULL;
			return ENOENT;
		}
	}

	hammer_simple_transaction(&trans, dip->hmp);
	++hammer_stats_file_iopsr;

	ip = hammer_get_inode(&trans, dip, parent_obj_id,
			      asof, parent_obj_localization,
			      dip->flags, &error);
	if (ip) {
		error = hammer_get_vnode(ip, ap->a_vpp);
		hammer_rel_inode(ip, 0);
	} else {
		*ap->a_vpp = NULL;
	}
	hammer_done_transaction(&trans);
	return (error);
}

/*
 * hammer_vop_nlink { nch, dvp, vp, cred }
 */
static
int
hammer_vop_nlink(struct vop_nlink_args *ap)
{
	struct hammer_transaction trans;
	struct hammer_inode *dip;
	struct hammer_inode *ip;
	struct nchandle *nch;
	int error;

	if (ap->a_dvp->v_mount != ap->a_vp->v_mount)
		return(EXDEV);

	nch = ap->a_nch;
	dip = VTOI(ap->a_dvp);
	ip = VTOI(ap->a_vp);

	if (dip->obj_localization != ip->obj_localization)
		return(EXDEV);

	if (dip->flags & HAMMER_INODE_RO)
		return (EROFS);
	if (ip->flags & HAMMER_INODE_RO)
		return (EROFS);
	if ((error = hammer_checkspace(dip->hmp, HAMMER_CHKSPC_CREATE)) != 0)
		return (error);

	/*
	 * Create a transaction to cover the operations we perform.
	 */
	hammer_start_transaction(&trans, dip->hmp);
	++hammer_stats_file_iopsw;

	/*
	 * Add the filesystem object to the directory.  Note that neither
	 * dip nor ip are referenced or locked, but their vnodes are
	 * referenced.  This function will bump the inode's link count.
	 */
	error = hammer_ip_add_directory(&trans, dip,
					nch->ncp->nc_name, nch->ncp->nc_nlen,
					ip);

	/*
	 * Finish up.
	 */
	if (error == 0) {
		cache_setunresolved(nch);
		cache_setvp(nch, ap->a_vp);
	}
	hammer_done_transaction(&trans);
	hammer_knote(ap->a_vp, NOTE_LINK);
	hammer_knote(ap->a_dvp, NOTE_WRITE);
	return (error);
}

/*
 * hammer_vop_nmkdir { nch, dvp, vpp, cred, vap }
 *
 * The operating system has already ensured that the directory entry
 * does not exist and done all appropriate namespace locking.
 */
static
int
hammer_vop_nmkdir(struct vop_nmkdir_args *ap)
{
	struct hammer_transaction trans;
	struct hammer_inode *dip;
	struct hammer_inode *nip;
	struct nchandle *nch;
	int error;

	nch = ap->a_nch;
	dip = VTOI(ap->a_dvp);

	if (dip->flags & HAMMER_INODE_RO)
		return (EROFS);
	if ((error = hammer_checkspace(dip->hmp, HAMMER_CHKSPC_CREATE)) != 0)
		return (error);

	/*
	 * Create a transaction to cover the operations we perform.
	 */
	hammer_start_transaction(&trans, dip->hmp);
	++hammer_stats_file_iopsw;

	/*
	 * Create a new filesystem object of the requested type.  The
	 * returned inode will be referenced but not locked.
	 */
	error = hammer_create_inode(&trans, ap->a_vap, ap->a_cred,
				    dip, nch->ncp->nc_name, nch->ncp->nc_nlen,
				    NULL, &nip);
	if (error) {
		hkprintf("hammer_mkdir error %d\n", error);
		hammer_done_transaction(&trans);
		*ap->a_vpp = NULL;
		return (error);
	}

	/*
	 * Add the new filesystem object to the directory.  This will also
	 * bump the inode's link count.
	 */
	error = hammer_ip_add_directory(&trans, dip,
					nch->ncp->nc_name, nch->ncp->nc_nlen,
					nip);
	if (error)
		hkprintf("hammer_mkdir (add) error %d\n", error);

	/*
	 * Finish up.
	 */
	if (error) {
		hammer_rel_inode(nip, 0);
		*ap->a_vpp = NULL;
	} else {
		error = hammer_get_vnode(nip, ap->a_vpp);
		hammer_rel_inode(nip, 0);
		if (error == 0) {
			cache_setunresolved(ap->a_nch);
			cache_setvp(ap->a_nch, *ap->a_vpp);
		}
	}
	hammer_done_transaction(&trans);
	if (error == 0)
		hammer_knote(ap->a_dvp, NOTE_WRITE | NOTE_LINK);
	return (error);
}

/*
 * hammer_vop_nmknod { nch, dvp, vpp, cred, vap }
 *
 * The operating system has already ensured that the directory entry
 * does not exist and done all appropriate namespace locking.
 */
static
int
hammer_vop_nmknod(struct vop_nmknod_args *ap)
{
	struct hammer_transaction trans;
	struct hammer_inode *dip;
	struct hammer_inode *nip;
	struct nchandle *nch;
	int error;

	nch = ap->a_nch;
	dip = VTOI(ap->a_dvp);

	if (dip->flags & HAMMER_INODE_RO)
		return (EROFS);
	if ((error = hammer_checkspace(dip->hmp, HAMMER_CHKSPC_CREATE)) != 0)
		return (error);

	/*
	 * Create a transaction to cover the operations we perform.
	 */
	hammer_start_transaction(&trans, dip->hmp);
	++hammer_stats_file_iopsw;

	/*
	 * Create a new filesystem object of the requested type.  The
	 * returned inode will be referenced but not locked.
	 *
	 * If mknod specifies a directory a pseudo-fs is created.
	 */
	error = hammer_create_inode(&trans, ap->a_vap, ap->a_cred,
				    dip, nch->ncp->nc_name, nch->ncp->nc_nlen,
				    NULL, &nip);
	if (error) {
		hammer_done_transaction(&trans);
		*ap->a_vpp = NULL;
		return (error);
	}

	/*
	 * Add the new filesystem object to the directory.  This will also
	 * bump the inode's link count.
	 */
	error = hammer_ip_add_directory(&trans, dip,
					nch->ncp->nc_name, nch->ncp->nc_nlen,
					nip);

	/*
	 * Finish up.
	 */
	if (error) {
		hammer_rel_inode(nip, 0);
		*ap->a_vpp = NULL;
	} else {
		error = hammer_get_vnode(nip, ap->a_vpp);
		hammer_rel_inode(nip, 0);
		if (error == 0) {
			cache_setunresolved(ap->a_nch);
			cache_setvp(ap->a_nch, *ap->a_vpp);
		}
	}
	hammer_done_transaction(&trans);
	if (error == 0)
		hammer_knote(ap->a_dvp, NOTE_WRITE);
	return (error);
}

/*
 * hammer_vop_open { vp, mode, cred, fp }
 */
static
int
hammer_vop_open(struct vop_open_args *ap)
{
	hammer_inode_t ip;

	++hammer_stats_file_iopsr;
	ip = VTOI(ap->a_vp);

	if ((ap->a_mode & FWRITE) && (ip->flags & HAMMER_INODE_RO))
		return (EROFS);
	return(vop_stdopen(ap));
}

/*
 * hammer_vop_print { vp }
 */
static
int
hammer_vop_print(struct vop_print_args *ap)
{
	return EOPNOTSUPP;
}

/*
 * hammer_vop_readdir { vp, uio, cred, *eofflag, *ncookies, off_t **cookies }
 */
static
int
hammer_vop_readdir(struct vop_readdir_args *ap)
{
	struct hammer_transaction trans;
	struct hammer_cursor cursor;
	struct hammer_inode *ip;
	struct uio *uio;
	hammer_base_elm_t base;
	int error;
	int cookie_index;
	int ncookies;
	off_t *cookies;
	off_t saveoff;
	int r;
	int dtype;

	++hammer_stats_file_iopsr;
	ip = VTOI(ap->a_vp);
	uio = ap->a_uio;
	saveoff = uio->uio_offset;

	if (ap->a_ncookies) {
		ncookies = uio->uio_resid / 16 + 1;
		if (ncookies > 1024)
			ncookies = 1024;
		cookies = kmalloc(ncookies * sizeof(off_t), M_TEMP, M_WAITOK);
		cookie_index = 0;
	} else {
		ncookies = -1;
		cookies = NULL;
		cookie_index = 0;
	}
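
	/*
	 * The cookie array above is sized from a rough lower bound on
	 * dirent size (16 bytes per entry, presumably the smallest the
	 * uio could accept) and capped at 1024 cookies per call.
	 */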

	hammer_simple_transaction(&trans, ip->hmp);

	/*
	 * Handle artificial entries
	 *
	 * It should be noted that the minimum value for a directory
	 * hash key on-media is 0x0000000100000000, so we can use anything
	 * less than that to represent our 'special' key space.
	 */
	error = 0;
	if (saveoff == 0) {
		r = vop_write_dirent(&error, uio, ip->obj_id, DT_DIR, 1, ".");
		if (r)
			goto done;
		if (cookies)
			cookies[cookie_index] = saveoff;
		++saveoff;
		++cookie_index;
		if (cookie_index == ncookies)
			goto done;
	}
	if (saveoff == 1) {
		if (ip->ino_data.parent_obj_id) {
			r = vop_write_dirent(&error, uio,
					     ip->ino_data.parent_obj_id,
					     DT_DIR, 2, "..");
		} else {
			r = vop_write_dirent(&error, uio,
					     ip->obj_id, DT_DIR, 2, "..");
		}
		if (r)
			goto done;
		if (cookies)
			cookies[cookie_index] = saveoff;
		++saveoff;
		++cookie_index;
		if (cookie_index == ncookies)
			goto done;
	}

	/*
	 * Key range (begin and end inclusive) to scan.  Directory keys
	 * directly translate to a 64 bit 'seek' position.
	 */
	hammer_init_cursor(&trans, &cursor, &ip->cache[1], ip);
	cursor.key_beg.localization = ip->obj_localization +
				      hammer_dir_localization(ip);
	cursor.key_beg.obj_id = ip->obj_id;
	cursor.key_beg.create_tid = 0;
	cursor.key_beg.delete_tid = 0;
	cursor.key_beg.rec_type = HAMMER_RECTYPE_DIRENTRY;
	cursor.key_beg.obj_type = 0;
	cursor.key_beg.key = saveoff;

	cursor.key_end = cursor.key_beg;
	cursor.key_end.key = HAMMER_MAX_KEY;
	cursor.asof = ip->obj_asof;
	cursor.flags |= HAMMER_CURSOR_END_INCLUSIVE | HAMMER_CURSOR_ASOF;

	error = hammer_ip_first(&cursor);

	while (error == 0) {
		error = hammer_ip_resolve_data(&cursor);
		if (error)
			break;
		base = &cursor.leaf->base;
		saveoff = base->key;
		KKASSERT(cursor.leaf->data_len > HAMMER_ENTRY_NAME_OFF);

		if (base->obj_id != ip->obj_id)
			panic("readdir: bad record at %p", cursor.node);

		/*
		 * Convert pseudo-filesystems into softlinks
		 */
		dtype = hammer_get_dtype(cursor.leaf->base.obj_type);
		r = vop_write_dirent(
			&error, uio, cursor.data->entry.obj_id,
			dtype,
			cursor.leaf->data_len - HAMMER_ENTRY_NAME_OFF,
			(void *)cursor.data->entry.name);
		if (r)
			break;
		++saveoff;
		if (cookies)
			cookies[cookie_index] = base->key;
		++cookie_index;
		if (cookie_index == ncookies)
			break;
		error = hammer_ip_next(&cursor);
	}
	hammer_done_cursor(&cursor);

done:
	hammer_done_transaction(&trans);

	if (ap->a_eofflag)
		*ap->a_eofflag = (error == ENOENT);
	uio->uio_offset = saveoff;
	if (error && cookie_index == 0) {
		if (error == ENOENT)
			error = 0;
		if (cookies) {
			kfree(cookies, M_TEMP);
			*ap->a_ncookies = 0;
			*ap->a_cookies = NULL;
		}
	} else {
		if (error == ENOENT)
			error = 0;
		if (cookies) {
			*ap->a_ncookies = cookie_index;
			*ap->a_cookies = cookies;
		}
	}
	return(error);
}

/*
 * hammer_vop_readlink { vp, uio, cred }
 */
static
int
hammer_vop_readlink(struct vop_readlink_args *ap)
{
	struct hammer_transaction trans;
	struct hammer_cursor cursor;
	struct hammer_inode *ip;
	char buf[32];
	u_int32_t localization;
	hammer_pseudofs_inmem_t pfsm;
	int error;

	ip = VTOI(ap->a_vp);

	/*
	 * Shortcut if the symlink data was stuffed into ino_data.
	 *
	 * Also expand special "@@PFS%05d" softlinks (expansion only
	 * occurs for non-historical (current) accesses made from the
	 * primary filesystem).
	 */
	if (ip->ino_data.size <= HAMMER_INODE_BASESYMLEN) {
		char *ptr;
		int bytes;

		ptr = ip->ino_data.ext.symlink;
		bytes = (int)ip->ino_data.size;
		if (bytes == 10 &&
		    ip->obj_asof == HAMMER_MAX_TID &&
		    ip->obj_localization == 0 &&
		    strncmp(ptr, "@@PFS", 5) == 0) {
			hammer_simple_transaction(&trans, ip->hmp);
			bcopy(ptr + 5, buf, 5);
			buf[5] = 0;
			localization = strtoul(buf, NULL, 10) << 16;
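			/*
			 * localization now carries the five-digit PFS id
			 * from the link text in its upper 16 bits.
			 */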
			pfsm = hammer_load_pseudofs(&trans, localization,
						    &error);
			if (error == 0) {
				if (pfsm->pfsd.mirror_flags &
				    HAMMER_PFSD_SLAVE) {
					/* vap->va_size == 26 */
					ksnprintf(buf, sizeof(buf),
						  "@@0x%016llx:%05d",
						  (long long)pfsm->pfsd.sync_end_tid,
						  localization >> 16);
				} else {
					/* vap->va_size == 10 */
					ksnprintf(buf, sizeof(buf),
						  "@@-1:%05d",
						  localization >> 16);
#if 0
					ksnprintf(buf, sizeof(buf),
						  "@@0x%016llx:%05d",
						  (long long)HAMMER_MAX_TID,
						  localization >> 16);
#endif
				}
				ptr = buf;
				bytes = strlen(buf);
			}
			if (pfsm)
				hammer_rel_pseudofs(trans.hmp, pfsm);
			hammer_done_transaction(&trans);
		}
		error = uiomove(ptr, bytes, ap->a_uio);
		return(error);
	}

	/*
	 * Long version
	 */
	hammer_simple_transaction(&trans, ip->hmp);
	++hammer_stats_file_iopsr;
	hammer_init_cursor(&trans, &cursor, &ip->cache[1], ip);

	/*
	 * Key range (begin and end inclusive) to scan.  Directory keys
	 * directly translate to a 64 bit 'seek' position.
	 */
	cursor.key_beg.localization = ip->obj_localization +
				      HAMMER_LOCALIZE_MISC;
	cursor.key_beg.obj_id = ip->obj_id;
	cursor.key_beg.create_tid = 0;
	cursor.key_beg.delete_tid = 0;
	cursor.key_beg.rec_type = HAMMER_RECTYPE_FIX;
	cursor.key_beg.obj_type = 0;
	cursor.key_beg.key = HAMMER_FIXKEY_SYMLINK;
	cursor.asof = ip->obj_asof;
	cursor.flags |= HAMMER_CURSOR_ASOF;

	error = hammer_ip_lookup(&cursor);
	if (error == 0) {
		error = hammer_ip_resolve_data(&cursor);
		if (error == 0) {
			KKASSERT(cursor.leaf->data_len >=
				 HAMMER_SYMLINK_NAME_OFF);
			error = uiomove(cursor.data->symlink.name,
					cursor.leaf->data_len -
					HAMMER_SYMLINK_NAME_OFF,
					ap->a_uio);
		}
	}
	hammer_done_cursor(&cursor);
	hammer_done_transaction(&trans);
	return(error);
}

/*
 * hammer_vop_nremove { nch, dvp, cred }
 */
static
int
hammer_vop_nremove(struct vop_nremove_args *ap)
{
	struct hammer_transaction trans;
	struct hammer_inode *dip;
	int error;

	dip = VTOI(ap->a_dvp);

	if (hammer_nohistory(dip) == 0 &&
	    (error = hammer_checkspace(dip->hmp, HAMMER_CHKSPC_REMOVE)) != 0) {
		return (error);
	}

	hammer_start_transaction(&trans, dip->hmp);
	++hammer_stats_file_iopsw;
	error = hammer_dounlink(&trans, ap->a_nch, ap->a_dvp, ap->a_cred, 0, 0);
	hammer_done_transaction(&trans);
	if (error == 0)
		hammer_knote(ap->a_dvp, NOTE_WRITE);
	return (error);
}

/*
 * hammer_vop_nrename { fnch, tnch, fdvp, tdvp, cred }
 */
static
int
hammer_vop_nrename(struct vop_nrename_args *ap)
{
	struct hammer_transaction trans;
	struct namecache *fncp;
	struct namecache *tncp;
	struct hammer_inode *fdip;
	struct hammer_inode *tdip;
	struct hammer_inode *ip;
	struct hammer_cursor cursor;
	int64_t namekey;
	u_int32_t max_iterations;
	int nlen, error;

	if (ap->a_fdvp->v_mount != ap->a_tdvp->v_mount)
		return(EXDEV);
	if (ap->a_fdvp->v_mount != ap->a_fnch->ncp->nc_vp->v_mount)
		return(EXDEV);

	fdip = VTOI(ap->a_fdvp);
	tdip = VTOI(ap->a_tdvp);
	fncp = ap->a_fnch->ncp;
	tncp = ap->a_tnch->ncp;
	ip = VTOI(fncp->nc_vp);
	KKASSERT(ip != NULL);

	if (fdip->obj_localization != tdip->obj_localization)
		return(EXDEV);
	if (fdip->obj_localization != ip->obj_localization)
		return(EXDEV);

	if (fdip->flags & HAMMER_INODE_RO)
		return (EROFS);
	if (tdip->flags & HAMMER_INODE_RO)
		return (EROFS);
	if (ip->flags & HAMMER_INODE_RO)
		return (EROFS);
	if ((error = hammer_checkspace(fdip->hmp, HAMMER_CHKSPC_CREATE)) != 0)
		return (error);

	hammer_start_transaction(&trans, fdip->hmp);
	++hammer_stats_file_iopsw;

	/*
	 * Remove tncp from the target directory and then link ip as
	 * tncp.  XXX pass trans to dounlink
	 *
	 * Force the inode sync-time to match the transaction so it is
	 * in-sync with the creation of the target directory entry.
	 */
	error = hammer_dounlink(&trans, ap->a_tnch, ap->a_tdvp,
				ap->a_cred, 0, -1);
	if (error == 0 || error == ENOENT) {
		error = hammer_ip_add_directory(&trans, tdip,
						tncp->nc_name, tncp->nc_nlen,
						ip);
		if (error == 0) {
			ip->ino_data.parent_obj_id = tdip->obj_id;
			ip->ino_data.ctime = trans.time;
			hammer_modify_inode(ip, HAMMER_INODE_DDIRTY);
		}
	}
	if (error)
		goto failed; /* XXX */

	/*
	 * Locate the record in the originating directory and remove it.
	 *
	 * Calculate the namekey and setup the key range for the scan.  This
	 * works kinda like a chained hash table where the lower 32 bits
	 * of the namekey synthesize the chain.
	 *
	 * The key range is inclusive of both key_beg and key_end.
	 */
	namekey = hammer_directory_namekey(fdip, fncp->nc_name, fncp->nc_nlen,
					   &max_iterations);
retry:
	hammer_init_cursor(&trans, &cursor, &fdip->cache[1], fdip);
	cursor.key_beg.localization = fdip->obj_localization +
				      hammer_dir_localization(fdip);
	cursor.key_beg.obj_id = fdip->obj_id;
	cursor.key_beg.key = namekey;
	cursor.key_beg.create_tid = 0;
	cursor.key_beg.delete_tid = 0;
	cursor.key_beg.rec_type = HAMMER_RECTYPE_DIRENTRY;
	cursor.key_beg.obj_type = 0;

	cursor.key_end = cursor.key_beg;
	cursor.key_end.key += max_iterations;
	cursor.asof = fdip->obj_asof;
	cursor.flags |= HAMMER_CURSOR_END_INCLUSIVE | HAMMER_CURSOR_ASOF;

	/*
	 * Scan all matching records (the chain), locate the one matching
	 * the requested path component.
	 *
	 * The hammer_ip_*() functions merge in-memory records with on-disk
	 * records for the purposes of the search.
	 */
	error = hammer_ip_first(&cursor);
	while (error == 0) {
		if (hammer_ip_resolve_data(&cursor) != 0)
			break;
		nlen = cursor.leaf->data_len - HAMMER_ENTRY_NAME_OFF;
		KKASSERT(nlen > 0);
		if (fncp->nc_nlen == nlen &&
		    bcmp(fncp->nc_name, cursor.data->entry.name, nlen) == 0) {
			break;
		}
		error = hammer_ip_next(&cursor);
	}

	/*
	 * If all is ok we have to get the inode so we can adjust nlinks.
	 *
	 * WARNING: hammer_ip_del_directory() may have to terminate the
	 * cursor to avoid a recursion.  It's ok to call hammer_done_cursor()
	 * twice.
	 */
	if (error == 0)
		error = hammer_ip_del_directory(&trans, &cursor, fdip, ip);

	/*
	 * XXX A deadlock here will break rename's atomicity for the
	 * purposes of crash recovery.
	 */
	if (error == EDEADLK) {
		hammer_done_cursor(&cursor);
		goto retry;
	}

	/*
	 * Cleanup and tell the kernel that the rename succeeded.
	 */
	hammer_done_cursor(&cursor);
	if (error == 0) {
		cache_rename(ap->a_fnch, ap->a_tnch);
		hammer_knote(ap->a_fdvp, NOTE_WRITE);
		hammer_knote(ap->a_tdvp, NOTE_WRITE);
		if (ip->vp)
			hammer_knote(ip->vp, NOTE_RENAME);
	}

failed:
	hammer_done_transaction(&trans);
	return (error);
}

/*
 * hammer_vop_nrmdir { nch, dvp, cred }
 */
static
int
hammer_vop_nrmdir(struct vop_nrmdir_args *ap)
{
	struct hammer_transaction trans;
	struct hammer_inode *dip;
	int error;

	dip = VTOI(ap->a_dvp);

	if (hammer_nohistory(dip) == 0 &&
	    (error = hammer_checkspace(dip->hmp, HAMMER_CHKSPC_REMOVE)) != 0) {
		return (error);
	}

	hammer_start_transaction(&trans, dip->hmp);
	++hammer_stats_file_iopsw;
	error = hammer_dounlink(&trans, ap->a_nch, ap->a_dvp, ap->a_cred, 0, 1);
	hammer_done_transaction(&trans);
	if (error == 0)
		hammer_knote(ap->a_dvp, NOTE_WRITE | NOTE_LINK);
	return (error);
}

/*
 * hammer_vop_markatime { vp, cred }
 */
static
int
hammer_vop_markatime(struct vop_markatime_args *ap)
{
	struct hammer_transaction trans;
	struct hammer_inode *ip;

	ip = VTOI(ap->a_vp);
	if (ap->a_vp->v_mount->mnt_flag & MNT_RDONLY)
		return (EROFS);
	if (ip->flags & HAMMER_INODE_RO)
		return (EROFS);
	if (ip->hmp->mp->mnt_flag & MNT_NOATIME)
		return (0);
	hammer_start_transaction(&trans, ip->hmp);
	++hammer_stats_file_iopsw;

	ip->ino_data.atime = trans.time;
	hammer_modify_inode(ip, HAMMER_INODE_ATIME);
	hammer_done_transaction(&trans);
	hammer_knote(ap->a_vp, NOTE_ATTRIB);
	return (0);
}

/*
 * hammer_vop_setattr { vp, vap, cred }
 */
static
int
hammer_vop_setattr(struct vop_setattr_args *ap)
{
	struct hammer_transaction trans;
	struct vattr *vap;
	struct hammer_inode *ip;
	int modflags;
	int error;
	int truncating;
	int blksize;
	int kflags;
	int64_t aligned_size;
	u_int32_t flags;

	vap = ap->a_vap;
	ip = ap->a_vp->v_data;
	modflags = 0;
	kflags = 0;

	if (ap->a_vp->v_mount->mnt_flag & MNT_RDONLY)
		return(EROFS);
	if (ip->flags & HAMMER_INODE_RO)
		return (EROFS);
	if (hammer_nohistory(ip) == 0 &&
	    (error = hammer_checkspace(ip->hmp, HAMMER_CHKSPC_REMOVE)) != 0) {
		return (error);
	}

	hammer_start_transaction(&trans, ip->hmp);
	++hammer_stats_file_iopsw;
	error = 0;

	if (vap->va_flags != VNOVAL) {
		flags = ip->ino_data.uflags;
		error = vop_helper_setattr_flags(&flags, vap->va_flags,
					 hammer_to_unix_xid(&ip->ino_data.uid),
					 ap->a_cred);
		if (error == 0) {
			if (ip->ino_data.uflags != flags) {
				ip->ino_data.uflags = flags;
				ip->ino_data.ctime = trans.time;
				modflags |= HAMMER_INODE_DDIRTY;
				kflags |= NOTE_ATTRIB;
			}
			if (ip->ino_data.uflags & (IMMUTABLE | APPEND)) {
				error = 0;
				goto done;
			}
		}
		goto done;
	}
	if (ip->ino_data.uflags & (IMMUTABLE | APPEND)) {
		error = EPERM;
		goto done;
	}
	if (vap->va_uid != (uid_t)VNOVAL || vap->va_gid != (gid_t)VNOVAL) {
		mode_t cur_mode = ip->ino_data.mode;
		uid_t cur_uid = hammer_to_unix_xid(&ip->ino_data.uid);
		gid_t cur_gid = hammer_to_unix_xid(&ip->ino_data.gid);
		uuid_t uuid_uid;
		uuid_t uuid_gid;

		error = vop_helper_chown(ap->a_vp, vap->va_uid, vap->va_gid,
					 ap->a_cred,
					 &cur_uid, &cur_gid, &cur_mode);
		if (error == 0) {
			hammer_guid_to_uuid(&uuid_uid, cur_uid);
			hammer_guid_to_uuid(&uuid_gid, cur_gid);
			if (bcmp(&uuid_uid, &ip->ino_data.uid,
				 sizeof(uuid_uid)) ||
			    bcmp(&uuid_gid, &ip->ino_data.gid,
				 sizeof(uuid_gid)) ||
			    ip->ino_data.mode != cur_mode
			) {
				ip->ino_data.uid = uuid_uid;
				ip->ino_data.gid = uuid_gid;
				ip->ino_data.mode = cur_mode;
				ip->ino_data.ctime = trans.time;
				modflags |= HAMMER_INODE_DDIRTY;
			}
			kflags |= NOTE_ATTRIB;
		}
	}
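
	/*
	 * Resize the file, if requested.  Note that the while() below is
	 * effectively a single-pass construct: every path either breaks
	 * out of the switch and then hits the unconditional break at the
	 * bottom, or goes to done.
	 */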
	while (vap->va_size != VNOVAL && ip->ino_data.size != vap->va_size) {
		switch (ap->a_vp->v_type) {
		case VREG:
			if (vap->va_size == ip->ino_data.size)
				break;
			/*
			 * XXX break atomicity, we can deadlock the backend
			 * if we do not release the lock.  Probably not a
			 * big deal here.
			 */
			blksize = hammer_blocksize(vap->va_size);
			if (vap->va_size < ip->ino_data.size) {
				vtruncbuf(ap->a_vp, vap->va_size, blksize);
				truncating = 1;
				kflags |= NOTE_WRITE;
			} else {
				vnode_pager_setsize(ap->a_vp, vap->va_size);
				truncating = 0;
				kflags |= NOTE_WRITE | NOTE_EXTEND;
			}
			ip->ino_data.size = vap->va_size;
			ip->ino_data.mtime = trans.time;
			modflags |= HAMMER_INODE_MTIME | HAMMER_INODE_DDIRTY;

			/*
			 * on-media truncation is cached in the inode until
			 * the inode is synchronized.
			 */
			if (truncating) {
				hammer_ip_frontend_trunc(ip, vap->va_size);
#ifdef DEBUG_TRUNCATE
				if (HammerTruncIp == NULL)
					HammerTruncIp = ip;
#endif
				if ((ip->flags & HAMMER_INODE_TRUNCATED) == 0) {
					ip->flags |= HAMMER_INODE_TRUNCATED;
					ip->trunc_off = vap->va_size;
#ifdef DEBUG_TRUNCATE
					if (ip == HammerTruncIp)
						kprintf("truncate1 %016llx\n",
							(long long)ip->trunc_off);
#endif
				} else if (ip->trunc_off > vap->va_size) {
					ip->trunc_off = vap->va_size;
#ifdef DEBUG_TRUNCATE
					if (ip == HammerTruncIp)
						kprintf("truncate2 %016llx\n",
							(long long)ip->trunc_off);
#endif
				} else {
#ifdef DEBUG_TRUNCATE
					if (ip == HammerTruncIp)
						kprintf("truncate3 %016llx (ignored)\n",
							(long long)vap->va_size);
#endif
				}
			}

			/*
			 * If truncating we have to clean out a portion of
			 * the last block on-disk.  We do this in the
			 * front-end buffer cache.
			 */
			aligned_size = (vap->va_size + (blksize - 1)) &
				       ~(int64_t)(blksize - 1);
			if (truncating && vap->va_size < aligned_size) {
				struct buf *bp;
				int offset;

				aligned_size -= blksize;

				offset = (int)vap->va_size & (blksize - 1);
				error = bread(ap->a_vp, aligned_size,
					      blksize, &bp);
				hammer_ip_frontend_trunc(ip, aligned_size);
				if (error == 0) {
					bzero(bp->b_data + offset,
					      blksize - offset);
					/* must de-cache direct-io offset */
					bp->b_bio2.bio_offset = NOOFFSET;
					bdwrite(bp);
				} else {
					kprintf("ERROR %d\n", error);
					brelse(bp);
				}
			}
			break;
		case VDATABASE:
			if ((ip->flags & HAMMER_INODE_TRUNCATED) == 0) {
				ip->flags |= HAMMER_INODE_TRUNCATED;
				ip->trunc_off = vap->va_size;
			} else if (ip->trunc_off > vap->va_size) {
				ip->trunc_off = vap->va_size;
			}
			hammer_ip_frontend_trunc(ip, vap->va_size);
			ip->ino_data.size = vap->va_size;
			ip->ino_data.mtime = trans.time;
			modflags |= HAMMER_INODE_MTIME | HAMMER_INODE_DDIRTY;
			kflags |= NOTE_ATTRIB;
			break;
		default:
			error = EINVAL;
			goto done;
		}
		break;
	}
2072 if (vap->va_atime.tv_sec != VNOVAL) {
2073 ip->ino_data.atime = hammer_timespec_to_time(&vap->va_atime);
2074 modflags |= HAMMER_INODE_ATIME;
2075 kflags |= NOTE_ATTRIB;
2077 if (vap->va_mtime.tv_sec != VNOVAL) {
2078 ip->ino_data.mtime = hammer_timespec_to_time(&vap->va_mtime);
2079 modflags |= HAMMER_INODE_MTIME;
2080 kflags |= NOTE_ATTRIB;
2081 }
2082 if (vap->va_mode != (mode_t)VNOVAL) {
2083 mode_t cur_mode = ip->ino_data.mode;
2084 uid_t cur_uid = hammer_to_unix_xid(&ip->ino_data.uid);
2085 gid_t cur_gid = hammer_to_unix_xid(&ip->ino_data.gid);
2087 error = vop_helper_chmod(ap->a_vp, vap->va_mode, ap->a_cred,
2088 cur_uid, cur_gid, &cur_mode);
2089 if (error == 0 && ip->ino_data.mode != cur_mode) {
2090 ip->ino_data.mode = cur_mode;
2091 ip->ino_data.ctime = trans.time;
2092 modflags |= HAMMER_INODE_DDIRTY;
2093 kflags |= NOTE_ATTRIB;
2094 }
2095 }
2096 done:
2097 if (error == 0)
2098 hammer_modify_inode(ip, modflags);
2099 hammer_done_transaction(&trans);
2100 hammer_knote(ap->a_vp, kflags);
2101 return (error);
2102 }
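/*
 * Illustrative sketch (hypothetical, not HAMMER code): the two bits of
 * arithmetic the truncation path above relies on.  example_align_up()
 * mirrors the aligned_size computation and example_cache_trunc() shows
 * how the cached truncation offset only ratchets downward until the
 * backend synchronizes it.  All example_* names are invented here.
 */
#if 0
static int64_t
example_align_up(int64_t size, int64_t blksize)
{
	/* round size up to the next blksize boundary; blksize is a power of 2 */
	return ((size + (blksize - 1)) & ~(blksize - 1));
}

#define EXAMPLE_TRUNCATED	0x0001	/* stand-in for HAMMER_INODE_TRUNCATED */

static void
example_cache_trunc(int *flagsp, int64_t *trunc_offp, int64_t new_size)
{
	if ((*flagsp & EXAMPLE_TRUNCATED) == 0) {
		*flagsp |= EXAMPLE_TRUNCATED;
		*trunc_offp = new_size;
	} else if (*trunc_offp > new_size) {
		*trunc_offp = new_size;		/* only ever moves downward */
	}
	/* truncating above the cached offset is a no-op until synced */
}
#endif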
2104 /*
2105 * hammer_vop_nsymlink { nch, dvp, vpp, cred, vap, target }
2106 */
2107 static
2108 int
2109 hammer_vop_nsymlink(struct vop_nsymlink_args *ap)
2110 {
2111 struct hammer_transaction trans;
2112 struct hammer_inode *dip;
2113 struct hammer_inode *nip;
2114 struct nchandle *nch;
2115 hammer_record_t record;
2116 int error;
2117 int bytes;
2119 ap->a_vap->va_type = VLNK;
2121 nch = ap->a_nch;
2122 dip = VTOI(ap->a_dvp);
2124 if (dip->flags & HAMMER_INODE_RO)
2125 return (EROFS);
2126 if ((error = hammer_checkspace(dip->hmp, HAMMER_CHKSPC_CREATE)) != 0)
2127 return (error);
2129 /*
2130 * Create a transaction to cover the operations we perform.
2131 */
2132 hammer_start_transaction(&trans, dip->hmp);
2133 ++hammer_stats_file_iopsw;
2135 /*
2136 * Create a new filesystem object of the requested type. The
2137 * returned inode will be referenced but not locked.
2138 */
2140 error = hammer_create_inode(&trans, ap->a_vap, ap->a_cred,
2141 dip, nch->ncp->nc_name, nch->ncp->nc_nlen,
2142 NULL, &nip);
2143 if (error) {
2144 hammer_done_transaction(&trans);
2145 *ap->a_vpp = NULL;
2146 return (error);
2147 }
2149 /*
2150 * Add a record representing the symlink. The symlink is stored
2151 * as pure data, not a string, and is not \0-terminated.
2152 */
2153 if (error == 0) {
2154 bytes = strlen(ap->a_target);
2156 if (bytes <= HAMMER_INODE_BASESYMLEN) {
2157 bcopy(ap->a_target, nip->ino_data.ext.symlink, bytes);
2158 } else {
2159 record = hammer_alloc_mem_record(nip, bytes);
2160 record->type = HAMMER_MEM_RECORD_GENERAL;
2162 record->leaf.base.localization = nip->obj_localization +
2163 HAMMER_LOCALIZE_MISC;
2164 record->leaf.base.key = HAMMER_FIXKEY_SYMLINK;
2165 record->leaf.base.rec_type = HAMMER_RECTYPE_FIX;
2166 record->leaf.data_len = bytes;
2167 KKASSERT(HAMMER_SYMLINK_NAME_OFF == 0);
2168 bcopy(ap->a_target, record->data->symlink.name, bytes);
2169 error = hammer_ip_add_record(&trans, record);
2170 }
2172 /*
2173 * Set the file size to the length of the link.
2174 */
2175 if (error == 0) {
2176 nip->ino_data.size = bytes;
2177 hammer_modify_inode(nip, HAMMER_INODE_DDIRTY);
2178 }
2179 }
2180 if (error == 0)
2181 error = hammer_ip_add_directory(&trans, dip, nch->ncp->nc_name,
2182 nch->ncp->nc_nlen, nip);
2184 /*
2185 * Finish up.
2186 */
2187 if (error) {
2188 hammer_rel_inode(nip, 0);
2189 *ap->a_vpp = NULL;
2190 } else {
2191 error = hammer_get_vnode(nip, ap->a_vpp);
2192 hammer_rel_inode(nip, 0);
2193 if (error == 0) {
2194 cache_setunresolved(ap->a_nch);
2195 cache_setvp(ap->a_nch, *ap->a_vpp);
2196 hammer_knote(ap->a_dvp, NOTE_WRITE);
2197 }
2198 }
2199 hammer_done_transaction(&trans);
2200 return (error);
2201 }
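/*
 * Illustrative sketch (hypothetical, not HAMMER code): the storage
 * decision made above.  A short target is copied inline into the
 * inode's embedded extension area; a longer one needs a separate
 * fixed-key record.  EX_BASESYMLEN stands in for
 * HAMMER_INODE_BASESYMLEN; the value chosen here is an assumption.
 */
#if 0
#define EX_BASESYMLEN	24		/* assumed inline capacity */

static int
example_symlink_inline(const char *target, char *inline_area)
{
	int bytes = strlen(target);	/* stored without a trailing \0 */

	if (bytes > EX_BASESYMLEN)
		return (0);		/* caller must create a record */
	bcopy(target, inline_area, bytes);
	return (1);			/* fit in the inode itself */
}
#endif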
2203 /*
2204 * hammer_vop_nwhiteout { nch, dvp, cred, flags }
2205 */
2206 static
2207 int
2208 hammer_vop_nwhiteout(struct vop_nwhiteout_args *ap)
2209 {
2210 struct hammer_transaction trans;
2211 struct hammer_inode *dip;
2212 int error;
2214 dip = VTOI(ap->a_dvp);
2216 if (hammer_nohistory(dip) == 0 &&
2217 (error = hammer_checkspace(dip->hmp, HAMMER_CHKSPC_CREATE)) != 0) {
2218 return (error);
2219 }
2221 hammer_start_transaction(&trans, dip->hmp);
2222 ++hammer_stats_file_iopsw;
2223 error = hammer_dounlink(&trans, ap->a_nch, ap->a_dvp,
2224 ap->a_cred, ap->a_flags, -1);
2225 hammer_done_transaction(&trans);
2227 return (error);
2228 }
2230 /*
2231 * hammer_vop_ioctl { vp, command, data, fflag, cred }
2232 */
2233 static
2234 int
2235 hammer_vop_ioctl(struct vop_ioctl_args *ap)
2236 {
2237 struct hammer_inode *ip = ap->a_vp->v_data;
2239 ++hammer_stats_file_iopsr;
2240 return(hammer_ioctl(ip, ap->a_command, ap->a_data,
2241 ap->a_fflag, ap->a_cred));
2242 }
2244 static
2245 int
2246 hammer_vop_mountctl(struct vop_mountctl_args *ap)
2247 {
2248 static const struct mountctl_opt extraopt[] = {
2249 { HMNT_NOHISTORY, "nohistory" },
2250 { HMNT_MASTERID, "master" },
2251 { 0, NULL}
2252 };
2254 struct hammer_mount *hmp;
2255 struct mount *mp;
2256 int usedbytes;
2257 int error;
2259 error = 0;
2260 usedbytes = 0;
2261 mp = ap->a_head.a_ops->head.vv_mount;
2262 KKASSERT(mp->mnt_data != NULL);
2263 hmp = (struct hammer_mount *)mp->mnt_data;
2265 switch(ap->a_op) {
2267 case MOUNTCTL_SET_EXPORT:
2268 if (ap->a_ctllen != sizeof(struct export_args))
2269 error = EINVAL;
2270 else
2271 error = hammer_vfs_export(mp, ap->a_op,
2272 (const struct export_args *)ap->a_ctl);
2273 break;
2274 case MOUNTCTL_MOUNTFLAGS:
2275 {
2276 /*
2277 * Call standard mountctl VOP function
2278 * so we get user mount flags.
2279 */
2280 error = vop_stdmountctl(ap);
2281 if (error)
2282 break;
2284 usedbytes = *ap->a_res;
2286 if (usedbytes > 0 && usedbytes < ap->a_buflen) {
2287 usedbytes += vfs_flagstostr(hmp->hflags, extraopt, ap->a_buf,
2288 ap->a_buflen - usedbytes,
2289 &error);
2290 }
2292 *ap->a_res += usedbytes;
2293 break;
2294 }
2295 default:
2296 error = vop_stdmountctl(ap);
2297 break;
2298 }
2299 return(error);
2300 }
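/*
 * Illustrative sketch (hypothetical, not HAMMER or kernel API): the
 * accumulation pattern MOUNTCTL_MOUNTFLAGS uses above, where extra
 * filesystem-specific option names are appended after the standard
 * ones and the byte count consumed is returned.  example_flagstostr()
 * is a stand-in for vfs_flagstostr() and uses snprintf() for brevity.
 */
#if 0
struct example_opt {
	int		flag;
	const char	*name;
};

static int
example_flagstostr(int hflags, const struct example_opt *opts,
		   char *buf, int buflen)
{
	int used = 0;
	int n;

	for (; opts->name != NULL; ++opts) {
		if ((hflags & opts->flag) == 0)
			continue;
		n = snprintf(buf + used, buflen - used, "%s%s",
			     used ? "," : "", opts->name);
		if (n < 0 || n >= buflen - used)
			break;			/* out of room, stop */
		used += n;
	}
	return (used);
}
#endif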
2302 /*
2303 * hammer_vop_strategy { vp, bio }
2304 *
2305 * Strategy call, used for regular file read & write only. Note that the
2306 * bp may represent a cluster.
2307 *
2308 * To simplify operation and allow better optimizations in the future,
2309 * this code does not make any assumptions with regards to buffer alignment
2310 * or size.
2311 */
2312 static
2313 int
2314 hammer_vop_strategy(struct vop_strategy_args *ap)
2315 {
2316 struct buf *bp;
2317 int error;
2319 bp = ap->a_bio->bio_buf;
2321 switch(bp->b_cmd) {
2322 case BUF_CMD_READ:
2323 error = hammer_vop_strategy_read(ap);
2324 break;
2325 case BUF_CMD_WRITE:
2326 error = hammer_vop_strategy_write(ap);
2327 break;
2328 default:
2329 bp->b_error = error = EINVAL;
2330 bp->b_flags |= B_ERROR;
2331 biodone(ap->a_bio);
2332 break;
2334 return (error);
2335 }
2337 /*
2338 * Read from a regular file. Iterate the related records and fill in the
2339 * BIO/BUF. Gaps are zero-filled.
2340 *
2341 * The support code in hammer_object.c should be used to deal with mixed
2342 * in-memory and on-disk records.
2343 *
2344 * NOTE: Can be called from the cluster code with an oversized buf.
2345 *
2346 * XXX atime update
2347 */
2348 static
2349 int
2350 hammer_vop_strategy_read(struct vop_strategy_args *ap)
2351 {
2352 struct hammer_transaction trans;
2353 struct hammer_inode *ip;
2354 struct hammer_inode *dip;
2355 struct hammer_cursor cursor;
2356 hammer_base_elm_t base;
2357 hammer_off_t disk_offset;
2358 struct bio *bio;
2359 struct bio *nbio;
2360 struct buf *bp;
2361 int64_t rec_offset;
2362 int64_t ran_end;
2363 int64_t tmp64;
2364 int error;
2365 int boff;
2366 int roff;
2367 int n;
2369 bio = ap->a_bio;
2370 bp = bio->bio_buf;
2371 ip = ap->a_vp->v_data;
2373 /*
2374 * The zone-2 disk offset may have been set by the cluster code via
2375 * a BMAP operation, or else should be NOOFFSET.
2376 *
2377 * Checking the high bits for a match against zone-2 should suffice.
2378 */
2379 nbio = push_bio(bio);
2380 if ((nbio->bio_offset & HAMMER_OFF_ZONE_MASK) ==
2381 HAMMER_ZONE_LARGE_DATA) {
2382 error = hammer_io_direct_read(ip->hmp, nbio, NULL);
2383 return (error);
2384 }
2386 /*
2387 * Well, that sucked. Do it the hard way. If all the stars are
2388 * aligned we may still be able to issue a direct-read.
2389 */
2390 hammer_simple_transaction(&trans, ip->hmp);
2391 hammer_init_cursor(&trans, &cursor, &ip->cache[1], ip);
2393 /*
2394 * Key range (begin and end inclusive) to scan. Note that the keys
2395 * stored in the actual records represent BASE+LEN, not BASE. The
2396 * first record containing bio_offset will have a key > bio_offset.
2397 */
2398 cursor.key_beg.localization = ip->obj_localization +
2399 HAMMER_LOCALIZE_MISC;
2400 cursor.key_beg.obj_id = ip->obj_id;
2401 cursor.key_beg.create_tid = 0;
2402 cursor.key_beg.delete_tid = 0;
2403 cursor.key_beg.obj_type = 0;
2404 cursor.key_beg.key = bio->bio_offset + 1;
2405 cursor.asof = ip->obj_asof;
2406 cursor.flags |= HAMMER_CURSOR_ASOF;
2408 cursor.key_end = cursor.key_beg;
2409 KKASSERT(ip->ino_data.obj_type == HAMMER_OBJTYPE_REGFILE);
2410 #if 0
2411 if (ip->ino_data.obj_type == HAMMER_OBJTYPE_DBFILE) {
2412 cursor.key_beg.rec_type = HAMMER_RECTYPE_DB;
2413 cursor.key_end.rec_type = HAMMER_RECTYPE_DB;
2414 cursor.key_end.key = 0x7FFFFFFFFFFFFFFFLL;
2415 } else
2416 #endif
2417 {
2418 ran_end = bio->bio_offset + bp->b_bufsize;
2419 cursor.key_beg.rec_type = HAMMER_RECTYPE_DATA;
2420 cursor.key_end.rec_type = HAMMER_RECTYPE_DATA;
2421 tmp64 = ran_end + MAXPHYS + 1; /* work-around GCC-4 bug */
2422 if (tmp64 < ran_end)
2423 cursor.key_end.key = 0x7FFFFFFFFFFFFFFFLL;
2424 else
2425 cursor.key_end.key = ran_end + MAXPHYS + 1;
2426 }
2427 cursor.flags |= HAMMER_CURSOR_END_INCLUSIVE;
2429 error = hammer_ip_first(&cursor);
2430 boff = 0;
2432 while (error == 0) {
2433 /*
2434 * Get the base file offset of the record. The key for
2435 * data records is (base + bytes) rather than (base).
2436 */
2437 base = &cursor.leaf->base;
2438 rec_offset = base->key - cursor.leaf->data_len;
2440 /*
2441 * Calculate the gap, if any, and zero-fill it.
2442 *
2443 * n is the offset of the start of the record versus our
2444 * current seek offset in the bio.
2445 */
2446 n = (int)(rec_offset - (bio->bio_offset + boff));
2447 if (n > 0) {
2448 if (n > bp->b_bufsize - boff)
2449 n = bp->b_bufsize - boff;
2450 bzero((char *)bp->b_data + boff, n);
2451 boff += n;
2452 n = 0;
2453 }
2455 /*
2456 * Calculate the data offset in the record and the number
2457 * of bytes we can copy.
2458 *
2459 * There are two degenerate cases. First, boff may already
2460 * be at bp->b_bufsize. Secondly, the data offset within
2461 * the record may exceed the record's size.
2462 */
2463 roff = -n;
2464 rec_offset += roff;
2465 n = cursor.leaf->data_len - roff;
2466 if (n <= 0) {
2467 kprintf("strategy_read: bad n=%d roff=%d\n", n, roff);
2468 n = 0;
2469 } else if (n > bp->b_bufsize - boff) {
2470 n = bp->b_bufsize - boff;
2471 }
2473 /*
2474 * Deal with cached truncations. This cool bit of code
2475 * allows truncate()/ftruncate() to avoid having to sync
2476 * the file.
2477 *
2478 * If the frontend is truncated then all backend records are
2479 * subject to the frontend's truncation.
2480 *
2481 * If the backend is truncated then backend records on-disk
2482 * (but not in-memory) are subject to the backend's
2483 * truncation. In-memory records owned by the backend
2484 * represent data written after the truncation point on the
2485 * backend and must not be truncated.
2486 *
2487 * Truncate operations deal with frontend buffer cache
2488 * buffers and frontend-owned in-memory records synchronously.
2489 */
2490 if (ip->flags & HAMMER_INODE_TRUNCATED) {
2491 if (hammer_cursor_ondisk(&cursor) ||
2492 cursor.iprec->flush_state == HAMMER_FST_FLUSH) {
2493 if (ip->trunc_off <= rec_offset)
2494 n = 0;
2495 else if (ip->trunc_off < rec_offset + n)
2496 n = (int)(ip->trunc_off - rec_offset);
2497 }
2498 }
2499 if (ip->sync_flags & HAMMER_INODE_TRUNCATED) {
2500 if (hammer_cursor_ondisk(&cursor)) {
2501 if (ip->sync_trunc_off <= rec_offset)
2502 n = 0;
2503 else if (ip->sync_trunc_off < rec_offset + n)
2504 n = (int)(ip->sync_trunc_off - rec_offset);
2505 }
2506 }
2508 /*
2509 * Try to issue a direct read into our bio if possible,
2510 * otherwise resolve the element data into a hammer_buffer
2511 * and copy.
2512 *
2513 * The buffer on-disk should be zeroed past any real
2514 * truncation point, but may not be for any synthesized
2515 * truncation point from above.
2516 */
2517 disk_offset = cursor.leaf->data_offset + roff;
2518 if (boff == 0 && n == bp->b_bufsize &&
2519 hammer_cursor_ondisk(&cursor) &&
2520 (disk_offset & HAMMER_BUFMASK) == 0) {
2521 KKASSERT((disk_offset & HAMMER_OFF_ZONE_MASK) ==
2522 HAMMER_ZONE_LARGE_DATA);
2523 nbio->bio_offset = disk_offset;
2524 error = hammer_io_direct_read(trans.hmp, nbio,
2525 cursor.leaf);
2526 goto done;
2527 } else if (n) {
2528 error = hammer_ip_resolve_data(&cursor);
2529 if (error == 0) {
2530 bcopy((char *)cursor.data + roff,
2531 (char *)bp->b_data + boff, n);
2532 }
2533 }
2534 if (error)
2535 break;
2537 /*
2538 * Iterate until we have filled the request.
2539 */
2540 boff += n;
2541 if (boff == bp->b_bufsize)
2542 break;
2543 error = hammer_ip_next(&cursor);
2544 }
2546 /*
2547 * There may have been a gap after the last record.
2548 */
2549 if (error == ENOENT)
2550 error = 0;
2551 if (error == 0 && boff != bp->b_bufsize) {
2552 KKASSERT(boff < bp->b_bufsize);
2553 bzero((char *)bp->b_data + boff, bp->b_bufsize - boff);
2554 /* boff = bp->b_bufsize; */
2555 }
2556 bp->b_resid = 0;
2557 bp->b_error = error;
2558 if (error)
2559 bp->b_flags |= B_ERROR;
2560 biodone(ap->a_bio);
2562 done:
2563 /*
2564 * Cache the b-tree node for the last data read in cache[1].
2565 *
2566 * If we hit the file EOF then also cache the node in the
2567 * governing directory's cache[3]; it will be used to initialize
2568 * the inode's cache[1] for any inodes looked up via the directory.
2569 *
2570 * This doesn't reduce disk accesses since the B-Tree chain is
2571 * likely cached, but it does reduce cpu overhead when looking
2572 * up file offsets for cpdup/tar/cpio style iterations.
2573 */
2574 if (cursor.node)
2575 hammer_cache_node(&ip->cache[1], cursor.node);
2576 if (ran_end >= ip->ino_data.size) {
2577 dip = hammer_find_inode(&trans, ip->ino_data.parent_obj_id,
2578 ip->obj_asof, ip->obj_localization);
2579 if (dip) {
2580 hammer_cache_node(&dip->cache[3], cursor.node);
2581 hammer_rel_inode(dip, 0);
2582 }
2583 }
2584 hammer_done_cursor(&cursor);
2585 hammer_done_transaction(&trans);
2586 return(error);
2587 }
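/*
 * Illustrative sketch (hypothetical, not HAMMER code): the per-record
 * gap/overlap arithmetic used by the read loop above.  The buffer's
 * fill point is (bio_offset + *boffp); any gap ahead of the record is
 * zero-filled, then the overlapping part of the record is copied in.
 * All example_* names are invented here.
 */
#if 0
static void
example_fill_from_record(char *buf, int bufsize, int *boffp,
			 int64_t bio_offset, int64_t rec_offset,
			 const char *rec_data, int data_len)
{
	int n, roff;

	/* gap between the current fill point and the record base */
	n = (int)(rec_offset - (bio_offset + *boffp));
	if (n > 0) {
		if (n > bufsize - *boffp)
			n = bufsize - *boffp;
		bzero(buf + *boffp, n);		/* zero-fill the gap */
		*boffp += n;
		n = 0;
	}

	/* roff is our offset into the record (positive if we start late) */
	roff = -n;
	n = data_len - roff;
	if (n <= 0)
		return;				/* degenerate case */
	if (n > bufsize - *boffp)
		n = bufsize - *boffp;
	bcopy(rec_data + roff, buf + *boffp, n);
	*boffp += n;
}
#endif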
2589 /*
2590 * BMAP operation - used to support cluster_read() only.
2591 *
2592 * (struct vnode *vp, off_t loffset, off_t *doffsetp, int *runp, int *runb)
2593 *
2594 * This routine may return EOPNOTSUPP if the operation is not supported for
2595 * the specified offset. The contents of the pointer arguments do not
2596 * need to be initialized in that case.
2597 *
2598 * If a disk address is available and properly aligned return 0 with
2599 * *doffsetp set to the zone-2 address, and *runp / *runb set appropriately
2600 * to the run-length relative to that offset. Callers may assume that
2601 * *doffsetp is valid if 0 is returned, even if *runp is not sufficiently
2602 * large, so return EOPNOTSUPP if it is not sufficiently large.
2603 */
2604 static
2605 int
2606 hammer_vop_bmap(struct vop_bmap_args *ap)
2607 {
2608 struct hammer_transaction trans;
2609 struct hammer_inode *ip;
2610 struct hammer_cursor cursor;
2611 hammer_base_elm_t base;
2612 int64_t rec_offset;
2613 int64_t ran_end;
2614 int64_t tmp64;
2615 int64_t base_offset;
2616 int64_t base_disk_offset;
2617 int64_t last_offset;
2618 hammer_off_t last_disk_offset;
2619 hammer_off_t disk_offset;
2620 int rec_len;
2621 int error;
2622 int blksize;
2624 ++hammer_stats_file_iopsr;
2625 ip = ap->a_vp->v_data;
2627 /*
2628 * We can only BMAP regular files. We can't BMAP database files,
2629 * directories, etc.
2630 */
2631 if (ip->ino_data.obj_type != HAMMER_OBJTYPE_REGFILE)
2632 return(EOPNOTSUPP);
2634 /*
2635 * bmap is typically called with runp/runb both NULL when used
2636 * for writing. We do not support BMAP for writing atm.
2637 */
2638 if (ap->a_cmd != BUF_CMD_READ)
2639 return(EOPNOTSUPP);
2641 /*
2642 * Scan the B-Tree to acquire blockmap addresses, then translate
2643 * to raw addresses.
2644 */
2645 hammer_simple_transaction(&trans, ip->hmp);
2646 #if 0
2647 kprintf("bmap_beg %016llx ip->cache %p\n",
2648 (long long)ap->a_loffset, ip->cache[1]);
2649 #endif
2650 hammer_init_cursor(&trans, &cursor, &ip->cache[1], ip);
2652 /*
2653 * Key range (begin and end inclusive) to scan. Note that the keys
2654 * stored in the actual records represent BASE+LEN, not BASE. The
2655 * first record containing bio_offset will have a key > bio_offset.
2656 */
2657 cursor.key_beg.localization = ip->obj_localization +
2658 HAMMER_LOCALIZE_MISC;
2659 cursor.key_beg.obj_id = ip->obj_id;
2660 cursor.key_beg.create_tid = 0;
2661 cursor.key_beg.delete_tid = 0;
2662 cursor.key_beg.obj_type = 0;
2663 if (ap->a_runb)
2664 cursor.key_beg.key = ap->a_loffset - MAXPHYS + 1;
2665 else
2666 cursor.key_beg.key = ap->a_loffset + 1;
2667 if (cursor.key_beg.key < 0)
2668 cursor.key_beg.key = 0;
2669 cursor.asof = ip->obj_asof;
2670 cursor.flags |= HAMMER_CURSOR_ASOF;
2672 cursor.key_end = cursor.key_beg;
2673 KKASSERT(ip->ino_data.obj_type == HAMMER_OBJTYPE_REGFILE);
2675 ran_end = ap->a_loffset + MAXPHYS;
2676 cursor.key_beg.rec_type = HAMMER_RECTYPE_DATA;
2677 cursor.key_end.rec_type = HAMMER_RECTYPE_DATA;
2678 tmp64 = ran_end + MAXPHYS + 1; /* work-around GCC-4 bug */
2679 if (tmp64 < ran_end)
2680 cursor.key_end.key = 0x7FFFFFFFFFFFFFFFLL;
2681 else
2682 cursor.key_end.key = ran_end + MAXPHYS + 1;
2684 cursor.flags |= HAMMER_CURSOR_END_INCLUSIVE;
2686 error = hammer_ip_first(&cursor);
2687 base_offset = last_offset = 0;
2688 base_disk_offset = last_disk_offset = 0;
2690 while (error == 0) {
2691 /*
2692 * Get the base file offset of the record. The key for
2693 * data records is (base + bytes) rather than (base).
2694 *
2695 * NOTE: rec_offset + rec_len may exceed the end-of-file.
2696 * The extra bytes should be zero on-disk and the BMAP op
2697 * should still be ok.
2698 */
2699 base = &cursor.leaf->base;
2700 rec_offset = base->key - cursor.leaf->data_len;
2701 rec_len = cursor.leaf->data_len;
2703 /*
2704 * Incorporate any cached truncation.
2705 *
2706 * NOTE: Modifications to rec_len based on synthesized
2707 * truncation points remove the guarantee that any extended
2708 * data on disk is zero (since the truncations may not have
2709 * taken place on-media yet).
2710 */
2711 if (ip->flags & HAMMER_INODE_TRUNCATED) {
2712 if (hammer_cursor_ondisk(&cursor) ||
2713 cursor.iprec->flush_state == HAMMER_FST_FLUSH) {
2714 if (ip->trunc_off <= rec_offset)
2715 rec_len = 0;
2716 else if (ip->trunc_off < rec_offset + rec_len)
2717 rec_len = (int)(ip->trunc_off - rec_offset);
2718 }
2719 }
2720 if (ip->sync_flags & HAMMER_INODE_TRUNCATED) {
2721 if (hammer_cursor_ondisk(&cursor)) {
2722 if (ip->sync_trunc_off <= rec_offset)
2723 rec_len = 0;
2724 else if (ip->sync_trunc_off < rec_offset + rec_len)
2725 rec_len = (int)(ip->sync_trunc_off - rec_offset);
2726 }
2727 }
2729 /*
2730 * Accumulate information. If we have hit a discontiguous
2731 * block reset base_offset unless we are already beyond the
2732 * requested offset. If we are, that's it, we stop.
2733 */
2734 if (error)
2735 break;
2736 if (hammer_cursor_ondisk(&cursor)) {
2737 disk_offset = cursor.leaf->data_offset;
2738 if (rec_offset != last_offset ||
2739 disk_offset != last_disk_offset) {
2740 if (rec_offset > ap->a_loffset)
2741 break;
2742 base_offset = rec_offset;
2743 base_disk_offset = disk_offset;
2744 }
2745 last_offset = rec_offset + rec_len;
2746 last_disk_offset = disk_offset + rec_len;
2747 }
2748 error = hammer_ip_next(&cursor);
2749 }
2751 #if 0
2752 kprintf("BMAP %016llx: %016llx - %016llx\n",
2753 (long long)ap->a_loffset,
2754 (long long)base_offset,
2755 (long long)last_offset);
2756 kprintf("BMAP %16s: %016llx - %016llx\n", "",
2757 (long long)base_disk_offset,
2758 (long long)last_disk_offset);
2759 #endif
2761 if (cursor.node) {
2762 hammer_cache_node(&ip->cache[1], cursor.node);
2763 #if 0
2764 kprintf("bmap_end2 %016llx ip->cache %p\n",
2765 (long long)ap->a_loffset, ip->cache[1]);
2766 #endif
2767 }
2768 hammer_done_cursor(&cursor);
2769 hammer_done_transaction(&trans);
2771 /*
2772 * If we couldn't find any records or the records we did find were
2773 * all behind the requested offset, return failure. A forward
2774 * truncation can leave a hole w/ no on-disk records.
2775 */
2776 if (last_offset == 0 || last_offset < ap->a_loffset)
2777 return (EOPNOTSUPP);
2779 /*
2780 * Figure out the block size at the requested offset and adjust
2781 * our limits so the cluster_read() does not create inappropriately
2782 * sized buffer cache buffers.
2783 */
2784 blksize = hammer_blocksize(ap->a_loffset);
2785 if (hammer_blocksize(base_offset) != blksize) {
2786 base_offset = hammer_blockdemarc(base_offset, ap->a_loffset);
2787 }
2788 if (last_offset != ap->a_loffset &&
2789 hammer_blocksize(last_offset - 1) != blksize) {
2790 last_offset = hammer_blockdemarc(ap->a_loffset,
2791 last_offset - 1);
2792 }
2794 /*
2795 * Returning EOPNOTSUPP simply prevents the direct-IO optimization
2796 * from occurring.
2797 */
2798 disk_offset = base_disk_offset + (ap->a_loffset - base_offset);
2800 if ((disk_offset & HAMMER_OFF_ZONE_MASK) != HAMMER_ZONE_LARGE_DATA) {
2801 /*
2802 * Only large-data zones can be direct-IOd.
2803 */
2804 error = EOPNOTSUPP;
2805 } else if ((disk_offset & HAMMER_BUFMASK) ||
2806 (last_offset - ap->a_loffset) < blksize) {
2807 /*
2808 * doffsetp is not aligned or the forward run size does
2809 * not cover a whole buffer, disallow the direct I/O.
2810 */
2811 error = EOPNOTSUPP;
2812 } else {
2813 /*
2814 * We're good.
2815 */
2816 *ap->a_doffsetp = disk_offset;
2817 if (ap->a_runb) {
2818 *ap->a_runb = ap->a_loffset - base_offset;
2819 KKASSERT(*ap->a_runb >= 0);
2820 }
2821 if (ap->a_runp) {
2822 *ap->a_runp = last_offset - ap->a_loffset;
2823 KKASSERT(*ap->a_runp >= 0);
2824 }
2825 error = 0;
2826 }
2827 return(error);
2828 }
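/*
 * Illustrative sketch (hypothetical, not HAMMER code): the contiguity
 * test the BMAP scan above relies on.  A run keeps extending while
 * each record picks up exactly where the previous one left off in both
 * the file and on the media; any discontinuity restarts the run at
 * that record.  All example_* names are invented here.
 */
#if 0
static void
example_bmap_accumulate(int64_t rec_offset, int64_t disk_offset,
			int rec_len, int64_t *base_offp,
			int64_t *base_diskp, int64_t *last_offp,
			int64_t *last_diskp)
{
	if (rec_offset != *last_offp || disk_offset != *last_diskp) {
		/* discontiguous: restart the run at this record */
		*base_offp = rec_offset;
		*base_diskp = disk_offset;
	}
	*last_offp = rec_offset + rec_len;
	*last_diskp = disk_offset + rec_len;
}
#endif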
2830 /*
2831 * Write to a regular file. Because this is a strategy call the OS is
2832 * trying to actually get data onto the media.
2833 */
2834 static
2835 int
2836 hammer_vop_strategy_write(struct vop_strategy_args *ap)
2837 {
2838 hammer_record_t record;
2839 hammer_mount_t hmp;
2840 hammer_inode_t ip;
2841 struct bio *bio;
2842 struct buf *bp;
2843 int blksize;
2844 int bytes;
2845 int error;
2847 bio = ap->a_bio;
2848 bp = bio->bio_buf;
2849 ip = ap->a_vp->v_data;
2850 hmp = ip->hmp;
2852 blksize = hammer_blocksize(bio->bio_offset);
2853 KKASSERT(bp->b_bufsize == blksize);
2855 if (ip->flags & HAMMER_INODE_RO) {
2856 bp->b_error = EROFS;
2857 bp->b_flags |= B_ERROR;
2858 biodone(ap->a_bio);
2859 return(EROFS);
2860 }
2862 /*
2863 * Interlock with inode destruction (no in-kernel or directory
2864 * topology visibility). If we queue new IO while trying to
2865 * destroy the inode we can deadlock the vtrunc call in
2866 * hammer_inode_unloadable_check().
2867 *
2868 * Besides, there's no point flushing a bp associated with an
2869 * inode that is being destroyed on-media and has no kernel
2870 * references.
2871 */
2872 if ((ip->flags | ip->sync_flags) &
2873 (HAMMER_INODE_DELETING|HAMMER_INODE_DELETED)) {
2874 bp->b_resid = 0;
2875 biodone(ap->a_bio);
2876 return(0);
2877 }
2879 /*
2880 * Reserve space and issue a direct-write from the front-end.
2881 * NOTE: The direct_io code will hammer_bread/bcopy smaller
2882 * allocations.
2883 *
2884 * An in-memory record will be installed to reference the storage
2885 * until the flusher can get to it.
2886 *
2887 * Since we own the high level bio the front-end will not try to
2888 * do a direct-read until the write completes.
2889 *
2890 * NOTE: The only time we do not reserve a full-sized buffer's
2891 * worth of data is if the file is small. We do not try to
2892 * allocate a fragment (from the small-data zone) at the end of
2893 * an otherwise large file as this can lead to wildly separated
2894 * data.
2895 */
2896 KKASSERT((bio->bio_offset & HAMMER_BUFMASK) == 0);
2897 KKASSERT(bio->bio_offset < ip->ino_data.size);
2898 if (bio->bio_offset || ip->ino_data.size > HAMMER_BUFSIZE / 2)
2899 bytes = bp->b_bufsize;
2900 else
2901 bytes = ((int)ip->ino_data.size + 15) & ~15;
2903 record = hammer_ip_add_bulk(ip, bio->bio_offset, bp->b_data,
2904 bytes, &error);
2905 if (record) {
2906 hammer_io_direct_write(hmp, record, bio);
2907 if (ip->rsv_recs > 1 && hmp->rsv_recs > hammer_limit_recs)
2908 hammer_flush_inode(ip, 0);
2909 } else {
2910 bp->b_bio2.bio_offset = NOOFFSET;
2911 bp->b_error = error;
2912 bp->b_flags |= B_ERROR;
2913 biodone(ap->a_bio);
2914 }
2915 return(error);
2916 }
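/*
 * Illustrative sketch (hypothetical, not HAMMER code): the reservation
 * sizing rule applied above.  Only the first block of a small file
 * gets a short, 16-byte-aligned reservation; everything else reserves
 * a full buffer so fragments are not scattered through a large file.
 * example_write_bytes() is invented; bufsize stands in for
 * HAMMER_BUFSIZE.
 */
#if 0
static int
example_write_bytes(int64_t bio_offset, int64_t file_size, int bufsize)
{
	if (bio_offset != 0 || file_size > bufsize / 2)
		return (bufsize);		/* full-sized reservation */
	return (((int)file_size + 15) & ~15);	/* small file, round to 16 */
}
#endif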
2918 /*
2919 * dounlink - disconnect a directory entry
2920 *
2921 * XXX whiteout support not really in yet
2922 */
2923 static int
2924 hammer_dounlink(hammer_transaction_t trans, struct nchandle *nch,
2925 struct vnode *dvp, struct ucred *cred,
2926 int flags, int isdir)
2927 {
2928 struct namecache *ncp;
2929 hammer_inode_t dip;
2930 hammer_inode_t ip;
2931 struct hammer_cursor cursor;
2932 int64_t namekey;
2933 u_int32_t max_iterations;
2934 int nlen, error;
2936 /*
2937 * Calculate the namekey and set up the key range for the scan. This
2938 * works kinda like a chained hash table where the lower 32 bits
2939 * of the namekey synthesize the chain.
2940 *
2941 * The key range is inclusive of both key_beg and key_end.
2942 */
2943 dip = VTOI(dvp);
2944 ncp = nch->ncp;
2946 if (dip->flags & HAMMER_INODE_RO)
2947 return (EROFS);
2949 namekey = hammer_directory_namekey(dip, ncp->nc_name, ncp->nc_nlen,
2950 &max_iterations);
2951 retry:
2952 hammer_init_cursor(trans, &cursor, &dip->cache[1], dip);
2953 cursor.key_beg.localization = dip->obj_localization +
2954 hammer_dir_localization(dip);
2955 cursor.key_beg.obj_id = dip->obj_id;
2956 cursor.key_beg.key = namekey;
2957 cursor.key_beg.create_tid = 0;
2958 cursor.key_beg.delete_tid = 0;
2959 cursor.key_beg.rec_type = HAMMER_RECTYPE_DIRENTRY;
2960 cursor.key_beg.obj_type = 0;
2962 cursor.key_end = cursor.key_beg;
2963 cursor.key_end.key += max_iterations;
2964 cursor.asof = dip->obj_asof;
2965 cursor.flags |= HAMMER_CURSOR_END_INCLUSIVE | HAMMER_CURSOR_ASOF;
2967 /*
2968 * Scan all matching records (the chain), locate the one matching
2969 * the requested path component. info->last_error contains the
2970 * error code on search termination and could be 0, ENOENT, or
2971 * something else.
2972 *
2973 * The hammer_ip_*() functions merge in-memory records with on-disk
2974 * records for the purposes of the search.
2975 */
2976 error = hammer_ip_first(&cursor);
2978 while (error == 0) {
2979 error = hammer_ip_resolve_data(&cursor);
2980 if (error)
2981 break;
2982 nlen = cursor.leaf->data_len - HAMMER_ENTRY_NAME_OFF;
2983 KKASSERT(nlen > 0);
2984 if (ncp->nc_nlen == nlen &&
2985 bcmp(ncp->nc_name, cursor.data->entry.name, nlen) == 0) {
2986 break;
2987 }
2988 error = hammer_ip_next(&cursor);
2989 }
2991 /*
2992 * If all is ok we have to get the inode so we can adjust nlinks.
2993 * To avoid a deadlock with the flusher we must release the inode
2994 * lock on the directory when acquiring the inode for the entry.
2995 *
2996 * If the target is a directory, it must be empty.
2997 */
2998 if (error == 0) {
2999 hammer_unlock(&cursor.ip->lock);
3000 ip = hammer_get_inode(trans, dip, cursor.data->entry.obj_id,
3001 dip->hmp->asof,
3002 cursor.data->entry.localization,
3003 0, &error);
3004 hammer_lock_sh(&cursor.ip->lock);
3005 if (error == ENOENT) {
3006 kprintf("HAMMER: WARNING: Removing "
3007 "dirent w/missing inode \"%s\"\n"
3008 "\tobj_id = %016llx\n",
3009 ncp->nc_name,
3010 (long long)cursor.data->entry.obj_id);
3011 error = 0;
3012 }
3014 /*
3015 * If isdir >= 0 we validate that the entry is or is not a
3016 * directory. If isdir < 0 we don't care.
3017 */
3018 if (error == 0 && isdir >= 0 && ip) {
3019 if (isdir &&
3020 ip->ino_data.obj_type != HAMMER_OBJTYPE_DIRECTORY) {
3021 error = ENOTDIR;
3022 } else if (isdir == 0 &&
3023 ip->ino_data.obj_type == HAMMER_OBJTYPE_DIRECTORY) {
3024 error = EISDIR;
3025 }
3026 }
3028 /*
3029 * If we are trying to remove a directory the directory must
3030 * be empty.
3031 *
3032 * The check directory code can loop and deadlock/retry. Our
3033 * own cursor's node locks must be released to avoid a 3-way
3034 * deadlock with the flusher if the check directory code
3035 * blocks.
3036 *
3037 * If any changes whatsoever have been made to the cursor
3038 * set EDEADLK and retry.
3039 */
3040 if (error == 0 && ip && ip->ino_data.obj_type ==
3041 HAMMER_OBJTYPE_DIRECTORY) {
3042 hammer_unlock_cursor(&cursor);
3043 error = hammer_ip_check_directory_empty(trans, ip);
3044 hammer_lock_cursor(&cursor);
3045 if (cursor.flags & HAMMER_CURSOR_RETEST) {
3046 kprintf("HAMMER: Warning: avoided deadlock "
3047 "on rmdir '%s'\n",
3048 ncp->nc_name);
3049 error = EDEADLK;
3050 }
3051 }
3053 /*
3054 * Delete the directory entry.
3055 *
3056 * WARNING: hammer_ip_del_directory() may have to terminate
3057 * the cursor to avoid a deadlock. It is ok to call
3058 * hammer_done_cursor() twice.
3059 */
3060 if (error == 0) {
3061 error = hammer_ip_del_directory(trans, &cursor,
3062 dip, ip);
3063 }
3064 hammer_done_cursor(&cursor);
3065 if (error == 0) {
3066 cache_setunresolved(nch);
3067 cache_setvp(nch, NULL);
3068 /* XXX locking */
3069 if (ip && ip->vp) {
3070 hammer_knote(ip->vp, NOTE_DELETE);
3071 cache_inval_vp(ip->vp, CINV_DESTROY);
3072 }
3074 if (ip)
3075 hammer_rel_inode(ip, 0);
3076 } else {
3077 hammer_done_cursor(&cursor);
3078 }
3079 if (error == EDEADLK)
3080 goto retry;
3082 return (error);
3083 }
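/*
 * Illustrative sketch (hypothetical, not HAMMER code): the chained-hash
 * style lookup hammer_dounlink() performs above.  The name hashes to a
 * base key; collisions occupy nearby keys, so the scan walks the
 * inclusive range [namekey, namekey + max_iterations] and compares the
 * actual names.  example_lookup() and its fetch callback are invented
 * here and greatly simplify the cursor-based iteration.
 */
#if 0
static int
example_lookup(int64_t namekey, u_int32_t max_iterations,
	       const char *name, int nlen,
	       int (*fetch)(int64_t key, const char **namep, int *lenp))
{
	int64_t key;
	const char *ename;
	int elen;

	for (key = namekey; key <= namekey + (int64_t)max_iterations; ++key) {
		if (fetch(key, &ename, &elen) != 0)
			continue;		/* no record at this key */
		if (elen == nlen && bcmp(name, ename, nlen) == 0)
			return (0);		/* found the entry */
	}
	return (ENOENT);
}
#endif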
3085 /************************************************************************
3086 * FIFO AND SPECFS OPS *
3087 ************************************************************************
3088 *
3089 */
3091 static int
3092 hammer_vop_fifoclose (struct vop_close_args *ap)
3093 {
3094 /* XXX update itimes */
3095 return (VOCALL(&fifo_vnode_vops, &ap->a_head));
3096 }
3098 static int
3099 hammer_vop_fiforead (struct vop_read_args *ap)
3100 {
3101 int error;
3103 error = VOCALL(&fifo_vnode_vops, &ap->a_head);
3104 /* XXX update access time */
3105 return (error);
3106 }
3108 static int
3109 hammer_vop_fifowrite (struct vop_write_args *ap)
3110 {
3111 int error;
3113 error = VOCALL(&fifo_vnode_vops, &ap->a_head);
3114 /* XXX update access time */
3115 return (error);
3116 }
3118 static
3119 int
3120 hammer_vop_fifokqfilter(struct vop_kqfilter_args *ap)
3121 {
3122 int error;
3124 error = VOCALL(&fifo_vnode_vops, &ap->a_head);
3125 if (error)
3126 error = hammer_vop_kqfilter(ap);
3127 return(error);
3128 }
3130 /************************************************************************
3131 * KQFILTER OPS *
3132 ************************************************************************
3133 */
3135 static void filt_hammerdetach(struct knote *kn);
3136 static int filt_hammerread(struct knote *kn, long hint);
3137 static int filt_hammerwrite(struct knote *kn, long hint);
3138 static int filt_hammervnode(struct knote *kn, long hint);
3140 static struct filterops hammerread_filtops =
3141 { 1, NULL, filt_hammerdetach, filt_hammerread };
3142 static struct filterops hammerwrite_filtops =
3143 { 1, NULL, filt_hammerdetach, filt_hammerwrite };
3144 static struct filterops hammervnode_filtops =
3145 { 1, NULL, filt_hammerdetach, filt_hammervnode };
3147 static
3148 int
3149 hammer_vop_kqfilter(struct vop_kqfilter_args *ap)
3150 {
3151 struct vnode *vp = ap->a_vp;
3152 struct knote *kn = ap->a_kn;
3153 lwkt_tokref vlock;
3155 switch (kn->kn_filter) {
3156 case EVFILT_READ:
3157 kn->kn_fop = &hammerread_filtops;
3158 break;
3159 case EVFILT_WRITE:
3160 kn->kn_fop = &hammerwrite_filtops;
3161 break;
3162 case EVFILT_VNODE:
3163 kn->kn_fop = &hammervnode_filtops;
3164 break;
3165 default:
3166 return (1);
3167 }
3169 kn->kn_hook = (caddr_t)vp;
3171 lwkt_gettoken(&vlock, &vp->v_token);
3172 SLIST_INSERT_HEAD(&vp->v_pollinfo.vpi_selinfo.si_note, kn, kn_selnext);
3173 lwkt_reltoken(&vlock);
3175 return(0);
3176 }
3178 static void
3179 filt_hammerdetach(struct knote *kn)
3180 {
3181 struct vnode *vp = (void *)kn->kn_hook;
3182 lwkt_tokref vlock;
3184 lwkt_gettoken(&vlock, &vp->v_token);
3185 SLIST_REMOVE(&vp->v_pollinfo.vpi_selinfo.si_note,
3186 kn, knote, kn_selnext);
3187 lwkt_reltoken(&vlock);
3188 }
3190 static int
3191 filt_hammerread(struct knote *kn, long hint)
3192 {
3193 struct vnode *vp = (void *)kn->kn_hook;
3194 hammer_inode_t ip = VTOI(vp);
3196 if (hint == NOTE_REVOKE) {
3197 kn->kn_flags |= (EV_EOF | EV_ONESHOT);
3198 return(1);
3199 }
3200 kn->kn_data = ip->ino_data.size - kn->kn_fp->f_offset;
3201 return (kn->kn_data != 0);
3202 }
3204 static int
3205 filt_hammerwrite(struct knote *kn, long hint)
3206 {
3207 if (hint == NOTE_REVOKE)
3208 kn->kn_flags |= (EV_EOF | EV_ONESHOT);
3209 kn->kn_data = 0;
3210 return (1);
3211 }
3213 static int
3214 filt_hammervnode(struct knote *kn, long hint)
3215 {
3216 if (kn->kn_sfflags & hint)
3217 kn->kn_fflags |= hint;
3218 if (hint == NOTE_REVOKE) {
3219 kn->kn_flags |= EV_EOF;
3220 return (1);
3221 }
3222 return (kn->kn_fflags != 0);
3223 }
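/*
 * Illustrative sketch (hypothetical, not HAMMER code): the EVFILT_READ
 * readiness test filt_hammerread() implements above.  The event fires
 * while the descriptor's offset is behind EOF, and kn_data reports the
 * number of readable bytes.  example_read_ready() is invented here.
 */
#if 0
static int
example_read_ready(int64_t file_size, int64_t f_offset, int64_t *datap)
{
	*datap = file_size - f_offset;	/* bytes between offset and EOF */
	return (*datap != 0);		/* ready if anything to read */
}
#endif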