hammer2 - structuralize the cluster's chain array
sys/vfs/hammer2/hammer2_vnops.c

/*
 * Copyright (c) 2011-2014 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@dragonflybsd.org>
 * by Venkatesh Srinivas <vsrinivas@dragonflybsd.org>
 * by Daniel Flores (GSOC 2013 - mentored by Matthew Dillon, compression)
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
/*
 * Kernel Filesystem interface
 *
 * NOTE! local ipdata pointers must be reloaded on any modifying operation
 *	 to the inode as its underlying chain may have changed.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/fcntl.h>
#include <sys/buf.h>
#include <sys/proc.h>
#include <sys/namei.h>
#include <sys/mount.h>
#include <sys/vnode.h>
#include <sys/mountctl.h>
#include <sys/dirent.h>
#include <sys/uio.h>
#include <sys/objcache.h>
#include <sys/event.h>
#include <sys/file.h>
#include <vfs/fifofs/fifo.h>

#include "hammer2.h"
#include "hammer2_lz4.h"

#include "zlib/hammer2_zlib.h"

#define ZFOFFSET	(-2LL)

static int hammer2_read_file(hammer2_inode_t *ip, struct uio *uio,
				int seqcount);
static int hammer2_write_file(hammer2_inode_t *ip, struct uio *uio,
				int ioflag, int seqcount);
static void hammer2_extend_file(hammer2_inode_t *ip, hammer2_key_t nsize);
static void hammer2_truncate_file(hammer2_inode_t *ip, hammer2_key_t nsize);
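
/*
 * Object caches providing scratch buffers for the compression and
 * decompression paths (cache_buffer_read is used by the decompression
 * callbacks below).
 */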
struct objcache *cache_buffer_read;
struct objcache *cache_buffer_write;

/*
 * Callback used in read path in case that a block is compressed with LZ4.
 */
static
void
hammer2_decompress_LZ4_callback(const char *data, u_int bytes, struct bio *bio)
{
	struct buf *bp;
	char *compressed_buffer;
	int compressed_size;
	int result;

	bp = bio->bio_buf;

#if 0
	if bio->bio_caller_info2.index &&
	      bio->bio_caller_info1.uvalue32 !=
	      crc32(bp->b_data, bp->b_bufsize) --- return error
#endif

	KKASSERT(bp->b_bufsize <= HAMMER2_PBUFSIZE);

	/* the media block is prefixed with its compressed size (an int) */
	compressed_size = *(const int *)data;
	KKASSERT(compressed_size <= bytes - sizeof(int));

	compressed_buffer = objcache_get(cache_buffer_read, M_INTWAIT);
	result = LZ4_decompress_safe(__DECONST(char *, &data[sizeof(int)]),
				     compressed_buffer,
				     compressed_size,
				     bp->b_bufsize);
	if (result < 0) {
		kprintf("READ PATH: Error during decompression."
			"bio %016jx/%d\n",
			(intmax_t)bio->bio_offset, bytes);
		/* make sure it isn't random garbage */
		bzero(compressed_buffer, bp->b_bufsize);
	}
	KKASSERT(result <= bp->b_bufsize);
	bcopy(compressed_buffer, bp->b_data, bp->b_bufsize);
	if (result < bp->b_bufsize)
		bzero(bp->b_data + result, bp->b_bufsize - result);
	objcache_put(cache_buffer_read, compressed_buffer);
	bp->b_resid = 0;
	bp->b_flags |= B_AGE;
}

/*
 * Callback used in read path in case that a block is compressed with ZLIB.
 * It is almost identical to LZ4 callback, so in theory they can be unified,
 * but we didn't want to make changes in bio structure for that.
 */
static
void
hammer2_decompress_ZLIB_callback(const char *data, u_int bytes, struct bio *bio)
{
	struct buf *bp;
	char *compressed_buffer;
	z_stream strm_decompress;
	int result;
	int ret;

	bp = bio->bio_buf;

	KKASSERT(bp->b_bufsize <= HAMMER2_PBUFSIZE);
	strm_decompress.avail_in = 0;
	strm_decompress.next_in = Z_NULL;

	ret = inflateInit(&strm_decompress);

	if (ret != Z_OK)
		kprintf("HAMMER2 ZLIB: Fatal error in inflateInit.\n");

	compressed_buffer = objcache_get(cache_buffer_read, M_INTWAIT);
	strm_decompress.next_in = __DECONST(char *, data);

	/* XXX supply proper size, subset of device bp */
	strm_decompress.avail_in = bytes;
	strm_decompress.next_out = compressed_buffer;
	strm_decompress.avail_out = bp->b_bufsize;

	ret = inflate(&strm_decompress, Z_FINISH);
	if (ret != Z_STREAM_END) {
		kprintf("HAMMER2 ZLIB: Fatal error during decompression.\n");
		bzero(compressed_buffer, bp->b_bufsize);
	}
	bcopy(compressed_buffer, bp->b_data, bp->b_bufsize);
	result = bp->b_bufsize - strm_decompress.avail_out;
	if (result < bp->b_bufsize)
		bzero(bp->b_data + result, strm_decompress.avail_out);
	objcache_put(cache_buffer_read, compressed_buffer);
	ret = inflateEnd(&strm_decompress);

	bp->b_resid = 0;
	bp->b_flags |= B_AGE;
}

static __inline
void
hammer2_knote(struct vnode *vp, int flags)
{
	if (flags)
		KNOTE(&vp->v_pollinfo.vpi_kqinfo.ki_note, flags);
}

/*
 * Last reference to a vnode is going away but it is still cached.
 */
static
int
hammer2_vop_inactive(struct vop_inactive_args *ap)
{
	hammer2_inode_t *ip;
	hammer2_cluster_t *cluster;
	struct vnode *vp;

	LOCKSTART;
	vp = ap->a_vp;
	ip = VTOI(vp);

	/*
	 * Degenerate case
	 */
	if (ip == NULL) {
		vrecycle(vp);
		LOCKSTOP;
		return (0);
	}

	/*
	 * Detect updates to the embedded data which may be synchronized by
	 * the strategy code.  Simply mark the inode modified so it gets
	 * picked up by our normal flush.
	 */
	cluster = hammer2_inode_lock_nex(ip, HAMMER2_RESOLVE_NEVER);
	KKASSERT(cluster);

	/*
	 * Check for deleted inodes and recycle immediately.
	 *
	 * WARNING: nvtruncbuf() can only be safely called without the inode
	 *	    lock held due to the way our write thread works.
	 */
	if (hammer2_cluster_isunlinked(cluster)) {
		hammer2_key_t lbase;
		int nblksize;

		nblksize = hammer2_calc_logical(ip, 0, &lbase, NULL);
		hammer2_inode_unlock_ex(ip, cluster);
		nvtruncbuf(vp, 0, nblksize, 0, 0);
		vrecycle(vp);
	} else {
		hammer2_inode_unlock_ex(ip, cluster);
	}
	LOCKSTOP;
	return (0);
}

/*
 * Reclaim a vnode so that it can be reused; after the inode is
 * disassociated, the filesystem must manage it alone.
 */
static
int
hammer2_vop_reclaim(struct vop_reclaim_args *ap)
{
	hammer2_cluster_t *cluster;
	hammer2_inode_t *ip;
	hammer2_pfsmount_t *pmp;
	struct vnode *vp;

	LOCKSTART;
	vp = ap->a_vp;
	ip = VTOI(vp);
	if (ip == NULL) {
		LOCKSTOP;
		return(0);
	}

	/*
	 * Inode must be locked for reclaim.
	 */
	pmp = ip->pmp;
	cluster = hammer2_inode_lock_nex(ip, HAMMER2_RESOLVE_NEVER);

	/*
	 * The final close of a deleted file or directory marks it for
	 * destruction.  The DELETED flag allows the flusher to shortcut
	 * any modified blocks still unflushed (that is, just ignore them).
	 *
	 * HAMMER2 usually does not try to optimize the freemap by returning
	 * deleted blocks to it as it does not usually know how many snapshots
	 * might be referencing portions of the file/dir.
	 */
	vp->v_data = NULL;
	ip->vp = NULL;

	/*
	 * NOTE! We do not attempt to flush chains here, flushing is
	 *	 really fragile and could also deadlock.
	 */
	vclrisdirty(vp);

	/*
	 * A reclaim can occur at any time so we cannot safely start a
	 * transaction to handle reclamation of unlinked files.  Instead,
	 * the ip is left with a reference and placed on a linked list and
	 * handled later on.
	 */
	if (hammer2_cluster_isunlinked(cluster)) {
		hammer2_inode_unlink_t *ipul;

		ipul = kmalloc(sizeof(*ipul), pmp->minode, M_WAITOK | M_ZERO);
		ipul->ip = ip;

		hammer2_spin_ex(&pmp->list_spin);
		TAILQ_INSERT_TAIL(&pmp->unlinkq, ipul, entry);
		hammer2_spin_unex(&pmp->list_spin);
		hammer2_inode_unlock_ex(ip, cluster);	/* unlock */
		/* retain ref from vp for ipul */
	} else {
		hammer2_inode_unlock_ex(ip, cluster);	/* unlock */
		hammer2_inode_drop(ip);			/* vp ref */
	}
	/* cluster no longer referenced */
	/* cluster = NULL; not needed */

	/*
	 * XXX handle background sync when ip dirty, kernel will no longer
	 * notify us regarding this inode because there is no longer a
	 * vnode attached to it.
	 */

	LOCKSTOP;
	return (0);
}
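
/*
 * hammer2_vop_fsync { vp, waitfor, flags }
 *
 * vfsync() the vnode's dirty buffers, then synchronize the media inode
 * if its size or mtime state changed.
 */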
static
int
hammer2_vop_fsync(struct vop_fsync_args *ap)
{
	hammer2_inode_t *ip;
	hammer2_trans_t trans;
	hammer2_cluster_t *cluster;
	struct vnode *vp;

	LOCKSTART;
	vp = ap->a_vp;
	ip = VTOI(vp);

#if 0
	/* XXX can't do this yet */
	hammer2_trans_init(&trans, ip->pmp, HAMMER2_TRANS_ISFLUSH);
	vfsync(vp, ap->a_waitfor, 1, NULL, NULL);
#endif
	hammer2_trans_init(&trans, ip->pmp, 0);
	vfsync(vp, ap->a_waitfor, 1, NULL, NULL);

	/*
	 * Calling chain_flush here creates a lot of duplicative
	 * COW operations due to non-optimal vnode ordering.
	 *
	 * Only do it for an actual fsync() syscall.  The other forms
	 * which call this function will eventually call chain_flush
	 * on the volume root as a catch-all, which is far more optimal.
	 */
	cluster = hammer2_inode_lock_ex(ip);
	atomic_clear_int(&ip->flags, HAMMER2_INODE_MODIFIED);
	vclrisdirty(vp);
	if (ip->flags & (HAMMER2_INODE_RESIZED|HAMMER2_INODE_MTIME))
		hammer2_inode_fsync(&trans, ip, cluster);

#if 0
	/*
	 * XXX creates discontinuity w/modify_tid
	 */
	if (ap->a_flags & VOP_FSYNC_SYSCALL) {
		hammer2_flush(&trans, cluster);
	}
#endif
	hammer2_inode_unlock_ex(ip, cluster);
	hammer2_trans_done(&trans);

	LOCKSTOP;
	return (0);
}
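
/*
 * Check access permissions using the uid/gid/mode/uflags stored in the
 * inode's media data.
 */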
static
int
hammer2_vop_access(struct vop_access_args *ap)
{
	hammer2_inode_t *ip = VTOI(ap->a_vp);
	const hammer2_inode_data_t *ripdata;
	hammer2_cluster_t *cluster;
	uid_t uid;
	gid_t gid;
	int error;

	LOCKSTART;
	cluster = hammer2_inode_lock_sh(ip);
	ripdata = &hammer2_cluster_rdata(cluster)->ipdata;
	uid = hammer2_to_unix_xid(&ripdata->uid);
	gid = hammer2_to_unix_xid(&ripdata->gid);
	error = vop_helper_access(ap, uid, gid, ripdata->mode, ripdata->uflags);
	hammer2_inode_unlock_sh(ip, cluster);

	LOCKSTOP;
	return (error);
}
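
/*
 * Retrieve inode attributes from the media data.  Note that atime is not
 * supported, so mtime is reported in its place, and va_bytes is
 * approximated with the logical file size.
 */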
static
int
hammer2_vop_getattr(struct vop_getattr_args *ap)
{
	const hammer2_inode_data_t *ripdata;
	hammer2_cluster_t *cluster;
	hammer2_pfsmount_t *pmp;
	hammer2_inode_t *ip;
	struct vnode *vp;
	struct vattr *vap;

	LOCKSTART;
	vp = ap->a_vp;
	vap = ap->a_vap;

	ip = VTOI(vp);
	pmp = ip->pmp;

	cluster = hammer2_inode_lock_sh(ip);
	ripdata = &hammer2_cluster_rdata(cluster)->ipdata;
	KKASSERT(hammer2_cluster_type(cluster) == HAMMER2_BREF_TYPE_INODE);

	vap->va_fsid = pmp->mp->mnt_stat.f_fsid.val[0];
	vap->va_fileid = ripdata->inum;
	vap->va_mode = ripdata->mode;
	vap->va_nlink = ripdata->nlinks;
	vap->va_uid = hammer2_to_unix_xid(&ripdata->uid);
	vap->va_gid = hammer2_to_unix_xid(&ripdata->gid);
	vap->va_rmajor = 0;
	vap->va_rminor = 0;
	vap->va_size = ip->size;	/* protected by shared lock */
	vap->va_blocksize = HAMMER2_PBUFSIZE;
	vap->va_flags = ripdata->uflags;
	hammer2_time_to_timespec(ripdata->ctime, &vap->va_ctime);
	hammer2_time_to_timespec(ripdata->mtime, &vap->va_mtime);
	hammer2_time_to_timespec(ripdata->mtime, &vap->va_atime);
	vap->va_gen = 1;
	vap->va_bytes = vap->va_size;	/* XXX */
	vap->va_type = hammer2_get_vtype(ripdata);
	vap->va_filerev = 0;
	vap->va_uid_uuid = ripdata->uid;
	vap->va_gid_uuid = ripdata->gid;
	vap->va_vaflags = VA_UID_UUID_VALID | VA_GID_UUID_VALID |
			  VA_FSID_UUID_VALID;

	hammer2_inode_unlock_sh(ip, cluster);

	LOCKSTOP;
	return (0);
}
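
/*
 * Set inode attributes.  Flag, ownership, size, mtime and mode changes
 * modify the media inode under a single transaction; a truncation
 * additionally requires an immediate inode_fsync() to trim the related
 * data chains.
 */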
static
int
hammer2_vop_setattr(struct vop_setattr_args *ap)
{
	const hammer2_inode_data_t *ripdata;
	hammer2_inode_data_t *wipdata;
	hammer2_inode_t *ip;
	hammer2_cluster_t *cluster;
	hammer2_trans_t trans;
	struct vnode *vp;
	struct vattr *vap;
	int error;
	int kflags = 0;
	int domtime = 0;
	int dosync = 0;
	uint64_t ctime;

	LOCKSTART;
	vp = ap->a_vp;
	vap = ap->a_vap;
	hammer2_update_time(&ctime);

	ip = VTOI(vp);

	if (ip->pmp->ronly) {
		LOCKSTOP;
		return(EROFS);
	}

	hammer2_pfs_memory_wait(ip->pmp);
	hammer2_trans_init(&trans, ip->pmp, 0);
	cluster = hammer2_inode_lock_ex(ip);
	ripdata = &hammer2_cluster_rdata(cluster)->ipdata;
	error = 0;

	if (vap->va_flags != VNOVAL) {
		u_int32_t flags;

		flags = ripdata->uflags;
		error = vop_helper_setattr_flags(&flags, vap->va_flags,
					 hammer2_to_unix_xid(&ripdata->uid),
					 ap->a_cred);
		if (error == 0) {
			if (ripdata->uflags != flags) {
				wipdata = hammer2_cluster_modify_ip(&trans, ip,
								    cluster, 0);
				wipdata->uflags = flags;
				wipdata->ctime = ctime;
				kflags |= NOTE_ATTRIB;
				dosync = 1;
				ripdata = wipdata;
			}
			if (ripdata->uflags & (IMMUTABLE | APPEND)) {
				error = 0;
				goto done;
			}
		}
		goto done;
	}
	if (ripdata->uflags & (IMMUTABLE | APPEND)) {
		error = EPERM;
		goto done;
	}
	if (vap->va_uid != (uid_t)VNOVAL || vap->va_gid != (gid_t)VNOVAL) {
		mode_t cur_mode = ripdata->mode;
		uid_t cur_uid = hammer2_to_unix_xid(&ripdata->uid);
		gid_t cur_gid = hammer2_to_unix_xid(&ripdata->gid);
		uuid_t uuid_uid;
		uuid_t uuid_gid;

		error = vop_helper_chown(ap->a_vp, vap->va_uid, vap->va_gid,
					 ap->a_cred,
					 &cur_uid, &cur_gid, &cur_mode);
		if (error == 0) {
			hammer2_guid_to_uuid(&uuid_uid, cur_uid);
			hammer2_guid_to_uuid(&uuid_gid, cur_gid);
			if (bcmp(&uuid_uid, &ripdata->uid, sizeof(uuid_uid)) ||
			    bcmp(&uuid_gid, &ripdata->gid, sizeof(uuid_gid)) ||
			    ripdata->mode != cur_mode
			) {
				wipdata = hammer2_cluster_modify_ip(&trans, ip,
								    cluster, 0);
				wipdata->uid = uuid_uid;
				wipdata->gid = uuid_gid;
				wipdata->mode = cur_mode;
				wipdata->ctime = ctime;
				dosync = 1;
				ripdata = wipdata;
			}
			kflags |= NOTE_ATTRIB;
		}
	}

	/*
	 * Resize the file
	 */
	if (vap->va_size != VNOVAL && ip->size != vap->va_size) {
		switch(vp->v_type) {
		case VREG:
			if (vap->va_size == ip->size)
				break;
			hammer2_inode_unlock_ex(ip, cluster);
			if (vap->va_size < ip->size) {
				hammer2_truncate_file(ip, vap->va_size);
			} else {
				hammer2_extend_file(ip, vap->va_size);
			}
			cluster = hammer2_inode_lock_ex(ip);
			/* RELOAD */
			ripdata = &hammer2_cluster_rdata(cluster)->ipdata;
			domtime = 1;
			break;
		default:
			error = EINVAL;
			goto done;
		}
	}
#if 0
	/* atime not supported */
	if (vap->va_atime.tv_sec != VNOVAL) {
		wipdata = hammer2_cluster_modify_ip(&trans, ip, cluster, 0);
		wipdata->atime = hammer2_timespec_to_time(&vap->va_atime);
		kflags |= NOTE_ATTRIB;
		dosync = 1;
		ripdata = wipdata;
	}
#endif
	if (vap->va_mtime.tv_sec != VNOVAL) {
		wipdata = hammer2_cluster_modify_ip(&trans, ip, cluster, 0);
		wipdata->mtime = hammer2_timespec_to_time(&vap->va_mtime);
		kflags |= NOTE_ATTRIB;
		domtime = 0;
		dosync = 1;
		ripdata = wipdata;
	}
	if (vap->va_mode != (mode_t)VNOVAL) {
		mode_t cur_mode = ripdata->mode;
		uid_t cur_uid = hammer2_to_unix_xid(&ripdata->uid);
		gid_t cur_gid = hammer2_to_unix_xid(&ripdata->gid);

		error = vop_helper_chmod(ap->a_vp, vap->va_mode, ap->a_cred,
					 cur_uid, cur_gid, &cur_mode);
		if (error == 0 && ripdata->mode != cur_mode) {
			wipdata = hammer2_cluster_modify_ip(&trans, ip,
							    cluster, 0);
			wipdata->mode = cur_mode;
			wipdata->ctime = ctime;
			kflags |= NOTE_ATTRIB;
			dosync = 1;
			ripdata = wipdata;
		}
	}

	/*
	 * If a truncation occurred we must call inode_fsync() now in order
	 * to trim the related data chains, otherwise a later expansion can
	 * cause havoc.
	 */
	if (dosync) {
		hammer2_cluster_modsync(cluster);
		dosync = 0;
	}
	hammer2_inode_fsync(&trans, ip, cluster);

	/*
	 * Cleanup.  If domtime is set an additional inode modification
	 * must be flagged.  All other modifications will have already
	 * set INODE_MODIFIED and called vsetisdirty().
	 */
done:
	if (domtime) {
		atomic_set_int(&ip->flags, HAMMER2_INODE_MODIFIED |
					   HAMMER2_INODE_MTIME);
		vsetisdirty(ip->vp);
	}
	if (dosync)
		hammer2_cluster_modsync(cluster);
	hammer2_inode_unlock_ex(ip, cluster);
	hammer2_trans_done(&trans);
	hammer2_knote(ip->vp, kflags);

	LOCKSTOP;
	return (error);
}
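
/*
 * Directory scan.  Artificial entries for '.' and '..' are synthesized
 * first, then the real entries are iterated in directory-hash order via
 * cluster lookups.
 */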
static
int
hammer2_vop_readdir(struct vop_readdir_args *ap)
{
	const hammer2_inode_data_t *ripdata;
	hammer2_inode_t *ip;
	hammer2_inode_t *xip;
	hammer2_cluster_t *cparent;
	hammer2_cluster_t *cluster;
	hammer2_cluster_t *xcluster;
	hammer2_blockref_t bref;
	hammer2_tid_t inum;
	hammer2_key_t key_next;
	hammer2_key_t lkey;
	struct uio *uio;
	off_t *cookies;
	off_t saveoff;
	int cookie_index;
	int ncookies;
	int error;
	int dtype;
	int ddflag;
	int r;

	LOCKSTART;
	ip = VTOI(ap->a_vp);
	uio = ap->a_uio;
	saveoff = uio->uio_offset;

	/*
	 * Set up directory entry cookies if requested
	 */
	if (ap->a_ncookies) {
		ncookies = uio->uio_resid / 16 + 1;
		if (ncookies > 1024)
			ncookies = 1024;
		cookies = kmalloc(ncookies * sizeof(off_t), M_TEMP, M_WAITOK);
	} else {
		ncookies = -1;
		cookies = NULL;
	}
	cookie_index = 0;

	cparent = hammer2_inode_lock_sh(ip);
	ripdata = &hammer2_cluster_rdata(cparent)->ipdata;

	/*
	 * Handle artificial entries.  To ensure that only positive 64 bit
	 * quantities are returned to userland we always strip off bit 63.
	 * The hash code is designed such that codes 0x0000-0x7FFF are not
	 * used, allowing us to use these codes for artificial entries.
	 *
	 * Entry 0 is used for '.' and entry 1 is used for '..'.  Do not
	 * allow '..' to cross the mount point into (e.g.) the super-root.
	 */
	error = 0;
	cluster = (void *)(intptr_t)-1;	/* non-NULL for early goto done case */

	if (saveoff == 0) {
		inum = ripdata->inum & HAMMER2_DIRHASH_USERMSK;
		r = vop_write_dirent(&error, uio, inum, DT_DIR, 1, ".");
		if (r)
			goto done;
		if (cookies)
			cookies[cookie_index] = saveoff;
		++saveoff;
		++cookie_index;
		if (cookie_index == ncookies)
			goto done;
	}

	if (saveoff == 1) {
		/*
		 * Be careful with lockorder when accessing ".."
		 *
		 * (ip is the current dir. xip is the parent dir).
		 */
		inum = ripdata->inum & HAMMER2_DIRHASH_USERMSK;
		while (ip->pip != NULL && ip != ip->pmp->iroot) {
			xip = ip->pip;
			hammer2_inode_ref(xip);
			hammer2_inode_unlock_sh(ip, cparent);
			xcluster = hammer2_inode_lock_sh(xip);
			cparent = hammer2_inode_lock_sh(ip);
			hammer2_inode_drop(xip);
			ripdata = &hammer2_cluster_rdata(cparent)->ipdata;
			if (xip == ip->pip) {
				inum = hammer2_cluster_rdata(xcluster)->
					ipdata.inum & HAMMER2_DIRHASH_USERMSK;
				hammer2_inode_unlock_sh(xip, xcluster);
				break;
			}
			hammer2_inode_unlock_sh(xip, xcluster);
		}
		r = vop_write_dirent(&error, uio, inum, DT_DIR, 2, "..");
		if (r)
			goto done;
		if (cookies)
			cookies[cookie_index] = saveoff;
		++saveoff;
		++cookie_index;
		if (cookie_index == ncookies)
			goto done;
	}

	lkey = saveoff | HAMMER2_DIRHASH_VISIBLE;
	if (hammer2_debug & 0x0020)
		kprintf("readdir: lkey %016jx\n", lkey);

	/*
	 * parent is the inode cluster, already locked for us.  Don't
	 * double lock shared locks as this will screw up upgrades.
	 */
	if (error) {
		goto done;
	}
	cluster = hammer2_cluster_lookup(cparent, &key_next, lkey, lkey,
					 HAMMER2_LOOKUP_SHARED, &ddflag);
	if (cluster == NULL) {
		cluster = hammer2_cluster_lookup(cparent, &key_next,
						 lkey, (hammer2_key_t)-1,
						 HAMMER2_LOOKUP_SHARED,
						 &ddflag);
	}
	if (cluster)
		hammer2_cluster_bref(cluster, &bref);
	while (cluster) {
		if (hammer2_debug & 0x0020)
			kprintf("readdir: p=%p chain=%p %016jx (next %016jx)\n",
				cparent->focus, cluster->focus,
				bref.key, key_next);

		if (bref.type == HAMMER2_BREF_TYPE_INODE) {
			ripdata = &hammer2_cluster_rdata(cluster)->ipdata;
			dtype = hammer2_get_dtype(ripdata);
			saveoff = bref.key & HAMMER2_DIRHASH_USERMSK;
			r = vop_write_dirent(&error, uio,
					     ripdata->inum &
					      HAMMER2_DIRHASH_USERMSK,
					     dtype,
					     ripdata->name_len,
					     ripdata->filename);
			if (r)
				break;
			if (cookies)
				cookies[cookie_index] = saveoff;
			++cookie_index;
		} else {
			/* XXX chain error */
			kprintf("bad chain type readdir %d\n", bref.type);
		}

		/*
		 * Keys may not be returned in order so once we have a
		 * placemarker (cluster) the scan must allow the full range
		 * or some entries will be missed.
		 */
		cluster = hammer2_cluster_next(cparent, cluster, &key_next,
					       key_next, (hammer2_key_t)-1,
					       HAMMER2_LOOKUP_SHARED);
		if (cluster) {
			hammer2_cluster_bref(cluster, &bref);
			saveoff = (bref.key & HAMMER2_DIRHASH_USERMSK) + 1;
		} else {
			saveoff = (hammer2_key_t)-1;
		}
		if (cookie_index == ncookies)
			break;
	}
	if (cluster)
		hammer2_cluster_unlock(cluster);
done:
	hammer2_inode_unlock_sh(ip, cparent);
	if (ap->a_eofflag)
		*ap->a_eofflag = (cluster == NULL);
	if (hammer2_debug & 0x0020)
		kprintf("readdir: done at %016jx\n", saveoff);
	uio->uio_offset = saveoff & ~HAMMER2_DIRHASH_VISIBLE;
	if (error && cookie_index == 0) {
		if (cookies) {
			kfree(cookies, M_TEMP);
			*ap->a_ncookies = 0;
			*ap->a_cookies = NULL;
		}
	} else {
		if (cookies) {
			*ap->a_ncookies = cookie_index;
			*ap->a_cookies = cookies;
		}
	}
	LOCKSTOP;
	return (error);
}

/*
 * hammer2_vop_readlink { vp, uio, cred }
 */
static
int
hammer2_vop_readlink(struct vop_readlink_args *ap)
{
	struct vnode *vp;
	hammer2_inode_t *ip;
	int error;

	vp = ap->a_vp;
	if (vp->v_type != VLNK)
		return (EINVAL);
	ip = VTOI(vp);

	error = hammer2_read_file(ip, ap->a_uio, 0);
	return (error);
}
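
/*
 * Read from a regular file via the logical buffer cache.
 */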
static
int
hammer2_vop_read(struct vop_read_args *ap)
{
	struct vnode *vp;
	hammer2_inode_t *ip;
	struct uio *uio;
	int error;
	int seqcount;
	int bigread;

	/*
	 * Read operations supported on this vnode?
	 */
	vp = ap->a_vp;
	if (vp->v_type != VREG)
		return (EINVAL);

	/*
	 * Misc
	 */
	ip = VTOI(vp);
	uio = ap->a_uio;
	error = 0;

	seqcount = ap->a_ioflag >> 16;
	bigread = (uio->uio_resid > 100 * 1024 * 1024);

	error = hammer2_read_file(ip, uio, seqcount);
	return (error);
}
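
/*
 * Write to a regular file.  The logical write is performed under a
 * transaction which interlocks against flush initiations.
 */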
static
int
hammer2_vop_write(struct vop_write_args *ap)
{
	hammer2_inode_t *ip;
	hammer2_trans_t trans;
	thread_t td;
	struct vnode *vp;
	struct uio *uio;
	int error;
	int seqcount;
	int bigwrite;

	/*
	 * Write operations supported on this vnode?
	 */
	vp = ap->a_vp;
	if (vp->v_type != VREG)
		return (EINVAL);

	/*
	 * Misc
	 */
	ip = VTOI(vp);
	uio = ap->a_uio;
	error = 0;
	if (ip->pmp->ronly) {
		return (EROFS);
	}

	seqcount = ap->a_ioflag >> 16;

	/*
	 * Check resource limit
	 */
	if (uio->uio_resid > 0 && (td = uio->uio_td) != NULL && td->td_proc &&
	    uio->uio_offset + uio->uio_resid >
	     td->td_proc->p_rlimit[RLIMIT_FSIZE].rlim_cur) {
		lwpsignal(td->td_proc, td->td_lwp, SIGXFSZ);
		return (EFBIG);
	}

	bigwrite = (uio->uio_resid > 100 * 1024 * 1024);

	/*
	 * The transaction interlocks against flush initiations
	 * (note: but will run concurrently with the actual flush).
	 */
	hammer2_trans_init(&trans, ip->pmp, 0);
	error = hammer2_write_file(ip, uio, ap->a_ioflag, seqcount);
	hammer2_trans_done(&trans);

	return (error);
}

/*
 * Perform read operations on a file or symlink given an UNLOCKED
 * inode and uio.
 *
 * The passed ip is not locked.
 */
static
int
hammer2_read_file(hammer2_inode_t *ip, struct uio *uio, int seqcount)
{
	hammer2_off_t size;
	struct buf *bp;
	int error;

	error = 0;

	/*
	 * UIO read loop.
	 *
	 * WARNING! Assumes that the kernel interlocks size changes at the
	 *	    vnode level.
	 */
	hammer2_mtx_sh(&ip->lock);
	size = ip->size;
	hammer2_mtx_unlock(&ip->lock);

	while (uio->uio_resid > 0 && uio->uio_offset < size) {
		hammer2_key_t lbase;
		hammer2_key_t leof;
		int lblksize;
		int loff;
		int n;

		lblksize = hammer2_calc_logical(ip, uio->uio_offset,
						&lbase, &leof);

		error = cluster_read(ip->vp, leof, lbase, lblksize,
				     uio->uio_resid, seqcount * BKVASIZE,
				     &bp);

		if (error)
			break;
		loff = (int)(uio->uio_offset - lbase);
		n = lblksize - loff;
		if (n > uio->uio_resid)
			n = uio->uio_resid;
		if (n > size - uio->uio_offset)
			n = (int)(size - uio->uio_offset);
		bp->b_flags |= B_AGE;
		uiomove((char *)bp->b_data + loff, n, uio);
		bqrelse(bp);
	}
	return (error);
}

/*
 * Write to the file represented by the inode via the logical buffer cache.
 * The inode may represent a regular file or a symlink.
 *
 * The inode must not be locked.
 */
static
int
hammer2_write_file(hammer2_inode_t *ip,
		   struct uio *uio, int ioflag, int seqcount)
{
	hammer2_key_t old_eof;
	hammer2_key_t new_eof;
	struct buf *bp;
	int kflags;
	int error;
	int modified;

	/*
	 * Setup if append
	 *
	 * WARNING! Assumes that the kernel interlocks size changes at the
	 *	    vnode level.
	 */
	hammer2_mtx_ex(&ip->lock);
	if (ioflag & IO_APPEND)
		uio->uio_offset = ip->size;
	old_eof = ip->size;
	hammer2_mtx_unlock(&ip->lock);

	/*
	 * Extend the file if necessary.  If the write fails at some point
	 * we will truncate it back down to cover as much as we were able
	 * to write.
	 *
	 * Doing this now makes it easier to calculate buffer sizes in
	 * the loop.
	 */
	kflags = 0;
	error = 0;
	modified = 0;

	if (uio->uio_offset + uio->uio_resid > old_eof) {
		new_eof = uio->uio_offset + uio->uio_resid;
		modified = 1;
		hammer2_extend_file(ip, new_eof);
		kflags |= NOTE_EXTEND;
	} else {
		new_eof = old_eof;
	}

	/*
	 * UIO write loop
	 */
	while (uio->uio_resid > 0) {
		hammer2_key_t lbase;
		int trivial;
		int endofblk;
		int lblksize;
		int loff;
		int n;

		/*
		 * Don't allow the buffer build to blow out the buffer
		 * cache.
		 */
		if ((ioflag & IO_RECURSE) == 0)
			bwillwrite(HAMMER2_PBUFSIZE);

		/*
		 * This nominally tells us how much we can cluster and
		 * what the logical buffer size needs to be.  Currently
		 * we don't try to cluster the write and just handle one
		 * block at a time.
		 */
		lblksize = hammer2_calc_logical(ip, uio->uio_offset,
						&lbase, NULL);
		loff = (int)(uio->uio_offset - lbase);

		KKASSERT(lblksize <= 65536);

		/*
		 * Calculate bytes to copy this transfer and whether the
		 * copy completely covers the buffer or not.
		 */
		trivial = 0;
		n = lblksize - loff;
		if (n > uio->uio_resid) {
			n = uio->uio_resid;
			if (loff == lbase && uio->uio_offset + n == new_eof)
				trivial = 1;
			endofblk = 0;
		} else {
			if (loff == 0)
				trivial = 1;
			endofblk = 1;
		}

		/*
		 * Get the buffer
		 */
		if (uio->uio_segflg == UIO_NOCOPY) {
			/*
			 * Issuing a write with the same data backing the
			 * buffer.  Instantiate the buffer to collect the
			 * backing vm pages, then read-in any missing bits.
			 *
			 * This case is used by vop_stdputpages().
			 */
			bp = getblk(ip->vp, lbase, lblksize, GETBLK_BHEAVY, 0);
			if ((bp->b_flags & B_CACHE) == 0) {
				bqrelse(bp);
				error = bread(ip->vp, lbase, lblksize, &bp);
			}
		} else if (trivial) {
			/*
			 * Even though we are entirely overwriting the buffer
			 * we may still have to zero it out to avoid a
			 * mmap/write visibility issue.
			 */
			bp = getblk(ip->vp, lbase, lblksize, GETBLK_BHEAVY, 0);
			if ((bp->b_flags & B_CACHE) == 0)
				vfs_bio_clrbuf(bp);
		} else {
			/*
			 * Partial overwrite, read in any missing bits then
			 * replace the portion being written.
			 *
			 * (The strategy code will detect zero-fill physical
			 * blocks for this case).
			 */
			error = bread(ip->vp, lbase, lblksize, &bp);
			if (error == 0)
				bheavy(bp);
		}

		if (error) {
			brelse(bp);
			break;
		}

		/*
		 * Ok, copy the data in
		 */
		error = uiomove(bp->b_data + loff, n, uio);
		kflags |= NOTE_WRITE;
		modified = 1;
		if (error) {
			brelse(bp);
			break;
		}

		/*
		 * WARNING: Pageout daemon will issue UIO_NOCOPY writes
		 *	    with IO_SYNC or IO_ASYNC set.  These writes
		 *	    must be handled as the pageout daemon expects.
		 */
		if (ioflag & IO_SYNC) {
			bwrite(bp);
		} else if ((ioflag & IO_DIRECT) && endofblk) {
			bawrite(bp);
		} else if (ioflag & IO_ASYNC) {
			bawrite(bp);
		} else {
			bdwrite(bp);
		}
	}

	/*
	 * Cleanup.  If we extended the file EOF but failed to write through,
	 * the entire write is a failure and we have to back up.
	 */
	if (error && new_eof != old_eof) {
		hammer2_truncate_file(ip, old_eof);
	} else if (modified) {
		hammer2_mtx_ex(&ip->lock);
		hammer2_update_time(&ip->mtime);
		atomic_set_int(&ip->flags, HAMMER2_INODE_MTIME);
		hammer2_mtx_unlock(&ip->lock);
	}
	atomic_set_int(&ip->flags, HAMMER2_INODE_MODIFIED);
	hammer2_knote(ip->vp, kflags);
	vsetisdirty(ip->vp);

	return error;
}

/*
 * Truncate the size of a file.  The inode must not be locked.
 *
 * NOTE:    Caller handles setting HAMMER2_INODE_MODIFIED
 *
 * WARNING: nvtruncbuf() can only be safely called without the inode lock
 *	    held due to the way our write thread works.
 *
 * WARNING! Assumes that the kernel interlocks size changes at the
 *	    vnode level.
 */
static
void
hammer2_truncate_file(hammer2_inode_t *ip, hammer2_key_t nsize)
{
	hammer2_key_t lbase;
	int nblksize;

	LOCKSTART;
	if (ip->vp) {
		nblksize = hammer2_calc_logical(ip, nsize, &lbase, NULL);
		nvtruncbuf(ip->vp, nsize,
			   nblksize, (int)nsize & (nblksize - 1),
			   0);
	}
	hammer2_mtx_ex(&ip->lock);
	ip->size = nsize;
	atomic_set_int(&ip->flags, HAMMER2_INODE_RESIZED);
	hammer2_mtx_unlock(&ip->lock);
	LOCKSTOP;
}

/*
 * Extend the size of a file.  The inode must not be locked.
 *
 * WARNING! Assumes that the kernel interlocks size changes at the
 *	    vnode level.
 *
 * NOTE: Caller handles setting HAMMER2_INODE_MODIFIED
 */
static
void
hammer2_extend_file(hammer2_inode_t *ip, hammer2_key_t nsize)
{
	hammer2_key_t lbase;
	hammer2_key_t osize;
	int oblksize;
	int nblksize;

	LOCKSTART;
	hammer2_mtx_ex(&ip->lock);
	osize = ip->size;
	ip->size = nsize;
	hammer2_mtx_unlock(&ip->lock);

	if (ip->vp) {
		oblksize = hammer2_calc_logical(ip, osize, &lbase, NULL);
		nblksize = hammer2_calc_logical(ip, nsize, &lbase, NULL);
		nvextendbuf(ip->vp,
			    osize, nsize,
			    oblksize, nblksize,
			    -1, -1, 0);
	}
	atomic_set_int(&ip->flags, HAMMER2_INODE_RESIZED);
	LOCKSTOP;
}
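
/*
 * Resolve a directory entry to an inode and vnode, translating any
 * hardlink pointer encountered to the actual hardlink target.
 */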
static
int
hammer2_vop_nresolve(struct vop_nresolve_args *ap)
{
	hammer2_inode_t *ip;
	hammer2_inode_t *dip;
	hammer2_cluster_t *cparent;
	hammer2_cluster_t *cluster;
	const hammer2_inode_data_t *ripdata;
	hammer2_key_t key_next;
	hammer2_key_t lhc;
	struct namecache *ncp;
	const uint8_t *name;
	size_t name_len;
	int error = 0;
	int ddflag;
	struct vnode *vp;

	LOCKSTART;
	dip = VTOI(ap->a_dvp);
	ncp = ap->a_nch->ncp;
	name = ncp->nc_name;
	name_len = ncp->nc_nlen;
	lhc = hammer2_dirhash(name, name_len);

	/*
	 * Note: In DragonFly the kernel handles '.' and '..'.
	 */
	cparent = hammer2_inode_lock_sh(dip);
	cluster = hammer2_cluster_lookup(cparent, &key_next,
					 lhc, lhc + HAMMER2_DIRHASH_LOMASK,
					 HAMMER2_LOOKUP_SHARED, &ddflag);
	while (cluster) {
		if (hammer2_cluster_type(cluster) == HAMMER2_BREF_TYPE_INODE) {
			ripdata = &hammer2_cluster_rdata(cluster)->ipdata;
			if (ripdata->name_len == name_len &&
			    bcmp(ripdata->filename, name, name_len) == 0) {
				break;
			}
		}
		cluster = hammer2_cluster_next(cparent, cluster, &key_next,
					       key_next,
					       lhc + HAMMER2_DIRHASH_LOMASK,
					       HAMMER2_LOOKUP_SHARED);
	}
	hammer2_inode_unlock_sh(dip, cparent);

	/*
	 * Resolve hardlink entries before acquiring the inode.
	 */
	if (cluster) {
		ripdata = &hammer2_cluster_rdata(cluster)->ipdata;
		if (ripdata->type == HAMMER2_OBJTYPE_HARDLINK) {
			hammer2_tid_t inum = ripdata->inum;
			error = hammer2_hardlink_find(dip, NULL, cluster);
			if (error) {
				kprintf("hammer2: unable to find hardlink "
					"0x%016jx\n", inum);
				hammer2_cluster_unlock(cluster);
				LOCKSTOP;
				return error;
			}
		}
	}

	/*
	 * nresolve needs to resolve hardlinks, the original cluster is not
	 * sufficient.
	 */
	if (cluster) {
		ip = hammer2_inode_get(dip->pmp, dip, cluster);
		ripdata = &hammer2_cluster_rdata(cluster)->ipdata;
		if (ripdata->type == HAMMER2_OBJTYPE_HARDLINK) {
			kprintf("nresolve: fixup hardlink\n");
			hammer2_inode_ref(ip);
			hammer2_inode_unlock_ex(ip, NULL);
			hammer2_cluster_unlock(cluster);
			cluster = hammer2_inode_lock_ex(ip);
			ripdata = &hammer2_cluster_rdata(cluster)->ipdata;
			hammer2_inode_drop(ip);
			kprintf("nresolve: fixup to type %02x\n",
				ripdata->type);
		}
	} else {
		ip = NULL;
	}

#if 0
	/*
	 * Deconsolidate any hardlink whose nlinks == 1.  Ignore errors.
	 * If an error occurs chain and ip are left alone.
	 *
	 * XXX upgrade shared lock?
	 */
	if (ochain && chain &&
	    chain->data->ipdata.nlinks == 1 && !dip->pmp->ronly) {
		kprintf("hammer2: need to unconsolidate hardlink for %s\n",
			chain->data->ipdata.filename);
		/* XXX retain shared lock on dip? (currently not held) */
		hammer2_trans_init(&trans, dip->pmp, 0);
		hammer2_hardlink_deconsolidate(&trans, dip, &chain, &ochain);
		hammer2_trans_done(&trans);
	}
#endif

	/*
	 * Acquire the related vnode
	 *
	 * NOTE: For error processing, only ENOENT resolves the namecache
	 *	 entry to NULL, otherwise we just return the error and
	 *	 leave the namecache unresolved.
	 *
	 * NOTE: multiple hammer2_inode structures can be aliased to the
	 *	 same chain element, for example for hardlinks.  This
	 *	 use case does not 'reattach' inode associations that
	 *	 might already exist, but always allocates a new one.
	 *
	 * WARNING: inode structure is locked exclusively via inode_get
	 *	    but chain was locked shared.  inode_unlock_ex()
	 *	    will handle it properly.
	 */
	if (cluster) {
		vp = hammer2_igetv(ip, cluster, &error);
		if (error == 0) {
			vn_unlock(vp);
			cache_setvp(ap->a_nch, vp);
		} else if (error == ENOENT) {
			cache_setvp(ap->a_nch, NULL);
		}
		hammer2_inode_unlock_ex(ip, cluster);

		/*
		 * The vp should not be released until after we've disposed
		 * of our locks, because it might cause vop_inactive() to
		 * be called.
		 */
		if (vp)
			vrele(vp);
	} else {
		error = ENOENT;
		cache_setvp(ap->a_nch, NULL);
	}
	KASSERT(error || ap->a_nch->ncp->nc_vp != NULL,
		("resolve error %d/%p ap %p\n",
		 error, ap->a_nch->ncp->nc_vp, ap));
	LOCKSTOP;
	return error;
}
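
/*
 * Lookup '..': resolve and return the parent directory of dvp.
 */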
static
int
hammer2_vop_nlookupdotdot(struct vop_nlookupdotdot_args *ap)
{
	hammer2_inode_t *dip;
	hammer2_inode_t *ip;
	hammer2_cluster_t *cparent;
	int error;

	LOCKSTART;
	dip = VTOI(ap->a_dvp);

	if ((ip = dip->pip) == NULL) {
		*ap->a_vpp = NULL;
		LOCKSTOP;
		return ENOENT;
	}
	cparent = hammer2_inode_lock_ex(ip);
	*ap->a_vpp = hammer2_igetv(ip, cparent, &error);
	hammer2_inode_unlock_ex(ip, cparent);

	LOCKSTOP;
	return error;
}
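
/*
 * hammer2_vop_nmkdir { nch, dvp, vpp, cred, vap }
 */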
static
int
hammer2_vop_nmkdir(struct vop_nmkdir_args *ap)
{
	hammer2_inode_t *dip;
	hammer2_inode_t *nip;
	hammer2_trans_t trans;
	hammer2_cluster_t *cluster;
	struct namecache *ncp;
	const uint8_t *name;
	size_t name_len;
	int error;

	LOCKSTART;
	dip = VTOI(ap->a_dvp);
	if (dip->pmp->ronly) {
		LOCKSTOP;
		return (EROFS);
	}

	ncp = ap->a_nch->ncp;
	name = ncp->nc_name;
	name_len = ncp->nc_nlen;
	cluster = NULL;

	hammer2_pfs_memory_wait(dip->pmp);
	hammer2_trans_init(&trans, dip->pmp, HAMMER2_TRANS_NEWINODE);
	nip = hammer2_inode_create(&trans, dip, ap->a_vap, ap->a_cred,
				   name, name_len, &cluster, &error);
	if (error) {
		KKASSERT(nip == NULL);
		*ap->a_vpp = NULL;
	} else {
		*ap->a_vpp = hammer2_igetv(nip, cluster, &error);
		hammer2_inode_unlock_ex(nip, cluster);
	}
	hammer2_trans_done(&trans);

	if (error == 0) {
		cache_setunresolved(ap->a_nch);
		cache_setvp(ap->a_nch, *ap->a_vpp);
	}
	LOCKSTOP;
	return error;
}

/*
 * Return the largest contiguous physical disk range for the logical
 * request, in bytes.
 *
 * (struct vnode *vp, off_t loffset, off_t *doffsetp, int *runp, int *runb)
 *
 * Basically disabled, the logical buffer write thread has to deal with
 * buffers one-at-a-time.
 */
static
int
hammer2_vop_bmap(struct vop_bmap_args *ap)
{
	*ap->a_doffsetp = NOOFFSET;
	if (ap->a_runp)
		*ap->a_runp = 0;
	if (ap->a_runb)
		*ap->a_runb = 0;
	return (EOPNOTSUPP);
}

static
int
hammer2_vop_open(struct vop_open_args *ap)
{
	return vop_stdopen(ap);
}

/*
 * hammer2_vop_advlock { vp, id, op, fl, flags }
 */
static
int
hammer2_vop_advlock(struct vop_advlock_args *ap)
{
	hammer2_inode_t *ip = VTOI(ap->a_vp);
	const hammer2_inode_data_t *ripdata;
	hammer2_cluster_t *cparent;
	hammer2_off_t size;

	cparent = hammer2_inode_lock_sh(ip);
	ripdata = &hammer2_cluster_rdata(cparent)->ipdata;
	size = ripdata->size;
	hammer2_inode_unlock_sh(ip, cparent);
	return (lf_advlock(ap, &ip->advlock, size));
}

static
int
hammer2_vop_close(struct vop_close_args *ap)
{
	return vop_stdclose(ap);
}

/*
 * hammer2_vop_nlink { nch, dvp, vp, cred }
 *
 * Create a hardlink from (vp) to {dvp, nch}.
 */
static
int
hammer2_vop_nlink(struct vop_nlink_args *ap)
{
	hammer2_inode_t *fdip;	/* source directory (ip's parent) */
	hammer2_inode_t *tdip;	/* target directory to create link in */
	hammer2_inode_t *cdip;	/* common parent directory */
	hammer2_inode_t *ip;	/* inode we are hardlinking to */
	hammer2_cluster_t *cluster;
	hammer2_cluster_t *fdcluster;
	hammer2_cluster_t *tdcluster;
	hammer2_cluster_t *cdcluster;
	hammer2_trans_t trans;
	struct namecache *ncp;
	const uint8_t *name;
	size_t name_len;
	int error;

	LOCKSTART;
	tdip = VTOI(ap->a_dvp);
	if (tdip->pmp->ronly) {
		LOCKSTOP;
		return (EROFS);
	}

	ncp = ap->a_nch->ncp;
	name = ncp->nc_name;
	name_len = ncp->nc_nlen;

	/*
	 * ip represents the file being hardlinked.  The file could be a
	 * normal file or a hardlink target if it has already been hardlinked.
	 * If ip is a hardlinked target then ip->pip represents the location
	 * of the hardlinked target, NOT the location of the hardlink pointer.
	 *
	 * Bump nlinks and potentially also create or move the hardlink
	 * target in the parent directory common to (ip) and (tdip).  The
	 * consolidation code can modify ip->cluster and ip->pip.  The
	 * returned cluster is locked.
	 */
	ip = VTOI(ap->a_vp);
	hammer2_pfs_memory_wait(ip->pmp);
	hammer2_trans_init(&trans, ip->pmp, HAMMER2_TRANS_NEWINODE);

	/*
	 * The common parent directory must be locked first to avoid deadlocks.
	 * Also note that fdip and/or tdip might match cdip.
	 */
	fdip = ip->pip;
	cdip = hammer2_inode_common_parent(fdip, tdip);
	cdcluster = hammer2_inode_lock_ex(cdip);
	fdcluster = hammer2_inode_lock_ex(fdip);
	tdcluster = hammer2_inode_lock_ex(tdip);
	cluster = hammer2_inode_lock_ex(ip);
	error = hammer2_hardlink_consolidate(&trans, ip, &cluster,
					     cdip, cdcluster, 1);
	if (error)
		goto done;

	/*
	 * Create a directory entry connected to the specified cluster.
	 *
	 * WARNING! chain can get moved by the connect (indirectly due to
	 *	    potential indirect block creation).
	 */
	error = hammer2_inode_connect(&trans, &cluster, 1,
				      tdip, tdcluster,
				      name, name_len, 0);
	if (error == 0) {
		cache_setunresolved(ap->a_nch);
		cache_setvp(ap->a_nch, ap->a_vp);
	}
done:
	hammer2_inode_unlock_ex(ip, cluster);
	hammer2_inode_unlock_ex(tdip, tdcluster);
	hammer2_inode_unlock_ex(fdip, fdcluster);
	hammer2_inode_unlock_ex(cdip, cdcluster);
	hammer2_inode_drop(cdip);
	hammer2_trans_done(&trans);

	LOCKSTOP;
	return error;
}

/*
 * hammer2_vop_ncreate { nch, dvp, vpp, cred, vap }
 *
 * The operating system has already ensured that the directory entry
 * does not exist and done all appropriate namespace locking.
 */
static
int
hammer2_vop_ncreate(struct vop_ncreate_args *ap)
{
	hammer2_inode_t *dip;
	hammer2_inode_t *nip;
	hammer2_trans_t trans;
	hammer2_cluster_t *ncluster;
	struct namecache *ncp;
	const uint8_t *name;
	size_t name_len;
	int error;

	LOCKSTART;
	dip = VTOI(ap->a_dvp);
	if (dip->pmp->ronly) {
		LOCKSTOP;
		return (EROFS);
	}

	ncp = ap->a_nch->ncp;
	name = ncp->nc_name;
	name_len = ncp->nc_nlen;
	hammer2_pfs_memory_wait(dip->pmp);
	hammer2_trans_init(&trans, dip->pmp, HAMMER2_TRANS_NEWINODE);
	ncluster = NULL;

	nip = hammer2_inode_create(&trans, dip, ap->a_vap, ap->a_cred,
				   name, name_len, &ncluster, &error);
	if (error) {
		KKASSERT(nip == NULL);
		*ap->a_vpp = NULL;
	} else {
		*ap->a_vpp = hammer2_igetv(nip, ncluster, &error);
		hammer2_inode_unlock_ex(nip, ncluster);
	}
	hammer2_trans_done(&trans);

	if (error == 0) {
		cache_setunresolved(ap->a_nch);
		cache_setvp(ap->a_nch, *ap->a_vpp);
	}
	LOCKSTOP;
	return error;
}

/*
 * Make a device node (typically a fifo)
 */
static
int
hammer2_vop_nmknod(struct vop_nmknod_args *ap)
{
	hammer2_inode_t *dip;
	hammer2_inode_t *nip;
	hammer2_trans_t trans;
	hammer2_cluster_t *ncluster;
	struct namecache *ncp;
	const uint8_t *name;
	size_t name_len;
	int error;

	LOCKSTART;
	dip = VTOI(ap->a_dvp);
	if (dip->pmp->ronly) {
		LOCKSTOP;
		return (EROFS);
	}

	ncp = ap->a_nch->ncp;
	name = ncp->nc_name;
	name_len = ncp->nc_nlen;
	hammer2_pfs_memory_wait(dip->pmp);
	hammer2_trans_init(&trans, dip->pmp, HAMMER2_TRANS_NEWINODE);
	ncluster = NULL;

	nip = hammer2_inode_create(&trans, dip, ap->a_vap, ap->a_cred,
				   name, name_len, &ncluster, &error);
	if (error) {
		KKASSERT(nip == NULL);
		*ap->a_vpp = NULL;
	} else {
		*ap->a_vpp = hammer2_igetv(nip, ncluster, &error);
		hammer2_inode_unlock_ex(nip, ncluster);
	}
	hammer2_trans_done(&trans);

	if (error == 0) {
		cache_setunresolved(ap->a_nch);
		cache_setvp(ap->a_nch, *ap->a_vpp);
	}
	LOCKSTOP;
	return error;
}

/*
 * hammer2_vop_nsymlink { nch, dvp, vpp, cred, vap, target }
 */
static
int
hammer2_vop_nsymlink(struct vop_nsymlink_args *ap)
{
	hammer2_inode_t *dip;
	hammer2_inode_t *nip;
	hammer2_cluster_t *ncparent;
	hammer2_trans_t trans;
	struct namecache *ncp;
	const uint8_t *name;
	size_t name_len;
	int error;

	dip = VTOI(ap->a_dvp);
	if (dip->pmp->ronly)
		return (EROFS);

	ncp = ap->a_nch->ncp;
	name = ncp->nc_name;
	name_len = ncp->nc_nlen;
	hammer2_pfs_memory_wait(dip->pmp);
	hammer2_trans_init(&trans, dip->pmp, HAMMER2_TRANS_NEWINODE);
	ncparent = NULL;

	ap->a_vap->va_type = VLNK;	/* enforce type */

	nip = hammer2_inode_create(&trans, dip, ap->a_vap, ap->a_cred,
				   name, name_len, &ncparent, &error);
	if (error) {
		KKASSERT(nip == NULL);
		*ap->a_vpp = NULL;
		hammer2_trans_done(&trans);
		return error;
	}
	*ap->a_vpp = hammer2_igetv(nip, ncparent, &error);

	/*
	 * Build the softlink (~like file data) and finalize the namecache.
	 */
	if (error == 0) {
		size_t bytes;
		struct uio auio;
		struct iovec aiov;
		hammer2_inode_data_t *nipdata;

		nipdata = &hammer2_cluster_wdata(ncparent)->ipdata;
		/* nipdata = &nip->chain->data->ipdata;XXX */
		bytes = strlen(ap->a_target);

		if (bytes <= HAMMER2_EMBEDDED_BYTES) {
			KKASSERT(nipdata->op_flags &
				 HAMMER2_OPFLAG_DIRECTDATA);
			bcopy(ap->a_target, nipdata->u.data, bytes);
			nipdata->size = bytes;
			nip->size = bytes;
			hammer2_cluster_modsync(ncparent);
			hammer2_inode_unlock_ex(nip, ncparent);
			/* nipdata = NULL; not needed */
		} else {
			hammer2_inode_unlock_ex(nip, ncparent);
			/* nipdata = NULL; not needed */
			bzero(&auio, sizeof(auio));
			bzero(&aiov, sizeof(aiov));
			auio.uio_iov = &aiov;
			auio.uio_segflg = UIO_SYSSPACE;
			auio.uio_rw = UIO_WRITE;
			auio.uio_resid = bytes;
			auio.uio_iovcnt = 1;
			auio.uio_td = curthread;
			aiov.iov_base = ap->a_target;
			aiov.iov_len = bytes;
			error = hammer2_write_file(nip, &auio, IO_APPEND, 0);
			/* XXX handle error */
			error = 0;
		}
	} else {
		hammer2_inode_unlock_ex(nip, ncparent);
	}
	hammer2_trans_done(&trans);

	/*
	 * Finalize namecache
	 */
	if (error == 0) {
		cache_setunresolved(ap->a_nch);
		cache_setvp(ap->a_nch, *ap->a_vpp);
		/* hammer2_knote(ap->a_dvp, NOTE_WRITE); */
	}
	return error;
}

/*
 * hammer2_vop_nremove { nch, dvp, cred }
 */
static
int
hammer2_vop_nremove(struct vop_nremove_args *ap)
{
	hammer2_inode_t *dip;
	hammer2_trans_t trans;
	struct namecache *ncp;
	const uint8_t *name;
	size_t name_len;
	int error;

	LOCKSTART;
	dip = VTOI(ap->a_dvp);
	if (dip->pmp->ronly) {
		LOCKSTOP;
		return(EROFS);
	}

	ncp = ap->a_nch->ncp;
	name = ncp->nc_name;
	name_len = ncp->nc_nlen;

	hammer2_pfs_memory_wait(dip->pmp);
	hammer2_trans_init(&trans, dip->pmp, 0);
	error = hammer2_unlink_file(&trans, dip, name, name_len,
				    0, NULL, ap->a_nch, -1);
	hammer2_run_unlinkq(&trans, dip->pmp);
	hammer2_trans_done(&trans);
	if (error == 0)
		cache_unlink(ap->a_nch);
	LOCKSTOP;
	return (error);
}

/*
 * hammer2_vop_nrmdir { nch, dvp, cred }
 */
static
int
hammer2_vop_nrmdir(struct vop_nrmdir_args *ap)
{
	hammer2_inode_t *dip;
	hammer2_trans_t trans;
	struct namecache *ncp;
	const uint8_t *name;
	size_t name_len;
	int error;

	LOCKSTART;
	dip = VTOI(ap->a_dvp);
	if (dip->pmp->ronly) {
		LOCKSTOP;
		return(EROFS);
	}

	ncp = ap->a_nch->ncp;
	name = ncp->nc_name;
	name_len = ncp->nc_nlen;

	hammer2_pfs_memory_wait(dip->pmp);
	hammer2_trans_init(&trans, dip->pmp, 0);
	hammer2_run_unlinkq(&trans, dip->pmp);
	error = hammer2_unlink_file(&trans, dip, name, name_len,
				    1, NULL, ap->a_nch, -1);
	hammer2_trans_done(&trans);
	if (error == 0)
		cache_unlink(ap->a_nch);
	LOCKSTOP;
	return (error);
}

/*
 * hammer2_vop_nrename { fnch, tnch, fdvp, tdvp, cred }
 */
static
int
hammer2_vop_nrename(struct vop_nrename_args *ap)
{
	struct namecache *fncp;
	struct namecache *tncp;
	hammer2_inode_t *cdip;
	hammer2_inode_t *fdip;
	hammer2_inode_t *tdip;
	hammer2_inode_t *ip;
	hammer2_cluster_t *cluster;
	hammer2_cluster_t *fdcluster;
	hammer2_cluster_t *tdcluster;
	hammer2_cluster_t *cdcluster;
	hammer2_trans_t trans;
	const uint8_t *fname;
	size_t fname_len;
	const uint8_t *tname;
	size_t tname_len;
	int error;
	int tnch_error;
	int hlink;

	if (ap->a_fdvp->v_mount != ap->a_tdvp->v_mount)
		return(EXDEV);
	if (ap->a_fdvp->v_mount != ap->a_fnch->ncp->nc_vp->v_mount)
		return(EXDEV);

	fdip = VTOI(ap->a_fdvp);	/* source directory */
	tdip = VTOI(ap->a_tdvp);	/* target directory */

	if (fdip->pmp->ronly)
		return(EROFS);

	LOCKSTART;
	fncp = ap->a_fnch->ncp;		/* entry name in source */
	fname = fncp->nc_name;
	fname_len = fncp->nc_nlen;

	tncp = ap->a_tnch->ncp;		/* entry name in target */
	tname = tncp->nc_name;
	tname_len = tncp->nc_nlen;

	hammer2_pfs_memory_wait(tdip->pmp);
	hammer2_trans_init(&trans, tdip->pmp, 0);

	/*
	 * ip is the inode being renamed.  If this is a hardlink then
	 * ip represents the actual file and not the hardlink marker.
	 */
	ip = VTOI(fncp->nc_vp);
	cluster = NULL;

	/*
	 * The common parent directory must be locked first to avoid deadlocks.
	 * Also note that fdip and/or tdip might match cdip.
	 *
	 * WARNING! fdip may not match ip->pip.  That is, if the source file
	 *	    is already a hardlink then what we are renaming is the
	 *	    hardlink pointer, not the hardlink itself.  The hardlink
	 *	    directory (ip->pip) will already be at a common parent
	 *	    of fdip.
	 *
	 *	    Be sure to use ip->pip when finding the common parent
	 *	    against tdip or we might accidentally move the hardlink
	 *	    target into a subdirectory that makes it inaccessible to
	 *	    other pointers.
	 */
	cdip = hammer2_inode_common_parent(ip->pip, tdip);
	cdcluster = hammer2_inode_lock_ex(cdip);
	fdcluster = hammer2_inode_lock_ex(fdip);
	tdcluster = hammer2_inode_lock_ex(tdip);

	/*
	 * Keep a tight grip on the inode so the temporary unlinking from
	 * the source location prior to linking to the target location
	 * does not cause the cluster to be destroyed.
	 *
	 * NOTE: To avoid deadlocks we cannot lock (ip) while we are
	 *	 unlinking elements from their directories.  Locking
	 *	 the nlinks field does not lock the whole inode.
	 */
	hammer2_inode_ref(ip);

	/*
	 * Remove target if it exists.
	 */
	error = hammer2_unlink_file(&trans, tdip, tname, tname_len,
				    -1, NULL, ap->a_tnch, -1);
	tnch_error = error;
	if (error && error != ENOENT)
		goto done;

	/*
	 * When renaming a hardlinked file we may have to re-consolidate
	 * the location of the hardlink target.
	 *
	 * If ip represents a regular file the consolidation code essentially
	 * does nothing other than return the same locked cluster that was
	 * passed in.
	 *
	 * The returned cluster will be locked.
	 *
	 * WARNING! We do not currently have a local copy of ipdata but
	 *	    we do use one later, so remember that it must be reloaded
	 *	    on any modification to the inode, including connects.
	 */
	cluster = hammer2_inode_lock_ex(ip);
	error = hammer2_hardlink_consolidate(&trans, ip, &cluster,
					     cdip, cdcluster, 0);
	if (error)
		goto done;

	/*
	 * Disconnect (fdip, fname) from the source directory.  This will
	 * disconnect (ip) if it represents a direct file.  If (ip) represents
	 * a hardlink the HARDLINK pointer object will be removed but the
	 * hardlink will stay intact.
	 *
	 * Always pass nch as NULL because we intend to reconnect the inode,
	 * so we don't want hammer2_unlink_file() to rename it to the hidden
	 * open-but-unlinked directory.
	 *
	 * The target cluster may be marked DELETED but will not be destroyed
	 * since we retain our hold on ip and cluster.
	 *
	 * NOTE: We pass nlinks as 0 (not -1) in order to retain the file's
	 *	 link count.
	 */
	error = hammer2_unlink_file(&trans, fdip, fname, fname_len,
				    -1, &hlink, NULL, 0);
	KKASSERT(error != EAGAIN);
	if (error)
		goto done;

	/*
	 * Reconnect ip to target directory using cluster.  Chains cannot
	 * actually be moved, so this will duplicate the cluster in the new
	 * spot and assign it to the ip, replacing the old cluster.
	 *
	 * WARNING: Because recursive locks are allowed and we unlinked the
	 *	    file that we have a cluster-in-hand for just above, the
	 *	    cluster might have been delete-duplicated.  We must
	 *	    refactor the cluster.
	 *
	 * WARNING: Chain locks can lock buffer cache buffers, to avoid
	 *	    deadlocks we want to unlock before issuing a cache_*()
	 *	    op (that might have to lock a vnode).
	 *
	 * NOTE: Pass nlinks as 0 because we retained the link count from
	 *	 the unlink, so we do not have to modify it.
	 */
	error = hammer2_inode_connect(&trans, &cluster, hlink,
				      tdip, tdcluster,
				      tname, tname_len, 0);
	if (error == 0) {
		KKASSERT(cluster != NULL);
		hammer2_inode_repoint(ip, (hlink ? ip->pip : tdip), cluster);
	}
done:
	hammer2_inode_unlock_ex(ip, cluster);
	hammer2_inode_unlock_ex(tdip, tdcluster);
	hammer2_inode_unlock_ex(fdip, fdcluster);
	hammer2_inode_unlock_ex(cdip, cdcluster);
	hammer2_inode_drop(ip);
	hammer2_inode_drop(cdip);
	hammer2_run_unlinkq(&trans, fdip->pmp);
	hammer2_trans_done(&trans);

	/*
	 * Issue the namecache update after unlocking all the internal
	 * hammer structures, otherwise we might deadlock.
	 */
	if (tnch_error == 0) {
		cache_unlink(ap->a_tnch);
		cache_setunresolved(ap->a_tnch);
	}
	if (error == 0)
		cache_rename(ap->a_fnch, ap->a_tnch);

	LOCKSTOP;
	return (error);
}

/*
 * Strategy code (async logical file buffer I/O from system)
 *
 * WARNING: The strategy code cannot safely use hammer2 transactions
 *	    as this can deadlock against vfs_sync's vfsync() call
 *	    if multiple flushes are queued.  All H2 structures must
 *	    already be present and ready for the DIO.
 *
 *	    Reads can be initiated asynchronously, writes have to be
 *	    spooled to a separate thread for action to avoid deadlocks.
 */
static int hammer2_strategy_read(struct vop_strategy_args *ap);
static int hammer2_strategy_write(struct vop_strategy_args *ap);
static void hammer2_strategy_read_callback(hammer2_iocb_t *iocb);

static
int
hammer2_vop_strategy(struct vop_strategy_args *ap)
{
	struct bio *biop;
	struct buf *bp;
	int error;

	biop = ap->a_bio;
	bp = biop->bio_buf;

	switch(bp->b_cmd) {
	case BUF_CMD_READ:
		error = hammer2_strategy_read(ap);
		++hammer2_iod_file_read;
		break;
	case BUF_CMD_WRITE:
		error = hammer2_strategy_write(ap);
		++hammer2_iod_file_write;
		break;
	default:
		bp->b_error = error = EINVAL;
		bp->b_flags |= B_ERROR;
		biodone(biop);
		break;
	}
	return (error);
}

/*
 * Logical buffer I/O, async read.
 */
static
int
hammer2_strategy_read(struct vop_strategy_args *ap)
{
	struct buf *bp;
	struct bio *bio;
	struct bio *nbio;
	hammer2_inode_t *ip;
	hammer2_cluster_t *cparent;
	hammer2_cluster_t *cluster;
	hammer2_key_t key_dummy;
	hammer2_key_t lbase;
	int ddflag;
	uint8_t btype;

	bio = ap->a_bio;
	bp = bio->bio_buf;
	ip = VTOI(ap->a_vp);
	nbio = push_bio(bio);

	lbase = bio->bio_offset;
	KKASSERT(((int)lbase & HAMMER2_PBUFMASK) == 0);

	/*
	 * Lookup the file offset.
	 */
	cparent = hammer2_inode_lock_sh(ip);
	cluster = hammer2_cluster_lookup(cparent, &key_dummy,
					 lbase, lbase,
					 HAMMER2_LOOKUP_NODATA |
					 HAMMER2_LOOKUP_SHARED,
					 &ddflag);
	hammer2_inode_unlock_sh(ip, cparent);

	/*
	 * Data is zero-fill if no cluster could be found
	 * (XXX or EIO on a cluster failure).
	 */
	if (cluster == NULL) {
		bp->b_resid = 0;
		bp->b_error = 0;
		bzero(bp->b_data, bp->b_bcount);
		biodone(nbio);
		return(0);
	}

	/*
	 * Cluster elements must be type INODE or type DATA, but the
	 * compression mode (or not) for DATA chains can be different for
	 * each chain.  This will be handled by the callback.
	 *
	 * If the cluster already has valid data the callback will be made
	 * immediately/synchronously.
	 */
	btype = hammer2_cluster_type(cluster);
	if (btype != HAMMER2_BREF_TYPE_INODE &&
	    btype != HAMMER2_BREF_TYPE_DATA) {
		panic("READ PATH: hammer2_strategy_read: unknown bref type");
	}
	hammer2_cluster_load_async(cluster, hammer2_strategy_read_callback,
				   nbio);
	return(0);
}
2164 * Read callback for hammer2_cluster_load_async(). The load function may
2165 * start several actual I/Os but will only make one callback, typically with
2166 * the first valid I/O XXX
2168 static
2169 void
2170 hammer2_strategy_read_callback(hammer2_iocb_t *iocb)
2172 struct bio *bio = iocb->ptr; /* original logical buffer */
2173 struct buf *bp = bio->bio_buf; /* original logical buffer */
2174 hammer2_chain_t *chain;
2175 hammer2_cluster_t *cluster;
2176 hammer2_io_t *dio;
2177 char *data;
2178 int i;
2181 * Extract data and handle iteration on I/O failure.  iocb->lbase
2182 * is the cluster index for iteration.
2184 cluster = iocb->cluster;
2185 dio = iocb->dio; /* can be NULL if iocb not in progress */
2188 * Work to do if INPROG set, else dio is already good or dio is
2189 * NULL (which is the shortcut case if chain->data is already good).
2191 if (iocb->flags & HAMMER2_IOCB_INPROG) {
2193 * Read attempt not yet made. Issue an asynchronous read
2194 * if necessary and return; the operation will chain back to
2195 * this function.
2197 if ((iocb->flags & HAMMER2_IOCB_READ) == 0) {
2198 if (dio->bp == NULL ||
2199 (dio->bp->b_flags & B_CACHE) == 0) {
2200 if (dio->bp) {
2201 bqrelse(dio->bp);
2202 dio->bp = NULL;
2204 iocb->flags |= HAMMER2_IOCB_READ;
2205 breadcb(dio->hmp->devvp,
2206 dio->pbase, dio->psize,
2207 hammer2_io_callback, iocb);
2208 return;
2214 * If we have a DIO it is now complete; check for an error and
2215 * locate the data.
2217 * If there is no DIO it is an optimization by
2218 * hammer2_cluster_load_async(); the data is available in
2219 * chain->data.
2221 if (dio) {
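/*
 * A device-level error iterates to the next chain in the cluster so
 * the read can be retried from another copy.  The logical buffer is
 * only flagged B_ERROR once every element has been exhausted.
 */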
2222 if (dio->bp->b_flags & B_ERROR) {
2223 i = (int)iocb->lbase + 1;
2224 if (i >= cluster->nchains) {
2225 bp->b_flags |= B_ERROR;
2226 bp->b_error = dio->bp->b_error;
2227 hammer2_io_complete(iocb);
2228 biodone(bio);
2229 hammer2_cluster_unlock(cluster);
2230 } else {
2231 hammer2_io_complete(iocb); /* XXX */
2232 chain = cluster->array[i].chain;
2233 kprintf("hammer2: IO CHAIN-%d %p\n", i, chain);
2234 hammer2_adjreadcounter(&chain->bref,
2235 chain->bytes);
2236 iocb->chain = chain;
2237 iocb->lbase = (off_t)i;
2238 iocb->flags = 0;
2239 iocb->error = 0;
2240 hammer2_io_getblk(chain->hmp,
2241 chain->bref.data_off,
2242 chain->bytes,
2243 iocb);
2245 return;
2247 chain = iocb->chain;
2248 data = hammer2_io_data(dio, chain->bref.data_off);
2249 } else {
2251 * Special synchronous case, data present in chain->data.
2253 chain = iocb->chain;
2254 data = (void *)chain->data;
2257 if (chain->bref.type == HAMMER2_BREF_TYPE_INODE) {
2259 * Data is embedded in the inode (copy from inode).
2261 bcopy(((hammer2_inode_data_t *)data)->u.data,
2262 bp->b_data, HAMMER2_EMBEDDED_BYTES);
2263 bzero(bp->b_data + HAMMER2_EMBEDDED_BYTES,
2264 bp->b_bcount - HAMMER2_EMBEDDED_BYTES);
2265 bp->b_resid = 0;
2266 bp->b_error = 0;
2267 } else if (chain->bref.type == HAMMER2_BREF_TYPE_DATA) {
2269 * Data is on-media, issue device I/O and copy.
2271 * XXX direct-IO shortcut could go here XXX.
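* The compression method is encoded per-block in bref.methods, so a
* single file can mix LZ4, ZLIB and uncompressed blocks.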
2273 switch (HAMMER2_DEC_COMP(chain->bref.methods)) {
2274 case HAMMER2_COMP_LZ4:
2275 hammer2_decompress_LZ4_callback(data, chain->bytes,
2276 bio);
2277 break;
2278 case HAMMER2_COMP_ZLIB:
2279 hammer2_decompress_ZLIB_callback(data, chain->bytes,
2280 bio);
2281 break;
2282 case HAMMER2_COMP_NONE:
2283 KKASSERT(chain->bytes <= bp->b_bcount);
2284 bcopy(data, bp->b_data, chain->bytes);
2285 if (chain->bytes < bp->b_bcount) {
2286 bzero(bp->b_data + chain->bytes,
2287 bp->b_bcount - chain->bytes);
2289 bp->b_flags |= B_NOTMETA;
2290 bp->b_resid = 0;
2291 bp->b_error = 0;
2292 break;
2293 default:
2294 panic("hammer2_strategy_read: "
2295 "unknown compression type");
2297 } else {
2298 /* bqrelse the dio to help stabilize the call to panic() */
2299 if (dio)
2300 hammer2_io_bqrelse(&dio);
2301 panic("hammer2_strategy_read: unknown bref type");
2305 * Once the iocb is cleaned up the DIO (if any) will no longer be
2306 * in-progress but will still have a ref. Be sure to release
2307 * the ref.
2309 hammer2_io_complete(iocb); /* physical management */
2310 if (dio) /* physical dio & buffer */
2311 hammer2_io_bqrelse(&dio);
2312 hammer2_cluster_unlock(cluster); /* cluster management */
2313 biodone(bio); /* logical buffer */
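/*
 * Logical buffer I/O, async write: hand the buffer to the per-PFS
 * write thread instead of running a transaction here (see the
 * WARNING above hammer2_vop_strategy()).
 */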
2316 static
2317 int
2318 hammer2_strategy_write(struct vop_strategy_args *ap)
2320 hammer2_pfsmount_t *pmp;
2321 struct bio *bio;
2322 struct buf *bp;
2323 hammer2_inode_t *ip;
2325 bio = ap->a_bio;
2326 bp = bio->bio_buf;
2327 ip = VTOI(ap->a_vp);
2328 pmp = ip->pmp;
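/*
 * Queue the bio to the write thread.  A wakeup is only needed when
 * the bioq transitions from empty to non-empty; otherwise the thread
 * is already running and will pick the entry up when it drains the
 * queue.  The lwinprog ref/wait pair throttles callers against the
 * thread so the queue cannot grow without bound.
 */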
2330 hammer2_lwinprog_ref(pmp);
2331 hammer2_mtx_ex(&pmp->wthread_mtx);
2332 if (TAILQ_EMPTY(&pmp->wthread_bioq.queue)) {
2333 bioq_insert_tail(&pmp->wthread_bioq, ap->a_bio);
2334 hammer2_mtx_unlock(&pmp->wthread_mtx);
2335 wakeup(&pmp->wthread_bioq);
2336 } else {
2337 bioq_insert_tail(&pmp->wthread_bioq, ap->a_bio);
2338 hammer2_mtx_unlock(&pmp->wthread_mtx);
2340 hammer2_lwinprog_wait(pmp);
2342 return(0);
2346 * hammer2_vop_ioctl { vp, command, data, fflag, cred }
2348 static
2349 int
2350 hammer2_vop_ioctl(struct vop_ioctl_args *ap)
2352 hammer2_inode_t *ip;
2353 int error;
2355 LOCKSTART;
2356 ip = VTOI(ap->a_vp);
2358 error = hammer2_ioctl(ip, ap->a_command, (void *)ap->a_data,
2359 ap->a_fflag, ap->a_cred);
2360 LOCKSTOP;
2361 return (error);
2364 static
2365 int
2366 hammer2_vop_mountctl(struct vop_mountctl_args *ap)
2368 struct mount *mp;
2369 hammer2_pfsmount_t *pmp;
2370 int rc;
2372 LOCKSTART;
2373 switch (ap->a_op) {
2374 case (MOUNTCTL_SET_EXPORT):
2375 mp = ap->a_head.a_ops->head.vv_mount;
2376 pmp = MPTOPMP(mp);
2378 if (ap->a_ctllen != sizeof(struct export_args))
2379 rc = (EINVAL);
2380 else
2381 rc = vfs_export(mp, &pmp->export,
2382 (const struct export_args *)ap->a_ctl);
2383 break;
2384 default:
2385 rc = vop_stdmountctl(ap);
2386 break;
2388 LOCKSTOP;
2389 return (rc);
2393 * This handles unlinked open files after the vnode is finally dereferenced.
2394 * To avoid deadlocks it cannot be called from the normal vnode recycling
2395 * path, so we call it (1) after an unlink, rmdir, or rename, (2) on every
2396 * flush, and (3) on umount.
2398 void
2399 hammer2_run_unlinkq(hammer2_trans_t *trans, hammer2_pfsmount_t *pmp)
2401 const hammer2_inode_data_t *ripdata;
2402 hammer2_inode_unlink_t *ipul;
2403 hammer2_inode_t *ip;
2404 hammer2_cluster_t *cluster;
2405 hammer2_cluster_t *cparent;
2407 if (TAILQ_EMPTY(&pmp->unlinkq))
2408 return;
2410 LOCKSTART;
2411 hammer2_spin_ex(&pmp->list_spin);
2412 while ((ipul = TAILQ_FIRST(&pmp->unlinkq)) != NULL) {
2413 TAILQ_REMOVE(&pmp->unlinkq, ipul, entry);
2414 hammer2_spin_unex(&pmp->list_spin);
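/*
 * The list spinlock cannot be held across the exclusive inode lock
 * below, so each entry is pulled off the queue and then processed
 * with the spinlock released.
 */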
2415 ip = ipul->ip;
2416 kfree(ipul, pmp->minode);
2418 cluster = hammer2_inode_lock_ex(ip);
2419 ripdata = &hammer2_cluster_rdata(cluster)->ipdata;
2420 if (hammer2_debug & 0x400) {
2421 kprintf("hammer2: unlink on reclaim: %s refs=%d\n",
2422 ripdata->filename, ip->refs);
2424 KKASSERT(ripdata->nlinks == 0);
2426 cparent = hammer2_cluster_parent(cluster);
2427 hammer2_cluster_delete(trans, cparent, cluster,
2428 HAMMER2_DELETE_PERMANENT);
2429 hammer2_cluster_unlock(cparent);
2430 hammer2_inode_unlock_ex(ip, cluster); /* inode lock */
2431 hammer2_inode_drop(ip); /* ipul ref */
2433 hammer2_spin_ex(&pmp->list_spin);
2435 hammer2_spin_unex(&pmp->list_spin);
2436 LOCKSTOP;
2441 * KQFILTER
2443 static void filt_hammer2detach(struct knote *kn);
2444 static int filt_hammer2read(struct knote *kn, long hint);
2445 static int filt_hammer2write(struct knote *kn, long hint);
2446 static int filt_hammer2vnode(struct knote *kn, long hint);
2448 static struct filterops hammer2read_filtops =
2449 { FILTEROP_ISFD | FILTEROP_MPSAFE,
2450 NULL, filt_hammer2detach, filt_hammer2read };
2451 static struct filterops hammer2write_filtops =
2452 { FILTEROP_ISFD | FILTEROP_MPSAFE,
2453 NULL, filt_hammer2detach, filt_hammer2write };
2454 static struct filterops hammer2vnode_filtops =
2455 { FILTEROP_ISFD | FILTEROP_MPSAFE,
2456 NULL, filt_hammer2detach, filt_hammer2vnode };
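/*
 * Attach a knote to the vnode, selecting the filterops matching the
 * requested filter type.
 */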
2458 static
2459 int
2460 hammer2_vop_kqfilter(struct vop_kqfilter_args *ap)
2462 struct vnode *vp = ap->a_vp;
2463 struct knote *kn = ap->a_kn;
2465 switch (kn->kn_filter) {
2466 case EVFILT_READ:
2467 kn->kn_fop = &hammer2read_filtops;
2468 break;
2469 case EVFILT_WRITE:
2470 kn->kn_fop = &hammer2write_filtops;
2471 break;
2472 case EVFILT_VNODE:
2473 kn->kn_fop = &hammer2vnode_filtops;
2474 break;
2475 default:
2476 return (EOPNOTSUPP);
2479 kn->kn_hook = (caddr_t)vp;
2481 knote_insert(&vp->v_pollinfo.vpi_kqinfo.ki_note, kn);
2483 return(0);
2486 static void
2487 filt_hammer2detach(struct knote *kn)
2489 struct vnode *vp = (void *)kn->kn_hook;
2491 knote_remove(&vp->v_pollinfo.vpi_kqinfo.ki_note, kn);
2494 static int
2495 filt_hammer2read(struct knote *kn, long hint)
2497 struct vnode *vp = (void *)kn->kn_hook;
2498 hammer2_inode_t *ip = VTOI(vp);
2499 off_t off;
2501 if (hint == NOTE_REVOKE) {
2502 kn->kn_flags |= (EV_EOF | EV_NODATA | EV_ONESHOT);
2503 return(1);
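/*
 * kn_data is the number of bytes readable from the knote's current
 * file offset, clamped to INTPTR_MAX.
 */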
2505 off = ip->size - kn->kn_fp->f_offset;
2506 kn->kn_data = (off < INTPTR_MAX) ? off : INTPTR_MAX;
2507 if (kn->kn_sfflags & NOTE_OLDAPI)
2508 return(1);
2509 return (kn->kn_data != 0);
2513 static int
2514 filt_hammer2write(struct knote *kn, long hint)
2516 if (hint == NOTE_REVOKE)
2517 kn->kn_flags |= (EV_EOF | EV_NODATA | EV_ONESHOT);
2518 kn->kn_data = 0;
2519 return (1);
2522 static int
2523 filt_hammer2vnode(struct knote *kn, long hint)
2525 if (kn->kn_sfflags & hint)
2526 kn->kn_fflags |= hint;
2527 if (hint == NOTE_REVOKE) {
2528 kn->kn_flags |= (EV_EOF | EV_NODATA);
2529 return (1);
2531 return (kn->kn_fflags != 0);
2535 * FIFO VOPS
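/*
 * No atime update is performed; the call simply fails on a read-only
 * PFS and otherwise succeeds as a no-op.
 */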
2537 static
2538 int
2539 hammer2_vop_markatime(struct vop_markatime_args *ap)
2541 hammer2_inode_t *ip;
2542 struct vnode *vp;
2544 vp = ap->a_vp;
2545 ip = VTOI(vp);
2547 if (ip->pmp->ronly)
2548 return(EROFS);
2549 return(0);
2552 static
2553 int
2554 hammer2_vop_fifokqfilter(struct vop_kqfilter_args *ap)
2556 int error;
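/*
 * Give the generic fifofs filter first crack; fall back to the
 * normal hammer2 kqfilter only if fifofs rejects the filter.
 */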
2558 error = VOCALL(&fifo_vnode_vops, &ap->a_head);
2559 if (error)
2560 error = hammer2_vop_kqfilter(ap);
2561 return(error);
2565 * VOPS vector
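* Three tables: hammer2_vnode_vops for regular vnodes, hammer2_spec_vops
* for device specials, and hammer2_fifo_vops for fifos (defaulting to
* fifo_vnoperate).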
2567 struct vop_ops hammer2_vnode_vops = {
2568 .vop_default = vop_defaultop,
2569 .vop_fsync = hammer2_vop_fsync,
2570 .vop_getpages = vop_stdgetpages,
2571 .vop_putpages = vop_stdputpages,
2572 .vop_access = hammer2_vop_access,
2573 .vop_advlock = hammer2_vop_advlock,
2574 .vop_close = hammer2_vop_close,
2575 .vop_nlink = hammer2_vop_nlink,
2576 .vop_ncreate = hammer2_vop_ncreate,
2577 .vop_nsymlink = hammer2_vop_nsymlink,
2578 .vop_nremove = hammer2_vop_nremove,
2579 .vop_nrmdir = hammer2_vop_nrmdir,
2580 .vop_nrename = hammer2_vop_nrename,
2581 .vop_getattr = hammer2_vop_getattr,
2582 .vop_setattr = hammer2_vop_setattr,
2583 .vop_readdir = hammer2_vop_readdir,
2584 .vop_readlink = hammer2_vop_readlink,
2587 .vop_read = hammer2_vop_read,
2588 .vop_write = hammer2_vop_write,
2589 .vop_open = hammer2_vop_open,
2590 .vop_inactive = hammer2_vop_inactive,
2591 .vop_reclaim = hammer2_vop_reclaim,
2592 .vop_nresolve = hammer2_vop_nresolve,
2593 .vop_nlookupdotdot = hammer2_vop_nlookupdotdot,
2594 .vop_nmkdir = hammer2_vop_nmkdir,
2595 .vop_nmknod = hammer2_vop_nmknod,
2596 .vop_ioctl = hammer2_vop_ioctl,
2597 .vop_mountctl = hammer2_vop_mountctl,
2598 .vop_bmap = hammer2_vop_bmap,
2599 .vop_strategy = hammer2_vop_strategy,
2600 .vop_kqfilter = hammer2_vop_kqfilter
2603 struct vop_ops hammer2_spec_vops = {
2604 .vop_default = vop_defaultop,
2605 .vop_fsync = hammer2_vop_fsync,
2606 .vop_read = vop_stdnoread,
2607 .vop_write = vop_stdnowrite,
2608 .vop_access = hammer2_vop_access,
2609 .vop_close = hammer2_vop_close,
2610 .vop_markatime = hammer2_vop_markatime,
2611 .vop_getattr = hammer2_vop_getattr,
2612 .vop_inactive = hammer2_vop_inactive,
2613 .vop_reclaim = hammer2_vop_reclaim,
2614 .vop_setattr = hammer2_vop_setattr
2617 struct vop_ops hammer2_fifo_vops = {
2618 .vop_default = fifo_vnoperate,
2619 .vop_fsync = hammer2_vop_fsync,
2620 #if 0
2621 .vop_read = hammer2_vop_fiforead,
2622 .vop_write = hammer2_vop_fifowrite,
2623 #endif
2624 .vop_access = hammer2_vop_access,
2625 #if 0
2626 .vop_close = hammer2_vop_fifoclose,
2627 #endif
2628 .vop_markatime = hammer2_vop_markatime,
2629 .vop_getattr = hammer2_vop_getattr,
2630 .vop_inactive = hammer2_vop_inactive,
2631 .vop_reclaim = hammer2_vop_reclaim,
2632 .vop_setattr = hammer2_vop_setattr,
2633 .vop_kqfilter = hammer2_vop_fifokqfilter