nrelease - fix/improve livecd
[dragonfly.git] / sys / vfs / fuse / fuse_vnops.c
blob905702be5850749840caadffb45eb3629fe5c11b
1 /*-
2 * Copyright (c) 2019 Tomohiro Kusumi <tkusumi@netbsd.org>
3 * Copyright (c) 2019 The DragonFly Project
4 * All rights reserved.
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions
8 * are met:
9 * 1. Redistributions of source code must retain the above copyright
10 * notice, this list of conditions and the following disclaimer.
11 * 2. Redistributions in binary form must reproduce the above copyright
12 * notice, this list of conditions and the following disclaimer in the
13 * documentation and/or other materials provided with the distribution.
15 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
16 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
17 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
18 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
19 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
20 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
21 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
22 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
23 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
24 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
25 * SUCH DAMAGE.
28 #include "fuse.h"
30 #include <sys/fcntl.h>
31 #include <sys/dirent.h>
32 #include <sys/uio.h>
33 #include <sys/buf.h>
34 #include <sys/mountctl.h>
35 #include <sys/kern_syscall.h>
36 #include <vm/vm_pager.h>
37 #include <vm/vm_extern.h>
38 #include <vm/vnode_pager.h>
39 #include <vm/vm_pageout.h>
41 #include <sys/buf2.h>
42 #include <vm/vm_page2.h>
44 static int fuse_reg_resize(struct vnode *vp, off_t newsize, int trivial);
45 static void fuse_io_execute(struct fuse_mount *fmp, struct bio *bio);
46 static void fuse_release(struct fuse_mount *fmp, struct fuse_node *fnp);
/*
 * Copy a FUSE_GETATTR/SETATTR-style attribute reply (fuse_attr) into the
 * node's cached vattr.  Caller must hold fnp->node_lock.
 *
 * Returns 0 or an error from fuse_node_truncate() when the userland-reported
 * size disagrees with our cached size and the vnode has a VM object.
 */
static int
fuse_set_attr(struct fuse_node *fnp, struct fuse_attr *fat)
{
        struct vattr *vap = &fnp->attr;
        int error = 0;

        vattr_null(vap);

        vap->va_type = IFTOVT(fat->mode);
        /* sizeoverride means our locally buffered size is newer than fat */
        vap->va_size = (fnp->sizeoverride ? fnp->size : fat->size);
        vap->va_bytes = fat->blocks * S_BLKSIZE;
        vap->va_mode = fat->mode & ~S_IFMT;
        if (!fat->nlink) /* XXX .fuse_hidden* has 0 link */
                vap->va_nlink = 1;
        else
                vap->va_nlink = fat->nlink;
        vap->va_uid = fat->uid;
        vap->va_gid = fat->gid;
        vap->va_fsid = fnp->fmp->mp->mnt_stat.f_fsid.val[0];
        vap->va_fileid = fat->ino;
        vap->va_blocksize = FUSE_BLKSIZE;
        vap->va_rmajor = VNOVAL;
        vap->va_rminor = VNOVAL;
        vap->va_atime.tv_sec = fat->atime;
        vap->va_atime.tv_nsec = fat->atimensec;
        vap->va_mtime.tv_sec = fat->mtime;
        vap->va_mtime.tv_nsec = fat->mtimensec;
        vap->va_ctime.tv_sec = fat->ctime;
        vap->va_ctime.tv_nsec = fat->ctimensec;
        vap->va_flags = 0;
        vap->va_gen = VNOVAL;
        vap->va_vaflags = 0;

        KKASSERT(vap->va_type == fnp->type);

        /*
         * Resolve a size disagreement with userland by truncating /
         * extending our cached pages to the authoritative size.
         */
        if (fnp->vp->v_object && fnp->sizeoverride == 0 &&
            fnp->size != vap->va_size) {
                error = fuse_node_truncate(fnp, fnp->size, vap->va_size);
        }

        fnp->attrgood = 1;

        return error;
}
/*
 * VOP_ACCESS: forward an access(2)-style check to the userland server.
 *
 * Errors from a dead or ENOSYS server degrade to "allow" (return 0) so a
 * wedged filesystem does not lock out local cleanup.
 */
static int
fuse_vop_access(struct vop_access_args *ap)
{
        struct vnode *vp = ap->a_vp;
        mode_t mode = ap->a_mode;
        struct fuse_mount *fmp = VFSTOFUSE(vp->v_mount);
        struct fuse_ipc *fip;
        struct fuse_access_in *fai;
        uint32_t mask;
        int error;

        if (fuse_test_dead(fmp))
                return 0;

        if (fuse_test_nosys(fmp, FUSE_ACCESS))
                return 0;

        switch (vp->v_type) {
        case VDIR:
        case VLNK:
        case VREG:
                /* write access on a read-only mount is refused locally */
                if ((mode & VWRITE) && (vp->v_mount->mnt_flag & MNT_RDONLY))
                        return EROFS;
                break;
        case VBLK:
        case VCHR:
        case VSOCK:
        case VFIFO:
                break;
        default:
                return EINVAL;
        }

        /* translate kernel V* bits to the POSIX access(2) mask FUSE expects */
        mask = F_OK;
        if (mode & VEXEC)
                mask |= X_OK;
        if (mode & VWRITE)
                mask |= W_OK;
        if (mode & VREAD)
                mask |= R_OK;

        fip = fuse_ipc_get(fmp, sizeof(*fai));
        fai = fuse_ipc_fill(fip, FUSE_ACCESS, VTOI(vp)->ino, ap->a_cred);
        fai->mask = mask;

        error = fuse_ipc_tx(fip);
        if (error) {
                /* NOTE: fuse_ipc_tx() releases fip on error (see siblings) */
                if (error == ENOSYS)
                        error = 0;
                if (error == ENOTCONN && (vp->v_flag & VROOT))
                        error = 0;
                return error;
        }

        fuse_ipc_put(fip);

        return 0;
}
/*
 * VOP_OPEN: obtain a FUSE file-handle (fh) from userland if the node does
 * not already have one, then fall through to vop_stdopen().
 */
static int
fuse_vop_open(struct vop_open_args *ap)
{
        struct vnode *vp = ap->a_vp;
        struct fuse_mount *fmp = VFSTOFUSE(vp->v_mount);
        struct fuse_node *fnp = VTOI(vp);
        struct fuse_ipc *fip;
        struct fuse_open_in *foi;
        struct fuse_open_out *foo;
        int error, op;

        if (fuse_test_dead(fmp))
                return ENOTCONN;

        if (fuse_test_nosys(fmp, FUSE_OPEN))
                return EOPNOTSUPP;

        /*
         * Reopen with userland process if the vnode doesn't have a
         * file-handle.  This can occur if the vnode is new or if it
         * was previously deactivated.
         */
        if (fnp->fh == 0) {
                if (vp->v_type == VDIR)
                        op = FUSE_OPENDIR;
                else
                        op = FUSE_OPEN;

                fip = fuse_ipc_get(fmp, sizeof(*foi));
                foi = fuse_ipc_fill(fip, op, fnp->ino, ap->a_cred);
                foi->flags = OFLAGS(ap->a_mode);
                fuse_dbg("flags=%X\n", foi->flags);
                /* creation already happened via FUSE_CREATE; never re-create */
                if (foi->flags & O_CREAT) {
                        fuse_dbg("drop O_CREAT\n");
                        foi->flags &= ~O_CREAT;
                }

                error = fuse_ipc_tx(fip);
                if (error)
                        return error;

                /* XXX unused: server open-flag hints are not acted on yet */
                foo = fuse_out_data(fip);
                if (foo->open_flags & FOPEN_DIRECT_IO)
                        ;
                else if (foo->open_flags & FOPEN_KEEP_CACHE)
                        ;
                else if (foo->open_flags & FOPEN_NONSEEKABLE)
                        ;
                else if (foo->open_flags & FOPEN_CACHE_DIR)
                        ;

                fnp->closed = false;
                fnp->fh = foo->fh;
                fuse_ipc_put(fip);
        }

        return vop_stdopen(ap);
}
/*
 * NOTE: We do not release the file-handle on close() as the vnode
 * may still be in active use as an active directory or memory-mapped.
 *
 * However, to reduce overhead we issue vfinalize() to tell the kernel
 * to attempt to finalize (deactivate) the vnode as soon as it can.
 */
static int
fuse_vop_close(struct vop_close_args *ap)
{
        struct vnode *vp = ap->a_vp;
        struct fuse_mount *fmp = VFSTOFUSE(vp->v_mount);

        if (fuse_test_dead(fmp))
                return 0;

        if (fuse_test_nosys(fmp, FUSE_RELEASE) ||
            fuse_test_nosys(fmp, FUSE_RELEASEDIR)) {
                return EOPNOTSUPP;
        }

        /*
         * Finalize immediately if not dirty, otherwise we will check
         * during the fsync and try to finalize then.
         */
        if ((vp->v_flag & VISDIRTY) == 0 &&
            RB_EMPTY(&vp->v_rbdirty_tree)) {
                vfinalize(vp);
        }

        return vop_stdclose(ap);
}
/*
 * VOP_FSYNC: flush dirty buffers to the userland server, then issue
 * FUSE_FSYNC/FSYNCDIR if we still hold a file-handle.
 */
static int
fuse_vop_fsync(struct vop_fsync_args *ap)
{
        struct vnode *vp = ap->a_vp;
        struct fuse_mount *fmp = VFSTOFUSE(vp->v_mount);
        struct fuse_ipc *fip;
        struct fuse_fsync_in *fsi;
        struct fuse_node *fnp = VTOI(vp);
        int error, op;

        if (fuse_test_dead(fmp))
                return 0;

        if (fuse_test_nosys(fmp, FUSE_FSYNC))
                return 0;

        /*
         * fsync any dirty buffers, wait for completion.
         */
        vclrisdirty(vp);
        vfsync(vp, ap->a_waitfor, 1, NULL, NULL);
        bio_track_wait(&vp->v_track_write, 0, 0);
        fnp->sizeoverride = 0;

        /*
         * Ask DragonFly to deactivate the vnode ASAP if it is no longer
         * open.
         */
        if (vp->v_opencount == 0)
                vfinalize(vp);

        if (fnp->fh) {
                if (vp->v_type == VDIR)
                        op = FUSE_FSYNCDIR;
                else
                        op = FUSE_FSYNC;

                fip = fuse_ipc_get(fmp, sizeof(*fsi));
                fsi = fuse_ipc_fill(fip, op, VTOI(vp)->ino, NULL);
                fsi->fh = fnp->fh;
                fsi->fsync_flags = 1; /* datasync */

                /* fuse_ipc_tx() releases fip itself on error */
                error = fuse_ipc_tx(fip);
                if (error == 0)
                        fuse_ipc_put(fip);
        } else {
                error = 0;
        }

        return error;
}
/*
 * VOP_GETATTR: return cached attributes when attrgood is set, otherwise
 * fetch fresh attributes from userland via FUSE_GETATTR.
 */
static int
fuse_vop_getattr(struct vop_getattr_args *ap)
{
        struct vnode *vp = ap->a_vp;
        struct vattr *vap = ap->a_vap;
        struct fuse_mount *fmp = VFSTOFUSE(vp->v_mount);
        struct fuse_node *fnp = VTOI(vp);
        struct fuse_ipc *fip;
        struct fuse_getattr_in *fgi;
        struct fuse_attr_out *fao;
        int error;

        if (fuse_test_dead(fmp))
                return 0;

        if (fuse_test_nosys(fmp, FUSE_GETATTR))
                return 0;

        if (fnp->attrgood == 0) {
                /*
                 * Acquire new attribute
                 */
                fip = fuse_ipc_get(fmp, sizeof(*fgi));
                fgi = fuse_ipc_fill(fip, FUSE_GETATTR, fnp->ino, NULL);
#if 0
                /* this may be called before open when fh is 0 */
                fgi->getattr_flags |= FUSE_GETATTR_FH;
                fgi->fh = fnp->fh;
#endif
                error = fuse_ipc_tx(fip);
                if (error) {
                        if (error == ENOSYS)
                                error = 0;
                        /* fabricate minimal attrs for the root on disconnect */
                        if (error == ENOTCONN && (vp->v_flag & VROOT)) {
                                memset(vap, 0, sizeof(*vap));
                                vap->va_type = vp->v_type;
                                error = 0;
                        }
                        return error;
                }

                fao = fuse_out_data(fip);
                mtx_lock(&fnp->node_lock);
                fuse_set_attr(fnp, &fao->attr);
                memcpy(vap, &fnp->attr, sizeof(*vap));
                /* unused */
                //fao->attr_valid;
                //fao->attr_valid_nsec;
                mtx_unlock(&fnp->node_lock);

                fuse_ipc_put(fip);
        } else {
                /*
                 * Use cached attribute
                 */
                memcpy(vap, &fnp->attr, sizeof(*vap));
        }

        if (vap->va_type != vp->v_type)
                return EINVAL;

        return 0;
}
/*
 * VOP_SETATTR: collect requested attribute changes into a fuse_setattr_in,
 * validate them locally (read-only mount, directory truncation, chown/chmod
 * permission checks), then issue a single FUSE_SETATTR.  On success the
 * returned attributes refresh the node cache and a kevent is posted.
 */
static int
fuse_vop_setattr(struct vop_setattr_args *ap)
{
        struct vnode *vp = ap->a_vp;
        struct vattr *vap = ap->a_vap;
        struct fuse_mount *fmp = VFSTOFUSE(vp->v_mount);
        struct fuse_node *fnp = VTOI(vp);
        struct fuse_ipc *fip;
        struct fuse_setattr_in *fsi, arg;
        struct fuse_attr_out *fao;
        int kflags = 0;
        int error = 0;

        if (fuse_test_dead(fmp))
                return 0;

        if (fuse_test_nosys(fmp, FUSE_SETATTR))
                return 0;

        if (vp->v_mount->mnt_flag & MNT_RDONLY)
                return EROFS;

        /* arg accumulates the FATTR_* fields to send; built under node_lock */
        memset(&arg, 0, sizeof(arg));
        mtx_lock(&fnp->node_lock);

        if (!error && (vap->va_flags != VNOVAL)) {
                mtx_unlock(&fnp->node_lock);
                kflags |= NOTE_ATTRIB;
                return EOPNOTSUPP; /* XXX chflags not supported */
        }

        if (!error && (vap->va_size != VNOVAL)) {
                if (vp->v_type == VDIR) {
                        mtx_unlock(&fnp->node_lock);
                        return EISDIR;
                }
                if (vp->v_type == VREG &&
                    (vp->v_mount->mnt_flag & MNT_RDONLY)) {
                        mtx_unlock(&fnp->node_lock);
                        return EROFS;
                }
                arg.size = vap->va_size;
                arg.valid |= FATTR_SIZE;
                if (vap->va_size > fnp->size)
                        kflags |= NOTE_WRITE | NOTE_EXTEND;
                else
                        kflags |= NOTE_WRITE;
        }

        if (!error && (vap->va_uid != (uid_t)VNOVAL ||
            vap->va_gid != (gid_t)VNOVAL)) {
                mode_t mode;
                error = vop_helper_chown(vp, vap->va_uid, vap->va_gid,
                    ap->a_cred, &arg.uid, &arg.gid, &mode);
                arg.valid |= FATTR_UID;
                arg.valid |= FATTR_GID;
                kflags |= NOTE_ATTRIB;
        }

        if (!error && (vap->va_mode != (mode_t)VNOVAL)) {
                error = vop_helper_chmod(vp, vap->va_mode, ap->a_cred,
                    vap->va_uid, vap->va_gid, (mode_t*)&arg.mode);
                arg.valid |= FATTR_MODE;
                kflags |= NOTE_ATTRIB;
        }

        if (!error && (vap->va_atime.tv_sec != VNOVAL &&
            vap->va_atime.tv_nsec != VNOVAL)) {
                arg.atime = vap->va_atime.tv_sec;
                arg.atimensec = vap->va_atime.tv_nsec;
                arg.valid |= FATTR_ATIME;
                kflags |= NOTE_ATTRIB;
        }

        if (!error && (vap->va_mtime.tv_sec != VNOVAL &&
            vap->va_mtime.tv_nsec != VNOVAL)) {
                arg.mtime = vap->va_mtime.tv_sec;
                arg.mtimensec = vap->va_mtime.tv_nsec;
                arg.valid |= FATTR_MTIME;
                kflags |= NOTE_ATTRIB;
        }

        if (!error && (vap->va_ctime.tv_sec != VNOVAL &&
            vap->va_ctime.tv_nsec != VNOVAL)) {
                arg.ctime = vap->va_ctime.tv_sec;
                arg.ctimensec = vap->va_ctime.tv_nsec;
                arg.valid |= FATTR_CTIME;
                kflags |= NOTE_ATTRIB;
        }

        mtx_unlock(&fnp->node_lock);

        if (error)
                return error;
        if (!arg.valid)
                return 0;       /* nothing to change */

        fip = fuse_ipc_get(fmp, sizeof(*fsi));
        fsi = fuse_ipc_fill(fip, FUSE_SETATTR, fnp->ino, ap->a_cred);
        memcpy(fsi, &arg, sizeof(arg));
#if 0
        fsi->valid |= FATTR_FH;
        fsi->fh = fnp->fh;
#endif
        error = fuse_ipc_tx(fip);
        if (error)
                return error;

        fao = fuse_out_data(fip);
        if (IFTOVT(fao->attr.mode) != vp->v_type) {
                fuse_ipc_put(fip);
                return EINVAL;
        }
        mtx_lock(&fnp->node_lock);
        fuse_set_attr(fnp, &fao->attr);
        /* unused */
        //fao->attr_valid;
        //fao->attr_valid_nsec;
        mtx_unlock(&fnp->node_lock);

        fuse_ipc_put(fip);
        fuse_knote(vp, kflags);

        return 0;
}
/*
 * VOP_NRESOLVE: resolve a name in a directory via FUSE_LOOKUP, allocate a
 * vnode for the result, and enter it into the namecache.  A nodeid of 0 in
 * the reply is treated as a (cacheable) ENOENT.
 */
static int
fuse_vop_nresolve(struct vop_nresolve_args *ap)
{
        struct vnode *dvp = ap->a_dvp;
        struct vnode *vp;
        struct namecache *ncp = ap->a_nch->ncp;
        struct fuse_mount *fmp = VFSTOFUSE(dvp->v_mount);
        struct fuse_node *dfnp = VTOI(dvp);
        struct fuse_node *fnp;
        struct fuse_ipc *fip;
        struct fuse_entry_out *feo;
        char *p, tmp[1024];
        uint32_t mode;
        enum vtype vtyp;
        int error;
        int forgettable;

        if (fuse_test_dead(fmp))
                return ENOTCONN;

        if (fuse_test_nosys(fmp, FUSE_LOOKUP))
                return EOPNOTSUPP;

        fip = fuse_ipc_get(fmp, ncp->nc_nlen + 1);
        p = fuse_ipc_fill(fip, FUSE_LOOKUP, dfnp->ino, ap->a_cred);

        memcpy(p, ncp->nc_name, ncp->nc_nlen);
        p[ncp->nc_nlen] = '\0';
        /* keep a copy for diagnostics; fip may be gone after tx error */
        strlcpy(tmp, p, sizeof(tmp));

        /*
         * "." and ".." are not ref-counted by the fuse userland
         * (their API is basically broken but, meh).
         */
        forgettable = 0;
        if (strcmp(p, ".") != 0 && strcmp(p, "..") != 0)
                forgettable = 1;

        error = fuse_ipc_tx(fip);
        if (error == ENOENT) {
                cache_setvp(ap->a_nch, NULL);   /* negative cache entry */
                fuse_dbg("lookup \"%s\" ENOENT\n", tmp);
                return ENOENT;
        } else if (error) {
                fuse_dbg("lookup \"%s\" error=%d\n", tmp, error);
                return error;
        }

        feo = fuse_out_data(fip);
        fuse_dbg("lookup \"%s\" ino=%ju/%ju\n", p, feo->nodeid, feo->attr.ino);

        /*
         * Apparently in later FUSEs this means a cacheable ENOENT
         */
        if (feo->nodeid == 0) {
                fuse_ipc_put(fip);
                cache_setvp(ap->a_nch, NULL);
                return ENOENT;
        }
        /* the root node (id 1) is never subject to FUSE_FORGET accounting */
        if (feo->nodeid == 1)
                forgettable = 0;

        mode = feo->attr.mode;

        if (S_ISREG(mode))
                vtyp = VREG;
        else if (S_ISDIR(mode))
                vtyp = VDIR;
        else if (S_ISBLK(mode))
                vtyp = VBLK;
        else if (S_ISCHR(mode))
                vtyp = VCHR;
        else if (S_ISLNK(mode))
                vtyp = VLNK;
        else if (S_ISSOCK(mode))
                vtyp = VSOCK;
        else if (S_ISFIFO(mode))
                vtyp = VFIFO;
        else
                vtyp = VBAD;

        error = fuse_alloc_node(fmp, dfnp, feo->nodeid, vtyp, &vp);
        if (error == 0) {
                KKASSERT(vp);
                KKASSERT(vn_islocked(vp));

                vn_unlock(vp);
                cache_setvp(ap->a_nch, vp);
                vrele(vp);

                /* unused */
                //feo->generation;
                //feo->entry_valid;
                //feo->attr_valid;
                //feo->entry_valid_nsec;
                //feo->attr_valid_nsec;
                fnp = VTOI(vp);

                /* track lookups so FUSE_FORGET accounting stays balanced */
                if (forgettable)
                        atomic_add_64(&fnp->nlookup, 1);
        } else {
#if 0
                /* sshfs fails utterly if we issue FUSE_FORGET */
                if (forgettable)
                        fuse_forget_node(fmp, feo->nodeid, 1, NULL);
#endif
        }
        fuse_ipc_put(fip);

        return error;
}
602 static int
603 fuse_vop_nlink(struct vop_nlink_args *ap)
605 struct vnode *dvp = ap->a_dvp;
606 struct vnode *vp = ap->a_vp;
607 struct namecache *ncp = ap->a_nch->ncp;
608 struct fuse_mount *fmp = VFSTOFUSE(vp->v_mount);
609 struct fuse_node *dfnp = VTOI(dvp);
610 struct fuse_node *fnp = VTOI(vp);
611 struct fuse_ipc *fip;
612 struct fuse_link_in *fli;
613 struct fuse_entry_out *feo;
614 char *p;
615 int error;
617 if (fuse_test_dead(fmp))
618 return ENOTCONN;
620 if (fuse_test_nosys(fmp, FUSE_LINK))
621 return EOPNOTSUPP;
623 if (vp->v_type == VDIR)
624 return EPERM;
625 if (dvp->v_mount != vp->v_mount)
626 return EXDEV;
628 fip = fuse_ipc_get(fmp, sizeof(fli) + ncp->nc_nlen + 1);
629 fli = fuse_ipc_fill(fip, FUSE_LINK, dfnp->ino, ap->a_cred);
630 fli->oldnodeid = fnp->ino;
632 p = (char*)(fli + 1);
633 memcpy(p, ncp->nc_name, ncp->nc_nlen);
634 p[ncp->nc_nlen] = '\0';
636 error = fuse_ipc_tx(fip);
637 if (error)
638 return error;
640 feo = fuse_out_data(fip);
641 if (IFTOVT(feo->attr.mode) != vp->v_type) {
642 fuse_ipc_put(fip);
643 return EINVAL;
646 mtx_lock(&dfnp->node_lock);
647 mtx_lock(&fnp->node_lock);
648 fuse_set_attr(fnp, &feo->attr);
649 mtx_unlock(&fnp->node_lock);
650 mtx_unlock(&dfnp->node_lock);
652 cache_setunresolved(ap->a_nch);
653 cache_setvp(ap->a_nch, vp);
654 fuse_knote(dvp, NOTE_WRITE);
655 fuse_knote(vp, NOTE_LINK);
657 /* unused */
658 //feo->nodeid;
659 //feo->generation;
660 //feo->entry_valid;
661 //feo->attr_valid;
662 //feo->entry_valid_nsec;
663 //feo->attr_valid_nsec;
665 fuse_ipc_put(fip);
667 return 0;
/*
 * VOP_NCREATE: create and open a regular file in one FUSE_CREATE round
 * trip.  The reply carries a fuse_entry_out immediately followed by a
 * fuse_open_out whose fh is saved on the new node.
 */
static int
fuse_vop_ncreate(struct vop_ncreate_args *ap)
{
        struct vnode *dvp = ap->a_dvp;
        struct vnode *vp;
        struct namecache *ncp = ap->a_nch->ncp;
        struct fuse_mount *fmp = VFSTOFUSE(dvp->v_mount);
        struct fuse_node *dfnp = VTOI(dvp);
        struct fuse_node *fnp;
        struct fuse_ipc *fip;
        struct fuse_create_in *fci;
        struct fuse_entry_out *feo;
        struct fuse_open_out *foo;
        enum vtype vtyp;
        char *p;
        int error;

        if (fuse_test_dead(fmp))
                return ENOTCONN;

        if (fuse_test_nosys(fmp, FUSE_CREATE))
                return EOPNOTSUPP;

        fip = fuse_ipc_get(fmp, sizeof(*fci) + ncp->nc_nlen + 1);
        fci = fuse_ipc_fill(fip, FUSE_CREATE, dfnp->ino, ap->a_cred);
        fci->flags = OFLAGS(ap->a_vap->va_fuseflags);
        fci->mode = MAKEIMODE(ap->a_vap->va_type, ap->a_vap->va_mode);
        /* unused */
        //fci->umask = ...;
        fuse_dbg("flags=%X mode=%X\n", fci->flags, fci->mode);

        p = (char*)(fci + 1);
        memcpy(p, ncp->nc_name, ncp->nc_nlen);
        p[ncp->nc_nlen] = '\0';

        error = fuse_ipc_tx(fip);
        if (error)
                return error;

        /* reply layout: fuse_entry_out followed by fuse_open_out */
        feo = fuse_out_data(fip);
        foo = (struct fuse_open_out*)(feo + 1);
        vtyp = IFTOVT(feo->attr.mode);
        if (vtyp != VREG && vtyp != VSOCK) {
                fuse_ipc_put(fip);
                return EINVAL;
        }

        /*
         * NOTE(review): the node is allocated as VREG even when the
         * server reported VSOCK above — looks intentional for CREATE,
         * but confirm against upstream.
         */
        error = fuse_alloc_node(fmp, dfnp, feo->nodeid, VREG, &vp);
        if (error == 0) {
                KKASSERT(vp);
                KKASSERT(vn_islocked(vp));

                fnp = VTOI(vp);
                mtx_lock(&fnp->node_lock);
                fuse_set_attr(fnp, &feo->attr);
                mtx_unlock(&fnp->node_lock);
                fnp->fh = foo->fh;      /* already open in userland */

                cache_setunresolved(ap->a_nch);
                cache_setvp(ap->a_nch, vp);
                *(ap->a_vpp) = vp;
                fuse_knote(dvp, NOTE_WRITE);

                /* unused */
                //feo->generation;
                //feo->entry_valid;
                //feo->attr_valid;
                //feo->entry_valid_nsec;
                //feo->attr_valid_nsec;
                /* unused */
                //foo->open_flags;
        }
        fuse_ipc_put(fip);

        return error;
}
/*
 * VOP_NMKNOD: create a device node / fifo via FUSE_MKNOD and enter the
 * resulting vnode into the namecache.
 */
static int
fuse_vop_nmknod(struct vop_nmknod_args *ap)
{
        struct vnode *dvp = ap->a_dvp;
        struct vnode *vp;
        struct namecache *ncp = ap->a_nch->ncp;
        struct fuse_mount *fmp = VFSTOFUSE(dvp->v_mount);
        struct fuse_node *dfnp = VTOI(dvp);
        struct fuse_node *fnp;
        struct fuse_ipc *fip;
        struct fuse_mknod_in *fmi;
        struct fuse_entry_out *feo;
        enum vtype vtyp;
        char *p;
        int error;

        if (fuse_test_dead(fmp))
                return ENOTCONN;

        if (fuse_test_nosys(fmp, FUSE_MKNOD))
                return EOPNOTSUPP;

        fip = fuse_ipc_get(fmp, sizeof(*fmi) + ncp->nc_nlen + 1);
        fmi = fuse_ipc_fill(fip, FUSE_MKNOD, dfnp->ino, ap->a_cred);
        fmi->mode = MAKEIMODE(ap->a_vap->va_type, ap->a_vap->va_mode);
        /* unused */
        //fmi->rdev = ...;
        //fmi->umask = ...;

        p = (char*)(fmi + 1);
        memcpy(p, ncp->nc_name, ncp->nc_nlen);
        p[ncp->nc_nlen] = '\0';

        error = fuse_ipc_tx(fip);
        if (error)
                return error;

        /* sanity-check the type the server reports back */
        feo = fuse_out_data(fip);
        vtyp = IFTOVT(feo->attr.mode);
        if (vtyp != VBLK && vtyp != VCHR && vtyp != VFIFO) {
                fuse_ipc_put(fip);
                return EINVAL;
        }

        error = fuse_alloc_node(fmp, dfnp, feo->nodeid,
            ap->a_vap->va_type, &vp);
        if (error == 0) {
                KKASSERT(vp);
                KKASSERT(vn_islocked(vp));

                fnp = VTOI(vp);
                mtx_lock(&fnp->node_lock);
                fuse_set_attr(fnp, &feo->attr);
                mtx_unlock(&fnp->node_lock);

                cache_setunresolved(ap->a_nch);
                cache_setvp(ap->a_nch, vp);
                *(ap->a_vpp) = vp;
                fuse_knote(dvp, NOTE_WRITE);

                /* unused */
                //feo->generation;
                //feo->entry_valid;
                //feo->attr_valid;
                //feo->entry_valid_nsec;
                //feo->attr_valid_nsec;
        }
        fuse_ipc_put(fip);

        return error;
}
/*
 * VOP_NREMOVE: unlink a file via FUSE_UNLINK.  The target's buffers are
 * flushed and its userland file-handle released first so the server does
 * not fall back to creating .fuse_hidden* files.
 */
static int
fuse_vop_nremove(struct vop_nremove_args *ap)
{
        struct vnode *dvp = ap->a_dvp;
        struct vnode *vp;
        struct namecache *ncp = ap->a_nch->ncp;
        struct fuse_mount *fmp = VFSTOFUSE(dvp->v_mount);
        struct fuse_node *dfnp = VTOI(dvp);
        struct fuse_node *fnp;
        struct fuse_ipc *fip;
        char *p;
        int error;

        if (fuse_test_dead(fmp))
                return ENOTCONN;

        if (fuse_test_nosys(fmp, FUSE_UNLINK))
                return EOPNOTSUPP;

        error = cache_vget(ap->a_nch, ap->a_cred, LK_SHARED, &vp);
        if (error)
                return error;
        KKASSERT(vp->v_mount == dvp->v_mount);

        /*
         * Clean-up the deletion target to avoid .fuse_hidden*
         * files.
         *
         * NOTE: XXX v_opencount check does not take mmap/filepointers
         * into account.
         */
        vinvalbuf(vp, V_SAVE, 0, 0);
        if (vp->v_opencount == 0) {
                fnp = VTOI(vp);
                fuse_release(fmp, fnp);
        }
        vn_unlock(vp);

        fip = fuse_ipc_get(fmp, ncp->nc_nlen + 1);
        p = fuse_ipc_fill(fip, FUSE_UNLINK, dfnp->ino, ap->a_cred);

        memcpy(p, ncp->nc_name, ncp->nc_nlen);
        p[ncp->nc_nlen] = '\0';

        error = fuse_ipc_tx(fip);
        if (error) {
                vrele(vp);
                return error;
        }

        fnp = VTOI(vp);

        cache_unlink(ap->a_nch);
        fuse_knote(dvp, NOTE_WRITE);
        fuse_knote(vp, NOTE_DELETE);

        fuse_ipc_put(fip);
        vrele(vp);

        return 0;
}
/*
 * VOP_NMKDIR: create a directory via FUSE_MKDIR and enter the resulting
 * vnode into the namecache.
 */
static int
fuse_vop_nmkdir(struct vop_nmkdir_args *ap)
{
        struct vnode *dvp = ap->a_dvp;
        struct vnode *vp;
        struct namecache *ncp = ap->a_nch->ncp;
        struct fuse_mount *fmp = VFSTOFUSE(dvp->v_mount);
        struct fuse_node *dfnp = VTOI(dvp);
        struct fuse_node *fnp;
        struct fuse_ipc *fip;
        struct fuse_mkdir_in *fmi;
        struct fuse_entry_out *feo;
        char *p;
        int error;

        if (fuse_test_dead(fmp))
                return ENOTCONN;

        if (fuse_test_nosys(fmp, FUSE_MKDIR))
                return EOPNOTSUPP;

        fip = fuse_ipc_get(fmp, sizeof(*fmi) + ncp->nc_nlen + 1);
        fmi = fuse_ipc_fill(fip, FUSE_MKDIR, dfnp->ino, ap->a_cred);
        fmi->mode = MAKEIMODE(ap->a_vap->va_type, ap->a_vap->va_mode);

        p = (char*)(fmi + 1);
        memcpy(p, ncp->nc_name, ncp->nc_nlen);
        p[ncp->nc_nlen] = '\0';

        error = fuse_ipc_tx(fip);
        if (error)
                return error;

        /* the server must report a directory back */
        feo = fuse_out_data(fip);
        if (IFTOVT(feo->attr.mode) != VDIR) {
                fuse_ipc_put(fip);
                return EINVAL;
        }

        error = fuse_alloc_node(fmp, dfnp, feo->nodeid, VDIR, &vp);
        if (error == 0) {
                KKASSERT(vp);
                KKASSERT(vn_islocked(vp));

                fnp = VTOI(vp);
                mtx_lock(&fnp->node_lock);
                fuse_set_attr(fnp, &feo->attr);
                mtx_unlock(&fnp->node_lock);

                cache_setunresolved(ap->a_nch);
                cache_setvp(ap->a_nch, vp);
                *(ap->a_vpp) = vp;
                fuse_knote(dvp, NOTE_WRITE | NOTE_LINK);

                /* unused */
                //feo->generation;
                //feo->entry_valid;
                //feo->attr_valid;
                //feo->entry_valid_nsec;
                //feo->attr_valid_nsec;
        }
        fuse_ipc_put(fip);

        return error;
}
947 static int
948 fuse_vop_nrmdir(struct vop_nrmdir_args *ap)
950 struct vnode *dvp = ap->a_dvp;
951 struct vnode *vp;
952 struct namecache *ncp = ap->a_nch->ncp;
953 struct fuse_mount *fmp = VFSTOFUSE(dvp->v_mount);
954 struct fuse_node *dfnp = VTOI(dvp);
955 struct fuse_node *fnp;
956 struct fuse_ipc *fip;
957 char *p;
958 int error;
960 if (fuse_test_dead(fmp))
961 return ENOTCONN;
963 if (fuse_test_nosys(fmp, FUSE_RMDIR))
964 return EOPNOTSUPP;
966 error = cache_vget(ap->a_nch, ap->a_cred, LK_SHARED, &vp);
967 KKASSERT(vp->v_mount == dvp->v_mount);
968 KKASSERT(!error); /* from tmpfs */
969 vn_unlock(vp);
971 fip = fuse_ipc_get(fmp, ncp->nc_nlen + 1);
972 p = fuse_ipc_fill(fip, FUSE_RMDIR, dfnp->ino, ap->a_cred);
974 memcpy(p, ncp->nc_name, ncp->nc_nlen);
975 p[ncp->nc_nlen] = '\0';
977 error = fuse_ipc_tx(fip);
978 if (error) {
979 vrele(vp);
980 return error;
983 fnp = VTOI(vp);
985 cache_unlink(ap->a_nch);
986 fuse_knote(dvp, NOTE_WRITE | NOTE_LINK);
988 fuse_ipc_put(fip);
989 vrele(vp);
991 return 0;
/*
 * VOP_PATHCONF: report filesystem limits for pathconf(2).  Anything we do
 * not answer locally is delegated to vop_stdpathconf().
 */
static int
fuse_vop_pathconf(struct vop_pathconf_args *ap)
{
        switch (ap->a_name) {
        case _PC_FILESIZEBITS:
                *ap->a_retval = 64;     /* 64-bit file sizes */
                break;
        case _PC_NO_TRUNC:
                *ap->a_retval = 1;      /* long names error, never truncate */
                break;
        default:
                return vop_stdpathconf(ap);
        }

        return 0;
}
/*
 * VOP_READDIR: fetch the whole directory in one FUSE_READDIR request and
 * replay fuse_dirent records into the uio via vop_write_dirent(), skipping
 * records before uio_offset.
 */
static int
fuse_vop_readdir(struct vop_readdir_args *ap)
{
        struct vnode *vp = ap->a_vp;
        struct uio *uio = ap->a_uio;
        struct fuse_mount *fmp = VFSTOFUSE(vp->v_mount);
        struct fuse_ipc *fip;
        struct fuse_read_in *fri;
        const char *buf;
        size_t len;
        off_t cur_offset = 0;
        int error;

        if (fuse_test_dead(fmp))
                return ENOTCONN;

        if (fuse_test_nosys(fmp, FUSE_READDIR))
                return EOPNOTSUPP;

        fip = fuse_ipc_get(fmp, sizeof(*fri));
        fri = fuse_ipc_fill(fip, FUSE_READDIR, VTOI(vp)->ino, ap->a_cred);
        fri->fh = VTOI(vp)->fh;
        fri->offset = 0;
        /*
         * XXX This needs to be large enough to read all entries at once.
         * FUSE filesystems typically just opendir/readdir and return entries.
         */
        fri->size = FUSE_BLKSIZE * 10;
        /* unused */
        //fri->read_flags = ...;
        //fri->lock_owner = ...;
        //fri->flags = ...;

        error = fuse_ipc_tx(fip);
        if (error)
                return error;

        buf = fuse_out_data(fip);
        len = fuse_out_data_size(fip);

        while (1) {
                const struct fuse_dirent *fde;
                size_t freclen;

                fuse_dbg("uio_offset=%ju uio_resid=%ju\n",
                    uio->uio_offset, uio->uio_resid);

                /* a partial record (or none) left: we are at EOF */
                if (len < FUSE_NAME_OFFSET) {
                        if (ap->a_eofflag)
                                *ap->a_eofflag = 1;
                        break;
                }
                /* caller's buffer full */
                if (uio->uio_resid < FUSE_NAME_OFFSET)
                        break;

                fde = (const struct fuse_dirent*)buf;
                if (!fde->namelen) {
                        error = EINVAL; /* malformed record from userland */
                        break;
                }
                freclen = FUSE_DIRENT_SIZE(fde);

                /*
                 * Also see
                 * getdirentries(2) in sys/kern/vfs_syscalls.c
                 * readdir(3) in lib/libc/gen/readdir.c
                 */
                if (cur_offset >= uio->uio_offset) {
                        error = 0;
                        if (vop_write_dirent(&error, uio, fde->ino, fde->type,
                            fde->namelen, fde->name))
                                break;
                        if (error)
                                break;
                        fuse_dbg("ino=%ju type=%d name=%s len=%u\n",
                            fde->ino, fde->type, fde->name, fde->namelen);
                }

                cur_offset += _DIRENT_RECLEN(fde->namelen);
                buf += freclen;
                len -= freclen;
        }
        fuse_ipc_put(fip);

        return error;
}
/*
 * VOP_READLINK: fetch the symlink target via FUSE_READLINK and copy it
 * to the caller's uio.
 */
static int
fuse_vop_readlink(struct vop_readlink_args *ap)
{
        struct vnode *vp = ap->a_vp;
        struct fuse_mount *fmp = VFSTOFUSE(vp->v_mount);
        struct fuse_ipc *fip;
        int error;

        if (fuse_test_dead(fmp))
                return ENOTCONN;

        if (fuse_test_nosys(fmp, FUSE_READLINK))
                return EOPNOTSUPP;

        if (vp->v_type != VLNK)
                return EINVAL;

        /* FUSE_READLINK carries no request payload, only the header */
        fip = fuse_ipc_get(fmp, 0);
        fuse_ipc_fill(fip, FUSE_READLINK, VTOI(vp)->ino, ap->a_cred);

        error = fuse_ipc_tx(fip);
        if (error)
                return error;

        error = uiomove(fuse_out_data(fip), fuse_out_data_size(fip), ap->a_uio);

        fuse_ipc_put(fip);

        return error;
}
1129 static int
1130 fuse_vop_nrename(struct vop_nrename_args *ap)
1132 struct namecache *fncp = ap->a_fnch->ncp;
1133 struct namecache *tncp = ap->a_tnch->ncp;
1134 struct vnode *fdvp = ap->a_fdvp;
1135 struct vnode *fvp = fncp->nc_vp;
1136 struct vnode *tdvp = ap->a_tdvp;
1137 struct vnode *tvp;
1138 struct fuse_mount *fmp = VFSTOFUSE(fdvp->v_mount);
1139 struct fuse_node *fdfnp = VTOI(fdvp);
1140 struct fuse_node *ffnp = VTOI(fvp);
1141 struct fuse_node *tdfnp = VTOI(tdvp);
1142 struct fuse_node *tfnp;
1143 struct fuse_ipc *fip;
1144 struct fuse_rename_in *fri;
1145 char *p, *newname;
1146 int error;
1148 KKASSERT(fdvp->v_mount == fvp->v_mount);
1150 if (fuse_test_dead(fmp))
1151 return ENOTCONN;
1153 if (fuse_test_nosys(fmp, FUSE_RENAME))
1154 return EOPNOTSUPP;
1156 error = cache_vget(ap->a_tnch, ap->a_cred, LK_SHARED, &tvp);
1157 if (!error) {
1158 tfnp = VTOI(tvp);
1161 * Clean-up the deletion target to avoid .fuse_hidden*
1162 * files.
1163 * NOTE: XXX v_opencount check does not take mmap/filepointers
1164 * into account.
1166 if (tvp->v_opencount == 0) {
1167 vinvalbuf(tvp, V_SAVE, 0, 0);
1168 fuse_release(fmp, tfnp);
1170 vn_unlock(tvp);
1171 } else {
1172 tfnp = NULL;
1175 /* Disallow cross-device renames.
1176 * Why isn't this done by the caller? */
1177 if (fvp->v_mount != tdvp->v_mount ||
1178 (tvp && fvp->v_mount != tvp->v_mount)) {
1179 error = EXDEV;
1180 goto out;
1183 if (fvp == tvp) {
1184 error = 0;
1185 goto out;
1188 if (tvp) {
1189 KKASSERT(tfnp);
1190 if (ffnp->type == VDIR && tfnp->type == VDIR) {
1191 /* depend on RPC to check if empty */
1192 } else if (ffnp->type == VDIR && tfnp->type != VDIR) {
1193 error = ENOTDIR;
1194 goto out;
1195 } else if (ffnp->type != VDIR && tfnp->type == VDIR) {
1196 error = EISDIR;
1197 goto out;
1198 } else
1199 KKASSERT(ffnp->type != VDIR && tfnp->type != VDIR);
1202 fip = fuse_ipc_get(fmp, sizeof(*fri) + fncp->nc_nlen +
1203 tncp->nc_nlen + 2);
1204 /* There is also fuse_rename2_in with flags. */
1205 fri = fuse_ipc_fill(fip, FUSE_RENAME, fdfnp->ino, ap->a_cred);
1206 fri->newdir = tdfnp->ino;
1208 p = (char*)(fri + 1);
1209 memcpy(p, fncp->nc_name, fncp->nc_nlen);
1210 p[fncp->nc_nlen] = '\0';
1211 memcpy(p + fncp->nc_nlen + 1, tncp->nc_name, tncp->nc_nlen);
1212 p[fncp->nc_nlen + 1 + tncp->nc_nlen] = '\0';
1214 error = fuse_ipc_tx(fip);
1215 if (error)
1216 goto out;
1217 fuse_ipc_put(fip);
1219 if (fncp->nc_nlen != tncp->nc_nlen ||
1220 memcmp(fncp->nc_name, tncp->nc_name, fncp->nc_nlen)) {
1221 newname = kmalloc(tncp->nc_nlen + 1, M_TEMP, M_WAITOK | M_ZERO);
1222 KKASSERT(newname);
1223 memcpy(newname, tncp->nc_name, tncp->nc_nlen);
1224 newname[tncp->nc_nlen] = '\0';
1225 fuse_dbg("newname=\"%s\"\n", newname);
1226 } else
1227 newname = NULL;
1229 mtx_lock(&tdfnp->node_lock);
1230 mtx_lock(&fdfnp->node_lock);
1231 mtx_lock(&ffnp->node_lock);
1233 if (tvp) {
1234 fuse_knote(tdvp, NOTE_DELETE);
1237 mtx_unlock(&ffnp->node_lock);
1238 mtx_unlock(&fdfnp->node_lock);
1239 mtx_unlock(&tdfnp->node_lock);
1241 cache_rename(ap->a_fnch, ap->a_tnch);
1242 fuse_knote(fdvp, NOTE_WRITE);
1243 fuse_knote(tdvp, NOTE_WRITE);
1244 fuse_knote(fvp, NOTE_RENAME);
1245 out:
1246 if (tvp)
1247 vrele(tvp);
1249 return error;
/*
 * VOP_NSYMLINK: create a symlink via FUSE_SYMLINK.  The request payload is
 * the NUL-terminated link name followed by the NUL-terminated target.
 */
static int
fuse_vop_nsymlink(struct vop_nsymlink_args *ap)
{
        struct vnode *dvp = ap->a_dvp;
        struct vnode *vp;
        struct namecache *ncp = ap->a_nch->ncp;
        struct fuse_mount *fmp = VFSTOFUSE(dvp->v_mount);
        struct fuse_node *dfnp = VTOI(dvp);
        struct fuse_node *fnp;
        struct fuse_ipc *fip;
        struct fuse_entry_out *feo;
        char *p;
        int error;

        if (fuse_test_dead(fmp))
                return ENOTCONN;

        if (fuse_test_nosys(fmp, FUSE_SYMLINK))
                return EOPNOTSUPP;

        /* payload: "<linkname>\0<target>\0" */
        fip = fuse_ipc_get(fmp, strlen(ap->a_target) + 1 + ncp->nc_nlen + 1);
        p = fuse_ipc_fill(fip, FUSE_SYMLINK, dfnp->ino, ap->a_cred);

        memcpy(p, ncp->nc_name, ncp->nc_nlen);
        p[ncp->nc_nlen] = '\0';
        memcpy(p + ncp->nc_nlen + 1, ap->a_target, strlen(ap->a_target) + 1);

        error = fuse_ipc_tx(fip);
        if (error)
                return error;

        /* the server must report a symlink back */
        feo = fuse_out_data(fip);
        if (IFTOVT(feo->attr.mode) != VLNK) {
                fuse_ipc_put(fip);
                return EINVAL;
        }

        error = fuse_alloc_node(fmp, dfnp, feo->nodeid, VLNK, &vp);
        if (error == 0) {
                KKASSERT(vp);
                KKASSERT(vn_islocked(vp));

                fnp = VTOI(vp);
                mtx_lock(&fnp->node_lock);
                fuse_set_attr(fnp, &feo->attr);
                mtx_unlock(&fnp->node_lock);

                cache_setunresolved(ap->a_nch);
                cache_setvp(ap->a_nch, vp);
                *(ap->a_vpp) = vp;
                fuse_knote(vp, NOTE_WRITE);

                /* unused */
                //feo->generation;
                //feo->entry_valid;
                //feo->attr_valid;
                //feo->entry_valid_nsec;
                //feo->attr_valid_nsec;
        }
        fuse_ipc_put(fip);

        return error;
}
/*
 * Read from a regular file.  First tries the VM page-cache shortcut,
 * then falls back to buffer-cache I/O which is serviced by
 * fuse_vop_strategy() / the fuse helper thread.
 */
static int
fuse_vop_read(struct vop_read_args *ap)
{
	struct buf *bp;
	struct vnode *vp = ap->a_vp;
	struct uio *uio = ap->a_uio;
	struct fuse_mount *fmp = VFSTOFUSE(vp->v_mount);
	struct fuse_node *fnp;
	off_t base_offset;
	size_t offset;
	size_t len;
	size_t resid;
	int error;
	int seqcount;

	/*
	 * Check the basics
	 */
	if (fuse_test_dead(fmp))
		return ENOTCONN;
	if (fuse_test_nosys(fmp, FUSE_READ))
		return EOPNOTSUPP;
	if (uio->uio_offset < 0)
		return EINVAL;
	if (vp->v_type != VREG)
		return EINVAL;

	/*
	 * Extract node, try to shortcut the operation through
	 * the VM page cache, allowing us to avoid buffer cache
	 * overheads.
	 */
	fnp = VTOI(vp);
	resid = uio->uio_resid;
	seqcount = ap->a_ioflag >> IO_SEQSHIFT;
	error = vop_helper_read_shortcut(ap);
	if (error)
		return error;
	if (uio->uio_resid == 0) {
		/* shortcut satisfied the whole request */
		if (resid)
			goto finished;
		return error;
	}

	/*
	 * Fall-through to our normal read code.
	 */
	while (uio->uio_resid > 0 && uio->uio_offset < fnp->size) {
		/*
		 * Use buffer cache I/O (via fuse_vop_strategy)
		 */
		offset = (size_t)uio->uio_offset & FUSE_BLKMASK64;
		base_offset = (off_t)uio->uio_offset - offset;
		bp = getcacheblk(vp, base_offset,
				 FUSE_BLKSIZE, GETBLK_KVABIO);
		if (bp == NULL) {
			if (1 /* fuse_cluster_rd_enable XXX sysctl */) {
				error = cluster_readx(vp, fnp->size,
						      base_offset,
						      FUSE_BLKSIZE,
						      B_NOTMETA | B_KVABIO,
						      uio->uio_resid,
						      seqcount * MAXBSIZE,
						      &bp);
			} else {
				error = bread_kvabio(vp, base_offset,
						     FUSE_BLKSIZE, &bp);
			}
			if (error) {
				brelse(bp);
				kprintf("fuse_vop_read bread error %d\n",
					error);
				break;
			}

			/*
			 * Only do this if the VOP is coming from a normal
			 * read/write. The VM system handles the case for
			 * UIO_NOCOPY.
			 */
			if (uio->uio_segflg != UIO_NOCOPY)
				vm_wait_nominal();
		}
		bp->b_flags |= B_CLUSTEROK;
		bkvasync(bp);

		/*
		 * Figure out how many bytes we can actually copy this loop.
		 */
		len = FUSE_BLKSIZE - offset;
		if (len > uio->uio_resid)
			len = uio->uio_resid;
		if (len > fnp->size - uio->uio_offset)
			len = (size_t)(fnp->size - uio->uio_offset);

		error = uiomovebp(bp, (char *)bp->b_data + offset, len, uio);
		bqrelse(bp);
		if (error) {
			kprintf("fuse_vop_read uiomove error %d\n", error);
			break;
		}
	}
finished:
	/* Lazily flag the node as accessed for later atime update. */
	if (fnp->accessed == 0) {
		mtx_lock(&fnp->node_lock);
		fnp->accessed = 1;
		mtx_unlock(&fnp->node_lock);
	}
	return (error);
}
/*
 * Write to a regular file through the buffer cache.  Handles IO_APPEND,
 * RLIMIT_FSIZE enforcement, file extension/rollback on error, and the
 * various flush policies (sync, direct, async, clustered).
 */
static int
fuse_vop_write(struct vop_write_args *ap)
{
	struct vnode *vp = ap->a_vp;
	struct uio *uio = ap->a_uio;
	struct thread *td = uio->uio_td;
	struct fuse_mount *fmp = VFSTOFUSE(vp->v_mount);
	struct fuse_node *fnp;
	boolean_t extended;
	off_t oldsize;
	off_t newsize;
	int error;
	off_t base_offset;
	size_t offset;
	size_t len;
	struct rlimit limit;
	int trivial = 0;
	int kflags = 0;
	int ioflag = ap->a_ioflag;
	int seqcount;
	int endofblk;

	if (fuse_test_dead(fmp))
		return ENOTCONN;

	if (fuse_test_nosys(fmp, FUSE_WRITE))
		return EOPNOTSUPP;

	error = 0;
	if (uio->uio_resid == 0)
		return error;

	fnp = VTOI(vp);

	if (vp->v_type != VREG)
		return (EINVAL);
	seqcount = ioflag >> IO_SEQSHIFT;

	mtx_lock(&fnp->node_lock);

	oldsize = fnp->size;
	newsize = uio->uio_offset + uio->uio_resid;
	if (newsize < oldsize)
		newsize = oldsize;
	if (ioflag & IO_APPEND)
		uio->uio_offset = fnp->size;

	/*
	 * Check for illegal write offsets.
	 */
	if (newsize > FUSE_MAXFILESIZE) {
		error = EFBIG;
		goto done;
	}

	/*
	 * NOTE: Ignore if UIO does not come from a user thread (e.g. VN).
	 */
	if (vp->v_type == VREG && td != NULL && td->td_lwp != NULL) {
		error = kern_getrlimit(RLIMIT_FSIZE, &limit);
		if (error)
			goto done;
		if (newsize > limit.rlim_cur) {
			ksignal(td->td_proc, SIGXFSZ);
			error = EFBIG;
			goto done;
		}
	}

	/*
	 * Extend the file's size if necessary
	 */
	extended = (newsize > fnp->size);

	while (uio->uio_resid > 0) {
		struct buf *bp;

		/*
		 * Don't completely blow out running buffer I/O
		 * when being hit from the pageout daemon.
		 */
		if (uio->uio_segflg == UIO_NOCOPY &&
		    (ioflag & IO_RECURSE) == 0) {
			bwillwrite(FUSE_BLKSIZE);
		}

		/*
		 * Use buffer cache I/O (via fuse_vop_strategy)
		 *
		 * Calculate the maximum bytes we can write to the buffer at
		 * this offset (after resizing).
		 */
		offset = (size_t)uio->uio_offset & FUSE_BLKMASK64;
		base_offset = (off_t)uio->uio_offset - offset;
		len = uio->uio_resid;
		if (len > FUSE_BLKSIZE - offset)
			len = FUSE_BLKSIZE - offset;

		endofblk = 0;
		trivial = 0;
		if ((uio->uio_offset + len) > fnp->size) {
			/* trivial: old EOF falls inside the new write */
			trivial = (uio->uio_offset <= fnp->size);
			error = fuse_reg_resize(vp, uio->uio_offset + len,
						trivial);
			kflags |= NOTE_EXTEND;
			if (error)
				break;
		}
		if (base_offset + len == FUSE_BLKSIZE)
			endofblk = 1;

		/*
		 * Get the buffer
		 */
		error = 0;
		if (uio->uio_segflg == UIO_NOCOPY) {
			/*
			 * Issue a write with the same data backing
			 * the buffer
			 */
			bp = getblk(vp,
				    base_offset, FUSE_BLKSIZE,
				    GETBLK_BHEAVY | GETBLK_KVABIO, 0);
			if ((bp->b_flags & B_CACHE) == 0) {
				bqrelse(bp);
				error = bread_kvabio(vp,
						     base_offset, FUSE_BLKSIZE,
						     &bp);
			}
		} else if (trivial) {
			/*
			 * We are entirely overwriting the buffer, but
			 * may still have to zero it.
			 */
			bp = getblk(vp,
				    base_offset, FUSE_BLKSIZE,
				    GETBLK_BHEAVY | GETBLK_KVABIO, 0);
			if ((bp->b_flags & B_CACHE) == 0)
				vfs_bio_clrbuf(bp);
		} else {
			/*
			 * Partial overwrite, read in any missing bits
			 * then replace the portion being overwritten.
			 */
			error = bread_kvabio(vp, base_offset, FUSE_BLKSIZE, &bp);
			if (error == 0)
				bheavy(bp);
		}

		if (error) {
			brelse(bp);
			break;
		}

		/*
		 * Ok, copy the data in
		 */
		bkvasync(bp);
		error = uiomovebp(bp, (char *)bp->b_data + offset, len, uio);
		kflags |= NOTE_WRITE;

		if (error) {
			kprintf("fuse_vop_write uiomove error %d\n", error);
			brelse(bp);
			break;
		}

		if (ioflag & IO_SYNC) {
			bwrite(bp);
		} else if ((ioflag & IO_DIRECT) && endofblk) {
			bawrite(bp);
		} else if (ioflag & IO_ASYNC) {
			bawrite(bp);
		} else if (vp->v_mount->mnt_flag & MNT_NOCLUSTERW) {
			bdwrite(bp);
		} else {
			bp->b_flags |= B_CLUSTEROK;
			cluster_write(bp, fnp->size, FUSE_BLKSIZE, seqcount);
			//bdwrite(bp);
		}
	}
	vsetisdirty(vp);

	if (error) {
		/* Roll the size extension back on a failed write. */
		if (extended) {
			(void)fuse_reg_resize(vp, oldsize, trivial);
			kflags &= ~NOTE_EXTEND;
		}
		goto done;
	}

	/*
	 * Currently we don't set the mtime on files modified via mmap()
	 * because we can't tell the difference between those modifications
	 * and an attempt by the pageout daemon to flush fuse pages to
	 * swap.
	 */
	if (uio->uio_segflg == UIO_NOCOPY) {
		if (vp->v_flag & VLASTWRITETS) {
			fnp->attr.va_mtime.tv_sec = vp->v_lastwrite_ts.tv_sec;
			fnp->attr.va_mtime.tv_nsec = vp->v_lastwrite_ts.tv_nsec;
		}
	} else {
		fnp->modified = 1;
		vclrflags(vp, VLASTWRITETS);
	}

	if (extended)
		fnp->changed = 1;

	/* Clear setuid/setgid on write unless privileged to retain them. */
	if (fnp->attr.va_mode & (S_ISUID | S_ISGID)) {
		if (caps_priv_check(ap->a_cred, SYSCAP_NOVFS_RETAINSUGID))
			fnp->attr.va_mode &= ~(S_ISUID | S_ISGID);
	}
done:
	mtx_unlock(&fnp->node_lock);

	if (kflags)
		fuse_knote(vp, kflags);

	return(error);
}
1653 * Issue I/O RPC to support thread. This can be issued from sensitive
1654 * kernel threads such as the pageout daemon, so we have to queue the
1655 * I/O to our support thread and return. We cannot block in here.
1657 static int
1658 fuse_vop_strategy(struct vop_strategy_args *ap)
1660 struct bio *bio = ap->a_bio;
1661 struct buf *bp = bio->bio_buf;
1662 struct vnode *vp = ap->a_vp;
1663 struct fuse_mount *fmp = VFSTOFUSE(vp->v_mount);
1664 //struct fuse_node *fnp = VTOI(vp);
1666 fuse_dbg("ino=%ju b_cmd=%d\n", VTOI(ap->a_vp)->ino, bp->b_cmd);
1668 if (vp->v_type != VREG) {
1669 bp->b_resid = bp->b_bcount;
1670 bp->b_flags |= B_ERROR | B_INVAL;
1671 bp->b_error = EINVAL;
1672 biodone(bio);
1673 return 0;
1676 bp->b_flags &= ~(B_ERROR | B_INVAL);
1678 switch(bp->b_cmd) {
1679 case BUF_CMD_READ:
1680 if (vn_cache_strategy(vp, bio) == 0) {
1681 bio->bio_driver_info = vp;
1682 spin_lock(&fmp->helper_spin);
1683 TAILQ_INSERT_TAIL(&fmp->bioq, bio, bio_act);
1684 spin_unlock(&fmp->helper_spin);
1685 wakeup(&fmp->helper_td);
1687 break;
1688 case BUF_CMD_WRITE:
1689 bio->bio_driver_info = vp;
1690 spin_lock(&fmp->helper_spin);
1691 TAILQ_INSERT_TAIL(&fmp->bioq, bio, bio_act);
1692 spin_unlock(&fmp->helper_spin);
1693 wakeup(&fmp->helper_td);
1694 break;
1695 default:
1696 bp->b_flags |= B_INVAL;
1697 bp->b_error = EINVAL;
1698 biodone(bio);
1699 break;
1701 return 0;
1705 * Just make the backing store appear to be contiguous so write clustering
1706 * works. The strategy function will take it from there. Use MAXBSIZE
1707 * chunks as a micro-optimization to make random flushes use reasonable
1708 * block writes.
1710 static int
1711 fuse_bmap(struct vop_bmap_args *ap)
1713 if (ap->a_doffsetp != NULL)
1714 *ap->a_doffsetp = ap->a_loffset;
1715 if (ap->a_runp != NULL)
1716 *ap->a_runp = MAXBSIZE - (ap->a_loffset & (MAXBSIZE - 1));
1717 if (ap->a_runb != NULL)
1718 *ap->a_runb = ap->a_loffset & (MAXBSIZE - 1);
1720 return 0;
1723 static int
1724 fuse_advlock(struct vop_advlock_args *ap)
1726 struct vnode *vp = ap->a_vp;
1727 struct fuse_node *fnp = VTOI(vp);
1728 int error;
1730 error = lf_advlock(ap, &fnp->advlock, fnp->size);
1732 return error;
1735 static int
1736 fuse_vop_print(struct vop_print_args *ap)
1738 struct fuse_node *fnp = VTOI(ap->a_vp);
1740 fuse_print("tag VT_FUSE, node %p, ino %ju\n",
1741 fnp, VTOI(ap->a_vp)->ino);
1743 return 0;
1746 static int
1747 fuse_vop_inactive(struct vop_inactive_args *ap)
1749 struct vnode *vp = ap->a_vp;
1750 struct mount *mp = vp->v_mount;
1751 struct fuse_node *fnp = VTOI(vp);
1752 struct fuse_mount *fmp = VFSTOFUSE(mp);
1753 struct vm_object *obj;
1755 if (!fnp) {
1756 vrecycle(vp);
1757 return 0;
1761 * For now synchronize all dirty data on INACTIVE instead
1762 * of on RECLAIM.
1764 * Get all dirty data out... mmap'd pages and the buffer cache,
1765 * so we can issue FUSE_RELEASE here.
1767 fuse_dbg("ino=%ju\n", fnp->ino);
1769 if ((obj = vp->v_object) != NULL)
1770 vm_object_page_clean(obj, 0, 0, 0);
1771 VOP_FSYNC(vp, MNT_WAIT, 0);
1776 fuse_release(fmp, fnp);
1778 return 0;
1782 * Reclaim inactive vnode and destroy the related fuse_node. We
1783 * never destroy the root fuse_node here.
1785 static int
1786 fuse_vop_reclaim(struct vop_reclaim_args *ap)
1788 struct vnode *vp = ap->a_vp;
1789 struct fuse_mount *fmp = VFSTOFUSE(vp->v_mount);
1790 struct fuse_node *fnp = VTOI(vp);
1792 if (fnp) {
1793 vp->v_data = NULL;
1794 fnp->vp = NULL;
1795 fuse_dbg("ino=%ju\n", fnp->ino);
1797 if (fnp != fmp->rfnp)
1798 fuse_node_free(fmp, fnp);
1799 vclrisdirty(vp);
1802 return 0;
1805 static int
1806 fuse_vop_mountctl(struct vop_mountctl_args *ap)
1808 struct mount *mp;
1809 int res = 0;
1811 mp = ap->a_head.a_ops->head.vv_mount;
1812 lwkt_gettoken(&mp->mnt_token);
1814 switch (ap->a_op) {
1815 //case MOUNTCTL_MOUNTFLAGS:
1816 // ...
1817 // break;
1818 default:
1819 res = vop_stdmountctl(ap);
1820 break;
1823 lwkt_reltoken(&mp->mnt_token);
1824 return res;
/* kqueue filter implementations, attached via fuse_kqfilter() below. */
static void filt_fusedetach(struct knote*);
static int filt_fuseread(struct knote*, long);
static int filt_fusewrite(struct knote*, long);
static int filt_fusevnode(struct knote*, long);

static struct filterops fuseread_filtops =
	{ FILTEROP_ISFD | FILTEROP_MPSAFE,
	  NULL, filt_fusedetach, filt_fuseread };
static struct filterops fusewrite_filtops =
	{ FILTEROP_ISFD | FILTEROP_MPSAFE,
	  NULL, filt_fusedetach, filt_fusewrite };
static struct filterops fusevnode_filtops =
	{ FILTEROP_ISFD | FILTEROP_MPSAFE,
	  NULL, filt_fusedetach, filt_fusevnode };
1842 static int
1843 fuse_kqfilter(struct vop_kqfilter_args *ap)
1845 struct vnode *vp = ap->a_vp;
1846 struct knote *kn = ap->a_kn;
1848 switch (kn->kn_filter) {
1849 case EVFILT_READ:
1850 kn->kn_fop = &fuseread_filtops;
1851 break;
1852 case EVFILT_WRITE:
1853 kn->kn_fop = &fusewrite_filtops;
1854 break;
1855 case EVFILT_VNODE:
1856 kn->kn_fop = &fusevnode_filtops;
1857 break;
1858 default:
1859 return EOPNOTSUPP;
1862 kn->kn_hook = (caddr_t)vp;
1863 knote_insert(&vp->v_pollinfo.vpi_kqinfo.ki_note, kn);
1865 return 0;
1868 static void
1869 filt_fusedetach(struct knote *kn)
1871 struct vnode *vp = (void*)kn->kn_hook;
1873 knote_remove(&vp->v_pollinfo.vpi_kqinfo.ki_note, kn);
/*
 * EVFILT_READ: report the number of bytes readable past the
 * descriptor's current offset.
 */
static int
filt_fuseread(struct knote *kn, long hint)
{
	struct vnode *vp = (void*)kn->kn_hook;
	struct fuse_node *fnp = VTOI(vp);
	off_t off;

	if (hint == NOTE_REVOKE) {
		kn->kn_flags |= (EV_EOF | EV_NODATA | EV_ONESHOT);
		return 1;
	}

	/*
	 * Interlock against MP races when performing this function.
	 */
	mtx_lock(&fnp->node_lock);
	off = fnp->size - kn->kn_fp->f_offset;
	kn->kn_data = (off < INTPTR_MAX) ? off : INTPTR_MAX;
	if (kn->kn_sfflags & NOTE_OLDAPI) {
		/* old-API poll semantics: always report ready */
		mtx_unlock(&fnp->node_lock);
		return 1;
	}
	/*
	 * NOTE(review): this recomputes the identical clamped value when
	 * kn_data is 0 — presumably copied from the tmpfs filter; verify
	 * whether a different fallback (e.g. minimum of 1) was intended.
	 */
	if (!kn->kn_data)
		kn->kn_data = (off < INTPTR_MAX) ? off : INTPTR_MAX;
	mtx_unlock(&fnp->node_lock);

	return kn->kn_data != 0;
}
1905 static int
1906 filt_fusewrite(struct knote *kn, long hint)
1908 if (hint == NOTE_REVOKE)
1909 kn->kn_flags |= (EV_EOF | EV_NODATA | EV_ONESHOT);
1910 kn->kn_data = 0;
1912 return 1;
1915 static int
1916 filt_fusevnode(struct knote *kn, long hint)
1918 if (kn->kn_sfflags & hint)
1919 kn->kn_fflags |= hint;
1920 if (hint == NOTE_REVOKE) {
1921 kn->kn_flags |= (EV_EOF | EV_NODATA);
1922 return 1;
1925 return kn->kn_fflags != 0;
1928 static int
1929 fuse_vop_getpages(struct vop_getpages_args *ap)
1931 if (!ap->a_vp->v_mount)
1932 return VM_PAGER_BAD;
1934 return vnode_pager_generic_getpages(ap->a_vp, ap->a_m, ap->a_count,
1935 ap->a_reqpage, ap->a_seqaccess);
1938 static int
1939 fuse_vop_putpages(struct vop_putpages_args *ap)
1941 if (!ap->a_vp->v_mount)
1942 return VM_PAGER_BAD;
1944 return vnode_pager_generic_putpages(ap->a_vp, ap->a_m, ap->a_count,
1945 ap->a_flags, ap->a_rtvals);
1949 * Resizes the object associated to the regular file pointed to by vp to
1950 * the size newsize. 'vp' must point to a vnode that represents a regular
1951 * file. 'newsize' must be positive.
1953 * pass NVEXTF_TRIVIAL when buf content will be overwritten, otherwise set 0
1954 * to be zero filled.
1956 * Returns zero on success or an appropriate error code on failure.
1958 * Caller must hold the node exclusively locked.
1960 static int
1961 fuse_reg_resize(struct vnode *vp, off_t newsize, int trivial)
1963 struct fuse_node *fnp;
1964 off_t oldsize;
1965 int nvextflags;
1966 int error;
1968 #ifdef INVARIANTS
1969 KKASSERT(vp->v_type == VREG);
1970 KKASSERT(newsize >= 0);
1971 #endif
1973 fnp = VTOI(vp);
1975 oldsize = fnp->size;
1976 fnp->size = newsize;
1977 fnp->attr.va_size = newsize;
1978 fnp->sizeoverride = 1;
1980 nvextflags = 0;
1983 * The backing VM object may contain VM pages as well as swap
1984 * assignments if we previously renamed main object pages into
1985 * it during deactivation.
1987 if (newsize < oldsize) {
1988 error = nvtruncbuf(vp, newsize, FUSE_BLKSIZE, -1, nvextflags);
1989 } else {
1990 int nblksize;
1992 nblksize = FUSE_BLKSIZE;
1994 if (trivial)
1995 nvextflags |= NVEXTF_TRIVIAL;
1997 error = nvextendbuf(vp, oldsize, newsize,
1998 FUSE_BLKSIZE, nblksize,
1999 -1, -1, nvextflags);
2001 return error;
/*
 * Fuse strategy helper thread
 */
void
fuse_io_thread(void *arg)
{
	struct fuse_mount *fmp = arg;
	struct bio *bio;

	while (fmp->dead == 0) {
		/* Sleep until fuse_vop_strategy() queues work and wakes us. */
		tsleep(&fmp->helper_td, 0, "fuse_wio", 0);
		spin_lock(&fmp->helper_spin);
		while ((bio = TAILQ_FIRST(&fmp->bioq)) != NULL) {
			TAILQ_REMOVE(&fmp->bioq, bio, bio_act);
			/* drop spinlock across the (blocking) RPC */
			spin_unlock(&fmp->helper_spin);
			fuse_io_execute(fmp, bio);
			spin_lock(&fmp->helper_spin);
		}
		spin_unlock(&fmp->helper_spin);
	}
	/* Tell the unmount path the helper has exited. */
	fmp->helper_td = NULL;
	wakeup(&fmp->helper_td);
}
/*
 * Execute BIO
 *
 * Translates a queued buffer-cache bio into a FUSE_READ or FUSE_WRITE
 * RPC to the userland server and completes the bio.
 */
static void
fuse_io_execute(struct fuse_mount *fmp, struct bio *bio)
{
	struct buf *bp = bio->bio_buf;
	struct vnode *vp = bio->bio_driver_info;
	struct fuse_node *fnp = VTOI(vp);
	struct fuse_ipc *fip;
	struct fuse_read_in *fri;
	struct fuse_write_in *fwi;
	struct fuse_write_out *fwo;
	int error;

	switch(bp->b_cmd) {
	case BUF_CMD_READ:
		fip = fuse_ipc_get(fmp, sizeof(*fri));
		fri = fuse_ipc_fill(fip, FUSE_READ, fnp->ino, proc0.p_ucred);
		fri->offset = bp->b_loffset;
		fri->size = bp->b_bcount;
		fri->fh = fnp->fh;

		error = fuse_ipc_tx(fip);

		if (error == 0) {
			memcpy(bp->b_data, fuse_out_data(fip),
			       fuse_out_data_size(fip));
			fuse_ipc_put(fip);
			bp->b_resid = 0;
			bp->b_error = 0;
		} else {
			bp->b_resid = bp->b_bcount;
			bp->b_flags |= B_ERROR | B_INVAL;
			bp->b_error = EINVAL;
		}
		biodone(bio);
		break;
	case BUF_CMD_WRITE:
		fip = fuse_ipc_get(fmp, sizeof(*fwi) + bp->b_bcount);
		fwi = fuse_ipc_fill(fip, FUSE_WRITE, fnp->ino, proc0.p_ucred);
		fwi->offset = bp->b_loffset;
		fwi->size = bp->b_bcount;
		fwi->fh = fnp->fh;

		/*
		 * Handle truncated buffer at file EOF
		 */
		if (fwi->offset + fwi->size > fnp->size) {
			if (fwi->offset >= fnp->size) {
				/*
				 * NOTE(review): this jump skips both
				 * fuse_ipc_tx() and fuse_ipc_put(), so the
				 * fip allocated above appears to be leaked
				 * on this path — verify whether an unsent
				 * fip may/should be fuse_ipc_put() here.
				 */
				error = EINVAL;
				goto write_failed;
			}
			fwi->size = fnp->size - fwi->offset;
		}

		memcpy((void *)(fwi + 1), bp->b_data, bp->b_bcount);

		error = fuse_ipc_tx(fip);

		/*
		 * NOTE(review): fwo is computed before the error check;
		 * presumably fip is no longer valid after a failed tx —
		 * confirm fuse_out_data() is safe to call here, or move
		 * it inside the error == 0 branch.
		 */
		fwo = fuse_out_data(fip);
		if (error == 0) {
			bp->b_resid = bp->b_bcount - fwo->size;
			bp->b_error = 0;
			fuse_ipc_put(fip);
		} else {
write_failed:
			bp->b_resid = bp->b_bcount;
			bp->b_flags |= B_ERROR | B_INVAL;
			bp->b_error = EINVAL;
		}
		biodone(bio);
		break;
	default:
		bp->b_resid = bp->b_bcount;
		bp->b_flags |= B_ERROR | B_INVAL;
		bp->b_error = EINVAL;
		biodone(bio);
		break;
	}

#if 0
	bp->b_resid = bp->b_bcount;
	bp->b_flags |= B_ERROR | B_INVAL;
	bp->b_error = EINVAL;
	biodone(bio);
#endif
}
/*
 * Tear down the userland state for a node: release the open file
 * handle (FUSE_RELEASE / FUSE_RELEASEDIR) and drop the nlookup count.
 * Called from the INACTIVE path after dirty data has been flushed.
 */
static void
fuse_release(struct fuse_mount *fmp, struct fuse_node *fnp)
{
	struct fuse_ipc *fip;
	struct fuse_release_in *fri;
	int error, op;

	if (fnp->fh) {
		/*
		 * Release the file-handle to clean-up the userland side.
		 */
		if (fnp->type == VDIR)
			op = FUSE_RELEASEDIR;
		else
			op = FUSE_RELEASE;

		fip = fuse_ipc_get(fmp, sizeof(*fri));
		fri = fuse_ipc_fill(fip, op, fnp->ino, NULL);
		/* unused */
		//fri->flags = ...;
		fri->release_flags = FUSE_RELEASE_FLUSH;
		//fri->lock_owner = ...;
		fri->fh = fnp->fh;

		error = fuse_ipc_tx(fip);
		if (error == 0)
			fuse_ipc_put(fip);

#if 0
		op = FUSE_FORGET;
		fip = fuse_ipc_get(fmp, sizeof(*fri));
		fri = fuse_ipc_fill(fip, op, fnp->ino, NULL);
		error = fuse_ipc_tx(fip);
		if (error == 0)
			fuse_ipc_put(fip);
#endif
		fnp->fh = 0;
	}
	/* ino 1 is the root node; never forget it. */
	if (fnp->nlookup && fnp->ino != 1) {
#if 0
		/* sshfs fails utterly if we issue FUSE_FORGET */
		error = fuse_forget_node(fmp, fnp->ino, fnp->nlookup, NULL);
#endif
		fnp->nlookup = 0;
	}
	fnp->closed = true;
}
/*
 * VOP vector for regular fuse vnodes (directories, regular files,
 * symlinks).  Anything not listed falls through to vop_defaultop.
 */
struct vop_ops fuse_vnode_vops = {
	.vop_default = vop_defaultop,
	.vop_access = fuse_vop_access,
	.vop_open = fuse_vop_open,
	.vop_close = fuse_vop_close,
	.vop_fsync = fuse_vop_fsync,
	.vop_getattr = fuse_vop_getattr,
	.vop_setattr = fuse_vop_setattr,
	.vop_nresolve = fuse_vop_nresolve,
	//.vop_nlookupdotdot = fuse_nlookupdotdot,
	.vop_nlink = fuse_vop_nlink,
	.vop_ncreate = fuse_vop_ncreate,
	.vop_nmknod = fuse_vop_nmknod,
	.vop_nremove = fuse_vop_nremove,
	.vop_nmkdir = fuse_vop_nmkdir,
	.vop_nrmdir = fuse_vop_nrmdir,
	.vop_pathconf = fuse_vop_pathconf,
	.vop_readdir = fuse_vop_readdir,
	.vop_readlink = fuse_vop_readlink,
	.vop_nrename = fuse_vop_nrename,
	.vop_nsymlink = fuse_vop_nsymlink,
	.vop_read = fuse_vop_read,
	.vop_write = fuse_vop_write,
	.vop_strategy = fuse_vop_strategy,
	.vop_bmap = fuse_bmap,
	.vop_advlock = fuse_advlock,
	.vop_print = fuse_vop_print,
	.vop_inactive = fuse_vop_inactive,
	.vop_reclaim = fuse_vop_reclaim,
	.vop_mountctl = fuse_vop_mountctl,
	.vop_kqfilter = fuse_kqfilter,
	.vop_getpages = fuse_vop_getpages,
	.vop_putpages = fuse_vop_putpages,
};
2201 struct vop_ops fuse_spec_vops = {
2202 .vop_default = vop_defaultop,
2203 .vop_access = fuse_vop_access,
2204 .vop_close = fuse_vop_close,
2205 .vop_fsync = fuse_vop_fsync,
2206 .vop_getattr = fuse_vop_getattr,
2207 .vop_setattr = fuse_vop_setattr,
2208 .vop_read = vop_stdnoread,
2209 .vop_write = vop_stdnowrite,
2210 //.vop_markatime = fuse_vop_markatime,
2211 .vop_print = fuse_vop_print,
2212 .vop_inactive = fuse_vop_inactive,
2213 .vop_reclaim = fuse_vop_reclaim,