/*
 * FUSE: Filesystem in Userspace
 * Copyright (C) 2001-2007 Miklos Szeredi <miklos@szeredi.hu>
 *
 * Implementation of (most of) the low-level FUSE API. The session loop
 * functions are implemented in separate files.
 *
 * This program can be distributed under the terms of the GNU LGPLv2.
 * See the file COPYING.LIB
 */

#include "qemu/osdep.h"
#include "standard-headers/linux/fuse.h"
#include "fuse_misc.h"
#include "fuse_virtio.h"

#define THREAD_POOL_SIZE 0

#define OFFSET_MAX 0x7fffffffffffffffLL

struct fuse_pollhandle {
    struct fuse_session *se;
};

static size_t pagesize;

static __attribute__((constructor)) void fuse_ll_init_pagesize(void)
{
    pagesize = getpagesize();
}

static void convert_stat(const struct stat *stbuf, struct fuse_attr *attr)
{
    *attr = (struct fuse_attr){
        .mode = stbuf->st_mode,
        .nlink = stbuf->st_nlink,
        .rdev = stbuf->st_rdev,
        .size = stbuf->st_size,
        .blksize = stbuf->st_blksize,
        .blocks = stbuf->st_blocks,
        .atime = stbuf->st_atime,
        .mtime = stbuf->st_mtime,
        .ctime = stbuf->st_ctime,
        .atimensec = ST_ATIM_NSEC(stbuf),
        .mtimensec = ST_MTIM_NSEC(stbuf),
        .ctimensec = ST_CTIM_NSEC(stbuf),
    };
}

static void convert_attr(const struct fuse_setattr_in *attr, struct stat *stbuf)
{
    stbuf->st_mode = attr->mode;
    stbuf->st_uid = attr->uid;
    stbuf->st_gid = attr->gid;
    stbuf->st_size = attr->size;
    stbuf->st_atime = attr->atime;
    stbuf->st_mtime = attr->mtime;
    stbuf->st_ctime = attr->ctime;
    ST_ATIM_NSEC_SET(stbuf, attr->atimensec);
    ST_MTIM_NSEC_SET(stbuf, attr->mtimensec);
    ST_CTIM_NSEC_SET(stbuf, attr->ctimensec);
}

static size_t iov_length(const struct iovec *iov, size_t count)
{
    size_t seg;
    size_t ret = 0;

    for (seg = 0; seg < count; seg++) {
        ret += iov[seg].iov_len;
    }
    return ret;
}

static void list_init_req(struct fuse_req *req)
{
    req->next = req;
    req->prev = req;
}

static void list_del_req(struct fuse_req *req)
{
    struct fuse_req *prev = req->prev;
    struct fuse_req *next = req->next;
    prev->next = next;
    next->prev = prev;
}

static void list_add_req(struct fuse_req *req, struct fuse_req *next)
{
    struct fuse_req *prev = next->prev;
    req->next = next;
    req->prev = prev;
    prev->next = req;
    next->prev = req;
}

static void destroy_req(fuse_req_t req)
{
    pthread_mutex_destroy(&req->lock);
    g_free(req);
}

void fuse_free_req(fuse_req_t req)
{
    int ctr;
    struct fuse_session *se = req->se;

    pthread_mutex_lock(&se->lock);
    req->u.ni.func = NULL;
    req->u.ni.data = NULL;
    list_del_req(req);
    ctr = --req->ctr;
    pthread_mutex_unlock(&se->lock);
    if (!ctr) {
        destroy_req(req);
    }
}

static struct fuse_req *fuse_ll_alloc_req(struct fuse_session *se)
{
    struct fuse_req *req;

    req = g_try_new0(struct fuse_req, 1);
    if (req == NULL) {
        fuse_log(FUSE_LOG_ERR, "fuse: failed to allocate request\n");
    } else {
        req->se = se;
        req->ctr = 1;
        list_init_req(req);
        fuse_mutex_init(&req->lock);
    }

    return req;
}

/* Send data. If *ch* is NULL, send via session master fd */
static int fuse_send_msg(struct fuse_session *se, struct fuse_chan *ch,
                         struct iovec *iov, int count)
{
    struct fuse_out_header *out = iov[0].iov_base;

    out->len = iov_length(iov, count);
    if (out->unique == 0) {
        fuse_log(FUSE_LOG_DEBUG, "NOTIFY: code=%d length=%u\n", out->error,
                 out->len);
    } else if (out->error) {
        fuse_log(FUSE_LOG_DEBUG,
                 "   unique: %llu, error: %i (%s), outsize: %i\n",
                 (unsigned long long)out->unique, out->error,
                 strerror(-out->error), out->len);
    } else {
        fuse_log(FUSE_LOG_DEBUG, "   unique: %llu, success, outsize: %i\n",
                 (unsigned long long)out->unique, out->len);
    }

    if (fuse_lowlevel_is_virtio(se)) {
        return virtio_send_msg(se, ch, iov, count);
    }

    abort(); /* virtio should have taken it before here */
    return 0;
}

int fuse_send_reply_iov_nofree(fuse_req_t req, int error, struct iovec *iov,
                               int count)
{
    struct fuse_out_header out = {
        .unique = req->unique,
        .error = error,
    };

    if (error <= -1000 || error > 0) {
        fuse_log(FUSE_LOG_ERR, "fuse: bad error value: %i\n", error);
        out.error = -ERANGE;
    }

    iov[0].iov_base = &out;
    iov[0].iov_len = sizeof(struct fuse_out_header);

    return fuse_send_msg(req->se, req->ch, iov, count);
}

static int send_reply_iov(fuse_req_t req, int error, struct iovec *iov,
                          int count)
{
    int res;

    res = fuse_send_reply_iov_nofree(req, error, iov, count);
    fuse_free_req(req);
    return res;
}

static int send_reply(fuse_req_t req, int error, const void *arg,
                      size_t argsize)
{
    struct iovec iov[2];
    int count = 1;

    if (argsize) {
        iov[1].iov_base = (void *)arg;
        iov[1].iov_len = argsize;
        count++;
    }
    return send_reply_iov(req, error, iov, count);
}

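/*
 * Illustrative sketch (not part of the original code): every reply built by
 * send_reply() travels as a small iovec array whose first element is always
 * the fuse_out_header filled in by fuse_send_reply_iov_nofree(), optionally
 * followed by a payload element.  Assuming some payload struct "arg":
 *
 *     struct iovec iov[2];
 *     iov[1].iov_base = &arg;            // payload, if any
 *     iov[1].iov_len = sizeof(arg);
 *     send_reply_iov(req, 0, iov, 2);    // iov[0] gets the header
 *
 * out.len is then computed over the whole array by iov_length() before the
 * message is handed to the transport.
 */
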
int fuse_reply_iov(fuse_req_t req, const struct iovec *iov, int count)
{
    int res;
    g_autofree struct iovec *padded_iov = NULL;

    padded_iov = g_try_new(struct iovec, count + 1);
    if (padded_iov == NULL) {
        return fuse_reply_err(req, ENOMEM);
    }

    memcpy(padded_iov + 1, iov, count * sizeof(struct iovec));
    count++;

    res = send_reply_iov(req, 0, padded_iov, count);

    return res;
}

/*
 * 'buf` is allowed to be empty so that the proper size may be
 * allocated by the caller
 */
size_t fuse_add_direntry(fuse_req_t req, char *buf, size_t bufsize,
                         const char *name, const struct stat *stbuf, off_t off)
{
    size_t namelen;
    size_t entlen;
    size_t entlen_padded;
    struct fuse_dirent *dirent;

    namelen = strlen(name);
    entlen = FUSE_NAME_OFFSET + namelen;
    entlen_padded = FUSE_DIRENT_ALIGN(entlen);

    if ((buf == NULL) || (entlen_padded > bufsize)) {
        return entlen_padded;
    }

    dirent = (struct fuse_dirent *)buf;
    dirent->ino = stbuf->st_ino;
    dirent->off = off;
    dirent->namelen = namelen;
    dirent->type = (stbuf->st_mode & S_IFMT) >> 12;
    memcpy(dirent->name, name, namelen);
    memset(dirent->name + namelen, 0, entlen_padded - entlen);

    return entlen_padded;
}

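/*
 * Illustrative sketch (assumed usage, not from this file): a readdir
 * implementation typically calls fuse_add_direntry() twice per entry - once
 * with a NULL buffer to learn the padded entry size, then again to emit it:
 *
 *     size_t entsize = fuse_add_direntry(req, NULL, 0, name, &st, next_off);
 *     if (written + entsize <= bufsize) {
 *         fuse_add_direntry(req, buf + written, bufsize - written,
 *                           name, &st, next_off);
 *         written += entsize;
 *     }
 */
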
static void convert_statfs(const struct statvfs *stbuf,
                           struct fuse_kstatfs *kstatfs)
{
    *kstatfs = (struct fuse_kstatfs){
        .bsize = stbuf->f_bsize,
        .frsize = stbuf->f_frsize,
        .blocks = stbuf->f_blocks,
        .bfree = stbuf->f_bfree,
        .bavail = stbuf->f_bavail,
        .files = stbuf->f_files,
        .ffree = stbuf->f_ffree,
        .namelen = stbuf->f_namemax,
    };
}

static int send_reply_ok(fuse_req_t req, const void *arg, size_t argsize)
{
    return send_reply(req, 0, arg, argsize);
}

int fuse_reply_err(fuse_req_t req, int err)
{
    return send_reply(req, -err, NULL, 0);
}

void fuse_reply_none(fuse_req_t req)
{
    fuse_free_req(req);
}

static unsigned long calc_timeout_sec(double t)
{
    if (t > (double)ULONG_MAX) {
        return ULONG_MAX;
    } else if (t < 0.0) {
        return 0;
    }
    return (unsigned long)t;
}

static unsigned int calc_timeout_nsec(double t)
{
    double f = t - (double)calc_timeout_sec(t);
    if (f < 0.0) {
        return 0;
    } else if (f >= 0.999999999) {
        return 999999999;
    }
    return (unsigned int)(f * 1.0e9);
}

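/*
 * Example (illustrative): a fractional timeout supplied by the filesystem,
 * e.g. 1.5 seconds, is split into the two kernel fields as
 *
 *     calc_timeout_sec(1.5)  == 1
 *     calc_timeout_nsec(1.5) == 500000000
 *
 * with out-of-range values clamped to [0, ULONG_MAX] seconds and
 * [0, 999999999] nanoseconds.
 */
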
static void fill_entry(struct fuse_entry_out *arg,
                       const struct fuse_entry_param *e)
{
    *arg = (struct fuse_entry_out){
        .generation = e->generation,
        .entry_valid = calc_timeout_sec(e->entry_timeout),
        .entry_valid_nsec = calc_timeout_nsec(e->entry_timeout),
        .attr_valid = calc_timeout_sec(e->attr_timeout),
        .attr_valid_nsec = calc_timeout_nsec(e->attr_timeout),
    };
    convert_stat(&e->attr, &arg->attr);

    arg->attr.flags = e->attr_flags;
}

/*
 * `buf` is allowed to be empty so that the proper size may be
 * allocated by the caller
 */
size_t fuse_add_direntry_plus(fuse_req_t req, char *buf, size_t bufsize,
                              const char *name,
                              const struct fuse_entry_param *e, off_t off)
{
    size_t namelen;
    size_t entlen;
    size_t entlen_padded;

    namelen = strlen(name);
    entlen = FUSE_NAME_OFFSET_DIRENTPLUS + namelen;
    entlen_padded = FUSE_DIRENT_ALIGN(entlen);
    if ((buf == NULL) || (entlen_padded > bufsize)) {
        return entlen_padded;
    }

    struct fuse_direntplus *dp = (struct fuse_direntplus *)buf;
    memset(&dp->entry_out, 0, sizeof(dp->entry_out));
    fill_entry(&dp->entry_out, e);

    struct fuse_dirent *dirent = &dp->dirent;
    *dirent = (struct fuse_dirent){
        .ino = e->attr.st_ino,
        .off = off,
        .namelen = namelen,
        .type = (e->attr.st_mode & S_IFMT) >> 12,
    };
    memcpy(dirent->name, name, namelen);
    memset(dirent->name + namelen, 0, entlen_padded - entlen);

    return entlen_padded;
}

static void fill_open(struct fuse_open_out *arg, const struct fuse_file_info *f)
{
    arg->fh = f->fh;
    if (f->direct_io) {
        arg->open_flags |= FOPEN_DIRECT_IO;
    }
    if (f->keep_cache) {
        arg->open_flags |= FOPEN_KEEP_CACHE;
    }
    if (f->cache_readdir) {
        arg->open_flags |= FOPEN_CACHE_DIR;
    }
    if (f->nonseekable) {
        arg->open_flags |= FOPEN_NONSEEKABLE;
    }
}

int fuse_reply_entry(fuse_req_t req, const struct fuse_entry_param *e)
{
    struct fuse_entry_out arg;
    size_t size = sizeof(arg);

    memset(&arg, 0, sizeof(arg));
    fill_entry(&arg, e);
    return send_reply_ok(req, &arg, size);
}

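/*
 * Illustrative sketch (assumed lookup handler, not part of this file): a
 * filesystem's lookup callback fills a fuse_entry_param and hands it to
 * fuse_reply_entry(), which converts it via fill_entry() above:
 *
 *     struct fuse_entry_param e = { 0 };
 *     e.ino = st.st_ino;
 *     e.attr = st;
 *     e.attr_timeout = 1.0;
 *     e.entry_timeout = 1.0;
 *     fuse_reply_entry(req, &e);
 */
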
int fuse_reply_create(fuse_req_t req, const struct fuse_entry_param *e,
                      const struct fuse_file_info *f)
{
    char buf[sizeof(struct fuse_entry_out) + sizeof(struct fuse_open_out)];
    size_t entrysize = sizeof(struct fuse_entry_out);
    struct fuse_entry_out *earg = (struct fuse_entry_out *)buf;
    struct fuse_open_out *oarg = (struct fuse_open_out *)(buf + entrysize);

    memset(buf, 0, sizeof(buf));
    fill_entry(earg, e);
    fill_open(oarg, f);
    return send_reply_ok(req, buf, entrysize + sizeof(struct fuse_open_out));
}

int fuse_reply_attr(fuse_req_t req, const struct stat *attr,
                    double attr_timeout)
{
    struct fuse_attr_out arg;
    size_t size = sizeof(arg);

    memset(&arg, 0, sizeof(arg));
    arg.attr_valid = calc_timeout_sec(attr_timeout);
    arg.attr_valid_nsec = calc_timeout_nsec(attr_timeout);
    convert_stat(attr, &arg.attr);

    return send_reply_ok(req, &arg, size);
}

int fuse_reply_readlink(fuse_req_t req, const char *linkname)
{
    return send_reply_ok(req, linkname, strlen(linkname));
}

int fuse_reply_open(fuse_req_t req, const struct fuse_file_info *f)
{
    struct fuse_open_out arg;

    memset(&arg, 0, sizeof(arg));
    fill_open(&arg, f);
    return send_reply_ok(req, &arg, sizeof(arg));
}

int fuse_reply_write(fuse_req_t req, size_t count)
{
    struct fuse_write_out arg;

    memset(&arg, 0, sizeof(arg));
    arg.size = count;

    return send_reply_ok(req, &arg, sizeof(arg));
}

int fuse_reply_buf(fuse_req_t req, const char *buf, size_t size)
{
    return send_reply_ok(req, buf, size);
}

static int fuse_send_data_iov_fallback(struct fuse_session *se,
                                       struct fuse_chan *ch, struct iovec *iov,
                                       int iov_count, struct fuse_bufvec *buf,
                                       size_t len)
{
    /* Optimize common case */
    if (buf->count == 1 && buf->idx == 0 && buf->off == 0 &&
        !(buf->buf[0].flags & FUSE_BUF_IS_FD)) {
        /*
         * FIXME: also avoid memory copy if there are multiple buffers
         * but none of them contain an fd
         */

        iov[iov_count].iov_base = buf->buf[0].mem;
        iov[iov_count].iov_len = len;
        iov_count++;
        return fuse_send_msg(se, ch, iov, iov_count);
    }

    if (fuse_lowlevel_is_virtio(se) && buf->count == 1 &&
        buf->buf[0].flags == (FUSE_BUF_IS_FD | FUSE_BUF_FD_SEEK)) {
        return virtio_send_data_iov(se, ch, iov, iov_count, buf, len);
    }

    abort(); /* Will have taken vhost path */
    return 0;
}

static int fuse_send_data_iov(struct fuse_session *se, struct fuse_chan *ch,
                              struct iovec *iov, int iov_count,
                              struct fuse_bufvec *buf)
{
    size_t len = fuse_buf_size(buf);

    return fuse_send_data_iov_fallback(se, ch, iov, iov_count, buf, len);
}

int fuse_reply_data(fuse_req_t req, struct fuse_bufvec *bufv)
{
    struct iovec iov[2];
    struct fuse_out_header out = {
        .unique = req->unique,
    };
    int res;

    iov[0].iov_base = &out;
    iov[0].iov_len = sizeof(struct fuse_out_header);

    res = fuse_send_data_iov(req->se, req->ch, iov, 1, bufv);
    if (res <= 0) {
        fuse_free_req(req);
        return res;
    }

    return fuse_reply_err(req, res);
}

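/*
 * Illustrative sketch (assumed read handler): fuse_reply_data() accepts a
 * fuse_bufvec, so a read reply can reference either memory or a file
 * descriptor region; with virtio the FD case goes through
 * virtio_send_data_iov() without an intermediate copy:
 *
 *     struct fuse_bufvec buf = FUSE_BUFVEC_INIT(size);
 *     buf.buf[0].flags = FUSE_BUF_IS_FD | FUSE_BUF_FD_SEEK;
 *     buf.buf[0].fd = fd;
 *     buf.buf[0].pos = offset;
 *     fuse_reply_data(req, &buf);
 */
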
int fuse_reply_statfs(fuse_req_t req, const struct statvfs *stbuf)
{
    struct fuse_statfs_out arg;
    size_t size = sizeof(arg);

    memset(&arg, 0, sizeof(arg));
    convert_statfs(stbuf, &arg.st);

    return send_reply_ok(req, &arg, size);
}

int fuse_reply_xattr(fuse_req_t req, size_t count)
{
    struct fuse_getxattr_out arg;

    memset(&arg, 0, sizeof(arg));
    arg.size = count;

    return send_reply_ok(req, &arg, sizeof(arg));
}

int fuse_reply_lock(fuse_req_t req, const struct flock *lock)
{
    struct fuse_lk_out arg;

    memset(&arg, 0, sizeof(arg));
    arg.lk.type = lock->l_type;
    if (lock->l_type != F_UNLCK) {
        arg.lk.start = lock->l_start;
        if (lock->l_len == 0) {
            arg.lk.end = OFFSET_MAX;
        } else {
            arg.lk.end = lock->l_start + lock->l_len - 1;
        }
    }
    arg.lk.pid = lock->l_pid;
    return send_reply_ok(req, &arg, sizeof(arg));
}

int fuse_reply_bmap(fuse_req_t req, uint64_t idx)
{
    struct fuse_bmap_out arg;

    memset(&arg, 0, sizeof(arg));
    arg.block = idx;

    return send_reply_ok(req, &arg, sizeof(arg));
}

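/*
 * Example (illustrative): fuse_reply_lock() translates POSIX lock semantics
 * into the wire format - an l_len of 0 means "to end of file", which is
 * encoded as an end offset of OFFSET_MAX:
 *
 *     l_start = 100, l_len = 0   ->  lk.start = 100, lk.end = OFFSET_MAX
 *     l_start = 100, l_len = 10  ->  lk.start = 100, lk.end = 109
 */
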
static struct fuse_ioctl_iovec *fuse_ioctl_iovec_copy(const struct iovec *iov,
                                                      size_t count)
{
    struct fuse_ioctl_iovec *fiov;
    size_t i;

    fiov = g_try_new(struct fuse_ioctl_iovec, count);
    if (!fiov) {
        return NULL;
    }

    for (i = 0; i < count; i++) {
        fiov[i].base = (uintptr_t)iov[i].iov_base;
        fiov[i].len = iov[i].iov_len;
    }

    return fiov;
}

int fuse_reply_ioctl_retry(fuse_req_t req, const struct iovec *in_iov,
                           size_t in_count, const struct iovec *out_iov,
                           size_t out_count)
{
    struct fuse_ioctl_out arg;
    g_autofree struct fuse_ioctl_iovec *in_fiov = NULL;
    g_autofree struct fuse_ioctl_iovec *out_fiov = NULL;
    struct iovec iov[4];
    size_t count = 1;
    int res;

    memset(&arg, 0, sizeof(arg));
    arg.flags |= FUSE_IOCTL_RETRY;
    arg.in_iovs = in_count;
    arg.out_iovs = out_count;
    iov[count].iov_base = &arg;
    iov[count].iov_len = sizeof(arg);
    count++;

    /* Can't handle non-compat 64bit ioctls on 32bit */
    if (sizeof(void *) == 4 && req->ioctl_64bit) {
        res = fuse_reply_err(req, EINVAL);
        return res;
    }

    if (in_count) {
        in_fiov = fuse_ioctl_iovec_copy(in_iov, in_count);
        if (!in_fiov) {
            res = fuse_reply_err(req, ENOMEM);
            return res;
        }

        iov[count].iov_base = (void *)in_fiov;
        iov[count].iov_len = sizeof(in_fiov[0]) * in_count;
        count++;
    }
    if (out_count) {
        out_fiov = fuse_ioctl_iovec_copy(out_iov, out_count);
        if (!out_fiov) {
            res = fuse_reply_err(req, ENOMEM);
            return res;
        }

        iov[count].iov_base = (void *)out_fiov;
        iov[count].iov_len = sizeof(out_fiov[0]) * out_count;
        count++;
    }

    res = send_reply_iov(req, 0, iov, count);

    return res;
}

int fuse_reply_ioctl(fuse_req_t req, int result, const void *buf, size_t size)
{
    struct fuse_ioctl_out arg;
    struct iovec iov[3];
    size_t count = 1;

    memset(&arg, 0, sizeof(arg));
    arg.result = result;
    iov[count].iov_base = &arg;
    iov[count].iov_len = sizeof(arg);
    count++;

    if (size) {
        iov[count].iov_base = (char *)buf;
        iov[count].iov_len = size;
        count++;
    }

    return send_reply_iov(req, 0, iov, count);
}

int fuse_reply_ioctl_iov(fuse_req_t req, int result, const struct iovec *iov,
                         int count)
{
    g_autofree struct iovec *padded_iov = NULL;
    struct fuse_ioctl_out arg;
    int res;

    padded_iov = g_try_new(struct iovec, count + 2);
    if (padded_iov == NULL) {
        return fuse_reply_err(req, ENOMEM);
    }

    memset(&arg, 0, sizeof(arg));
    arg.result = result;
    padded_iov[1].iov_base = &arg;
    padded_iov[1].iov_len = sizeof(arg);

    memcpy(&padded_iov[2], iov, count * sizeof(struct iovec));

    res = send_reply_iov(req, 0, padded_iov, count + 2);

    return res;
}

int fuse_reply_poll(fuse_req_t req, unsigned revents)
{
    struct fuse_poll_out arg;

    memset(&arg, 0, sizeof(arg));
    arg.revents = revents;

    return send_reply_ok(req, &arg, sizeof(arg));
}

int fuse_reply_lseek(fuse_req_t req, off_t off)
{
    struct fuse_lseek_out arg;

    memset(&arg, 0, sizeof(arg));
    arg.offset = off;

    return send_reply_ok(req, &arg, sizeof(arg));
}

static void do_lookup(fuse_req_t req, fuse_ino_t nodeid,
                      struct fuse_mbuf_iter *iter)
{
    const char *name = fuse_mbuf_iter_advance_str(iter);
    if (!name) {
        fuse_reply_err(req, EINVAL);
        return;
    }

    if (req->se->op.lookup) {
        req->se->op.lookup(req, nodeid, name);
    } else {
        fuse_reply_err(req, ENOSYS);
    }
}

, fuse_ino_t nodeid
,
718 struct fuse_mbuf_iter
*iter
)
720 struct fuse_forget_in
*arg
;
722 arg
= fuse_mbuf_iter_advance(iter
, sizeof(*arg
));
724 fuse_reply_err(req
, EINVAL
);
728 if (req
->se
->op
.forget
) {
729 req
->se
->op
.forget(req
, nodeid
, arg
->nlookup
);
731 fuse_reply_none(req
);
735 static void do_batch_forget(fuse_req_t req
, fuse_ino_t nodeid
,
736 struct fuse_mbuf_iter
*iter
)
738 struct fuse_batch_forget_in
*arg
;
739 struct fuse_forget_data
*forgets
;
744 arg
= fuse_mbuf_iter_advance(iter
, sizeof(*arg
));
746 fuse_reply_none(req
);
751 * Prevent integer overflow. The compiler emits the following warning
752 * unless we use the scount local variable:
754 * error: comparison is always false due to limited range of data type
755 * [-Werror=type-limits]
757 * This may be true on 64-bit hosts but we need this check for 32-bit
761 if (scount
> SIZE_MAX
/ sizeof(forgets
[0])) {
762 fuse_reply_none(req
);
766 forgets
= fuse_mbuf_iter_advance(iter
, arg
->count
* sizeof(forgets
[0]));
768 fuse_reply_none(req
);
772 if (req
->se
->op
.forget_multi
) {
773 req
->se
->op
.forget_multi(req
, arg
->count
, forgets
);
774 } else if (req
->se
->op
.forget
) {
777 for (i
= 0; i
< arg
->count
; i
++) {
778 struct fuse_req
*dummy_req
;
780 dummy_req
= fuse_ll_alloc_req(req
->se
);
781 if (dummy_req
== NULL
) {
785 dummy_req
->unique
= req
->unique
;
786 dummy_req
->ctx
= req
->ctx
;
787 dummy_req
->ch
= NULL
;
789 req
->se
->op
.forget(dummy_req
, forgets
[i
].ino
, forgets
[i
].nlookup
);
791 fuse_reply_none(req
);
793 fuse_reply_none(req
);
797 static void do_getattr(fuse_req_t req
, fuse_ino_t nodeid
,
798 struct fuse_mbuf_iter
*iter
)
800 struct fuse_file_info
*fip
= NULL
;
801 struct fuse_file_info fi
;
803 struct fuse_getattr_in
*arg
;
805 arg
= fuse_mbuf_iter_advance(iter
, sizeof(*arg
));
807 fuse_reply_err(req
, EINVAL
);
811 if (arg
->getattr_flags
& FUSE_GETATTR_FH
) {
812 memset(&fi
, 0, sizeof(fi
));
817 if (req
->se
->op
.getattr
) {
818 req
->se
->op
.getattr(req
, nodeid
, fip
);
820 fuse_reply_err(req
, ENOSYS
);
824 static void do_setattr(fuse_req_t req
, fuse_ino_t nodeid
,
825 struct fuse_mbuf_iter
*iter
)
827 if (req
->se
->op
.setattr
) {
828 struct fuse_setattr_in
*arg
;
829 struct fuse_file_info
*fi
= NULL
;
830 struct fuse_file_info fi_store
;
833 arg
= fuse_mbuf_iter_advance(iter
, sizeof(*arg
));
835 fuse_reply_err(req
, EINVAL
);
839 memset(&stbuf
, 0, sizeof(stbuf
));
840 convert_attr(arg
, &stbuf
);
841 if (arg
->valid
& FATTR_FH
) {
842 arg
->valid
&= ~FATTR_FH
;
843 memset(&fi_store
, 0, sizeof(fi_store
));
847 arg
->valid
&= FUSE_SET_ATTR_MODE
| FUSE_SET_ATTR_UID
|
848 FUSE_SET_ATTR_GID
| FUSE_SET_ATTR_SIZE
|
849 FUSE_SET_ATTR_ATIME
| FUSE_SET_ATTR_MTIME
|
850 FUSE_SET_ATTR_ATIME_NOW
| FUSE_SET_ATTR_MTIME_NOW
|
851 FUSE_SET_ATTR_CTIME
| FUSE_SET_ATTR_KILL_SUIDGID
;
853 req
->se
->op
.setattr(req
, nodeid
, &stbuf
, arg
->valid
, fi
);
855 fuse_reply_err(req
, ENOSYS
);
859 static void do_access(fuse_req_t req
, fuse_ino_t nodeid
,
860 struct fuse_mbuf_iter
*iter
)
862 struct fuse_access_in
*arg
;
864 arg
= fuse_mbuf_iter_advance(iter
, sizeof(*arg
));
866 fuse_reply_err(req
, EINVAL
);
870 if (req
->se
->op
.access
) {
871 req
->se
->op
.access(req
, nodeid
, arg
->mask
);
873 fuse_reply_err(req
, ENOSYS
);
877 static void do_readlink(fuse_req_t req
, fuse_ino_t nodeid
,
878 struct fuse_mbuf_iter
*iter
)
882 if (req
->se
->op
.readlink
) {
883 req
->se
->op
.readlink(req
, nodeid
);
885 fuse_reply_err(req
, ENOSYS
);
static int parse_secctx_fill_req(fuse_req_t req, struct fuse_mbuf_iter *iter)
{
    struct fuse_secctx_header *fsecctx_header;
    struct fuse_secctx *fsecctx;
    const void *secctx;
    const char *name;

    fsecctx_header = fuse_mbuf_iter_advance(iter, sizeof(*fsecctx_header));
    if (!fsecctx_header) {
        return -EINVAL;
    }

    /*
     * As of now maximum of one security context is supported. It can
     * change in future though.
     */
    if (fsecctx_header->nr_secctx > 1) {
        return -EOPNOTSUPP;
    }

    /* No security context sent. Maybe no LSM supports it */
    if (!fsecctx_header->nr_secctx) {
        return 0;
    }

    fsecctx = fuse_mbuf_iter_advance(iter, sizeof(*fsecctx));
    if (!fsecctx) {
        return -EINVAL;
    }

    /* struct fsecctx with zero sized context is not expected */
    if (!fsecctx->size) {
        return -EINVAL;
    }
    name = fuse_mbuf_iter_advance_str(iter);
    if (!name) {
        return -EINVAL;
    }

    secctx = fuse_mbuf_iter_advance(iter, fsecctx->size);
    if (!secctx) {
        return -EINVAL;
    }

    req->secctx.name = name;
    req->secctx.ctx = secctx;
    req->secctx.ctxlen = fsecctx->size;
    return 0;
}

static void do_mknod(fuse_req_t req, fuse_ino_t nodeid,
                     struct fuse_mbuf_iter *iter)
{
    struct fuse_mknod_in *arg;
    const char *name;
    bool secctx_enabled = req->se->conn.want & FUSE_CAP_SECURITY_CTX;
    int err;

    arg = fuse_mbuf_iter_advance(iter, sizeof(*arg));
    name = fuse_mbuf_iter_advance_str(iter);
    if (!arg || !name) {
        fuse_reply_err(req, EINVAL);
        return;
    }

    req->ctx.umask = arg->umask;

    if (secctx_enabled) {
        err = parse_secctx_fill_req(req, iter);
        if (err) {
            fuse_reply_err(req, -err);
            return;
        }
    }

    if (req->se->op.mknod) {
        req->se->op.mknod(req, nodeid, name, arg->mode, arg->rdev);
    } else {
        fuse_reply_err(req, ENOSYS);
    }
}

static void do_mkdir(fuse_req_t req, fuse_ino_t nodeid,
                     struct fuse_mbuf_iter *iter)
{
    struct fuse_mkdir_in *arg;
    const char *name;
    bool secctx_enabled = req->se->conn.want & FUSE_CAP_SECURITY_CTX;
    int err;

    arg = fuse_mbuf_iter_advance(iter, sizeof(*arg));
    name = fuse_mbuf_iter_advance_str(iter);
    if (!arg || !name) {
        fuse_reply_err(req, EINVAL);
        return;
    }

    req->ctx.umask = arg->umask;

    if (secctx_enabled) {
        err = parse_secctx_fill_req(req, iter);
        if (err) {
            fuse_reply_err(req, err);
            return;
        }
    }

    if (req->se->op.mkdir) {
        req->se->op.mkdir(req, nodeid, name, arg->mode);
    } else {
        fuse_reply_err(req, ENOSYS);
    }
}

static void do_unlink(fuse_req_t req, fuse_ino_t nodeid,
                      struct fuse_mbuf_iter *iter)
{
    const char *name = fuse_mbuf_iter_advance_str(iter);

    if (!name) {
        fuse_reply_err(req, EINVAL);
        return;
    }

    if (req->se->op.unlink) {
        req->se->op.unlink(req, nodeid, name);
    } else {
        fuse_reply_err(req, ENOSYS);
    }
}

static void do_rmdir(fuse_req_t req, fuse_ino_t nodeid,
                     struct fuse_mbuf_iter *iter)
{
    const char *name = fuse_mbuf_iter_advance_str(iter);

    if (!name) {
        fuse_reply_err(req, EINVAL);
        return;
    }

    if (req->se->op.rmdir) {
        req->se->op.rmdir(req, nodeid, name);
    } else {
        fuse_reply_err(req, ENOSYS);
    }
}

static void do_symlink(fuse_req_t req, fuse_ino_t nodeid,
                       struct fuse_mbuf_iter *iter)
{
    const char *name = fuse_mbuf_iter_advance_str(iter);
    const char *linkname = fuse_mbuf_iter_advance_str(iter);
    bool secctx_enabled = req->se->conn.want & FUSE_CAP_SECURITY_CTX;
    int err;

    if (!name || !linkname) {
        fuse_reply_err(req, EINVAL);
        return;
    }

    if (secctx_enabled) {
        err = parse_secctx_fill_req(req, iter);
        if (err) {
            fuse_reply_err(req, err);
            return;
        }
    }

    if (req->se->op.symlink) {
        req->se->op.symlink(req, linkname, nodeid, name);
    } else {
        fuse_reply_err(req, ENOSYS);
    }
}

1065 static void do_rename(fuse_req_t req
, fuse_ino_t nodeid
,
1066 struct fuse_mbuf_iter
*iter
)
1068 struct fuse_rename_in
*arg
;
1069 const char *oldname
;
1070 const char *newname
;
1072 arg
= fuse_mbuf_iter_advance(iter
, sizeof(*arg
));
1073 oldname
= fuse_mbuf_iter_advance_str(iter
);
1074 newname
= fuse_mbuf_iter_advance_str(iter
);
1075 if (!arg
|| !oldname
|| !newname
) {
1076 fuse_reply_err(req
, EINVAL
);
1080 if (req
->se
->op
.rename
) {
1081 req
->se
->op
.rename(req
, nodeid
, oldname
, arg
->newdir
, newname
, 0);
1083 fuse_reply_err(req
, ENOSYS
);
static void do_rename2(fuse_req_t req, fuse_ino_t nodeid,
                       struct fuse_mbuf_iter *iter)
{
    struct fuse_rename2_in *arg;
    const char *oldname;
    const char *newname;

    arg = fuse_mbuf_iter_advance(iter, sizeof(*arg));
    oldname = fuse_mbuf_iter_advance_str(iter);
    newname = fuse_mbuf_iter_advance_str(iter);
    if (!arg || !oldname || !newname) {
        fuse_reply_err(req, EINVAL);
        return;
    }

    if (req->se->op.rename) {
        req->se->op.rename(req, nodeid, oldname, arg->newdir, newname,
                           arg->flags);
    } else {
        fuse_reply_err(req, ENOSYS);
    }
}

static void do_link(fuse_req_t req, fuse_ino_t nodeid,
                    struct fuse_mbuf_iter *iter)
{
    struct fuse_link_in *arg = fuse_mbuf_iter_advance(iter, sizeof(*arg));
    const char *name = fuse_mbuf_iter_advance_str(iter);

    if (!arg || !name) {
        fuse_reply_err(req, EINVAL);
        return;
    }

    if (req->se->op.link) {
        req->se->op.link(req, arg->oldnodeid, nodeid, name);
    } else {
        fuse_reply_err(req, ENOSYS);
    }
}

static void do_create(fuse_req_t req, fuse_ino_t nodeid,
                      struct fuse_mbuf_iter *iter)
{
    bool secctx_enabled = req->se->conn.want & FUSE_CAP_SECURITY_CTX;

    if (req->se->op.create) {
        struct fuse_create_in *arg;
        struct fuse_file_info fi;
        const char *name;
        int err;

        arg = fuse_mbuf_iter_advance(iter, sizeof(*arg));
        name = fuse_mbuf_iter_advance_str(iter);
        if (!arg || !name) {
            fuse_reply_err(req, EINVAL);
            return;
        }

        if (secctx_enabled) {
            err = parse_secctx_fill_req(req, iter);
            if (err) {
                fuse_reply_err(req, err);
                return;
            }
        }

        memset(&fi, 0, sizeof(fi));
        fi.flags = arg->flags;
        fi.kill_priv = arg->open_flags & FUSE_OPEN_KILL_SUIDGID;

        req->ctx.umask = arg->umask;

        req->se->op.create(req, nodeid, name, arg->mode, &fi);
    } else {
        fuse_reply_err(req, ENOSYS);
    }
}

static void do_open(fuse_req_t req, fuse_ino_t nodeid,
                    struct fuse_mbuf_iter *iter)
{
    struct fuse_open_in *arg;
    struct fuse_file_info fi;

    arg = fuse_mbuf_iter_advance(iter, sizeof(*arg));
    if (!arg) {
        fuse_reply_err(req, EINVAL);
        return;
    }

    /* File creation is handled by do_create() or do_mknod() */
    if (arg->flags & (O_CREAT | O_TMPFILE)) {
        fuse_reply_err(req, EINVAL);
        return;
    }

    memset(&fi, 0, sizeof(fi));
    fi.flags = arg->flags;
    fi.kill_priv = arg->open_flags & FUSE_OPEN_KILL_SUIDGID;

    if (req->se->op.open) {
        req->se->op.open(req, nodeid, &fi);
    } else {
        fuse_reply_open(req, &fi);
    }
}

, fuse_ino_t nodeid
,
1196 struct fuse_mbuf_iter
*iter
)
1198 if (req
->se
->op
.read
) {
1199 struct fuse_read_in
*arg
;
1200 struct fuse_file_info fi
;
1202 arg
= fuse_mbuf_iter_advance(iter
, sizeof(*arg
));
1204 fuse_reply_err(req
, EINVAL
);
1208 memset(&fi
, 0, sizeof(fi
));
1210 fi
.lock_owner
= arg
->lock_owner
;
1211 fi
.flags
= arg
->flags
;
1212 req
->se
->op
.read(req
, nodeid
, arg
->size
, arg
->offset
, &fi
);
1214 fuse_reply_err(req
, ENOSYS
);
static void do_write(fuse_req_t req, fuse_ino_t nodeid,
                     struct fuse_mbuf_iter *iter)
{
    struct fuse_write_in *arg;
    struct fuse_file_info fi;
    const char *param;

    arg = fuse_mbuf_iter_advance(iter, sizeof(*arg));
    if (!arg) {
        fuse_reply_err(req, EINVAL);
        return;
    }

    param = fuse_mbuf_iter_advance(iter, arg->size);
    if (!param) {
        fuse_reply_err(req, EINVAL);
        return;
    }

    memset(&fi, 0, sizeof(fi));
    fi.fh = arg->fh;
    fi.writepage = (arg->write_flags & FUSE_WRITE_CACHE) != 0;
    fi.kill_priv = !!(arg->write_flags & FUSE_WRITE_KILL_PRIV);

    fi.lock_owner = arg->lock_owner;
    fi.flags = arg->flags;

    if (req->se->op.write) {
        req->se->op.write(req, nodeid, param, arg->size, arg->offset, &fi);
    } else {
        fuse_reply_err(req, ENOSYS);
    }
}

static void do_write_buf(fuse_req_t req, fuse_ino_t nodeid,
                         struct fuse_mbuf_iter *iter, struct fuse_bufvec *ibufv)
{
    struct fuse_session *se = req->se;
    struct fuse_bufvec *pbufv = ibufv;
    struct fuse_bufvec tmpbufv = {
        .buf[0] = ibufv->buf[0],
        .count = 1,
    };
    struct fuse_write_in *arg;
    size_t arg_size = sizeof(*arg);
    struct fuse_file_info fi;

    memset(&fi, 0, sizeof(fi));

    arg = fuse_mbuf_iter_advance(iter, arg_size);
    if (!arg) {
        fuse_reply_err(req, EINVAL);
        return;
    }

    fi.lock_owner = arg->lock_owner;
    fi.flags = arg->flags;
    fi.fh = arg->fh;
    fi.writepage = !!(arg->write_flags & FUSE_WRITE_CACHE);
    fi.kill_priv = !!(arg->write_flags & FUSE_WRITE_KILL_PRIV);

    if (ibufv->count == 1) {
        assert(!(tmpbufv.buf[0].flags & FUSE_BUF_IS_FD));
        tmpbufv.buf[0].mem = ((char *)arg) + arg_size;
        tmpbufv.buf[0].size -= sizeof(struct fuse_in_header) + arg_size;
        pbufv = &tmpbufv;
    } else {
        /*
         * Input bufv contains the headers in the first element
         * and the data in the rest, we need to skip that first element
         */
        ibufv->buf[0].size = 0;
    }

    if (fuse_buf_size(pbufv) != arg->size) {
        fuse_log(FUSE_LOG_ERR,
                 "fuse: do_write_buf: buffer size doesn't match arg->size\n");
        fuse_reply_err(req, EIO);
        return;
    }

    se->op.write_buf(req, nodeid, pbufv, arg->offset, &fi);
}

static void do_flush(fuse_req_t req, fuse_ino_t nodeid,
                     struct fuse_mbuf_iter *iter)
{
    struct fuse_flush_in *arg;
    struct fuse_file_info fi;

    arg = fuse_mbuf_iter_advance(iter, sizeof(*arg));
    if (!arg) {
        fuse_reply_err(req, EINVAL);
        return;
    }

    memset(&fi, 0, sizeof(fi));
    fi.fh = arg->fh;
    fi.flush = 1;
    fi.lock_owner = arg->lock_owner;

    if (req->se->op.flush) {
        req->se->op.flush(req, nodeid, &fi);
    } else {
        fuse_reply_err(req, ENOSYS);
    }
}

static void do_release(fuse_req_t req, fuse_ino_t nodeid,
                       struct fuse_mbuf_iter *iter)
{
    struct fuse_release_in *arg;
    struct fuse_file_info fi;

    arg = fuse_mbuf_iter_advance(iter, sizeof(*arg));
    if (!arg) {
        fuse_reply_err(req, EINVAL);
        return;
    }

    memset(&fi, 0, sizeof(fi));
    fi.flags = arg->flags;
    fi.fh = arg->fh;
    fi.flush = (arg->release_flags & FUSE_RELEASE_FLUSH) ? 1 : 0;
    fi.lock_owner = arg->lock_owner;

    if (arg->release_flags & FUSE_RELEASE_FLOCK_UNLOCK) {
        fi.flock_release = 1;
    }

    if (req->se->op.release) {
        req->se->op.release(req, nodeid, &fi);
    } else {
        fuse_reply_err(req, 0);
    }
}

static void do_fsync(fuse_req_t req, fuse_ino_t nodeid,
                     struct fuse_mbuf_iter *iter)
{
    struct fuse_fsync_in *arg;
    struct fuse_file_info fi;
    int datasync;

    arg = fuse_mbuf_iter_advance(iter, sizeof(*arg));
    if (!arg) {
        fuse_reply_err(req, EINVAL);
        return;
    }
    datasync = arg->fsync_flags & 1;

    memset(&fi, 0, sizeof(fi));
    fi.fh = arg->fh;

    if (req->se->op.fsync) {
        if (fi.fh == (uint64_t)-1) {
            req->se->op.fsync(req, nodeid, datasync, NULL);
        } else {
            req->se->op.fsync(req, nodeid, datasync, &fi);
        }
    } else {
        fuse_reply_err(req, ENOSYS);
    }
}

static void do_opendir(fuse_req_t req, fuse_ino_t nodeid,
                       struct fuse_mbuf_iter *iter)
{
    struct fuse_open_in *arg;
    struct fuse_file_info fi;

    arg = fuse_mbuf_iter_advance(iter, sizeof(*arg));
    if (!arg) {
        fuse_reply_err(req, EINVAL);
        return;
    }

    memset(&fi, 0, sizeof(fi));
    fi.flags = arg->flags;

    if (req->se->op.opendir) {
        req->se->op.opendir(req, nodeid, &fi);
    } else {
        fuse_reply_open(req, &fi);
    }
}

static void do_readdir(fuse_req_t req, fuse_ino_t nodeid,
                       struct fuse_mbuf_iter *iter)
{
    struct fuse_read_in *arg;
    struct fuse_file_info fi;

    arg = fuse_mbuf_iter_advance(iter, sizeof(*arg));
    if (!arg) {
        fuse_reply_err(req, EINVAL);
        return;
    }

    memset(&fi, 0, sizeof(fi));
    fi.fh = arg->fh;

    if (req->se->op.readdir) {
        req->se->op.readdir(req, nodeid, arg->size, arg->offset, &fi);
    } else {
        fuse_reply_err(req, ENOSYS);
    }
}

static void do_readdirplus(fuse_req_t req, fuse_ino_t nodeid,
                           struct fuse_mbuf_iter *iter)
{
    struct fuse_read_in *arg;
    struct fuse_file_info fi;

    arg = fuse_mbuf_iter_advance(iter, sizeof(*arg));
    if (!arg) {
        fuse_reply_err(req, EINVAL);
        return;
    }

    memset(&fi, 0, sizeof(fi));
    fi.fh = arg->fh;

    if (req->se->op.readdirplus) {
        req->se->op.readdirplus(req, nodeid, arg->size, arg->offset, &fi);
    } else {
        fuse_reply_err(req, ENOSYS);
    }
}

static void do_releasedir(fuse_req_t req, fuse_ino_t nodeid,
                          struct fuse_mbuf_iter *iter)
{
    struct fuse_release_in *arg;
    struct fuse_file_info fi;

    arg = fuse_mbuf_iter_advance(iter, sizeof(*arg));
    if (!arg) {
        fuse_reply_err(req, EINVAL);
        return;
    }

    memset(&fi, 0, sizeof(fi));
    fi.flags = arg->flags;
    fi.fh = arg->fh;

    if (req->se->op.releasedir) {
        req->se->op.releasedir(req, nodeid, &fi);
    } else {
        fuse_reply_err(req, 0);
    }
}

static void do_fsyncdir(fuse_req_t req, fuse_ino_t nodeid,
                        struct fuse_mbuf_iter *iter)
{
    struct fuse_fsync_in *arg;
    struct fuse_file_info fi;
    int datasync;

    arg = fuse_mbuf_iter_advance(iter, sizeof(*arg));
    if (!arg) {
        fuse_reply_err(req, EINVAL);
        return;
    }
    datasync = arg->fsync_flags & 1;

    memset(&fi, 0, sizeof(fi));
    fi.fh = arg->fh;

    if (req->se->op.fsyncdir) {
        req->se->op.fsyncdir(req, nodeid, datasync, &fi);
    } else {
        fuse_reply_err(req, ENOSYS);
    }
}

static void do_statfs(fuse_req_t req, fuse_ino_t nodeid,
                      struct fuse_mbuf_iter *iter)
{
    if (req->se->op.statfs) {
        req->se->op.statfs(req, nodeid);
    } else {
        struct statvfs buf = {
            .f_namemax = 255,
            .f_bsize = 512,
        };
        fuse_reply_statfs(req, &buf);
    }
}

static void do_setxattr(fuse_req_t req, fuse_ino_t nodeid,
                        struct fuse_mbuf_iter *iter)
{
    struct fuse_setxattr_in *arg;
    const char *name;
    const char *value;
    bool setxattr_ext = req->se->conn.want & FUSE_CAP_SETXATTR_EXT;

    if (setxattr_ext) {
        arg = fuse_mbuf_iter_advance(iter, sizeof(*arg));
    } else {
        arg = fuse_mbuf_iter_advance(iter, FUSE_COMPAT_SETXATTR_IN_SIZE);
    }
    name = fuse_mbuf_iter_advance_str(iter);
    if (!arg || !name) {
        fuse_reply_err(req, EINVAL);
        return;
    }

    value = fuse_mbuf_iter_advance(iter, arg->size);
    if (!value) {
        fuse_reply_err(req, EINVAL);
        return;
    }

    if (req->se->op.setxattr) {
        uint32_t setxattr_flags = setxattr_ext ? arg->setxattr_flags : 0;
        req->se->op.setxattr(req, nodeid, name, value, arg->size, arg->flags,
                             setxattr_flags);
    } else {
        fuse_reply_err(req, ENOSYS);
    }
}

static void do_getxattr(fuse_req_t req, fuse_ino_t nodeid,
                        struct fuse_mbuf_iter *iter)
{
    struct fuse_getxattr_in *arg;
    const char *name;

    arg = fuse_mbuf_iter_advance(iter, sizeof(*arg));
    name = fuse_mbuf_iter_advance_str(iter);
    if (!arg || !name) {
        fuse_reply_err(req, EINVAL);
        return;
    }

    if (req->se->op.getxattr) {
        req->se->op.getxattr(req, nodeid, name, arg->size);
    } else {
        fuse_reply_err(req, ENOSYS);
    }
}

static void do_listxattr(fuse_req_t req, fuse_ino_t nodeid,
                         struct fuse_mbuf_iter *iter)
{
    struct fuse_getxattr_in *arg;

    arg = fuse_mbuf_iter_advance(iter, sizeof(*arg));
    if (!arg) {
        fuse_reply_err(req, EINVAL);
        return;
    }

    if (req->se->op.listxattr) {
        req->se->op.listxattr(req, nodeid, arg->size);
    } else {
        fuse_reply_err(req, ENOSYS);
    }
}

static void do_removexattr(fuse_req_t req, fuse_ino_t nodeid,
                           struct fuse_mbuf_iter *iter)
{
    const char *name = fuse_mbuf_iter_advance_str(iter);

    if (!name) {
        fuse_reply_err(req, EINVAL);
        return;
    }

    if (req->se->op.removexattr) {
        req->se->op.removexattr(req, nodeid, name);
    } else {
        fuse_reply_err(req, ENOSYS);
    }
}

static void convert_fuse_file_lock(struct fuse_file_lock *fl,
                                   struct flock *flock)
{
    memset(flock, 0, sizeof(struct flock));
    flock->l_type = fl->type;
    flock->l_whence = SEEK_SET;
    flock->l_start = fl->start;
    if (fl->end == OFFSET_MAX) {
        flock->l_len = 0;
    } else {
        flock->l_len = fl->end - fl->start + 1;
    }
    flock->l_pid = fl->pid;
}

static void do_getlk(fuse_req_t req, fuse_ino_t nodeid,
                     struct fuse_mbuf_iter *iter)
{
    struct fuse_lk_in *arg;
    struct fuse_file_info fi;
    struct flock flock;

    arg = fuse_mbuf_iter_advance(iter, sizeof(*arg));
    if (!arg) {
        fuse_reply_err(req, EINVAL);
        return;
    }

    memset(&fi, 0, sizeof(fi));
    fi.fh = arg->fh;
    fi.lock_owner = arg->owner;

    convert_fuse_file_lock(&arg->lk, &flock);
    if (req->se->op.getlk) {
        req->se->op.getlk(req, nodeid, &fi, &flock);
    } else {
        fuse_reply_err(req, ENOSYS);
    }
}

static void do_setlk_common(fuse_req_t req, fuse_ino_t nodeid,
                            struct fuse_mbuf_iter *iter, int sleep)
{
    struct fuse_lk_in *arg;
    struct fuse_file_info fi;
    struct flock flock;

    arg = fuse_mbuf_iter_advance(iter, sizeof(*arg));
    if (!arg) {
        fuse_reply_err(req, EINVAL);
        return;
    }

    memset(&fi, 0, sizeof(fi));
    fi.fh = arg->fh;
    fi.lock_owner = arg->owner;

    if (arg->lk_flags & FUSE_LK_FLOCK) {
        int op = 0;

        switch (arg->lk.type) {
        case F_RDLCK:
            op = LOCK_SH;
            break;
        case F_WRLCK:
            op = LOCK_EX;
            break;
        case F_UNLCK:
            op = LOCK_UN;
            break;
        }

        if (req->se->op.flock) {
            req->se->op.flock(req, nodeid, &fi, op);
        } else {
            fuse_reply_err(req, ENOSYS);
        }
    } else {
        convert_fuse_file_lock(&arg->lk, &flock);
        if (req->se->op.setlk) {
            req->se->op.setlk(req, nodeid, &fi, &flock, sleep);
        } else {
            fuse_reply_err(req, ENOSYS);
        }
    }
}

static void do_setlk(fuse_req_t req, fuse_ino_t nodeid,
                     struct fuse_mbuf_iter *iter)
{
    do_setlk_common(req, nodeid, iter, 0);
}

static void do_setlkw(fuse_req_t req, fuse_ino_t nodeid,
                      struct fuse_mbuf_iter *iter)
{
    do_setlk_common(req, nodeid, iter, 1);
}

static int find_interrupted(struct fuse_session *se, struct fuse_req *req)
{
    struct fuse_req *curr;

    for (curr = se->list.next; curr != &se->list; curr = curr->next) {
        if (curr->unique == req->u.i.unique) {
            fuse_interrupt_func_t func;
            void *data;

            curr->ctr++;
            pthread_mutex_unlock(&se->lock);

            /* Ugh, ugly locking */
            pthread_mutex_lock(&curr->lock);
            pthread_mutex_lock(&se->lock);
            curr->interrupted = 1;
            func = curr->u.ni.func;
            data = curr->u.ni.data;
            pthread_mutex_unlock(&se->lock);
            if (func) {
                func(curr, data);
            }
            pthread_mutex_unlock(&curr->lock);

            pthread_mutex_lock(&se->lock);
            curr->ctr--;
            if (!curr->ctr) {
                destroy_req(curr);
            }

            return 1;
        }
    }
    for (curr = se->interrupts.next; curr != &se->interrupts;
         curr = curr->next) {
        if (curr->u.i.unique == req->u.i.unique) {
            return 1;
        }
    }
    return 0;
}

static void do_interrupt(fuse_req_t req, fuse_ino_t nodeid,
                         struct fuse_mbuf_iter *iter)
{
    struct fuse_interrupt_in *arg;
    struct fuse_session *se = req->se;

    arg = fuse_mbuf_iter_advance(iter, sizeof(*arg));
    if (!arg) {
        fuse_reply_err(req, EINVAL);
        return;
    }

    fuse_log(FUSE_LOG_DEBUG, "INTERRUPT: %llu\n",
             (unsigned long long)arg->unique);

    req->u.i.unique = arg->unique;

    pthread_mutex_lock(&se->lock);
    if (find_interrupted(se, req)) {
        destroy_req(req);
    } else {
        list_add_req(req, &se->interrupts);
    }
    pthread_mutex_unlock(&se->lock);
}

static struct fuse_req *check_interrupt(struct fuse_session *se,
                                        struct fuse_req *req)
{
    struct fuse_req *curr;

    for (curr = se->interrupts.next; curr != &se->interrupts;
         curr = curr->next) {
        if (curr->u.i.unique == req->unique) {
            req->interrupted = 1;
            list_del_req(curr);
            g_free(curr);
            return NULL;
        }
    }
    curr = se->interrupts.next;
    if (curr != &se->interrupts) {
        list_del_req(curr);
        list_init_req(curr);
        return curr;
    }
    return NULL;
}

static void do_bmap(fuse_req_t req, fuse_ino_t nodeid,
                    struct fuse_mbuf_iter *iter)
{
    struct fuse_bmap_in *arg = fuse_mbuf_iter_advance(iter, sizeof(*arg));

    if (!arg) {
        fuse_reply_err(req, EINVAL);
        return;
    }

    if (req->se->op.bmap) {
        req->se->op.bmap(req, nodeid, arg->blocksize, arg->block);
    } else {
        fuse_reply_err(req, ENOSYS);
    }
}

static void do_ioctl(fuse_req_t req, fuse_ino_t nodeid,
                     struct fuse_mbuf_iter *iter)
{
    struct fuse_ioctl_in *arg;
    unsigned int flags;
    void *in_buf = NULL;
    struct fuse_file_info fi;

    arg = fuse_mbuf_iter_advance(iter, sizeof(*arg));
    if (!arg) {
        fuse_reply_err(req, EINVAL);
        return;
    }

    flags = arg->flags;
    if (flags & FUSE_IOCTL_DIR && !(req->se->conn.want & FUSE_CAP_IOCTL_DIR)) {
        fuse_reply_err(req, ENOTTY);
        return;
    }

    if (arg->in_size) {
        in_buf = fuse_mbuf_iter_advance(iter, arg->in_size);
        if (!in_buf) {
            fuse_reply_err(req, EINVAL);
            return;
        }
    }

    memset(&fi, 0, sizeof(fi));
    fi.fh = arg->fh;

    if (sizeof(void *) == 4 && !(flags & FUSE_IOCTL_32BIT)) {
        req->ioctl_64bit = 1;
    }

    if (req->se->op.ioctl) {
        req->se->op.ioctl(req, nodeid, arg->cmd, (void *)(uintptr_t)arg->arg,
                          &fi, flags, in_buf, arg->in_size, arg->out_size);
    } else {
        fuse_reply_err(req, ENOSYS);
    }
}

void fuse_pollhandle_destroy(struct fuse_pollhandle *ph)
{
    free(ph);
}

static void do_poll(fuse_req_t req, fuse_ino_t nodeid,
                    struct fuse_mbuf_iter *iter)
{
    struct fuse_poll_in *arg;
    struct fuse_file_info fi;

    arg = fuse_mbuf_iter_advance(iter, sizeof(*arg));
    if (!arg) {
        fuse_reply_err(req, EINVAL);
        return;
    }

    memset(&fi, 0, sizeof(fi));
    fi.fh = arg->fh;
    fi.poll_events = arg->events;

    if (req->se->op.poll) {
        struct fuse_pollhandle *ph = NULL;

        if (arg->flags & FUSE_POLL_SCHEDULE_NOTIFY) {
            ph = malloc(sizeof(struct fuse_pollhandle));
            if (ph == NULL) {
                fuse_reply_err(req, ENOMEM);
                return;
            }
            ph->se = req->se;
        }

        req->se->op.poll(req, nodeid, &fi, ph);
    } else {
        fuse_reply_err(req, ENOSYS);
    }
}

static void do_fallocate(fuse_req_t req, fuse_ino_t nodeid,
                         struct fuse_mbuf_iter *iter)
{
    struct fuse_fallocate_in *arg;
    struct fuse_file_info fi;

    arg = fuse_mbuf_iter_advance(iter, sizeof(*arg));
    if (!arg) {
        fuse_reply_err(req, EINVAL);
        return;
    }

    memset(&fi, 0, sizeof(fi));
    fi.fh = arg->fh;

    if (req->se->op.fallocate) {
        req->se->op.fallocate(req, nodeid, arg->mode, arg->offset, arg->length,
                              &fi);
    } else {
        fuse_reply_err(req, ENOSYS);
    }
}

static void do_copy_file_range(fuse_req_t req, fuse_ino_t nodeid_in,
                               struct fuse_mbuf_iter *iter)
{
    struct fuse_copy_file_range_in *arg;
    struct fuse_file_info fi_in, fi_out;

    arg = fuse_mbuf_iter_advance(iter, sizeof(*arg));
    if (!arg) {
        fuse_reply_err(req, EINVAL);
        return;
    }

    memset(&fi_in, 0, sizeof(fi_in));
    fi_in.fh = arg->fh_in;

    memset(&fi_out, 0, sizeof(fi_out));
    fi_out.fh = arg->fh_out;

    if (req->se->op.copy_file_range) {
        req->se->op.copy_file_range(req, nodeid_in, arg->off_in, &fi_in,
                                    arg->nodeid_out, arg->off_out, &fi_out,
                                    arg->len, arg->flags);
    } else {
        fuse_reply_err(req, ENOSYS);
    }
}

static void do_lseek(fuse_req_t req, fuse_ino_t nodeid,
                     struct fuse_mbuf_iter *iter)
{
    struct fuse_lseek_in *arg;
    struct fuse_file_info fi;

    arg = fuse_mbuf_iter_advance(iter, sizeof(*arg));
    if (!arg) {
        fuse_reply_err(req, EINVAL);
        return;
    }
    memset(&fi, 0, sizeof(fi));
    fi.fh = arg->fh;

    if (req->se->op.lseek) {
        req->se->op.lseek(req, nodeid, arg->offset, arg->whence, &fi);
    } else {
        fuse_reply_err(req, ENOSYS);
    }
}

static void do_syncfs(fuse_req_t req, fuse_ino_t nodeid,
                      struct fuse_mbuf_iter *iter)
{
    if (req->se->op.syncfs) {
        req->se->op.syncfs(req, nodeid);
    } else {
        fuse_reply_err(req, ENOSYS);
    }
}

static void do_init(fuse_req_t req, fuse_ino_t nodeid,
                    struct fuse_mbuf_iter *iter)
{
    size_t compat_size = offsetof(struct fuse_init_in, max_readahead);
    size_t compat2_size = offsetof(struct fuse_init_in, flags) +
                          sizeof(uint32_t);
    /* Fuse structure extended with minor version 36 */
    size_t compat3_size = endof(struct fuse_init_in, unused);
    struct fuse_init_in *arg;
    struct fuse_init_out outarg;
    struct fuse_session *se = req->se;
    size_t bufsize = se->bufsize;
    size_t outargsize = sizeof(outarg);
    uint64_t flags = 0;

    /* First consume the old fields... */
    arg = fuse_mbuf_iter_advance(iter, compat_size);
    if (!arg) {
        fuse_reply_err(req, EINVAL);
        return;
    }

    /* ...and now consume the new fields. */
    if (arg->major == 7 && arg->minor >= 6) {
        if (!fuse_mbuf_iter_advance(iter, compat2_size - compat_size)) {
            fuse_reply_err(req, EINVAL);
            return;
        }
        flags |= arg->flags;
    }

    /*
     * fuse_init_in was extended again with minor version 36. Just read
     * current known size of fuse_init so that future extension and
     * header rebase does not cause breakage.
     */
    if (sizeof(*arg) > compat2_size && (arg->flags & FUSE_INIT_EXT)) {
        if (!fuse_mbuf_iter_advance(iter, compat3_size - compat2_size)) {
            fuse_reply_err(req, EINVAL);
            return;
        }
        flags |= (uint64_t) arg->flags2 << 32;
    }

    fuse_log(FUSE_LOG_DEBUG, "INIT: %u.%u\n", arg->major, arg->minor);
    if (arg->major == 7 && arg->minor >= 6) {
        fuse_log(FUSE_LOG_DEBUG, "flags=0x%016llx\n", flags);
        fuse_log(FUSE_LOG_DEBUG, "max_readahead=0x%08x\n", arg->max_readahead);
    }
    se->conn.proto_major = arg->major;
    se->conn.proto_minor = arg->minor;
    se->conn.capable = 0;
    se->conn.want = 0;

    memset(&outarg, 0, sizeof(outarg));
    outarg.major = FUSE_KERNEL_VERSION;
    outarg.minor = FUSE_KERNEL_MINOR_VERSION;

    if (arg->major < 7 || (arg->major == 7 && arg->minor < 31)) {
        fuse_log(FUSE_LOG_ERR, "fuse: unsupported protocol version: %u.%u\n",
                 arg->major, arg->minor);
        fuse_reply_err(req, EPROTO);
        return;
    }

    if (arg->major > 7) {
        /* Wait for a second INIT request with a 7.X version */
        send_reply_ok(req, &outarg, sizeof(outarg));
        return;
    }

    if (arg->max_readahead < se->conn.max_readahead) {
        se->conn.max_readahead = arg->max_readahead;
    }
    if (flags & FUSE_ASYNC_READ) {
        se->conn.capable |= FUSE_CAP_ASYNC_READ;
    }
    if (flags & FUSE_POSIX_LOCKS) {
        se->conn.capable |= FUSE_CAP_POSIX_LOCKS;
    }
    if (flags & FUSE_ATOMIC_O_TRUNC) {
        se->conn.capable |= FUSE_CAP_ATOMIC_O_TRUNC;
    }
    if (flags & FUSE_EXPORT_SUPPORT) {
        se->conn.capable |= FUSE_CAP_EXPORT_SUPPORT;
    }
    if (flags & FUSE_DONT_MASK) {
        se->conn.capable |= FUSE_CAP_DONT_MASK;
    }
    if (flags & FUSE_FLOCK_LOCKS) {
        se->conn.capable |= FUSE_CAP_FLOCK_LOCKS;
    }
    if (flags & FUSE_AUTO_INVAL_DATA) {
        se->conn.capable |= FUSE_CAP_AUTO_INVAL_DATA;
    }
    if (flags & FUSE_DO_READDIRPLUS) {
        se->conn.capable |= FUSE_CAP_READDIRPLUS;
    }
    if (flags & FUSE_READDIRPLUS_AUTO) {
        se->conn.capable |= FUSE_CAP_READDIRPLUS_AUTO;
    }
    if (flags & FUSE_ASYNC_DIO) {
        se->conn.capable |= FUSE_CAP_ASYNC_DIO;
    }
    if (flags & FUSE_WRITEBACK_CACHE) {
        se->conn.capable |= FUSE_CAP_WRITEBACK_CACHE;
    }
    if (flags & FUSE_NO_OPEN_SUPPORT) {
        se->conn.capable |= FUSE_CAP_NO_OPEN_SUPPORT;
    }
    if (flags & FUSE_PARALLEL_DIROPS) {
        se->conn.capable |= FUSE_CAP_PARALLEL_DIROPS;
    }
    if (flags & FUSE_POSIX_ACL) {
        se->conn.capable |= FUSE_CAP_POSIX_ACL;
    }
    if (flags & FUSE_HANDLE_KILLPRIV) {
        se->conn.capable |= FUSE_CAP_HANDLE_KILLPRIV;
    }
    if (flags & FUSE_NO_OPENDIR_SUPPORT) {
        se->conn.capable |= FUSE_CAP_NO_OPENDIR_SUPPORT;
    }
    if (!(flags & FUSE_MAX_PAGES)) {
        size_t max_bufsize = FUSE_DEFAULT_MAX_PAGES_PER_REQ * getpagesize() +
                             FUSE_BUFFER_HEADER_SIZE;
        if (bufsize > max_bufsize) {
            bufsize = max_bufsize;
        }
    }
    if (flags & FUSE_SUBMOUNTS) {
        se->conn.capable |= FUSE_CAP_SUBMOUNTS;
    }
    if (flags & FUSE_HANDLE_KILLPRIV_V2) {
        se->conn.capable |= FUSE_CAP_HANDLE_KILLPRIV_V2;
    }
    if (flags & FUSE_SETXATTR_EXT) {
        se->conn.capable |= FUSE_CAP_SETXATTR_EXT;
    }
    if (flags & FUSE_SECURITY_CTX) {
        se->conn.capable |= FUSE_CAP_SECURITY_CTX;
    }
#ifdef HAVE_VMSPLICE
    se->conn.capable |= FUSE_CAP_SPLICE_WRITE | FUSE_CAP_SPLICE_MOVE;
#endif
    se->conn.capable |= FUSE_CAP_SPLICE_READ;
    se->conn.capable |= FUSE_CAP_IOCTL_DIR;

    /*
     * Default settings for modern filesystems.
     *
     * Most of these capabilities were disabled by default in
     * libfuse2 for backwards compatibility reasons. In libfuse3,
     * we can finally enable them by default (as long as they're
     * supported by the kernel).
     */
#define LL_SET_DEFAULT(cond, cap)             \
    if ((cond) && (se->conn.capable & (cap))) \
        se->conn.want |= (cap)
    LL_SET_DEFAULT(1, FUSE_CAP_ASYNC_READ);
    LL_SET_DEFAULT(1, FUSE_CAP_PARALLEL_DIROPS);
    LL_SET_DEFAULT(1, FUSE_CAP_AUTO_INVAL_DATA);
    LL_SET_DEFAULT(1, FUSE_CAP_HANDLE_KILLPRIV);
    LL_SET_DEFAULT(1, FUSE_CAP_ASYNC_DIO);
    LL_SET_DEFAULT(1, FUSE_CAP_IOCTL_DIR);
    LL_SET_DEFAULT(1, FUSE_CAP_ATOMIC_O_TRUNC);
    LL_SET_DEFAULT(se->op.write_buf, FUSE_CAP_SPLICE_READ);
    LL_SET_DEFAULT(se->op.getlk && se->op.setlk, FUSE_CAP_POSIX_LOCKS);
    LL_SET_DEFAULT(se->op.flock, FUSE_CAP_FLOCK_LOCKS);
    LL_SET_DEFAULT(se->op.readdirplus, FUSE_CAP_READDIRPLUS);
    LL_SET_DEFAULT(se->op.readdirplus && se->op.readdir,
                   FUSE_CAP_READDIRPLUS_AUTO);
    se->conn.time_gran = 1;

    if (bufsize < FUSE_MIN_READ_BUFFER) {
        fuse_log(FUSE_LOG_ERR, "fuse: warning: buffer size too small: %zu\n",
                 bufsize);
        bufsize = FUSE_MIN_READ_BUFFER;
    }
    se->bufsize = bufsize;

    if (se->conn.max_write > bufsize - FUSE_BUFFER_HEADER_SIZE) {
        se->conn.max_write = bufsize - FUSE_BUFFER_HEADER_SIZE;
    }

    se->got_init = 1;
    se->got_destroy = 0;
    if (se->op.init) {
        se->op.init(se->userdata, &se->conn);
    }

    if (se->conn.want & (~se->conn.capable)) {
        fuse_log(FUSE_LOG_ERR,
                 "fuse: error: filesystem requested capabilities "
                 "0x%llx that are not supported by kernel, aborting.\n",
                 se->conn.want & (~se->conn.capable));
        fuse_reply_err(req, EPROTO);
        se->error = -EPROTO;
        fuse_session_exit(se);
        return;
    }

    if (se->conn.max_write < bufsize - FUSE_BUFFER_HEADER_SIZE) {
        se->bufsize = se->conn.max_write + FUSE_BUFFER_HEADER_SIZE;
    }
    if (flags & FUSE_MAX_PAGES) {
        outarg.flags |= FUSE_MAX_PAGES;
        outarg.max_pages = (se->conn.max_write - 1) / getpagesize() + 1;
    }

    /*
     * Always enable big writes, this is superseded
     * by the max_write option
     */
    outarg.flags |= FUSE_BIG_WRITES;

    if (se->conn.want & FUSE_CAP_ASYNC_READ) {
        outarg.flags |= FUSE_ASYNC_READ;
    }
    if (se->conn.want & FUSE_CAP_PARALLEL_DIROPS) {
        outarg.flags |= FUSE_PARALLEL_DIROPS;
    }
    if (se->conn.want & FUSE_CAP_POSIX_LOCKS) {
        outarg.flags |= FUSE_POSIX_LOCKS;
    }
    if (se->conn.want & FUSE_CAP_ATOMIC_O_TRUNC) {
        outarg.flags |= FUSE_ATOMIC_O_TRUNC;
    }
    if (se->conn.want & FUSE_CAP_EXPORT_SUPPORT) {
        outarg.flags |= FUSE_EXPORT_SUPPORT;
    }
    if (se->conn.want & FUSE_CAP_DONT_MASK) {
        outarg.flags |= FUSE_DONT_MASK;
    }
    if (se->conn.want & FUSE_CAP_FLOCK_LOCKS) {
        outarg.flags |= FUSE_FLOCK_LOCKS;
    }
    if (se->conn.want & FUSE_CAP_AUTO_INVAL_DATA) {
        outarg.flags |= FUSE_AUTO_INVAL_DATA;
    }
    if (se->conn.want & FUSE_CAP_READDIRPLUS) {
        outarg.flags |= FUSE_DO_READDIRPLUS;
    }
    if (se->conn.want & FUSE_CAP_READDIRPLUS_AUTO) {
        outarg.flags |= FUSE_READDIRPLUS_AUTO;
    }
    if (se->conn.want & FUSE_CAP_ASYNC_DIO) {
        outarg.flags |= FUSE_ASYNC_DIO;
    }
    if (se->conn.want & FUSE_CAP_WRITEBACK_CACHE) {
        outarg.flags |= FUSE_WRITEBACK_CACHE;
    }
    if (se->conn.want & FUSE_CAP_POSIX_ACL) {
        outarg.flags |= FUSE_POSIX_ACL;
    }
    outarg.max_readahead = se->conn.max_readahead;
    outarg.max_write = se->conn.max_write;
    if (se->conn.max_background >= (1 << 16)) {
        se->conn.max_background = (1 << 16) - 1;
    }
    if (se->conn.congestion_threshold > se->conn.max_background) {
        se->conn.congestion_threshold = se->conn.max_background;
    }
    if (!se->conn.congestion_threshold) {
        se->conn.congestion_threshold = se->conn.max_background * 3 / 4;
    }

    outarg.max_background = se->conn.max_background;
    outarg.congestion_threshold = se->conn.congestion_threshold;
    outarg.time_gran = se->conn.time_gran;

    if (se->conn.want & FUSE_CAP_HANDLE_KILLPRIV_V2) {
        outarg.flags |= FUSE_HANDLE_KILLPRIV_V2;
    }

    if (se->conn.want & FUSE_CAP_SETXATTR_EXT) {
        outarg.flags |= FUSE_SETXATTR_EXT;
    }

    if (se->conn.want & FUSE_CAP_SECURITY_CTX) {
        /* bits 32..63 get shifted down 32 bits into the flags2 field */
        outarg.flags2 |= FUSE_SECURITY_CTX >> 32;
    }

    fuse_log(FUSE_LOG_DEBUG, "   INIT: %u.%u\n", outarg.major, outarg.minor);
    fuse_log(FUSE_LOG_DEBUG, "   flags2=0x%08x flags=0x%08x\n", outarg.flags2,
             outarg.flags);
    fuse_log(FUSE_LOG_DEBUG, "   max_readahead=0x%08x\n", outarg.max_readahead);
    fuse_log(FUSE_LOG_DEBUG, "   max_write=0x%08x\n", outarg.max_write);
    fuse_log(FUSE_LOG_DEBUG, "   max_background=%i\n", outarg.max_background);
    fuse_log(FUSE_LOG_DEBUG, "   congestion_threshold=%i\n",
             outarg.congestion_threshold);
    fuse_log(FUSE_LOG_DEBUG, "   time_gran=%u\n", outarg.time_gran);

    send_reply_ok(req, &outarg, outargsize);
}

static void do_destroy(fuse_req_t req, fuse_ino_t nodeid,
                       struct fuse_mbuf_iter *iter)
{
    struct fuse_session *se = req->se;

    se->got_destroy = 1;
    se->got_init = 0;
    if (se->op.destroy) {
        se->op.destroy(se->userdata);
    }

    send_reply_ok(req, NULL, 0);
}

int fuse_lowlevel_notify_store(struct fuse_session *se, fuse_ino_t ino,
                               off_t offset, struct fuse_bufvec *bufv)
{
    struct fuse_out_header out = {
        .error = FUSE_NOTIFY_STORE,
    };
    struct fuse_notify_store_out outarg = {
        .nodeid = ino,
        .offset = offset,
        .size = fuse_buf_size(bufv),
    };
    struct iovec iov[3];
    int res;

    iov[0].iov_base = &out;
    iov[0].iov_len = sizeof(out);
    iov[1].iov_base = &outarg;
    iov[1].iov_len = sizeof(outarg);

    res = fuse_send_data_iov(se, NULL, iov, 2, bufv);
    if (res > 0) {
        res = -res;
    }

    return res;
}

void *fuse_req_userdata(fuse_req_t req)
{
    return req->se->userdata;
}

const struct fuse_ctx *fuse_req_ctx(fuse_req_t req)
{
    return &req->ctx;
}

void fuse_req_interrupt_func(fuse_req_t req, fuse_interrupt_func_t func,
                             void *data)
{
    pthread_mutex_lock(&req->lock);
    pthread_mutex_lock(&req->se->lock);
    req->u.ni.func = func;
    req->u.ni.data = data;
    pthread_mutex_unlock(&req->se->lock);
    if (req->interrupted && func) {
        func(req, data);
    }
    pthread_mutex_unlock(&req->lock);
}

int fuse_req_interrupted(fuse_req_t req)
{
    int interrupted;

    pthread_mutex_lock(&req->se->lock);
    interrupted = req->interrupted;
    pthread_mutex_unlock(&req->se->lock);

    return interrupted;
}

static struct {
    void (*func)(fuse_req_t, fuse_ino_t, struct fuse_mbuf_iter *);
    const char *name;
} fuse_ll_ops[] = {
    [FUSE_LOOKUP] = { do_lookup, "LOOKUP" },
    [FUSE_FORGET] = { do_forget, "FORGET" },
    [FUSE_GETATTR] = { do_getattr, "GETATTR" },
    [FUSE_SETATTR] = { do_setattr, "SETATTR" },
    [FUSE_READLINK] = { do_readlink, "READLINK" },
    [FUSE_SYMLINK] = { do_symlink, "SYMLINK" },
    [FUSE_MKNOD] = { do_mknod, "MKNOD" },
    [FUSE_MKDIR] = { do_mkdir, "MKDIR" },
    [FUSE_UNLINK] = { do_unlink, "UNLINK" },
    [FUSE_RMDIR] = { do_rmdir, "RMDIR" },
    [FUSE_RENAME] = { do_rename, "RENAME" },
    [FUSE_LINK] = { do_link, "LINK" },
    [FUSE_OPEN] = { do_open, "OPEN" },
    [FUSE_READ] = { do_read, "READ" },
    [FUSE_WRITE] = { do_write, "WRITE" },
    [FUSE_STATFS] = { do_statfs, "STATFS" },
    [FUSE_RELEASE] = { do_release, "RELEASE" },
    [FUSE_FSYNC] = { do_fsync, "FSYNC" },
    [FUSE_SETXATTR] = { do_setxattr, "SETXATTR" },
    [FUSE_GETXATTR] = { do_getxattr, "GETXATTR" },
    [FUSE_LISTXATTR] = { do_listxattr, "LISTXATTR" },
    [FUSE_REMOVEXATTR] = { do_removexattr, "REMOVEXATTR" },
    [FUSE_FLUSH] = { do_flush, "FLUSH" },
    [FUSE_INIT] = { do_init, "INIT" },
    [FUSE_OPENDIR] = { do_opendir, "OPENDIR" },
    [FUSE_READDIR] = { do_readdir, "READDIR" },
    [FUSE_RELEASEDIR] = { do_releasedir, "RELEASEDIR" },
    [FUSE_FSYNCDIR] = { do_fsyncdir, "FSYNCDIR" },
    [FUSE_GETLK] = { do_getlk, "GETLK" },
    [FUSE_SETLK] = { do_setlk, "SETLK" },
    [FUSE_SETLKW] = { do_setlkw, "SETLKW" },
    [FUSE_ACCESS] = { do_access, "ACCESS" },
    [FUSE_CREATE] = { do_create, "CREATE" },
    [FUSE_INTERRUPT] = { do_interrupt, "INTERRUPT" },
    [FUSE_BMAP] = { do_bmap, "BMAP" },
    [FUSE_IOCTL] = { do_ioctl, "IOCTL" },
    [FUSE_POLL] = { do_poll, "POLL" },
    [FUSE_FALLOCATE] = { do_fallocate, "FALLOCATE" },
    [FUSE_DESTROY] = { do_destroy, "DESTROY" },
    [FUSE_NOTIFY_REPLY] = { NULL, "NOTIFY_REPLY" },
    [FUSE_BATCH_FORGET] = { do_batch_forget, "BATCH_FORGET" },
    [FUSE_READDIRPLUS] = { do_readdirplus, "READDIRPLUS" },
    [FUSE_RENAME2] = { do_rename2, "RENAME2" },
    [FUSE_COPY_FILE_RANGE] = { do_copy_file_range, "COPY_FILE_RANGE" },
    [FUSE_LSEEK] = { do_lseek, "LSEEK" },
    [FUSE_SYNCFS] = { do_syncfs, "SYNCFS" },
};

#define FUSE_MAXOP (sizeof(fuse_ll_ops) / sizeof(fuse_ll_ops[0]))

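/*
 * Example (illustrative): the table is indexed directly by the opcode from
 * the request header, so dispatch in fuse_session_process_buf_int() reduces
 * to a bounds check plus an indirect call:
 *
 *     if (in->opcode < FUSE_MAXOP && fuse_ll_ops[in->opcode].func) {
 *         fuse_ll_ops[in->opcode].func(req, in->nodeid, &iter);
 *     }
 */
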
static const char *opname(enum fuse_opcode opcode)
{
    if (opcode >= FUSE_MAXOP || !fuse_ll_ops[opcode].name) {
        return "???";
    }
    return fuse_ll_ops[opcode].name;
}

void fuse_session_process_buf(struct fuse_session *se,
                              const struct fuse_buf *buf)
{
    struct fuse_bufvec bufv = { .buf[0] = *buf, .count = 1 };
    fuse_session_process_buf_int(se, &bufv, NULL);
}

/*
 * Restriction:
 *   bufv is normally a single entry buffer, except for a write
 *   where (if it's in memory) then the bufv may be multiple entries,
 *   where the first entry contains all headers and subsequent entries
 *   contain data
 *   bufv shall not use any offsets etc to make the data anything
 *   other than contiguous starting from 0.
 */
void fuse_session_process_buf_int(struct fuse_session *se,
                                  struct fuse_bufvec *bufv,
                                  struct fuse_chan *ch)
{
    const struct fuse_buf *buf = bufv->buf;
    struct fuse_mbuf_iter iter = FUSE_MBUF_ITER_INIT(buf);
    struct fuse_in_header *in;
    struct fuse_req *req;
    int err;

    /* The first buffer must be a memory buffer */
    assert(!(buf->flags & FUSE_BUF_IS_FD));

    in = fuse_mbuf_iter_advance(&iter, sizeof(*in));
    assert(in); /* caller guarantees the input buffer is large enough */

    fuse_log(
        FUSE_LOG_DEBUG,
        "unique: %llu, opcode: %s (%i), nodeid: %llu, insize: %zu, pid: %u\n",
        (unsigned long long)in->unique, opname((enum fuse_opcode)in->opcode),
        in->opcode, (unsigned long long)in->nodeid, buf->size, in->pid);

    req = fuse_ll_alloc_req(se);
    if (req == NULL) {
        struct fuse_out_header out = {
            .unique = in->unique,
            .error = -ENOMEM,
        };
        struct iovec iov = {
            .iov_base = &out,
            .iov_len = sizeof(struct fuse_out_header),
        };

        fuse_send_msg(se, ch, &iov, 1);
        return;
    }

    req->unique = in->unique;
    req->ctx.uid = in->uid;
    req->ctx.gid = in->gid;
    req->ctx.pid = in->pid;
    req->ch = ch;

    /*
     * INIT and DESTROY requests are serialized, all other request types
     * run in parallel.  This prevents races between FUSE_INIT and ordinary
     * requests, FUSE_INIT and FUSE_INIT, FUSE_INIT and FUSE_DESTROY, and
     * FUSE_DESTROY and FUSE_DESTROY.
     */
    if (in->opcode == FUSE_INIT || in->opcode == CUSE_INIT ||
        in->opcode == FUSE_DESTROY) {
        pthread_rwlock_wrlock(&se->init_rwlock);
    } else {
        pthread_rwlock_rdlock(&se->init_rwlock);
    }

    err = EIO;
    if (!se->got_init) {
        enum fuse_opcode expected;

        expected = se->cuse_data ? CUSE_INIT : FUSE_INIT;
        if (in->opcode != expected) {
            goto reply_err;
        }
    } else if (in->opcode == FUSE_INIT || in->opcode == CUSE_INIT) {
        if (fuse_lowlevel_is_virtio(se)) {
            /*
             * TODO: This is after a hard reboot typically, we need to do
             * a destroy, but we can't reply to this request yet so
             * we can't use do_destroy
             */
            fuse_log(FUSE_LOG_DEBUG, "%s: reinit\n", __func__);
            se->got_destroy = 1;
            se->got_init = 0;
            if (se->op.destroy) {
                se->op.destroy(se->userdata);
            }
        } else {
            goto reply_err;
        }
    }

    err = EACCES;
    /* Implement -o allow_root */
    if (se->deny_others && in->uid != se->owner && in->uid != 0 &&
        in->opcode != FUSE_INIT && in->opcode != FUSE_READ &&
        in->opcode != FUSE_WRITE && in->opcode != FUSE_FSYNC &&
        in->opcode != FUSE_RELEASE && in->opcode != FUSE_READDIR &&
        in->opcode != FUSE_FSYNCDIR && in->opcode != FUSE_RELEASEDIR &&
        in->opcode != FUSE_NOTIFY_REPLY && in->opcode != FUSE_READDIRPLUS) {
        goto reply_err;
    }

    err = ENOSYS;
    if (in->opcode >= FUSE_MAXOP || !fuse_ll_ops[in->opcode].func) {
        goto reply_err;
    }
    if (in->opcode != FUSE_INTERRUPT) {
        struct fuse_req *intr;
        pthread_mutex_lock(&se->lock);
        intr = check_interrupt(se, req);
        list_add_req(req, &se->list);
        pthread_mutex_unlock(&se->lock);
        if (intr) {
            fuse_reply_err(intr, EAGAIN);
        }
    }

    if (in->opcode == FUSE_WRITE && se->op.write_buf) {
        do_write_buf(req, in->nodeid, &iter, bufv);
    } else {
        fuse_ll_ops[in->opcode].func(req, in->nodeid, &iter);
    }

    pthread_rwlock_unlock(&se->init_rwlock);
    return;

reply_err:
    fuse_reply_err(req, err);
    pthread_rwlock_unlock(&se->init_rwlock);
}

#define LL_OPTION(n, o, v)                     \
    {                                          \
        n, offsetof(struct fuse_session, o), v \
    }

static const struct fuse_opt fuse_ll_opts[] = {
    LL_OPTION("debug", debug, 1),
    LL_OPTION("-d", debug, 1),
    LL_OPTION("--debug", debug, 1),
    LL_OPTION("allow_root", deny_others, 1),
    LL_OPTION("--socket-path=%s", vu_socket_path, 0),
    LL_OPTION("--socket-group=%s", vu_socket_group, 0),
    LL_OPTION("--fd=%d", vu_listen_fd, 0),
    LL_OPTION("--thread-pool-size=%d", thread_pool_size, 0),
    FUSE_OPT_END
};

void fuse_lowlevel_version(void)
{
    printf("using FUSE kernel interface version %i.%i\n", FUSE_KERNEL_VERSION,
           FUSE_KERNEL_MINOR_VERSION);
}

void fuse_lowlevel_help(void)
{
    /*
     * These are not all options, but the ones that are
     * potentially of interest to an end-user
     */
    printf(
        "    -o allow_root              allow access by root\n"
        "    --socket-path=PATH         path for the vhost-user socket\n"
        "    --socket-group=GRNAME      name of group for the vhost-user socket\n"
        "    --fd=FDNUM                 fd number of vhost-user socket\n"
        "    --thread-pool-size=NUM     thread pool size limit (default %d)\n",
        THREAD_POOL_SIZE);
}

void fuse_session_destroy(struct fuse_session *se)
{
    if (se->got_init && !se->got_destroy) {
        if (se->op.destroy) {
            se->op.destroy(se->userdata);
        }
    }
    pthread_rwlock_destroy(&se->init_rwlock);
    pthread_mutex_destroy(&se->lock);
    free(se->cuse_data);

    if (fuse_lowlevel_is_virtio(se)) {
        virtio_session_close(se);
    }

    free(se->vu_socket_path);
    se->vu_socket_path = NULL;

    g_free(se);
}

*fuse_session_new(struct fuse_args
*args
,
2628 const struct fuse_lowlevel_ops
*op
,
2629 size_t op_size
, void *userdata
)
2631 struct fuse_session
*se
;
2633 if (sizeof(struct fuse_lowlevel_ops
) < op_size
) {
2636 "fuse: warning: library too old, some operations may not work\n");
2637 op_size
= sizeof(struct fuse_lowlevel_ops
);
2640 if (args
->argc
== 0) {
2641 fuse_log(FUSE_LOG_ERR
,
2642 "fuse: empty argv passed to fuse_session_new().\n");
2646 se
= g_try_new0(struct fuse_session
, 1);
2648 fuse_log(FUSE_LOG_ERR
, "fuse: failed to allocate fuse object\n");
2652 se
->vu_listen_fd
= -1;
2653 se
->thread_pool_size
= THREAD_POOL_SIZE
;
2654 se
->conn
.max_write
= UINT_MAX
;
2655 se
->conn
.max_readahead
= UINT_MAX
;
2658 if (fuse_opt_parse(args
, se
, fuse_ll_opts
, NULL
) == -1) {
2661 if (args
->argc
== 1 && args
->argv
[0][0] == '-') {
2662 fuse_log(FUSE_LOG_ERR
,
2663 "fuse: warning: argv[0] looks like an option, but "
2664 "will be ignored\n");
2665 } else if (args
->argc
!= 1) {
2667 fuse_log(FUSE_LOG_ERR
, "fuse: unknown option(s): `");
2668 for (i
= 1; i
< args
->argc
- 1; i
++) {
2669 fuse_log(FUSE_LOG_ERR
, "%s ", args
->argv
[i
]);
2671 fuse_log(FUSE_LOG_ERR
, "%s'\n", args
->argv
[i
]);
2675 if (!se
->vu_socket_path
&& se
->vu_listen_fd
< 0) {
2676 fuse_log(FUSE_LOG_ERR
, "fuse: missing --socket-path or --fd option\n");
2679 if (se
->vu_socket_path
&& se
->vu_listen_fd
>= 0) {
2680 fuse_log(FUSE_LOG_ERR
,
2681 "fuse: --socket-path and --fd cannot be given together\n");
2684 if (se
->vu_socket_group
&& !se
->vu_socket_path
) {
2685 fuse_log(FUSE_LOG_ERR
,
2686 "fuse: --socket-group can only be used with --socket-path\n");
2690 se
->bufsize
= FUSE_MAX_MAX_PAGES
* getpagesize() + FUSE_BUFFER_HEADER_SIZE
;
2692 list_init_req(&se
->list
);
2693 list_init_req(&se
->interrupts
);
2694 fuse_mutex_init(&se
->lock
);
2695 pthread_rwlock_init(&se
->init_rwlock
, NULL
);
2697 memcpy(&se
->op
, op
, op_size
);
2698 se
->owner
= getuid();
2699 se
->userdata
= userdata
;
2704 fuse_opt_free_args(args
);
2711 int fuse_session_mount(struct fuse_session
*se
)
2713 return virtio_session_mount(se
);
2716 int fuse_session_fd(struct fuse_session
*se
)
2721 void fuse_session_unmount(struct fuse_session
*se
)
2725 int fuse_lowlevel_is_virtio(struct fuse_session
*se
)
2727 return !!se
->virtio_dev
;
2730 void fuse_session_exit(struct fuse_session
*se
)
2735 void fuse_session_reset(struct fuse_session
*se
)
2741 int fuse_session_exited(struct fuse_session
*se
)