2 * FUSE: Filesystem in Userspace
3 * Copyright (C) 2001-2007 Miklos Szeredi <miklos@szeredi.hu>
5 * Implementation of (most of) the low-level FUSE API. The session loop
6 * functions are implemented in separate files.
8 * This program can be distributed under the terms of the GNU LGPLv2.
9 * See the file COPYING.LIB
12 #include "qemu/osdep.h"
14 #include "standard-headers/linux/fuse.h"
15 #include "fuse_misc.h"
17 #include "fuse_virtio.h"
31 #define THREAD_POOL_SIZE 64
33 #define OFFSET_MAX 0x7fffffffffffffffLL
35 struct fuse_pollhandle
{
37 struct fuse_session
*se
;
40 static size_t pagesize
;
42 static __attribute__((constructor
)) void fuse_ll_init_pagesize(void)
44 pagesize
= getpagesize();
47 static void convert_stat(const struct stat
*stbuf
, struct fuse_attr
*attr
)
49 *attr
= (struct fuse_attr
){
51 .mode
= stbuf
->st_mode
,
52 .nlink
= stbuf
->st_nlink
,
55 .rdev
= stbuf
->st_rdev
,
56 .size
= stbuf
->st_size
,
57 .blksize
= stbuf
->st_blksize
,
58 .blocks
= stbuf
->st_blocks
,
59 .atime
= stbuf
->st_atime
,
60 .mtime
= stbuf
->st_mtime
,
61 .ctime
= stbuf
->st_ctime
,
62 .atimensec
= ST_ATIM_NSEC(stbuf
),
63 .mtimensec
= ST_MTIM_NSEC(stbuf
),
64 .ctimensec
= ST_CTIM_NSEC(stbuf
),
68 static void convert_attr(const struct fuse_setattr_in
*attr
, struct stat
*stbuf
)
70 stbuf
->st_mode
= attr
->mode
;
71 stbuf
->st_uid
= attr
->uid
;
72 stbuf
->st_gid
= attr
->gid
;
73 stbuf
->st_size
= attr
->size
;
74 stbuf
->st_atime
= attr
->atime
;
75 stbuf
->st_mtime
= attr
->mtime
;
76 stbuf
->st_ctime
= attr
->ctime
;
77 ST_ATIM_NSEC_SET(stbuf
, attr
->atimensec
);
78 ST_MTIM_NSEC_SET(stbuf
, attr
->mtimensec
);
79 ST_CTIM_NSEC_SET(stbuf
, attr
->ctimensec
);
/* Total number of bytes covered by an iovec array of 'count' segments. */
static size_t iov_length(const struct iovec *iov, size_t count)
{
    size_t total = 0;

    for (size_t i = 0; i < count; i++) {
        total += iov[i].iov_len;
    }

    return total;
}
93 static void list_init_req(struct fuse_req
*req
)
99 static void list_del_req(struct fuse_req
*req
)
101 struct fuse_req
*prev
= req
->prev
;
102 struct fuse_req
*next
= req
->next
;
107 static void list_add_req(struct fuse_req
*req
, struct fuse_req
*next
)
109 struct fuse_req
*prev
= next
->prev
;
116 static void destroy_req(fuse_req_t req
)
118 pthread_mutex_destroy(&req
->lock
);
122 void fuse_free_req(fuse_req_t req
)
125 struct fuse_session
*se
= req
->se
;
127 pthread_mutex_lock(&se
->lock
);
128 req
->u
.ni
.func
= NULL
;
129 req
->u
.ni
.data
= NULL
;
133 pthread_mutex_unlock(&se
->lock
);
139 static struct fuse_req
*fuse_ll_alloc_req(struct fuse_session
*se
)
141 struct fuse_req
*req
;
143 req
= (struct fuse_req
*)calloc(1, sizeof(struct fuse_req
));
145 fuse_log(FUSE_LOG_ERR
, "fuse: failed to allocate request\n");
150 fuse_mutex_init(&req
->lock
);
156 /* Send data. If *ch* is NULL, send via session master fd */
157 static int fuse_send_msg(struct fuse_session
*se
, struct fuse_chan
*ch
,
158 struct iovec
*iov
, int count
)
160 struct fuse_out_header
*out
= iov
[0].iov_base
;
162 out
->len
= iov_length(iov
, count
);
163 if (out
->unique
== 0) {
164 fuse_log(FUSE_LOG_DEBUG
, "NOTIFY: code=%d length=%u\n", out
->error
,
166 } else if (out
->error
) {
167 fuse_log(FUSE_LOG_DEBUG
,
168 " unique: %llu, error: %i (%s), outsize: %i\n",
169 (unsigned long long)out
->unique
, out
->error
,
170 strerror(-out
->error
), out
->len
);
172 fuse_log(FUSE_LOG_DEBUG
, " unique: %llu, success, outsize: %i\n",
173 (unsigned long long)out
->unique
, out
->len
);
176 if (fuse_lowlevel_is_virtio(se
)) {
177 return virtio_send_msg(se
, ch
, iov
, count
);
180 abort(); /* virtio should have taken it before here */
185 int fuse_send_reply_iov_nofree(fuse_req_t req
, int error
, struct iovec
*iov
,
188 struct fuse_out_header out
= {
189 .unique
= req
->unique
,
193 if (error
<= -1000 || error
> 0) {
194 fuse_log(FUSE_LOG_ERR
, "fuse: bad error value: %i\n", error
);
198 iov
[0].iov_base
= &out
;
199 iov
[0].iov_len
= sizeof(struct fuse_out_header
);
201 return fuse_send_msg(req
->se
, req
->ch
, iov
, count
);
204 static int send_reply_iov(fuse_req_t req
, int error
, struct iovec
*iov
,
209 res
= fuse_send_reply_iov_nofree(req
, error
, iov
, count
);
214 static int send_reply(fuse_req_t req
, int error
, const void *arg
,
220 iov
[1].iov_base
= (void *)arg
;
221 iov
[1].iov_len
= argsize
;
224 return send_reply_iov(req
, error
, iov
, count
);
227 int fuse_reply_iov(fuse_req_t req
, const struct iovec
*iov
, int count
)
230 struct iovec
*padded_iov
;
232 padded_iov
= malloc((count
+ 1) * sizeof(struct iovec
));
233 if (padded_iov
== NULL
) {
234 return fuse_reply_err(req
, ENOMEM
);
237 memcpy(padded_iov
+ 1, iov
, count
* sizeof(struct iovec
));
240 res
= send_reply_iov(req
, 0, padded_iov
, count
);
248 * 'buf` is allowed to be empty so that the proper size may be
249 * allocated by the caller
251 size_t fuse_add_direntry(fuse_req_t req
, char *buf
, size_t bufsize
,
252 const char *name
, const struct stat
*stbuf
, off_t off
)
257 size_t entlen_padded
;
258 struct fuse_dirent
*dirent
;
260 namelen
= strlen(name
);
261 entlen
= FUSE_NAME_OFFSET
+ namelen
;
262 entlen_padded
= FUSE_DIRENT_ALIGN(entlen
);
264 if ((buf
== NULL
) || (entlen_padded
> bufsize
)) {
265 return entlen_padded
;
268 dirent
= (struct fuse_dirent
*)buf
;
269 dirent
->ino
= stbuf
->st_ino
;
271 dirent
->namelen
= namelen
;
272 dirent
->type
= (stbuf
->st_mode
& S_IFMT
) >> 12;
273 memcpy(dirent
->name
, name
, namelen
);
274 memset(dirent
->name
+ namelen
, 0, entlen_padded
- entlen
);
276 return entlen_padded
;
279 static void convert_statfs(const struct statvfs
*stbuf
,
280 struct fuse_kstatfs
*kstatfs
)
282 *kstatfs
= (struct fuse_kstatfs
){
283 .bsize
= stbuf
->f_bsize
,
284 .frsize
= stbuf
->f_frsize
,
285 .blocks
= stbuf
->f_blocks
,
286 .bfree
= stbuf
->f_bfree
,
287 .bavail
= stbuf
->f_bavail
,
288 .files
= stbuf
->f_files
,
289 .ffree
= stbuf
->f_ffree
,
290 .namelen
= stbuf
->f_namemax
,
294 static int send_reply_ok(fuse_req_t req
, const void *arg
, size_t argsize
)
296 return send_reply(req
, 0, arg
, argsize
);
299 int fuse_reply_err(fuse_req_t req
, int err
)
301 return send_reply(req
, -err
, NULL
, 0);
304 void fuse_reply_none(fuse_req_t req
)
/* Whole-seconds part of a timeout, clamped to [0, ULONG_MAX]. */
static unsigned long calc_timeout_sec(double t)
{
    if (t > (double)ULONG_MAX) {
        return ULONG_MAX;
    } else if (t < 0.0) {
        return 0;
    } else {
        return (unsigned long)t;
    }
}

/* Fractional part of a timeout as nanoseconds, clamped to [0, 999999999]. */
static unsigned int calc_timeout_nsec(double t)
{
    double frac = t - (double)calc_timeout_sec(t);

    if (frac < 0.0) {
        return 0;
    } else if (frac >= 0.999999999) {
        return 999999999;
    } else {
        return (unsigned int)(frac * 1.0e9);
    }
}
332 static void fill_entry(struct fuse_entry_out
*arg
,
333 const struct fuse_entry_param
*e
)
335 *arg
= (struct fuse_entry_out
){
337 .generation
= e
->generation
,
338 .entry_valid
= calc_timeout_sec(e
->entry_timeout
),
339 .entry_valid_nsec
= calc_timeout_nsec(e
->entry_timeout
),
340 .attr_valid
= calc_timeout_sec(e
->attr_timeout
),
341 .attr_valid_nsec
= calc_timeout_nsec(e
->attr_timeout
),
343 convert_stat(&e
->attr
, &arg
->attr
);
347 * `buf` is allowed to be empty so that the proper size may be
348 * allocated by the caller
350 size_t fuse_add_direntry_plus(fuse_req_t req
, char *buf
, size_t bufsize
,
352 const struct fuse_entry_param
*e
, off_t off
)
357 size_t entlen_padded
;
359 namelen
= strlen(name
);
360 entlen
= FUSE_NAME_OFFSET_DIRENTPLUS
+ namelen
;
361 entlen_padded
= FUSE_DIRENT_ALIGN(entlen
);
362 if ((buf
== NULL
) || (entlen_padded
> bufsize
)) {
363 return entlen_padded
;
366 struct fuse_direntplus
*dp
= (struct fuse_direntplus
*)buf
;
367 memset(&dp
->entry_out
, 0, sizeof(dp
->entry_out
));
368 fill_entry(&dp
->entry_out
, e
);
370 struct fuse_dirent
*dirent
= &dp
->dirent
;
371 *dirent
= (struct fuse_dirent
){
372 .ino
= e
->attr
.st_ino
,
375 .type
= (e
->attr
.st_mode
& S_IFMT
) >> 12,
377 memcpy(dirent
->name
, name
, namelen
);
378 memset(dirent
->name
+ namelen
, 0, entlen_padded
- entlen
);
380 return entlen_padded
;
383 static void fill_open(struct fuse_open_out
*arg
, const struct fuse_file_info
*f
)
387 arg
->open_flags
|= FOPEN_DIRECT_IO
;
390 arg
->open_flags
|= FOPEN_KEEP_CACHE
;
392 if (f
->cache_readdir
) {
393 arg
->open_flags
|= FOPEN_CACHE_DIR
;
395 if (f
->nonseekable
) {
396 arg
->open_flags
|= FOPEN_NONSEEKABLE
;
400 int fuse_reply_entry(fuse_req_t req
, const struct fuse_entry_param
*e
)
402 struct fuse_entry_out arg
;
403 size_t size
= sizeof(arg
);
405 memset(&arg
, 0, sizeof(arg
));
407 return send_reply_ok(req
, &arg
, size
);
410 int fuse_reply_create(fuse_req_t req
, const struct fuse_entry_param
*e
,
411 const struct fuse_file_info
*f
)
413 char buf
[sizeof(struct fuse_entry_out
) + sizeof(struct fuse_open_out
)];
414 size_t entrysize
= sizeof(struct fuse_entry_out
);
415 struct fuse_entry_out
*earg
= (struct fuse_entry_out
*)buf
;
416 struct fuse_open_out
*oarg
= (struct fuse_open_out
*)(buf
+ entrysize
);
418 memset(buf
, 0, sizeof(buf
));
421 return send_reply_ok(req
, buf
, entrysize
+ sizeof(struct fuse_open_out
));
424 int fuse_reply_attr(fuse_req_t req
, const struct stat
*attr
,
427 struct fuse_attr_out arg
;
428 size_t size
= sizeof(arg
);
430 memset(&arg
, 0, sizeof(arg
));
431 arg
.attr_valid
= calc_timeout_sec(attr_timeout
);
432 arg
.attr_valid_nsec
= calc_timeout_nsec(attr_timeout
);
433 convert_stat(attr
, &arg
.attr
);
435 return send_reply_ok(req
, &arg
, size
);
438 int fuse_reply_readlink(fuse_req_t req
, const char *linkname
)
440 return send_reply_ok(req
, linkname
, strlen(linkname
));
443 int fuse_reply_open(fuse_req_t req
, const struct fuse_file_info
*f
)
445 struct fuse_open_out arg
;
447 memset(&arg
, 0, sizeof(arg
));
449 return send_reply_ok(req
, &arg
, sizeof(arg
));
452 int fuse_reply_write(fuse_req_t req
, size_t count
)
454 struct fuse_write_out arg
;
456 memset(&arg
, 0, sizeof(arg
));
459 return send_reply_ok(req
, &arg
, sizeof(arg
));
462 int fuse_reply_buf(fuse_req_t req
, const char *buf
, size_t size
)
464 return send_reply_ok(req
, buf
, size
);
467 static int fuse_send_data_iov_fallback(struct fuse_session
*se
,
468 struct fuse_chan
*ch
, struct iovec
*iov
,
469 int iov_count
, struct fuse_bufvec
*buf
,
472 /* Optimize common case */
473 if (buf
->count
== 1 && buf
->idx
== 0 && buf
->off
== 0 &&
474 !(buf
->buf
[0].flags
& FUSE_BUF_IS_FD
)) {
476 * FIXME: also avoid memory copy if there are multiple buffers
477 * but none of them contain an fd
480 iov
[iov_count
].iov_base
= buf
->buf
[0].mem
;
481 iov
[iov_count
].iov_len
= len
;
483 return fuse_send_msg(se
, ch
, iov
, iov_count
);
486 if (fuse_lowlevel_is_virtio(se
) && buf
->count
== 1 &&
487 buf
->buf
[0].flags
== (FUSE_BUF_IS_FD
| FUSE_BUF_FD_SEEK
)) {
488 return virtio_send_data_iov(se
, ch
, iov
, iov_count
, buf
, len
);
491 abort(); /* Will have taken vhost path */
/* Compute the payload length and delegate to the fallback sender. */
static int fuse_send_data_iov(struct fuse_session *se, struct fuse_chan *ch,
                              struct iovec *iov, int iov_count,
                              struct fuse_bufvec *buf)
{
    size_t len = fuse_buf_size(buf);

    return fuse_send_data_iov_fallback(se, ch, iov, iov_count, buf, len);
}
504 int fuse_reply_data(fuse_req_t req
, struct fuse_bufvec
*bufv
)
507 struct fuse_out_header out
= {
508 .unique
= req
->unique
,
512 iov
[0].iov_base
= &out
;
513 iov
[0].iov_len
= sizeof(struct fuse_out_header
);
515 res
= fuse_send_data_iov(req
->se
, req
->ch
, iov
, 1, bufv
);
520 return fuse_reply_err(req
, res
);
524 int fuse_reply_statfs(fuse_req_t req
, const struct statvfs
*stbuf
)
526 struct fuse_statfs_out arg
;
527 size_t size
= sizeof(arg
);
529 memset(&arg
, 0, sizeof(arg
));
530 convert_statfs(stbuf
, &arg
.st
);
532 return send_reply_ok(req
, &arg
, size
);
535 int fuse_reply_xattr(fuse_req_t req
, size_t count
)
537 struct fuse_getxattr_out arg
;
539 memset(&arg
, 0, sizeof(arg
));
542 return send_reply_ok(req
, &arg
, sizeof(arg
));
545 int fuse_reply_lock(fuse_req_t req
, const struct flock
*lock
)
547 struct fuse_lk_out arg
;
549 memset(&arg
, 0, sizeof(arg
));
550 arg
.lk
.type
= lock
->l_type
;
551 if (lock
->l_type
!= F_UNLCK
) {
552 arg
.lk
.start
= lock
->l_start
;
553 if (lock
->l_len
== 0) {
554 arg
.lk
.end
= OFFSET_MAX
;
556 arg
.lk
.end
= lock
->l_start
+ lock
->l_len
- 1;
559 arg
.lk
.pid
= lock
->l_pid
;
560 return send_reply_ok(req
, &arg
, sizeof(arg
));
563 int fuse_reply_bmap(fuse_req_t req
, uint64_t idx
)
565 struct fuse_bmap_out arg
;
567 memset(&arg
, 0, sizeof(arg
));
570 return send_reply_ok(req
, &arg
, sizeof(arg
));
573 static struct fuse_ioctl_iovec
*fuse_ioctl_iovec_copy(const struct iovec
*iov
,
576 struct fuse_ioctl_iovec
*fiov
;
579 fiov
= malloc(sizeof(fiov
[0]) * count
);
584 for (i
= 0; i
< count
; i
++) {
585 fiov
[i
].base
= (uintptr_t)iov
[i
].iov_base
;
586 fiov
[i
].len
= iov
[i
].iov_len
;
592 int fuse_reply_ioctl_retry(fuse_req_t req
, const struct iovec
*in_iov
,
593 size_t in_count
, const struct iovec
*out_iov
,
596 struct fuse_ioctl_out arg
;
597 struct fuse_ioctl_iovec
*in_fiov
= NULL
;
598 struct fuse_ioctl_iovec
*out_fiov
= NULL
;
603 memset(&arg
, 0, sizeof(arg
));
604 arg
.flags
|= FUSE_IOCTL_RETRY
;
605 arg
.in_iovs
= in_count
;
606 arg
.out_iovs
= out_count
;
607 iov
[count
].iov_base
= &arg
;
608 iov
[count
].iov_len
= sizeof(arg
);
611 /* Can't handle non-compat 64bit ioctls on 32bit */
612 if (sizeof(void *) == 4 && req
->ioctl_64bit
) {
613 res
= fuse_reply_err(req
, EINVAL
);
618 in_fiov
= fuse_ioctl_iovec_copy(in_iov
, in_count
);
623 iov
[count
].iov_base
= (void *)in_fiov
;
624 iov
[count
].iov_len
= sizeof(in_fiov
[0]) * in_count
;
628 out_fiov
= fuse_ioctl_iovec_copy(out_iov
, out_count
);
633 iov
[count
].iov_base
= (void *)out_fiov
;
634 iov
[count
].iov_len
= sizeof(out_fiov
[0]) * out_count
;
638 res
= send_reply_iov(req
, 0, iov
, count
);
646 res
= fuse_reply_err(req
, ENOMEM
);
650 int fuse_reply_ioctl(fuse_req_t req
, int result
, const void *buf
, size_t size
)
652 struct fuse_ioctl_out arg
;
656 memset(&arg
, 0, sizeof(arg
));
658 iov
[count
].iov_base
= &arg
;
659 iov
[count
].iov_len
= sizeof(arg
);
663 iov
[count
].iov_base
= (char *)buf
;
664 iov
[count
].iov_len
= size
;
668 return send_reply_iov(req
, 0, iov
, count
);
671 int fuse_reply_ioctl_iov(fuse_req_t req
, int result
, const struct iovec
*iov
,
674 struct iovec
*padded_iov
;
675 struct fuse_ioctl_out arg
;
678 padded_iov
= malloc((count
+ 2) * sizeof(struct iovec
));
679 if (padded_iov
== NULL
) {
680 return fuse_reply_err(req
, ENOMEM
);
683 memset(&arg
, 0, sizeof(arg
));
685 padded_iov
[1].iov_base
= &arg
;
686 padded_iov
[1].iov_len
= sizeof(arg
);
688 memcpy(&padded_iov
[2], iov
, count
* sizeof(struct iovec
));
690 res
= send_reply_iov(req
, 0, padded_iov
, count
+ 2);
696 int fuse_reply_poll(fuse_req_t req
, unsigned revents
)
698 struct fuse_poll_out arg
;
700 memset(&arg
, 0, sizeof(arg
));
701 arg
.revents
= revents
;
703 return send_reply_ok(req
, &arg
, sizeof(arg
));
706 int fuse_reply_lseek(fuse_req_t req
, off_t off
)
708 struct fuse_lseek_out arg
;
710 memset(&arg
, 0, sizeof(arg
));
713 return send_reply_ok(req
, &arg
, sizeof(arg
));
716 static void do_lookup(fuse_req_t req
, fuse_ino_t nodeid
,
717 struct fuse_mbuf_iter
*iter
)
719 const char *name
= fuse_mbuf_iter_advance_str(iter
);
721 fuse_reply_err(req
, EINVAL
);
725 if (req
->se
->op
.lookup
) {
726 req
->se
->op
.lookup(req
, nodeid
, name
);
728 fuse_reply_err(req
, ENOSYS
);
732 static void do_forget(fuse_req_t req
, fuse_ino_t nodeid
,
733 struct fuse_mbuf_iter
*iter
)
735 struct fuse_forget_in
*arg
;
737 arg
= fuse_mbuf_iter_advance(iter
, sizeof(*arg
));
739 fuse_reply_err(req
, EINVAL
);
743 if (req
->se
->op
.forget
) {
744 req
->se
->op
.forget(req
, nodeid
, arg
->nlookup
);
746 fuse_reply_none(req
);
750 static void do_batch_forget(fuse_req_t req
, fuse_ino_t nodeid
,
751 struct fuse_mbuf_iter
*iter
)
753 struct fuse_batch_forget_in
*arg
;
754 struct fuse_forget_data
*forgets
;
759 arg
= fuse_mbuf_iter_advance(iter
, sizeof(*arg
));
761 fuse_reply_none(req
);
766 * Prevent integer overflow. The compiler emits the following warning
767 * unless we use the scount local variable:
769 * error: comparison is always false due to limited range of data type
770 * [-Werror=type-limits]
772 * This may be true on 64-bit hosts but we need this check for 32-bit
776 if (scount
> SIZE_MAX
/ sizeof(forgets
[0])) {
777 fuse_reply_none(req
);
781 forgets
= fuse_mbuf_iter_advance(iter
, arg
->count
* sizeof(forgets
[0]));
783 fuse_reply_none(req
);
787 if (req
->se
->op
.forget_multi
) {
788 req
->se
->op
.forget_multi(req
, arg
->count
, forgets
);
789 } else if (req
->se
->op
.forget
) {
792 for (i
= 0; i
< arg
->count
; i
++) {
793 struct fuse_req
*dummy_req
;
795 dummy_req
= fuse_ll_alloc_req(req
->se
);
796 if (dummy_req
== NULL
) {
800 dummy_req
->unique
= req
->unique
;
801 dummy_req
->ctx
= req
->ctx
;
802 dummy_req
->ch
= NULL
;
804 req
->se
->op
.forget(dummy_req
, forgets
[i
].ino
, forgets
[i
].nlookup
);
806 fuse_reply_none(req
);
808 fuse_reply_none(req
);
812 static void do_getattr(fuse_req_t req
, fuse_ino_t nodeid
,
813 struct fuse_mbuf_iter
*iter
)
815 struct fuse_file_info
*fip
= NULL
;
816 struct fuse_file_info fi
;
818 struct fuse_getattr_in
*arg
;
820 arg
= fuse_mbuf_iter_advance(iter
, sizeof(*arg
));
822 fuse_reply_err(req
, EINVAL
);
826 if (arg
->getattr_flags
& FUSE_GETATTR_FH
) {
827 memset(&fi
, 0, sizeof(fi
));
832 if (req
->se
->op
.getattr
) {
833 req
->se
->op
.getattr(req
, nodeid
, fip
);
835 fuse_reply_err(req
, ENOSYS
);
839 static void do_setattr(fuse_req_t req
, fuse_ino_t nodeid
,
840 struct fuse_mbuf_iter
*iter
)
842 if (req
->se
->op
.setattr
) {
843 struct fuse_setattr_in
*arg
;
844 struct fuse_file_info
*fi
= NULL
;
845 struct fuse_file_info fi_store
;
848 arg
= fuse_mbuf_iter_advance(iter
, sizeof(*arg
));
850 fuse_reply_err(req
, EINVAL
);
854 memset(&stbuf
, 0, sizeof(stbuf
));
855 convert_attr(arg
, &stbuf
);
856 if (arg
->valid
& FATTR_FH
) {
857 arg
->valid
&= ~FATTR_FH
;
858 memset(&fi_store
, 0, sizeof(fi_store
));
862 arg
->valid
&= FUSE_SET_ATTR_MODE
| FUSE_SET_ATTR_UID
|
863 FUSE_SET_ATTR_GID
| FUSE_SET_ATTR_SIZE
|
864 FUSE_SET_ATTR_ATIME
| FUSE_SET_ATTR_MTIME
|
865 FUSE_SET_ATTR_ATIME_NOW
| FUSE_SET_ATTR_MTIME_NOW
|
868 req
->se
->op
.setattr(req
, nodeid
, &stbuf
, arg
->valid
, fi
);
870 fuse_reply_err(req
, ENOSYS
);
874 static void do_access(fuse_req_t req
, fuse_ino_t nodeid
,
875 struct fuse_mbuf_iter
*iter
)
877 struct fuse_access_in
*arg
;
879 arg
= fuse_mbuf_iter_advance(iter
, sizeof(*arg
));
881 fuse_reply_err(req
, EINVAL
);
885 if (req
->se
->op
.access
) {
886 req
->se
->op
.access(req
, nodeid
, arg
->mask
);
888 fuse_reply_err(req
, ENOSYS
);
892 static void do_readlink(fuse_req_t req
, fuse_ino_t nodeid
,
893 struct fuse_mbuf_iter
*iter
)
897 if (req
->se
->op
.readlink
) {
898 req
->se
->op
.readlink(req
, nodeid
);
900 fuse_reply_err(req
, ENOSYS
);
904 static void do_mknod(fuse_req_t req
, fuse_ino_t nodeid
,
905 struct fuse_mbuf_iter
*iter
)
907 struct fuse_mknod_in
*arg
;
910 arg
= fuse_mbuf_iter_advance(iter
, sizeof(*arg
));
911 name
= fuse_mbuf_iter_advance_str(iter
);
913 fuse_reply_err(req
, EINVAL
);
917 req
->ctx
.umask
= arg
->umask
;
919 if (req
->se
->op
.mknod
) {
920 req
->se
->op
.mknod(req
, nodeid
, name
, arg
->mode
, arg
->rdev
);
922 fuse_reply_err(req
, ENOSYS
);
926 static void do_mkdir(fuse_req_t req
, fuse_ino_t nodeid
,
927 struct fuse_mbuf_iter
*iter
)
929 struct fuse_mkdir_in
*arg
;
932 arg
= fuse_mbuf_iter_advance(iter
, sizeof(*arg
));
933 name
= fuse_mbuf_iter_advance_str(iter
);
935 fuse_reply_err(req
, EINVAL
);
939 req
->ctx
.umask
= arg
->umask
;
941 if (req
->se
->op
.mkdir
) {
942 req
->se
->op
.mkdir(req
, nodeid
, name
, arg
->mode
);
944 fuse_reply_err(req
, ENOSYS
);
948 static void do_unlink(fuse_req_t req
, fuse_ino_t nodeid
,
949 struct fuse_mbuf_iter
*iter
)
951 const char *name
= fuse_mbuf_iter_advance_str(iter
);
954 fuse_reply_err(req
, EINVAL
);
958 if (req
->se
->op
.unlink
) {
959 req
->se
->op
.unlink(req
, nodeid
, name
);
961 fuse_reply_err(req
, ENOSYS
);
965 static void do_rmdir(fuse_req_t req
, fuse_ino_t nodeid
,
966 struct fuse_mbuf_iter
*iter
)
968 const char *name
= fuse_mbuf_iter_advance_str(iter
);
971 fuse_reply_err(req
, EINVAL
);
975 if (req
->se
->op
.rmdir
) {
976 req
->se
->op
.rmdir(req
, nodeid
, name
);
978 fuse_reply_err(req
, ENOSYS
);
982 static void do_symlink(fuse_req_t req
, fuse_ino_t nodeid
,
983 struct fuse_mbuf_iter
*iter
)
985 const char *name
= fuse_mbuf_iter_advance_str(iter
);
986 const char *linkname
= fuse_mbuf_iter_advance_str(iter
);
988 if (!name
|| !linkname
) {
989 fuse_reply_err(req
, EINVAL
);
993 if (req
->se
->op
.symlink
) {
994 req
->se
->op
.symlink(req
, linkname
, nodeid
, name
);
996 fuse_reply_err(req
, ENOSYS
);
1000 static void do_rename(fuse_req_t req
, fuse_ino_t nodeid
,
1001 struct fuse_mbuf_iter
*iter
)
1003 struct fuse_rename_in
*arg
;
1004 const char *oldname
;
1005 const char *newname
;
1007 arg
= fuse_mbuf_iter_advance(iter
, sizeof(*arg
));
1008 oldname
= fuse_mbuf_iter_advance_str(iter
);
1009 newname
= fuse_mbuf_iter_advance_str(iter
);
1010 if (!arg
|| !oldname
|| !newname
) {
1011 fuse_reply_err(req
, EINVAL
);
1015 if (req
->se
->op
.rename
) {
1016 req
->se
->op
.rename(req
, nodeid
, oldname
, arg
->newdir
, newname
, 0);
1018 fuse_reply_err(req
, ENOSYS
);
1022 static void do_rename2(fuse_req_t req
, fuse_ino_t nodeid
,
1023 struct fuse_mbuf_iter
*iter
)
1025 struct fuse_rename2_in
*arg
;
1026 const char *oldname
;
1027 const char *newname
;
1029 arg
= fuse_mbuf_iter_advance(iter
, sizeof(*arg
));
1030 oldname
= fuse_mbuf_iter_advance_str(iter
);
1031 newname
= fuse_mbuf_iter_advance_str(iter
);
1032 if (!arg
|| !oldname
|| !newname
) {
1033 fuse_reply_err(req
, EINVAL
);
1037 if (req
->se
->op
.rename
) {
1038 req
->se
->op
.rename(req
, nodeid
, oldname
, arg
->newdir
, newname
,
1041 fuse_reply_err(req
, ENOSYS
);
1045 static void do_link(fuse_req_t req
, fuse_ino_t nodeid
,
1046 struct fuse_mbuf_iter
*iter
)
1048 struct fuse_link_in
*arg
= fuse_mbuf_iter_advance(iter
, sizeof(*arg
));
1049 const char *name
= fuse_mbuf_iter_advance_str(iter
);
1051 if (!arg
|| !name
) {
1052 fuse_reply_err(req
, EINVAL
);
1056 if (req
->se
->op
.link
) {
1057 req
->se
->op
.link(req
, arg
->oldnodeid
, nodeid
, name
);
1059 fuse_reply_err(req
, ENOSYS
);
1063 static void do_create(fuse_req_t req
, fuse_ino_t nodeid
,
1064 struct fuse_mbuf_iter
*iter
)
1066 if (req
->se
->op
.create
) {
1067 struct fuse_create_in
*arg
;
1068 struct fuse_file_info fi
;
1071 arg
= fuse_mbuf_iter_advance(iter
, sizeof(*arg
));
1072 name
= fuse_mbuf_iter_advance_str(iter
);
1073 if (!arg
|| !name
) {
1074 fuse_reply_err(req
, EINVAL
);
1078 memset(&fi
, 0, sizeof(fi
));
1079 fi
.flags
= arg
->flags
;
1081 req
->ctx
.umask
= arg
->umask
;
1083 req
->se
->op
.create(req
, nodeid
, name
, arg
->mode
, &fi
);
1085 fuse_reply_err(req
, ENOSYS
);
1089 static void do_open(fuse_req_t req
, fuse_ino_t nodeid
,
1090 struct fuse_mbuf_iter
*iter
)
1092 struct fuse_open_in
*arg
;
1093 struct fuse_file_info fi
;
1095 arg
= fuse_mbuf_iter_advance(iter
, sizeof(*arg
));
1097 fuse_reply_err(req
, EINVAL
);
1101 memset(&fi
, 0, sizeof(fi
));
1102 fi
.flags
= arg
->flags
;
1104 if (req
->se
->op
.open
) {
1105 req
->se
->op
.open(req
, nodeid
, &fi
);
1107 fuse_reply_open(req
, &fi
);
1111 static void do_read(fuse_req_t req
, fuse_ino_t nodeid
,
1112 struct fuse_mbuf_iter
*iter
)
1114 if (req
->se
->op
.read
) {
1115 struct fuse_read_in
*arg
;
1116 struct fuse_file_info fi
;
1118 arg
= fuse_mbuf_iter_advance(iter
, sizeof(*arg
));
1120 memset(&fi
, 0, sizeof(fi
));
1122 fi
.lock_owner
= arg
->lock_owner
;
1123 fi
.flags
= arg
->flags
;
1124 req
->se
->op
.read(req
, nodeid
, arg
->size
, arg
->offset
, &fi
);
1126 fuse_reply_err(req
, ENOSYS
);
1130 static void do_write(fuse_req_t req
, fuse_ino_t nodeid
,
1131 struct fuse_mbuf_iter
*iter
)
1133 struct fuse_write_in
*arg
;
1134 struct fuse_file_info fi
;
1137 arg
= fuse_mbuf_iter_advance(iter
, sizeof(*arg
));
1139 fuse_reply_err(req
, EINVAL
);
1143 param
= fuse_mbuf_iter_advance(iter
, arg
->size
);
1145 fuse_reply_err(req
, EINVAL
);
1149 memset(&fi
, 0, sizeof(fi
));
1151 fi
.writepage
= (arg
->write_flags
& FUSE_WRITE_CACHE
) != 0;
1152 fi
.kill_priv
= !!(arg
->write_flags
& FUSE_WRITE_KILL_PRIV
);
1154 fi
.lock_owner
= arg
->lock_owner
;
1155 fi
.flags
= arg
->flags
;
1157 if (req
->se
->op
.write
) {
1158 req
->se
->op
.write(req
, nodeid
, param
, arg
->size
, arg
->offset
, &fi
);
1160 fuse_reply_err(req
, ENOSYS
);
1164 static void do_write_buf(fuse_req_t req
, fuse_ino_t nodeid
,
1165 struct fuse_mbuf_iter
*iter
, struct fuse_bufvec
*ibufv
)
1167 struct fuse_session
*se
= req
->se
;
1168 struct fuse_bufvec
*pbufv
= ibufv
;
1169 struct fuse_bufvec tmpbufv
= {
1170 .buf
[0] = ibufv
->buf
[0],
1173 struct fuse_write_in
*arg
;
1174 size_t arg_size
= sizeof(*arg
);
1175 struct fuse_file_info fi
;
1177 memset(&fi
, 0, sizeof(fi
));
1179 arg
= fuse_mbuf_iter_advance(iter
, arg_size
);
1181 fuse_reply_err(req
, EINVAL
);
1185 fi
.lock_owner
= arg
->lock_owner
;
1186 fi
.flags
= arg
->flags
;
1188 fi
.writepage
= !!(arg
->write_flags
& FUSE_WRITE_CACHE
);
1189 fi
.kill_priv
= !!(arg
->write_flags
& FUSE_WRITE_KILL_PRIV
);
1191 if (ibufv
->count
== 1) {
1192 assert(!(tmpbufv
.buf
[0].flags
& FUSE_BUF_IS_FD
));
1193 tmpbufv
.buf
[0].mem
= ((char *)arg
) + arg_size
;
1194 tmpbufv
.buf
[0].size
-= sizeof(struct fuse_in_header
) + arg_size
;
1198 * Input bufv contains the headers in the first element
1199 * and the data in the rest, we need to skip that first element
1201 ibufv
->buf
[0].size
= 0;
1204 if (fuse_buf_size(pbufv
) != arg
->size
) {
1205 fuse_log(FUSE_LOG_ERR
,
1206 "fuse: do_write_buf: buffer size doesn't match arg->size\n");
1207 fuse_reply_err(req
, EIO
);
1211 se
->op
.write_buf(req
, nodeid
, pbufv
, arg
->offset
, &fi
);
1214 static void do_flush(fuse_req_t req
, fuse_ino_t nodeid
,
1215 struct fuse_mbuf_iter
*iter
)
1217 struct fuse_flush_in
*arg
;
1218 struct fuse_file_info fi
;
1220 arg
= fuse_mbuf_iter_advance(iter
, sizeof(*arg
));
1222 fuse_reply_err(req
, EINVAL
);
1226 memset(&fi
, 0, sizeof(fi
));
1229 fi
.lock_owner
= arg
->lock_owner
;
1231 if (req
->se
->op
.flush
) {
1232 req
->se
->op
.flush(req
, nodeid
, &fi
);
1234 fuse_reply_err(req
, ENOSYS
);
1238 static void do_release(fuse_req_t req
, fuse_ino_t nodeid
,
1239 struct fuse_mbuf_iter
*iter
)
1241 struct fuse_release_in
*arg
;
1242 struct fuse_file_info fi
;
1244 arg
= fuse_mbuf_iter_advance(iter
, sizeof(*arg
));
1246 fuse_reply_err(req
, EINVAL
);
1250 memset(&fi
, 0, sizeof(fi
));
1251 fi
.flags
= arg
->flags
;
1253 fi
.flush
= (arg
->release_flags
& FUSE_RELEASE_FLUSH
) ? 1 : 0;
1254 fi
.lock_owner
= arg
->lock_owner
;
1256 if (arg
->release_flags
& FUSE_RELEASE_FLOCK_UNLOCK
) {
1257 fi
.flock_release
= 1;
1260 if (req
->se
->op
.release
) {
1261 req
->se
->op
.release(req
, nodeid
, &fi
);
1263 fuse_reply_err(req
, 0);
1267 static void do_fsync(fuse_req_t req
, fuse_ino_t nodeid
,
1268 struct fuse_mbuf_iter
*iter
)
1270 struct fuse_fsync_in
*arg
;
1271 struct fuse_file_info fi
;
1274 arg
= fuse_mbuf_iter_advance(iter
, sizeof(*arg
));
1276 fuse_reply_err(req
, EINVAL
);
1279 datasync
= arg
->fsync_flags
& 1;
1281 memset(&fi
, 0, sizeof(fi
));
1284 if (req
->se
->op
.fsync
) {
1285 if (fi
.fh
== (uint64_t)-1) {
1286 req
->se
->op
.fsync(req
, nodeid
, datasync
, NULL
);
1288 req
->se
->op
.fsync(req
, nodeid
, datasync
, &fi
);
1291 fuse_reply_err(req
, ENOSYS
);
1295 static void do_opendir(fuse_req_t req
, fuse_ino_t nodeid
,
1296 struct fuse_mbuf_iter
*iter
)
1298 struct fuse_open_in
*arg
;
1299 struct fuse_file_info fi
;
1301 arg
= fuse_mbuf_iter_advance(iter
, sizeof(*arg
));
1303 fuse_reply_err(req
, EINVAL
);
1307 memset(&fi
, 0, sizeof(fi
));
1308 fi
.flags
= arg
->flags
;
1310 if (req
->se
->op
.opendir
) {
1311 req
->se
->op
.opendir(req
, nodeid
, &fi
);
1313 fuse_reply_open(req
, &fi
);
1317 static void do_readdir(fuse_req_t req
, fuse_ino_t nodeid
,
1318 struct fuse_mbuf_iter
*iter
)
1320 struct fuse_read_in
*arg
;
1321 struct fuse_file_info fi
;
1323 arg
= fuse_mbuf_iter_advance(iter
, sizeof(*arg
));
1325 fuse_reply_err(req
, EINVAL
);
1329 memset(&fi
, 0, sizeof(fi
));
1332 if (req
->se
->op
.readdir
) {
1333 req
->se
->op
.readdir(req
, nodeid
, arg
->size
, arg
->offset
, &fi
);
1335 fuse_reply_err(req
, ENOSYS
);
1339 static void do_readdirplus(fuse_req_t req
, fuse_ino_t nodeid
,
1340 struct fuse_mbuf_iter
*iter
)
1342 struct fuse_read_in
*arg
;
1343 struct fuse_file_info fi
;
1345 arg
= fuse_mbuf_iter_advance(iter
, sizeof(*arg
));
1347 fuse_reply_err(req
, EINVAL
);
1351 memset(&fi
, 0, sizeof(fi
));
1354 if (req
->se
->op
.readdirplus
) {
1355 req
->se
->op
.readdirplus(req
, nodeid
, arg
->size
, arg
->offset
, &fi
);
1357 fuse_reply_err(req
, ENOSYS
);
1361 static void do_releasedir(fuse_req_t req
, fuse_ino_t nodeid
,
1362 struct fuse_mbuf_iter
*iter
)
1364 struct fuse_release_in
*arg
;
1365 struct fuse_file_info fi
;
1367 arg
= fuse_mbuf_iter_advance(iter
, sizeof(*arg
));
1369 fuse_reply_err(req
, EINVAL
);
1373 memset(&fi
, 0, sizeof(fi
));
1374 fi
.flags
= arg
->flags
;
1377 if (req
->se
->op
.releasedir
) {
1378 req
->se
->op
.releasedir(req
, nodeid
, &fi
);
1380 fuse_reply_err(req
, 0);
1384 static void do_fsyncdir(fuse_req_t req
, fuse_ino_t nodeid
,
1385 struct fuse_mbuf_iter
*iter
)
1387 struct fuse_fsync_in
*arg
;
1388 struct fuse_file_info fi
;
1391 arg
= fuse_mbuf_iter_advance(iter
, sizeof(*arg
));
1393 fuse_reply_err(req
, EINVAL
);
1396 datasync
= arg
->fsync_flags
& 1;
1398 memset(&fi
, 0, sizeof(fi
));
1401 if (req
->se
->op
.fsyncdir
) {
1402 req
->se
->op
.fsyncdir(req
, nodeid
, datasync
, &fi
);
1404 fuse_reply_err(req
, ENOSYS
);
1408 static void do_statfs(fuse_req_t req
, fuse_ino_t nodeid
,
1409 struct fuse_mbuf_iter
*iter
)
1414 if (req
->se
->op
.statfs
) {
1415 req
->se
->op
.statfs(req
, nodeid
);
1417 struct statvfs buf
= {
1421 fuse_reply_statfs(req
, &buf
);
1425 static void do_setxattr(fuse_req_t req
, fuse_ino_t nodeid
,
1426 struct fuse_mbuf_iter
*iter
)
1428 struct fuse_setxattr_in
*arg
;
1432 arg
= fuse_mbuf_iter_advance(iter
, sizeof(*arg
));
1433 name
= fuse_mbuf_iter_advance_str(iter
);
1434 if (!arg
|| !name
) {
1435 fuse_reply_err(req
, EINVAL
);
1439 value
= fuse_mbuf_iter_advance(iter
, arg
->size
);
1441 fuse_reply_err(req
, EINVAL
);
1445 if (req
->se
->op
.setxattr
) {
1446 req
->se
->op
.setxattr(req
, nodeid
, name
, value
, arg
->size
, arg
->flags
);
1448 fuse_reply_err(req
, ENOSYS
);
1452 static void do_getxattr(fuse_req_t req
, fuse_ino_t nodeid
,
1453 struct fuse_mbuf_iter
*iter
)
1455 struct fuse_getxattr_in
*arg
;
1458 arg
= fuse_mbuf_iter_advance(iter
, sizeof(*arg
));
1459 name
= fuse_mbuf_iter_advance_str(iter
);
1460 if (!arg
|| !name
) {
1461 fuse_reply_err(req
, EINVAL
);
1465 if (req
->se
->op
.getxattr
) {
1466 req
->se
->op
.getxattr(req
, nodeid
, name
, arg
->size
);
1468 fuse_reply_err(req
, ENOSYS
);
1472 static void do_listxattr(fuse_req_t req
, fuse_ino_t nodeid
,
1473 struct fuse_mbuf_iter
*iter
)
1475 struct fuse_getxattr_in
*arg
;
1477 arg
= fuse_mbuf_iter_advance(iter
, sizeof(*arg
));
1479 fuse_reply_err(req
, EINVAL
);
1483 if (req
->se
->op
.listxattr
) {
1484 req
->se
->op
.listxattr(req
, nodeid
, arg
->size
);
1486 fuse_reply_err(req
, ENOSYS
);
1490 static void do_removexattr(fuse_req_t req
, fuse_ino_t nodeid
,
1491 struct fuse_mbuf_iter
*iter
)
1493 const char *name
= fuse_mbuf_iter_advance_str(iter
);
1496 fuse_reply_err(req
, EINVAL
);
1500 if (req
->se
->op
.removexattr
) {
1501 req
->se
->op
.removexattr(req
, nodeid
, name
);
1503 fuse_reply_err(req
, ENOSYS
);
1507 static void convert_fuse_file_lock(struct fuse_file_lock
*fl
,
1508 struct flock
*flock
)
1510 memset(flock
, 0, sizeof(struct flock
));
1511 flock
->l_type
= fl
->type
;
1512 flock
->l_whence
= SEEK_SET
;
1513 flock
->l_start
= fl
->start
;
1514 if (fl
->end
== OFFSET_MAX
) {
1517 flock
->l_len
= fl
->end
- fl
->start
+ 1;
1519 flock
->l_pid
= fl
->pid
;
1522 static void do_getlk(fuse_req_t req
, fuse_ino_t nodeid
,
1523 struct fuse_mbuf_iter
*iter
)
1525 struct fuse_lk_in
*arg
;
1526 struct fuse_file_info fi
;
1529 arg
= fuse_mbuf_iter_advance(iter
, sizeof(*arg
));
1531 fuse_reply_err(req
, EINVAL
);
1535 memset(&fi
, 0, sizeof(fi
));
1537 fi
.lock_owner
= arg
->owner
;
1539 convert_fuse_file_lock(&arg
->lk
, &flock
);
1540 if (req
->se
->op
.getlk
) {
1541 req
->se
->op
.getlk(req
, nodeid
, &fi
, &flock
);
1543 fuse_reply_err(req
, ENOSYS
);
1547 static void do_setlk_common(fuse_req_t req
, fuse_ino_t nodeid
,
1548 struct fuse_mbuf_iter
*iter
, int sleep
)
1550 struct fuse_lk_in
*arg
;
1551 struct fuse_file_info fi
;
1554 arg
= fuse_mbuf_iter_advance(iter
, sizeof(*arg
));
1556 fuse_reply_err(req
, EINVAL
);
1560 memset(&fi
, 0, sizeof(fi
));
1562 fi
.lock_owner
= arg
->owner
;
1564 if (arg
->lk_flags
& FUSE_LK_FLOCK
) {
1567 switch (arg
->lk
.type
) {
1582 if (req
->se
->op
.flock
) {
1583 req
->se
->op
.flock(req
, nodeid
, &fi
, op
);
1585 fuse_reply_err(req
, ENOSYS
);
1588 convert_fuse_file_lock(&arg
->lk
, &flock
);
1589 if (req
->se
->op
.setlk
) {
1590 req
->se
->op
.setlk(req
, nodeid
, &fi
, &flock
, sleep
);
1592 fuse_reply_err(req
, ENOSYS
);
1597 static void do_setlk(fuse_req_t req
, fuse_ino_t nodeid
,
1598 struct fuse_mbuf_iter
*iter
)
1600 do_setlk_common(req
, nodeid
, iter
, 0);
1603 static void do_setlkw(fuse_req_t req
, fuse_ino_t nodeid
,
1604 struct fuse_mbuf_iter
*iter
)
1606 do_setlk_common(req
, nodeid
, iter
, 1);
1609 static int find_interrupted(struct fuse_session
*se
, struct fuse_req
*req
)
1611 struct fuse_req
*curr
;
1613 for (curr
= se
->list
.next
; curr
!= &se
->list
; curr
= curr
->next
) {
1614 if (curr
->unique
== req
->u
.i
.unique
) {
1615 fuse_interrupt_func_t func
;
1619 pthread_mutex_unlock(&se
->lock
);
1621 /* Ugh, ugly locking */
1622 pthread_mutex_lock(&curr
->lock
);
1623 pthread_mutex_lock(&se
->lock
);
1624 curr
->interrupted
= 1;
1625 func
= curr
->u
.ni
.func
;
1626 data
= curr
->u
.ni
.data
;
1627 pthread_mutex_unlock(&se
->lock
);
1631 pthread_mutex_unlock(&curr
->lock
);
1633 pthread_mutex_lock(&se
->lock
);
1642 for (curr
= se
->interrupts
.next
; curr
!= &se
->interrupts
;
1643 curr
= curr
->next
) {
1644 if (curr
->u
.i
.unique
== req
->u
.i
.unique
) {
1651 static void do_interrupt(fuse_req_t req
, fuse_ino_t nodeid
,
1652 struct fuse_mbuf_iter
*iter
)
1654 struct fuse_interrupt_in
*arg
;
1655 struct fuse_session
*se
= req
->se
;
1659 arg
= fuse_mbuf_iter_advance(iter
, sizeof(*arg
));
1661 fuse_reply_err(req
, EINVAL
);
1665 fuse_log(FUSE_LOG_DEBUG
, "INTERRUPT: %llu\n",
1666 (unsigned long long)arg
->unique
);
1668 req
->u
.i
.unique
= arg
->unique
;
1670 pthread_mutex_lock(&se
->lock
);
1671 if (find_interrupted(se
, req
)) {
1674 list_add_req(req
, &se
->interrupts
);
1676 pthread_mutex_unlock(&se
->lock
);
1679 static struct fuse_req
*check_interrupt(struct fuse_session
*se
,
1680 struct fuse_req
*req
)
1682 struct fuse_req
*curr
;
1684 for (curr
= se
->interrupts
.next
; curr
!= &se
->interrupts
;
1685 curr
= curr
->next
) {
1686 if (curr
->u
.i
.unique
== req
->unique
) {
1687 req
->interrupted
= 1;
1693 curr
= se
->interrupts
.next
;
1694 if (curr
!= &se
->interrupts
) {
1696 list_init_req(curr
);
1703 static void do_bmap(fuse_req_t req
, fuse_ino_t nodeid
,
1704 struct fuse_mbuf_iter
*iter
)
1706 struct fuse_bmap_in
*arg
= fuse_mbuf_iter_advance(iter
, sizeof(*arg
));
1709 fuse_reply_err(req
, EINVAL
);
1713 if (req
->se
->op
.bmap
) {
1714 req
->se
->op
.bmap(req
, nodeid
, arg
->blocksize
, arg
->block
);
1716 fuse_reply_err(req
, ENOSYS
);
1720 static void do_ioctl(fuse_req_t req
, fuse_ino_t nodeid
,
1721 struct fuse_mbuf_iter
*iter
)
1723 struct fuse_ioctl_in
*arg
;
1725 void *in_buf
= NULL
;
1726 struct fuse_file_info fi
;
1728 arg
= fuse_mbuf_iter_advance(iter
, sizeof(*arg
));
1730 fuse_reply_err(req
, EINVAL
);
1735 if (flags
& FUSE_IOCTL_DIR
&& !(req
->se
->conn
.want
& FUSE_CAP_IOCTL_DIR
)) {
1736 fuse_reply_err(req
, ENOTTY
);
1741 in_buf
= fuse_mbuf_iter_advance(iter
, arg
->in_size
);
1743 fuse_reply_err(req
, EINVAL
);
1748 memset(&fi
, 0, sizeof(fi
));
1751 if (sizeof(void *) == 4 && !(flags
& FUSE_IOCTL_32BIT
)) {
1752 req
->ioctl_64bit
= 1;
1755 if (req
->se
->op
.ioctl
) {
1756 req
->se
->op
.ioctl(req
, nodeid
, arg
->cmd
, (void *)(uintptr_t)arg
->arg
,
1757 &fi
, flags
, in_buf
, arg
->in_size
, arg
->out_size
);
1759 fuse_reply_err(req
, ENOSYS
);
/* Release a poll handle allocated by do_poll(). */
void fuse_pollhandle_destroy(struct fuse_pollhandle *ph)
{
    free(ph);
}
1768 static void do_poll(fuse_req_t req
, fuse_ino_t nodeid
,
1769 struct fuse_mbuf_iter
*iter
)
1771 struct fuse_poll_in
*arg
;
1772 struct fuse_file_info fi
;
1774 arg
= fuse_mbuf_iter_advance(iter
, sizeof(*arg
));
1776 fuse_reply_err(req
, EINVAL
);
1780 memset(&fi
, 0, sizeof(fi
));
1782 fi
.poll_events
= arg
->events
;
1784 if (req
->se
->op
.poll
) {
1785 struct fuse_pollhandle
*ph
= NULL
;
1787 if (arg
->flags
& FUSE_POLL_SCHEDULE_NOTIFY
) {
1788 ph
= malloc(sizeof(struct fuse_pollhandle
));
1790 fuse_reply_err(req
, ENOMEM
);
1797 req
->se
->op
.poll(req
, nodeid
, &fi
, ph
);
1799 fuse_reply_err(req
, ENOSYS
);
1803 static void do_fallocate(fuse_req_t req
, fuse_ino_t nodeid
,
1804 struct fuse_mbuf_iter
*iter
)
1806 struct fuse_fallocate_in
*arg
;
1807 struct fuse_file_info fi
;
1809 arg
= fuse_mbuf_iter_advance(iter
, sizeof(*arg
));
1811 fuse_reply_err(req
, EINVAL
);
1815 memset(&fi
, 0, sizeof(fi
));
1818 if (req
->se
->op
.fallocate
) {
1819 req
->se
->op
.fallocate(req
, nodeid
, arg
->mode
, arg
->offset
, arg
->length
,
1822 fuse_reply_err(req
, ENOSYS
);
1826 static void do_copy_file_range(fuse_req_t req
, fuse_ino_t nodeid_in
,
1827 struct fuse_mbuf_iter
*iter
)
1829 struct fuse_copy_file_range_in
*arg
;
1830 struct fuse_file_info fi_in
, fi_out
;
1832 arg
= fuse_mbuf_iter_advance(iter
, sizeof(*arg
));
1834 fuse_reply_err(req
, EINVAL
);
1838 memset(&fi_in
, 0, sizeof(fi_in
));
1839 fi_in
.fh
= arg
->fh_in
;
1841 memset(&fi_out
, 0, sizeof(fi_out
));
1842 fi_out
.fh
= arg
->fh_out
;
1845 if (req
->se
->op
.copy_file_range
) {
1846 req
->se
->op
.copy_file_range(req
, nodeid_in
, arg
->off_in
, &fi_in
,
1847 arg
->nodeid_out
, arg
->off_out
, &fi_out
,
1848 arg
->len
, arg
->flags
);
1850 fuse_reply_err(req
, ENOSYS
);
1854 static void do_lseek(fuse_req_t req
, fuse_ino_t nodeid
,
1855 struct fuse_mbuf_iter
*iter
)
1857 struct fuse_lseek_in
*arg
;
1858 struct fuse_file_info fi
;
1860 arg
= fuse_mbuf_iter_advance(iter
, sizeof(*arg
));
1862 fuse_reply_err(req
, EINVAL
);
1865 memset(&fi
, 0, sizeof(fi
));
1868 if (req
->se
->op
.lseek
) {
1869 req
->se
->op
.lseek(req
, nodeid
, arg
->offset
, arg
->whence
, &fi
);
1871 fuse_reply_err(req
, ENOSYS
);
1875 static void do_init(fuse_req_t req
, fuse_ino_t nodeid
,
1876 struct fuse_mbuf_iter
*iter
)
1878 size_t compat_size
= offsetof(struct fuse_init_in
, max_readahead
);
1879 struct fuse_init_in
*arg
;
1880 struct fuse_init_out outarg
;
1881 struct fuse_session
*se
= req
->se
;
1882 size_t bufsize
= se
->bufsize
;
1883 size_t outargsize
= sizeof(outarg
);
1887 /* First consume the old fields... */
1888 arg
= fuse_mbuf_iter_advance(iter
, compat_size
);
1890 fuse_reply_err(req
, EINVAL
);
1894 /* ...and now consume the new fields. */
1895 if (arg
->major
== 7 && arg
->minor
>= 6) {
1896 if (!fuse_mbuf_iter_advance(iter
, sizeof(*arg
) - compat_size
)) {
1897 fuse_reply_err(req
, EINVAL
);
1902 fuse_log(FUSE_LOG_DEBUG
, "INIT: %u.%u\n", arg
->major
, arg
->minor
);
1903 if (arg
->major
== 7 && arg
->minor
>= 6) {
1904 fuse_log(FUSE_LOG_DEBUG
, "flags=0x%08x\n", arg
->flags
);
1905 fuse_log(FUSE_LOG_DEBUG
, "max_readahead=0x%08x\n", arg
->max_readahead
);
1907 se
->conn
.proto_major
= arg
->major
;
1908 se
->conn
.proto_minor
= arg
->minor
;
1909 se
->conn
.capable
= 0;
1912 memset(&outarg
, 0, sizeof(outarg
));
1913 outarg
.major
= FUSE_KERNEL_VERSION
;
1914 outarg
.minor
= FUSE_KERNEL_MINOR_VERSION
;
1916 if (arg
->major
< 7 || (arg
->major
== 7 && arg
->minor
< 31)) {
1917 fuse_log(FUSE_LOG_ERR
, "fuse: unsupported protocol version: %u.%u\n",
1918 arg
->major
, arg
->minor
);
1919 fuse_reply_err(req
, EPROTO
);
1923 if (arg
->major
> 7) {
1924 /* Wait for a second INIT request with a 7.X version */
1925 send_reply_ok(req
, &outarg
, sizeof(outarg
));
1929 if (arg
->max_readahead
< se
->conn
.max_readahead
) {
1930 se
->conn
.max_readahead
= arg
->max_readahead
;
1932 if (arg
->flags
& FUSE_ASYNC_READ
) {
1933 se
->conn
.capable
|= FUSE_CAP_ASYNC_READ
;
1935 if (arg
->flags
& FUSE_POSIX_LOCKS
) {
1936 se
->conn
.capable
|= FUSE_CAP_POSIX_LOCKS
;
1938 if (arg
->flags
& FUSE_ATOMIC_O_TRUNC
) {
1939 se
->conn
.capable
|= FUSE_CAP_ATOMIC_O_TRUNC
;
1941 if (arg
->flags
& FUSE_EXPORT_SUPPORT
) {
1942 se
->conn
.capable
|= FUSE_CAP_EXPORT_SUPPORT
;
1944 if (arg
->flags
& FUSE_DONT_MASK
) {
1945 se
->conn
.capable
|= FUSE_CAP_DONT_MASK
;
1947 if (arg
->flags
& FUSE_FLOCK_LOCKS
) {
1948 se
->conn
.capable
|= FUSE_CAP_FLOCK_LOCKS
;
1950 if (arg
->flags
& FUSE_AUTO_INVAL_DATA
) {
1951 se
->conn
.capable
|= FUSE_CAP_AUTO_INVAL_DATA
;
1953 if (arg
->flags
& FUSE_DO_READDIRPLUS
) {
1954 se
->conn
.capable
|= FUSE_CAP_READDIRPLUS
;
1956 if (arg
->flags
& FUSE_READDIRPLUS_AUTO
) {
1957 se
->conn
.capable
|= FUSE_CAP_READDIRPLUS_AUTO
;
1959 if (arg
->flags
& FUSE_ASYNC_DIO
) {
1960 se
->conn
.capable
|= FUSE_CAP_ASYNC_DIO
;
1962 if (arg
->flags
& FUSE_WRITEBACK_CACHE
) {
1963 se
->conn
.capable
|= FUSE_CAP_WRITEBACK_CACHE
;
1965 if (arg
->flags
& FUSE_NO_OPEN_SUPPORT
) {
1966 se
->conn
.capable
|= FUSE_CAP_NO_OPEN_SUPPORT
;
1968 if (arg
->flags
& FUSE_PARALLEL_DIROPS
) {
1969 se
->conn
.capable
|= FUSE_CAP_PARALLEL_DIROPS
;
1971 if (arg
->flags
& FUSE_POSIX_ACL
) {
1972 se
->conn
.capable
|= FUSE_CAP_POSIX_ACL
;
1974 if (arg
->flags
& FUSE_HANDLE_KILLPRIV
) {
1975 se
->conn
.capable
|= FUSE_CAP_HANDLE_KILLPRIV
;
1977 if (arg
->flags
& FUSE_NO_OPENDIR_SUPPORT
) {
1978 se
->conn
.capable
|= FUSE_CAP_NO_OPENDIR_SUPPORT
;
1980 if (!(arg
->flags
& FUSE_MAX_PAGES
)) {
1981 size_t max_bufsize
= FUSE_DEFAULT_MAX_PAGES_PER_REQ
* getpagesize() +
1982 FUSE_BUFFER_HEADER_SIZE
;
1983 if (bufsize
> max_bufsize
) {
1984 bufsize
= max_bufsize
;
1988 #ifdef HAVE_VMSPLICE
1989 se
->conn
.capable
|= FUSE_CAP_SPLICE_WRITE
| FUSE_CAP_SPLICE_MOVE
;
1991 se
->conn
.capable
|= FUSE_CAP_SPLICE_READ
;
1993 se
->conn
.capable
|= FUSE_CAP_IOCTL_DIR
;
1996 * Default settings for modern filesystems.
1998 * Most of these capabilities were disabled by default in
1999 * libfuse2 for backwards compatibility reasons. In libfuse3,
2000 * we can finally enable them by default (as long as they're
2001 * supported by the kernel).
2003 #define LL_SET_DEFAULT(cond, cap) \
2004 if ((cond) && (se->conn.capable & (cap))) \
2005 se->conn.want |= (cap)
2006 LL_SET_DEFAULT(1, FUSE_CAP_ASYNC_READ
);
2007 LL_SET_DEFAULT(1, FUSE_CAP_PARALLEL_DIROPS
);
2008 LL_SET_DEFAULT(1, FUSE_CAP_AUTO_INVAL_DATA
);
2009 LL_SET_DEFAULT(1, FUSE_CAP_HANDLE_KILLPRIV
);
2010 LL_SET_DEFAULT(1, FUSE_CAP_ASYNC_DIO
);
2011 LL_SET_DEFAULT(1, FUSE_CAP_IOCTL_DIR
);
2012 LL_SET_DEFAULT(1, FUSE_CAP_ATOMIC_O_TRUNC
);
2013 LL_SET_DEFAULT(se
->op
.write_buf
, FUSE_CAP_SPLICE_READ
);
2014 LL_SET_DEFAULT(se
->op
.getlk
&& se
->op
.setlk
, FUSE_CAP_POSIX_LOCKS
);
2015 LL_SET_DEFAULT(se
->op
.flock
, FUSE_CAP_FLOCK_LOCKS
);
2016 LL_SET_DEFAULT(se
->op
.readdirplus
, FUSE_CAP_READDIRPLUS
);
2017 LL_SET_DEFAULT(se
->op
.readdirplus
&& se
->op
.readdir
,
2018 FUSE_CAP_READDIRPLUS_AUTO
);
2019 se
->conn
.time_gran
= 1;
2021 if (bufsize
< FUSE_MIN_READ_BUFFER
) {
2022 fuse_log(FUSE_LOG_ERR
, "fuse: warning: buffer size too small: %zu\n",
2024 bufsize
= FUSE_MIN_READ_BUFFER
;
2026 se
->bufsize
= bufsize
;
2028 if (se
->conn
.max_write
> bufsize
- FUSE_BUFFER_HEADER_SIZE
) {
2029 se
->conn
.max_write
= bufsize
- FUSE_BUFFER_HEADER_SIZE
;
2033 se
->got_destroy
= 0;
2035 se
->op
.init(se
->userdata
, &se
->conn
);
2038 if (se
->conn
.want
& (~se
->conn
.capable
)) {
2039 fuse_log(FUSE_LOG_ERR
,
2040 "fuse: error: filesystem requested capabilities "
2041 "0x%x that are not supported by kernel, aborting.\n",
2042 se
->conn
.want
& (~se
->conn
.capable
));
2043 fuse_reply_err(req
, EPROTO
);
2044 se
->error
= -EPROTO
;
2045 fuse_session_exit(se
);
2049 if (se
->conn
.max_write
< bufsize
- FUSE_BUFFER_HEADER_SIZE
) {
2050 se
->bufsize
= se
->conn
.max_write
+ FUSE_BUFFER_HEADER_SIZE
;
2052 if (arg
->flags
& FUSE_MAX_PAGES
) {
2053 outarg
.flags
|= FUSE_MAX_PAGES
;
2054 outarg
.max_pages
= (se
->conn
.max_write
- 1) / getpagesize() + 1;
2058 * Always enable big writes, this is superseded
2059 * by the max_write option
2061 outarg
.flags
|= FUSE_BIG_WRITES
;
2063 if (se
->conn
.want
& FUSE_CAP_ASYNC_READ
) {
2064 outarg
.flags
|= FUSE_ASYNC_READ
;
2066 if (se
->conn
.want
& FUSE_CAP_PARALLEL_DIROPS
) {
2067 outarg
.flags
|= FUSE_PARALLEL_DIROPS
;
2069 if (se
->conn
.want
& FUSE_CAP_POSIX_LOCKS
) {
2070 outarg
.flags
|= FUSE_POSIX_LOCKS
;
2072 if (se
->conn
.want
& FUSE_CAP_ATOMIC_O_TRUNC
) {
2073 outarg
.flags
|= FUSE_ATOMIC_O_TRUNC
;
2075 if (se
->conn
.want
& FUSE_CAP_EXPORT_SUPPORT
) {
2076 outarg
.flags
|= FUSE_EXPORT_SUPPORT
;
2078 if (se
->conn
.want
& FUSE_CAP_DONT_MASK
) {
2079 outarg
.flags
|= FUSE_DONT_MASK
;
2081 if (se
->conn
.want
& FUSE_CAP_FLOCK_LOCKS
) {
2082 outarg
.flags
|= FUSE_FLOCK_LOCKS
;
2084 if (se
->conn
.want
& FUSE_CAP_AUTO_INVAL_DATA
) {
2085 outarg
.flags
|= FUSE_AUTO_INVAL_DATA
;
2087 if (se
->conn
.want
& FUSE_CAP_READDIRPLUS
) {
2088 outarg
.flags
|= FUSE_DO_READDIRPLUS
;
2090 if (se
->conn
.want
& FUSE_CAP_READDIRPLUS_AUTO
) {
2091 outarg
.flags
|= FUSE_READDIRPLUS_AUTO
;
2093 if (se
->conn
.want
& FUSE_CAP_ASYNC_DIO
) {
2094 outarg
.flags
|= FUSE_ASYNC_DIO
;
2096 if (se
->conn
.want
& FUSE_CAP_WRITEBACK_CACHE
) {
2097 outarg
.flags
|= FUSE_WRITEBACK_CACHE
;
2099 if (se
->conn
.want
& FUSE_CAP_POSIX_ACL
) {
2100 outarg
.flags
|= FUSE_POSIX_ACL
;
2102 outarg
.max_readahead
= se
->conn
.max_readahead
;
2103 outarg
.max_write
= se
->conn
.max_write
;
2104 if (se
->conn
.max_background
>= (1 << 16)) {
2105 se
->conn
.max_background
= (1 << 16) - 1;
2107 if (se
->conn
.congestion_threshold
> se
->conn
.max_background
) {
2108 se
->conn
.congestion_threshold
= se
->conn
.max_background
;
2110 if (!se
->conn
.congestion_threshold
) {
2111 se
->conn
.congestion_threshold
= se
->conn
.max_background
* 3 / 4;
2114 outarg
.max_background
= se
->conn
.max_background
;
2115 outarg
.congestion_threshold
= se
->conn
.congestion_threshold
;
2116 outarg
.time_gran
= se
->conn
.time_gran
;
2118 fuse_log(FUSE_LOG_DEBUG
, " INIT: %u.%u\n", outarg
.major
, outarg
.minor
);
2119 fuse_log(FUSE_LOG_DEBUG
, " flags=0x%08x\n", outarg
.flags
);
2120 fuse_log(FUSE_LOG_DEBUG
, " max_readahead=0x%08x\n", outarg
.max_readahead
);
2121 fuse_log(FUSE_LOG_DEBUG
, " max_write=0x%08x\n", outarg
.max_write
);
2122 fuse_log(FUSE_LOG_DEBUG
, " max_background=%i\n", outarg
.max_background
);
2123 fuse_log(FUSE_LOG_DEBUG
, " congestion_threshold=%i\n",
2124 outarg
.congestion_threshold
);
2125 fuse_log(FUSE_LOG_DEBUG
, " time_gran=%u\n", outarg
.time_gran
);
2127 send_reply_ok(req
, &outarg
, outargsize
);
2130 static void do_destroy(fuse_req_t req
, fuse_ino_t nodeid
,
2131 struct fuse_mbuf_iter
*iter
)
2133 struct fuse_session
*se
= req
->se
;
2138 se
->got_destroy
= 1;
2140 if (se
->op
.destroy
) {
2141 se
->op
.destroy(se
->userdata
);
2144 send_reply_ok(req
, NULL
, 0);
2147 static int send_notify_iov(struct fuse_session
*se
, int notify_code
,
2148 struct iovec
*iov
, int count
)
2150 struct fuse_out_header out
= {
2151 .error
= notify_code
,
2154 if (!se
->got_init
) {
2158 iov
[0].iov_base
= &out
;
2159 iov
[0].iov_len
= sizeof(struct fuse_out_header
);
2161 return fuse_send_msg(se
, NULL
, iov
, count
);
2164 int fuse_lowlevel_notify_poll(struct fuse_pollhandle
*ph
)
2167 struct fuse_notify_poll_wakeup_out outarg
= {
2170 struct iovec iov
[2];
2172 iov
[1].iov_base
= &outarg
;
2173 iov
[1].iov_len
= sizeof(outarg
);
2175 return send_notify_iov(ph
->se
, FUSE_NOTIFY_POLL
, iov
, 2);
2181 int fuse_lowlevel_notify_inval_inode(struct fuse_session
*se
, fuse_ino_t ino
,
2182 off_t off
, off_t len
)
2184 struct fuse_notify_inval_inode_out outarg
= {
2189 struct iovec iov
[2];
2195 iov
[1].iov_base
= &outarg
;
2196 iov
[1].iov_len
= sizeof(outarg
);
2198 return send_notify_iov(se
, FUSE_NOTIFY_INVAL_INODE
, iov
, 2);
2201 int fuse_lowlevel_notify_inval_entry(struct fuse_session
*se
, fuse_ino_t parent
,
2202 const char *name
, size_t namelen
)
2204 struct fuse_notify_inval_entry_out outarg
= {
2208 struct iovec iov
[3];
2214 iov
[1].iov_base
= &outarg
;
2215 iov
[1].iov_len
= sizeof(outarg
);
2216 iov
[2].iov_base
= (void *)name
;
2217 iov
[2].iov_len
= namelen
+ 1;
2219 return send_notify_iov(se
, FUSE_NOTIFY_INVAL_ENTRY
, iov
, 3);
2222 int fuse_lowlevel_notify_delete(struct fuse_session
*se
, fuse_ino_t parent
,
2223 fuse_ino_t child
, const char *name
,
2226 struct fuse_notify_delete_out outarg
= {
2231 struct iovec iov
[3];
2237 iov
[1].iov_base
= &outarg
;
2238 iov
[1].iov_len
= sizeof(outarg
);
2239 iov
[2].iov_base
= (void *)name
;
2240 iov
[2].iov_len
= namelen
+ 1;
2242 return send_notify_iov(se
, FUSE_NOTIFY_DELETE
, iov
, 3);
2245 int fuse_lowlevel_notify_store(struct fuse_session
*se
, fuse_ino_t ino
,
2246 off_t offset
, struct fuse_bufvec
*bufv
)
2248 struct fuse_out_header out
= {
2249 .error
= FUSE_NOTIFY_STORE
,
2251 struct fuse_notify_store_out outarg
= {
2254 .size
= fuse_buf_size(bufv
),
2256 struct iovec iov
[3];
2263 iov
[0].iov_base
= &out
;
2264 iov
[0].iov_len
= sizeof(out
);
2265 iov
[1].iov_base
= &outarg
;
2266 iov
[1].iov_len
= sizeof(outarg
);
2268 res
= fuse_send_data_iov(se
, NULL
, iov
, 2, bufv
);
2276 void *fuse_req_userdata(fuse_req_t req
)
2278 return req
->se
->userdata
;
2281 const struct fuse_ctx
*fuse_req_ctx(fuse_req_t req
)
2286 void fuse_req_interrupt_func(fuse_req_t req
, fuse_interrupt_func_t func
,
2289 pthread_mutex_lock(&req
->lock
);
2290 pthread_mutex_lock(&req
->se
->lock
);
2291 req
->u
.ni
.func
= func
;
2292 req
->u
.ni
.data
= data
;
2293 pthread_mutex_unlock(&req
->se
->lock
);
2294 if (req
->interrupted
&& func
) {
2297 pthread_mutex_unlock(&req
->lock
);
2300 int fuse_req_interrupted(fuse_req_t req
)
2304 pthread_mutex_lock(&req
->se
->lock
);
2305 interrupted
= req
->interrupted
;
2306 pthread_mutex_unlock(&req
->se
->lock
);
2312 void (*func
)(fuse_req_t
, fuse_ino_t
, struct fuse_mbuf_iter
*);
2315 [FUSE_LOOKUP
] = { do_lookup
, "LOOKUP" },
2316 [FUSE_FORGET
] = { do_forget
, "FORGET" },
2317 [FUSE_GETATTR
] = { do_getattr
, "GETATTR" },
2318 [FUSE_SETATTR
] = { do_setattr
, "SETATTR" },
2319 [FUSE_READLINK
] = { do_readlink
, "READLINK" },
2320 [FUSE_SYMLINK
] = { do_symlink
, "SYMLINK" },
2321 [FUSE_MKNOD
] = { do_mknod
, "MKNOD" },
2322 [FUSE_MKDIR
] = { do_mkdir
, "MKDIR" },
2323 [FUSE_UNLINK
] = { do_unlink
, "UNLINK" },
2324 [FUSE_RMDIR
] = { do_rmdir
, "RMDIR" },
2325 [FUSE_RENAME
] = { do_rename
, "RENAME" },
2326 [FUSE_LINK
] = { do_link
, "LINK" },
2327 [FUSE_OPEN
] = { do_open
, "OPEN" },
2328 [FUSE_READ
] = { do_read
, "READ" },
2329 [FUSE_WRITE
] = { do_write
, "WRITE" },
2330 [FUSE_STATFS
] = { do_statfs
, "STATFS" },
2331 [FUSE_RELEASE
] = { do_release
, "RELEASE" },
2332 [FUSE_FSYNC
] = { do_fsync
, "FSYNC" },
2333 [FUSE_SETXATTR
] = { do_setxattr
, "SETXATTR" },
2334 [FUSE_GETXATTR
] = { do_getxattr
, "GETXATTR" },
2335 [FUSE_LISTXATTR
] = { do_listxattr
, "LISTXATTR" },
2336 [FUSE_REMOVEXATTR
] = { do_removexattr
, "REMOVEXATTR" },
2337 [FUSE_FLUSH
] = { do_flush
, "FLUSH" },
2338 [FUSE_INIT
] = { do_init
, "INIT" },
2339 [FUSE_OPENDIR
] = { do_opendir
, "OPENDIR" },
2340 [FUSE_READDIR
] = { do_readdir
, "READDIR" },
2341 [FUSE_RELEASEDIR
] = { do_releasedir
, "RELEASEDIR" },
2342 [FUSE_FSYNCDIR
] = { do_fsyncdir
, "FSYNCDIR" },
2343 [FUSE_GETLK
] = { do_getlk
, "GETLK" },
2344 [FUSE_SETLK
] = { do_setlk
, "SETLK" },
2345 [FUSE_SETLKW
] = { do_setlkw
, "SETLKW" },
2346 [FUSE_ACCESS
] = { do_access
, "ACCESS" },
2347 [FUSE_CREATE
] = { do_create
, "CREATE" },
2348 [FUSE_INTERRUPT
] = { do_interrupt
, "INTERRUPT" },
2349 [FUSE_BMAP
] = { do_bmap
, "BMAP" },
2350 [FUSE_IOCTL
] = { do_ioctl
, "IOCTL" },
2351 [FUSE_POLL
] = { do_poll
, "POLL" },
2352 [FUSE_FALLOCATE
] = { do_fallocate
, "FALLOCATE" },
2353 [FUSE_DESTROY
] = { do_destroy
, "DESTROY" },
2354 [FUSE_NOTIFY_REPLY
] = { NULL
, "NOTIFY_REPLY" },
2355 [FUSE_BATCH_FORGET
] = { do_batch_forget
, "BATCH_FORGET" },
2356 [FUSE_READDIRPLUS
] = { do_readdirplus
, "READDIRPLUS" },
2357 [FUSE_RENAME2
] = { do_rename2
, "RENAME2" },
2358 [FUSE_COPY_FILE_RANGE
] = { do_copy_file_range
, "COPY_FILE_RANGE" },
2359 [FUSE_LSEEK
] = { do_lseek
, "LSEEK" },
2362 #define FUSE_MAXOP (sizeof(fuse_ll_ops) / sizeof(fuse_ll_ops[0]))
2364 static const char *opname(enum fuse_opcode opcode
)
2366 if (opcode
>= FUSE_MAXOP
|| !fuse_ll_ops
[opcode
].name
) {
2369 return fuse_ll_ops
[opcode
].name
;
2373 void fuse_session_process_buf(struct fuse_session
*se
,
2374 const struct fuse_buf
*buf
)
2376 struct fuse_bufvec bufv
= { .buf
[0] = *buf
, .count
= 1 };
2377 fuse_session_process_buf_int(se
, &bufv
, NULL
);
2382 * bufv is normally a single entry buffer, except for a write
2383 * where (if it's in memory) then the bufv may be multiple entries,
2384 * where the first entry contains all headers and subsequent entries
2386 * bufv shall not use any offsets etc to make the data anything
2387 * other than contiguous starting from 0.
2389 void fuse_session_process_buf_int(struct fuse_session
*se
,
2390 struct fuse_bufvec
*bufv
,
2391 struct fuse_chan
*ch
)
2393 const struct fuse_buf
*buf
= bufv
->buf
;
2394 struct fuse_mbuf_iter iter
= FUSE_MBUF_ITER_INIT(buf
);
2395 struct fuse_in_header
*in
;
2396 struct fuse_req
*req
;
2399 /* The first buffer must be a memory buffer */
2400 assert(!(buf
->flags
& FUSE_BUF_IS_FD
));
2402 in
= fuse_mbuf_iter_advance(&iter
, sizeof(*in
));
2403 assert(in
); /* caller guarantees the input buffer is large enough */
2407 "unique: %llu, opcode: %s (%i), nodeid: %llu, insize: %zu, pid: %u\n",
2408 (unsigned long long)in
->unique
, opname((enum fuse_opcode
)in
->opcode
),
2409 in
->opcode
, (unsigned long long)in
->nodeid
, buf
->size
, in
->pid
);
2411 req
= fuse_ll_alloc_req(se
);
2413 struct fuse_out_header out
= {
2414 .unique
= in
->unique
,
2417 struct iovec iov
= {
2419 .iov_len
= sizeof(struct fuse_out_header
),
2422 fuse_send_msg(se
, ch
, &iov
, 1);
2426 req
->unique
= in
->unique
;
2427 req
->ctx
.uid
= in
->uid
;
2428 req
->ctx
.gid
= in
->gid
;
2429 req
->ctx
.pid
= in
->pid
;
2433 * INIT and DESTROY requests are serialized, all other request types
2434 * run in parallel. This prevents races between FUSE_INIT and ordinary
2435 * requests, FUSE_INIT and FUSE_INIT, FUSE_INIT and FUSE_DESTROY, and
2436 * FUSE_DESTROY and FUSE_DESTROY.
2438 if (in
->opcode
== FUSE_INIT
|| in
->opcode
== CUSE_INIT
||
2439 in
->opcode
== FUSE_DESTROY
) {
2440 pthread_rwlock_wrlock(&se
->init_rwlock
);
2442 pthread_rwlock_rdlock(&se
->init_rwlock
);
2446 if (!se
->got_init
) {
2447 enum fuse_opcode expected
;
2449 expected
= se
->cuse_data
? CUSE_INIT
: FUSE_INIT
;
2450 if (in
->opcode
!= expected
) {
2453 } else if (in
->opcode
== FUSE_INIT
|| in
->opcode
== CUSE_INIT
) {
2454 if (fuse_lowlevel_is_virtio(se
)) {
2456 * TODO: This is after a hard reboot typically, we need to do
2457 * a destroy, but we can't reply to this request yet so
2458 * we can't use do_destroy
2460 fuse_log(FUSE_LOG_DEBUG
, "%s: reinit\n", __func__
);
2461 se
->got_destroy
= 1;
2463 if (se
->op
.destroy
) {
2464 se
->op
.destroy(se
->userdata
);
2472 /* Implement -o allow_root */
2473 if (se
->deny_others
&& in
->uid
!= se
->owner
&& in
->uid
!= 0 &&
2474 in
->opcode
!= FUSE_INIT
&& in
->opcode
!= FUSE_READ
&&
2475 in
->opcode
!= FUSE_WRITE
&& in
->opcode
!= FUSE_FSYNC
&&
2476 in
->opcode
!= FUSE_RELEASE
&& in
->opcode
!= FUSE_READDIR
&&
2477 in
->opcode
!= FUSE_FSYNCDIR
&& in
->opcode
!= FUSE_RELEASEDIR
&&
2478 in
->opcode
!= FUSE_NOTIFY_REPLY
&& in
->opcode
!= FUSE_READDIRPLUS
) {
2483 if (in
->opcode
>= FUSE_MAXOP
|| !fuse_ll_ops
[in
->opcode
].func
) {
2486 if (in
->opcode
!= FUSE_INTERRUPT
) {
2487 struct fuse_req
*intr
;
2488 pthread_mutex_lock(&se
->lock
);
2489 intr
= check_interrupt(se
, req
);
2490 list_add_req(req
, &se
->list
);
2491 pthread_mutex_unlock(&se
->lock
);
2493 fuse_reply_err(intr
, EAGAIN
);
2497 if (in
->opcode
== FUSE_WRITE
&& se
->op
.write_buf
) {
2498 do_write_buf(req
, in
->nodeid
, &iter
, bufv
);
2500 fuse_ll_ops
[in
->opcode
].func(req
, in
->nodeid
, &iter
);
2503 pthread_rwlock_unlock(&se
->init_rwlock
);
2507 fuse_reply_err(req
, err
);
2508 pthread_rwlock_unlock(&se
->init_rwlock
);
2511 #define LL_OPTION(n, o, v) \
2513 n, offsetof(struct fuse_session, o), v \
2516 static const struct fuse_opt fuse_ll_opts
[] = {
2517 LL_OPTION("debug", debug
, 1),
2518 LL_OPTION("-d", debug
, 1),
2519 LL_OPTION("--debug", debug
, 1),
2520 LL_OPTION("allow_root", deny_others
, 1),
2521 LL_OPTION("--socket-path=%s", vu_socket_path
, 0),
2522 LL_OPTION("--fd=%d", vu_listen_fd
, 0),
2523 LL_OPTION("--thread-pool-size=%d", thread_pool_size
, 0),
2527 void fuse_lowlevel_version(void)
2529 printf("using FUSE kernel interface version %i.%i\n", FUSE_KERNEL_VERSION
,
2530 FUSE_KERNEL_MINOR_VERSION
);
2533 void fuse_lowlevel_help(void)
2536 * These are not all options, but the ones that are
2537 * potentially of interest to an end-user
2540 " -o allow_root allow access by root\n"
2541 " --socket-path=PATH path for the vhost-user socket\n"
2542 " --fd=FDNUM fd number of vhost-user socket\n"
2543 " --thread-pool-size=NUM thread pool size limit (default %d)\n",
2547 void fuse_session_destroy(struct fuse_session
*se
)
2549 if (se
->got_init
&& !se
->got_destroy
) {
2550 if (se
->op
.destroy
) {
2551 se
->op
.destroy(se
->userdata
);
2554 pthread_rwlock_destroy(&se
->init_rwlock
);
2555 pthread_mutex_destroy(&se
->lock
);
2556 free(se
->cuse_data
);
2561 if (fuse_lowlevel_is_virtio(se
)) {
2562 virtio_session_close(se
);
2565 free(se
->vu_socket_path
);
2566 se
->vu_socket_path
= NULL
;
2572 struct fuse_session
*fuse_session_new(struct fuse_args
*args
,
2573 const struct fuse_lowlevel_ops
*op
,
2574 size_t op_size
, void *userdata
)
2576 struct fuse_session
*se
;
2578 if (sizeof(struct fuse_lowlevel_ops
) < op_size
) {
2581 "fuse: warning: library too old, some operations may not work\n");
2582 op_size
= sizeof(struct fuse_lowlevel_ops
);
2585 if (args
->argc
== 0) {
2586 fuse_log(FUSE_LOG_ERR
,
2587 "fuse: empty argv passed to fuse_session_new().\n");
2591 se
= (struct fuse_session
*)calloc(1, sizeof(struct fuse_session
));
2593 fuse_log(FUSE_LOG_ERR
, "fuse: failed to allocate fuse object\n");
2597 se
->vu_listen_fd
= -1;
2598 se
->thread_pool_size
= THREAD_POOL_SIZE
;
2599 se
->conn
.max_write
= UINT_MAX
;
2600 se
->conn
.max_readahead
= UINT_MAX
;
2603 if (fuse_opt_parse(args
, se
, fuse_ll_opts
, NULL
) == -1) {
2606 if (args
->argc
== 1 && args
->argv
[0][0] == '-') {
2607 fuse_log(FUSE_LOG_ERR
,
2608 "fuse: warning: argv[0] looks like an option, but "
2609 "will be ignored\n");
2610 } else if (args
->argc
!= 1) {
2612 fuse_log(FUSE_LOG_ERR
, "fuse: unknown option(s): `");
2613 for (i
= 1; i
< args
->argc
- 1; i
++) {
2614 fuse_log(FUSE_LOG_ERR
, "%s ", args
->argv
[i
]);
2616 fuse_log(FUSE_LOG_ERR
, "%s'\n", args
->argv
[i
]);
2620 if (!se
->vu_socket_path
&& se
->vu_listen_fd
< 0) {
2621 fuse_log(FUSE_LOG_ERR
, "fuse: missing --socket-path or --fd option\n");
2624 if (se
->vu_socket_path
&& se
->vu_listen_fd
>= 0) {
2625 fuse_log(FUSE_LOG_ERR
,
2626 "fuse: --socket-path and --fd cannot be given together\n");
2630 se
->bufsize
= FUSE_MAX_MAX_PAGES
* getpagesize() + FUSE_BUFFER_HEADER_SIZE
;
2632 list_init_req(&se
->list
);
2633 list_init_req(&se
->interrupts
);
2634 fuse_mutex_init(&se
->lock
);
2635 pthread_rwlock_init(&se
->init_rwlock
, NULL
);
2637 memcpy(&se
->op
, op
, op_size
);
2638 se
->owner
= getuid();
2639 se
->userdata
= userdata
;
2644 fuse_opt_free_args(args
);
/* "Mounting" a virtiofsd session means starting the vhost-user transport. */
int fuse_session_mount(struct fuse_session *se)
{
    return virtio_session_mount(se);
}
/* No /dev/fuse fd in the vhost-user transport; kept for API parity. */
int fuse_session_fd(struct fuse_session *se)
{
    (void)se;
    return -1;
}
/* Nothing to unmount for the vhost-user transport; kept for API parity. */
void fuse_session_unmount(struct fuse_session *se)
{
    (void)se;
}
2665 int fuse_lowlevel_is_virtio(struct fuse_session
*se
)
2667 return !!se
->virtio_dev
;
2671 int fuse_req_getgroups(fuse_req_t req
, int size
, gid_t list
[])
2674 size_t bufsize
= 1024;
2678 unsigned long pid
= req
->ctx
.pid
;
2681 sprintf(path
, "/proc/%lu/task/%lu/status", pid
, pid
);
2684 buf
= malloc(bufsize
);
2690 fd
= open(path
, O_RDONLY
);
2695 ret
= read(fd
, buf
, bufsize
);
2702 if ((size_t)ret
== bufsize
) {
2709 s
= strstr(buf
, "\nGroups:");
2718 unsigned long val
= strtoul(s
, &end
, 0);
2736 * This is currently not implemented on other than Linux...
2738 int fuse_req_getgroups(fuse_req_t req
, int size
, gid_t list
[])
2747 void fuse_session_exit(struct fuse_session
*se
)
2752 void fuse_session_reset(struct fuse_session
*se
)
2758 int fuse_session_exited(struct fuse_session
*se
)