/*
 * virtio-fs glue for FUSE
 * Copyright (C) 2018 Red Hat, Inc. and/or its affiliates
 *
 * Authors:
 *   Dave Gilbert <dgilbert@redhat.com>
 *
 * Implements the glue between libfuse and libvhost-user
 *
 * This program can be distributed under the terms of the GNU LGPLv2.
 * See the file COPYING.LIB
 */
#include "qemu/osdep.h"
#include "qemu/iov.h"
#include "qapi/error.h"
#include "fuse_i.h"
#include "standard-headers/linux/fuse.h"
#include "fuse_misc.h"
#include "fuse_virtio.h"

#include <assert.h>
#include <errno.h>
#include <glib.h>
#include <poll.h>
#include <sched.h>
#include <stdlib.h>
#include <string.h>
#include <sys/eventfd.h>
#include <sys/socket.h>
#include <sys/types.h>
#include <sys/un.h>
#include <unistd.h>

#include "contrib/libvhost-user/libvhost-user.h"
struct fv_QueueInfo {
    pthread_t thread;
    /*
     * This lock protects the VuVirtq preventing races between
     * fv_queue_thread() and fv_queue_worker().
     */
    pthread_mutex_t vq_lock;

    struct fv_VuDev *virtio_dev;

    /* Our queue index, corresponds to array position */
    int qidx;
    int kick_fd;
    int kill_fd; /* For killing the thread */
};
typedef struct {
    VuVirtqElement elem;
    struct fuse_chan ch;

    /* Used to complete requests that involve no reply */
    bool reply_sent;
} FVRequest;

/*
 * We pass the dev element into libvhost-user
 * and then use it to get back to the outer
 * container for other data.
 */
struct fv_VuDev {
    VuDev dev;
    struct fuse_session *se;
    /*
     * Either handle virtqueues or vhost-user protocol messages. Don't do
     * both at the same time since that could lead to race conditions if
     * virtqueues or memory tables change while another thread is accessing
     * them.
     *
     * The assumptions are:
     * 1. fv_queue_thread() reads/writes to virtqueues and only reads VuDev.
     * 2. virtio_loop() reads/writes virtqueues and VuDev.
     */
    pthread_rwlock_t vu_dispatch_rwlock;

    /*
     * The following pair of fields are only accessed in the main
     * virtio_loop
     */
    size_t nqueues;
    struct fv_QueueInfo **qi;
};
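
/*
 * Lock ordering used throughout this file: vu_dispatch_rwlock is taken
 * first (read side in queue/worker threads, write side in virtio_loop()),
 * and a queue's vq_lock is only ever taken while holding it.
 */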
/* From the virtio-fs spec */
struct virtio_fs_config {
    char tag[36];
    uint32_t num_queues;
};
/* Callback from libvhost-user */
static uint64_t fv_get_features(VuDev *dev)
{
    return 1ULL << VIRTIO_F_VERSION_1;
}
/* Callback from libvhost-user */
static void fv_set_features(VuDev *dev, uint64_t features)
{
}
/*
 * Callback from libvhost-user if there's a new fd we're supposed to listen
 * to, typically a queue kick?
 */
static void fv_set_watch(VuDev *dev, int fd, int condition, vu_watch_cb cb,
                         void *data)
{
    fuse_log(FUSE_LOG_WARNING, "%s: TODO! fd=%d\n", __func__, fd);
}
/*
 * Callback from libvhost-user if we're no longer supposed to listen on an fd
 */
static void fv_remove_watch(VuDev *dev, int fd)
{
    fuse_log(FUSE_LOG_WARNING, "%s: TODO! fd=%d\n", __func__, fd);
}
/* Callback from libvhost-user to panic */
static void fv_panic(VuDev *dev, const char *err)
{
    fuse_log(FUSE_LOG_ERR, "%s: libvhost-user: %s\n", __func__, err);
    /* TODO: Allow reconnects?? */
    exit(EXIT_FAILURE);
}
/*
 * Copy from an iovec into a fuse_buf (memory only)
 * Caller must ensure there is space
 */
static void copy_from_iov(struct fuse_buf *buf, size_t out_num,
                          const struct iovec *out_sg)
{
    void *dest = buf->mem;

    while (out_num) {
        size_t onelen = out_sg->iov_len;
        memcpy(dest, out_sg->iov_base, onelen);
        dest += onelen;
        out_sg++;
        out_num--;
    }
}
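
/*
 * copy_from_iov() deliberately has no bounds checks: its callers in
 * fv_queue_worker() verify out_len against se->bufsize before copying,
 * per the "Caller must ensure there is space" contract above.
 */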
/*
 * Copy from one iov to another, the given number of bytes
 * The caller must have checked sizes.
 */
static void copy_iov(struct iovec *src_iov, int src_count,
                     struct iovec *dst_iov, int dst_count, size_t to_copy)
{
    size_t dst_offset = 0;
    /* Outer loop copies 'src' elements */
    while (to_copy) {
        assert(src_count);
        size_t src_len = src_iov[0].iov_len;
        size_t src_offset = 0;

        if (src_len > to_copy) {
            src_len = to_copy;
        }
        /* Inner loop copies contents of one 'src' to maybe multiple dst. */
        while (src_len) {
            assert(dst_count);
            size_t dst_len = dst_iov[0].iov_len - dst_offset;
            if (dst_len > src_len) {
                dst_len = src_len;
            }

            memcpy(dst_iov[0].iov_base + dst_offset,
                   src_iov[0].iov_base + src_offset, dst_len);
            src_len -= dst_len;
            to_copy -= dst_len;
            src_offset += dst_len;
            dst_offset += dst_len;

            assert(dst_offset <= dst_iov[0].iov_len);
            if (dst_offset == dst_iov[0].iov_len) {
                dst_offset = 0;
                dst_iov++;
                dst_count--;
            }
        }
        src_iov++;
        src_count--;
    }
}
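
/*
 * Invariant for the loops above: dst_offset tracks how much of the current
 * dst element is already filled, so a single src element may spill across
 * several dst elements and vice versa; to_copy reaching zero is the only
 * normal exit, hence the caller really must have checked sizes.
 */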
/*
 * Called back by ll whenever it wants to send a reply/message back
 * The 1st element of the iov starts with the fuse_out_header
 * 'unique'==0 means it's a notify message.
 */
int virtio_send_msg(struct fuse_session *se, struct fuse_chan *ch,
                    struct iovec *iov, int count)
{
    FVRequest *req = container_of(ch, FVRequest, ch);
    struct fv_QueueInfo *qi = ch->qi;
    VuDev *dev = &se->virtio_dev->dev;
    VuVirtq *q = vu_get_queue(dev, qi->qidx);
    VuVirtqElement *elem = &req->elem;
    int ret = 0;

    assert(count >= 1);
    assert(iov[0].iov_len >= sizeof(struct fuse_out_header));

    struct fuse_out_header *out = iov[0].iov_base;
    /* TODO: Endianness! */

    size_t tosend_len = iov_size(iov, count);

    /* unique == 0 is notification, which we don't support */
    assert(out->unique);
    assert(!req->reply_sent);

    /* The 'in' part of the elem is to qemu */
    unsigned int in_num = elem->in_num;
    struct iovec *in_sg = elem->in_sg;
    size_t in_len = iov_size(in_sg, in_num);
    fuse_log(FUSE_LOG_DEBUG, "%s: elem %d: with %d in desc of length %zd\n",
             __func__, elem->index, in_num, in_len);

    /*
     * The elem should have room for a 'fuse_out_header' (out from fuse)
     * plus the data based on the len in the header.
     */
    if (in_len < sizeof(struct fuse_out_header)) {
        fuse_log(FUSE_LOG_ERR, "%s: elem %d too short for out_header\n",
                 __func__, elem->index);
        ret = -E2BIG;
        goto err;
    }
    if (in_len < tosend_len) {
        fuse_log(FUSE_LOG_ERR, "%s: elem %d too small for data len %zd\n",
                 __func__, elem->index, tosend_len);
        ret = -E2BIG;
        goto err;
    }

    copy_iov(iov, count, in_sg, in_num, tosend_len);

    pthread_rwlock_rdlock(&qi->virtio_dev->vu_dispatch_rwlock);
    pthread_mutex_lock(&qi->vq_lock);
    vu_queue_push(dev, q, elem, tosend_len);
    vu_queue_notify(dev, q);
    pthread_mutex_unlock(&qi->vq_lock);
    pthread_rwlock_unlock(&qi->virtio_dev->vu_dispatch_rwlock);

    req->reply_sent = true;

err:
    return ret;
}
/*
 * Callback from fuse_send_data_iov_* when it's virtio and the buffer
 * is a single FD with FUSE_BUF_IS_FD | FUSE_BUF_FD_SEEK
 * We need to send the iov and then the buffer.
 * Return 0 on success
 */
int virtio_send_data_iov(struct fuse_session *se, struct fuse_chan *ch,
                         struct iovec *iov, int count, struct fuse_bufvec *buf,
                         size_t len)
{
    FVRequest *req = container_of(ch, FVRequest, ch);
    struct fv_QueueInfo *qi = ch->qi;
    VuDev *dev = &se->virtio_dev->dev;
    VuVirtq *q = vu_get_queue(dev, qi->qidx);
    VuVirtqElement *elem = &req->elem;
    int ret = 0;

    assert(count >= 1);
    assert(iov[0].iov_len >= sizeof(struct fuse_out_header));

    struct fuse_out_header *out = iov[0].iov_base;
    /* TODO: Endianness! */

    size_t iov_len = iov_size(iov, count);
    size_t tosend_len = iov_len + len;

    out->len = tosend_len;

    fuse_log(FUSE_LOG_DEBUG, "%s: count=%d len=%zd iov_len=%zd\n", __func__,
             count, len, iov_len);

    /* unique == 0 is notification which we don't support */
    assert(out->unique);

    assert(!req->reply_sent);

    /* The 'in' part of the elem is to qemu */
    unsigned int in_num = elem->in_num;
    struct iovec *in_sg = elem->in_sg;
    size_t in_len = iov_size(in_sg, in_num);
    fuse_log(FUSE_LOG_DEBUG, "%s: elem %d: with %d in desc of length %zd\n",
             __func__, elem->index, in_num, in_len);

    /*
     * The elem should have room for a 'fuse_out_header' (out from fuse)
     * plus the data based on the len in the header.
     */
    if (in_len < sizeof(struct fuse_out_header)) {
        fuse_log(FUSE_LOG_ERR, "%s: elem %d too short for out_header\n",
                 __func__, elem->index);
        ret = E2BIG;
        goto err;
    }
    if (in_len < tosend_len) {
        fuse_log(FUSE_LOG_ERR, "%s: elem %d too small for data len %zd\n",
                 __func__, elem->index, tosend_len);
        ret = E2BIG;
        goto err;
    }

    /* TODO: Limit to 'len' */

    /* First copy the header data from iov->in_sg */
    copy_iov(iov, count, in_sg, in_num, iov_len);

    /*
     * Build a copy of the in_sg iov so we can skip bits in it,
     * including changing the offsets
     */
    struct iovec *in_sg_cpy = calloc(sizeof(struct iovec), in_num);
    assert(in_sg_cpy);
    memcpy(in_sg_cpy, in_sg, sizeof(struct iovec) * in_num);
    /* These get updated as we skip */
    struct iovec *in_sg_ptr = in_sg_cpy;
    int in_sg_cpy_count = in_num;

    /* skip over parts of in_sg that contained the header iov */
    size_t skip_size = iov_len;
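
    /*
     * Read loop: each pass first skips the part of in_sg already consumed
     * (the header on the first pass, short-read progress on later ones),
     * then preadv()s the file straight into the remaining guest
     * descriptors.
     */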
    size_t in_sg_left = 0;
    do {
        while (skip_size != 0 && in_sg_cpy_count) {
            if (skip_size >= in_sg_ptr[0].iov_len) {
                skip_size -= in_sg_ptr[0].iov_len;
                in_sg_ptr++;
                in_sg_cpy_count--;
            } else {
                in_sg_ptr[0].iov_len -= skip_size;
                in_sg_ptr[0].iov_base += skip_size;
                skip_size = 0;
            }
        }

        int i;
        for (i = 0, in_sg_left = 0; i < in_sg_cpy_count; i++) {
            in_sg_left += in_sg_ptr[i].iov_len;
        }
        fuse_log(FUSE_LOG_DEBUG,
                 "%s: after skip skip_size=%zd in_sg_cpy_count=%d "
                 "in_sg_left=%zd\n",
                 __func__, skip_size, in_sg_cpy_count, in_sg_left);
        ret = preadv(buf->buf[0].fd, in_sg_ptr, in_sg_cpy_count,
                     buf->buf[0].pos);
        if (ret == -1) {
            ret = errno;
            fuse_log(FUSE_LOG_DEBUG, "%s: preadv failed (%m) len=%zd\n",
                     __func__, len);
            free(in_sg_cpy);
            goto err;
        }
        fuse_log(FUSE_LOG_DEBUG, "%s: preadv ret=%d len=%zd\n", __func__,
                 ret, len);
        if (ret < len && ret) {
            fuse_log(FUSE_LOG_DEBUG, "%s: ret < len\n", __func__);
            /* Skip over this much next time around */
            skip_size = ret;
            buf->buf[0].pos += ret;
            len -= ret;

            /* Let's do another read */
            continue;
        }
        if (!ret) {
            /* EOF case? */
            fuse_log(FUSE_LOG_DEBUG, "%s: !ret in_sg_left=%zd\n", __func__,
                     in_sg_left);
            break;
        }
        if (ret != len) {
            fuse_log(FUSE_LOG_DEBUG, "%s: ret!=len\n", __func__);
            ret = EIO;
            free(in_sg_cpy);
            goto err;
        }
        in_sg_left -= ret;
        len -= ret;
    } while (in_sg_left);
    free(in_sg_cpy);
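
    /*
     * On a short read (EOF) the break above leaves 'len' holding the
     * unread remainder; the fixup below shrinks the reply length that the
     * guest sees to match what was actually read.
     */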
    /* Need to fix out->len on EOF */
    if (len) {
        struct fuse_out_header *out_sg = in_sg[0].iov_base;

        tosend_len -= len;
        out_sg->len = tosend_len;
    }

    ret = 0;

    pthread_rwlock_rdlock(&qi->virtio_dev->vu_dispatch_rwlock);
    pthread_mutex_lock(&qi->vq_lock);
    vu_queue_push(dev, q, elem, tosend_len);
    vu_queue_notify(dev, q);
    pthread_mutex_unlock(&qi->vq_lock);
    pthread_rwlock_unlock(&qi->virtio_dev->vu_dispatch_rwlock);

    req->reply_sent = true;

err:
    return ret;
}
static __thread bool clone_fs_called;
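
/*
 * Worker threads call unshare(CLONE_FS) once (see fv_queue_worker() below),
 * so changes to shared filesystem state such as the umask or working
 * directory stay local to the thread handling a request.
 */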
/* Process one FVRequest in a thread pool */
static void fv_queue_worker(gpointer data, gpointer user_data)
{
    struct fv_QueueInfo *qi = user_data;
    struct fuse_session *se = qi->virtio_dev->se;
    struct VuDev *dev = &qi->virtio_dev->dev;
    FVRequest *req = data;
    VuVirtqElement *elem = &req->elem;
    struct fuse_buf fbuf = {};
    bool allocated_bufv = false;
    struct fuse_bufvec bufv;
    struct fuse_bufvec *pbufv;

    assert(se->bufsize > sizeof(struct fuse_in_header));

    if (!clone_fs_called) {
        int ret;

        /* unshare FS for xattr operation */
        ret = unshare(CLONE_FS);
        /* should not fail */
        assert(ret == 0);

        clone_fs_called = true;
    }
    /*
     * An element contains one request and the space to send our response
     * They're spread over multiple descriptors in a scatter/gather set
     * and we can't trust the guest to keep them still; so copy in/out.
     */
    fbuf.mem = malloc(se->bufsize);
    assert(fbuf.mem);

    fuse_mutex_init(&req->ch.lock);
    req->ch.qi = qi;
    /* The 'out' part of the elem is from qemu */
    unsigned int out_num = elem->out_num;
    struct iovec *out_sg = elem->out_sg;
    size_t out_len = iov_size(out_sg, out_num);
    fuse_log(FUSE_LOG_DEBUG,
             "%s: elem %d: with %d out desc of length %zd\n",
             __func__, elem->index, out_num, out_len);

    /*
     * The elem should contain a 'fuse_in_header' (in to fuse)
     * plus the data based on the len in the header.
     */
    if (out_len < sizeof(struct fuse_in_header)) {
        fuse_log(FUSE_LOG_ERR, "%s: elem %d too short for in_header\n",
                 __func__, elem->index);
        assert(0); /* TODO */
    }
    if (out_len > se->bufsize) {
        fuse_log(FUSE_LOG_ERR, "%s: elem %d too large for buffer\n", __func__,
                 elem->index);
        assert(0); /* TODO */
    }
    /* Copy just the first element and look at it */
    copy_from_iov(&fbuf, 1, out_sg);

    pbufv = NULL; /* Compiler thinks it's an uninitialised path */
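
    /*
     * Fast-path test: a well-formed FUSE_WRITE arrives as one descriptor
     * holding exactly the fuse_in_header, one holding exactly the
     * fuse_write_in, then the data; only that layout takes the zero-copy
     * branch below.
     */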
    if (out_num > 2 &&
        out_sg[0].iov_len == sizeof(struct fuse_in_header) &&
        ((struct fuse_in_header *)fbuf.mem)->opcode == FUSE_WRITE &&
        out_sg[1].iov_len == sizeof(struct fuse_write_in)) {
        /*
         * For a write we don't actually need to copy the
         * data, we can just do it straight out of guest memory
         * but we must still copy the headers in case the guest
         * was nasty and changed them while we were using them.
         */
        fuse_log(FUSE_LOG_DEBUG, "%s: Write special case\n", __func__);

        /* copy the fuse_write_in header after the fuse_in_header */
        fbuf.mem += out_sg->iov_len;
        copy_from_iov(&fbuf, 1, out_sg + 1);
        fbuf.mem -= out_sg->iov_len;
        fbuf.size = out_sg[0].iov_len + out_sg[1].iov_len;

        /* Allocate the bufv, with space for the rest of the iov */
        pbufv = malloc(sizeof(struct fuse_bufvec) +
                       sizeof(struct fuse_buf) * (out_num - 2));
        if (!pbufv) {
            fuse_log(FUSE_LOG_ERR, "%s: pbufv malloc failed\n",
                     __func__);
            goto out;
        }

        allocated_bufv = true;
        pbufv->count = 1;
        pbufv->buf[0] = fbuf;

        size_t iovindex, pbufvindex;
        iovindex = 2; /* 2 headers, separate iovs */
        pbufvindex = 1; /* 2 headers, 1 fusebuf */

        for (; iovindex < out_num; iovindex++, pbufvindex++) {
            pbufv->count++;
            pbufv->buf[pbufvindex].pos = ~0; /* Dummy */
            pbufv->buf[pbufvindex].flags = 0;
            pbufv->buf[pbufvindex].mem = out_sg[iovindex].iov_base;
            pbufv->buf[pbufvindex].size = out_sg[iovindex].iov_len;
        }
    } else {
        /* Normal (non fast write) path */

        /* Copy the rest of the buffer */
        fbuf.mem += out_sg->iov_len;
        copy_from_iov(&fbuf, out_num - 1, out_sg + 1);
        fbuf.mem -= out_sg->iov_len;
        fbuf.size = out_len;

        /* TODO! Endianness of header */

        /* TODO: Add checks for fuse_session_exited */
        bufv.buf[0] = fbuf;
        bufv.count = 1;
        pbufv = &bufv;
    }
    pbufv->idx = 0;
    pbufv->off = 0;
    fuse_session_process_buf_int(se, pbufv, &req->ch);
out:
    if (allocated_bufv) {
        free(pbufv);
    }

    /* If the request has no reply, still recycle the virtqueue element */
    if (!req->reply_sent) {
        struct VuVirtq *q = vu_get_queue(dev, qi->qidx);

        fuse_log(FUSE_LOG_DEBUG, "%s: elem %d no reply sent\n", __func__,
                 elem->index);

        pthread_rwlock_rdlock(&qi->virtio_dev->vu_dispatch_rwlock);
        pthread_mutex_lock(&qi->vq_lock);
        vu_queue_push(dev, q, elem, 0);
        vu_queue_notify(dev, q);
        pthread_mutex_unlock(&qi->vq_lock);
        pthread_rwlock_unlock(&qi->virtio_dev->vu_dispatch_rwlock);
    }

    pthread_mutex_destroy(&req->ch.lock);
    free(fbuf.mem);
    free(req);
}
/* Thread function for individual queues, created when a queue is 'started' */
static void *fv_queue_thread(void *opaque)
{
    struct fv_QueueInfo *qi = opaque;
    struct VuDev *dev = &qi->virtio_dev->dev;
    struct VuVirtq *q = vu_get_queue(dev, qi->qidx);
    struct fuse_session *se = qi->virtio_dev->se;
    GThreadPool *pool;

    pool = g_thread_pool_new(fv_queue_worker, qi, se->thread_pool_size, TRUE,
                             NULL);
    if (!pool) {
        fuse_log(FUSE_LOG_ERR, "%s: g_thread_pool_new failed\n", __func__);
        return NULL;
    }

    fuse_log(FUSE_LOG_INFO, "%s: Start for queue %d kick_fd %d\n", __func__,
             qi->qidx, qi->kick_fd);
[0].fd
= qi
->kick_fd
;
605 pf
[0].events
= POLLIN
;
607 pf
[1].fd
= qi
->kill_fd
;
608 pf
[1].events
= POLLIN
;
611 fuse_log(FUSE_LOG_DEBUG
, "%s: Waiting for Queue %d event\n", __func__
,
613 int poll_res
= ppoll(pf
, 2, NULL
, NULL
);
615 if (poll_res
== -1) {
616 if (errno
== EINTR
) {
617 fuse_log(FUSE_LOG_INFO
, "%s: ppoll interrupted, going around\n",
621 fuse_log(FUSE_LOG_ERR
, "fv_queue_thread ppoll: %m\n");
624 assert(poll_res
>= 1);
625 if (pf
[0].revents
& (POLLERR
| POLLHUP
| POLLNVAL
)) {
626 fuse_log(FUSE_LOG_ERR
, "%s: Unexpected poll revents %x Queue %d\n",
627 __func__
, pf
[0].revents
, qi
->qidx
);
630 if (pf
[1].revents
& (POLLERR
| POLLHUP
| POLLNVAL
)) {
631 fuse_log(FUSE_LOG_ERR
,
632 "%s: Unexpected poll revents %x Queue %d killfd\n",
633 __func__
, pf
[1].revents
, qi
->qidx
);
637 fuse_log(FUSE_LOG_INFO
, "%s: kill event on queue %d - quitting\n",
641 assert(pf
[0].revents
& POLLIN
);
642 fuse_log(FUSE_LOG_DEBUG
, "%s: Got queue event on Queue %d\n", __func__
,
646 if (eventfd_read(qi
->kick_fd
, &evalue
)) {
647 fuse_log(FUSE_LOG_ERR
, "Eventfd_read for queue: %m\n");
        /* Mutual exclusion with virtio_loop() */
        int ret = pthread_rwlock_rdlock(&qi->virtio_dev->vu_dispatch_rwlock);
        assert(ret == 0); /* there is no possible error case */
        pthread_mutex_lock(&qi->vq_lock);
        /* out is from guest, in is to guest */
        unsigned int in_bytes, out_bytes;
        vu_queue_get_avail_bytes(dev, q, &in_bytes, &out_bytes, ~0, ~0);

        fuse_log(FUSE_LOG_DEBUG,
                 "%s: Queue %d gave evalue: %zx available: in: %u out: %u\n",
                 __func__, qi->qidx, (size_t)evalue, in_bytes, out_bytes);

        while (1) {
            FVRequest *req = vu_queue_pop(dev, q, sizeof(FVRequest));
            if (!req) {
                break;
            }

            req->reply_sent = false;

            g_thread_pool_push(pool, req, NULL);
        }

        pthread_mutex_unlock(&qi->vq_lock);
        pthread_rwlock_unlock(&qi->virtio_dev->vu_dispatch_rwlock);
    }
    g_thread_pool_free(pool, FALSE, TRUE);

    return NULL;
}
static void fv_queue_cleanup_thread(struct fv_VuDev *vud, int qidx)
{
    int ret;
    struct fv_QueueInfo *ourqi;

    assert(qidx < vud->nqueues);
    ourqi = vud->qi[qidx];

    /* Kill the thread */
    if (eventfd_write(ourqi->kill_fd, 1)) {
        fuse_log(FUSE_LOG_ERR, "Eventfd_write for queue %d: %s\n",
                 qidx, strerror(errno));
    }
    ret = pthread_join(ourqi->thread, NULL);
    if (ret) {
        fuse_log(FUSE_LOG_ERR, "%s: Failed to join thread idx %d err %d\n",
                 __func__, qidx, ret);
    }
    pthread_mutex_destroy(&ourqi->vq_lock);
    close(ourqi->kill_fd);
    ourqi->kick_fd = -1;
    free(vud->qi[qidx]);
    vud->qi[qidx] = NULL;
}
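
/*
 * Note that only kill_fd is closed above: kick_fd belongs to libvhost-user
 * (it comes from dev->vq[qidx].kick_fd in fv_queue_set_started()), so it is
 * merely forgotten by resetting it to -1.
 */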
/* Callback from libvhost-user on start or stop of a queue */
static void fv_queue_set_started(VuDev *dev, int qidx, bool started)
{
    struct fv_VuDev *vud = container_of(dev, struct fv_VuDev, dev);
    struct fv_QueueInfo *ourqi;

    fuse_log(FUSE_LOG_INFO, "%s: qidx=%d started=%d\n", __func__, qidx,
             started);
    assert(qidx >= 0);

    /*
     * Ignore additional request queues for now. passthrough_ll.c must be
     * audited for thread-safety issues first. It was written with a
     * well-behaved client in mind and may not protect against all types of
     * races yet.
     */
    if (qidx > 1) {
        fuse_log(FUSE_LOG_ERR,
                 "%s: multiple request queues not yet implemented, please only "
                 "configure 1 request queue\n",
                 __func__);
        exit(EXIT_FAILURE);
    }

    if (started) {
        /* Fire up a thread to watch this queue */
        if (qidx >= vud->nqueues) {
            vud->qi = realloc(vud->qi, (qidx + 1) * sizeof(vud->qi[0]));
            assert(vud->qi);
            memset(vud->qi + vud->nqueues, 0,
                   sizeof(vud->qi[0]) * (1 + (qidx - vud->nqueues)));
            vud->nqueues = qidx + 1;
        }
        if (!vud->qi[qidx]) {
            vud->qi[qidx] = calloc(sizeof(struct fv_QueueInfo), 1);
            assert(vud->qi[qidx]);
            vud->qi[qidx]->virtio_dev = vud;
            vud->qi[qidx]->qidx = qidx;
        } else {
            /* Shouldn't have been started */
            assert(vud->qi[qidx]->kick_fd == -1);
        }
        ourqi = vud->qi[qidx];
        ourqi->kick_fd = dev->vq[qidx].kick_fd;

        ourqi->kill_fd = eventfd(0, EFD_CLOEXEC | EFD_SEMAPHORE);
        assert(ourqi->kill_fd != -1);
        pthread_mutex_init(&ourqi->vq_lock, NULL);

        if (pthread_create(&ourqi->thread, NULL, fv_queue_thread, ourqi)) {
            fuse_log(FUSE_LOG_ERR, "%s: Failed to create thread for queue %d\n",
                     __func__, qidx);
            assert(0);
        }
    } else {
        fv_queue_cleanup_thread(vud, qidx);
    }
}
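
/*
 * Requests are pushed to a thread pool, so completion order need not match
 * submission order; tell libvhost-user as much.
 */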
static bool fv_queue_order(VuDev *dev, int qidx)
{
    return false;
}

static const VuDevIface fv_iface = {
    .get_features = fv_get_features,
    .set_features = fv_set_features,

    /* Don't need process message, we've not got any at vhost-user level */
    .queue_set_started = fv_queue_set_started,

    .queue_is_processed_in_order = fv_queue_order,
};
/*
 * Main loop; this mostly deals with events on the vhost-user
 * socket itself, and not actual fuse data.
 */
int virtio_loop(struct fuse_session *se)
{
    fuse_log(FUSE_LOG_INFO, "%s: Entry\n", __func__);

    while (!fuse_session_exited(se)) {
        struct pollfd pf[1];

        pf[0].fd = se->vu_socketfd;
        pf[0].events = POLLIN;
        pf[0].revents = 0;

        fuse_log(FUSE_LOG_DEBUG, "%s: Waiting for VU event\n", __func__);
        int poll_res = ppoll(pf, 1, NULL, NULL);

        if (poll_res == -1) {
            if (errno == EINTR) {
                fuse_log(FUSE_LOG_INFO, "%s: ppoll interrupted, going around\n",
                         __func__);
                continue;
            }
            fuse_log(FUSE_LOG_ERR, "virtio_loop ppoll: %m\n");
            break;
        }
        assert(poll_res == 1);
        if (pf[0].revents & (POLLERR | POLLHUP | POLLNVAL)) {
            fuse_log(FUSE_LOG_ERR, "%s: Unexpected poll revents %x\n", __func__,
                     pf[0].revents);
            break;
        }
        assert(pf[0].revents & POLLIN);
        fuse_log(FUSE_LOG_DEBUG, "%s: Got VU event\n", __func__);
        /* Mutual exclusion with fv_queue_thread() */
        int ret = pthread_rwlock_wrlock(&se->virtio_dev->vu_dispatch_rwlock);
        assert(ret == 0); /* there is no possible error case */

        bool ok = vu_dispatch(&se->virtio_dev->dev);

        pthread_rwlock_unlock(&se->virtio_dev->vu_dispatch_rwlock);

        if (!ok) {
            fuse_log(FUSE_LOG_ERR, "%s: vu_dispatch failed\n", __func__);
            break;
        }
    }

    /*
     * Make sure all fv_queue_thread()s quit on exit, as we're about to
     * free virtio dev and fuse session, no one should access them anymore.
     */
    for (int i = 0; i < se->virtio_dev->nqueues; i++) {
        if (!se->virtio_dev->qi[i]) {
            continue;
        }

        fuse_log(FUSE_LOG_INFO, "%s: Stopping queue %d thread\n", __func__, i);
        fv_queue_cleanup_thread(se->virtio_dev, i);
    }

    fuse_log(FUSE_LOG_INFO, "%s: Exit\n", __func__);

    return 0;
}
static void strreplace(char *s, char old, char new)
{
    for (; *s; ++s) {
        if (*s == old) {
            *s = new;
        }
    }
}
static bool fv_socket_lock(struct fuse_session *se)
{
    g_autofree gchar *sk_name = NULL;
    g_autofree gchar *pidfile = NULL;
    g_autofree gchar *dir = NULL;
    Error *local_err = NULL;

    dir = qemu_get_local_state_pathname("run/virtiofsd");

    if (g_mkdir_with_parents(dir, S_IRWXU) < 0) {
        fuse_log(FUSE_LOG_ERR, "%s: Failed to create directory %s: %s",
                 __func__, dir, strerror(errno));
        return false;
    }

    sk_name = g_strdup(se->vu_socket_path);
    strreplace(sk_name, '/', '.');
    pidfile = g_strdup_printf("%s/%s.pid", dir, sk_name);

    if (!qemu_write_pidfile(pidfile, &local_err)) {
        error_report_err(local_err);
        return false;
    }

    return true;
}
*se
)
887 struct sockaddr_un un
;
890 /* Nothing to do if fd is already initialized */
891 if (se
->vu_listen_fd
>= 0) {
895 if (strlen(se
->vu_socket_path
) >= sizeof(un
.sun_path
)) {
896 fuse_log(FUSE_LOG_ERR
, "Socket path too long\n");
900 if (!strlen(se
->vu_socket_path
)) {
901 fuse_log(FUSE_LOG_ERR
, "Socket path is empty\n");
905 /* Check the vu_socket_path is already used */
906 if (!fv_socket_lock(se
)) {
911 * Create the Unix socket to communicate with qemu
912 * based on QEMU's vhost-user-bridge
914 unlink(se
->vu_socket_path
);
915 strcpy(un
.sun_path
, se
->vu_socket_path
);
916 size_t addr_len
= sizeof(un
);
918 int listen_sock
= socket(AF_UNIX
, SOCK_STREAM
, 0);
919 if (listen_sock
== -1) {
920 fuse_log(FUSE_LOG_ERR
, "vhost socket creation: %m\n");
923 un
.sun_family
= AF_UNIX
;
926 * Unfortunately bind doesn't let you set the mask on the socket,
927 * so set umask to 077 and restore it later.
929 old_umask
= umask(0077);
930 if (bind(listen_sock
, (struct sockaddr
*)&un
, addr_len
) == -1) {
931 fuse_log(FUSE_LOG_ERR
, "vhost socket bind: %m\n");
938 if (listen(listen_sock
, 1) == -1) {
939 fuse_log(FUSE_LOG_ERR
, "vhost socket listen: %m\n");
944 se
->vu_listen_fd
= listen_sock
;
int virtio_session_mount(struct fuse_session *se)
{
    int ret;

    ret = fv_create_listen_socket(se);
    if (ret < 0) {
        return ret;
    }

    fuse_log(FUSE_LOG_INFO, "%s: Waiting for vhost-user socket connection...\n",
             __func__);
    int data_sock = accept(se->vu_listen_fd, NULL, NULL);
    if (data_sock == -1) {
        fuse_log(FUSE_LOG_ERR, "vhost socket accept: %m\n");
        close(se->vu_listen_fd);
        return -1;
    }
    close(se->vu_listen_fd);
    se->vu_listen_fd = -1;
    fuse_log(FUSE_LOG_INFO, "%s: Received vhost-user socket connection\n",
             __func__);

    /* TODO: Some cleanup/deallocation! */
    se->virtio_dev = calloc(sizeof(struct fv_VuDev), 1);
    if (!se->virtio_dev) {
        fuse_log(FUSE_LOG_ERR, "%s: virtio_dev calloc failed\n", __func__);
        close(data_sock);
        return -1;
    }

    se->vu_socketfd = data_sock;
    se->virtio_dev->se = se;
    pthread_rwlock_init(&se->virtio_dev->vu_dispatch_rwlock, NULL);
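
    /*
     * The '2' passed to vu_init() below is the maximum queue count:
     * queue 0 is the virtio-fs hiprio queue and queue 1 the single
     * request queue that fv_queue_set_started() currently allows.
     */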
    vu_init(&se->virtio_dev->dev, 2, se->vu_socketfd, fv_panic, fv_set_watch,
            fv_remove_watch, &fv_iface);

    return 0;
}
void virtio_session_close(struct fuse_session *se)
{
    close(se->vu_socketfd);

    if (!se->virtio_dev) {
        return;
    }

    free(se->virtio_dev->qi);
    pthread_rwlock_destroy(&se->virtio_dev->vu_dispatch_rwlock);
    free(se->virtio_dev);
    se->virtio_dev = NULL;
}