/*
 * virtio-fs glue for FUSE
 * Copyright (C) 2018 Red Hat, Inc. and/or its affiliates
 *
 * Authors:
 *   Dave Gilbert <dgilbert@redhat.com>
 *
 * Implements the glue between libfuse and libvhost-user
 *
 * This program can be distributed under the terms of the GNU LGPLv2.
 * See the file COPYING.LIB
 */
#include "qemu/osdep.h"
#include "qapi/error.h"
#include "standard-headers/linux/fuse.h"
#include "fuse_misc.h"
#include "fuse_virtio.h"

#include <sys/eventfd.h>
#include <sys/socket.h>

#include "libvhost-user.h"
struct fv_QueueInfo {
    pthread_t thread;
    /*
     * This lock protects the VuVirtq preventing races between
     * fv_queue_thread() and fv_queue_worker().
     */
    pthread_mutex_t vq_lock;

    struct fv_VuDev *virtio_dev;

    /* Our queue index, corresponds to array position */
    int qidx;
    int kick_fd;
    int kill_fd; /* For killing the thread */
};
typedef struct {
    struct fuse_chan ch;
    VuVirtqElement elem;
    /* Used to complete requests that involve no reply */
    bool reply_sent;
} FVRequest;

/*
 * We pass the dev element into libvhost-user
 * and then use it to get back to the outer
 * container for other data.
 */
struct fv_VuDev {
    VuDev dev;
    struct fuse_session *se;
    /*
     * Either handle virtqueues or vhost-user protocol messages. Don't do
     * both at the same time since that could lead to race conditions if
     * virtqueues or memory tables change while another thread is accessing
     * them.
     *
     * The assumptions are:
     * 1. fv_queue_thread() reads/writes to virtqueues and only reads VuDev.
     * 2. virtio_loop() reads/writes virtqueues and VuDev.
     *    It takes the vu_dispatch_rwlock write lock during virtqueue and
     *    memory table changes.
     */
    pthread_rwlock_t vu_dispatch_rwlock;
    /*
     * The following pair of fields are only accessed in the main
     * virtio_loop() thread.
     */
    size_t nqueues;
    struct fv_QueueInfo **qi;
};
struct virtio_fs_config {
    char tag[36];
    uint32_t num_queues;
};
/* Callback from libvhost-user */
static uint64_t fv_get_features(VuDev *dev)
{
    return 1ULL << VIRTIO_F_VERSION_1;
}
/* Callback from libvhost-user */
static void fv_set_features(VuDev *dev, uint64_t features)
{
}
/*
 * Callback from libvhost-user if there's a new fd we're supposed to listen
 * to, typically a queue kick?
 */
static void fv_set_watch(VuDev *dev, int fd, int condition, vu_watch_cb cb,
                         void *data)
{
    fuse_log(FUSE_LOG_WARNING, "%s: TODO! fd=%d\n", __func__, fd);
}
/*
 * Callback from libvhost-user if we're no longer supposed to listen on an fd
 */
static void fv_remove_watch(VuDev *dev, int fd)
{
    fuse_log(FUSE_LOG_WARNING, "%s: TODO! fd=%d\n", __func__, fd);
}
/* Callback from libvhost-user to panic */
static void fv_panic(VuDev *dev, const char *err)
{
    fuse_log(FUSE_LOG_ERR, "%s: libvhost-user: %s\n", __func__, err);
    /* TODO: Allow reconnects?? */
    exit(EXIT_FAILURE);
}
/*
 * Copy from an iovec into a fuse_buf (memory only)
 * Caller must ensure there is space
 */
static void copy_from_iov(struct fuse_buf *buf, size_t out_num,
                          const struct iovec *out_sg)
{
    void *dest = buf->mem;

    while (out_num) {
        size_t onelen = out_sg->iov_len;
        memcpy(dest, out_sg->iov_base, onelen);
        dest += onelen;
        out_sg++;
        out_num--;
    }
}
/*
 * Copy from one iov to another, the given number of bytes
 * The caller must have checked sizes.
 */
static void copy_iov(struct iovec *src_iov, int src_count,
                     struct iovec *dst_iov, int dst_count, size_t to_copy)
{
    size_t dst_offset = 0;
    /* Outer loop copies 'src' elements */
    while (to_copy) {
        assert(src_count);
        size_t src_len = src_iov[0].iov_len;
        size_t src_offset = 0;

        if (src_len > to_copy) {
            src_len = to_copy;
        }
        /* Inner loop copies contents of one 'src' to maybe multiple dst. */
        while (src_len) {
            assert(dst_count);
            size_t dst_len = dst_iov[0].iov_len - dst_offset;
            if (dst_len > src_len) {
                dst_len = src_len;
            }

            memcpy(dst_iov[0].iov_base + dst_offset,
                   src_iov[0].iov_base + src_offset, dst_len);
            src_len -= dst_len;
            to_copy -= dst_len;
            src_offset += dst_len;
            dst_offset += dst_len;

            assert(dst_offset <= dst_iov[0].iov_len);
            if (dst_offset == dst_iov[0].iov_len) {
                dst_offset = 0;
                dst_iov++;
                dst_count--;
            }
        }
        src_iov++;
        src_count--;
    }
}
/*
 * pthread_rwlock_rdlock() and pthread_rwlock_wrlock() can fail if
 * a deadlock condition is detected or the current thread already
 * owns the lock. They can also fail, like pthread_rwlock_unlock(),
 * if the mutex wasn't properly initialized. None of these are ever
 * expected to happen.
 */
static void vu_dispatch_rdlock(struct fv_VuDev *vud)
{
    int ret = pthread_rwlock_rdlock(&vud->vu_dispatch_rwlock);
    assert(ret == 0);
}

static void vu_dispatch_wrlock(struct fv_VuDev *vud)
{
    int ret = pthread_rwlock_wrlock(&vud->vu_dispatch_rwlock);
    assert(ret == 0);
}

static void vu_dispatch_unlock(struct fv_VuDev *vud)
{
    int ret = pthread_rwlock_unlock(&vud->vu_dispatch_rwlock);
    assert(ret == 0);
}
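
/*
 * Locking convention used below (derived from the callers in this file):
 * request-path functions such as virtio_send_msg(), virtio_send_data_iov()
 * and fv_queue_worker() take the vu_dispatch rwlock for reading and then the
 * per-queue vq_lock before touching a virtqueue, while virtio_loop() takes
 * the write lock around vu_dispatch() so that vhost-user messages (which may
 * change virtqueues or memory tables) are mutually exclusive with queue
 * processing.
 */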
/*
 * Called back by ll whenever it wants to send a reply/message back
 * The 1st element of the iov starts with the fuse_out_header
 * 'unique'==0 means it's a notify message.
 */
int virtio_send_msg(struct fuse_session *se, struct fuse_chan *ch,
                    struct iovec *iov, int count)
{
    FVRequest *req = container_of(ch, FVRequest, ch);
    struct fv_QueueInfo *qi = ch->qi;
    VuDev *dev = &se->virtio_dev->dev;
    VuVirtq *q = vu_get_queue(dev, qi->qidx);
    VuVirtqElement *elem = &req->elem;
    int ret = 0;

    assert(count >= 1);
    assert(iov[0].iov_len >= sizeof(struct fuse_out_header));

    struct fuse_out_header *out = iov[0].iov_base;
    /* TODO: Endianness! */

    size_t tosend_len = iov_size(iov, count);

    /* unique == 0 is notification, which we don't support */
    assert(out->unique);
    assert(!req->reply_sent);

    /* The 'in' part of the elem is to qemu */
    unsigned int in_num = elem->in_num;
    struct iovec *in_sg = elem->in_sg;
    size_t in_len = iov_size(in_sg, in_num);
    fuse_log(FUSE_LOG_DEBUG, "%s: elem %d: with %d in desc of length %zd\n",
             __func__, elem->index, in_num, in_len);

    /*
     * The elem should have room for a 'fuse_out_header' (out from fuse)
     * plus the data based on the len in the header.
     */
    if (in_len < sizeof(struct fuse_out_header)) {
        fuse_log(FUSE_LOG_ERR, "%s: elem %d too short for out_header\n",
                 __func__, elem->index);
        ret = -E2BIG;
        goto err;
    }
    if (in_len < tosend_len) {
        fuse_log(FUSE_LOG_ERR, "%s: elem %d too small for data len %zd\n",
                 __func__, elem->index, tosend_len);
        ret = -E2BIG;
        goto err;
    }

    copy_iov(iov, count, in_sg, in_num, tosend_len);

    vu_dispatch_rdlock(qi->virtio_dev);
    pthread_mutex_lock(&qi->vq_lock);
    vu_queue_push(dev, q, elem, tosend_len);
    vu_queue_notify(dev, q);
    pthread_mutex_unlock(&qi->vq_lock);
    vu_dispatch_unlock(qi->virtio_dev);

    req->reply_sent = true;

err:
    return ret;
}
/*
 * Callback from fuse_send_data_iov_* when it's virtio and the buffer
 * is a single FD with FUSE_BUF_IS_FD | FUSE_BUF_FD_SEEK
 * We need to send the iov and then the buffer.
 * Return 0 on success
 */
int virtio_send_data_iov(struct fuse_session *se, struct fuse_chan *ch,
                         struct iovec *iov, int count, struct fuse_bufvec *buf,
                         size_t len)
{
    FVRequest *req = container_of(ch, FVRequest, ch);
    struct fv_QueueInfo *qi = ch->qi;
    VuDev *dev = &se->virtio_dev->dev;
    VuVirtq *q = vu_get_queue(dev, qi->qidx);
    VuVirtqElement *elem = &req->elem;
    int ret = 0;

    assert(count >= 1);
    assert(iov[0].iov_len >= sizeof(struct fuse_out_header));

    struct fuse_out_header *out = iov[0].iov_base;
    /* TODO: Endianness! */

    size_t iov_len = iov_size(iov, count);
    size_t tosend_len = iov_len + len;

    out->len = tosend_len;

    fuse_log(FUSE_LOG_DEBUG, "%s: count=%d len=%zd iov_len=%zd\n", __func__,
             count, len, iov_len);

    /* unique == 0 is notification which we don't support */
    assert(out->unique);

    assert(!req->reply_sent);

    /* The 'in' part of the elem is to qemu */
    unsigned int in_num = elem->in_num;
    struct iovec *in_sg = elem->in_sg;
    size_t in_len = iov_size(in_sg, in_num);
    fuse_log(FUSE_LOG_DEBUG, "%s: elem %d: with %d in desc of length %zd\n",
             __func__, elem->index, in_num, in_len);

    /*
     * The elem should have room for a 'fuse_out_header' (out from fuse)
     * plus the data based on the len in the header.
     */
    if (in_len < sizeof(struct fuse_out_header)) {
        fuse_log(FUSE_LOG_ERR, "%s: elem %d too short for out_header\n",
                 __func__, elem->index);
        ret = E2BIG;
        goto err;
    }
    if (in_len < tosend_len) {
        fuse_log(FUSE_LOG_ERR, "%s: elem %d too small for data len %zd\n",
                 __func__, elem->index, tosend_len);
        ret = E2BIG;
        goto err;
    }

    /* TODO: Limit to 'len' */

    /* First copy the header data from iov->in_sg */
    copy_iov(iov, count, in_sg, in_num, iov_len);

    /*
     * Build a copy of the in_sg iov so we can skip bits in it,
     * including changing the offsets
     */
    struct iovec *in_sg_cpy = calloc(in_num, sizeof(struct iovec));
    assert(in_sg_cpy);
    memcpy(in_sg_cpy, in_sg, sizeof(struct iovec) * in_num);
    /* These get updated as we skip */
    struct iovec *in_sg_ptr = in_sg_cpy;
    int in_sg_cpy_count = in_num;

    /* skip over parts of in_sg that contained the header iov */
    size_t skip_size = iov_len;

    size_t in_sg_left = 0;
    do {
        while (skip_size != 0 && in_sg_cpy_count) {
            if (skip_size >= in_sg_ptr[0].iov_len) {
                skip_size -= in_sg_ptr[0].iov_len;
                in_sg_ptr++;
                in_sg_cpy_count--;
            } else {
                in_sg_ptr[0].iov_len -= skip_size;
                in_sg_ptr[0].iov_base += skip_size;
                skip_size = 0;
            }
        }

        int i;
        for (i = 0, in_sg_left = 0; i < in_sg_cpy_count; i++) {
            in_sg_left += in_sg_ptr[i].iov_len;
        }
        fuse_log(FUSE_LOG_DEBUG,
                 "%s: after skip skip_size=%zd in_sg_cpy_count=%d "
                 "in_sg_left=%zd\n",
                 __func__, skip_size, in_sg_cpy_count, in_sg_left);
        ret = preadv(buf->buf[0].fd, in_sg_ptr, in_sg_cpy_count,
                     buf->buf[0].pos);

        if (ret == -1) {
            ret = errno;
            fuse_log(FUSE_LOG_DEBUG, "%s: preadv failed (%m) len=%zd\n",
                     __func__, len);
            free(in_sg_cpy);
            goto err;
        }
        fuse_log(FUSE_LOG_DEBUG, "%s: preadv ret=%d len=%zd\n", __func__,
                 ret, len);
        if (ret < len && ret) {
            fuse_log(FUSE_LOG_DEBUG, "%s: ret < len\n", __func__);
            /* Skip over this much next time around */
            skip_size = ret;
            buf->buf[0].pos += ret;
            len -= ret;

            /* Let's do another read */
            continue;
        }
        if (!ret) {
            /* EOF case? */
            fuse_log(FUSE_LOG_DEBUG, "%s: !ret in_sg_left=%zd\n", __func__,
                     in_sg_left);
            break;
        }
        if (ret != len) {
            fuse_log(FUSE_LOG_DEBUG, "%s: ret!=len\n", __func__);
            ret = EIO;
            free(in_sg_cpy);
            goto err;
        }
        in_sg_left -= ret;
        len -= ret;
    } while (in_sg_left);
    free(in_sg_cpy);

    /* Need to fix out->len on EOF */
    if (len) {
        struct fuse_out_header *out_sg = in_sg[0].iov_base;

        tosend_len -= len;
        out_sg->len = tosend_len;
    }

    ret = 0;

    vu_dispatch_rdlock(qi->virtio_dev);
    pthread_mutex_lock(&qi->vq_lock);
    vu_queue_push(dev, q, elem, tosend_len);
    vu_queue_notify(dev, q);
    pthread_mutex_unlock(&qi->vq_lock);
    vu_dispatch_unlock(qi->virtio_dev);

err:
    if (ret == 0) {
        req->reply_sent = true;
    }

    return ret;
}
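
/*
 * Each worker thread calls unshare(CLONE_FS) once (tracked by this
 * per-thread flag) so that operations which temporarily change per-thread
 * filesystem state, such as the xattr handling mentioned in
 * fv_queue_worker(), don't leak into other threads.
 */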
static __thread bool clone_fs_called;
/* Process one FVRequest in a thread pool */
static void fv_queue_worker(gpointer data, gpointer user_data)
{
    struct fv_QueueInfo *qi = user_data;
    struct fuse_session *se = qi->virtio_dev->se;
    struct VuDev *dev = &qi->virtio_dev->dev;
    FVRequest *req = data;
    VuVirtqElement *elem = &req->elem;
    struct fuse_buf fbuf = {};
    bool allocated_bufv = false;
    struct fuse_bufvec bufv;
    struct fuse_bufvec *pbufv;

    assert(se->bufsize > sizeof(struct fuse_in_header));

    if (!clone_fs_called) {
        int ret;

        /* unshare FS for xattr operation */
        ret = unshare(CLONE_FS);
        /* should not fail */
        assert(ret == 0);

        clone_fs_called = true;
    }

    /*
     * An element contains one request and the space to send our response
     * They're spread over multiple descriptors in a scatter/gather set
     * and we can't trust the guest to keep them still; so copy in/out.
     */
    fbuf.mem = malloc(se->bufsize);
    assert(fbuf.mem);

    fuse_mutex_init(&req->ch.lock);
    req->ch.qi = qi;

    /* The 'out' part of the elem is from qemu */
    unsigned int out_num = elem->out_num;
    struct iovec *out_sg = elem->out_sg;
    size_t out_len = iov_size(out_sg, out_num);
    fuse_log(FUSE_LOG_DEBUG,
             "%s: elem %d: with %d out desc of length %zd\n",
             __func__, elem->index, out_num, out_len);

    /*
     * The elem should contain a 'fuse_in_header' (in to fuse)
     * plus the data based on the len in the header.
     */
    if (out_len < sizeof(struct fuse_in_header)) {
        fuse_log(FUSE_LOG_ERR, "%s: elem %d too short for in_header\n",
                 __func__, elem->index);
        assert(0); /* TODO */
    }
    if (out_len > se->bufsize) {
        fuse_log(FUSE_LOG_ERR, "%s: elem %d too large for buffer\n", __func__,
                 elem->index);
        assert(0); /* TODO */
    }
    /* Copy just the first element and look at it */
    copy_from_iov(&fbuf, 1, out_sg);

    pbufv = NULL; /* Compiler thinks it could be an uninitialised path */
    if (out_num > 2 &&
        out_sg[0].iov_len == sizeof(struct fuse_in_header) &&
        ((struct fuse_in_header *)fbuf.mem)->opcode == FUSE_WRITE &&
        out_sg[1].iov_len == sizeof(struct fuse_write_in)) {
        /*
         * For a write we don't actually need to copy the
         * data, we can just do it straight out of guest memory
         * but we must still copy the headers in case the guest
         * was nasty and changed them while we were using them.
         */
        fuse_log(FUSE_LOG_DEBUG, "%s: Write special case\n", __func__);

        /* copy the fuse_write_in header after the fuse_in_header */
        fbuf.mem += out_sg->iov_len;
        copy_from_iov(&fbuf, 1, out_sg + 1);
        fbuf.mem -= out_sg->iov_len;
        fbuf.size = out_sg[0].iov_len + out_sg[1].iov_len;

        /* Allocate the bufv, with space for the rest of the iov */
        pbufv = malloc(sizeof(struct fuse_bufvec) +
                       sizeof(struct fuse_buf) * (out_num - 2));
        if (!pbufv) {
            fuse_log(FUSE_LOG_ERR, "%s: pbufv malloc failed\n",
                     __func__);
            goto out;
        }

        allocated_bufv = true;
        pbufv->count = 1;
        pbufv->buf[0] = fbuf;

        size_t iovindex, pbufvindex;
        iovindex = 2; /* 2 headers, separate iovs */
        pbufvindex = 1; /* 2 headers, 1 fusebuf */

        for (; iovindex < out_num; iovindex++, pbufvindex++) {
            pbufv->count++;
            pbufv->buf[pbufvindex].pos = ~0; /* Dummy */
            pbufv->buf[pbufvindex].flags = 0;
            pbufv->buf[pbufvindex].mem = out_sg[iovindex].iov_base;
            pbufv->buf[pbufvindex].size = out_sg[iovindex].iov_len;
        }
    } else {
        /* Normal (non fast write) path */

        /* Copy the rest of the buffer */
        fbuf.mem += out_sg->iov_len;
        copy_from_iov(&fbuf, out_num - 1, out_sg + 1);
        fbuf.mem -= out_sg->iov_len;
        fbuf.size = out_len;

        /* TODO! Endianness of header */

        /* TODO: Add checks for fuse_session_exited */
        bufv.buf[0] = fbuf;
        bufv.count = 1;
        pbufv = &bufv;
    }
    pbufv->idx = 0;
    pbufv->off = 0;
    fuse_session_process_buf_int(se, pbufv, &req->ch);

out:
    if (allocated_bufv) {
        free(pbufv);
    }

    /* If the request has no reply, still recycle the virtqueue element */
    if (!req->reply_sent) {
        struct VuVirtq *q = vu_get_queue(dev, qi->qidx);

        fuse_log(FUSE_LOG_DEBUG, "%s: elem %d no reply sent\n", __func__,
                 elem->index);

        vu_dispatch_rdlock(qi->virtio_dev);
        pthread_mutex_lock(&qi->vq_lock);
        vu_queue_push(dev, q, elem, 0);
        vu_queue_notify(dev, q);
        pthread_mutex_unlock(&qi->vq_lock);
        vu_dispatch_unlock(qi->virtio_dev);
    }

    pthread_mutex_destroy(&req->ch.lock);
    free(fbuf.mem);
    free(req);
}
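
/*
 * Note on dispatch (see fv_queue_thread() below): when thread_pool_size is
 * zero, popped requests are collected on a GList and processed inline by the
 * queue thread via fv_queue_worker(); otherwise each request is pushed to a
 * GThreadPool that runs fv_queue_worker() on a pool thread.
 */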
/* Thread function for individual queues, created when a queue is 'started' */
static void *fv_queue_thread(void *opaque)
{
    struct fv_QueueInfo *qi = opaque;
    struct VuDev *dev = &qi->virtio_dev->dev;
    struct VuVirtq *q = vu_get_queue(dev, qi->qidx);
    struct fuse_session *se = qi->virtio_dev->se;
    GThreadPool *pool = NULL;
    GList *req_list = NULL;

    if (se->thread_pool_size) {
        fuse_log(FUSE_LOG_DEBUG, "%s: Creating thread pool for Queue %d\n",
                 __func__, qi->qidx);
        pool = g_thread_pool_new(fv_queue_worker, qi, se->thread_pool_size,
                                 FALSE, NULL);
        if (!pool) {
            fuse_log(FUSE_LOG_ERR, "%s: g_thread_pool_new failed\n", __func__);
            return NULL;
        }
    }

    fuse_log(FUSE_LOG_INFO, "%s: Start for queue %d kick_fd %d\n", __func__,
             qi->qidx, qi->kick_fd);
    while (1) {
        struct pollfd pf[2];

        pf[0].fd = qi->kick_fd;
        pf[0].events = POLLIN;
        pf[0].revents = 0;
        pf[1].fd = qi->kill_fd;
        pf[1].events = POLLIN;
        pf[1].revents = 0;

        fuse_log(FUSE_LOG_DEBUG, "%s: Waiting for Queue %d event\n", __func__,
                 qi->qidx);
        int poll_res = ppoll(pf, 2, NULL, NULL);

        if (poll_res == -1) {
            if (errno == EINTR) {
                fuse_log(FUSE_LOG_INFO, "%s: ppoll interrupted, going around\n",
                         __func__);
                continue;
            }
            fuse_log(FUSE_LOG_ERR, "fv_queue_thread ppoll: %m\n");
            break;
        }
        assert(poll_res >= 1);
        if (pf[0].revents & (POLLERR | POLLHUP | POLLNVAL)) {
            fuse_log(FUSE_LOG_ERR, "%s: Unexpected poll revents %x Queue %d\n",
                     __func__, pf[0].revents, qi->qidx);
            break;
        }
        if (pf[1].revents & (POLLERR | POLLHUP | POLLNVAL)) {
            fuse_log(FUSE_LOG_ERR,
                     "%s: Unexpected poll revents %x Queue %d killfd\n",
                     __func__, pf[1].revents, qi->qidx);
            break;
        }
        if (pf[1].revents) {
            fuse_log(FUSE_LOG_INFO, "%s: kill event on queue %d - quitting\n",
                     __func__, qi->qidx);
            break;
        }
        assert(pf[0].revents & POLLIN);
        fuse_log(FUSE_LOG_DEBUG, "%s: Got queue event on Queue %d\n", __func__,
                 qi->qidx);

        eventfd_t evalue;
        if (eventfd_read(qi->kick_fd, &evalue)) {
            fuse_log(FUSE_LOG_ERR, "Eventfd_read for queue: %m\n");
            break;
        }
        /* Mutual exclusion with virtio_loop() */
        vu_dispatch_rdlock(qi->virtio_dev);
        pthread_mutex_lock(&qi->vq_lock);
        /* out is from guest, in is to guest */
        unsigned int in_bytes, out_bytes;
        vu_queue_get_avail_bytes(dev, q, &in_bytes, &out_bytes, ~0, ~0);

        fuse_log(FUSE_LOG_DEBUG,
                 "%s: Queue %d gave evalue: %zx available: in: %u out: %u\n",
                 __func__, qi->qidx, (size_t)evalue, in_bytes, out_bytes);

        while (1) {
            FVRequest *req = vu_queue_pop(dev, q, sizeof(FVRequest));
            if (!req) {
                break;
            }

            req->reply_sent = false;

            if (!se->thread_pool_size) {
                req_list = g_list_prepend(req_list, req);
            } else {
                g_thread_pool_push(pool, req, NULL);
            }
        }

        pthread_mutex_unlock(&qi->vq_lock);
        vu_dispatch_unlock(qi->virtio_dev);

        /* Process all the requests. */
        if (!se->thread_pool_size && req_list != NULL) {
            g_list_foreach(req_list, fv_queue_worker, qi);
            g_list_free(req_list);
            req_list = NULL;
        }
    }

    if (pool) {
        g_thread_pool_free(pool, FALSE, TRUE);
    }

    return NULL;
}
static void fv_queue_cleanup_thread(struct fv_VuDev *vud, int qidx)
{
    int ret;
    struct fv_QueueInfo *ourqi;

    assert(qidx < vud->nqueues);
    ourqi = vud->qi[qidx];

    /* Kill the thread */
    if (eventfd_write(ourqi->kill_fd, 1)) {
        fuse_log(FUSE_LOG_ERR, "Eventfd_write for queue %d: %s\n",
                 qidx, strerror(errno));
    }
    ret = pthread_join(ourqi->thread, NULL);
    if (ret) {
        fuse_log(FUSE_LOG_ERR, "%s: Failed to join thread idx %d err %d\n",
                 __func__, qidx, ret);
    }
    pthread_mutex_destroy(&ourqi->vq_lock);
    close(ourqi->kill_fd);
    ourqi->kick_fd = -1;
    free(vud->qi[qidx]);
    vud->qi[qidx] = NULL;
}
/* Callback from libvhost-user on start or stop of a queue */
static void fv_queue_set_started(VuDev *dev, int qidx, bool started)
{
    struct fv_VuDev *vud = container_of(dev, struct fv_VuDev, dev);
    struct fv_QueueInfo *ourqi;

    fuse_log(FUSE_LOG_INFO, "%s: qidx=%d started=%d\n", __func__, qidx,
             started);

    /*
     * Ignore additional request queues for now. passthrough_ll.c must be
     * audited for thread-safety issues first. It was written with a
     * well-behaved client in mind and may not protect against all types of
     * races yet.
     */
    if (qidx > 1) {
        fuse_log(FUSE_LOG_ERR,
                 "%s: multiple request queues not yet implemented, please only "
                 "configure 1 request queue\n",
                 __func__);
        exit(EXIT_FAILURE);
    }

    if (started) {
        /* Fire up a thread to watch this queue */
        if (qidx >= vud->nqueues) {
            vud->qi = realloc(vud->qi, (qidx + 1) * sizeof(vud->qi[0]));
            assert(vud->qi);
            memset(vud->qi + vud->nqueues, 0,
                   sizeof(vud->qi[0]) * (1 + (qidx - vud->nqueues)));
            vud->nqueues = qidx + 1;
        }
        if (!vud->qi[qidx]) {
            vud->qi[qidx] = calloc(1, sizeof(struct fv_QueueInfo));
            assert(vud->qi[qidx]);
            vud->qi[qidx]->virtio_dev = vud;
            vud->qi[qidx]->qidx = qidx;
        } else {
            /* Shouldn't have been started */
            assert(vud->qi[qidx]->kick_fd == -1);
        }
        ourqi = vud->qi[qidx];
        ourqi->kick_fd = dev->vq[qidx].kick_fd;

        ourqi->kill_fd = eventfd(0, EFD_CLOEXEC | EFD_SEMAPHORE);
        assert(ourqi->kill_fd != -1);
        pthread_mutex_init(&ourqi->vq_lock, NULL);

        if (pthread_create(&ourqi->thread, NULL, fv_queue_thread, ourqi)) {
            fuse_log(FUSE_LOG_ERR, "%s: Failed to create thread for queue %d\n",
                     __func__, qidx);
            assert(0);
        }
    } else {
        /*
         * Temporarily drop write-lock taken in virtio_loop() so that
         * the queue thread doesn't block in virtio_send_msg().
         */
        vu_dispatch_unlock(vud);
        fv_queue_cleanup_thread(vud, qidx);
        vu_dispatch_wrlock(vud);
    }
}
static bool fv_queue_order(VuDev *dev, int qidx)
{
    return false;
}
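
/*
 * Only the callbacks actually needed are wired up below; fv_set_watch() and
 * fv_remove_watch() above are still TODO stubs because the queue kick fds
 * are polled directly by fv_queue_thread() rather than through
 * libvhost-user's watch mechanism.
 */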
static const VuDevIface fv_iface = {
    .get_features = fv_get_features,
    .set_features = fv_set_features,

    /* Don't need process message, we've not got any at vhost-user level */
    .queue_set_started = fv_queue_set_started,

    .queue_is_processed_in_order = fv_queue_order,
};
/*
 * Main loop; this mostly deals with events on the vhost-user
 * socket itself, and not actual fuse data.
 */
int virtio_loop(struct fuse_session *se)
{
    fuse_log(FUSE_LOG_INFO, "%s: Entry\n", __func__);

    while (!fuse_session_exited(se)) {
        struct pollfd pf[1];
        bool ok;

        pf[0].fd = se->vu_socketfd;
        pf[0].events = POLLIN;
        pf[0].revents = 0;

        fuse_log(FUSE_LOG_DEBUG, "%s: Waiting for VU event\n", __func__);
        int poll_res = ppoll(pf, 1, NULL, NULL);

        if (poll_res == -1) {
            if (errno == EINTR) {
                fuse_log(FUSE_LOG_INFO, "%s: ppoll interrupted, going around\n",
                         __func__);
                continue;
            }
            fuse_log(FUSE_LOG_ERR, "virtio_loop ppoll: %m\n");
            break;
        }
        assert(poll_res == 1);
        if (pf[0].revents & (POLLERR | POLLHUP | POLLNVAL)) {
            fuse_log(FUSE_LOG_ERR, "%s: Unexpected poll revents %x\n", __func__,
                     pf[0].revents);
            break;
        }
        assert(pf[0].revents & POLLIN);
        fuse_log(FUSE_LOG_DEBUG, "%s: Got VU event\n", __func__);
        /* Mutual exclusion with fv_queue_thread() */
        vu_dispatch_wrlock(se->virtio_dev);

        ok = vu_dispatch(&se->virtio_dev->dev);

        vu_dispatch_unlock(se->virtio_dev);

        if (!ok) {
            fuse_log(FUSE_LOG_ERR, "%s: vu_dispatch failed\n", __func__);
            break;
        }
    }

    /*
     * Make sure all fv_queue_thread()s quit on exit, as we're about to
     * free virtio dev and fuse session, no one should access them anymore.
     */
    for (int i = 0; i < se->virtio_dev->nqueues; i++) {
        if (!se->virtio_dev->qi[i]) {
            continue;
        }

        fuse_log(FUSE_LOG_INFO, "%s: Stopping queue %d thread\n", __func__, i);
        fv_queue_cleanup_thread(se->virtio_dev, i);
    }

    fuse_log(FUSE_LOG_INFO, "%s: Exit\n", __func__);

    return 0;
}
static void strreplace(char *s, char old, char new)
{
    for (; *s; ++s) {
        if (*s == old) {
            *s = new;
        }
    }
}
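
/*
 * fv_socket_lock() below derives a pidfile name from the vhost-user socket
 * path (with '/' replaced by '.') under the local state "run/virtiofsd"
 * directory; qemu_write_pidfile() fails if another process already holds
 * that pidfile, so this effectively stops two virtiofsd instances from
 * sharing one socket path.
 */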
static bool fv_socket_lock(struct fuse_session *se)
{
    g_autofree gchar *sk_name = NULL;
    g_autofree gchar *pidfile = NULL;
    g_autofree gchar *dir = NULL;
    Error *local_err = NULL;

    dir = qemu_get_local_state_pathname("run/virtiofsd");

    if (g_mkdir_with_parents(dir, S_IRWXU) < 0) {
        fuse_log(FUSE_LOG_ERR, "%s: Failed to create directory %s: %s",
                 __func__, dir, strerror(errno));
        return false;
    }

    sk_name = g_strdup(se->vu_socket_path);
    strreplace(sk_name, '/', '.');
    pidfile = g_strdup_printf("%s/%s.pid", dir, sk_name);

    if (!qemu_write_pidfile(pidfile, &local_err)) {
        error_report_err(local_err);
        return false;
    }

    return true;
}
static int fv_create_listen_socket(struct fuse_session *se)
{
    struct sockaddr_un un;
    mode_t old_umask;

    /* Nothing to do if fd is already initialized */
    if (se->vu_listen_fd >= 0) {
        return 0;
    }

    if (strlen(se->vu_socket_path) >= sizeof(un.sun_path)) {
        fuse_log(FUSE_LOG_ERR, "Socket path too long\n");
        return -1;
    }

    if (!strlen(se->vu_socket_path)) {
        fuse_log(FUSE_LOG_ERR, "Socket path is empty\n");
        return -1;
    }

    /* Check whether the vu_socket_path is already in use */
    if (!fv_socket_lock(se)) {
        return -1;
    }

    /*
     * Create the Unix socket to communicate with qemu
     * based on QEMU's vhost-user-bridge
     */
    unlink(se->vu_socket_path);
    strcpy(un.sun_path, se->vu_socket_path);
    size_t addr_len = sizeof(un);

    int listen_sock = socket(AF_UNIX, SOCK_STREAM, 0);
    if (listen_sock == -1) {
        fuse_log(FUSE_LOG_ERR, "vhost socket creation: %m\n");
        return -1;
    }
    un.sun_family = AF_UNIX;

    /*
     * Unfortunately bind doesn't let you set the mask on the socket,
     * so set umask appropriately and restore it later.
     */
    if (se->vu_socket_group) {
        old_umask = umask(S_IROTH | S_IWOTH | S_IXOTH);
    } else {
        old_umask = umask(S_IRGRP | S_IWGRP | S_IXGRP |
                          S_IROTH | S_IWOTH | S_IXOTH);
    }
    if (bind(listen_sock, (struct sockaddr *)&un, addr_len) == -1) {
        fuse_log(FUSE_LOG_ERR, "vhost socket bind: %m\n");
        close(listen_sock);
        umask(old_umask);
        return -1;
    }
    if (se->vu_socket_group) {
        struct group *g = getgrnam(se->vu_socket_group);
        if (g) {
            /* Warn (rather than fail) if the group can't be applied */
            if (chown(se->vu_socket_path, -1, g->gr_gid) == -1) {
                fuse_log(FUSE_LOG_WARNING,
                         "vhost socket failed to set group to %s (%d)\n",
                         se->vu_socket_group, g->gr_gid);
            }
        }
    }
    umask(old_umask);

    if (listen(listen_sock, 1) == -1) {
        fuse_log(FUSE_LOG_ERR, "vhost socket listen: %m\n");
        close(listen_sock);
        return -1;
    }

    se->vu_listen_fd = listen_sock;
    return 0;
}

int virtio_session_mount(struct fuse_session *se)
{
    int ret;

    /*
     * Test that unshare(CLONE_FS) works. fv_queue_worker() will need it. It's
     * an unprivileged system call but some Docker/Moby versions are known to
     * reject it via seccomp when CAP_SYS_ADMIN is not given.
     *
     * Note that the program is single-threaded here so this syscall has no
     * visible effect and is safe to make.
     */
    ret = unshare(CLONE_FS);
    if (ret == -1 && errno == EPERM) {
        fuse_log(FUSE_LOG_ERR, "unshare(CLONE_FS) failed with EPERM. If "
                 "running in a container please check that the container "
                 "runtime seccomp policy allows unshare.\n");
        return -1;
    }

    ret = fv_create_listen_socket(se);
    if (ret < 0) {
        return ret;
    }

    fuse_log(FUSE_LOG_INFO, "%s: Waiting for vhost-user socket connection...\n",
             __func__);
    int data_sock = accept(se->vu_listen_fd, NULL, NULL);
    if (data_sock == -1) {
        fuse_log(FUSE_LOG_ERR, "vhost socket accept: %m\n");
        close(se->vu_listen_fd);
        return -1;
    }
    close(se->vu_listen_fd);
    se->vu_listen_fd = -1;
    fuse_log(FUSE_LOG_INFO, "%s: Received vhost-user socket connection\n",
             __func__);

    /* TODO: Some cleanup/deallocation! */
    se->virtio_dev = calloc(1, sizeof(struct fv_VuDev));
    if (!se->virtio_dev) {
        fuse_log(FUSE_LOG_ERR, "%s: virtio_dev calloc failed\n", __func__);
        close(data_sock);
        return -1;
    }

    se->vu_socketfd = data_sock;
    se->virtio_dev->se = se;
    pthread_rwlock_init(&se->virtio_dev->vu_dispatch_rwlock, NULL);
    if (!vu_init(&se->virtio_dev->dev, 2, se->vu_socketfd, fv_panic, NULL,
                 fv_set_watch, fv_remove_watch, &fv_iface)) {
        fuse_log(FUSE_LOG_ERR, "%s: vu_init failed\n", __func__);
        return -1;
    }

    return 0;
}

void virtio_session_close(struct fuse_session *se)
{
    close(se->vu_socketfd);

    if (!se->virtio_dev) {
        return;
    }

    free(se->virtio_dev->qi);
    pthread_rwlock_destroy(&se->virtio_dev->vu_dispatch_rwlock);
    free(se->virtio_dev);
    se->virtio_dev = NULL;
}