/*
 * QTest testcase for the vhost-user
 *
 * Copyright (c) 2014 Virtual Open Systems Sarl.
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 */
#include "qemu/osdep.h"

#include "libqtest-single.h"
#include "qapi/error.h"
#include "qapi/qmp/qdict.h"
#include "qemu/config-file.h"
#include "qemu/option.h"
#include "qemu/range.h"
#include "qemu/sockets.h"
#include "chardev/char-fe.h"
#include "qemu/memfd.h"
#include "qemu/module.h"
#include "sysemu/sysemu.h"
#include "libqos/libqos.h"
#include "libqos/pci-pc.h"
#include "libqos/virtio-pci.h"

#include "libqos/malloc-pc.h"
#include "libqos/qgraph_internal.h"
#include "hw/virtio/virtio-net.h"

#include "standard-headers/linux/vhost_types.h"
#include "standard-headers/linux/virtio_ids.h"
#include "standard-headers/linux/virtio_net.h"
#include "standard-headers/linux/virtio_gpio.h"
#define QEMU_CMD_MEM    " -m %d -object memory-backend-file,id=mem,size=%dM," \
                        "mem-path=%s,share=on -numa node,memdev=mem"
#define QEMU_CMD_MEMFD  " -m %d -object memory-backend-memfd,id=mem,size=%dM," \
                        " -numa node,memdev=mem"
#define QEMU_CMD_CHR    " -chardev socket,id=%s,path=%s%s"
#define QEMU_CMD_NETDEV " -netdev vhost-user,id=hs0,chardev=%s,vhostforce=on"
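/*
 * For illustration only (not emitted verbatim by the test): with a 256M
 * guest and a hypothetical hugetlbfs mount at /dev/hugepages, QEMU_CMD_MEM
 * above expands to roughly:
 *
 *   -m 256 -object memory-backend-file,id=mem,size=256M,mem-path=/dev/hugepages,share=on
 *   -numa node,memdev=mem
 */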
#define HUGETLBFS_MAGIC       0x958458f6
/*********** FROM hw/virtio/vhost-user.c *************************************/

#define VHOST_MEMORY_MAX_NREGIONS    8
#define VHOST_MAX_VIRTQUEUES         0x100

#define VHOST_USER_F_PROTOCOL_FEATURES 30
#define VIRTIO_F_VERSION_1 32

#define VHOST_USER_PROTOCOL_F_MQ 0
#define VHOST_USER_PROTOCOL_F_LOG_SHMFD 1
#define VHOST_USER_PROTOCOL_F_CROSS_ENDIAN 6
#define VHOST_USER_PROTOCOL_F_CONFIG 9

#define VHOST_LOG_PAGE 0x1000
typedef enum VhostUserRequest {
    VHOST_USER_GET_FEATURES = 1,
    VHOST_USER_SET_FEATURES = 2,
    VHOST_USER_SET_OWNER = 3,
    VHOST_USER_RESET_OWNER = 4,
    VHOST_USER_SET_MEM_TABLE = 5,
    VHOST_USER_SET_LOG_BASE = 6,
    VHOST_USER_SET_LOG_FD = 7,
    VHOST_USER_SET_VRING_NUM = 8,
    VHOST_USER_SET_VRING_ADDR = 9,
    VHOST_USER_SET_VRING_BASE = 10,
    VHOST_USER_GET_VRING_BASE = 11,
    VHOST_USER_SET_VRING_KICK = 12,
    VHOST_USER_SET_VRING_CALL = 13,
    VHOST_USER_SET_VRING_ERR = 14,
    VHOST_USER_GET_PROTOCOL_FEATURES = 15,
    VHOST_USER_SET_PROTOCOL_FEATURES = 16,
    VHOST_USER_GET_QUEUE_NUM = 17,
    VHOST_USER_SET_VRING_ENABLE = 18,
    VHOST_USER_GET_CONFIG = 24,
    VHOST_USER_SET_CONFIG = 25,
} VhostUserRequest;
typedef struct VhostUserMemoryRegion {
    uint64_t guest_phys_addr;
    uint64_t memory_size;
    uint64_t userspace_addr;
    uint64_t mmap_offset;
} VhostUserMemoryRegion;
typedef struct VhostUserMemory {
    uint32_t nregions;
    uint32_t padding;
    VhostUserMemoryRegion regions[VHOST_MEMORY_MAX_NREGIONS];
} VhostUserMemory;
typedef struct VhostUserLog {
    uint64_t mmap_size;
    uint64_t mmap_offset;
} VhostUserLog;
typedef struct VhostUserMsg {
    VhostUserRequest request;

#define VHOST_USER_VERSION_MASK     (0x3)
#define VHOST_USER_REPLY_MASK       (0x1<<2)
    uint32_t flags;
    uint32_t size; /* the following payload size */
    union {
#define VHOST_USER_VRING_IDX_MASK   (0xff)
#define VHOST_USER_VRING_NOFD_MASK  (0x1<<8)
        uint64_t u64;
        struct vhost_vring_state state;
        struct vhost_vring_addr addr;
        VhostUserMemory memory;
        VhostUserLog log;
    } payload;
} QEMU_PACKED VhostUserMsg;
static VhostUserMsg m __attribute__ ((unused));
#define VHOST_USER_HDR_SIZE (sizeof(m.request) \
                             + sizeof(m.flags) \
                             + sizeof(m.size))

#define VHOST_USER_PAYLOAD_SIZE (sizeof(m) - VHOST_USER_HDR_SIZE)

/* The version of the protocol we support */
#define VHOST_USER_VERSION    (0x1)
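/*
 * On the wire a vhost-user message is the packed header above (request,
 * flags, size) immediately followed by `size` bytes of payload; replies in
 * chr_read() below are therefore written as VHOST_USER_HDR_SIZE + msg.size
 * bytes.
 */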
/*****************************************************************************/
enum {
    TEST_FLAGS_OK,
    TEST_FLAGS_DISCONNECT,
    TEST_FLAGS_BAD,
    TEST_FLAGS_END,
};

enum test_memfd {
    TEST_MEMFD_AUTO,
    TEST_MEMFD_YES,
    TEST_MEMFD_NO,
};
typedef struct TestServer {
    gchar *socket_path;
    gchar *mig_path;
    gchar *chr_name;
    gchar *tmpfs;
    CharBackend chr;
    int fds_num;
    int fds[VHOST_MEMORY_MAX_NREGIONS];
    VhostUserMemory memory;
    GMainContext *context;
    GMainLoop *loop;
    GThread *thread;
    GMutex data_mutex;
    GCond data_cond;
    int log_fd;
    uint64_t rings;
    bool test_fail;
    int test_flags;
    int queues;
    struct vhost_user_ops *vu_ops;
} TestServer;
struct vhost_user_ops {
    /* Device types. */
    int type;
    void (*append_opts)(TestServer *s, GString *cmd_line,
                        const char *chr_opts);

    /* VHOST-USER commands. */
    uint64_t (*get_features)(TestServer *s);
    void (*set_features)(TestServer *s, CharBackend *chr,
                         VhostUserMsg *msg);
    void (*get_protocol_features)(TestServer *s,
                                  CharBackend *chr, VhostUserMsg *msg);
};
static const char *init_hugepagefs(void);
static TestServer *test_server_new(const gchar *name,
                                   struct vhost_user_ops *ops);
static void test_server_free(TestServer *server);
static void test_server_listen(TestServer *server);
static void append_vhost_net_opts(TestServer *s, GString *cmd_line,
                                  const char *chr_opts)
{
    g_string_append_printf(cmd_line, QEMU_CMD_CHR QEMU_CMD_NETDEV,
                           s->chr_name, s->socket_path,
                           chr_opts, s->chr_name);
}
/*
 * For GPIO there are no other magic devices we need to add (like
 * block or netdev) so all we need to worry about is the vhost-user
 * chardev socket.
 */
static void append_vhost_gpio_opts(TestServer *s, GString *cmd_line,
                                   const char *chr_opts)
{
    g_string_append_printf(cmd_line, QEMU_CMD_CHR,
                           s->chr_name, s->socket_path,
                           chr_opts);
}
static void append_mem_opts(TestServer *server, GString *cmd_line,
                            int size, enum test_memfd memfd)
{
    if (memfd == TEST_MEMFD_AUTO) {
        memfd = qemu_memfd_check(MFD_ALLOW_SEALING) ? TEST_MEMFD_YES
                                                    : TEST_MEMFD_NO;
    }

    if (memfd == TEST_MEMFD_YES) {
        g_string_append_printf(cmd_line, QEMU_CMD_MEMFD, size, size);
    } else {
        const char *root = init_hugepagefs() ? : server->tmpfs;

        g_string_append_printf(cmd_line, QEMU_CMD_MEM, size, size, root);
    }
}
static bool wait_for_fds(TestServer *s)
{
    gint64 end_time;
    bool got_region;
    int i;

    g_mutex_lock(&s->data_mutex);

    end_time = g_get_monotonic_time() + 5 * G_TIME_SPAN_SECOND;
    while (!s->fds_num) {
        if (!g_cond_wait_until(&s->data_cond, &s->data_mutex, end_time)) {
            /* timeout has passed */
            g_assert(s->fds_num);
            break;
        }
    }

    /* check for sanity */
    g_assert_cmpint(s->fds_num, >, 0);
    g_assert_cmpint(s->fds_num, ==, s->memory.nregions);

    g_mutex_unlock(&s->data_mutex);

    got_region = false;
    for (i = 0; i < s->memory.nregions; ++i) {
        VhostUserMemoryRegion *reg = &s->memory.regions[i];
        if (reg->guest_phys_addr == 0) {
            got_region = true;
            break;
        }
    }
    if (!got_region) {
        g_test_skip("No memory at address 0x0");
    }
    return got_region;
}
static void read_guest_mem_server(QTestState *qts, TestServer *s)
{
    uint8_t *guest_mem;
    int i, j;
    size_t size;

    g_mutex_lock(&s->data_mutex);

    /* iterate all regions */
    for (i = 0; i < s->fds_num; i++) {

        /* We'll check only the region starting at 0x0 */
        if (s->memory.regions[i].guest_phys_addr != 0x0) {
            continue;
        }

        g_assert_cmpint(s->memory.regions[i].memory_size, >, 1024);

        size = s->memory.regions[i].memory_size +
            s->memory.regions[i].mmap_offset;

        guest_mem = mmap(0, size, PROT_READ | PROT_WRITE,
                         MAP_SHARED, s->fds[i], 0);

        g_assert(guest_mem != MAP_FAILED);
        guest_mem += (s->memory.regions[i].mmap_offset / sizeof(*guest_mem));

        for (j = 0; j < 1024; j++) {
            uint32_t a = qtest_readb(qts, s->memory.regions[i].guest_phys_addr + j);
            uint32_t b = guest_mem[j];

            g_assert_cmpint(a, ==, b);
        }

        munmap(guest_mem, s->memory.regions[i].memory_size);
    }

    g_mutex_unlock(&s->data_mutex);
}
static void *thread_function(void *data)
{
    GMainLoop *loop = data;
    g_main_loop_run(loop);
    return NULL;
}
static int chr_can_read(void *opaque)
{
    return VHOST_USER_HDR_SIZE;
}
static void chr_read(void *opaque, const uint8_t *buf, int size)
{
    g_autoptr(GError) err = NULL;
    TestServer *s = opaque;
    CharBackend *chr = &s->chr;
    VhostUserMsg msg;
    uint8_t *p = (uint8_t *) &msg;
    int fd = -1;

    if (s->test_fail) {
        qemu_chr_fe_disconnect(chr);
        /* now switch to non-failure */
        s->test_fail = false;
    }

    if (size != VHOST_USER_HDR_SIZE) {
        qos_printf("%s: Wrong message size received %d\n", __func__, size);
        return;
    }

    g_mutex_lock(&s->data_mutex);
    memcpy(p, buf, VHOST_USER_HDR_SIZE);

    if (msg.size) {
        p += VHOST_USER_HDR_SIZE;
        size = qemu_chr_fe_read_all(chr, p, msg.size);
        if (size != msg.size) {
            qos_printf("%s: Wrong message size received %d != %d\n",
                       __func__, size, msg.size);
            goto out;
        }
    }

    switch (msg.request) {
    case VHOST_USER_GET_FEATURES:
        /* Mandatory for tests to define get_features */
        g_assert(s->vu_ops->get_features);

        /* send back features to qemu */
        msg.flags |= VHOST_USER_REPLY_MASK;
        msg.size = sizeof(m.payload.u64);

        if (s->test_flags >= TEST_FLAGS_BAD) {
            msg.payload.u64 = 0;
            s->test_flags = TEST_FLAGS_END;
        } else {
            msg.payload.u64 = s->vu_ops->get_features(s);
        }

        qemu_chr_fe_write_all(chr, (uint8_t *) &msg,
                              VHOST_USER_HDR_SIZE + msg.size);
        break;

    case VHOST_USER_SET_FEATURES:
        if (s->vu_ops->set_features) {
            s->vu_ops->set_features(s, chr, &msg);
        }
        break;

    case VHOST_USER_SET_OWNER:
        /*
         * We don't need to do anything here, the remote is just
         * letting us know it is in charge. Just log it.
         */
        qos_printf("set_owner: start of session\n");
        break;

    case VHOST_USER_GET_PROTOCOL_FEATURES:
        if (s->vu_ops->get_protocol_features) {
            s->vu_ops->get_protocol_features(s, chr, &msg);
        }
        break;

    case VHOST_USER_GET_CONFIG:
        /*
         * Treat GET_CONFIG as a NOP and just reply and let the guest
         * consider we have updated its memory. Tests currently don't
         * require working configs.
         */
        msg.flags |= VHOST_USER_REPLY_MASK;
        p = (uint8_t *) &msg;
        qemu_chr_fe_write_all(chr, p, VHOST_USER_HDR_SIZE + msg.size);
        break;

    case VHOST_USER_SET_PROTOCOL_FEATURES:
        /*
         * We did set VHOST_USER_F_PROTOCOL_FEATURES so it's valid for
         * the remote end to send this. There is no handshake reply so
         * just log the details for debugging.
         */
        qos_printf("set_protocol_features: 0x%"PRIx64 "\n", msg.payload.u64);
        break;

        /*
         * A real vhost-user backend would actually set the size and
         * address of the vrings but we can simply report them.
         */
    case VHOST_USER_SET_VRING_NUM:
        qos_printf("set_vring_num: %d/%d\n",
                   msg.payload.state.index, msg.payload.state.num);
        break;
    case VHOST_USER_SET_VRING_ADDR:
        qos_printf("set_vring_addr: 0x%"PRIx64"/0x%"PRIx64"/0x%"PRIx64"\n",
                   msg.payload.addr.avail_user_addr,
                   msg.payload.addr.desc_user_addr,
                   msg.payload.addr.used_user_addr);
        break;

    case VHOST_USER_GET_VRING_BASE:
        /* send back vring base to qemu */
        msg.flags |= VHOST_USER_REPLY_MASK;
        msg.size = sizeof(m.payload.state);
        msg.payload.state.num = 0;
        p = (uint8_t *) &msg;
        qemu_chr_fe_write_all(chr, p, VHOST_USER_HDR_SIZE + msg.size);

        assert(msg.payload.state.index < s->queues * 2);
        s->rings &= ~(0x1ULL << msg.payload.state.index);
        g_cond_broadcast(&s->data_cond);
        break;

    case VHOST_USER_SET_MEM_TABLE:
        /* received the mem table */
        memcpy(&s->memory, &msg.payload.memory, sizeof(msg.payload.memory));
        s->fds_num = qemu_chr_fe_get_msgfds(chr, s->fds,
                                            G_N_ELEMENTS(s->fds));

        /* signal the test that it can continue */
        g_cond_broadcast(&s->data_cond);
        break;

    case VHOST_USER_SET_VRING_KICK:
    case VHOST_USER_SET_VRING_CALL:
        /* consume the passed eventfd */
        qemu_chr_fe_get_msgfds(chr, &fd, 1);
        /*
         * This is a non-blocking eventfd.
         * The receive function forces it to be blocking,
         * so revert it back to non-blocking.
         */
        g_unix_set_fd_nonblocking(fd, true, &err);
        g_assert_no_error(err);
        break;

    case VHOST_USER_SET_LOG_BASE:
        if (s->log_fd != -1) {
            close(s->log_fd);
            s->log_fd = -1;
        }
        qemu_chr_fe_get_msgfds(chr, &s->log_fd, 1);
        msg.flags |= VHOST_USER_REPLY_MASK;
        msg.size = 0;
        p = (uint8_t *) &msg;
        qemu_chr_fe_write_all(chr, p, VHOST_USER_HDR_SIZE);

        g_cond_broadcast(&s->data_cond);
        break;

    case VHOST_USER_SET_VRING_BASE:
        assert(msg.payload.state.index < s->queues * 2);
        s->rings |= 0x1ULL << msg.payload.state.index;
        g_cond_broadcast(&s->data_cond);
        break;

    case VHOST_USER_GET_QUEUE_NUM:
        msg.flags |= VHOST_USER_REPLY_MASK;
        msg.size = sizeof(m.payload.u64);
        msg.payload.u64 = s->queues;
        p = (uint8_t *) &msg;
        qemu_chr_fe_write_all(chr, p, VHOST_USER_HDR_SIZE + msg.size);
        break;

    case VHOST_USER_SET_VRING_ENABLE:
        /*
         * Another case we ignore as we don't need to respond. With a
         * fully functioning vhost-user we would enable/disable the
         * vring monitoring as requested.
         */
        qos_printf("set_vring(%d)=%s\n", msg.payload.state.index,
                   msg.payload.state.num ? "enabled" : "disabled");
        break;

    default:
        qos_printf("vhost-user: un-handled message: %d\n", msg.request);
        break;
    }

out:
    g_mutex_unlock(&s->data_mutex);
}
static const char *init_hugepagefs(void)
{
#ifdef CONFIG_LINUX
    static const char *hugepagefs;
    const char *path = getenv("QTEST_HUGETLBFS_PATH");
    struct statfs fs;
    int ret;

    if (hugepagefs) {
        return hugepagefs;
    }
    if (!path) {
        return NULL;
    }

    if (access(path, R_OK | W_OK | X_OK)) {
        qos_printf("access on path (%s): %s", path, strerror(errno));
        g_test_fail();
        return NULL;
    }

    do {
        ret = statfs(path, &fs);
    } while (ret != 0 && errno == EINTR);

    if (ret != 0) {
        qos_printf("statfs on path (%s): %s", path, strerror(errno));
        g_test_fail();
        return NULL;
    }

    if (fs.f_type != HUGETLBFS_MAGIC) {
        qos_printf("Warning: path not on HugeTLBFS: %s", path);
        g_test_fail();
        return NULL;
    }

    hugepagefs = path;
    return hugepagefs;
#else
    return NULL;
#endif
}
static TestServer *test_server_new(const gchar *name,
                                   struct vhost_user_ops *ops)
{
    TestServer *server = g_new0(TestServer, 1);
    g_autofree const char *tmpfs = NULL;
    GError *err = NULL;

    server->context = g_main_context_new();
    server->loop = g_main_loop_new(server->context, FALSE);

    /* run the main loop thread so the chardev may operate */
    server->thread = g_thread_new(NULL, thread_function, server->loop);

    tmpfs = g_dir_make_tmp("vhost-test-XXXXXX", &err);
    if (!tmpfs) {
        g_test_message("Can't create temporary directory in %s: %s",
                       g_get_tmp_dir(), err->message);
        g_error_free(err);
    }
    g_assert(tmpfs);

    server->tmpfs = g_strdup(tmpfs);
    server->socket_path = g_strdup_printf("%s/%s.sock", tmpfs, name);
    server->mig_path = g_strdup_printf("%s/%s.mig", tmpfs, name);
    server->chr_name = g_strdup_printf("chr-%s", name);

    g_mutex_init(&server->data_mutex);
    g_cond_init(&server->data_cond);

    server->log_fd = -1;
    server->queues = 1;
    server->vu_ops = ops;

    return server;
}
static void chr_event(void *opaque, QEMUChrEvent event)
{
    TestServer *s = opaque;

    if (s->test_flags == TEST_FLAGS_END &&
        event == CHR_EVENT_CLOSED) {
        s->test_flags = TEST_FLAGS_OK;
    }
}
static void test_server_create_chr(TestServer *server, const gchar *opt)
{
    g_autofree gchar *chr_path = g_strdup_printf("unix:%s%s",
                                                 server->socket_path, opt);
    Chardev *chr;

    chr = qemu_chr_new(server->chr_name, chr_path, server->context);
    g_assert(chr);

    qemu_chr_fe_init(&server->chr, chr, &error_abort);
    qemu_chr_fe_set_handlers(&server->chr, chr_can_read, chr_read,
                             chr_event, NULL, server, server->context, true);
}
static void test_server_listen(TestServer *server)
{
    test_server_create_chr(server, ",server=on,wait=off");
}
static void test_server_free(TestServer *server)
{
    int i, ret;

    /* finish the helper thread and dispatch pending sources */
    g_main_loop_quit(server->loop);
    g_thread_join(server->thread);
    while (g_main_context_pending(NULL)) {
        g_main_context_iteration(NULL, TRUE);
    }

    unlink(server->socket_path);
    g_free(server->socket_path);

    unlink(server->mig_path);
    g_free(server->mig_path);

    ret = rmdir(server->tmpfs);
    if (ret != 0) {
        g_test_message("unable to rmdir: path (%s): %s",
                       server->tmpfs, strerror(errno));
    }
    g_free(server->tmpfs);

    qemu_chr_fe_deinit(&server->chr, true);

    for (i = 0; i < server->fds_num; i++) {
        close(server->fds[i]);
    }

    if (server->log_fd != -1) {
        close(server->log_fd);
    }

    g_free(server->chr_name);

    g_main_loop_unref(server->loop);
    g_main_context_unref(server->context);
    g_cond_clear(&server->data_cond);
    g_mutex_clear(&server->data_mutex);
    g_free(server);
}
static void wait_for_log_fd(TestServer *s)
{
    gint64 end_time;

    g_mutex_lock(&s->data_mutex);
    end_time = g_get_monotonic_time() + 5 * G_TIME_SPAN_SECOND;
    while (s->log_fd == -1) {
        if (!g_cond_wait_until(&s->data_cond, &s->data_mutex, end_time)) {
            /* timeout has passed */
            g_assert(s->log_fd != -1);
            break;
        }
    }

    g_mutex_unlock(&s->data_mutex);
}
static void write_guest_mem(TestServer *s, uint32_t seed)
{
    uint32_t *guest_mem;
    int i, j;
    size_t size;

    /* iterate all regions */
    for (i = 0; i < s->fds_num; i++) {

        /* We'll write only the region starting at 0x0 */
        if (s->memory.regions[i].guest_phys_addr != 0x0) {
            continue;
        }

        g_assert_cmpint(s->memory.regions[i].memory_size, >, 1024);

        size = s->memory.regions[i].memory_size +
            s->memory.regions[i].mmap_offset;

        guest_mem = mmap(0, size, PROT_READ | PROT_WRITE,
                         MAP_SHARED, s->fds[i], 0);

        g_assert(guest_mem != MAP_FAILED);
        guest_mem += (s->memory.regions[i].mmap_offset / sizeof(*guest_mem));

        for (j = 0; j < 256; j++) {
            guest_mem[j] = seed + j;
        }

        munmap(guest_mem, s->memory.regions[i].memory_size);
    }
}
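/*
 * Each bit of the dirty log covers one VHOST_LOG_PAGE (4 KiB) of guest
 * memory, so e.g. a 256 MiB guest needs 256 MiB / (4096 * 8) = 8192 bytes
 * of log space; test_migrate() below asserts exactly that value.
 */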
static guint64 get_log_size(TestServer *s)
{
    guint64 log_size = 0;
    int i;

    for (i = 0; i < s->memory.nregions; ++i) {
        VhostUserMemoryRegion *reg = &s->memory.regions[i];
        guint64 last = range_get_last(reg->guest_phys_addr,
                                      reg->memory_size);
        log_size = MAX(log_size, last / (8 * VHOST_LOG_PAGE) + 1);
    }

    return log_size;
}
typedef struct TestMigrateSource {
    GSource source;
    TestServer *src,
               *dest;
} TestMigrateSource;

static gboolean
test_migrate_source_check(GSource *source)
{
    TestMigrateSource *t = (TestMigrateSource *)source;
    gboolean overlap = t->src->rings && t->dest->rings;

    g_assert(!overlap);

    return FALSE;
}

GSourceFuncs test_migrate_source_funcs = {
    .check = test_migrate_source_check,
};
static void vhost_user_test_cleanup(void *s)
{
    TestServer *server = s;

    qos_invalidate_command_line();
    test_server_free(server);
}
static void *vhost_user_test_setup(GString *cmd_line, void *arg)
{
    TestServer *server = test_server_new("vhost-user-test", arg);
    test_server_listen(server);

    append_mem_opts(server, cmd_line, 256, TEST_MEMFD_AUTO);
    server->vu_ops->append_opts(server, cmd_line, "");

    g_test_queue_destroy(vhost_user_test_cleanup, server);

    return server;
}
static void *vhost_user_test_setup_memfd(GString *cmd_line, void *arg)
{
    TestServer *server = test_server_new("vhost-user-test", arg);
    test_server_listen(server);

    append_mem_opts(server, cmd_line, 256, TEST_MEMFD_YES);
    server->vu_ops->append_opts(server, cmd_line, "");

    g_test_queue_destroy(vhost_user_test_cleanup, server);

    return server;
}
static void test_read_guest_mem(void *obj, void *arg, QGuestAllocator *alloc)
{
    TestServer *server = arg;

    if (!wait_for_fds(server)) {
        return;
    }

    read_guest_mem_server(global_qtest, server);
}
static void test_migrate(void *obj, void *arg, QGuestAllocator *alloc)
{
    TestServer *s = arg;
    TestServer *dest;
    GString *dest_cmdline;
    char *uri;
    QTestState *to;
    GSource *source;
    QDict *rsp;
    guint8 *log;
    guint64 size;

    if (!wait_for_fds(s)) {
        return;
    }

    dest = test_server_new("dest", s->vu_ops);
    dest_cmdline = g_string_new(qos_get_current_command_line());
    uri = g_strdup_printf("%s%s", "unix:", dest->mig_path);

    size = get_log_size(s);
    g_assert_cmpint(size, ==, (256 * 1024 * 1024) / (VHOST_LOG_PAGE * 8));

    test_server_listen(dest);
    g_string_append_printf(dest_cmdline, " -incoming %s", uri);
    append_mem_opts(dest, dest_cmdline, 256, TEST_MEMFD_AUTO);
    dest->vu_ops->append_opts(dest, dest_cmdline, "");
    to = qtest_init(dest_cmdline->str);

    /* This would be where you call qos_allocate_objects(to, NULL), if you want
     * to talk to the QVirtioNet object on the destination.
     */

    source = g_source_new(&test_migrate_source_funcs,
                          sizeof(TestMigrateSource));
    ((TestMigrateSource *)source)->src = s;
    ((TestMigrateSource *)source)->dest = dest;
    g_source_attach(source, s->context);

    /* slow down migration to have time to fiddle with log */
    /* TODO: qtest could learn to break on some places */
    rsp = qmp("{ 'execute': 'migrate-set-parameters',"
              "'arguments': { 'max-bandwidth': 10 } }");
    g_assert(qdict_haskey(rsp, "return"));
    qobject_unref(rsp);

    rsp = qmp("{ 'execute': 'migrate', 'arguments': { 'uri': %s } }", uri);
    g_assert(qdict_haskey(rsp, "return"));
    qobject_unref(rsp);

    wait_for_log_fd(s);

    log = mmap(0, size, PROT_READ | PROT_WRITE, MAP_SHARED, s->log_fd, 0);
    g_assert(log != MAP_FAILED);

    /* modify first page */
    write_guest_mem(s, 0x42);
    log[0] = 1;
    munmap(log, size);

    /* speed things up */
    rsp = qmp("{ 'execute': 'migrate-set-parameters',"
              "'arguments': { 'max-bandwidth': 0 } }");
    g_assert(qdict_haskey(rsp, "return"));
    qobject_unref(rsp);

    qmp_eventwait("STOP");
    qtest_qmp_eventwait(to, "RESUME");

    g_assert(wait_for_fds(dest));
    read_guest_mem_server(to, dest);

    g_source_destroy(source);
    g_source_unref(source);

    qtest_quit(to);
    test_server_free(dest);
    g_free(uri);
    g_string_free(dest_cmdline, true);
}
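/*
 * A queue pair contributes two vrings (e.g. rx and tx for virtio-net), so
 * callers of the helper below wait for 2 started rings on a single-queue
 * device and for queues * 2 on a multiqueue one.
 */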
static void wait_for_rings_started(TestServer *s, size_t count)
{
    gint64 end_time;

    g_mutex_lock(&s->data_mutex);
    end_time = g_get_monotonic_time() + 5 * G_TIME_SPAN_SECOND;
    while (ctpop64(s->rings) != count) {
        if (!g_cond_wait_until(&s->data_cond, &s->data_mutex, end_time)) {
            /* timeout has passed */
            g_assert_cmpint(ctpop64(s->rings), ==, count);
            break;
        }
    }

    g_mutex_unlock(&s->data_mutex);
}
static inline void test_server_connect(TestServer *server)
{
    test_server_create_chr(server, ",reconnect=1");
}
static gboolean
reconnect_cb(gpointer user_data)
{
    TestServer *s = user_data;

    qemu_chr_fe_disconnect(&s->chr);

    return FALSE;
}
static gpointer
connect_thread(gpointer data)
{
    TestServer *s = data;

    /* wait for qemu to start before first try, to avoid extra warnings */
    g_usleep(G_USEC_PER_SEC);
    test_server_connect(s);

    return NULL;
}
static void *vhost_user_test_setup_reconnect(GString *cmd_line, void *arg)
{
    TestServer *s = test_server_new("reconnect", arg);

    g_thread_new("connect", connect_thread, s);
    append_mem_opts(s, cmd_line, 256, TEST_MEMFD_AUTO);
    s->vu_ops->append_opts(s, cmd_line, ",server=on");

    g_test_queue_destroy(vhost_user_test_cleanup, s);

    return s;
}
static void test_reconnect(void *obj, void *arg, QGuestAllocator *alloc)
{
    TestServer *s = arg;
    GSource *src;

    if (!wait_for_fds(s)) {
        return;
    }

    wait_for_rings_started(s, 2);

    /* reconnect */
    s->fds_num = 0;
    s->rings = 0;
    src = g_idle_source_new();
    g_source_set_callback(src, reconnect_cb, s, NULL);
    g_source_attach(src, s->context);
    g_source_unref(src);

    g_assert(wait_for_fds(s));
    wait_for_rings_started(s, 2);
}
static void *vhost_user_test_setup_connect_fail(GString *cmd_line, void *arg)
{
    TestServer *s = test_server_new("connect-fail", arg);

    s->test_fail = true;

    g_thread_new("connect", connect_thread, s);
    append_mem_opts(s, cmd_line, 256, TEST_MEMFD_AUTO);
    s->vu_ops->append_opts(s, cmd_line, ",server=on");

    g_test_queue_destroy(vhost_user_test_cleanup, s);

    return s;
}
static void *vhost_user_test_setup_flags_mismatch(GString *cmd_line, void *arg)
{
    TestServer *s = test_server_new("flags-mismatch", arg);

    s->test_flags = TEST_FLAGS_DISCONNECT;

    g_thread_new("connect", connect_thread, s);
    append_mem_opts(s, cmd_line, 256, TEST_MEMFD_AUTO);
    s->vu_ops->append_opts(s, cmd_line, ",server=on");

    g_test_queue_destroy(vhost_user_test_cleanup, s);

    return s;
}
static void test_vhost_user_started(void *obj, void *arg, QGuestAllocator *alloc)
{
    TestServer *s = arg;

    if (!wait_for_fds(s)) {
        return;
    }

    wait_for_rings_started(s, 2);
}
static void *vhost_user_test_setup_multiqueue(GString *cmd_line, void *arg)
{
    TestServer *s = vhost_user_test_setup(cmd_line, arg);

    s->queues = 2;
    g_string_append_printf(cmd_line,
                           " -set netdev.hs0.queues=%d"
                           " -global virtio-net-pci.vectors=%d",
                           s->queues, s->queues * 2 + 2);

    return s;
}
static void test_multiqueue(void *obj, void *arg, QGuestAllocator *alloc)
{
    TestServer *s = arg;

    wait_for_rings_started(s, s->queues * 2);
}
static uint64_t vu_net_get_features(TestServer *s)
{
    uint64_t features = 0x1ULL << VHOST_F_LOG_ALL |
        0x1ULL << VHOST_USER_F_PROTOCOL_FEATURES;

    if (s->queues > 1) {
        features |= 0x1ULL << VIRTIO_NET_F_MQ;
    }

    return features;
}
static void vu_net_set_features(TestServer *s, CharBackend *chr,
                                VhostUserMsg *msg)
{
    g_assert(msg->payload.u64 & (0x1ULL << VHOST_USER_F_PROTOCOL_FEATURES));
    if (s->test_flags == TEST_FLAGS_DISCONNECT) {
        qemu_chr_fe_disconnect(chr);
        s->test_flags = TEST_FLAGS_BAD;
    }
}
static void vu_net_get_protocol_features(TestServer *s, CharBackend *chr,
                                         VhostUserMsg *msg)
{
    /* send back features to qemu */
    msg->flags |= VHOST_USER_REPLY_MASK;
    msg->size = sizeof(m.payload.u64);
    msg->payload.u64 = 1 << VHOST_USER_PROTOCOL_F_LOG_SHMFD;
    msg->payload.u64 |= 1 << VHOST_USER_PROTOCOL_F_CROSS_ENDIAN;
    if (s->queues > 1) {
        msg->payload.u64 |= 1 << VHOST_USER_PROTOCOL_F_MQ;
    }
    qemu_chr_fe_write_all(chr, (uint8_t *)msg, VHOST_USER_HDR_SIZE + msg->size);
}
/* Each VHOST-USER device should have its ops structure defined. */
static struct vhost_user_ops g_vu_net_ops = {
    .type = VHOST_USER_NET,

    .append_opts = append_vhost_net_opts,

    .get_features = vu_net_get_features,
    .set_features = vu_net_set_features,
    .get_protocol_features = vu_net_get_protocol_features,
};
static void register_vhost_user_test(void)
{
    QOSGraphTestOptions opts = {
        .before = vhost_user_test_setup,
        .arg = &g_vu_net_ops,
    };

    qemu_add_opts(&qemu_chardev_opts);

    qos_add_test("vhost-user/read-guest-mem/memfile",
                 "virtio-net",
                 test_read_guest_mem, &opts);

    if (qemu_memfd_check(MFD_ALLOW_SEALING)) {
        opts.before = vhost_user_test_setup_memfd;
        qos_add_test("vhost-user/read-guest-mem/memfd",
                     "virtio-net",
                     test_read_guest_mem, &opts);
    }

    qos_add_test("vhost-user/migrate",
                 "virtio-net",
                 test_migrate, &opts);

    opts.before = vhost_user_test_setup_reconnect;
    qos_add_test("vhost-user/reconnect", "virtio-net",
                 test_reconnect, &opts);

    opts.before = vhost_user_test_setup_connect_fail;
    qos_add_test("vhost-user/connect-fail", "virtio-net",
                 test_vhost_user_started, &opts);

    opts.before = vhost_user_test_setup_flags_mismatch;
    qos_add_test("vhost-user/flags-mismatch", "virtio-net",
                 test_vhost_user_started, &opts);

    opts.before = vhost_user_test_setup_multiqueue;
    opts.edge.extra_device_opts = "mq=on";
    qos_add_test("vhost-user/multiqueue",
                 "virtio-net",
                 test_multiqueue, &opts);
}
libqos_init(register_vhost_user_test);
static uint64_t vu_gpio_get_features(TestServer *s)
{
    return 0x1ULL << VIRTIO_F_VERSION_1 |
        0x1ULL << VIRTIO_GPIO_F_IRQ |
        0x1ULL << VHOST_USER_F_PROTOCOL_FEATURES;
}
/*
 * This stub can't handle all the message types but we should reply
 * that we support VHOST_USER_PROTOCOL_F_CONFIG as gpio would use it
 * talking to a real vhost-user daemon.
 */
static void vu_gpio_get_protocol_features(TestServer *s, CharBackend *chr,
                                          VhostUserMsg *msg)
{
    /* send back features to qemu */
    msg->flags |= VHOST_USER_REPLY_MASK;
    msg->size = sizeof(m.payload.u64);
    msg->payload.u64 = 1ULL << VHOST_USER_PROTOCOL_F_CONFIG;

    qemu_chr_fe_write_all(chr, (uint8_t *)msg, VHOST_USER_HDR_SIZE + msg->size);
}
static struct vhost_user_ops g_vu_gpio_ops = {
    .type = VHOST_USER_GPIO,

    .append_opts = append_vhost_gpio_opts,

    .get_features = vu_gpio_get_features,
    .set_features = vu_net_set_features,
    .get_protocol_features = vu_gpio_get_protocol_features,
};
static void register_vhost_gpio_test(void)
{
    QOSGraphTestOptions opts = {
        .before = vhost_user_test_setup,
        .arg = &g_vu_gpio_ops,
    };

    qemu_add_opts(&qemu_chardev_opts);

    qos_add_test("read-guest-mem/memfile",
                 "vhost-user-gpio", test_read_guest_mem, &opts);
}
libqos_init(register_vhost_gpio_test);