Merge tag 'pull-trivial-patches' of https://gitlab.com/mjt0k/qemu into staging
[qemu/kevin.git] / tests / qtest / vhost-user-test.c
blobd6075001e7c4145c1fb3cda87a81af7c3d9a016f
1 /*
2 * QTest testcase for the vhost-user
4 * Copyright (c) 2014 Virtual Open Systems Sarl.
6 * This work is licensed under the terms of the GNU GPL, version 2 or later.
7 * See the COPYING file in the top-level directory.
9 */
11 #include "qemu/osdep.h"
13 #include "libqtest-single.h"
14 #include "qapi/error.h"
15 #include "qapi/qmp/qdict.h"
16 #include "qemu/config-file.h"
17 #include "qemu/option.h"
18 #include "qemu/range.h"
19 #include "qemu/sockets.h"
20 #include "chardev/char-fe.h"
21 #include "qemu/memfd.h"
22 #include "qemu/module.h"
23 #include "sysemu/sysemu.h"
24 #include "libqos/libqos.h"
25 #include "libqos/pci-pc.h"
26 #include "libqos/virtio-pci.h"
28 #include "libqos/malloc-pc.h"
29 #include "libqos/qgraph_internal.h"
30 #include "hw/virtio/virtio-net.h"
32 #include "standard-headers/linux/vhost_types.h"
33 #include "standard-headers/linux/virtio_ids.h"
34 #include "standard-headers/linux/virtio_net.h"
35 #include "standard-headers/linux/virtio_gpio.h"
36 #include "standard-headers/linux/virtio_scmi.h"
38 #ifdef CONFIG_LINUX
39 #include <sys/vfs.h>
40 #endif
/* Command-line fragments used to assemble the QEMU instance under test. */
#define QEMU_CMD_MEM    " -m %d -object memory-backend-file,id=mem,size=%dM," \
                        "mem-path=%s,share=on -numa node,memdev=mem"
#define QEMU_CMD_MEMFD  " -m %d -object memory-backend-memfd,id=mem,size=%dM," \
                        " -numa node,memdev=mem"
#define QEMU_CMD_SHM    " -m %d -object memory-backend-shm,id=mem,size=%dM," \
                        " -numa node,memdev=mem"
#define QEMU_CMD_CHR    " -chardev socket,id=%s,path=%s%s"
#define QEMU_CMD_NETDEV " -netdev vhost-user,id=hs0,chardev=%s,vhostforce=on"

/* f_type reported by statfs() for a hugetlbfs mount; see init_hugepagefs(). */
#define HUGETLBFS_MAGIC       0x958458f6

/*********** FROM hw/virtio/vhost-user.c *************************************/

#define VHOST_MEMORY_MAX_NREGIONS    8
#define VHOST_MAX_VIRTQUEUES    0x100

#define VHOST_USER_F_PROTOCOL_FEATURES 30
#define VIRTIO_F_VERSION_1 32

/* Protocol feature bits this stub backend may advertise. */
#define VHOST_USER_PROTOCOL_F_MQ 0
#define VHOST_USER_PROTOCOL_F_LOG_SHMFD 1
#define VHOST_USER_PROTOCOL_F_CROSS_ENDIAN   6
#define VHOST_USER_PROTOCOL_F_CONFIG 9

/* One bit in the migration dirty log covers this many guest bytes. */
#define VHOST_LOG_PAGE 0x1000
/* Request codes of the vhost-user protocol handled (or logged) by chr_read(). */
typedef enum VhostUserRequest {
    VHOST_USER_NONE = 0,
    VHOST_USER_GET_FEATURES = 1,
    VHOST_USER_SET_FEATURES = 2,
    VHOST_USER_SET_OWNER = 3,
    VHOST_USER_RESET_OWNER = 4,
    VHOST_USER_SET_MEM_TABLE = 5,
    VHOST_USER_SET_LOG_BASE = 6,
    VHOST_USER_SET_LOG_FD = 7,
    VHOST_USER_SET_VRING_NUM = 8,
    VHOST_USER_SET_VRING_ADDR = 9,
    VHOST_USER_SET_VRING_BASE = 10,
    VHOST_USER_GET_VRING_BASE = 11,
    VHOST_USER_SET_VRING_KICK = 12,
    VHOST_USER_SET_VRING_CALL = 13,
    VHOST_USER_SET_VRING_ERR = 14,
    VHOST_USER_GET_PROTOCOL_FEATURES = 15,
    VHOST_USER_SET_PROTOCOL_FEATURES = 16,
    VHOST_USER_GET_QUEUE_NUM = 17,
    VHOST_USER_SET_VRING_ENABLE = 18,
    VHOST_USER_GET_CONFIG = 24,
    VHOST_USER_SET_CONFIG = 25,
    VHOST_USER_MAX
} VhostUserRequest;
/* One guest memory region as described by a SET_MEM_TABLE message. */
typedef struct VhostUserMemoryRegion {
    uint64_t guest_phys_addr;
    uint64_t memory_size;
    uint64_t userspace_addr;
    uint64_t mmap_offset;   /* offset into the region's fd mapping */
} VhostUserMemoryRegion;

/* Payload of SET_MEM_TABLE: the full guest memory layout. */
typedef struct VhostUserMemory {
    uint32_t nregions;
    uint32_t padding;
    VhostUserMemoryRegion regions[VHOST_MEMORY_MAX_NREGIONS];
} VhostUserMemory;

/* Payload of SET_LOG_BASE: geometry of the shared dirty-log mapping. */
typedef struct VhostUserLog {
    uint64_t mmap_size;
    uint64_t mmap_offset;
} VhostUserLog;
/*
 * Wire format of a vhost-user message: a fixed header (request, flags,
 * size) followed by a request-specific payload of 'size' bytes.
 */
typedef struct VhostUserMsg {
    VhostUserRequest request;

#define VHOST_USER_VERSION_MASK     (0x3)
#define VHOST_USER_REPLY_MASK       (0x1<<2)
    uint32_t flags;
    uint32_t size; /* the following payload size */
    union {
#define VHOST_USER_VRING_IDX_MASK   (0xff)
#define VHOST_USER_VRING_NOFD_MASK  (0x1<<8)
        uint64_t u64;
        struct vhost_vring_state state;
        struct vhost_vring_addr addr;
        VhostUserMemory memory;
        VhostUserLog log;
    } payload;
} QEMU_PACKED VhostUserMsg;

/* Dummy instance used only as a sizeof() donor for the macros below. */
static VhostUserMsg m __attribute__ ((unused));
#define VHOST_USER_HDR_SIZE (sizeof(m.request) \
                            + sizeof(m.flags) \
                            + sizeof(m.size))

#define VHOST_USER_PAYLOAD_SIZE (sizeof(m) - VHOST_USER_HDR_SIZE)

/* The version of the protocol we support */
#define VHOST_USER_VERSION    (0x1)
/*****************************************************************************/
/*
 * State machine for the disconnect/flags-mismatch tests: the server
 * moves DISCONNECT -> BAD -> END as the handshake is deliberately
 * broken, and END -> OK once the resulting close event arrives.
 */
enum {
    TEST_FLAGS_OK,
    TEST_FLAGS_DISCONNECT,
    TEST_FLAGS_BAD,
    TEST_FLAGS_END,
};

/* Device types exercised by this test, see struct vhost_user_ops. */
enum {
    VHOST_USER_NET,
    VHOST_USER_GPIO,
    VHOST_USER_SCMI,
};
/* State shared between the fake vhost-user backend and the test cases. */
typedef struct TestServer {
    gchar *socket_path;                 /* vhost-user unix socket path */
    gchar *mig_path;                    /* migration unix socket path */
    gchar *chr_name;                    /* chardev id used on the command line */
    gchar *tmpfs;                       /* per-server temporary directory */
    CharBackend chr;
    int fds_num;                        /* number of region fds received */
    int fds[VHOST_MEMORY_MAX_NREGIONS]; /* fds delivered with SET_MEM_TABLE */
    VhostUserMemory memory;             /* copy of the guest memory table */
    GMainContext *context;
    GMainLoop *loop;
    GThread *thread;                    /* runs 'loop' so the chardev works */
    GMutex data_mutex;                  /* guards the message-driven state */
    GCond data_cond;                    /* broadcast when new state arrives */
    int log_fd;                         /* dirty-log fd, -1 when not set */
    uint64_t rings;                     /* bitmap of started vring indexes */
    bool test_fail;                     /* drop the first connection if set */
    int test_flags;                     /* TEST_FLAGS_* state machine */
    int queues;                         /* number of queue pairs (default 1) */
    struct vhost_user_ops *vu_ops;      /* per-device-type behaviour */
} TestServer;
/*
 * Hooks that vary per device type, so the same stub server can back
 * virtio-net, vhost-user-gpio and vhost-user-scmi devices.
 */
struct vhost_user_ops {
    /* Device types. */
    int type;
    /* Appends the chardev (and any device-specific) CLI options. */
    void (*append_opts)(TestServer *s, GString *cmd_line,
            const char *chr_opts);

    /* VHOST-USER commands. */
    uint64_t (*get_features)(TestServer *s);
    void (*set_features)(TestServer *s, CharBackend *chr,
            VhostUserMsg *msg);
    void (*get_protocol_features)(TestServer *s,
            CharBackend *chr, VhostUserMsg *msg);
};
/* Forward declarations for helpers defined later in this file. */
static const char *init_hugepagefs(void);
static TestServer *test_server_new(const gchar *name,
        struct vhost_user_ops *ops);
static void test_server_free(TestServer *server);
static void test_server_listen(TestServer *server);

/* Which memory backend append_mem_opts() should put on the command line. */
enum test_memfd {
    TEST_MEMFD_AUTO,
    TEST_MEMFD_YES,
    TEST_MEMFD_NO,
    TEST_MEMFD_SHM,
};
203 static void append_vhost_net_opts(TestServer *s, GString *cmd_line,
204 const char *chr_opts)
206 g_string_append_printf(cmd_line, QEMU_CMD_CHR QEMU_CMD_NETDEV,
207 s->chr_name, s->socket_path,
208 chr_opts, s->chr_name);
212 * For GPIO there are no other magic devices we need to add (like
213 * block or netdev) so all we need to worry about is the vhost-user
214 * chardev socket.
216 static void append_vhost_gpio_opts(TestServer *s, GString *cmd_line,
217 const char *chr_opts)
219 g_string_append_printf(cmd_line, QEMU_CMD_CHR,
220 s->chr_name, s->socket_path,
221 chr_opts);
224 static void append_mem_opts(TestServer *server, GString *cmd_line,
225 int size, enum test_memfd memfd)
227 if (memfd == TEST_MEMFD_AUTO) {
228 memfd = qemu_memfd_check(MFD_ALLOW_SEALING) ? TEST_MEMFD_YES
229 : TEST_MEMFD_NO;
232 if (memfd == TEST_MEMFD_YES) {
233 g_string_append_printf(cmd_line, QEMU_CMD_MEMFD, size, size);
234 } else if (memfd == TEST_MEMFD_SHM) {
235 g_string_append_printf(cmd_line, QEMU_CMD_SHM, size, size);
236 } else {
237 const char *root = init_hugepagefs() ? : server->tmpfs;
239 g_string_append_printf(cmd_line, QEMU_CMD_MEM, size, size, root);
/*
 * Block (up to 5s) until the SET_MEM_TABLE fds have arrived, then check
 * that one region is mapped at guest physical address 0.  Returns false
 * (and marks the test skipped) when no region starts at 0x0.
 */
static bool wait_for_fds(TestServer *s)
{
    gint64 end_time;
    bool got_region;
    int i;

    g_mutex_lock(&s->data_mutex);

    end_time = g_get_monotonic_time() + 5 * G_TIME_SPAN_SECOND;
    while (!s->fds_num) {
        if (!g_cond_wait_until(&s->data_cond, &s->data_mutex, end_time)) {
            /* timeout has passed */
            g_assert(s->fds_num);
            break;
        }
    }

    /* check for sanity */
    g_assert_cmpint(s->fds_num, >, 0);
    g_assert_cmpint(s->fds_num, ==, s->memory.nregions);

    g_mutex_unlock(&s->data_mutex);

    got_region = false;
    for (i = 0; i < s->memory.nregions; ++i) {
        VhostUserMemoryRegion *reg = &s->memory.regions[i];
        if (reg->guest_phys_addr == 0) {
            got_region = true;
            break;
        }
    }
    if (!got_region) {
        g_test_skip("No memory at address 0x0");
    }
    return got_region;
}
/*
 * Verify that the memory shared over vhost-user matches what the guest
 * sees: mmap the region at GPA 0 through its fd and byte-compare the
 * first 1024 bytes against qtest reads of guest memory.
 */
static void read_guest_mem_server(QTestState *qts, TestServer *s)
{
    uint8_t *guest_mem;
    int i, j;
    size_t size;

    g_mutex_lock(&s->data_mutex);

    /* iterate all regions */
    for (i = 0; i < s->fds_num; i++) {

        /* We'll check only the region starting at 0x0 */
        if (s->memory.regions[i].guest_phys_addr != 0x0) {
            continue;
        }

        g_assert_cmpint(s->memory.regions[i].memory_size, >, 1024);

        /* mmap_offset must be included so the mapping covers the region */
        size = s->memory.regions[i].memory_size +
            s->memory.regions[i].mmap_offset;

        guest_mem = mmap(0, size, PROT_READ | PROT_WRITE,
                         MAP_SHARED, s->fds[i], 0);

        g_assert(guest_mem != MAP_FAILED);
        guest_mem += (s->memory.regions[i].mmap_offset / sizeof(*guest_mem));

        for (j = 0; j < 1024; j++) {
            uint32_t a = qtest_readb(qts, s->memory.regions[i].guest_phys_addr + j);
            uint32_t b = guest_mem[j];

            g_assert_cmpint(a, ==, b);
        }

        munmap(guest_mem, s->memory.regions[i].memory_size);
    }

    g_mutex_unlock(&s->data_mutex);
}
320 static void *thread_function(void *data)
322 GMainLoop *loop = data;
323 g_main_loop_run(loop);
324 return NULL;
327 static int chr_can_read(void *opaque)
329 return VHOST_USER_HDR_SIZE;
/*
 * Core of the stub backend: invoked by the chardev layer with a
 * vhost-user message header; reads any payload, then dispatches on the
 * request code.  Replies are written back on the same chardev and
 * waiters on data_cond are woken when shared state changes.
 */
static void chr_read(void *opaque, const uint8_t *buf, int size)
{
    g_autoptr(GError) err = NULL;
    TestServer *s = opaque;
    CharBackend *chr = &s->chr;
    VhostUserMsg msg;
    uint8_t *p = (uint8_t *) &msg;
    int fd = -1;

    if (s->test_fail) {
        /* connect-fail test: drop the very first connection */
        qemu_chr_fe_disconnect(chr);
        /* now switch to non-failure */
        s->test_fail = false;
    }

    if (size != VHOST_USER_HDR_SIZE) {
        qos_printf("%s: Wrong message size received %d\n", __func__, size);
        return;
    }

    g_mutex_lock(&s->data_mutex);
    memcpy(p, buf, VHOST_USER_HDR_SIZE);

    if (msg.size) {
        /* read the payload declared by the header */
        p += VHOST_USER_HDR_SIZE;
        size = qemu_chr_fe_read_all(chr, p, msg.size);
        if (size != msg.size) {
            qos_printf("%s: Wrong message size received %d != %d\n",
                       __func__, size, msg.size);
            goto out;
        }
    }

    switch (msg.request) {
    case VHOST_USER_GET_FEATURES:
        /* Mandatory for tests to define get_features */
        g_assert(s->vu_ops->get_features);

        /* send back features to qemu */
        msg.flags |= VHOST_USER_REPLY_MASK;
        msg.size = sizeof(m.payload.u64);

        if (s->test_flags >= TEST_FLAGS_BAD) {
            /* flags-mismatch test: reply with no features at all */
            msg.payload.u64 = 0;
            s->test_flags = TEST_FLAGS_END;
        } else {
            msg.payload.u64 = s->vu_ops->get_features(s);
        }

        qemu_chr_fe_write_all(chr, (uint8_t *) &msg,
                              VHOST_USER_HDR_SIZE + msg.size);
        break;

    case VHOST_USER_SET_FEATURES:
        if (s->vu_ops->set_features) {
            s->vu_ops->set_features(s, chr, &msg);
        }
        break;

    case VHOST_USER_SET_OWNER:
        /*
         * We don't need to do anything here, the remote is just
         * letting us know it is in charge. Just log it.
         */
        qos_printf("set_owner: start of session\n");
        break;

    case VHOST_USER_GET_PROTOCOL_FEATURES:
        if (s->vu_ops->get_protocol_features) {
            s->vu_ops->get_protocol_features(s, chr, &msg);
        }
        break;

    case VHOST_USER_GET_CONFIG:
        /*
         * Treat GET_CONFIG as a NOP and just reply and let the guest
         * consider we have updated its memory. Tests currently don't
         * require working configs.
         */
        msg.flags |= VHOST_USER_REPLY_MASK;
        p = (uint8_t *) &msg;
        qemu_chr_fe_write_all(chr, p, VHOST_USER_HDR_SIZE + msg.size);
        break;

    case VHOST_USER_SET_PROTOCOL_FEATURES:
        /*
         * We did set VHOST_USER_F_PROTOCOL_FEATURES so its valid for
         * the remote end to send this. There is no handshake reply so
         * just log the details for debugging.
         */
        qos_printf("set_protocol_features: 0x%"PRIx64 "\n", msg.payload.u64);
        break;

        /*
         * A real vhost-user backend would actually set the size and
         * address of the vrings but we can simply report them.
         */
    case VHOST_USER_SET_VRING_NUM:
        qos_printf("set_vring_num: %d/%d\n",
                   msg.payload.state.index, msg.payload.state.num);
        break;
    case VHOST_USER_SET_VRING_ADDR:
        qos_printf("set_vring_addr: 0x%"PRIx64"/0x%"PRIx64"/0x%"PRIx64"\n",
                   msg.payload.addr.avail_user_addr,
                   msg.payload.addr.desc_user_addr,
                   msg.payload.addr.used_user_addr);
        break;

    case VHOST_USER_GET_VRING_BASE:
        /* send back vring base to qemu */
        msg.flags |= VHOST_USER_REPLY_MASK;
        msg.size = sizeof(m.payload.state);
        msg.payload.state.num = 0;
        p = (uint8_t *) &msg;
        qemu_chr_fe_write_all(chr, p, VHOST_USER_HDR_SIZE + msg.size);

        /* mark this vring as stopped in the bitmap and wake waiters */
        assert(msg.payload.state.index < s->queues * 2);
        s->rings &= ~(0x1ULL << msg.payload.state.index);
        g_cond_broadcast(&s->data_cond);
        break;

    case VHOST_USER_SET_MEM_TABLE:
        /* received the mem table */
        memcpy(&s->memory, &msg.payload.memory, sizeof(msg.payload.memory));
        s->fds_num = qemu_chr_fe_get_msgfds(chr, s->fds,
                                            G_N_ELEMENTS(s->fds));

        /* signal the test that it can continue */
        g_cond_broadcast(&s->data_cond);
        break;

    case VHOST_USER_SET_VRING_KICK:
    case VHOST_USER_SET_VRING_CALL:
        /* consume the fd */
        if (!qemu_chr_fe_get_msgfds(chr, &fd, 1) && fd < 0) {
            qos_printf("call fd: %d, do not set non-blocking\n", fd);
            break;
        }
        /*
         * This is a non-blocking eventfd.
         * The receive function forces it to be blocking,
         * so revert it back to non-blocking.
         */
        g_unix_set_fd_nonblocking(fd, true, &err);
        g_assert_no_error(err);
        break;

    case VHOST_USER_SET_LOG_BASE:
        if (s->log_fd != -1) {
            close(s->log_fd);
            s->log_fd = -1;
        }
        qemu_chr_fe_get_msgfds(chr, &s->log_fd, 1);
        msg.flags |= VHOST_USER_REPLY_MASK;
        msg.size = 0;
        p = (uint8_t *) &msg;
        qemu_chr_fe_write_all(chr, p, VHOST_USER_HDR_SIZE);

        g_cond_broadcast(&s->data_cond);
        break;

    case VHOST_USER_SET_VRING_BASE:
        /* mark this vring as started in the bitmap and wake waiters */
        assert(msg.payload.state.index < s->queues * 2);
        s->rings |= 0x1ULL << msg.payload.state.index;
        g_cond_broadcast(&s->data_cond);
        break;

    case VHOST_USER_GET_QUEUE_NUM:
        msg.flags |= VHOST_USER_REPLY_MASK;
        msg.size = sizeof(m.payload.u64);
        msg.payload.u64 = s->queues;
        p = (uint8_t *) &msg;
        qemu_chr_fe_write_all(chr, p, VHOST_USER_HDR_SIZE + msg.size);
        break;

    case VHOST_USER_SET_VRING_ENABLE:
        /*
         * Another case we ignore as we don't need to respond. With a
         * fully functioning vhost-user we would enable/disable the
         * vring monitoring.
         */
        qos_printf("set_vring(%d)=%s\n", msg.payload.state.index,
                   msg.payload.state.num ? "enabled" : "disabled");
        break;

    default:
        qos_printf("vhost-user: un-handled message: %d\n", msg.request);
        break;
    }

out:
    g_mutex_unlock(&s->data_mutex);
}
/*
 * Resolve and cache the hugetlbfs mount point from QTEST_HUGETLBFS_PATH.
 * Returns NULL when the variable is unset or the path is unusable (the
 * latter also fails the test); non-Linux builds always return NULL.
 */
static const char *init_hugepagefs(void)
{
#ifdef CONFIG_LINUX
    static const char *hugepagefs;
    const char *path = getenv("QTEST_HUGETLBFS_PATH");
    struct statfs fs;
    int ret;

    if (hugepagefs) {
        return hugepagefs;
    }
    if (!path) {
        return NULL;
    }

    if (access(path, R_OK | W_OK | X_OK)) {
        qos_printf("access on path (%s): %s", path, strerror(errno));
        g_test_fail();
        return NULL;
    }

    do {
        ret = statfs(path, &fs);
    } while (ret != 0 && errno == EINTR);

    if (ret != 0) {
        qos_printf("statfs on path (%s): %s", path, strerror(errno));
        g_test_fail();
        return NULL;
    }

    if (fs.f_type != HUGETLBFS_MAGIC) {
        qos_printf("Warning: path not on HugeTLBFS: %s", path);
        g_test_fail();
        return NULL;
    }

    hugepagefs = path;
    return hugepagefs;
#else
    return NULL;
#endif
}
/*
 * Allocate and initialise a TestServer: its own GMainContext/loop (run
 * on a helper thread so the chardev can operate), a temporary directory
 * for the sockets, and the per-device ops.  Caller frees with
 * test_server_free().
 */
static TestServer *test_server_new(const gchar *name,
        struct vhost_user_ops *ops)
{
    TestServer *server = g_new0(TestServer, 1);
    g_autofree const char *tmpfs = NULL;
    GError *err = NULL;

    server->context = g_main_context_new();
    server->loop = g_main_loop_new(server->context, FALSE);

    /* run the main loop thread so the chardev may operate */
    server->thread = g_thread_new(NULL, thread_function, server->loop);

    tmpfs = g_dir_make_tmp("vhost-test-XXXXXX", &err);
    if (!tmpfs) {
        g_test_message("Can't create temporary directory in %s: %s",
                       g_get_tmp_dir(), err->message);
        g_error_free(err);
    }
    g_assert(tmpfs);

    server->tmpfs = g_strdup(tmpfs);
    server->socket_path = g_strdup_printf("%s/%s.sock", tmpfs, name);
    server->mig_path = g_strdup_printf("%s/%s.mig", tmpfs, name);
    server->chr_name = g_strdup_printf("chr-%s", name);

    g_mutex_init(&server->data_mutex);
    g_cond_init(&server->data_cond);

    server->log_fd = -1;    /* no dirty log until SET_LOG_BASE arrives */
    server->queues = 1;
    server->vu_ops = ops;

    return server;
}
606 static void chr_event(void *opaque, QEMUChrEvent event)
608 TestServer *s = opaque;
610 if (s->test_flags == TEST_FLAGS_END &&
611 event == CHR_EVENT_CLOSED) {
612 s->test_flags = TEST_FLAGS_OK;
616 static void test_server_create_chr(TestServer *server, const gchar *opt)
618 g_autofree gchar *chr_path = g_strdup_printf("unix:%s%s",
619 server->socket_path, opt);
620 Chardev *chr;
622 chr = qemu_chr_new(server->chr_name, chr_path, server->context);
623 g_assert(chr);
625 qemu_chr_fe_init(&server->chr, chr, &error_abort);
626 qemu_chr_fe_set_handlers(&server->chr, chr_can_read, chr_read,
627 chr_event, NULL, server, server->context, true);
630 static void test_server_listen(TestServer *server)
632 test_server_create_chr(server, ",server=on,wait=off");
/*
 * Tear down a TestServer: stop the helper thread, drain pending
 * sources, remove the socket files and tmpdir, close all received fds
 * and release glib resources.
 */
static void test_server_free(TestServer *server)
{
    int i, ret;

    /* finish the helper thread and dispatch pending sources */
    g_main_loop_quit(server->loop);
    g_thread_join(server->thread);
    while (g_main_context_pending(NULL)) {
        g_main_context_iteration(NULL, TRUE);
    }

    unlink(server->socket_path);
    g_free(server->socket_path);

    unlink(server->mig_path);
    g_free(server->mig_path);

    ret = rmdir(server->tmpfs);
    if (ret != 0) {
        g_test_message("unable to rmdir: path (%s): %s",
                       server->tmpfs, strerror(errno));
    }
    g_free(server->tmpfs);

    qemu_chr_fe_deinit(&server->chr, true);

    /* close the memory-region fds received via SET_MEM_TABLE */
    for (i = 0; i < server->fds_num; i++) {
        close(server->fds[i]);
    }

    if (server->log_fd != -1) {
        close(server->log_fd);
    }

    g_free(server->chr_name);

    g_main_loop_unref(server->loop);
    g_main_context_unref(server->context);
    g_cond_clear(&server->data_cond);
    g_mutex_clear(&server->data_mutex);
    g_free(server);
}
/* Block (up to 5s) until SET_LOG_BASE has delivered the dirty-log fd. */
static void wait_for_log_fd(TestServer *s)
{
    gint64 end_time;

    g_mutex_lock(&s->data_mutex);
    end_time = g_get_monotonic_time() + 5 * G_TIME_SPAN_SECOND;
    while (s->log_fd == -1) {
        if (!g_cond_wait_until(&s->data_cond, &s->data_mutex, end_time)) {
            /* timeout has passed */
            g_assert(s->log_fd != -1);
            break;
        }
    }

    g_mutex_unlock(&s->data_mutex);
}
/*
 * Dirty guest memory from the backend side: map the region at GPA 0
 * through its fd and write 256 seeded words at its start, so the
 * migration test can verify the dirty log is honoured.
 */
static void write_guest_mem(TestServer *s, uint32_t seed)
{
    uint32_t *guest_mem;
    int i, j;
    size_t size;

    /* iterate all regions */
    for (i = 0; i < s->fds_num; i++) {

        /* We'll write only the region starting at 0x0 */
        if (s->memory.regions[i].guest_phys_addr != 0x0) {
            continue;
        }

        g_assert_cmpint(s->memory.regions[i].memory_size, >, 1024);

        size = s->memory.regions[i].memory_size +
            s->memory.regions[i].mmap_offset;

        guest_mem = mmap(0, size, PROT_READ | PROT_WRITE,
                         MAP_SHARED, s->fds[i], 0);

        g_assert(guest_mem != MAP_FAILED);
        guest_mem += (s->memory.regions[i].mmap_offset / sizeof(*guest_mem));

        for (j = 0; j < 256; j++) {
            guest_mem[j] = seed + j;
        }

        munmap(guest_mem, s->memory.regions[i].memory_size);
        break;
    }
}
729 static guint64 get_log_size(TestServer *s)
731 guint64 log_size = 0;
732 int i;
734 for (i = 0; i < s->memory.nregions; ++i) {
735 VhostUserMemoryRegion *reg = &s->memory.regions[i];
736 guint64 last = range_get_last(reg->guest_phys_addr,
737 reg->memory_size);
738 log_size = MAX(log_size, last / (8 * VHOST_LOG_PAGE) + 1);
741 return log_size;
/* GSource wrapper carrying both migration endpoints so the check
 * callback can compare their vring state. */
typedef struct TestMigrateSource {
    GSource source;
    TestServer *src;
    TestServer *dest;
} TestMigrateSource;
750 static gboolean
751 test_migrate_source_check(GSource *source)
753 TestMigrateSource *t = (TestMigrateSource *)source;
754 gboolean overlap = t->src->rings && t->dest->rings;
756 g_assert(!overlap);
758 return FALSE;
/* Only the check hook is needed; prepare/dispatch/finalize stay NULL. */
GSourceFuncs test_migrate_source_funcs = {
    .check = test_migrate_source_check,
};
765 static void vhost_user_test_cleanup(void *s)
767 TestServer *server = s;
769 qos_invalidate_command_line();
770 test_server_free(server);
773 static void *vhost_user_test_setup(GString *cmd_line, void *arg)
775 TestServer *server = test_server_new("vhost-user-test", arg);
776 test_server_listen(server);
778 append_mem_opts(server, cmd_line, 256, TEST_MEMFD_AUTO);
779 server->vu_ops->append_opts(server, cmd_line, "");
781 g_test_queue_destroy(vhost_user_test_cleanup, server);
783 return server;
786 static void *vhost_user_test_setup_memfd(GString *cmd_line, void *arg)
788 TestServer *server = test_server_new("vhost-user-test", arg);
789 test_server_listen(server);
791 append_mem_opts(server, cmd_line, 256, TEST_MEMFD_YES);
792 server->vu_ops->append_opts(server, cmd_line, "");
794 g_test_queue_destroy(vhost_user_test_cleanup, server);
796 return server;
799 static void *vhost_user_test_setup_shm(GString *cmd_line, void *arg)
801 TestServer *server = test_server_new("vhost-user-test", arg);
802 test_server_listen(server);
804 append_mem_opts(server, cmd_line, 256, TEST_MEMFD_SHM);
805 server->vu_ops->append_opts(server, cmd_line, "");
807 g_test_queue_destroy(vhost_user_test_cleanup, server);
809 return server;
812 static void test_read_guest_mem(void *obj, void *arg, QGuestAllocator *alloc)
814 TestServer *server = arg;
816 if (!wait_for_fds(server)) {
817 return;
820 read_guest_mem_server(global_qtest, server);
/*
 * Migrate a vhost-user guest to a second QEMU on this host: start a
 * destination server+QEMU with '-incoming', throttle migration, dirty
 * a page through the backend's shared mapping and its dirty log, then
 * let migration finish and verify memory on the destination side.
 */
static void test_migrate(void *obj, void *arg, QGuestAllocator *alloc)
{
    TestServer *s = arg;
    TestServer *dest;
    GString *dest_cmdline;
    char *uri;
    QTestState *to;
    GSource *source;
    QDict *rsp;
    guint8 *log;
    guint64 size;

    if (!wait_for_fds(s)) {
        return;
    }

    dest = test_server_new("dest", s->vu_ops);
    dest_cmdline = g_string_new(qos_get_current_command_line());
    uri = g_strdup_printf("%s%s", "unix:", dest->mig_path);

    size = get_log_size(s);
    g_assert_cmpint(size, ==, (256 * 1024 * 1024) / (VHOST_LOG_PAGE * 8));

    test_server_listen(dest);
    g_string_append_printf(dest_cmdline, " -incoming %s", uri);
    append_mem_opts(dest, dest_cmdline, 256, TEST_MEMFD_AUTO);
    dest->vu_ops->append_opts(dest, dest_cmdline, "");
    to = qtest_init(dest_cmdline->str);

    /* This would be where you call qos_allocate_objects(to, NULL), if you want
     * to talk to the QVirtioNet object on the destination.
     */

    /* watchdog asserting src and dest rings never run simultaneously */
    source = g_source_new(&test_migrate_source_funcs,
                          sizeof(TestMigrateSource));
    ((TestMigrateSource *)source)->src = s;
    ((TestMigrateSource *)source)->dest = dest;
    g_source_attach(source, s->context);

    /* slow down migration to have time to fiddle with log */
    /* TODO: qtest could learn to break on some places */
    rsp = qmp("{ 'execute': 'migrate-set-parameters',"
              "'arguments': { 'max-bandwidth': 10 } }");
    g_assert(qdict_haskey(rsp, "return"));
    qobject_unref(rsp);

    rsp = qmp("{ 'execute': 'migrate', 'arguments': { 'uri': %s } }", uri);
    g_assert(qdict_haskey(rsp, "return"));
    qobject_unref(rsp);

    wait_for_log_fd(s);

    log = mmap(0, size, PROT_READ | PROT_WRITE, MAP_SHARED, s->log_fd, 0);
    g_assert(log != MAP_FAILED);

    /* modify first page */
    write_guest_mem(s, 0x42);
    log[0] = 1;
    munmap(log, size);

    /* speed things up */
    rsp = qmp("{ 'execute': 'migrate-set-parameters',"
              "'arguments': { 'max-bandwidth': 0 } }");
    g_assert(qdict_haskey(rsp, "return"));
    qobject_unref(rsp);

    qmp_eventwait("STOP");
    qtest_qmp_eventwait(to, "RESUME");

    g_assert(wait_for_fds(dest));
    read_guest_mem_server(to, dest);

    g_source_destroy(source);
    g_source_unref(source);

    qtest_quit(to);
    test_server_free(dest);
    g_free(uri);
    g_string_free(dest_cmdline, true);
}
/* Block (up to 5s) until exactly 'count' vrings are marked started in
 * the s->rings bitmap (popcount of the bitmap equals count). */
static void wait_for_rings_started(TestServer *s, size_t count)
{
    gint64 end_time;

    g_mutex_lock(&s->data_mutex);
    end_time = g_get_monotonic_time() + 5 * G_TIME_SPAN_SECOND;
    while (ctpop64(s->rings) != count) {
        if (!g_cond_wait_until(&s->data_cond, &s->data_mutex, end_time)) {
            /* timeout has passed */
            g_assert_cmpint(ctpop64(s->rings), ==, count);
            break;
        }
    }

    g_mutex_unlock(&s->data_mutex);
}
921 static inline void test_server_connect(TestServer *server)
923 test_server_create_chr(server, ",reconnect=1");
926 static gboolean
927 reconnect_cb(gpointer user_data)
929 TestServer *s = user_data;
931 qemu_chr_fe_disconnect(&s->chr);
933 return FALSE;
936 static gpointer
937 connect_thread(gpointer data)
939 TestServer *s = data;
941 /* wait for qemu to start before first try, to avoid extra warnings */
942 g_usleep(G_USEC_PER_SEC);
943 test_server_connect(s);
945 return NULL;
948 static void *vhost_user_test_setup_reconnect(GString *cmd_line, void *arg)
950 TestServer *s = test_server_new("reconnect", arg);
952 g_thread_unref(g_thread_new("connect", connect_thread, s));
953 append_mem_opts(s, cmd_line, 256, TEST_MEMFD_AUTO);
954 s->vu_ops->append_opts(s, cmd_line, ",server=on");
956 g_test_queue_destroy(vhost_user_test_cleanup, s);
958 return s;
/*
 * Verify QEMU re-establishes the vhost-user session: wait for the
 * first session to come up, reset our view of it, force a disconnect
 * from the server's context, then expect a full second handshake.
 */
static void test_reconnect(void *obj, void *arg, QGuestAllocator *alloc)
{
    TestServer *s = arg;
    GSource *src;

    if (!wait_for_fds(s)) {
        return;
    }

    wait_for_rings_started(s, 2);

    /* reconnect */
    s->fds_num = 0;
    s->rings = 0;
    src = g_idle_source_new();
    g_source_set_callback(src, reconnect_cb, s, NULL);
    g_source_attach(src, s->context);
    g_source_unref(src);
    g_assert(wait_for_fds(s));
    wait_for_rings_started(s, 2);
}
983 static void *vhost_user_test_setup_connect_fail(GString *cmd_line, void *arg)
985 TestServer *s = test_server_new("connect-fail", arg);
987 s->test_fail = true;
989 g_thread_unref(g_thread_new("connect", connect_thread, s));
990 append_mem_opts(s, cmd_line, 256, TEST_MEMFD_AUTO);
991 s->vu_ops->append_opts(s, cmd_line, ",server=on");
993 g_test_queue_destroy(vhost_user_test_cleanup, s);
995 return s;
998 static void *vhost_user_test_setup_flags_mismatch(GString *cmd_line, void *arg)
1000 TestServer *s = test_server_new("flags-mismatch", arg);
1002 s->test_flags = TEST_FLAGS_DISCONNECT;
1004 g_thread_unref(g_thread_new("connect", connect_thread, s));
1005 append_mem_opts(s, cmd_line, 256, TEST_MEMFD_AUTO);
1006 s->vu_ops->append_opts(s, cmd_line, ",server=on");
1008 g_test_queue_destroy(vhost_user_test_cleanup, s);
1010 return s;
1013 static void test_vhost_user_started(void *obj, void *arg, QGuestAllocator *alloc)
1015 TestServer *s = arg;
1017 if (!wait_for_fds(s)) {
1018 return;
1020 wait_for_rings_started(s, 2);
1023 static void *vhost_user_test_setup_multiqueue(GString *cmd_line, void *arg)
1025 TestServer *s = vhost_user_test_setup(cmd_line, arg);
1027 s->queues = 2;
1028 g_string_append_printf(cmd_line,
1029 " -set netdev.hs0.queues=%d"
1030 " -global virtio-net-pci.vectors=%d",
1031 s->queues, s->queues * 2 + 2);
1033 return s;
1036 static void test_multiqueue(void *obj, void *arg, QGuestAllocator *alloc)
1038 TestServer *s = arg;
1040 wait_for_rings_started(s, s->queues * 2);
1044 static uint64_t vu_net_get_features(TestServer *s)
1046 uint64_t features = 0x1ULL << VHOST_F_LOG_ALL |
1047 0x1ULL << VHOST_USER_F_PROTOCOL_FEATURES;
1049 if (s->queues > 1) {
1050 features |= 0x1ULL << VIRTIO_NET_F_MQ;
1053 return features;
1056 static void vu_net_set_features(TestServer *s, CharBackend *chr,
1057 VhostUserMsg *msg)
1059 g_assert(msg->payload.u64 & (0x1ULL << VHOST_USER_F_PROTOCOL_FEATURES));
1060 if (s->test_flags == TEST_FLAGS_DISCONNECT) {
1061 qemu_chr_fe_disconnect(chr);
1062 s->test_flags = TEST_FLAGS_BAD;
1066 static void vu_net_get_protocol_features(TestServer *s, CharBackend *chr,
1067 VhostUserMsg *msg)
1069 /* send back features to qemu */
1070 msg->flags |= VHOST_USER_REPLY_MASK;
1071 msg->size = sizeof(m.payload.u64);
1072 msg->payload.u64 = 1 << VHOST_USER_PROTOCOL_F_LOG_SHMFD;
1073 msg->payload.u64 |= 1 << VHOST_USER_PROTOCOL_F_CROSS_ENDIAN;
1074 if (s->queues > 1) {
1075 msg->payload.u64 |= 1 << VHOST_USER_PROTOCOL_F_MQ;
1077 qemu_chr_fe_write_all(chr, (uint8_t *)msg, VHOST_USER_HDR_SIZE + msg->size);
/* Each VHOST-USER device should have its ops structure defined. */
static struct vhost_user_ops g_vu_net_ops = {
    .type = VHOST_USER_NET,

    .append_opts = append_vhost_net_opts,

    .get_features = vu_net_get_features,
    .set_features = vu_net_set_features,
    .get_protocol_features = vu_net_get_protocol_features,
};
/*
 * Register the virtio-net flavoured vhost-user tests on the qos graph.
 * Each test runs in a subprocess with its own TestServer backend.
 */
static void register_vhost_user_test(void)
{
    QOSGraphTestOptions opts = {
        .before = vhost_user_test_setup,
        .subprocess = true,
        .arg = &g_vu_net_ops,
    };

    qemu_add_opts(&qemu_chardev_opts);

    qos_add_test("vhost-user/read-guest-mem/memfile",
                 "virtio-net",
                 test_read_guest_mem, &opts);

    opts.before = vhost_user_test_setup_shm;
    qos_add_test("vhost-user/read-guest-mem/shm",
                 "virtio-net",
                 test_read_guest_mem, &opts);

    /* memfd variant only where sealing is supported */
    if (qemu_memfd_check(MFD_ALLOW_SEALING)) {
        opts.before = vhost_user_test_setup_memfd;
        qos_add_test("vhost-user/read-guest-mem/memfd",
                     "virtio-net",
                     test_read_guest_mem, &opts);
    }

    qos_add_test("vhost-user/migrate",
                 "virtio-net",
                 test_migrate, &opts);

    opts.before = vhost_user_test_setup_reconnect;
    qos_add_test("vhost-user/reconnect", "virtio-net",
                 test_reconnect, &opts);

    opts.before = vhost_user_test_setup_connect_fail;
    qos_add_test("vhost-user/connect-fail", "virtio-net",
                 test_vhost_user_started, &opts);

    opts.before = vhost_user_test_setup_flags_mismatch;
    qos_add_test("vhost-user/flags-mismatch", "virtio-net",
                 test_vhost_user_started, &opts);

    opts.before = vhost_user_test_setup_multiqueue;
    opts.edge.extra_device_opts = "mq=on";
    qos_add_test("vhost-user/multiqueue",
                 "virtio-net",
                 test_multiqueue, &opts);
}
libqos_init(register_vhost_user_test);
1141 static uint64_t vu_gpio_get_features(TestServer *s)
1143 return 0x1ULL << VIRTIO_F_VERSION_1 |
1144 0x1ULL << VIRTIO_GPIO_F_IRQ |
1145 0x1ULL << VHOST_USER_F_PROTOCOL_FEATURES;
1149 * This stub can't handle all the message types but we should reply
1150 * that we support VHOST_USER_PROTOCOL_F_CONFIG as gpio would use it
1151 * talking to a read vhost-user daemon.
1153 static void vu_gpio_get_protocol_features(TestServer *s, CharBackend *chr,
1154 VhostUserMsg *msg)
1156 /* send back features to qemu */
1157 msg->flags |= VHOST_USER_REPLY_MASK;
1158 msg->size = sizeof(m.payload.u64);
1159 msg->payload.u64 = 1ULL << VHOST_USER_PROTOCOL_F_CONFIG;
1161 qemu_chr_fe_write_all(chr, (uint8_t *)msg, VHOST_USER_HDR_SIZE + msg->size);
/* Ops table for the vhost-user-gpio device; reuses the net SET_FEATURES
 * handler since the handshake assertions are identical. */
static struct vhost_user_ops g_vu_gpio_ops = {
    .type = VHOST_USER_GPIO,

    .append_opts = append_vhost_gpio_opts,

    .get_features = vu_gpio_get_features,
    .set_features = vu_net_set_features,
    .get_protocol_features = vu_gpio_get_protocol_features,
};
/* Register the vhost-user-gpio smoke test on the qos graph. */
static void register_vhost_gpio_test(void)
{
    QOSGraphTestOptions opts = {
        .before = vhost_user_test_setup,
        .subprocess = true,
        .arg = &g_vu_gpio_ops,
    };

    qemu_add_opts(&qemu_chardev_opts);

    qos_add_test("read-guest-mem/memfile",
                 "vhost-user-gpio", test_read_guest_mem, &opts);
}
libqos_init(register_vhost_gpio_test);
1189 static uint64_t vu_scmi_get_features(TestServer *s)
1191 return 0x1ULL << VIRTIO_F_VERSION_1 |
1192 0x1ULL << VIRTIO_SCMI_F_P2A_CHANNELS |
1193 0x1ULL << VHOST_USER_F_PROTOCOL_FEATURES;
1196 static void vu_scmi_get_protocol_features(TestServer *s, CharBackend *chr,
1197 VhostUserMsg *msg)
1199 msg->flags |= VHOST_USER_REPLY_MASK;
1200 msg->size = sizeof(m.payload.u64);
1201 msg->payload.u64 = 1ULL << VHOST_USER_PROTOCOL_F_MQ;
1203 qemu_chr_fe_write_all(chr, (uint8_t *)msg, VHOST_USER_HDR_SIZE + msg->size);
/* Ops table for the vhost-user-scmi device; shares the gpio append_opts
 * (chardev only) and the net SET_FEATURES handler. */
static struct vhost_user_ops g_vu_scmi_ops = {
    .type = VHOST_USER_SCMI,

    .append_opts = append_vhost_gpio_opts,

    .get_features = vu_scmi_get_features,
    .set_features = vu_net_set_features,
    .get_protocol_features = vu_scmi_get_protocol_features,
};
/* Register the vhost-user-scmi smoke test on the qos graph. */
static void register_vhost_scmi_test(void)
{
    QOSGraphTestOptions opts = {
        .before = vhost_user_test_setup,
        .subprocess = true,
        .arg = &g_vu_scmi_ops,
    };

    qemu_add_opts(&qemu_chardev_opts);

    qos_add_test("scmi/read-guest-mem/memfile",
                 "vhost-user-scmi", test_read_guest_mem, &opts);
}
libqos_init(register_vhost_scmi_test);