/*
 * QTest testcase for the vhost-user
 *
 * Copyright (c) 2014 Virtual Open Systems Sarl.
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 *
 */
#define QEMU_GLIB_COMPAT_H
#include <glib.h>

#include "libqtest.h"
#include "qemu/option.h"
#include "sysemu/char.h"
#include "sysemu/sysemu.h"

#include <linux/vhost.h>
#include <sys/mman.h>
#include <sys/vfs.h>
#include <qemu/sockets.h>
#if GLIB_CHECK_VERSION(2, 32, 0)
#define HAVE_MUTEX_INIT
#define HAVE_COND_INIT
#define HAVE_THREAD_NEW
#endif
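
/*
 * GLib older than 2.32 lacks g_mutex_init()/g_cond_init() and
 * g_thread_try_new(); the _mutex_new()/_cond_new()/_thread_new()
 * wrappers below pick the appropriate API at build time.
 */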
#define QEMU_CMD_ACCEL  " -machine accel=tcg"
#define QEMU_CMD_MEM    " -m 512 -object memory-backend-file,id=mem,size=512M,"\
                        "mem-path=%s,share=on -numa node,memdev=mem"
#define QEMU_CMD_CHR    " -chardev socket,id=chr0,path=%s"
#define QEMU_CMD_NETDEV " -netdev vhost-user,id=net0,chardev=chr0,vhostforce"
#define QEMU_CMD_NET    " -device virtio-net-pci,netdev=net0 "
#define QEMU_CMD_ROM    " -option-rom ../pc-bios/pxe-virtio.rom"

#define QEMU_CMD        QEMU_CMD_ACCEL QEMU_CMD_MEM QEMU_CMD_CHR \
                        QEMU_CMD_NETDEV QEMU_CMD_NET QEMU_CMD_ROM
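
/*
 * The guest's RAM comes from a shareable memory-backend-file whose mem-path
 * points at a hugetlbfs mount (see init_hugepagefs() and HUGETLBFS_MAGIC
 * below), so the region file descriptors QEMU hands over with
 * VHOST_USER_SET_MEM_TABLE can be mmap()ed by this test.
 */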
#define HUGETLBFS_MAGIC       0x958458f6
/*********** FROM hw/virtio/vhost-user.c *************************************/

#define VHOST_MEMORY_MAX_NREGIONS    8
typedef enum VhostUserRequest {
    VHOST_USER_NONE = 0,
    VHOST_USER_GET_FEATURES = 1,
    VHOST_USER_SET_FEATURES = 2,
    VHOST_USER_SET_OWNER = 3,
    VHOST_USER_RESET_OWNER = 4,
    VHOST_USER_SET_MEM_TABLE = 5,
    VHOST_USER_SET_LOG_BASE = 6,
    VHOST_USER_SET_LOG_FD = 7,
    VHOST_USER_SET_VRING_NUM = 8,
    VHOST_USER_SET_VRING_ADDR = 9,
    VHOST_USER_SET_VRING_BASE = 10,
    VHOST_USER_GET_VRING_BASE = 11,
    VHOST_USER_SET_VRING_KICK = 12,
    VHOST_USER_SET_VRING_CALL = 13,
    VHOST_USER_SET_VRING_ERR = 14,
    VHOST_USER_MAX
} VhostUserRequest;
typedef struct VhostUserMemoryRegion {
    uint64_t guest_phys_addr;
    uint64_t memory_size;
    uint64_t userspace_addr;
    uint64_t mmap_offset;
} VhostUserMemoryRegion;
typedef struct VhostUserMemory {
    uint32_t nregions;
    uint32_t padding;
    VhostUserMemoryRegion regions[VHOST_MEMORY_MAX_NREGIONS];
} VhostUserMemory;
typedef struct VhostUserMsg {
    VhostUserRequest request;

#define VHOST_USER_VERSION_MASK     (0x3)
#define VHOST_USER_REPLY_MASK       (0x1<<2)
    uint32_t flags;
    uint32_t size; /* the following payload size */
    union {
        uint64_t u64;
        struct vhost_vring_state state;
        struct vhost_vring_addr addr;
        VhostUserMemory memory;
    };
} QEMU_PACKED VhostUserMsg;
static VhostUserMsg m __attribute__ ((unused));
#define VHOST_USER_HDR_SIZE (sizeof(m.request) \
                            + sizeof(m.flags) \
                            + sizeof(m.size))

#define VHOST_USER_PAYLOAD_SIZE (sizeof(m) - VHOST_USER_HDR_SIZE)

/* The version of the protocol we support */
#define VHOST_USER_VERSION    (0x1)
/*****************************************************************************/
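
/*
 * State shared between the chardev read handler (which plays the vhost-user
 * slave) and the test case; protected by data_mutex and signalled through
 * data_cond once the memory table has been received.
 */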
int fds_num = 0, fds[VHOST_MEMORY_MAX_NREGIONS];
static VhostUserMemory memory;
static GMutex *data_mutex;
static GCond *data_cond;
static GMutex *_mutex_new(void)
{
    GMutex *mutex;

#ifdef HAVE_MUTEX_INIT
    mutex = g_new(GMutex, 1);
    g_mutex_init(mutex);
#else
    mutex = g_mutex_new();
#endif

    return mutex;
}
static void _mutex_free(GMutex *mutex)
{
#ifdef HAVE_MUTEX_INIT
    g_mutex_clear(mutex);
    g_free(mutex);
#else
    g_mutex_free(mutex);
#endif
}
static GCond *_cond_new(void)
{
    GCond *cond;

#ifdef HAVE_COND_INIT
    cond = g_new(GCond, 1);
    g_cond_init(cond);
#else
    cond = g_cond_new();
#endif

    return cond;
}
static gboolean _cond_wait_until(GCond *cond, GMutex *mutex, gint64 end_time)
{
    gboolean ret = FALSE;
#ifdef HAVE_COND_INIT
    ret = g_cond_wait_until(cond, mutex, end_time);
#else
    GTimeVal time = { end_time / G_TIME_SPAN_SECOND,
                      end_time % G_TIME_SPAN_SECOND };
    ret = g_cond_timed_wait(cond, mutex, &time);
#endif
    return ret;
}
static void _cond_free(GCond *cond)
{
#ifdef HAVE_COND_INIT
    g_cond_clear(cond);
    g_free(cond);
#else
    g_cond_free(cond);
#endif
}
static GThread *_thread_new(const gchar *name, GThreadFunc func, gpointer data)
{
    GThread *thread = NULL;
    GError *error = NULL;
#ifdef HAVE_THREAD_NEW
    thread = g_thread_try_new(name, func, data, &error);
#else
    thread = g_thread_create(func, data, TRUE, &error);
#endif
    g_assert(thread != NULL);
    return thread;
}
static void read_guest_mem(void)
{
    uint32_t *guest_mem;
    gint64 end_time;
    size_t size;
    int i, j;

    g_mutex_lock(data_mutex);

    end_time = g_get_monotonic_time() + 5 * G_TIME_SPAN_SECOND;
    while (!fds_num) {
        if (!_cond_wait_until(data_cond, data_mutex, end_time)) {
            /* timeout has passed */
            g_assert(fds_num);
            break;
        }
    }

    /* check for sanity */
    g_assert_cmpint(fds_num, >, 0);
    g_assert_cmpint(fds_num, ==, memory.nregions);

    /* iterate all regions */
    for (i = 0; i < fds_num; i++) {

        /* we'll check only the region starting at 0x0 */
        if (memory.regions[i].guest_phys_addr != 0x0) {
            continue;
        }

        g_assert_cmpint(memory.regions[i].memory_size, >, 1024);

        size = memory.regions[i].memory_size + memory.regions[i].mmap_offset;

        guest_mem = mmap(0, size, PROT_READ | PROT_WRITE,
                         MAP_SHARED, fds[i], 0);

        g_assert(guest_mem != MAP_FAILED);
        guest_mem += (memory.regions[i].mmap_offset / sizeof(*guest_mem));

        for (j = 0; j < 256; j++) {
            uint32_t a = readl(memory.regions[i].guest_phys_addr + j * 4);
            uint32_t b = guest_mem[j];

            g_assert_cmpint(a, ==, b);
        }

        munmap(guest_mem, memory.regions[i].memory_size);
    }

    g_assert_cmpint(1, ==, 1);
    g_mutex_unlock(data_mutex);
}
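
/*
 * The chardev is serviced by the default GLib main context, so run a main
 * loop in a separate thread while the test thread blocks in read_guest_mem().
 */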
static void *thread_function(void *data)
{
    GMainLoop *loop;
    loop = g_main_loop_new(NULL, FALSE);
    g_main_loop_run(loop);
    return NULL;
}
static int chr_can_read(void *opaque)
{
    return VHOST_USER_HDR_SIZE;
}
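
/*
 * Minimal vhost-user slave: read the fixed-size header, then the payload,
 * answer GET_FEATURES and GET_VRING_BASE, and record the memory table and
 * region file descriptors delivered with SET_MEM_TABLE.
 */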
static void chr_read(void *opaque, const uint8_t *buf, int size)
{
    CharDriverState *chr = opaque;
    VhostUserMsg msg;
    uint8_t *p = (uint8_t *) &msg;
    int fd;

    if (size != VHOST_USER_HDR_SIZE) {
        g_test_message("Wrong message size received %d\n", size);
        return;
    }

    g_mutex_lock(data_mutex);
    memcpy(p, buf, VHOST_USER_HDR_SIZE);

    if (msg.size) {
        p += VHOST_USER_HDR_SIZE;
        qemu_chr_fe_read_all(chr, p, msg.size);
    }

    switch (msg.request) {
    case VHOST_USER_GET_FEATURES:
        /* send back features to qemu */
        msg.flags |= VHOST_USER_REPLY_MASK;
        msg.size = sizeof(m.u64);
        msg.u64 = 0;
        p = (uint8_t *) &msg;
        qemu_chr_fe_write_all(chr, p, VHOST_USER_HDR_SIZE + msg.size);
        break;

    case VHOST_USER_GET_VRING_BASE:
        /* send back vring base to qemu */
        msg.flags |= VHOST_USER_REPLY_MASK;
        msg.size = sizeof(m.state);
        msg.state.num = 0;
        p = (uint8_t *) &msg;
        qemu_chr_fe_write_all(chr, p, VHOST_USER_HDR_SIZE + msg.size);
        break;

    case VHOST_USER_SET_MEM_TABLE:
        /* received the mem table */
        memcpy(&memory, &msg.memory, sizeof(msg.memory));
        fds_num = qemu_chr_fe_get_msgfds(chr, fds, sizeof(fds) / sizeof(int));

        /* signal the test that it can continue */
        g_cond_signal(data_cond);
        break;

    case VHOST_USER_SET_VRING_KICK:
    case VHOST_USER_SET_VRING_CALL:
        /* consume the fd */
        qemu_chr_fe_get_msgfds(chr, &fd, 1);
        /*
         * This is a non-blocking eventfd.
         * The receive function forces it to be blocking,
         * so revert it back to non-blocking.
         */
        qemu_set_nonblock(fd);
        break;

    default:
        break;
    }

    g_mutex_unlock(data_mutex);
}
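
/*
 * Locate a usable hugetlbfs mount from QTEST_HUGETLBFS_PATH; without one the
 * memory-backend-file cannot be shared with the slave, so main() simply
 * returns without registering the test.
 */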
static const char *init_hugepagefs(void)
{
    const char *path;
    struct statfs fs;
    int ret;

    path = getenv("QTEST_HUGETLBFS_PATH");
    if (path == NULL) {
        return NULL;
    }

    if (access(path, R_OK | W_OK | X_OK)) {
        g_test_message("access on path (%s): %s\n", path, strerror(errno));
        return NULL;
    }

    do {
        ret = statfs(path, &fs);
    } while (ret != 0 && errno == EINTR);

    if (ret != 0) {
        g_test_message("statfs on path (%s): %s\n", path, strerror(errno));
        return NULL;
    }

    if (fs.f_type != HUGETLBFS_MAGIC) {
        g_test_message("Warning: path not on HugeTLBFS: %s\n", path);
        return NULL;
    }

    return path;
}
int main(int argc, char **argv)
{
    QTestState *s = NULL;
    CharDriverState *chr = NULL;
    const char *hugefs = 0;
    char *socket_path = 0;
    char *qemu_cmd = 0;
    char *chr_path = 0;
    int ret;

    g_test_init(&argc, &argv, NULL);

    module_call_init(MODULE_INIT_QOM);

    hugefs = init_hugepagefs();
    if (!hugefs) {
        return 0;
    }

    socket_path = g_strdup_printf("/tmp/vhost-%d.sock", getpid());

    /* create char dev and add read handlers */
    qemu_add_opts(&qemu_chardev_opts);
    chr_path = g_strdup_printf("unix:%s,server,nowait", socket_path);
    chr = qemu_chr_new("chr0", chr_path, NULL);
    g_free(chr_path);
    qemu_chr_add_handlers(chr, chr_can_read, chr_read, NULL, chr);

    /* run the main loop thread so the chardev may operate */
    data_mutex = _mutex_new();
    data_cond = _cond_new();
    _thread_new(NULL, thread_function, NULL);

    qemu_cmd = g_strdup_printf(QEMU_CMD, hugefs, socket_path);
    s = qtest_start(qemu_cmd);
    g_free(qemu_cmd);

    qtest_add_func("/vhost-user/read-guest-mem", read_guest_mem);

    ret = g_test_run();

    /* cleanup */
    qtest_quit(s);
    unlink(socket_path);
    g_free(socket_path);

    _cond_free(data_cond);
    _mutex_free(data_mutex);

    return ret;
}