/*
 * Copyright (c) 2013 Virtual Open Systems Sarl.
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 */

#include "qemu/osdep.h"
#include "clients.h"
#include "net/vhost_net.h"
#include "net/vhost-user.h"
#include "chardev/char-fe.h"
#include "qemu/config-file.h"
#include "qemu/error-report.h"
#include "qmp-commands.h"
#include "trace.h"

typedef struct VhostUserState {
    NetClientState nc;
    CharBackend chr; /* only queue index 0 */
    VHostNetState *vhost_net;
    guint watch;
    uint64_t acked_features;
    bool started;
} VhostUserState;
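
/*
 * Accessors exported via net/vhost-user.h: they hand back the vhost_net
 * instance and the feature bits last acknowledged by the backend for a
 * given vhost-user net client.
 */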

VHostNetState *vhost_user_get_vhost_net(NetClientState *nc)
{
    VhostUserState *s = DO_UPCAST(VhostUserState, nc, nc);
    assert(nc->info->type == NET_CLIENT_DRIVER_VHOST_USER);
    return s->vhost_net;
}

uint64_t vhost_user_get_acked_features(NetClientState *nc)
{
    VhostUserState *s = DO_UPCAST(VhostUserState, nc, nc);
    assert(nc->info->type == NET_CLIENT_DRIVER_VHOST_USER);
    return s->acked_features;
}
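
/*
 * Tear the vhost_net instances down for every queue.  The features already
 * acknowledged by the backend are saved first, so that
 * vhost_user_get_acked_features() can still report them after a disconnect.
 */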

static void vhost_user_stop(int queues, NetClientState *ncs[])
{
    VhostUserState *s;
    int i;

    for (i = 0; i < queues; i++) {
        assert(ncs[i]->info->type == NET_CLIENT_DRIVER_VHOST_USER);

        s = DO_UPCAST(VhostUserState, nc, ncs[i]);

        if (s->vhost_net) {
            /* save acked features */
            uint64_t features = vhost_net_get_acked_features(s->vhost_net);
            if (features) {
                s->acked_features = features;
            }
            vhost_net_cleanup(s->vhost_net);
        }
    }
}
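
/*
 * Initialize a vhost_net instance for every queue, all of them backed by the
 * same chardev.  If any queue fails, the queues set up so far are stopped
 * again and -1 is returned.
 */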

static int vhost_user_start(int queues, NetClientState *ncs[], CharBackend *be)
{
    VhostNetOptions options;
    struct vhost_net *net = NULL;
    VhostUserState *s;
    int max_queues;
    int i;

    options.backend_type = VHOST_BACKEND_TYPE_USER;

    for (i = 0; i < queues; i++) {
        assert(ncs[i]->info->type == NET_CLIENT_DRIVER_VHOST_USER);

        s = DO_UPCAST(VhostUserState, nc, ncs[i]);

        options.net_backend = ncs[i];
        options.opaque      = be;
        options.busyloop_timeout = 0;
        net = vhost_net_init(&options);
        if (!net) {
            error_report("failed to init vhost_net for queue %d", i);
            goto err;
        }

        if (i == 0) {
            max_queues = vhost_net_get_max_queues(net);
            if (queues > max_queues) {
                error_report("you are asking more queues than supported: %d",
                             max_queues);
                goto err;
            }
        }

        if (s->vhost_net) {
            vhost_net_cleanup(s->vhost_net);
            g_free(s->vhost_net);
        }
        s->vhost_net = net;
    }

    return 0;

err:
    if (net) {
        vhost_net_cleanup(net);
    }
    vhost_user_stop(i, ncs);
    return -1;
}
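
/*
 * NetClientInfo.receive callback.  The actual datapath lives in the
 * vhost-user backend, so the only packet QEMU has to care about here is the
 * self-announce RARP sent after migration; see the comment in the body.
 */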

static ssize_t vhost_user_receive(NetClientState *nc, const uint8_t *buf,
                                  size_t size)
{
    /* In case of RARP (message size is 60) notify the backend to send a fake
       RARP.  This fake RARP will be sent by the backend only for guests
       without the GUEST_ANNOUNCE capability.
     */
    if (size == 60) {
        VhostUserState *s = DO_UPCAST(VhostUserState, nc, nc);
        int r;
        static int display_rarp_failure = 1;
        char mac_addr[6];

        /* extract guest mac address from the RARP message */
        memcpy(mac_addr, &buf[6], 6);

        r = vhost_net_notify_migration_done(s->vhost_net, mac_addr);

        if ((r != 0) && (display_rarp_failure)) {
            fprintf(stderr,
                    "Vhost user backend fails to broadcast fake RARP\n");
            fflush(stderr);
            display_rarp_failure = 0;
        }
    }

    return size;
}
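
/*
 * NetClientInfo.cleanup callback: release the vhost_net instance and, for
 * queue index 0 only, drop the chardev watch and handlers registered on it.
 */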

static void vhost_user_cleanup(NetClientState *nc)
{
    VhostUserState *s = DO_UPCAST(VhostUserState, nc, nc);

    if (s->vhost_net) {
        vhost_net_cleanup(s->vhost_net);
        g_free(s->vhost_net);
        s->vhost_net = NULL;
    }
    if (nc->queue_index == 0) {
        if (s->watch) {
            g_source_remove(s->watch);
            s->watch = 0;
        }
        qemu_chr_fe_deinit(&s->chr, true);
    }

    qemu_purge_queued_packets(nc);
}
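
/*
 * The two callbacks below simply report vnet header and UFO support as
 * always available; the real feature set is negotiated through the
 * vhost-user protocol.
 */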

static bool vhost_user_has_vnet_hdr(NetClientState *nc)
{
    assert(nc->info->type == NET_CLIENT_DRIVER_VHOST_USER);

    return true;
}

static bool vhost_user_has_ufo(NetClientState *nc)
{
    assert(nc->info->type == NET_CLIENT_DRIVER_VHOST_USER);

    return true;
}
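
/* NetClientState callbacks for the vhost-user network backend. */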

static NetClientInfo net_vhost_user_info = {
        .type = NET_CLIENT_DRIVER_VHOST_USER,
        .size = sizeof(VhostUserState),
        .receive = vhost_user_receive,
        .cleanup = vhost_user_cleanup,
        .has_vnet_hdr = vhost_user_has_vnet_hdr,
        .has_ufo = vhost_user_has_ufo,
};
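
/*
 * G_IO_HUP watch on the chardev: a hang-up from the backend is turned into
 * an explicit disconnect, which in turn delivers CHR_EVENT_CLOSED to
 * net_vhost_user_event().
 */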

static gboolean net_vhost_user_watch(GIOChannel *chan, GIOCondition cond,
                                     void *opaque)
{
    VhostUserState *s = opaque;

    qemu_chr_fe_disconnect(&s->chr);

    return TRUE;
}

static void net_vhost_user_event(void *opaque, int event);
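
/*
 * Bottom half scheduled from the CLOSED event: bring the link down, stop the
 * vhost_net instances and re-arm the chardev event handler so a reconnect is
 * noticed.
 */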

static void chr_closed_bh(void *opaque)
{
    const char *name = opaque;
    NetClientState *ncs[MAX_QUEUE_NUM];
    VhostUserState *s;
    Error *err = NULL;
    int queues;

    queues = qemu_find_net_clients_except(name, ncs,
                                          NET_CLIENT_DRIVER_NIC,
                                          MAX_QUEUE_NUM);
    assert(queues < MAX_QUEUE_NUM);

    s = DO_UPCAST(VhostUserState, nc, ncs[0]);

    qmp_set_link(name, false, &err);
    vhost_user_stop(queues, ncs);

    qemu_chr_fe_set_handlers(&s->chr, NULL, NULL, net_vhost_user_event,
                             NULL, opaque, NULL, true);

    if (err) {
        error_report_err(err);
    }
}
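
/*
 * Chardev event handler: OPENED starts the vhost-user devices and brings the
 * link up; CLOSED defers the teardown to chr_closed_bh(), because stopping
 * vhost in the middle of a chardev read/write is not safe (see the FIXME in
 * the CLOSED case).
 */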

static void net_vhost_user_event(void *opaque, int event)
{
    const char *name = opaque;
    NetClientState *ncs[MAX_QUEUE_NUM];
    VhostUserState *s;
    Chardev *chr;
    Error *err = NULL;
    int queues;

    queues = qemu_find_net_clients_except(name, ncs,
                                          NET_CLIENT_DRIVER_NIC,
                                          MAX_QUEUE_NUM);
    assert(queues < MAX_QUEUE_NUM);

    s = DO_UPCAST(VhostUserState, nc, ncs[0]);
    chr = qemu_chr_fe_get_driver(&s->chr);
    trace_vhost_user_event(chr->label, event);
    switch (event) {
    case CHR_EVENT_OPENED:
        if (vhost_user_start(queues, ncs, &s->chr) < 0) {
            qemu_chr_fe_disconnect(&s->chr);
            return;
        }
        s->watch = qemu_chr_fe_add_watch(&s->chr, G_IO_HUP,
                                         net_vhost_user_watch, s);
        qmp_set_link(name, true, &err);
        s->started = true;
        break;
    case CHR_EVENT_CLOSED:
        /* a close event may happen during a read/write, but vhost
         * code assumes the vhost_dev remains setup, so delay the
         * stop & clear to idle.
         * FIXME: better handle failure in vhost code, remove bh
         */
        if (s->watch) {
            AioContext *ctx = qemu_get_current_aio_context();

            g_source_remove(s->watch);
            s->watch = 0;
            qemu_chr_fe_set_handlers(&s->chr, NULL, NULL, NULL, NULL,
                                     NULL, NULL, false);

            aio_bh_schedule_oneshot(ctx, chr_closed_bh, opaque);
        }
        break;
    }

    if (err) {
        error_report_err(err);
    }
}
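
/*
 * Create one net client per queue, bind queue 0 to the chardev and loop
 * until the first connection to the backend has completed (s->started is
 * set from the OPENED event).
 */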

static int net_vhost_user_init(NetClientState *peer, const char *device,
                               const char *name, Chardev *chr,
                               int queues)
{
    Error *err = NULL;
    NetClientState *nc, *nc0 = NULL;
    VhostUserState *s;
    int i;

    assert(name);
    assert(queues > 0);

    for (i = 0; i < queues; i++) {
        nc = qemu_new_net_client(&net_vhost_user_info, peer, device, name);
        snprintf(nc->info_str, sizeof(nc->info_str), "vhost-user%d to %s",
                 i, chr->label);
        nc->queue_index = i;
        if (!nc0) {
            nc0 = nc;
            s = DO_UPCAST(VhostUserState, nc, nc);
            if (!qemu_chr_fe_init(&s->chr, chr, &err)) {
                error_report_err(err);
                return -1;
            }
        }
    }

    s = DO_UPCAST(VhostUserState, nc, nc0);
    do {
        if (qemu_chr_fe_wait_connected(&s->chr, &err) < 0) {
            error_report_err(err);
            return -1;
        }
        qemu_chr_fe_set_handlers(&s->chr, NULL, NULL,
                                 net_vhost_user_event, NULL, nc0->name, NULL,
                                 true);
    } while (!s->started);

    assert(s->vhost_net);

    return 0;
}
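
/*
 * Resolve the chardev named in the -netdev options and check that it
 * supports reconnection and fd passing, both of which vhost-user requires.
 */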

static Chardev *net_vhost_claim_chardev(
    const NetdevVhostUserOptions *opts, Error **errp)
{
    Chardev *chr = qemu_chr_find(opts->chardev);

    if (chr == NULL) {
        error_setg(errp, "chardev \"%s\" not found", opts->chardev);
        return NULL;
    }

    if (!qemu_chr_has_feature(chr, QEMU_CHAR_FEATURE_RECONNECTABLE)) {
        error_setg(errp, "chardev \"%s\" is not reconnectable",
                   opts->chardev);
        return NULL;
    }
    if (!qemu_chr_has_feature(chr, QEMU_CHAR_FEATURE_FD_PASS)) {
        error_setg(errp, "chardev \"%s\" does not support FD passing",
                   opts->chardev);
        return NULL;
    }

    return chr;
}

static int net_vhost_check_net(void *opaque, QemuOpts *opts, Error **errp)
{
    const char *name = opaque;
    const char *driver, *netdev;

    driver = qemu_opt_get(opts, "driver");
    netdev = qemu_opt_get(opts, "netdev");

    if (!driver || !netdev) {
        return 0;
    }

    if (strcmp(netdev, name) == 0 &&
        !g_str_has_prefix(driver, "virtio-net-")) {
        error_setg(errp, "vhost-user requires frontend driver virtio-net-*");
        return -1;
    }

    return 0;
}

int net_init_vhost_user(const Netdev *netdev, const char *name,
                        NetClientState *peer, Error **errp)
{
    int queues;
    const NetdevVhostUserOptions *vhost_user_opts;
    Chardev *chr;

    assert(netdev->type == NET_CLIENT_DRIVER_VHOST_USER);
    vhost_user_opts = &netdev->u.vhost_user;

    chr = net_vhost_claim_chardev(vhost_user_opts, errp);
    if (!chr) {
        return -1;
    }

    /* verify net frontend */
    if (qemu_opts_foreach(qemu_find_opts("device"), net_vhost_check_net,
                          (char *)name, errp)) {
        return -1;
    }

    queues = vhost_user_opts->has_queues ? vhost_user_opts->queues : 1;
    if (queues < 1 || queues > MAX_QUEUE_NUM) {
        error_setg(errp,
                   "vhost-user number of queues must be in range [1, %d]",
                   MAX_QUEUE_NUM);
        return -1;
    }

    return net_vhost_user_init(peer, "vhost_user", name, chr, queues);
}
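
/*
 * Illustrative invocation (not part of this file; option spellings may vary
 * between QEMU versions and the socket path is just an example):
 *
 *   -chardev socket,id=chr0,path=/tmp/vhost-user.sock
 *   -netdev vhost-user,id=net0,chardev=chr0,queues=2
 *   -device virtio-net-pci,netdev=net0,mq=on
 */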