/*
 * vhost-user.c
 *
 * Copyright (c) 2013 Virtual Open Systems Sarl.
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 *
 */
11 #include "qemu/osdep.h"
13 #include "net/vhost_net.h"
14 #include "net/vhost-user.h"
15 #include "chardev/char-fe.h"
16 #include "qemu/config-file.h"
17 #include "qemu/error-report.h"
18 #include "qmp-commands.h"
21 typedef struct VhostUserState
{
23 CharBackend chr
; /* only queue index 0 */
24 VHostNetState
*vhost_net
;
26 uint64_t acked_features
;
30 VHostNetState
*vhost_user_get_vhost_net(NetClientState
*nc
)
32 VhostUserState
*s
= DO_UPCAST(VhostUserState
, nc
, nc
);
33 assert(nc
->info
->type
== NET_CLIENT_DRIVER_VHOST_USER
);
37 uint64_t vhost_user_get_acked_features(NetClientState
*nc
)
39 VhostUserState
*s
= DO_UPCAST(VhostUserState
, nc
, nc
);
40 assert(nc
->info
->type
== NET_CLIENT_DRIVER_VHOST_USER
);
41 return s
->acked_features
;
44 static void vhost_user_stop(int queues
, NetClientState
*ncs
[])
49 for (i
= 0; i
< queues
; i
++) {
50 assert(ncs
[i
]->info
->type
== NET_CLIENT_DRIVER_VHOST_USER
);
52 s
= DO_UPCAST(VhostUserState
, nc
, ncs
[i
]);
55 /* save acked features */
56 uint64_t features
= vhost_net_get_acked_features(s
->vhost_net
);
58 s
->acked_features
= features
;
60 vhost_net_cleanup(s
->vhost_net
);
65 static int vhost_user_start(int queues
, NetClientState
*ncs
[], CharBackend
*be
)
67 VhostNetOptions options
;
68 struct vhost_net
*net
= NULL
;
73 options
.backend_type
= VHOST_BACKEND_TYPE_USER
;
75 for (i
= 0; i
< queues
; i
++) {
76 assert(ncs
[i
]->info
->type
== NET_CLIENT_DRIVER_VHOST_USER
);
78 s
= DO_UPCAST(VhostUserState
, nc
, ncs
[i
]);
80 options
.net_backend
= ncs
[i
];
82 options
.busyloop_timeout
= 0;
83 net
= vhost_net_init(&options
);
85 error_report("failed to init vhost_net for queue %d", i
);
90 max_queues
= vhost_net_get_max_queues(net
);
91 if (queues
> max_queues
) {
92 error_report("you are asking more queues than supported: %d",
99 vhost_net_cleanup(s
->vhost_net
);
100 g_free(s
->vhost_net
);
109 vhost_net_cleanup(net
);
111 vhost_user_stop(i
, ncs
);
115 static ssize_t
vhost_user_receive(NetClientState
*nc
, const uint8_t *buf
,
118 /* In case of RARP (message size is 60) notify backup to send a fake RARP.
119 This fake RARP will be sent by backend only for guest
120 without GUEST_ANNOUNCE capability.
123 VhostUserState
*s
= DO_UPCAST(VhostUserState
, nc
, nc
);
125 static int display_rarp_failure
= 1;
128 /* extract guest mac address from the RARP message */
129 memcpy(mac_addr
, &buf
[6], 6);
131 r
= vhost_net_notify_migration_done(s
->vhost_net
, mac_addr
);
133 if ((r
!= 0) && (display_rarp_failure
)) {
135 "Vhost user backend fails to broadcast fake RARP\n");
137 display_rarp_failure
= 0;
144 static void vhost_user_cleanup(NetClientState
*nc
)
146 VhostUserState
*s
= DO_UPCAST(VhostUserState
, nc
, nc
);
149 vhost_net_cleanup(s
->vhost_net
);
150 g_free(s
->vhost_net
);
153 if (nc
->queue_index
== 0) {
154 qemu_chr_fe_deinit(&s
->chr
, true);
157 qemu_purge_queued_packets(nc
);
160 static bool vhost_user_has_vnet_hdr(NetClientState
*nc
)
162 assert(nc
->info
->type
== NET_CLIENT_DRIVER_VHOST_USER
);
167 static bool vhost_user_has_ufo(NetClientState
*nc
)
169 assert(nc
->info
->type
== NET_CLIENT_DRIVER_VHOST_USER
);
174 static NetClientInfo net_vhost_user_info
= {
175 .type
= NET_CLIENT_DRIVER_VHOST_USER
,
176 .size
= sizeof(VhostUserState
),
177 .receive
= vhost_user_receive
,
178 .cleanup
= vhost_user_cleanup
,
179 .has_vnet_hdr
= vhost_user_has_vnet_hdr
,
180 .has_ufo
= vhost_user_has_ufo
,
183 static gboolean
net_vhost_user_watch(GIOChannel
*chan
, GIOCondition cond
,
186 VhostUserState
*s
= opaque
;
188 qemu_chr_fe_disconnect(&s
->chr
);
/* Forward declaration: chr_closed_bh() re-registers this handler. */
static void net_vhost_user_event(void *opaque, int event);
195 static void chr_closed_bh(void *opaque
)
197 const char *name
= opaque
;
198 NetClientState
*ncs
[MAX_QUEUE_NUM
];
203 queues
= qemu_find_net_clients_except(name
, ncs
,
204 NET_CLIENT_DRIVER_NIC
,
206 assert(queues
< MAX_QUEUE_NUM
);
208 s
= DO_UPCAST(VhostUserState
, nc
, ncs
[0]);
210 qmp_set_link(name
, false, &err
);
211 vhost_user_stop(queues
, ncs
);
213 qemu_chr_fe_set_handlers(&s
->chr
, NULL
, NULL
, net_vhost_user_event
,
214 NULL
, opaque
, NULL
, true);
217 error_report_err(err
);
221 static void net_vhost_user_event(void *opaque
, int event
)
223 const char *name
= opaque
;
224 NetClientState
*ncs
[MAX_QUEUE_NUM
];
230 queues
= qemu_find_net_clients_except(name
, ncs
,
231 NET_CLIENT_DRIVER_NIC
,
233 assert(queues
< MAX_QUEUE_NUM
);
235 s
= DO_UPCAST(VhostUserState
, nc
, ncs
[0]);
236 chr
= qemu_chr_fe_get_driver(&s
->chr
);
237 trace_vhost_user_event(chr
->label
, event
);
239 case CHR_EVENT_OPENED
:
240 if (vhost_user_start(queues
, ncs
, &s
->chr
) < 0) {
241 qemu_chr_fe_disconnect(&s
->chr
);
244 s
->watch
= qemu_chr_fe_add_watch(&s
->chr
, G_IO_HUP
,
245 net_vhost_user_watch
, s
);
246 qmp_set_link(name
, true, &err
);
249 case CHR_EVENT_CLOSED
:
250 /* a close event may happen during a read/write, but vhost
251 * code assumes the vhost_dev remains setup, so delay the
252 * stop & clear to idle.
253 * FIXME: better handle failure in vhost code, remove bh
256 AioContext
*ctx
= qemu_get_current_aio_context();
258 g_source_remove(s
->watch
);
260 qemu_chr_fe_set_handlers(&s
->chr
, NULL
, NULL
, NULL
, NULL
,
263 aio_bh_schedule_oneshot(ctx
, chr_closed_bh
, opaque
);
269 error_report_err(err
);
273 static int net_vhost_user_init(NetClientState
*peer
, const char *device
,
274 const char *name
, Chardev
*chr
,
278 NetClientState
*nc
, *nc0
= NULL
;
285 for (i
= 0; i
< queues
; i
++) {
286 nc
= qemu_new_net_client(&net_vhost_user_info
, peer
, device
, name
);
287 snprintf(nc
->info_str
, sizeof(nc
->info_str
), "vhost-user%d to %s",
292 s
= DO_UPCAST(VhostUserState
, nc
, nc
);
293 if (!qemu_chr_fe_init(&s
->chr
, chr
, &err
)) {
294 error_report_err(err
);
301 s
= DO_UPCAST(VhostUserState
, nc
, nc0
);
303 if (qemu_chr_fe_wait_connected(&s
->chr
, &err
) < 0) {
304 error_report_err(err
);
307 qemu_chr_fe_set_handlers(&s
->chr
, NULL
, NULL
,
308 net_vhost_user_event
, NULL
, nc0
->name
, NULL
,
310 } while (!s
->started
);
312 assert(s
->vhost_net
);
317 static Chardev
*net_vhost_claim_chardev(
318 const NetdevVhostUserOptions
*opts
, Error
**errp
)
320 Chardev
*chr
= qemu_chr_find(opts
->chardev
);
323 error_setg(errp
, "chardev \"%s\" not found", opts
->chardev
);
327 if (!qemu_chr_has_feature(chr
, QEMU_CHAR_FEATURE_RECONNECTABLE
)) {
328 error_setg(errp
, "chardev \"%s\" is not reconnectable",
332 if (!qemu_chr_has_feature(chr
, QEMU_CHAR_FEATURE_FD_PASS
)) {
333 error_setg(errp
, "chardev \"%s\" does not support FD passing",
341 static int net_vhost_check_net(void *opaque
, QemuOpts
*opts
, Error
**errp
)
343 const char *name
= opaque
;
344 const char *driver
, *netdev
;
346 driver
= qemu_opt_get(opts
, "driver");
347 netdev
= qemu_opt_get(opts
, "netdev");
349 if (!driver
|| !netdev
) {
353 if (strcmp(netdev
, name
) == 0 &&
354 !g_str_has_prefix(driver
, "virtio-net-")) {
355 error_setg(errp
, "vhost-user requires frontend driver virtio-net-*");
362 int net_init_vhost_user(const Netdev
*netdev
, const char *name
,
363 NetClientState
*peer
, Error
**errp
)
366 const NetdevVhostUserOptions
*vhost_user_opts
;
369 assert(netdev
->type
== NET_CLIENT_DRIVER_VHOST_USER
);
370 vhost_user_opts
= &netdev
->u
.vhost_user
;
372 chr
= net_vhost_claim_chardev(vhost_user_opts
, errp
);
377 /* verify net frontend */
378 if (qemu_opts_foreach(qemu_find_opts("device"), net_vhost_check_net
,
379 (char *)name
, errp
)) {
383 queues
= vhost_user_opts
->has_queues
? vhost_user_opts
->queues
: 1;
384 if (queues
< 1 || queues
> MAX_QUEUE_NUM
) {
386 "vhost-user number of queues must be in range [1, %d]",
391 return net_vhost_user_init(peer
, "vhost_user", name
, chr
, queues
);