/*
 * Copyright (c) 2013 Virtual Open Systems Sarl.
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 */
#include "qemu/osdep.h"
#include "clients.h"
#include "net/vhost_net.h"
#include "net/vhost-user.h"
#include "sysemu/char.h"
#include "qemu/config-file.h"
#include "qemu/error-report.h"
#include "qmp-commands.h"
#include "trace.h"
typedef struct VhostUserState {
    NetClientState nc;
    CharBackend chr; /* only queue index 0 */
    VHostNetState *vhost_net;
    guint watch;
    uint64_t acked_features;
    bool started;
} VhostUserState;
VHostNetState *vhost_user_get_vhost_net(NetClientState *nc)
{
    VhostUserState *s = DO_UPCAST(VhostUserState, nc, nc);
    assert(nc->info->type == NET_CLIENT_DRIVER_VHOST_USER);
    return s->vhost_net;
}
uint64_t vhost_user_get_acked_features(NetClientState *nc)
{
    VhostUserState *s = DO_UPCAST(VhostUserState, nc, nc);
    assert(nc->info->type == NET_CLIENT_DRIVER_VHOST_USER);
    return s->acked_features;
}
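/*
 * Tear down the vhost-net instance behind each queue. The features the
 * guest acked are saved first so they can be restored after a backend
 * reconnect, when vhost_user_start() runs again.
 */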
static void vhost_user_stop(int queues, NetClientState *ncs[])
{
    VhostUserState *s;
    int i;

    for (i = 0; i < queues; i++) {
        assert(ncs[i]->info->type == NET_CLIENT_DRIVER_VHOST_USER);

        s = DO_UPCAST(VhostUserState, nc, ncs[i]);

        if (s->vhost_net) {
            /* save acked features */
            uint64_t features = vhost_net_get_acked_features(s->vhost_net);
            if (features) {
                s->acked_features = features;
            }
            vhost_net_cleanup(s->vhost_net);
        }
    }
}
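/*
 * Create a vhost-net instance for every queue over the shared chardev.
 * The first queue reports how many queues the backend supports; if the
 * user asked for more, initialization is rolled back via vhost_user_stop().
 */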
static int vhost_user_start(int queues, NetClientState *ncs[], CharBackend *be)
{
    VhostNetOptions options;
    struct vhost_net *net = NULL;
    VhostUserState *s;
    int max_queues;
    int i;

    options.backend_type = VHOST_BACKEND_TYPE_USER;

    for (i = 0; i < queues; i++) {
        assert(ncs[i]->info->type == NET_CLIENT_DRIVER_VHOST_USER);

        s = DO_UPCAST(VhostUserState, nc, ncs[i]);

        options.net_backend = ncs[i];
        options.opaque = be;
        options.busyloop_timeout = 0;
        net = vhost_net_init(&options);
        if (!net) {
            error_report("failed to init vhost_net for queue %d", i);
            goto err;
        }

        if (i == 0) {
            max_queues = vhost_net_get_max_queues(net);
            if (queues > max_queues) {
                error_report("you are asking more queues than supported: %d",
                             max_queues);
                goto err;
            }
        }

        if (s->vhost_net) {
            vhost_net_cleanup(s->vhost_net);
            g_free(s->vhost_net);
        }
        s->vhost_net = net;
    }

    return 0;

err:
    if (net) {
        vhost_net_cleanup(net);
    }
    vhost_user_stop(i, ncs);
    return -1;
}
static ssize_t vhost_user_receive(NetClientState *nc, const uint8_t *buf,
                                  size_t size)
{
    /* In case of RARP (message size is 60) notify the backend to send a
       fake RARP. This fake RARP will be sent by the backend only for guests
       without the GUEST_ANNOUNCE capability.
     */
    if (size == 60) {
        VhostUserState *s = DO_UPCAST(VhostUserState, nc, nc);
        int r;
        static int display_rarp_failure = 1;
        char mac_addr[6];

        /* extract guest mac address from the RARP message */
        memcpy(mac_addr, &buf[6], 6);

        r = vhost_net_notify_migration_done(s->vhost_net, mac_addr);

        if ((r != 0) && (display_rarp_failure)) {
            fprintf(stderr,
                    "Vhost user backend fails to broadcast fake RARP\n");
            fflush(stderr);
            display_rarp_failure = 0;
        }
    }

    return size;
}
static void vhost_user_cleanup(NetClientState *nc)
{
    VhostUserState *s = DO_UPCAST(VhostUserState, nc, nc);

    if (s->vhost_net) {
        vhost_net_cleanup(s->vhost_net);
        g_free(s->vhost_net);
        s->vhost_net = NULL;
    }
    if (nc->queue_index == 0) {
        Chardev *chr = qemu_chr_fe_get_driver(&s->chr);

        qemu_chr_fe_deinit(&s->chr);
        qemu_chr_delete(chr);
    }

    qemu_purge_queued_packets(nc);
}
static bool vhost_user_has_vnet_hdr(NetClientState *nc)
{
    assert(nc->info->type == NET_CLIENT_DRIVER_VHOST_USER);

    return true;
}
static bool vhost_user_has_ufo(NetClientState *nc)
{
    assert(nc->info->type == NET_CLIENT_DRIVER_VHOST_USER);

    return true;
}
static NetClientInfo net_vhost_user_info = {
        .type = NET_CLIENT_DRIVER_VHOST_USER,
        .size = sizeof(VhostUserState),
        .receive = vhost_user_receive,
        .cleanup = vhost_user_cleanup,
        .has_vnet_hdr = vhost_user_has_vnet_hdr,
        .has_ufo = vhost_user_has_ufo,
};
static gboolean net_vhost_user_watch(GIOChannel *chan, GIOCondition cond,
                                     void *opaque)
{
    VhostUserState *s = opaque;

    qemu_chr_fe_disconnect(&s->chr);

    return TRUE;
}
static void net_vhost_user_event(void *opaque, int event);
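/*
 * Bottom half scheduled when the chardev closes: bring the link down and
 * stop vhost outside of the chardev callback, then re-arm the event
 * handler so a later reconnect is noticed.
 */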
static void chr_closed_bh(void *opaque)
{
    const char *name = opaque;
    NetClientState *ncs[MAX_QUEUE_NUM];
    VhostUserState *s;
    Error *err = NULL;
    int queues;

    queues = qemu_find_net_clients_except(name, ncs,
                                          NET_CLIENT_DRIVER_NIC,
                                          MAX_QUEUE_NUM);
    assert(queues < MAX_QUEUE_NUM);

    s = DO_UPCAST(VhostUserState, nc, ncs[0]);

    qmp_set_link(name, false, &err);
    vhost_user_stop(queues, ncs);

    qemu_chr_fe_set_handlers(&s->chr, NULL, NULL, net_vhost_user_event,
                             opaque, NULL, true);

    if (err) {
        error_report_err(err);
    }
}
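/*
 * Chardev event handler: on OPENED, start vhost for every queue, watch the
 * socket for hangup and bring the link up; on CLOSED, defer the teardown to
 * chr_closed_bh because vhost code expects the device to stay set up while
 * a read/write may still be in flight.
 */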
static void net_vhost_user_event(void *opaque, int event)
{
    const char *name = opaque;
    NetClientState *ncs[MAX_QUEUE_NUM];
    VhostUserState *s;
    Chardev *chr;
    Error *err = NULL;
    int queues;

    queues = qemu_find_net_clients_except(name, ncs,
                                          NET_CLIENT_DRIVER_NIC,
                                          MAX_QUEUE_NUM);
    assert(queues < MAX_QUEUE_NUM);

    s = DO_UPCAST(VhostUserState, nc, ncs[0]);
    chr = qemu_chr_fe_get_driver(&s->chr);
    trace_vhost_user_event(chr->label, event);
    switch (event) {
    case CHR_EVENT_OPENED:
        if (vhost_user_start(queues, ncs, &s->chr) < 0) {
            qemu_chr_fe_disconnect(&s->chr);
            return;
        }
        s->watch = qemu_chr_fe_add_watch(&s->chr, G_IO_HUP,
                                         net_vhost_user_watch, s);
        qmp_set_link(name, true, &err);
        s->started = true;
        break;
    case CHR_EVENT_CLOSED:
        /* a close event may happen during a read/write, but vhost
         * code assumes the vhost_dev remains setup, so delay the
         * stop & clear to idle.
         * FIXME: better handle failure in vhost code, remove bh
         */
        if (s->watch) {
            AioContext *ctx = qemu_get_current_aio_context();

            g_source_remove(s->watch);
            s->watch = 0;
            qemu_chr_fe_set_handlers(&s->chr, NULL, NULL, NULL,
                                     NULL, NULL, false);

            aio_bh_schedule_oneshot(ctx, chr_closed_bh, opaque);
        }
        break;
    }

    if (err) {
        error_report_err(err);
    }
}
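/*
 * Create one NetClientState per requested queue, attach the chardev to the
 * first one, then block until the backend has connected and vhost has been
 * started at least once.
 */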
static int net_vhost_user_init(NetClientState *peer, const char *device,
                               const char *name, Chardev *chr,
                               int queues)
{
    Error *err = NULL;
    NetClientState *nc, *nc0 = NULL;
    VhostUserState *s;
    int i;

    assert(name);
    assert(queues > 0);

    for (i = 0; i < queues; i++) {
        nc = qemu_new_net_client(&net_vhost_user_info, peer, device, name);
        snprintf(nc->info_str, sizeof(nc->info_str), "vhost-user%d to %s",
                 i, chr->label);
        nc->queue_index = i;
        if (!nc0) {
            nc0 = nc;
            s = DO_UPCAST(VhostUserState, nc, nc);
            if (!qemu_chr_fe_init(&s->chr, chr, &err)) {
                error_report_err(err);
                return -1;
            }
        }
    }

    s = DO_UPCAST(VhostUserState, nc, nc0);
    do {
        if (qemu_chr_fe_wait_connected(&s->chr, &err) < 0) {
            error_report_err(err);
            return -1;
        }
        qemu_chr_fe_set_handlers(&s->chr, NULL, NULL,
                                 net_vhost_user_event, nc0->name, NULL, true);
    } while (!s->started);

    assert(s->vhost_net);

    return 0;
}
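/*
 * Look up the chardev named in the -netdev options and make sure it offers
 * what vhost-user needs: reconnect support and file-descriptor passing.
 */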
static Chardev *net_vhost_claim_chardev(
    const NetdevVhostUserOptions *opts, Error **errp)
{
    Chardev *chr = qemu_chr_find(opts->chardev);

    if (chr == NULL) {
        error_setg(errp, "chardev \"%s\" not found", opts->chardev);
        return NULL;
    }

    if (!qemu_chr_has_feature(chr, QEMU_CHAR_FEATURE_RECONNECTABLE)) {
        error_setg(errp, "chardev \"%s\" is not reconnectable",
                   opts->chardev);
        return NULL;
    }
    if (!qemu_chr_has_feature(chr, QEMU_CHAR_FEATURE_FD_PASS)) {
        error_setg(errp, "chardev \"%s\" does not support FD passing",
                   opts->chardev);
        return NULL;
    }

    return chr;
}
static int net_vhost_check_net(void *opaque, QemuOpts *opts, Error **errp)
{
    const char *name = opaque;
    const char *driver, *netdev;

    driver = qemu_opt_get(opts, "driver");
    netdev = qemu_opt_get(opts, "netdev");

    if (!driver || !netdev) {
        return 0;
    }

    if (strcmp(netdev, name) == 0 &&
        !g_str_has_prefix(driver, "virtio-net-")) {
        error_setg(errp, "vhost-user requires frontend driver virtio-net-*");
        return -1;
    }

    return 0;
}
int net_init_vhost_user(const Netdev *netdev, const char *name,
                        NetClientState *peer, Error **errp)
{
    int queues;
    const NetdevVhostUserOptions *vhost_user_opts;
    Chardev *chr;

    assert(netdev->type == NET_CLIENT_DRIVER_VHOST_USER);
    vhost_user_opts = &netdev->u.vhost_user;

    chr = net_vhost_claim_chardev(vhost_user_opts, errp);
    if (!chr) {
        return -1;
    }

    /* verify net frontend */
    if (qemu_opts_foreach(qemu_find_opts("device"), net_vhost_check_net,
                          (char *)name, errp)) {
        return -1;
    }

    queues = vhost_user_opts->has_queues ? vhost_user_opts->queues : 1;
    if (queues < 1 || queues > MAX_QUEUE_NUM) {
        error_setg(errp,
                   "vhost-user number of queues must be in range [1, %d]",
                   MAX_QUEUE_NUM);
        return -1;
    }

    return net_vhost_user_init(peer, "vhost_user", name, chr, queues);
}