net/vhost-user.c

/*
 * vhost-user.c
 *
 * Copyright (c) 2013 Virtual Open Systems Sarl.
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 *
 */

#include "qemu/osdep.h"
#include "clients.h"
#include "net/vhost_net.h"
#include "net/vhost-user.h"
#include "hw/virtio/vhost-user.h"
#include "chardev/char-fe.h"
#include "qapi/error.h"
#include "qapi/qapi-commands-net.h"
#include "qemu/config-file.h"
#include "qemu/error-report.h"
#include "qemu/option.h"
#include "trace.h"
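
/*
 * Per-queue state of the vhost-user net client.  The CharBackend is only
 * used on queue index 0; the VhostUserState is shared by all queues;
 * acked_features caches the features acked by the backend so they remain
 * available after the vhost_net instance has been torn down.
 */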
typedef struct NetVhostUserState {
    NetClientState nc;
    CharBackend chr; /* only queue index 0 */
    VhostUserState *vhost_user;
    VHostNetState *vhost_net;
    guint watch;
    uint64_t acked_features;
    bool started;
} NetVhostUserState;

VHostNetState *vhost_user_get_vhost_net(NetClientState *nc)
{
    NetVhostUserState *s = DO_UPCAST(NetVhostUserState, nc, nc);
    assert(nc->info->type == NET_CLIENT_DRIVER_VHOST_USER);
    return s->vhost_net;
}

uint64_t vhost_user_get_acked_features(NetClientState *nc)
{
    NetVhostUserState *s = DO_UPCAST(NetVhostUserState, nc, nc);
    assert(nc->info->type == NET_CLIENT_DRIVER_VHOST_USER);
    return s->acked_features;
}
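
/*
 * Copy the features acked by the backend from the vhost_net instance into
 * NetVhostUserState, so they are still available once the instance has
 * been cleaned up.
 */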
void vhost_user_save_acked_features(NetClientState *nc)
{
    NetVhostUserState *s;

    s = DO_UPCAST(NetVhostUserState, nc, nc);
    if (s->vhost_net) {
        uint64_t features = vhost_net_get_acked_features(s->vhost_net);
        if (features) {
            s->acked_features = features;
        }
    }
}
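
/*
 * Stop the given queues: save the acked features of each one and clean up
 * its vhost_net instance (the instance itself stays allocated until the
 * net client is cleaned up or restarted).
 */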
static void vhost_user_stop(int queues, NetClientState *ncs[])
{
    int i;
    NetVhostUserState *s;

    for (i = 0; i < queues; i++) {
        assert(ncs[i]->info->type == NET_CLIENT_DRIVER_VHOST_USER);

        s = DO_UPCAST(NetVhostUserState, nc, ncs[i]);

        if (s->vhost_net) {
            vhost_user_save_acked_features(ncs[i]);
            vhost_net_cleanup(s->vhost_net);
        }
    }
}
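
/*
 * Bring up a vhost_net instance for every queue on top of the shared
 * vhost-user backend state.  The first queue determines how many queues
 * the backend supports; on any failure, the queues that were already
 * started are stopped again.
 */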
static int vhost_user_start(int queues, NetClientState *ncs[],
                            VhostUserState *be)
{
    VhostNetOptions options;
    struct vhost_net *net = NULL;
    NetVhostUserState *s;
    int max_queues;
    int i;

    options.backend_type = VHOST_BACKEND_TYPE_USER;

    for (i = 0; i < queues; i++) {
        assert(ncs[i]->info->type == NET_CLIENT_DRIVER_VHOST_USER);

        s = DO_UPCAST(NetVhostUserState, nc, ncs[i]);

        options.net_backend = ncs[i];
        options.opaque = be;
        options.busyloop_timeout = 0;
        options.nvqs = 2;
        net = vhost_net_init(&options);
        if (!net) {
            error_report("failed to init vhost_net for queue %d", i);
            goto err;
        }

        if (i == 0) {
            max_queues = vhost_net_get_max_queues(net);
            if (queues > max_queues) {
                error_report("you are asking more queues than supported: %d",
                             max_queues);
                goto err;
            }
        }

        if (s->vhost_net) {
            vhost_net_cleanup(s->vhost_net);
            g_free(s->vhost_net);
        }
        s->vhost_net = net;
    }

    return 0;

err:
    if (net) {
        vhost_net_cleanup(net);
        g_free(net);
    }
    vhost_user_stop(i, ncs);
    return -1;
}
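
/*
 * The data path is handled by the vhost-user backend, so the only packet
 * expected here is the 60-byte fake RARP generated after migration; the
 * backend is notified so it can broadcast it for guests without the
 * GUEST_ANNOUNCE capability.  Everything else is accepted and dropped.
 */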
static ssize_t vhost_user_receive(NetClientState *nc, const uint8_t *buf,
                                  size_t size)
{
    /* In case of RARP (message size is 60) notify the backend to send a
       fake RARP.  This fake RARP will be sent by the backend only for
       guests without the GUEST_ANNOUNCE capability.
     */
    if (size == 60) {
        NetVhostUserState *s = DO_UPCAST(NetVhostUserState, nc, nc);
        int r;
        static int display_rarp_failure = 1;
        char mac_addr[6];

        /* extract guest mac address from the RARP message */
        memcpy(mac_addr, &buf[6], 6);

        r = vhost_net_notify_migration_done(s->vhost_net, mac_addr);

        if ((r != 0) && (display_rarp_failure)) {
            fprintf(stderr,
                    "Vhost user backend fails to broadcast fake RARP\n");
            fflush(stderr);
            display_rarp_failure = 0;
        }
    }

    return size;
}
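
/*
 * Per-queue cleanup.  Queue index 0 additionally owns the chardev, the
 * HUP watch and the shared VhostUserState, so release those here as well.
 */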
static void net_vhost_user_cleanup(NetClientState *nc)
{
    NetVhostUserState *s = DO_UPCAST(NetVhostUserState, nc, nc);

    if (s->vhost_net) {
        vhost_net_cleanup(s->vhost_net);
        g_free(s->vhost_net);
        s->vhost_net = NULL;
    }
    if (nc->queue_index == 0) {
        if (s->watch) {
            g_source_remove(s->watch);
            s->watch = 0;
        }
        qemu_chr_fe_deinit(&s->chr, true);
        if (s->vhost_user) {
            vhost_user_cleanup(s->vhost_user);
            g_free(s->vhost_user);
            s->vhost_user = NULL;
        }
    }

    qemu_purge_queued_packets(nc);
}

static int vhost_user_set_vnet_endianness(NetClientState *nc,
                                          bool enable)
{
    /* Nothing to do.  If the server supports
     * VHOST_USER_PROTOCOL_F_CROSS_ENDIAN, it will get the
     * vnet header endianness from there.  If it doesn't, negotiation
     * fails.
     */
    return 0;
}

static bool vhost_user_has_vnet_hdr(NetClientState *nc)
{
    assert(nc->info->type == NET_CLIENT_DRIVER_VHOST_USER);
    return true;
}

static bool vhost_user_has_ufo(NetClientState *nc)
{
    assert(nc->info->type == NET_CLIENT_DRIVER_VHOST_USER);
    return true;
}

static bool vhost_user_check_peer_type(NetClientState *nc, ObjectClass *oc,
                                       Error **errp)
{
    const char *driver = object_class_get_name(oc);

    if (!g_str_has_prefix(driver, "virtio-net-")) {
        error_setg(errp, "vhost-user requires frontend driver virtio-net-*");
        return false;
    }

    return true;
}
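
/* NetClientState callbacks for the vhost-user network backend. */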
static NetClientInfo net_vhost_user_info = {
    .type = NET_CLIENT_DRIVER_VHOST_USER,
    .size = sizeof(NetVhostUserState),
    .receive = vhost_user_receive,
    .cleanup = net_vhost_user_cleanup,
    .has_vnet_hdr = vhost_user_has_vnet_hdr,
    .has_ufo = vhost_user_has_ufo,
    .set_vnet_be = vhost_user_set_vnet_endianness,
    .set_vnet_le = vhost_user_set_vnet_endianness,
    .check_peer_type = vhost_user_check_peer_type,
};
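
/*
 * G_IO_HUP watch on the chardev: when the backend hangs up, force a
 * disconnect so that the CHR_EVENT_CLOSED handling below runs.
 */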
static gboolean net_vhost_user_watch(void *do_not_use, GIOCondition cond,
                                     void *opaque)
{
    NetVhostUserState *s = opaque;

    qemu_chr_fe_disconnect(&s->chr);

    return G_SOURCE_CONTINUE;
}

static void net_vhost_user_event(void *opaque, QEMUChrEvent event);
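
/*
 * Bottom half scheduled from the CHR_EVENT_CLOSED handling: save the acked
 * features of every queue, bring the link down and re-install the chardev
 * event handler so a later reconnect is noticed.
 */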
static void chr_closed_bh(void *opaque)
{
    const char *name = opaque;
    NetClientState *ncs[MAX_QUEUE_NUM];
    NetVhostUserState *s;
    Error *err = NULL;
    int queues, i;

    queues = qemu_find_net_clients_except(name, ncs,
                                          NET_CLIENT_DRIVER_NIC,
                                          MAX_QUEUE_NUM);
    assert(queues < MAX_QUEUE_NUM);

    s = DO_UPCAST(NetVhostUserState, nc, ncs[0]);

    for (i = queues - 1; i >= 0; i--) {
        vhost_user_save_acked_features(ncs[i]);
    }

    qmp_set_link(name, false, &err);

    qemu_chr_fe_set_handlers(&s->chr, NULL, NULL, net_vhost_user_event,
                             NULL, opaque, NULL, true);

    if (err) {
        error_report_err(err);
    }
}
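
/*
 * Chardev event handler: start the vhost_net instances when the backend
 * connects and defer the teardown to chr_closed_bh when it disconnects.
 */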
static void net_vhost_user_event(void *opaque, QEMUChrEvent event)
{
    const char *name = opaque;
    NetClientState *ncs[MAX_QUEUE_NUM];
    NetVhostUserState *s;
    Chardev *chr;
    Error *err = NULL;
    int queues;

    queues = qemu_find_net_clients_except(name, ncs,
                                          NET_CLIENT_DRIVER_NIC,
                                          MAX_QUEUE_NUM);
    assert(queues < MAX_QUEUE_NUM);

    s = DO_UPCAST(NetVhostUserState, nc, ncs[0]);
    chr = qemu_chr_fe_get_driver(&s->chr);
    trace_vhost_user_event(chr->label, event);
    switch (event) {
    case CHR_EVENT_OPENED:
        if (vhost_user_start(queues, ncs, s->vhost_user) < 0) {
            qemu_chr_fe_disconnect(&s->chr);
            return;
        }
        s->watch = qemu_chr_fe_add_watch(&s->chr, G_IO_HUP,
                                         net_vhost_user_watch, s);
        qmp_set_link(name, true, &err);
        s->started = true;
        break;
    case CHR_EVENT_CLOSED:
        /* a close event may happen during a read/write, but vhost
         * code assumes the vhost_dev remains setup, so delay the
         * stop & clear to idle.
         * FIXME: better handle failure in vhost code, remove bh
         */
        if (s->watch) {
            AioContext *ctx = qemu_get_current_aio_context();

            g_source_remove(s->watch);
            s->watch = 0;
            qemu_chr_fe_set_handlers(&s->chr, NULL, NULL, NULL, NULL,
                                     NULL, NULL, false);

            aio_bh_schedule_oneshot(ctx, chr_closed_bh, opaque);
        }
        break;
    case CHR_EVENT_BREAK:
    case CHR_EVENT_MUX_IN:
    case CHR_EVENT_MUX_OUT:
        /* Ignore */
        break;
    }

    if (err) {
        error_report_err(err);
    }
}
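
/*
 * Create one net client per queue, attach queue 0 to the chardev, then
 * keep waiting for the backend to connect until the first vhost_net
 * instance has been started.
 */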
static int net_vhost_user_init(NetClientState *peer, const char *device,
                               const char *name, Chardev *chr,
                               int queues)
{
    Error *err = NULL;
    NetClientState *nc, *nc0 = NULL;
    NetVhostUserState *s = NULL;
    VhostUserState *user;
    int i;

    assert(name);
    assert(queues > 0);

    user = g_new0(struct VhostUserState, 1);
    for (i = 0; i < queues; i++) {
        nc = qemu_new_net_client(&net_vhost_user_info, peer, device, name);
        qemu_set_info_str(nc, "vhost-user%d to %s", i, chr->label);
        nc->queue_index = i;
        if (!nc0) {
            nc0 = nc;
            s = DO_UPCAST(NetVhostUserState, nc, nc);
            if (!qemu_chr_fe_init(&s->chr, chr, &err) ||
                !vhost_user_init(user, &s->chr, &err)) {
                error_report_err(err);
                goto err;
            }
        }
        s = DO_UPCAST(NetVhostUserState, nc, nc);
        s->vhost_user = user;
    }

    s = DO_UPCAST(NetVhostUserState, nc, nc0);
    do {
        if (qemu_chr_fe_wait_connected(&s->chr, &err) < 0) {
            error_report_err(err);
            goto err;
        }
        qemu_chr_fe_set_handlers(&s->chr, NULL, NULL,
                                 net_vhost_user_event, NULL, nc0->name, NULL,
                                 true);
    } while (!s->started);

    assert(s->vhost_net);

    return 0;

err:
    if (user) {
        vhost_user_cleanup(user);
        g_free(user);
        if (s) {
            s->vhost_user = NULL;
        }
    }
    if (nc0) {
        qemu_del_net_client(nc0);
    }

    return -1;
}
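
/*
 * Look up the chardev named in the options and make sure it supports
 * reconnection and file descriptor passing, which vhost-user relies on.
 */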
static Chardev *net_vhost_claim_chardev(
    const NetdevVhostUserOptions *opts, Error **errp)
{
    Chardev *chr = qemu_chr_find(opts->chardev);

    if (chr == NULL) {
        error_setg(errp, "chardev \"%s\" not found", opts->chardev);
        return NULL;
    }

    if (!qemu_chr_has_feature(chr, QEMU_CHAR_FEATURE_RECONNECTABLE)) {
        error_setg(errp, "chardev \"%s\" is not reconnectable",
                   opts->chardev);
        return NULL;
    }
    if (!qemu_chr_has_feature(chr, QEMU_CHAR_FEATURE_FD_PASS)) {
        error_setg(errp, "chardev \"%s\" does not support FD passing",
                   opts->chardev);
        return NULL;
    }

    return chr;
}
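
/* Entry point for "-netdev vhost-user": validate the options and create
 * the net clients. */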
int net_init_vhost_user(const Netdev *netdev, const char *name,
                        NetClientState *peer, Error **errp)
{
    int queues;
    const NetdevVhostUserOptions *vhost_user_opts;
    Chardev *chr;

    assert(netdev->type == NET_CLIENT_DRIVER_VHOST_USER);
    vhost_user_opts = &netdev->u.vhost_user;

    chr = net_vhost_claim_chardev(vhost_user_opts, errp);
    if (!chr) {
        return -1;
    }

    queues = vhost_user_opts->has_queues ? vhost_user_opts->queues : 1;
    if (queues < 1 || queues > MAX_QUEUE_NUM) {
        error_setg(errp,
                   "vhost-user number of queues must be in range [1, %d]",
                   MAX_QUEUE_NUM);
        return -1;
    }

    return net_vhost_user_init(peer, "vhost_user", name, chr, queues);
}