hw/net/vhost_net.c
/*
 * vhost-net support
 *
 * Copyright Red Hat, Inc. 2010
 *
 * Authors:
 *  Michael S. Tsirkin <mst@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2. See
 * the COPYING file in the top-level directory.
 *
 * Contributions after 2012-01-13 are licensed under the terms of the
 * GNU GPL, version 2 or (at your option) any later version.
 */
#include "qemu/osdep.h"
#include "net/net.h"
#include "net/tap.h"
#include "net/vhost-user.h"
#include "net/vhost-vdpa.h"

#include "standard-headers/linux/vhost_types.h"
#include "hw/virtio/virtio-net.h"
#include "net/vhost_net.h"
#include "qapi/error.h"
#include "qemu/error-report.h"
#include "qemu/main-loop.h"

#include <sys/socket.h>
#include <net/if.h>
#include <netinet/in.h>

#include "standard-headers/linux/virtio_ring.h"
#include "hw/virtio/vhost.h"
#include "hw/virtio/virtio-bus.h"
#include "linux-headers/linux/vhost.h"
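
/*
 * Feature-bit tables: these list the virtio feature bits that may be
 * negotiated with each type of vhost backend.  They are consumed by the
 * vhost_net_get_features()/vhost_net_ack_features() wrappers below when
 * offering features to and acknowledging features from the guest.
 */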

/* Features supported by host kernel. */
static const int kernel_feature_bits[] = {
    VIRTIO_F_NOTIFY_ON_EMPTY,
    VIRTIO_RING_F_INDIRECT_DESC,
    VIRTIO_RING_F_EVENT_IDX,
    VIRTIO_NET_F_MRG_RXBUF,
    VIRTIO_F_VERSION_1,
    VIRTIO_NET_F_MTU,
    VIRTIO_F_IOMMU_PLATFORM,
    VIRTIO_F_RING_PACKED,
    VIRTIO_F_RING_RESET,
    VIRTIO_NET_F_HASH_REPORT,
    VHOST_INVALID_FEATURE_BIT
};

/* Features supported by others. */
static const int user_feature_bits[] = {
    VIRTIO_F_NOTIFY_ON_EMPTY,
    VIRTIO_RING_F_INDIRECT_DESC,
    VIRTIO_RING_F_EVENT_IDX,

    VIRTIO_F_ANY_LAYOUT,
    VIRTIO_F_VERSION_1,
    VIRTIO_NET_F_CSUM,
    VIRTIO_NET_F_GUEST_CSUM,
    VIRTIO_NET_F_GSO,
    VIRTIO_NET_F_GUEST_TSO4,
    VIRTIO_NET_F_GUEST_TSO6,
    VIRTIO_NET_F_GUEST_ECN,
    VIRTIO_NET_F_GUEST_UFO,
    VIRTIO_NET_F_HOST_TSO4,
    VIRTIO_NET_F_HOST_TSO6,
    VIRTIO_NET_F_HOST_ECN,
    VIRTIO_NET_F_HOST_UFO,
    VIRTIO_NET_F_MRG_RXBUF,
    VIRTIO_NET_F_MTU,
    VIRTIO_F_IOMMU_PLATFORM,
    VIRTIO_F_RING_PACKED,
    VIRTIO_F_RING_RESET,
    VIRTIO_NET_F_RSS,
    VIRTIO_NET_F_HASH_REPORT,
    VIRTIO_NET_F_GUEST_USO4,
    VIRTIO_NET_F_GUEST_USO6,
    VIRTIO_NET_F_HOST_USO,

    /* This bit implies RARP isn't sent by QEMU out of band */
    VIRTIO_NET_F_GUEST_ANNOUNCE,

    VIRTIO_NET_F_MQ,

    VHOST_INVALID_FEATURE_BIT
};
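
/*
 * Return the feature-bit table matching the backend behind this
 * vhost_net instance: the kernel table for tap, the user table for
 * vhost-user and, when built with vDPA support, the vdpa table for
 * vhost-vdpa.
 */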
static const int *vhost_net_get_feature_bits(struct vhost_net *net)
{
    const int *feature_bits = 0;

    switch (net->nc->info->type) {
    case NET_CLIENT_DRIVER_TAP:
        feature_bits = kernel_feature_bits;
        break;
    case NET_CLIENT_DRIVER_VHOST_USER:
        feature_bits = user_feature_bits;
        break;
#ifdef CONFIG_VHOST_NET_VDPA
    case NET_CLIENT_DRIVER_VHOST_VDPA:
        feature_bits = vdpa_feature_bits;
        break;
#endif
    default:
        error_report("Feature bits not defined for this type: %d",
                     net->nc->info->type);
        break;
    }

    return feature_bits;
}

uint64_t vhost_net_get_features(struct vhost_net *net, uint64_t features)
{
    return vhost_get_features(&net->dev, vhost_net_get_feature_bits(net),
                              features);
}

int vhost_net_get_config(struct vhost_net *net, uint8_t *config,
                         uint32_t config_len)
{
    return vhost_dev_get_config(&net->dev, config, config_len, NULL);
}

int vhost_net_set_config(struct vhost_net *net, const uint8_t *data,
                         uint32_t offset, uint32_t size, uint32_t flags)
{
    return vhost_dev_set_config(&net->dev, data, offset, size, flags);
}
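
/*
 * Record the features the guest has acknowledged for this backend.
 * acked_features starts from dev.backend_features (e.g. the
 * VHOST_NET_F_VIRTIO_NET_HDR bit set up in vhost_net_init()) and is then
 * extended by vhost_ack_features() with the guest-acked bits that appear
 * in this backend's feature-bit table.
 */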
void vhost_net_ack_features(struct vhost_net *net, uint64_t features)
{
    net->dev.acked_features = net->dev.backend_features;
    vhost_ack_features(&net->dev, vhost_net_get_feature_bits(net), features);
}

uint64_t vhost_net_get_max_queues(VHostNetState *net)
{
    return net->dev.max_queues;
}

uint64_t vhost_net_get_acked_features(VHostNetState *net)
{
    return net->dev.acked_features;
}

void vhost_net_save_acked_features(NetClientState *nc)
{
#ifdef CONFIG_VHOST_NET_USER
    if (nc->info->type == NET_CLIENT_DRIVER_VHOST_USER) {
        vhost_user_save_acked_features(nc);
    }
#endif
}
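
/*
 * The in-kernel vhost-net backend needs a tap-like net backend that can
 * expose a file descriptor; anything else is rejected here.
 */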
static int vhost_net_get_fd(NetClientState *backend)
{
    switch (backend->info->type) {
    case NET_CLIENT_DRIVER_TAP:
        return tap_get_fd(backend);
    default:
        fprintf(stderr, "vhost-net requires tap backend\n");
        return -ENOSYS;
    }
}
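
/*
 * Allocate and initialise a vhost_net instance for the given net backend.
 * For the kernel backend this grabs the tap fd and derives the backend
 * features; for other backends it records which queue pair this instance
 * will drive via dev.vq_index.  vhost_dev_init() then sets up the
 * underlying vhost device and fills in dev.features.
 */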
struct vhost_net *vhost_net_init(VhostNetOptions *options)
{
    int r;
    bool backend_kernel = options->backend_type == VHOST_BACKEND_TYPE_KERNEL;
    struct vhost_net *net = g_new0(struct vhost_net, 1);
    uint64_t features = 0;
    Error *local_err = NULL;

    if (!options->net_backend) {
        fprintf(stderr, "vhost-net requires net backend to be setup\n");
        goto fail;
    }
    net->nc = options->net_backend;
    net->dev.nvqs = options->nvqs;

    net->dev.max_queues = 1;
    net->dev.vqs = net->vqs;

    if (backend_kernel) {
        r = vhost_net_get_fd(options->net_backend);
        if (r < 0) {
            goto fail;
        }
        net->dev.backend_features = qemu_has_vnet_hdr(options->net_backend)
            ? 0 : (1ULL << VHOST_NET_F_VIRTIO_NET_HDR);
        net->backend = r;
        net->dev.protocol_features = 0;
    } else {
        net->dev.backend_features = 0;
        net->dev.protocol_features = 0;
        net->backend = -1;

        /* vhost-user needs vq_index to initiate a specific queue pair */
        net->dev.vq_index = net->nc->queue_index * net->dev.nvqs;
    }

    r = vhost_dev_init(&net->dev, options->opaque,
                       options->backend_type, options->busyloop_timeout,
                       &local_err);
    if (r < 0) {
        error_report_err(local_err);
        goto fail;
    }
    if (backend_kernel) {
        if (!qemu_has_vnet_hdr_len(options->net_backend,
                                   sizeof(struct virtio_net_hdr_mrg_rxbuf))) {
            net->dev.features &= ~(1ULL << VIRTIO_NET_F_MRG_RXBUF);
        }
        if (~net->dev.features & net->dev.backend_features) {
            fprintf(stderr, "vhost lacks feature mask 0x%" PRIx64
                    " for backend\n",
                    (uint64_t)(~net->dev.features & net->dev.backend_features));
            goto fail;
        }
    }

    /* Set sane init value. Override when guest acks. */
#ifdef CONFIG_VHOST_NET_USER
    if (net->nc->info->type == NET_CLIENT_DRIVER_VHOST_USER) {
        features = vhost_user_get_acked_features(net->nc);
        if (~net->dev.features & features) {
            fprintf(stderr, "vhost lacks feature mask 0x%" PRIx64
                    " for backend\n",
                    (uint64_t)(~net->dev.features & features));
            goto fail;
        }
    }
#endif

    vhost_net_ack_features(net, features);

    return net;

fail:
    vhost_dev_cleanup(&net->dev);
    g_free(net);
    return NULL;
}

static void vhost_net_set_vq_index(struct vhost_net *net, int vq_index,
                                   int vq_index_end)
{
    net->dev.vq_index = vq_index;
    net->dev.vq_index_end = vq_index_end;
}
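
/*
 * Start one vhost_net instance for a virtio-net device: run the net
 * client's start hook if present, hand the virtqueue kick notifiers over
 * to the vhost device, start the vhost device, stop QEMU's own polling
 * of the backend and, for tap, attach the backend fd to every enabled
 * virtqueue of this instance.  On failure, the steps already taken are
 * unwound in reverse order.
 */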
static int vhost_net_start_one(struct vhost_net *net,
                               VirtIODevice *dev)
{
    struct vhost_vring_file file = { };
    int r;

    if (net->nc->info->start) {
        r = net->nc->info->start(net->nc);
        if (r < 0) {
            return r;
        }
    }

    r = vhost_dev_enable_notifiers(&net->dev, dev);
    if (r < 0) {
        goto fail_notifiers;
    }

    r = vhost_dev_start(&net->dev, dev, false);
    if (r < 0) {
        goto fail_start;
    }

    if (net->nc->info->poll) {
        net->nc->info->poll(net->nc, false);
    }

    if (net->nc->info->type == NET_CLIENT_DRIVER_TAP) {
        qemu_set_fd_handler(net->backend, NULL, NULL, NULL);
        file.fd = net->backend;
        for (file.index = 0; file.index < net->dev.nvqs; ++file.index) {
            if (!virtio_queue_enabled(dev, net->dev.vq_index +
                                      file.index)) {
                /* Queue might not be ready for start */
                continue;
            }
            r = vhost_net_set_backend(&net->dev, &file);
            if (r < 0) {
                r = -errno;
                goto fail;
            }
        }
    }

    if (net->nc->info->load) {
        r = net->nc->info->load(net->nc);
        if (r < 0) {
            goto fail;
        }
    }
    return 0;
fail:
    file.fd = -1;
    if (net->nc->info->type == NET_CLIENT_DRIVER_TAP) {
        while (file.index-- > 0) {
            if (!virtio_queue_enabled(dev, net->dev.vq_index +
                                      file.index)) {
                /* Queue might not be ready for start */
                continue;
            }
            int ret = vhost_net_set_backend(&net->dev, &file);
            assert(ret >= 0);
        }
    }
    if (net->nc->info->poll) {
        net->nc->info->poll(net->nc, true);
    }
    vhost_dev_stop(&net->dev, dev, false);
fail_start:
    vhost_dev_disable_notifiers(&net->dev, dev);
fail_notifiers:
    return r;
}
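
/*
 * Tear down one vhost_net instance: detach the backend from every
 * virtqueue (tap only), re-enable QEMU's polling of the net client,
 * stop the vhost device and give the notifiers back to QEMU.
 */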
static void vhost_net_stop_one(struct vhost_net *net,
                               VirtIODevice *dev)
{
    struct vhost_vring_file file = { .fd = -1 };

    if (net->nc->info->type == NET_CLIENT_DRIVER_TAP) {
        for (file.index = 0; file.index < net->dev.nvqs; ++file.index) {
            int r = vhost_net_set_backend(&net->dev, &file);
            assert(r >= 0);
        }
    }
    if (net->nc->info->poll) {
        net->nc->info->poll(net->nc, true);
    }
    vhost_dev_stop(&net->dev, dev, false);
    if (net->nc->info->stop) {
        net->nc->info->stop(net->nc);
    }
    vhost_dev_disable_notifiers(&net->dev, dev);
}
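
/*
 * Start vhost for a whole virtio-net device: one vhost_net instance per
 * data queue pair plus, if present, one for the control virtqueue.
 * Guest notifiers are bound once for all queues, then each instance is
 * started in turn; any failure rolls back the instances already started
 * and unbinds the notifiers again.
 */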
int vhost_net_start(VirtIODevice *dev, NetClientState *ncs,
                    int data_queue_pairs, int cvq)
{
    BusState *qbus = BUS(qdev_get_parent_bus(DEVICE(dev)));
    VirtioBusState *vbus = VIRTIO_BUS(qbus);
    VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(vbus);
    int total_notifiers = data_queue_pairs * 2 + cvq;
    VirtIONet *n = VIRTIO_NET(dev);
    int nvhosts = data_queue_pairs + cvq;
    struct vhost_net *net;
    int r, e, i, index_end = data_queue_pairs * 2;
    NetClientState *peer;

    if (cvq) {
        index_end += 1;
    }

    if (!k->set_guest_notifiers) {
        error_report("binding does not support guest notifiers");
        return -ENOSYS;
    }

    for (i = 0; i < nvhosts; i++) {

        if (i < data_queue_pairs) {
            peer = qemu_get_peer(ncs, i);
        } else { /* Control Virtqueue */
            peer = qemu_get_peer(ncs, n->max_queue_pairs);
        }

        net = get_vhost_net(peer);
        vhost_net_set_vq_index(net, i * 2, index_end);

        /* Suppress the masking guest notifiers on vhost user
         * because vhost user doesn't interrupt masking/unmasking
         * properly.
         */
        if (net->nc->info->type == NET_CLIENT_DRIVER_VHOST_USER) {
            dev->use_guest_notifier_mask = false;
        }
    }

    r = k->set_guest_notifiers(qbus->parent, total_notifiers, true);
    if (r < 0) {
        error_report("Error binding guest notifier: %d", -r);
        goto err;
    }

    for (i = 0; i < nvhosts; i++) {
        if (i < data_queue_pairs) {
            peer = qemu_get_peer(ncs, i);
        } else {
            peer = qemu_get_peer(ncs, n->max_queue_pairs);
        }

        if (peer->vring_enable) {
            /* restore vring enable state */
            r = vhost_set_vring_enable(peer, peer->vring_enable);

            if (r < 0) {
                goto err_start;
            }
        }

        r = vhost_net_start_one(get_vhost_net(peer), dev);
        if (r < 0) {
            goto err_start;
        }
    }

    return 0;

err_start:
    while (--i >= 0) {
        peer = qemu_get_peer(ncs, i < data_queue_pairs ?
                             i : n->max_queue_pairs);
        vhost_net_stop_one(get_vhost_net(peer), dev);
    }
    e = k->set_guest_notifiers(qbus->parent, total_notifiers, false);
    if (e < 0) {
        fprintf(stderr, "vhost guest notifier cleanup failed: %d\n", e);
        fflush(stderr);
    }
err:
    return r;
}
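
/*
 * Stop vhost for a whole virtio-net device: stop the instance behind
 * every data queue pair (and the control virtqueue, if any), then unbind
 * the guest notifiers.
 */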
void vhost_net_stop(VirtIODevice *dev, NetClientState *ncs,
                    int data_queue_pairs, int cvq)
{
    BusState *qbus = BUS(qdev_get_parent_bus(DEVICE(dev)));
    VirtioBusState *vbus = VIRTIO_BUS(qbus);
    VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(vbus);
    VirtIONet *n = VIRTIO_NET(dev);
    NetClientState *peer;
    int total_notifiers = data_queue_pairs * 2 + cvq;
    int nvhosts = data_queue_pairs + cvq;
    int i, r;

    for (i = 0; i < nvhosts; i++) {
        if (i < data_queue_pairs) {
            peer = qemu_get_peer(ncs, i);
        } else {
            peer = qemu_get_peer(ncs, n->max_queue_pairs);
        }
        vhost_net_stop_one(get_vhost_net(peer), dev);
    }

    r = k->set_guest_notifiers(qbus->parent, total_notifiers, false);
    if (r < 0) {
        fprintf(stderr, "vhost guest notifier cleanup failed: %d\n", r);
        fflush(stderr);
    }
    assert(r >= 0);
}

void vhost_net_cleanup(struct vhost_net *net)
{
    vhost_dev_cleanup(&net->dev);
}

int vhost_net_notify_migration_done(struct vhost_net *net, char* mac_addr)
{
    const VhostOps *vhost_ops = net->dev.vhost_ops;

    assert(vhost_ops->backend_type == VHOST_BACKEND_TYPE_USER);
    assert(vhost_ops->vhost_migration_done);

    return vhost_ops->vhost_migration_done(&net->dev, mac_addr);
}

bool vhost_net_virtqueue_pending(VHostNetState *net, int idx)
{
    return vhost_virtqueue_pending(&net->dev, idx);
}

void vhost_net_virtqueue_mask(VHostNetState *net, VirtIODevice *dev,
                              int idx, bool mask)
{
    vhost_virtqueue_mask(&net->dev, dev, idx, mask);
}

bool vhost_net_config_pending(VHostNetState *net)
{
    return vhost_config_pending(&net->dev);
}

void vhost_net_config_mask(VHostNetState *net, VirtIODevice *dev, bool mask)
{
    vhost_config_mask(&net->dev, dev, mask);
}
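
/*
 * Map a net client to its vhost_net state, if any.  Tap backends may
 * legitimately have none (e.g. when created with vhost=off), while
 * vhost-user and vhost-vdpa clients are expected to always have one.
 */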
VHostNetState *get_vhost_net(NetClientState *nc)
{
    VHostNetState *vhost_net = 0;

    if (!nc) {
        return 0;
    }

    switch (nc->info->type) {
    case NET_CLIENT_DRIVER_TAP:
        vhost_net = tap_get_vhost_net(nc);
        /*
         * tap_get_vhost_net() can return NULL if a tap net-device backend is
         * created with 'vhost=off' option, 'vhostforce=off' or no vhost or
         * vhostforce or vhostfd options at all. Please see net_init_tap_one().
         * Hence, we omit the assertion here.
         */
        break;
#ifdef CONFIG_VHOST_NET_USER
    case NET_CLIENT_DRIVER_VHOST_USER:
        vhost_net = vhost_user_get_vhost_net(nc);
        assert(vhost_net);
        break;
#endif
#ifdef CONFIG_VHOST_NET_VDPA
    case NET_CLIENT_DRIVER_VHOST_VDPA:
        vhost_net = vhost_vdpa_get_vhost_net(nc);
        assert(vhost_net);
        break;
#endif
    default:
        break;
    }

    return vhost_net;
}
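
/*
 * Remember the requested vring enable state on the net client and, if
 * the vhost backend implements vhost_set_vring_enable, forward the
 * request to it as well.
 */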
int vhost_set_vring_enable(NetClientState *nc, int enable)
{
    VHostNetState *net = get_vhost_net(nc);
    const VhostOps *vhost_ops = net->dev.vhost_ops;

    nc->vring_enable = enable;

    if (vhost_ops && vhost_ops->vhost_set_vring_enable) {
        return vhost_ops->vhost_set_vring_enable(&net->dev, enable);
    }

    return 0;
}

int vhost_net_set_mtu(struct vhost_net *net, uint16_t mtu)
{
    const VhostOps *vhost_ops = net->dev.vhost_ops;

    if (!vhost_ops->vhost_net_set_mtu) {
        return 0;
    }

    return vhost_ops->vhost_net_set_mtu(&net->dev, mtu);
}
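
/*
 * Stop a single virtqueue of a started vhost_net instance, as used by
 * per-queue reset.  For tap the ring is first detached from the backend
 * (fd = -1), then the vhost virtqueue itself is stopped.
 */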
void vhost_net_virtqueue_reset(VirtIODevice *vdev, NetClientState *nc,
                               int vq_index)
{
    VHostNetState *net = get_vhost_net(nc->peer);
    const VhostOps *vhost_ops = net->dev.vhost_ops;
    struct vhost_vring_file file = { .fd = -1 };
    int idx;

    /* should only be called after backend is connected */
    assert(vhost_ops);

    idx = vhost_ops->vhost_get_vq_index(&net->dev, vq_index);

    if (net->nc->info->type == NET_CLIENT_DRIVER_TAP) {
        file.index = idx;
        int r = vhost_net_set_backend(&net->dev, &file);
        assert(r >= 0);
    }

    vhost_virtqueue_stop(&net->dev,
                         vdev,
                         net->dev.vqs + idx,
                         net->dev.vq_index + idx);
}
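
/*
 * Restart a single virtqueue after a per-queue reset: start the vhost
 * virtqueue again and, for tap, re-attach the backend fd to it.  On
 * failure the ring is unbound again and the vhost device is stopped.
 */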
int vhost_net_virtqueue_restart(VirtIODevice *vdev, NetClientState *nc,
                                int vq_index)
{
    VHostNetState *net = get_vhost_net(nc->peer);
    const VhostOps *vhost_ops = net->dev.vhost_ops;
    struct vhost_vring_file file = { };
    int idx, r;

    if (!net->dev.started) {
        return -EBUSY;
    }

    /* should only be called after backend is connected */
    assert(vhost_ops);

    idx = vhost_ops->vhost_get_vq_index(&net->dev, vq_index);

    r = vhost_virtqueue_start(&net->dev,
                              vdev,
                              net->dev.vqs + idx,
                              net->dev.vq_index + idx);
    if (r < 0) {
        goto err_start;
    }

    if (net->nc->info->type == NET_CLIENT_DRIVER_TAP) {
        file.index = idx;
        file.fd = net->backend;
        r = vhost_net_set_backend(&net->dev, &file);
        if (r < 0) {
            r = -errno;
            goto err_start;
        }
    }

    return 0;

err_start:
    error_report("Error when restarting the queue.");

    if (net->nc->info->type == NET_CLIENT_DRIVER_TAP) {
        file.fd = VHOST_FILE_UNBIND;
        file.index = idx;
        int ret = vhost_net_set_backend(&net->dev, &file);
        assert(ret >= 0);
    }

    vhost_dev_stop(&net->dev, vdev, false);

    return r;
}