backends/cryptodev-vhost.c
/*
 * QEMU Cryptodev backend for QEMU cipher APIs
 *
 * Copyright (c) 2016 HUAWEI TECHNOLOGIES CO., LTD.
 *
 * Authors:
 *    Gonglei <arei.gonglei@huawei.com>
 *    Jay Zhou <jianjay.zhou@huawei.com>
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "hw/virtio/virtio-bus.h"
#include "sysemu/cryptodev-vhost.h"

#ifdef CONFIG_VHOST_CRYPTO
#include "qapi/error.h"
#include "qemu/error-report.h"
#include "hw/virtio/virtio-crypto.h"
#include "sysemu/cryptodev-vhost-user.h"

uint64_t
cryptodev_vhost_get_max_queues(
                        CryptoDevBackendVhost *crypto)
{
    return crypto->dev.max_queues;
}

void cryptodev_vhost_cleanup(CryptoDevBackendVhost *crypto)
{
    vhost_dev_cleanup(&crypto->dev);
    g_free(crypto);
}

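/*
 * Allocate and initialise the vhost device state for a single queue pair.
 * On vhost_dev_init() failure the error is reported and NULL is returned.
 */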
struct CryptoDevBackendVhost *
cryptodev_vhost_init(
             CryptoDevBackendVhostOptions *options)
{
    int r;
    CryptoDevBackendVhost *crypto;
    Error *local_err = NULL;

    crypto = g_new(CryptoDevBackendVhost, 1);
    crypto->dev.max_queues = 1;
    crypto->dev.nvqs = 1;
    crypto->dev.vqs = crypto->vqs;

    crypto->cc = options->cc;

    crypto->dev.protocol_features = 0;
    crypto->backend = -1;

    /* vhost-user needs vq_index to initiate a specific queue pair */
    crypto->dev.vq_index = crypto->cc->queue_index * crypto->dev.nvqs;

    r = vhost_dev_init(&crypto->dev, options->opaque, options->backend_type, 0,
                       &local_err);
    if (r < 0) {
        error_report_err(local_err);
        goto fail;
    }

    return crypto;
fail:
    g_free(crypto);
    return NULL;
}

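/*
 * Enable host notifiers and start the vhost device for one queue pair,
 * rolling the notifiers back if the start fails.
 */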
static int
cryptodev_vhost_start_one(CryptoDevBackendVhost *crypto,
                                  VirtIODevice *dev)
{
    int r;

    crypto->dev.nvqs = 1;
    crypto->dev.vqs = crypto->vqs;

    r = vhost_dev_enable_notifiers(&crypto->dev, dev);
    if (r < 0) {
        goto fail_notifiers;
    }

    r = vhost_dev_start(&crypto->dev, dev, false);
    if (r < 0) {
        goto fail_start;
    }

    return 0;

fail_start:
    vhost_dev_disable_notifiers(&crypto->dev, dev);
fail_notifiers:
    return r;
}

static void
cryptodev_vhost_stop_one(CryptoDevBackendVhost *crypto,
                                 VirtIODevice *dev)
{
    vhost_dev_stop(&crypto->dev, dev, false);
    vhost_dev_disable_notifiers(&crypto->dev, dev);
}

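/*
 * Look up the vhost state behind a backend client.  Only vhost-user
 * clients carry one; every other client type yields NULL.
 */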
CryptoDevBackendVhost *
cryptodev_get_vhost(CryptoDevBackendClient *cc,
                            CryptoDevBackend *b,
                            uint16_t queue)
{
    CryptoDevBackendVhost *vhost_crypto = NULL;

    if (!cc) {
        return NULL;
    }

    switch (cc->type) {
#if defined(CONFIG_VHOST_USER) && defined(CONFIG_LINUX)
    case QCRYPTODEV_BACKEND_TYPE_VHOST_USER:
        vhost_crypto = cryptodev_vhost_user_get_vhost(cc, b, queue);
        break;
#endif
    default:
        break;
    }

    return vhost_crypto;
}

static void
cryptodev_vhost_set_vq_index(CryptoDevBackendVhost *crypto,
                                     int vq_index)
{
    crypto->dev.vq_index = vq_index;
}

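/*
 * Record the requested vring enable state on the client and, when the
 * client has a vhost device, forward it through the backend's vhost ops.
 */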
static int
vhost_set_vring_enable(CryptoDevBackendClient *cc,
                            CryptoDevBackend *b,
                            uint16_t queue, int enable)
{
    CryptoDevBackendVhost *crypto =
                       cryptodev_get_vhost(cc, b, queue);
    const VhostOps *vhost_ops;

    cc->vring_enable = enable;

    if (!crypto) {
        return 0;
    }

    vhost_ops = crypto->dev.vhost_ops;
    if (vhost_ops->vhost_set_vring_enable) {
        return vhost_ops->vhost_set_vring_enable(&crypto->dev, enable);
    }

    return 0;
}

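/*
 * Start vhost for every queue: bind guest notifiers on the virtio bus,
 * start each per-queue vhost device and restore any saved vring enable
 * state.  On failure, queues that were already started are stopped and
 * the guest notifiers are unbound again.
 */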
int cryptodev_vhost_start(VirtIODevice *dev, int total_queues)
{
    VirtIOCrypto *vcrypto = VIRTIO_CRYPTO(dev);
    BusState *qbus = BUS(qdev_get_parent_bus(DEVICE(dev)));
    VirtioBusState *vbus = VIRTIO_BUS(qbus);
    VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(vbus);
    int r, e;
    int i;
    CryptoDevBackend *b = vcrypto->cryptodev;
    CryptoDevBackendVhost *vhost_crypto;
    CryptoDevBackendClient *cc;

    if (!k->set_guest_notifiers) {
        error_report("binding does not support guest notifiers");
        return -ENOSYS;
    }

    for (i = 0; i < total_queues; i++) {
        cc = b->conf.peers.ccs[i];

        vhost_crypto = cryptodev_get_vhost(cc, b, i);
        cryptodev_vhost_set_vq_index(vhost_crypto, i);

        /* Suppress the masking guest notifiers on vhost user
         * because vhost user doesn't interrupt masking/unmasking
         * properly.
         */
        if (cc->type == QCRYPTODEV_BACKEND_TYPE_VHOST_USER) {
            dev->use_guest_notifier_mask = false;
        }
    }

    r = k->set_guest_notifiers(qbus->parent, total_queues, true);
    if (r < 0) {
        error_report("error binding guest notifier: %d", -r);
        goto err;
    }

    for (i = 0; i < total_queues; i++) {
        cc = b->conf.peers.ccs[i];

        vhost_crypto = cryptodev_get_vhost(cc, b, i);
        r = cryptodev_vhost_start_one(vhost_crypto, dev);

        if (r < 0) {
            goto err_start;
        }

        if (cc->vring_enable) {
            /* restore vring enable state */
            r = vhost_set_vring_enable(cc, b, i, cc->vring_enable);

            if (r < 0) {
                goto err_start;
            }
        }
    }

    return 0;

err_start:
    while (--i >= 0) {
        cc = b->conf.peers.ccs[i];
        vhost_crypto = cryptodev_get_vhost(cc, b, i);
        cryptodev_vhost_stop_one(vhost_crypto, dev);
    }
    e = k->set_guest_notifiers(qbus->parent, total_queues, false);
    if (e < 0) {
        error_report("vhost guest notifier cleanup failed: %d", e);
    }
err:
    return r;
}

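/* Stop vhost for every queue and unbind the guest notifiers. */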
void cryptodev_vhost_stop(VirtIODevice *dev, int total_queues)
{
    BusState *qbus = BUS(qdev_get_parent_bus(DEVICE(dev)));
    VirtioBusState *vbus = VIRTIO_BUS(qbus);
    VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(vbus);
    VirtIOCrypto *vcrypto = VIRTIO_CRYPTO(dev);
    CryptoDevBackend *b = vcrypto->cryptodev;
    CryptoDevBackendVhost *vhost_crypto;
    CryptoDevBackendClient *cc;
    size_t i;
    int r;

    for (i = 0; i < total_queues; i++) {
        cc = b->conf.peers.ccs[i];

        vhost_crypto = cryptodev_get_vhost(cc, b, i);
        cryptodev_vhost_stop_one(vhost_crypto, dev);
    }

    r = k->set_guest_notifiers(qbus->parent, total_queues, false);
    if (r < 0) {
        error_report("vhost guest notifier cleanup failed: %d", r);
    }
    assert(r >= 0);
}

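/* Mask or unmask the guest notifier for one virtqueue of a queue pair. */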
void cryptodev_vhost_virtqueue_mask(VirtIODevice *dev,
                                           int queue,
                                           int idx, bool mask)
{
    VirtIOCrypto *vcrypto = VIRTIO_CRYPTO(dev);
    CryptoDevBackend *b = vcrypto->cryptodev;
    CryptoDevBackendVhost *vhost_crypto;
    CryptoDevBackendClient *cc;

    assert(queue < MAX_CRYPTO_QUEUE_NUM);

    cc = b->conf.peers.ccs[queue];
    vhost_crypto = cryptodev_get_vhost(cc, b, queue);

    vhost_virtqueue_mask(&vhost_crypto->dev, dev, idx, mask);
}

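/* Report whether the vhost device has a pending notification for this virtqueue. */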
bool cryptodev_vhost_virtqueue_pending(VirtIODevice *dev,
                                              int queue, int idx)
{
    VirtIOCrypto *vcrypto = VIRTIO_CRYPTO(dev);
    CryptoDevBackend *b = vcrypto->cryptodev;
    CryptoDevBackendVhost *vhost_crypto;
    CryptoDevBackendClient *cc;

    assert(queue < MAX_CRYPTO_QUEUE_NUM);

    cc = b->conf.peers.ccs[queue];
    vhost_crypto = cryptodev_get_vhost(cc, b, queue);

    return vhost_virtqueue_pending(&vhost_crypto->dev, idx);
}

#else
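/* Stub implementations used when QEMU is built without CONFIG_VHOST_CRYPTO. */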
uint64_t
cryptodev_vhost_get_max_queues(CryptoDevBackendVhost *crypto)
{
    return 0;
}

void cryptodev_vhost_cleanup(CryptoDevBackendVhost *crypto)
{
}

struct CryptoDevBackendVhost *
cryptodev_vhost_init(CryptoDevBackendVhostOptions *options)
{
    return NULL;
}

CryptoDevBackendVhost *
cryptodev_get_vhost(CryptoDevBackendClient *cc,
                    CryptoDevBackend *b,
                    uint16_t queue)
{
    return NULL;
}

int cryptodev_vhost_start(VirtIODevice *dev, int total_queues)
{
    return -1;
}

void cryptodev_vhost_stop(VirtIODevice *dev, int total_queues)
{
}

void cryptodev_vhost_virtqueue_mask(VirtIODevice *dev,
                                    int queue,
                                    int idx, bool mask)
{
}

bool cryptodev_vhost_virtqueue_pending(VirtIODevice *dev,
                                       int queue, int idx)
{
    return false;
}
#endif