/*
 * QEMU Cryptodev backend for QEMU cipher APIs
 *
 * Copyright (c) 2016 HUAWEI TECHNOLOGIES CO., LTD.
 *
 * Authors:
 *    Gonglei <arei.gonglei@huawei.com>
 *    Jay Zhou <jianjay.zhou@huawei.com>
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
25 #include "qemu/osdep.h"
26 #include "hw/virtio/virtio-bus.h"
27 #include "sysemu/cryptodev-vhost.h"
29 #ifdef CONFIG_VHOST_CRYPTO
30 #include "qapi/error.h"
31 #include "qapi/qmp/qerror.h"
32 #include "qemu/error-report.h"
33 #include "hw/virtio/virtio-crypto.h"
34 #include "sysemu/cryptodev-vhost-user.h"
37 cryptodev_vhost_get_max_queues(
38 CryptoDevBackendVhost
*crypto
)
40 return crypto
->dev
.max_queues
;
43 void cryptodev_vhost_cleanup(CryptoDevBackendVhost
*crypto
)
45 vhost_dev_cleanup(&crypto
->dev
);
49 struct CryptoDevBackendVhost
*
51 CryptoDevBackendVhostOptions
*options
)
54 CryptoDevBackendVhost
*crypto
;
56 crypto
= g_new(CryptoDevBackendVhost
, 1);
57 crypto
->dev
.max_queues
= 1;
59 crypto
->dev
.vqs
= crypto
->vqs
;
61 crypto
->cc
= options
->cc
;
63 crypto
->dev
.protocol_features
= 0;
66 /* vhost-user needs vq_index to initiate a specific queue pair */
67 crypto
->dev
.vq_index
= crypto
->cc
->queue_index
* crypto
->dev
.nvqs
;
69 r
= vhost_dev_init(&crypto
->dev
, options
->opaque
, options
->backend_type
, 0);
81 cryptodev_vhost_start_one(CryptoDevBackendVhost
*crypto
,
87 crypto
->dev
.vqs
= crypto
->vqs
;
89 r
= vhost_dev_enable_notifiers(&crypto
->dev
, dev
);
94 r
= vhost_dev_start(&crypto
->dev
, dev
);
102 vhost_dev_disable_notifiers(&crypto
->dev
, dev
);
108 cryptodev_vhost_stop_one(CryptoDevBackendVhost
*crypto
,
111 vhost_dev_stop(&crypto
->dev
, dev
);
112 vhost_dev_disable_notifiers(&crypto
->dev
, dev
);
115 CryptoDevBackendVhost
*
116 cryptodev_get_vhost(CryptoDevBackendClient
*cc
,
120 CryptoDevBackendVhost
*vhost_crypto
= NULL
;
127 #if defined(CONFIG_VHOST_USER) && defined(CONFIG_LINUX)
128 case CRYPTODEV_BACKEND_TYPE_VHOST_USER
:
129 vhost_crypto
= cryptodev_vhost_user_get_vhost(cc
, b
, queue
);
140 cryptodev_vhost_set_vq_index(CryptoDevBackendVhost
*crypto
,
143 crypto
->dev
.vq_index
= vq_index
;
147 vhost_set_vring_enable(CryptoDevBackendClient
*cc
,
149 uint16_t queue
, int enable
)
151 CryptoDevBackendVhost
*crypto
=
152 cryptodev_get_vhost(cc
, b
, queue
);
153 const VhostOps
*vhost_ops
;
155 cc
->vring_enable
= enable
;
161 vhost_ops
= crypto
->dev
.vhost_ops
;
162 if (vhost_ops
->vhost_set_vring_enable
) {
163 return vhost_ops
->vhost_set_vring_enable(&crypto
->dev
, enable
);
169 int cryptodev_vhost_start(VirtIODevice
*dev
, int total_queues
)
171 VirtIOCrypto
*vcrypto
= VIRTIO_CRYPTO(dev
);
172 BusState
*qbus
= BUS(qdev_get_parent_bus(DEVICE(dev
)));
173 VirtioBusState
*vbus
= VIRTIO_BUS(qbus
);
174 VirtioBusClass
*k
= VIRTIO_BUS_GET_CLASS(vbus
);
177 CryptoDevBackend
*b
= vcrypto
->cryptodev
;
178 CryptoDevBackendVhost
*vhost_crypto
;
179 CryptoDevBackendClient
*cc
;
181 if (!k
->set_guest_notifiers
) {
182 error_report("binding does not support guest notifiers");
186 for (i
= 0; i
< total_queues
; i
++) {
187 cc
= b
->conf
.peers
.ccs
[i
];
189 vhost_crypto
= cryptodev_get_vhost(cc
, b
, i
);
190 cryptodev_vhost_set_vq_index(vhost_crypto
, i
);
192 /* Suppress the masking guest notifiers on vhost user
193 * because vhost user doesn't interrupt masking/unmasking
196 if (cc
->type
== CRYPTODEV_BACKEND_TYPE_VHOST_USER
) {
197 dev
->use_guest_notifier_mask
= false;
201 r
= k
->set_guest_notifiers(qbus
->parent
, total_queues
, true);
203 error_report("error binding guest notifier: %d", -r
);
207 for (i
= 0; i
< total_queues
; i
++) {
208 cc
= b
->conf
.peers
.ccs
[i
];
210 vhost_crypto
= cryptodev_get_vhost(cc
, b
, i
);
211 r
= cryptodev_vhost_start_one(vhost_crypto
, dev
);
217 if (cc
->vring_enable
) {
218 /* restore vring enable state */
219 r
= vhost_set_vring_enable(cc
, b
, i
, cc
->vring_enable
);
231 cc
= b
->conf
.peers
.ccs
[i
];
232 vhost_crypto
= cryptodev_get_vhost(cc
, b
, i
);
233 cryptodev_vhost_stop_one(vhost_crypto
, dev
);
235 e
= k
->set_guest_notifiers(qbus
->parent
, total_queues
, false);
237 error_report("vhost guest notifier cleanup failed: %d", e
);
243 void cryptodev_vhost_stop(VirtIODevice
*dev
, int total_queues
)
245 BusState
*qbus
= BUS(qdev_get_parent_bus(DEVICE(dev
)));
246 VirtioBusState
*vbus
= VIRTIO_BUS(qbus
);
247 VirtioBusClass
*k
= VIRTIO_BUS_GET_CLASS(vbus
);
248 VirtIOCrypto
*vcrypto
= VIRTIO_CRYPTO(dev
);
249 CryptoDevBackend
*b
= vcrypto
->cryptodev
;
250 CryptoDevBackendVhost
*vhost_crypto
;
251 CryptoDevBackendClient
*cc
;
255 for (i
= 0; i
< total_queues
; i
++) {
256 cc
= b
->conf
.peers
.ccs
[i
];
258 vhost_crypto
= cryptodev_get_vhost(cc
, b
, i
);
259 cryptodev_vhost_stop_one(vhost_crypto
, dev
);
262 r
= k
->set_guest_notifiers(qbus
->parent
, total_queues
, false);
264 error_report("vhost guest notifier cleanup failed: %d", r
);
269 void cryptodev_vhost_virtqueue_mask(VirtIODevice
*dev
,
273 VirtIOCrypto
*vcrypto
= VIRTIO_CRYPTO(dev
);
274 CryptoDevBackend
*b
= vcrypto
->cryptodev
;
275 CryptoDevBackendVhost
*vhost_crypto
;
276 CryptoDevBackendClient
*cc
;
278 assert(queue
< MAX_CRYPTO_QUEUE_NUM
);
280 cc
= b
->conf
.peers
.ccs
[queue
];
281 vhost_crypto
= cryptodev_get_vhost(cc
, b
, queue
);
283 vhost_virtqueue_mask(&vhost_crypto
->dev
, dev
, idx
, mask
);
286 bool cryptodev_vhost_virtqueue_pending(VirtIODevice
*dev
,
289 VirtIOCrypto
*vcrypto
= VIRTIO_CRYPTO(dev
);
290 CryptoDevBackend
*b
= vcrypto
->cryptodev
;
291 CryptoDevBackendVhost
*vhost_crypto
;
292 CryptoDevBackendClient
*cc
;
294 assert(queue
< MAX_CRYPTO_QUEUE_NUM
);
296 cc
= b
->conf
.peers
.ccs
[queue
];
297 vhost_crypto
= cryptodev_get_vhost(cc
, b
, queue
);
299 return vhost_virtqueue_pending(&vhost_crypto
->dev
, idx
);
304 cryptodev_vhost_get_max_queues(CryptoDevBackendVhost
*crypto
)
309 void cryptodev_vhost_cleanup(CryptoDevBackendVhost
*crypto
)
313 struct CryptoDevBackendVhost
*
314 cryptodev_vhost_init(CryptoDevBackendVhostOptions
*options
)
319 CryptoDevBackendVhost
*
320 cryptodev_get_vhost(CryptoDevBackendClient
*cc
,
327 int cryptodev_vhost_start(VirtIODevice
*dev
, int total_queues
)
332 void cryptodev_vhost_stop(VirtIODevice
*dev
, int total_queues
)
336 void cryptodev_vhost_virtqueue_mask(VirtIODevice
*dev
,
342 bool cryptodev_vhost_virtqueue_pending(VirtIODevice
*dev
,