crypto: inside-secure - fix gcc-4.9 warnings
drivers/crypto/inside-secure/safexcel_cipher.c
/*
 * Copyright (C) 2017 Marvell
 *
 * Antoine Tenart <antoine.tenart@free-electrons.com>
 *
 * This file is licensed under the terms of the GNU General Public
 * License version 2. This program is licensed "as is" without any
 * warranty of any kind, whether express or implied.
 */
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/dmapool.h>

#include <crypto/aes.h>
#include <crypto/skcipher.h>

#include "safexcel.h"
enum safexcel_cipher_direction {
	SAFEXCEL_ENCRYPT,
	SAFEXCEL_DECRYPT,
};
struct safexcel_cipher_ctx {
	struct safexcel_context base;
	struct safexcel_crypto_priv *priv;

	enum safexcel_cipher_direction direction;
	u32 mode;

	__le32 key[8];
	unsigned int key_len;
};
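
/*
 * Build the EIP197 instruction token for a cipher request: a single
 * "direction" instruction covering the whole payload. For CBC mode the IV is
 * copied into the command token area first and the 4-word IV option is set.
 */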
static void safexcel_cipher_token(struct safexcel_cipher_ctx *ctx,
				  struct crypto_async_request *async,
				  struct safexcel_command_desc *cdesc,
				  u32 length)
{
	struct skcipher_request *req = skcipher_request_cast(async);
	struct safexcel_token *token;
	unsigned offset = 0;

	if (ctx->mode == CONTEXT_CONTROL_CRYPTO_MODE_CBC) {
		offset = AES_BLOCK_SIZE / sizeof(u32);
		memcpy(cdesc->control_data.token, req->iv, AES_BLOCK_SIZE);

		cdesc->control_data.options |= EIP197_OPTION_4_TOKEN_IV_CMD;
	}

	token = (struct safexcel_token *)(cdesc->control_data.token + offset);

	token[0].opcode = EIP197_TOKEN_OPCODE_DIRECTION;
	token[0].packet_length = length;
	token[0].stat = EIP197_TOKEN_STAT_LAST_PACKET;
	token[0].instructions = EIP197_TOKEN_INS_LAST |
				EIP197_TOKEN_INS_TYPE_CRYTO |
				EIP197_TOKEN_INS_TYPE_OUTPUT;
}
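
/*
 * Expand and validate the AES key. If the new key differs from the one
 * already cached in the tfm context, mark the context for invalidation
 * before it is used again, then store the key words.
 */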
static int safexcel_aes_setkey(struct crypto_skcipher *ctfm, const u8 *key,
			       unsigned int len)
{
	struct crypto_tfm *tfm = crypto_skcipher_tfm(ctfm);
	struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
	struct crypto_aes_ctx aes;
	int ret, i;

	ret = crypto_aes_expand_key(&aes, key, len);
	if (ret) {
		crypto_skcipher_set_flags(ctfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
		return ret;
	}

	for (i = 0; i < len / sizeof(u32); i++) {
		if (ctx->key[i] != cpu_to_le32(aes.key_enc[i])) {
			ctx->base.needs_inv = true;
			break;
		}
	}

	for (i = 0; i < len / sizeof(u32); i++)
		ctx->key[i] = cpu_to_le32(aes.key_enc[i]);

	ctx->key_len = len;

	memzero_explicit(&aes, sizeof(aes));
	return 0;
}
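
/*
 * Fill in the context control words: crypto in/out direction, key enable,
 * the AES variant matching the key size, and the cipher mode.
 */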
static int safexcel_context_control(struct safexcel_cipher_ctx *ctx,
				    struct safexcel_command_desc *cdesc)
{
	struct safexcel_crypto_priv *priv = ctx->priv;
	int ctrl_size;

	if (ctx->direction == SAFEXCEL_ENCRYPT)
		cdesc->control_data.control0 |= CONTEXT_CONTROL_TYPE_CRYPTO_OUT;
	else
		cdesc->control_data.control0 |= CONTEXT_CONTROL_TYPE_CRYPTO_IN;

	cdesc->control_data.control0 |= CONTEXT_CONTROL_KEY_EN;
	cdesc->control_data.control1 |= ctx->mode;

	switch (ctx->key_len) {
	case AES_KEYSIZE_128:
		cdesc->control_data.control0 |= CONTEXT_CONTROL_CRYPTO_ALG_AES128;
		ctrl_size = 4;
		break;
	case AES_KEYSIZE_192:
		cdesc->control_data.control0 |= CONTEXT_CONTROL_CRYPTO_ALG_AES192;
		ctrl_size = 6;
		break;
	case AES_KEYSIZE_256:
		cdesc->control_data.control0 |= CONTEXT_CONTROL_CRYPTO_ALG_AES256;
		ctrl_size = 8;
		break;
	default:
		dev_err(priv->dev, "aes keysize not supported: %u\n",
			ctx->key_len);
		return -EINVAL;
	}
	cdesc->control_data.control0 |= CONTEXT_CONTROL_SIZE(ctrl_size);

	return 0;
}
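
/*
 * Result handler for a regular cipher request: walk the result descriptors,
 * report descriptor errors, unmap the source/destination scatterlists and
 * let the request complete.
 */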
static int safexcel_handle_result(struct safexcel_crypto_priv *priv, int ring,
				  struct crypto_async_request *async,
				  bool *should_complete, int *ret)
{
	struct skcipher_request *req = skcipher_request_cast(async);
	struct safexcel_result_desc *rdesc;
	int ndesc = 0;

	*ret = 0;

	spin_lock_bh(&priv->ring[ring].egress_lock);
	do {
		rdesc = safexcel_ring_next_rptr(priv, &priv->ring[ring].rdr);
		if (IS_ERR(rdesc)) {
			dev_err(priv->dev,
				"cipher: result: could not retrieve the result descriptor\n");
			*ret = PTR_ERR(rdesc);
			break;
		}

		if (rdesc->result_data.error_code) {
			dev_err(priv->dev,
				"cipher: result: result descriptor error (%d)\n",
				rdesc->result_data.error_code);
			*ret = -EIO;
		}

		ndesc++;
	} while (!rdesc->last_seg);

	safexcel_complete(priv, ring);
	spin_unlock_bh(&priv->ring[ring].egress_lock);

	if (req->src == req->dst) {
		dma_unmap_sg(priv->dev, req->src,
			     sg_nents_for_len(req->src, req->cryptlen),
			     DMA_BIDIRECTIONAL);
	} else {
		dma_unmap_sg(priv->dev, req->src,
			     sg_nents_for_len(req->src, req->cryptlen),
			     DMA_TO_DEVICE);
		dma_unmap_sg(priv->dev, req->dst,
			     sg_nents_for_len(req->dst, req->cryptlen),
			     DMA_FROM_DEVICE);
	}

	*should_complete = true;

	return ndesc;
}
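
/*
 * Submit a cipher request to the engine: DMA-map the scatterlists, copy the
 * key into the context record, then build one command descriptor per source
 * segment and one result descriptor per destination segment, rolling
 * everything back if either ring runs out of space.
 */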
static int safexcel_aes_send(struct crypto_async_request *async,
			     int ring, struct safexcel_request *request,
			     int *commands, int *results)
{
	struct skcipher_request *req = skcipher_request_cast(async);
	struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
	struct safexcel_crypto_priv *priv = ctx->priv;
	struct safexcel_command_desc *cdesc;
	struct safexcel_result_desc *rdesc;
	struct scatterlist *sg;
	int nr_src, nr_dst, n_cdesc = 0, n_rdesc = 0, queued = req->cryptlen;
	int i, ret = 0;

	if (req->src == req->dst) {
		nr_src = dma_map_sg(priv->dev, req->src,
				    sg_nents_for_len(req->src, req->cryptlen),
				    DMA_BIDIRECTIONAL);
		nr_dst = nr_src;
		if (!nr_src)
			return -EINVAL;
	} else {
		nr_src = dma_map_sg(priv->dev, req->src,
				    sg_nents_for_len(req->src, req->cryptlen),
				    DMA_TO_DEVICE);
		if (!nr_src)
			return -EINVAL;

		nr_dst = dma_map_sg(priv->dev, req->dst,
				    sg_nents_for_len(req->dst, req->cryptlen),
				    DMA_FROM_DEVICE);
		if (!nr_dst) {
			dma_unmap_sg(priv->dev, req->src,
				     sg_nents_for_len(req->src, req->cryptlen),
				     DMA_TO_DEVICE);
			return -EINVAL;
		}
	}

	memcpy(ctx->base.ctxr->data, ctx->key, ctx->key_len);

	spin_lock_bh(&priv->ring[ring].egress_lock);

	/* command descriptors */
	for_each_sg(req->src, sg, nr_src, i) {
		int len = sg_dma_len(sg);

		/* Do not overflow the request */
		if (queued - len < 0)
			len = queued;

		cdesc = safexcel_add_cdesc(priv, ring, !n_cdesc, !(queued - len),
					   sg_dma_address(sg), len, req->cryptlen,
					   ctx->base.ctxr_dma);
		if (IS_ERR(cdesc)) {
			/* No space left in the command descriptor ring */
			ret = PTR_ERR(cdesc);
			goto cdesc_rollback;
		}
		n_cdesc++;

		if (n_cdesc == 1) {
			safexcel_context_control(ctx, cdesc);
			safexcel_cipher_token(ctx, async, cdesc, req->cryptlen);
		}

		queued -= len;
		if (!queued)
			break;
	}

	/* result descriptors */
	for_each_sg(req->dst, sg, nr_dst, i) {
		bool first = !i, last = (i == nr_dst - 1);
		u32 len = sg_dma_len(sg);

		rdesc = safexcel_add_rdesc(priv, ring, first, last,
					   sg_dma_address(sg), len);
		if (IS_ERR(rdesc)) {
			/* No space left in the result descriptor ring */
			ret = PTR_ERR(rdesc);
			goto rdesc_rollback;
		}
		n_rdesc++;
	}

	spin_unlock_bh(&priv->ring[ring].egress_lock);

	request->req = &req->base;
	ctx->base.handle_result = safexcel_handle_result;

	*commands = n_cdesc;
	*results = n_rdesc;
	return 0;

rdesc_rollback:
	for (i = 0; i < n_rdesc; i++)
		safexcel_ring_rollback_wptr(priv, &priv->ring[ring].rdr);
cdesc_rollback:
	for (i = 0; i < n_cdesc; i++)
		safexcel_ring_rollback_wptr(priv, &priv->ring[ring].cdr);

	spin_unlock_bh(&priv->ring[ring].egress_lock);

	if (req->src == req->dst) {
		dma_unmap_sg(priv->dev, req->src,
			     sg_nents_for_len(req->src, req->cryptlen),
			     DMA_BIDIRECTIONAL);
	} else {
		dma_unmap_sg(priv->dev, req->src,
			     sg_nents_for_len(req->src, req->cryptlen),
			     DMA_TO_DEVICE);
		dma_unmap_sg(priv->dev, req->dst,
			     sg_nents_for_len(req->dst, req->cryptlen),
			     DMA_FROM_DEVICE);
	}

	return ret;
}
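
/*
 * Result handler for a context invalidation request. On a final (exit)
 * invalidation the context record is returned to the DMA pool; otherwise the
 * original request is moved to a newly selected ring and re-enqueued.
 */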
static int safexcel_handle_inv_result(struct safexcel_crypto_priv *priv,
				      int ring,
				      struct crypto_async_request *async,
				      bool *should_complete, int *ret)
{
	struct skcipher_request *req = skcipher_request_cast(async);
	struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
	struct safexcel_result_desc *rdesc;
	int ndesc = 0, enq_ret;

	*ret = 0;

	spin_lock_bh(&priv->ring[ring].egress_lock);
	do {
		rdesc = safexcel_ring_next_rptr(priv, &priv->ring[ring].rdr);
		if (IS_ERR(rdesc)) {
			dev_err(priv->dev,
				"cipher: invalidate: could not retrieve the result descriptor\n");
			*ret = PTR_ERR(rdesc);
			break;
		}

		if (rdesc->result_data.error_code) {
			dev_err(priv->dev, "cipher: invalidate: result descriptor error (%d)\n",
				rdesc->result_data.error_code);
			*ret = -EIO;
		}

		ndesc++;
	} while (!rdesc->last_seg);

	safexcel_complete(priv, ring);
	spin_unlock_bh(&priv->ring[ring].egress_lock);

	if (ctx->base.exit_inv) {
		dma_pool_free(priv->context_pool, ctx->base.ctxr,
			      ctx->base.ctxr_dma);

		*should_complete = true;

		return ndesc;
	}

	ring = safexcel_select_ring(priv);
	ctx->base.ring = ring;
	ctx->base.needs_inv = false;
	ctx->base.send = safexcel_aes_send;

	spin_lock_bh(&priv->ring[ring].queue_lock);
	enq_ret = crypto_enqueue_request(&priv->ring[ring].queue, async);
	spin_unlock_bh(&priv->ring[ring].queue_lock);

	if (enq_ret != -EINPROGRESS)
		*ret = enq_ret;

	if (!priv->ring[ring].need_dequeue)
		safexcel_dequeue(priv, ring);

	*should_complete = false;

	return ndesc;
}
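
/*
 * Emit the single command/result descriptor pair that invalidates the cached
 * context record.
 */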
static int safexcel_cipher_send_inv(struct crypto_async_request *async,
				    int ring, struct safexcel_request *request,
				    int *commands, int *results)
{
	struct skcipher_request *req = skcipher_request_cast(async);
	struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
	struct safexcel_crypto_priv *priv = ctx->priv;
	int ret;

	ctx->base.handle_result = safexcel_handle_inv_result;

	ret = safexcel_invalidate_cache(async, &ctx->base, priv,
					ctx->base.ctxr_dma, ring, request);
	if (unlikely(ret))
		return ret;

	*commands = 1;
	*results = 1;

	return 0;
}
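
/*
 * Synchronously invalidate the context record: queue a dummy skcipher
 * request carrying the invalidation command and wait for it to complete.
 */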
static int safexcel_cipher_exit_inv(struct crypto_tfm *tfm)
{
	struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
	struct safexcel_crypto_priv *priv = ctx->priv;
	struct skcipher_request req;
	struct safexcel_inv_result result = {};
	int ring = ctx->base.ring;

	memset(&req, 0, sizeof(struct skcipher_request));

	/* create invalidation request */
	init_completion(&result.completion);
	skcipher_request_set_callback(&req, CRYPTO_TFM_REQ_MAY_BACKLOG,
				      safexcel_inv_complete, &result);

	skcipher_request_set_tfm(&req, __crypto_skcipher_cast(tfm));
	ctx = crypto_tfm_ctx(req.base.tfm);
	ctx->base.exit_inv = true;
	ctx->base.send = safexcel_cipher_send_inv;

	spin_lock_bh(&priv->ring[ring].queue_lock);
	crypto_enqueue_request(&priv->ring[ring].queue, &req.base);
	spin_unlock_bh(&priv->ring[ring].queue_lock);

	if (!priv->ring[ring].need_dequeue)
		safexcel_dequeue(priv, ring);

	wait_for_completion_interruptible(&result.completion);

	if (result.error) {
		dev_warn(priv->dev,
			 "cipher: sync: invalidate: completion error %d\n",
			 result.error);
		return result.error;
	}

	return 0;
}
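
/*
 * Common entry point for encrypt/decrypt: allocate the context record on
 * first use (or schedule an invalidation if the key changed) and enqueue the
 * request on the selected ring.
 */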
static int safexcel_aes(struct skcipher_request *req,
			enum safexcel_cipher_direction dir, u32 mode)
{
	struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
	struct safexcel_crypto_priv *priv = ctx->priv;
	int ret, ring;

	ctx->direction = dir;
	ctx->mode = mode;

	if (ctx->base.ctxr) {
		if (ctx->base.needs_inv)
			ctx->base.send = safexcel_cipher_send_inv;
	} else {
		ctx->base.ring = safexcel_select_ring(priv);
		ctx->base.send = safexcel_aes_send;

		ctx->base.ctxr = dma_pool_zalloc(priv->context_pool,
						 EIP197_GFP_FLAGS(req->base),
						 &ctx->base.ctxr_dma);
		if (!ctx->base.ctxr)
			return -ENOMEM;
	}

	ring = ctx->base.ring;

	spin_lock_bh(&priv->ring[ring].queue_lock);
	ret = crypto_enqueue_request(&priv->ring[ring].queue, &req->base);
	spin_unlock_bh(&priv->ring[ring].queue_lock);

	if (!priv->ring[ring].need_dequeue)
		safexcel_dequeue(priv, ring);

	return ret;
}
static int safexcel_ecb_aes_encrypt(struct skcipher_request *req)
{
	return safexcel_aes(req, SAFEXCEL_ENCRYPT,
			    CONTEXT_CONTROL_CRYPTO_MODE_ECB);
}

static int safexcel_ecb_aes_decrypt(struct skcipher_request *req)
{
	return safexcel_aes(req, SAFEXCEL_DECRYPT,
			    CONTEXT_CONTROL_CRYPTO_MODE_ECB);
}
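
/* Bind the tfm context to the driver instance that registered the template. */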
static int safexcel_skcipher_cra_init(struct crypto_tfm *tfm)
{
	struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
	struct safexcel_alg_template *tmpl =
		container_of(tfm->__crt_alg, struct safexcel_alg_template,
			     alg.skcipher.base);

	ctx->priv = tmpl->priv;

	return 0;
}
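
/*
 * Wipe the key material and, if a context record was allocated, invalidate
 * it in hardware before the tfm goes away.
 */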
static void safexcel_skcipher_cra_exit(struct crypto_tfm *tfm)
{
	struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
	struct safexcel_crypto_priv *priv = ctx->priv;
	int ret;

	memzero_explicit(ctx->key, 8 * sizeof(u32));

	/* context not allocated, skip invalidation */
	if (!ctx->base.ctxr)
		return;

	memzero_explicit(ctx->base.ctxr->data, 8 * sizeof(u32));

	ret = safexcel_cipher_exit_inv(tfm);
	if (ret)
		dev_warn(priv->dev, "cipher: invalidation error %d\n", ret);
}
struct safexcel_alg_template safexcel_alg_ecb_aes = {
	.type = SAFEXCEL_ALG_TYPE_SKCIPHER,
	.alg.skcipher = {
		.setkey = safexcel_aes_setkey,
		.encrypt = safexcel_ecb_aes_encrypt,
		.decrypt = safexcel_ecb_aes_decrypt,
		.min_keysize = AES_MIN_KEY_SIZE,
		.max_keysize = AES_MAX_KEY_SIZE,
		.base = {
			.cra_name = "ecb(aes)",
			.cra_driver_name = "safexcel-ecb-aes",
			.cra_priority = 300,
			.cra_flags = CRYPTO_ALG_TYPE_SKCIPHER | CRYPTO_ALG_ASYNC |
				     CRYPTO_ALG_KERN_DRIVER_ONLY,
			.cra_blocksize = AES_BLOCK_SIZE,
			.cra_ctxsize = sizeof(struct safexcel_cipher_ctx),
			.cra_alignmask = 0,
			.cra_init = safexcel_skcipher_cra_init,
			.cra_exit = safexcel_skcipher_cra_exit,
			.cra_module = THIS_MODULE,
		},
	},
};
static int safexcel_cbc_aes_encrypt(struct skcipher_request *req)
{
	return safexcel_aes(req, SAFEXCEL_ENCRYPT,
			    CONTEXT_CONTROL_CRYPTO_MODE_CBC);
}

static int safexcel_cbc_aes_decrypt(struct skcipher_request *req)
{
	return safexcel_aes(req, SAFEXCEL_DECRYPT,
			    CONTEXT_CONTROL_CRYPTO_MODE_CBC);
}
struct safexcel_alg_template safexcel_alg_cbc_aes = {
	.type = SAFEXCEL_ALG_TYPE_SKCIPHER,
	.alg.skcipher = {
		.setkey = safexcel_aes_setkey,
		.encrypt = safexcel_cbc_aes_encrypt,
		.decrypt = safexcel_cbc_aes_decrypt,
		.min_keysize = AES_MIN_KEY_SIZE,
		.max_keysize = AES_MAX_KEY_SIZE,
		.ivsize = AES_BLOCK_SIZE,
		.base = {
			.cra_name = "cbc(aes)",
			.cra_driver_name = "safexcel-cbc-aes",
			.cra_priority = 300,
			.cra_flags = CRYPTO_ALG_TYPE_SKCIPHER | CRYPTO_ALG_ASYNC |
				     CRYPTO_ALG_KERN_DRIVER_ONLY,
			.cra_blocksize = AES_BLOCK_SIZE,
			.cra_ctxsize = sizeof(struct safexcel_cipher_ctx),
			.cra_alignmask = 0,
			.cra_init = safexcel_skcipher_cra_init,
			.cra_exit = safexcel_skcipher_cra_exit,
			.cra_module = THIS_MODULE,
		},
	},
};