drivers/crypto/caam/caamalg.c
/*
 * caam - Freescale FSL CAAM support for crypto API
 *
 * Copyright 2008-2011 Freescale Semiconductor, Inc.
 *
 * Based on talitos crypto API driver.
 *
 * relationship of job descriptors to shared descriptors (SteveC Dec 10 2008):
 *
 * ---------------                     ---------------
 * | JobDesc #1  |-------------------->|  ShareDesc  |
 * | *(packet 1) |                     |   (PDB)     |
 * ---------------      |------------->|  (hashKey)  |
 *       .              |              | (cipherKey) |
 *       .              |    |-------->| (operation) |
 * ---------------      |    |         ---------------
 * | JobDesc #2  |------|    |
 * | *(packet 2) |           |
 * ---------------           |
 *       .                   |
 *       .                   |
 * ---------------           |
 * | JobDesc #3  |------------
 * | *(packet 3) |
 * ---------------
 *
 * The SharedDesc never changes for a connection unless rekeyed, but
 * each packet will likely be in a different place.  So all we need
 * to know to process the packet is where the input is, where the
 * output goes, and what context we want to process with.  Context is
 * in the SharedDesc, packet references in the JobDesc.
 *
 * So, a job desc looks like:
 *
 * ---------------------
 * | Header            |
 * | ShareDesc Pointer |
 * | SEQ_OUT_PTR       |
 * | (output buffer)   |
 * | SEQ_IN_PTR        |
 * | (input buffer)    |
 * | LOAD (to DECO)    |
 * ---------------------
 */
#include "compat.h"

#include "regs.h"
#include "intern.h"
#include "desc_constr.h"
#include "jr.h"
#include "error.h"
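
/*
 * Illustrative sketch only (not used by the driver): how the job
 * descriptor layout pictured in the header comment above is expressed
 * with the desc_constr.h helpers this file uses.  All addresses and
 * lengths here are hypothetical placeholders, and the guard macro is
 * made up to keep the sketch out of the build; see init_aead_job()
 * below for the real construction.
 */
#ifdef CAAM_EXAMPLE_SKETCH
static void example_job_desc(u32 *desc, dma_addr_t sh_desc_dma, int sh_len,
			     dma_addr_t in_dma, u32 in_len,
			     dma_addr_t out_dma, u32 out_len)
{
	/* Header + ShareDesc pointer; the shared descriptor runs first */
	init_job_desc_shared(desc, sh_desc_dma, sh_len,
			     HDR_SHARE_DEFER | HDR_REVERSE);
	/* SEQ_IN_PTR: where the input is */
	append_seq_in_ptr(desc, in_dma, in_len, 0);
	/* SEQ_OUT_PTR: where the output goes */
	append_seq_out_ptr(desc, out_dma, out_len, 0);
}
#endif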
/*
 * crypto alg
 */
#define CAAM_CRA_PRIORITY		3000
/* max key is sum of AES_MAX_KEY_SIZE, max split key size */
#define CAAM_MAX_KEY_SIZE		(AES_MAX_KEY_SIZE + \
					 SHA512_DIGEST_SIZE * 2)
/* max IV is max of AES_BLOCK_SIZE, DES3_EDE_BLOCK_SIZE */
#define CAAM_MAX_IV_LENGTH		16
/* length of descriptors text */
#define DESC_JOB_IO_LEN			(CAAM_CMD_SZ * 3 + CAAM_PTR_SZ * 3)

#define DESC_AEAD_BASE			(4 * CAAM_CMD_SZ)
#define DESC_AEAD_ENC_LEN		(DESC_AEAD_BASE + 16 * CAAM_CMD_SZ)
#define DESC_AEAD_DEC_LEN		(DESC_AEAD_BASE + 21 * CAAM_CMD_SZ)
#define DESC_AEAD_GIVENC_LEN		(DESC_AEAD_ENC_LEN + 7 * CAAM_CMD_SZ)

#define DESC_ABLKCIPHER_BASE		(3 * CAAM_CMD_SZ)
#define DESC_ABLKCIPHER_ENC_LEN		(DESC_ABLKCIPHER_BASE + \
					 20 * CAAM_CMD_SZ)
#define DESC_ABLKCIPHER_DEC_LEN		(DESC_ABLKCIPHER_BASE + \
					 15 * CAAM_CMD_SZ)

#define DESC_MAX_USED_BYTES		(DESC_AEAD_GIVENC_LEN + \
					 CAAM_MAX_KEY_SIZE)
#define DESC_MAX_USED_LEN		(DESC_MAX_USED_BYTES / CAAM_CMD_SZ)
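
/*
 * Worked sizing example (a sketch; assumes CAAM_CMD_SZ == 4, i.e. one
 * 32-bit command word, and a 64-bit dma_addr_t so CAAM_PTR_SZ == 8):
 *   DESC_JOB_IO_LEN      = 3 * 4 + 3 * 8        =  36 bytes
 *   DESC_AEAD_ENC_LEN    = (4 + 16) * 4         =  80 bytes
 *   DESC_AEAD_GIVENC_LEN = 80 + 7 * 4           = 108 bytes
 *   DESC_MAX_USED_BYTES  = 108 + (32 + 2 * 64)  = 268 bytes
 *   DESC_MAX_USED_LEN    = 268 / 4              =  67 words
 * so each shared descriptor buffer in struct caam_ctx below holds 67 words.
 */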
#ifdef DEBUG
/* for print_hex_dumps with line references */
#define xstr(s) str(s)
#define str(s) #s
#define debug(format, arg...) printk(format, arg)
#else
#define debug(format, arg...)
#endif
/* Set DK bit in class 1 operation if shared */
static inline void append_dec_op1(u32 *desc, u32 type)
{
	u32 *jump_cmd, *uncond_jump_cmd;

	jump_cmd = append_jump(desc, JUMP_TEST_ALL | JUMP_COND_SHRD);
	append_operation(desc, type | OP_ALG_AS_INITFINAL |
			 OP_ALG_DECRYPT);
	uncond_jump_cmd = append_jump(desc, JUMP_TEST_ALL);
	set_jump_tgt_here(desc, jump_cmd);
	append_operation(desc, type | OP_ALG_AS_INITFINAL |
			 OP_ALG_DECRYPT | OP_ALG_AAI_DK);
	set_jump_tgt_here(desc, uncond_jump_cmd);
}
/*
 * Wait for completion of class 1 key loading before allowing
 * error propagation
 */
static inline void append_dec_shr_done(u32 *desc)
{
	u32 *jump_cmd;

	jump_cmd = append_jump(desc, JUMP_CLASS_CLASS1 | JUMP_TEST_ALL);
	set_jump_tgt_here(desc, jump_cmd);
	append_cmd(desc, SET_OK_PROP_ERRORS | CMD_LOAD);
}
/*
 * For aead functions, read payload and write payload,
 * both of which are specified in req->src and req->dst
 */
static inline void aead_append_src_dst(u32 *desc, u32 msg_type)
{
	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_BOTH |
			     KEY_VLF | msg_type | FIFOLD_TYPE_LASTBOTH);
	append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | KEY_VLF);
}
/*
 * For aead encrypt and decrypt, read iv for both classes
 */
static inline void aead_append_ld_iv(u32 *desc, int ivsize)
{
	append_cmd(desc, CMD_SEQ_LOAD | LDST_SRCDST_BYTE_CONTEXT |
		   LDST_CLASS_1_CCB | ivsize);
	append_move(desc, MOVE_SRC_CLASS1CTX | MOVE_DEST_CLASS2INFIFO | ivsize);
}
/*
 * For ablkcipher encrypt and decrypt, read from req->src and
 * write to req->dst
 */
static inline void ablkcipher_append_src_dst(u32 *desc)
{
	append_math_add(desc, VARSEQOUTLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
	append_math_add(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 |
			     KEY_VLF | FIFOLD_TYPE_MSG | FIFOLD_TYPE_LAST1);
	append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | KEY_VLF);
}
/*
 * If all data, including src (with assoc and iv) or dst (with iv only) are
 * contiguous
 */
#define GIV_SRC_CONTIG		1
#define GIV_DST_CONTIG		(1 << 1)
/*
 * per-session context
 */
struct caam_ctx {
	struct device *jrdev;
	u32 sh_desc_enc[DESC_MAX_USED_LEN];
	u32 sh_desc_dec[DESC_MAX_USED_LEN];
	u32 sh_desc_givenc[DESC_MAX_USED_LEN];
	dma_addr_t sh_desc_enc_dma;
	dma_addr_t sh_desc_dec_dma;
	dma_addr_t sh_desc_givenc_dma;
	u32 class1_alg_type;
	u32 class2_alg_type;
	u32 alg_op;
	u8 key[CAAM_MAX_KEY_SIZE];
	dma_addr_t key_dma;
	unsigned int enckeylen;
	unsigned int split_key_len;
	unsigned int split_key_pad_len;
	unsigned int authsize;
};
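
/*
 * Layout of ctx->key as used below (sketch): the MDHA split key
 * (ipad/opad), padded out to split_key_pad_len, is followed immediately
 * by the raw encryption key.  append_key_aead() points class 2 at
 * offset 0 and class 1 at offset split_key_pad_len; aead_setkey()
 * fills the buffer in that order.
 *
 *   ctx->key: | split key (split_key_pad_len) | enc key (enckeylen) |
 */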
static void append_key_aead(u32 *desc, struct caam_ctx *ctx,
			    int keys_fit_inline)
{
	if (keys_fit_inline) {
		append_key_as_imm(desc, ctx->key, ctx->split_key_pad_len,
				  ctx->split_key_len, CLASS_2 |
				  KEY_DEST_MDHA_SPLIT | KEY_ENC);
		append_key_as_imm(desc, (void *)ctx->key +
				  ctx->split_key_pad_len, ctx->enckeylen,
				  ctx->enckeylen, CLASS_1 | KEY_DEST_CLASS_REG);
	} else {
		append_key(desc, ctx->key_dma, ctx->split_key_len, CLASS_2 |
			   KEY_DEST_MDHA_SPLIT | KEY_ENC);
		append_key(desc, ctx->key_dma + ctx->split_key_pad_len,
			   ctx->enckeylen, CLASS_1 | KEY_DEST_CLASS_REG);
	}
}
static void init_sh_desc_key_aead(u32 *desc, struct caam_ctx *ctx,
				  int keys_fit_inline)
{
	u32 *key_jump_cmd;

	init_sh_desc(desc, HDR_SHARE_WAIT);

	/* Skip if already shared */
	key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
				   JUMP_COND_SHRD);

	append_key_aead(desc, ctx, keys_fit_inline);

	set_jump_tgt_here(desc, key_jump_cmd);

	/* Propagate errors from shared to job descriptor */
	append_cmd(desc, SET_OK_PROP_ERRORS | CMD_LOAD);
}
static int aead_set_sh_desc(struct crypto_aead *aead)
{
	struct aead_tfm *tfm = &aead->base.crt_aead;
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	struct device *jrdev = ctx->jrdev;
	bool keys_fit_inline = false;
	u32 *key_jump_cmd, *jump_cmd;
	u32 geniv, moveiv;
	u32 *desc;

	if (!ctx->enckeylen || !ctx->authsize)
		return 0;

	/*
	 * Job Descriptor and Shared Descriptors
	 * must all fit into the 64-word Descriptor h/w Buffer
	 */
	if (DESC_AEAD_ENC_LEN + DESC_JOB_IO_LEN +
	    ctx->split_key_pad_len + ctx->enckeylen <=
	    CAAM_DESC_BYTES_MAX)
		keys_fit_inline = true;

	/* aead_encrypt shared descriptor */
	desc = ctx->sh_desc_enc;

	init_sh_desc_key_aead(desc, ctx, keys_fit_inline);

	/* Class 2 operation */
	append_operation(desc, ctx->class2_alg_type |
			 OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);

	/* cryptlen = seqoutlen - authsize */
	append_math_sub_imm_u32(desc, REG3, SEQOUTLEN, IMM, ctx->authsize);

	/* assoclen + cryptlen = seqinlen - ivsize */
	append_math_sub_imm_u32(desc, REG2, SEQINLEN, IMM, tfm->ivsize);

	/* assoclen = (assoclen + cryptlen) - cryptlen */
	append_math_sub(desc, VARSEQINLEN, REG2, REG3, CAAM_CMD_SZ);

	/* read assoc before reading payload */
	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_MSG |
			     KEY_VLF);
	aead_append_ld_iv(desc, tfm->ivsize);

	/* Class 1 operation */
	append_operation(desc, ctx->class1_alg_type |
			 OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);

	/* Read and write cryptlen bytes */
	append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ);
	append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);
	aead_append_src_dst(desc, FIFOLD_TYPE_MSG1OUT2);

	/* Write ICV */
	append_seq_store(desc, ctx->authsize, LDST_CLASS_2_CCB |
			 LDST_SRCDST_BYTE_CONTEXT);

	ctx->sh_desc_enc_dma = dma_map_single(jrdev, desc,
					      desc_bytes(desc),
					      DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->sh_desc_enc_dma)) {
		dev_err(jrdev, "unable to map shared descriptor\n");
		return -ENOMEM;
	}
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "aead enc shdesc@"xstr(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc,
		       desc_bytes(desc), 1);
#endif

	/*
	 * Job Descriptor and Shared Descriptors
	 * must all fit into the 64-word Descriptor h/w Buffer
	 */
	if (DESC_AEAD_DEC_LEN + DESC_JOB_IO_LEN +
	    ctx->split_key_pad_len + ctx->enckeylen <=
	    CAAM_DESC_BYTES_MAX)
		keys_fit_inline = true;

	desc = ctx->sh_desc_dec;

	/* aead_decrypt shared descriptor */
	init_sh_desc(desc, HDR_SHARE_WAIT);

	/* Skip if already shared */
	key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
				   JUMP_COND_SHRD);

	append_key_aead(desc, ctx, keys_fit_inline);

	/* Only propagate error immediately if shared */
	jump_cmd = append_jump(desc, JUMP_TEST_ALL);
	set_jump_tgt_here(desc, key_jump_cmd);
	append_cmd(desc, SET_OK_PROP_ERRORS | CMD_LOAD);
	set_jump_tgt_here(desc, jump_cmd);

	/* Class 2 operation */
	append_operation(desc, ctx->class2_alg_type |
			 OP_ALG_AS_INITFINAL | OP_ALG_DECRYPT | OP_ALG_ICV_ON);

	/* assoclen + cryptlen = seqinlen - ivsize - authsize */
	append_math_sub_imm_u32(desc, REG3, SEQINLEN, IMM,
				ctx->authsize + tfm->ivsize);
	/* assoclen = (assoclen + cryptlen) - cryptlen */
	append_math_sub(desc, REG2, SEQOUTLEN, REG0, CAAM_CMD_SZ);
	append_math_sub(desc, VARSEQINLEN, REG3, REG2, CAAM_CMD_SZ);

	/* read assoc before reading payload */
	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_MSG |
			     KEY_VLF);

	aead_append_ld_iv(desc, tfm->ivsize);

	append_dec_op1(desc, ctx->class1_alg_type);

	/* Read and write cryptlen bytes */
	append_math_add(desc, VARSEQINLEN, ZERO, REG2, CAAM_CMD_SZ);
	append_math_add(desc, VARSEQOUTLEN, ZERO, REG2, CAAM_CMD_SZ);
	aead_append_src_dst(desc, FIFOLD_TYPE_MSG);

	/* Load ICV */
	append_seq_fifo_load(desc, ctx->authsize, FIFOLD_CLASS_CLASS2 |
			     FIFOLD_TYPE_LAST2 | FIFOLD_TYPE_ICV);
	append_dec_shr_done(desc);

	ctx->sh_desc_dec_dma = dma_map_single(jrdev, desc,
					      desc_bytes(desc),
					      DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->sh_desc_dec_dma)) {
		dev_err(jrdev, "unable to map shared descriptor\n");
		return -ENOMEM;
	}
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "aead dec shdesc@"xstr(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc,
		       desc_bytes(desc), 1);
#endif

	/*
	 * Job Descriptor and Shared Descriptors
	 * must all fit into the 64-word Descriptor h/w Buffer
	 */
	if (DESC_AEAD_GIVENC_LEN + DESC_JOB_IO_LEN +
	    ctx->split_key_pad_len + ctx->enckeylen <=
	    CAAM_DESC_BYTES_MAX)
		keys_fit_inline = true;

	/* aead_givencrypt shared descriptor */
	desc = ctx->sh_desc_givenc;

	init_sh_desc_key_aead(desc, ctx, keys_fit_inline);

	/* Generate IV */
	geniv = NFIFOENTRY_STYPE_PAD | NFIFOENTRY_DEST_DECO |
		NFIFOENTRY_DTYPE_MSG | NFIFOENTRY_LC1 |
		NFIFOENTRY_PTYPE_RND | (tfm->ivsize << NFIFOENTRY_DLEN_SHIFT);
	append_load_imm_u32(desc, geniv, LDST_CLASS_IND_CCB |
			    LDST_SRCDST_WORD_INFO_FIFO | LDST_IMM);
	append_cmd(desc, CMD_LOAD | DISABLE_AUTO_INFO_FIFO);
	append_move(desc, MOVE_SRC_INFIFO |
		    MOVE_DEST_CLASS1CTX | (tfm->ivsize << MOVE_LEN_SHIFT));
	append_cmd(desc, CMD_LOAD | ENABLE_AUTO_INFO_FIFO);

	/* Copy IV from class 1 context to the output FIFO */
	append_move(desc, MOVE_SRC_CLASS1CTX |
		    MOVE_DEST_OUTFIFO | (tfm->ivsize << MOVE_LEN_SHIFT));

	/* Return to encryption */
	append_operation(desc, ctx->class2_alg_type |
			 OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);

	/* ivsize + cryptlen = seqoutlen - authsize */
	append_math_sub_imm_u32(desc, REG3, SEQOUTLEN, IMM, ctx->authsize);

	/* assoclen = seqinlen - (ivsize + cryptlen) */
	append_math_sub(desc, VARSEQINLEN, SEQINLEN, REG3, CAAM_CMD_SZ);

	/* read assoc before reading payload */
	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_MSG |
			     KEY_VLF);

	/* Copy iv from class 1 ctx to class 2 fifo */
	moveiv = NFIFOENTRY_STYPE_OFIFO | NFIFOENTRY_DEST_CLASS2 |
		 NFIFOENTRY_DTYPE_MSG | (tfm->ivsize << NFIFOENTRY_DLEN_SHIFT);
	append_load_imm_u32(desc, moveiv, LDST_CLASS_IND_CCB |
			    LDST_SRCDST_WORD_INFO_FIFO | LDST_IMM);
	append_load_imm_u32(desc, tfm->ivsize, LDST_CLASS_2_CCB |
			    LDST_SRCDST_WORD_DATASZ_REG | LDST_IMM);

	/* Class 1 operation */
	append_operation(desc, ctx->class1_alg_type |
			 OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);

	/* Will write ivsize + cryptlen */
	append_math_add(desc, VARSEQOUTLEN, SEQINLEN, REG0, CAAM_CMD_SZ);

	/* No need to reload iv */
	append_seq_fifo_load(desc, tfm->ivsize,
			     FIFOLD_CLASS_SKIP);

	/* Will read cryptlen */
	append_math_add(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
	aead_append_src_dst(desc, FIFOLD_TYPE_MSG1OUT2);

	/* Write ICV */
	append_seq_store(desc, ctx->authsize, LDST_CLASS_2_CCB |
			 LDST_SRCDST_BYTE_CONTEXT);

	ctx->sh_desc_givenc_dma = dma_map_single(jrdev, desc,
						 desc_bytes(desc),
						 DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->sh_desc_givenc_dma)) {
		dev_err(jrdev, "unable to map shared descriptor\n");
		return -ENOMEM;
	}
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "aead givenc shdesc@"xstr(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc,
		       desc_bytes(desc), 1);
#endif

	return 0;
}
static int aead_setauthsize(struct crypto_aead *authenc,
			    unsigned int authsize)
{
	struct caam_ctx *ctx = crypto_aead_ctx(authenc);

	ctx->authsize = authsize;
	aead_set_sh_desc(authenc);

	return 0;
}
struct split_key_result {
	struct completion completion;
	int err;
};
static void split_key_done(struct device *dev, u32 *desc, u32 err,
			   void *context)
{
	struct split_key_result *res = context;

#ifdef DEBUG
	dev_err(dev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
#endif

	if (err) {
		char tmp[CAAM_ERROR_STR_MAX];

		dev_err(dev, "%08x: %s\n", err, caam_jr_strstatus(tmp, err));
	}

	res->err = err;

	complete(&res->completion);
}
/*
 * get a split ipad/opad key
 *
 * Split key generation-----------------------------------------------
 *
 * [00] 0xb0810008    jobdesc: stidx=1 share=never len=8
 * [01] 0x04000014        key: class2->keyreg len=20
 *			@0xffe01000
 * [03] 0x84410014  operation: cls2-op sha1 hmac init dec
 * [04] 0x24940000     fifold: class2 msgdata-last2 len=0 imm
 * [05] 0xa4000001       jump: class2 local all ->1 [06]
 * [06] 0x64260028    fifostr: class2 mdsplit-jdk len=40
 *			@0xffe04000
 */
static int gen_split_key(struct caam_ctx *ctx, const u8 *key_in, u32 authkeylen)
{
	struct device *jrdev = ctx->jrdev;
	u32 *desc;
	struct split_key_result result;
	dma_addr_t dma_addr_in, dma_addr_out;
	int ret = 0;

	desc = kmalloc(CAAM_CMD_SZ * 6 + CAAM_PTR_SZ * 2, GFP_KERNEL | GFP_DMA);
	if (!desc) {
		dev_err(jrdev, "unable to allocate key input memory\n");
		return -ENOMEM;
	}

	init_job_desc(desc, 0);

	dma_addr_in = dma_map_single(jrdev, (void *)key_in, authkeylen,
				     DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, dma_addr_in)) {
		dev_err(jrdev, "unable to map key input memory\n");
		kfree(desc);
		return -ENOMEM;
	}
	append_key(desc, dma_addr_in, authkeylen, CLASS_2 |
		   KEY_DEST_CLASS_REG);

	/* Sets MDHA up into an HMAC-INIT */
	append_operation(desc, ctx->alg_op | OP_ALG_DECRYPT |
			 OP_ALG_AS_INIT);

	/*
	 * do a FIFO_LOAD of zero, this will trigger the internal key expansion
	 * into both pads inside MDHA
	 */
	append_fifo_load_as_imm(desc, NULL, 0, LDST_CLASS_2_CCB |
				FIFOLD_TYPE_MSG | FIFOLD_TYPE_LAST2);

	/*
	 * FIFO_STORE with the explicit split-key content store
	 * (0x26 output type)
	 */
	dma_addr_out = dma_map_single(jrdev, ctx->key, ctx->split_key_pad_len,
				      DMA_FROM_DEVICE);
	if (dma_mapping_error(jrdev, dma_addr_out)) {
		dev_err(jrdev, "unable to map key output memory\n");
		kfree(desc);
		return -ENOMEM;
	}
	append_fifo_store(desc, dma_addr_out, ctx->split_key_len,
			  LDST_CLASS_2_CCB | FIFOST_TYPE_SPLIT_KEK);

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "ctx.key@"xstr(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, key_in, authkeylen, 1);
	print_hex_dump(KERN_ERR, "jobdesc@"xstr(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
#endif

	result.err = 0;
	init_completion(&result.completion);

	ret = caam_jr_enqueue(jrdev, desc, split_key_done, &result);
	if (!ret) {
		/* in progress */
		wait_for_completion_interruptible(&result.completion);
		ret = result.err;
#ifdef DEBUG
		print_hex_dump(KERN_ERR, "ctx.key@"xstr(__LINE__)": ",
			       DUMP_PREFIX_ADDRESS, 16, 4, ctx->key,
			       ctx->split_key_pad_len, 1);
#endif
	}

	dma_unmap_single(jrdev, dma_addr_out, ctx->split_key_pad_len,
			 DMA_FROM_DEVICE);
	dma_unmap_single(jrdev, dma_addr_in, authkeylen, DMA_TO_DEVICE);

	kfree(desc);

	return ret;
}
static int aead_setkey(struct crypto_aead *aead,
		       const u8 *key, unsigned int keylen)
{
	/* Sizes for MDHA pads (*not* keys): MD5, SHA1, 224, 256, 384, 512 */
	static const u8 mdpadlen[] = { 16, 20, 32, 32, 64, 64 };
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	struct device *jrdev = ctx->jrdev;
	struct rtattr *rta = (void *)key;
	struct crypto_authenc_key_param *param;
	unsigned int authkeylen;
	unsigned int enckeylen;
	int ret = 0;

	param = RTA_DATA(rta);
	enckeylen = be32_to_cpu(param->enckeylen);

	key += RTA_ALIGN(rta->rta_len);
	keylen -= RTA_ALIGN(rta->rta_len);

	if (keylen < enckeylen)
		goto badkey;

	authkeylen = keylen - enckeylen;

	if (keylen > CAAM_MAX_KEY_SIZE)
		goto badkey;

	/* Pick class 2 key length from algorithm submask */
	ctx->split_key_len = mdpadlen[(ctx->alg_op & OP_ALG_ALGSEL_SUBMASK) >>
				      OP_ALG_ALGSEL_SHIFT] * 2;
	ctx->split_key_pad_len = ALIGN(ctx->split_key_len, 16);
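	/*
	 * Worked example (sketch): for hmac(sha1), mdpadlen[] gives a
	 * 20-byte pad size, so split_key_len = 20 * 2 = 40 bytes (ipad
	 * and opad halves) and split_key_pad_len = ALIGN(40, 16) = 48.
	 */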

#ifdef DEBUG
	printk(KERN_ERR "keylen %d enckeylen %d authkeylen %d\n",
	       keylen, enckeylen, authkeylen);
	printk(KERN_ERR "split_key_len %d split_key_pad_len %d\n",
	       ctx->split_key_len, ctx->split_key_pad_len);
	print_hex_dump(KERN_ERR, "key in @"xstr(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
#endif

	ret = gen_split_key(ctx, key, authkeylen);
	if (ret) {
		goto badkey;
	}

	/* postpend encryption key to auth split key */
	memcpy(ctx->key + ctx->split_key_pad_len, key + authkeylen, enckeylen);

	ctx->key_dma = dma_map_single(jrdev, ctx->key, ctx->split_key_pad_len +
				      enckeylen, DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->key_dma)) {
		dev_err(jrdev, "unable to map key i/o memory\n");
		return -ENOMEM;
	}
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "ctx.key@"xstr(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, ctx->key,
		       ctx->split_key_pad_len + enckeylen, 1);
#endif

	ctx->enckeylen = enckeylen;

	ret = aead_set_sh_desc(aead);
	if (ret) {
		dma_unmap_single(jrdev, ctx->key_dma, ctx->split_key_pad_len +
				 enckeylen, DMA_TO_DEVICE);
	}

	return ret;
badkey:
	crypto_aead_set_flags(aead, CRYPTO_TFM_RES_BAD_KEY_LEN);
	return -EINVAL;
}
static int ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher,
			     const u8 *key, unsigned int keylen)
{
	struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
	struct ablkcipher_tfm *tfm = &ablkcipher->base.crt_ablkcipher;
	struct device *jrdev = ctx->jrdev;
	int ret = 0;
	u32 *key_jump_cmd, *jump_cmd;
	u32 *desc;

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "key in @"xstr(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
#endif

	memcpy(ctx->key, key, keylen);
	ctx->key_dma = dma_map_single(jrdev, ctx->key, keylen,
				      DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->key_dma)) {
		dev_err(jrdev, "unable to map key i/o memory\n");
		return -ENOMEM;
	}
	ctx->enckeylen = keylen;

	/* ablkcipher_encrypt shared descriptor */
	desc = ctx->sh_desc_enc;
	init_sh_desc(desc, HDR_SHARE_WAIT);
	/* Skip if already shared */
	key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
				   JUMP_COND_SHRD);

	/* Load class1 key only */
	append_key_as_imm(desc, (void *)ctx->key, ctx->enckeylen,
			  ctx->enckeylen, CLASS_1 |
			  KEY_DEST_CLASS_REG);

	set_jump_tgt_here(desc, key_jump_cmd);

	/* Propagate errors from shared to job descriptor */
	append_cmd(desc, SET_OK_PROP_ERRORS | CMD_LOAD);

	/* Load iv */
	append_cmd(desc, CMD_SEQ_LOAD | LDST_SRCDST_BYTE_CONTEXT |
		   LDST_CLASS_1_CCB | tfm->ivsize);

	/* Load operation */
	append_operation(desc, ctx->class1_alg_type |
			 OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);

	/* Perform operation */
	ablkcipher_append_src_dst(desc);

	ctx->sh_desc_enc_dma = dma_map_single(jrdev, desc,
					      desc_bytes(desc),
					      DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->sh_desc_enc_dma)) {
		dev_err(jrdev, "unable to map shared descriptor\n");
		return -ENOMEM;
	}
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "ablkcipher enc shdesc@"xstr(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc,
		       desc_bytes(desc), 1);
#endif
	/* ablkcipher_decrypt shared descriptor */
	desc = ctx->sh_desc_dec;

	init_sh_desc(desc, HDR_SHARE_WAIT);
	/* Skip if already shared */
	key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
				   JUMP_COND_SHRD);

	/* Load class1 key only */
	append_key_as_imm(desc, (void *)ctx->key, ctx->enckeylen,
			  ctx->enckeylen, CLASS_1 |
			  KEY_DEST_CLASS_REG);

	/* Only propagate error immediately if shared */
	jump_cmd = append_jump(desc, JUMP_TEST_ALL);
	set_jump_tgt_here(desc, key_jump_cmd);
	append_cmd(desc, SET_OK_PROP_ERRORS | CMD_LOAD);
	set_jump_tgt_here(desc, jump_cmd);

	/* load IV */
	append_cmd(desc, CMD_SEQ_LOAD | LDST_SRCDST_BYTE_CONTEXT |
		   LDST_CLASS_1_CCB | tfm->ivsize);

	/* Choose operation */
	append_dec_op1(desc, ctx->class1_alg_type);

	/* Perform operation */
	ablkcipher_append_src_dst(desc);

	/* Wait for key to load before allowing propagating error */
	append_dec_shr_done(desc);

	ctx->sh_desc_dec_dma = dma_map_single(jrdev, desc,
					      desc_bytes(desc),
					      DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->sh_desc_dec_dma)) {
		dev_err(jrdev, "unable to map shared descriptor\n");
		return -ENOMEM;
	}

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "ablkcipher dec shdesc@"xstr(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc,
		       desc_bytes(desc), 1);
#endif

	return ret;
}
struct link_tbl_entry {
	u64 ptr;
	u32 len;
	u8 reserved;
	u8 buf_pool_id;
	u16 offset;
};
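
/*
 * Example (sketch): a two-segment source scatterlist becomes two link
 * table entries; sg_to_link_tbl_last() below marks the final entry by
 * or'ing 0x40000000 (bit 30) into its length word:
 *
 *   entry[0]: .ptr = sg_dma_address(seg0), .len = seg0 length
 *   entry[1]: .ptr = sg_dma_address(seg1), .len = seg1 length | 0x40000000
 */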
/*
 * aead_edesc - s/w-extended aead descriptor
 * @assoc_nents: number of segments in associated data (SPI+Seq) scatterlist
 * @src_nents: number of segments in input scatterlist
 * @dst_nents: number of segments in output scatterlist
 * @iv_dma: dma address of iv for checking continuity and link table
 * @link_tbl_bytes: length of dma mapped link_tbl space
 * @link_tbl_dma: bus physical mapped address of h/w link table
 * @hw_desc: the h/w job descriptor followed by any referenced link tables;
 *	     variable length, must not exceed MAX_CAAM_DESCSIZE
 */
struct aead_edesc {
	int assoc_nents;
	int src_nents;
	int dst_nents;
	dma_addr_t iv_dma;
	int link_tbl_bytes;
	dma_addr_t link_tbl_dma;
	struct link_tbl_entry *link_tbl;
	u32 hw_desc[0];
};
/*
 * ablkcipher_edesc - s/w-extended ablkcipher descriptor
 * @src_nents: number of segments in input scatterlist
 * @dst_nents: number of segments in output scatterlist
 * @iv_dma: dma address of iv for checking continuity and link table
 * @link_tbl_bytes: length of dma mapped link_tbl space
 * @link_tbl_dma: bus physical mapped address of h/w link table
 * @hw_desc: the h/w job descriptor followed by any referenced link tables;
 *	     variable length, must not exceed MAX_CAAM_DESCSIZE
 */
struct ablkcipher_edesc {
	int src_nents;
	int dst_nents;
	dma_addr_t iv_dma;
	int link_tbl_bytes;
	dma_addr_t link_tbl_dma;
	struct link_tbl_entry *link_tbl;
	u32 hw_desc[0];
};
static void caam_unmap(struct device *dev, struct scatterlist *src,
		       struct scatterlist *dst, int src_nents, int dst_nents,
		       dma_addr_t iv_dma, int ivsize, dma_addr_t link_tbl_dma,
		       int link_tbl_bytes)
{
	if (unlikely(dst != src)) {
		dma_unmap_sg(dev, src, src_nents, DMA_TO_DEVICE);
		dma_unmap_sg(dev, dst, dst_nents, DMA_FROM_DEVICE);
	} else {
		dma_unmap_sg(dev, src, src_nents, DMA_BIDIRECTIONAL);
	}

	if (iv_dma)
		dma_unmap_single(dev, iv_dma, ivsize, DMA_TO_DEVICE);
	if (link_tbl_bytes)
		dma_unmap_single(dev, link_tbl_dma, link_tbl_bytes,
				 DMA_TO_DEVICE);
}
static void aead_unmap(struct device *dev,
		       struct aead_edesc *edesc,
		       struct aead_request *req)
{
	struct crypto_aead *aead = crypto_aead_reqtfm(req);
	int ivsize = crypto_aead_ivsize(aead);

	dma_unmap_sg(dev, req->assoc, edesc->assoc_nents, DMA_TO_DEVICE);

	caam_unmap(dev, req->src, req->dst,
		   edesc->src_nents, edesc->dst_nents,
		   edesc->iv_dma, ivsize, edesc->link_tbl_dma,
		   edesc->link_tbl_bytes);
}
static void ablkcipher_unmap(struct device *dev,
			     struct ablkcipher_edesc *edesc,
			     struct ablkcipher_request *req)
{
	struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
	int ivsize = crypto_ablkcipher_ivsize(ablkcipher);

	caam_unmap(dev, req->src, req->dst,
		   edesc->src_nents, edesc->dst_nents,
		   edesc->iv_dma, ivsize, edesc->link_tbl_dma,
		   edesc->link_tbl_bytes);
}
static void aead_encrypt_done(struct device *jrdev, u32 *desc, u32 err,
			      void *context)
{
	struct aead_request *req = context;
	struct aead_edesc *edesc;
#ifdef DEBUG
	struct crypto_aead *aead = crypto_aead_reqtfm(req);
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	int ivsize = crypto_aead_ivsize(aead);

	dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
#endif

	edesc = (struct aead_edesc *)((char *)desc -
		 offsetof(struct aead_edesc, hw_desc));

	if (err) {
		char tmp[CAAM_ERROR_STR_MAX];

		dev_err(jrdev, "%08x: %s\n", err, caam_jr_strstatus(tmp, err));
	}

	aead_unmap(jrdev, edesc, req);

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "assoc @"xstr(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->assoc),
		       req->assoclen, 1);
	print_hex_dump(KERN_ERR, "dstiv @"xstr(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src) - ivsize,
		       edesc->src_nents ? 100 : ivsize, 1);
	print_hex_dump(KERN_ERR, "dst @"xstr(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src),
		       edesc->src_nents ? 100 : req->cryptlen +
		       ctx->authsize + 4, 1);
#endif

	kfree(edesc);

	aead_request_complete(req, err);
}
static void aead_decrypt_done(struct device *jrdev, u32 *desc, u32 err,
			      void *context)
{
	struct aead_request *req = context;
	struct aead_edesc *edesc;
#ifdef DEBUG
	struct crypto_aead *aead = crypto_aead_reqtfm(req);
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	int ivsize = crypto_aead_ivsize(aead);

	dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
#endif

	edesc = (struct aead_edesc *)((char *)desc -
		 offsetof(struct aead_edesc, hw_desc));

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "dstiv @"xstr(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, req->iv,
		       ivsize, 1);
	print_hex_dump(KERN_ERR, "dst @"xstr(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->dst),
		       req->cryptlen, 1);
#endif

	if (err) {
		char tmp[CAAM_ERROR_STR_MAX];

		dev_err(jrdev, "%08x: %s\n", err, caam_jr_strstatus(tmp, err));
	}

	aead_unmap(jrdev, edesc, req);

	/*
	 * verify hw auth check passed else return -EBADMSG
	 */
	if ((err & JRSTA_CCBERR_ERRID_MASK) == JRSTA_CCBERR_ERRID_ICVCHK)
		err = -EBADMSG;

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "iphdrout@"xstr(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4,
		       ((char *)sg_virt(req->assoc) - sizeof(struct iphdr)),
		       sizeof(struct iphdr) + req->assoclen +
		       ((req->cryptlen > 1500) ? 1500 : req->cryptlen) +
		       ctx->authsize + 36, 1);
	if (!err && edesc->link_tbl_bytes) {
		struct scatterlist *sg = sg_last(req->src, edesc->src_nents);

		print_hex_dump(KERN_ERR, "sglastout@"xstr(__LINE__)": ",
			       DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(sg),
			       sg->length + ctx->authsize + 16, 1);
	}
#endif

	kfree(edesc);

	aead_request_complete(req, err);
}
static void ablkcipher_encrypt_done(struct device *jrdev, u32 *desc, u32 err,
				    void *context)
{
	struct ablkcipher_request *req = context;
	struct ablkcipher_edesc *edesc;
#ifdef DEBUG
	struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
	int ivsize = crypto_ablkcipher_ivsize(ablkcipher);

	dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
#endif

	edesc = (struct ablkcipher_edesc *)((char *)desc -
		 offsetof(struct ablkcipher_edesc, hw_desc));

	if (err) {
		char tmp[CAAM_ERROR_STR_MAX];

		dev_err(jrdev, "%08x: %s\n", err, caam_jr_strstatus(tmp, err));
	}

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "dstiv @"xstr(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, req->info,
		       edesc->src_nents > 1 ? 100 : ivsize, 1);
	print_hex_dump(KERN_ERR, "dst @"xstr(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src),
		       edesc->dst_nents > 1 ? 100 : req->nbytes, 1);
#endif

	ablkcipher_unmap(jrdev, edesc, req);
	kfree(edesc);

	ablkcipher_request_complete(req, err);
}
static void ablkcipher_decrypt_done(struct device *jrdev, u32 *desc, u32 err,
				    void *context)
{
	struct ablkcipher_request *req = context;
	struct ablkcipher_edesc *edesc;
#ifdef DEBUG
	struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
	int ivsize = crypto_ablkcipher_ivsize(ablkcipher);

	dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
#endif

	edesc = (struct ablkcipher_edesc *)((char *)desc -
		 offsetof(struct ablkcipher_edesc, hw_desc));
	if (err) {
		char tmp[CAAM_ERROR_STR_MAX];

		dev_err(jrdev, "%08x: %s\n", err, caam_jr_strstatus(tmp, err));
	}

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "dstiv @"xstr(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, req->info,
		       ivsize, 1);
	print_hex_dump(KERN_ERR, "dst @"xstr(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src),
		       edesc->dst_nents > 1 ? 100 : req->nbytes, 1);
#endif

	ablkcipher_unmap(jrdev, edesc, req);
	kfree(edesc);

	ablkcipher_request_complete(req, err);
}
static void sg_to_link_tbl_one(struct link_tbl_entry *link_tbl_ptr,
			       dma_addr_t dma, u32 len, u32 offset)
{
	link_tbl_ptr->ptr = dma;
	link_tbl_ptr->len = len;
	link_tbl_ptr->reserved = 0;
	link_tbl_ptr->buf_pool_id = 0;
	link_tbl_ptr->offset = offset;
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "link_tbl_ptr@"xstr(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, link_tbl_ptr,
		       sizeof(struct link_tbl_entry), 1);
#endif
}
/*
 * convert scatterlist to h/w link table format
 * but does not have final bit; instead, returns last entry
 */
static struct link_tbl_entry *sg_to_link_tbl(struct scatterlist *sg,
					     int sg_count, struct link_tbl_entry
					     *link_tbl_ptr, u32 offset)
{
	while (sg_count) {
		sg_to_link_tbl_one(link_tbl_ptr, sg_dma_address(sg),
				   sg_dma_len(sg), offset);
		link_tbl_ptr++;
		sg = sg_next(sg);
		sg_count--;
	}
	return link_tbl_ptr - 1;
}
/*
 * convert scatterlist to h/w link table format
 * scatterlist must have been previously dma mapped
 */
static void sg_to_link_tbl_last(struct scatterlist *sg, int sg_count,
				struct link_tbl_entry *link_tbl_ptr, u32 offset)
{
	link_tbl_ptr = sg_to_link_tbl(sg, sg_count, link_tbl_ptr, offset);
	link_tbl_ptr->len |= 0x40000000;	/* mark final entry */
}
/*
 * Fill in aead job descriptor
 */
static void init_aead_job(u32 *sh_desc, dma_addr_t ptr,
			  struct aead_edesc *edesc,
			  struct aead_request *req,
			  bool all_contig, bool encrypt)
{
	struct crypto_aead *aead = crypto_aead_reqtfm(req);
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	int ivsize = crypto_aead_ivsize(aead);
	int authsize = ctx->authsize;
	u32 *desc = edesc->hw_desc;
	u32 out_options = 0, in_options;
	dma_addr_t dst_dma, src_dma;
	int len, link_tbl_index = 0;

#ifdef DEBUG
	debug("assoclen %d cryptlen %d authsize %d\n",
	      req->assoclen, req->cryptlen, authsize);
	print_hex_dump(KERN_ERR, "assoc @"xstr(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->assoc),
		       req->assoclen, 1);
	print_hex_dump(KERN_ERR, "presciv@"xstr(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, req->iv,
		       edesc->src_nents ? 100 : ivsize, 1);
	print_hex_dump(KERN_ERR, "src @"xstr(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src),
		       edesc->src_nents ? 100 : req->cryptlen, 1);
	print_hex_dump(KERN_ERR, "shrdesc@"xstr(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, sh_desc,
		       desc_bytes(sh_desc), 1);
#endif

	len = desc_len(sh_desc);
	init_job_desc_shared(desc, ptr, len, HDR_SHARE_DEFER | HDR_REVERSE);

	if (all_contig) {
		src_dma = sg_dma_address(req->assoc);
		in_options = 0;
	} else {
		src_dma = edesc->link_tbl_dma;
		link_tbl_index += (edesc->assoc_nents ? : 1) + 1 +
				  (edesc->src_nents ? : 1);
		in_options = LDST_SGF;
	}
	if (encrypt)
		append_seq_in_ptr(desc, src_dma, req->assoclen + ivsize +
				  req->cryptlen - authsize, in_options);
	else
		append_seq_in_ptr(desc, src_dma, req->assoclen + ivsize +
				  req->cryptlen, in_options);

	if (likely(req->src == req->dst)) {
		if (all_contig) {
			dst_dma = sg_dma_address(req->src);
		} else {
			dst_dma = src_dma + sizeof(struct link_tbl_entry) *
				  ((edesc->assoc_nents ? : 1) + 1);
			out_options = LDST_SGF;
		}
	} else {
		if (!edesc->dst_nents) {
			dst_dma = sg_dma_address(req->dst);
		} else {
			dst_dma = edesc->link_tbl_dma +
				  link_tbl_index *
				  sizeof(struct link_tbl_entry);
			out_options = LDST_SGF;
		}
	}
	if (encrypt)
		append_seq_out_ptr(desc, dst_dma, req->cryptlen, out_options);
	else
		append_seq_out_ptr(desc, dst_dma, req->cryptlen - authsize,
				   out_options);
}
/*
 * Fill in aead givencrypt job descriptor
 */
static void init_aead_giv_job(u32 *sh_desc, dma_addr_t ptr,
			      struct aead_edesc *edesc,
			      struct aead_request *req,
			      int contig)
{
	struct crypto_aead *aead = crypto_aead_reqtfm(req);
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	int ivsize = crypto_aead_ivsize(aead);
	int authsize = ctx->authsize;
	u32 *desc = edesc->hw_desc;
	u32 out_options = 0, in_options;
	dma_addr_t dst_dma, src_dma;
	int len, link_tbl_index = 0;

#ifdef DEBUG
	debug("assoclen %d cryptlen %d authsize %d\n",
	      req->assoclen, req->cryptlen, authsize);
	print_hex_dump(KERN_ERR, "assoc @"xstr(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->assoc),
		       req->assoclen, 1);
	print_hex_dump(KERN_ERR, "presciv@"xstr(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, req->iv, ivsize, 1);
	print_hex_dump(KERN_ERR, "src @"xstr(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src),
		       edesc->src_nents > 1 ? 100 : req->cryptlen, 1);
	print_hex_dump(KERN_ERR, "shrdesc@"xstr(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, sh_desc,
		       desc_bytes(sh_desc), 1);
#endif

	len = desc_len(sh_desc);
	init_job_desc_shared(desc, ptr, len, HDR_SHARE_DEFER | HDR_REVERSE);

	if (contig & GIV_SRC_CONTIG) {
		src_dma = sg_dma_address(req->assoc);
		in_options = 0;
	} else {
		src_dma = edesc->link_tbl_dma;
		link_tbl_index += edesc->assoc_nents + 1 + edesc->src_nents;
		in_options = LDST_SGF;
	}
	append_seq_in_ptr(desc, src_dma, req->assoclen + ivsize +
			  req->cryptlen - authsize, in_options);

	if (contig & GIV_DST_CONTIG) {
		dst_dma = edesc->iv_dma;
	} else {
		if (likely(req->src == req->dst)) {
			dst_dma = src_dma + sizeof(struct link_tbl_entry) *
				  edesc->assoc_nents;
			out_options = LDST_SGF;
		} else {
			dst_dma = edesc->link_tbl_dma +
				  link_tbl_index *
				  sizeof(struct link_tbl_entry);
			out_options = LDST_SGF;
		}
	}

	append_seq_out_ptr(desc, dst_dma, ivsize + req->cryptlen, out_options);
}
/*
 * Fill in ablkcipher job descriptor
 */
static void init_ablkcipher_job(u32 *sh_desc, dma_addr_t ptr,
				struct ablkcipher_edesc *edesc,
				struct ablkcipher_request *req,
				bool iv_contig)
{
	struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
	int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
	u32 *desc = edesc->hw_desc;
	u32 out_options = 0, in_options;
	dma_addr_t dst_dma, src_dma;
	int len, link_tbl_index = 0;

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "presciv@"xstr(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, req->info,
		       ivsize, 1);
	print_hex_dump(KERN_ERR, "src @"xstr(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src),
		       edesc->src_nents ? 100 : req->nbytes, 1);
#endif

	len = desc_len(sh_desc);
	init_job_desc_shared(desc, ptr, len, HDR_SHARE_DEFER | HDR_REVERSE);

	if (iv_contig) {
		src_dma = edesc->iv_dma;
		in_options = 0;
	} else {
		src_dma = edesc->link_tbl_dma;
		link_tbl_index += (iv_contig ? 0 : 1) + edesc->src_nents;
		in_options = LDST_SGF;
	}
	append_seq_in_ptr(desc, src_dma, req->nbytes + ivsize, in_options);

	if (likely(req->src == req->dst)) {
		if (!edesc->src_nents && iv_contig) {
			dst_dma = sg_dma_address(req->src);
		} else {
			dst_dma = edesc->link_tbl_dma +
				  sizeof(struct link_tbl_entry);
			out_options = LDST_SGF;
		}
	} else {
		if (!edesc->dst_nents) {
			dst_dma = sg_dma_address(req->dst);
		} else {
			dst_dma = edesc->link_tbl_dma +
				  link_tbl_index * sizeof(struct link_tbl_entry);
			out_options = LDST_SGF;
		}
	}
	append_seq_out_ptr(desc, dst_dma, req->nbytes, out_options);
}
/*
 * derive number of elements in scatterlist
 */
static int sg_count(struct scatterlist *sg_list, int nbytes)
{
	struct scatterlist *sg = sg_list;
	int sg_nents = 0;

	while (nbytes > 0) {
		sg_nents++;
		nbytes -= sg->length;
		if (!sg_is_last(sg) && (sg + 1)->length == 0)
			BUG(); /* chained scatterlists are not supported */
		sg = scatterwalk_sg_next(sg);
	}

	if (likely(sg_nents == 1))
		return 0;

	return sg_nents;
}
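
/*
 * Example (sketch): a request spanning three scatterlist segments makes
 * sg_count() return 3 and the caller builds a link table for it; a
 * single contiguous segment returns 0, and callers then program the
 * segment's dma address directly, with no table.  The "? : 1" idiom in
 * the callers below recovers the real entry count whenever a table must
 * be built anyway.
 */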
/*
 * allocate and map the aead extended descriptor
 */
static struct aead_edesc *aead_edesc_alloc(struct aead_request *req,
					   int desc_bytes, bool *all_contig_ptr)
{
	struct crypto_aead *aead = crypto_aead_reqtfm(req);
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	struct device *jrdev = ctx->jrdev;
	gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
		       CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
	int assoc_nents, src_nents, dst_nents = 0;
	struct aead_edesc *edesc;
	dma_addr_t iv_dma = 0;
	int sgc;
	bool all_contig = true;
	int ivsize = crypto_aead_ivsize(aead);
	int link_tbl_index, link_tbl_len = 0, link_tbl_bytes;

	assoc_nents = sg_count(req->assoc, req->assoclen);
	src_nents = sg_count(req->src, req->cryptlen);

	if (unlikely(req->dst != req->src))
		dst_nents = sg_count(req->dst, req->cryptlen);

	sgc = dma_map_sg(jrdev, req->assoc, assoc_nents ? : 1,
			 DMA_BIDIRECTIONAL);
	if (likely(req->src == req->dst)) {
		sgc = dma_map_sg(jrdev, req->src, src_nents ? : 1,
				 DMA_BIDIRECTIONAL);
	} else {
		sgc = dma_map_sg(jrdev, req->src, src_nents ? : 1,
				 DMA_TO_DEVICE);
		sgc = dma_map_sg(jrdev, req->dst, dst_nents ? : 1,
				 DMA_FROM_DEVICE);
	}

	/* Check if data are contiguous */
	iv_dma = dma_map_single(jrdev, req->iv, ivsize, DMA_TO_DEVICE);
	if (assoc_nents || sg_dma_address(req->assoc) + req->assoclen !=
	    iv_dma || src_nents || iv_dma + ivsize !=
	    sg_dma_address(req->src)) {
		all_contig = false;
		assoc_nents = assoc_nents ? : 1;
		src_nents = src_nents ? : 1;
		link_tbl_len = assoc_nents + 1 + src_nents;
	}
	link_tbl_len += dst_nents;

	link_tbl_bytes = link_tbl_len * sizeof(struct link_tbl_entry);

	/* allocate space for base edesc and hw desc commands, link tables */
	edesc = kmalloc(sizeof(struct aead_edesc) + desc_bytes +
			link_tbl_bytes, GFP_DMA | flags);
	if (!edesc) {
		dev_err(jrdev, "could not allocate extended descriptor\n");
		return ERR_PTR(-ENOMEM);
	}

	edesc->assoc_nents = assoc_nents;
	edesc->src_nents = src_nents;
	edesc->dst_nents = dst_nents;
	edesc->iv_dma = iv_dma;
	edesc->link_tbl_bytes = link_tbl_bytes;
	edesc->link_tbl = (void *)edesc + sizeof(struct aead_edesc) +
			  desc_bytes;
	edesc->link_tbl_dma = dma_map_single(jrdev, edesc->link_tbl,
					     link_tbl_bytes, DMA_TO_DEVICE);
	*all_contig_ptr = all_contig;

	link_tbl_index = 0;
	if (!all_contig) {
		sg_to_link_tbl(req->assoc,
			       (assoc_nents ? : 1),
			       edesc->link_tbl +
			       link_tbl_index, 0);
		link_tbl_index += assoc_nents ? : 1;
		sg_to_link_tbl_one(edesc->link_tbl + link_tbl_index,
				   iv_dma, ivsize, 0);
		link_tbl_index += 1;
		sg_to_link_tbl_last(req->src,
				    (src_nents ? : 1),
				    edesc->link_tbl +
				    link_tbl_index, 0);
		link_tbl_index += src_nents ? : 1;
	}
	if (dst_nents) {
		sg_to_link_tbl_last(req->dst, dst_nents,
				    edesc->link_tbl + link_tbl_index, 0);
	}

	return edesc;
}
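
/*
 * Resulting allocation layout (sketch):
 *
 *   [struct aead_edesc][hw job descriptor (desc_bytes)][link table]
 *
 * edesc->link_tbl points just past the job descriptor, and the
 * completion callbacks recover edesc from the descriptor pointer via
 * offsetof(struct aead_edesc, hw_desc).
 */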
static int aead_encrypt(struct aead_request *req)
{
	struct aead_edesc *edesc;
	struct crypto_aead *aead = crypto_aead_reqtfm(req);
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	struct device *jrdev = ctx->jrdev;
	bool all_contig;
	u32 *desc;
	int ret = 0;

	/* account for the ICV that will be appended to the output */
	req->cryptlen += ctx->authsize;

	/* allocate extended descriptor */
	edesc = aead_edesc_alloc(req, DESC_JOB_IO_LEN *
				 CAAM_CMD_SZ, &all_contig);
	if (IS_ERR(edesc))
		return PTR_ERR(edesc);

	/* Create and submit job descriptor */
	init_aead_job(ctx->sh_desc_enc, ctx->sh_desc_enc_dma, edesc, req,
		      all_contig, true);
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "aead jobdesc@"xstr(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc,
		       desc_bytes(edesc->hw_desc), 1);
#endif

	desc = edesc->hw_desc;
	ret = caam_jr_enqueue(jrdev, desc, aead_encrypt_done, req);
	if (!ret) {
		ret = -EINPROGRESS;
	} else {
		aead_unmap(jrdev, edesc, req);
		kfree(edesc);
	}

	return ret;
}
static int aead_decrypt(struct aead_request *req)
{
	struct aead_edesc *edesc;
	struct crypto_aead *aead = crypto_aead_reqtfm(req);
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	struct device *jrdev = ctx->jrdev;
	bool all_contig;
	u32 *desc;
	int ret = 0;

	/* allocate extended descriptor */
	edesc = aead_edesc_alloc(req, DESC_JOB_IO_LEN *
				 CAAM_CMD_SZ, &all_contig);
	if (IS_ERR(edesc))
		return PTR_ERR(edesc);

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "dec src@"xstr(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src),
		       req->cryptlen, 1);
#endif

	/* Create and submit job descriptor */
	init_aead_job(ctx->sh_desc_dec,
		      ctx->sh_desc_dec_dma, edesc, req, all_contig, false);
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "aead jobdesc@"xstr(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc,
		       desc_bytes(edesc->hw_desc), 1);
#endif

	desc = edesc->hw_desc;
	ret = caam_jr_enqueue(jrdev, desc, aead_decrypt_done, req);
	if (!ret) {
		ret = -EINPROGRESS;
	} else {
		aead_unmap(jrdev, edesc, req);
		kfree(edesc);
	}

	return ret;
}
/*
 * allocate and map the aead extended descriptor for aead givencrypt
 */
static struct aead_edesc *aead_giv_edesc_alloc(struct aead_givcrypt_request
					       *greq, int desc_bytes,
					       u32 *contig_ptr)
{
	struct aead_request *req = &greq->areq;
	struct crypto_aead *aead = crypto_aead_reqtfm(req);
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	struct device *jrdev = ctx->jrdev;
	gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
		       CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
	int assoc_nents, src_nents, dst_nents = 0;
	struct aead_edesc *edesc;
	dma_addr_t iv_dma = 0;
	int sgc;
	u32 contig = GIV_SRC_CONTIG | GIV_DST_CONTIG;
	int ivsize = crypto_aead_ivsize(aead);
	int link_tbl_index, link_tbl_len = 0, link_tbl_bytes;

	assoc_nents = sg_count(req->assoc, req->assoclen);
	src_nents = sg_count(req->src, req->cryptlen);

	if (unlikely(req->dst != req->src))
		dst_nents = sg_count(req->dst, req->cryptlen);

	sgc = dma_map_sg(jrdev, req->assoc, assoc_nents ? : 1,
			 DMA_BIDIRECTIONAL);
	if (likely(req->src == req->dst)) {
		sgc = dma_map_sg(jrdev, req->src, src_nents ? : 1,
				 DMA_BIDIRECTIONAL);
	} else {
		sgc = dma_map_sg(jrdev, req->src, src_nents ? : 1,
				 DMA_TO_DEVICE);
		sgc = dma_map_sg(jrdev, req->dst, dst_nents ? : 1,
				 DMA_FROM_DEVICE);
	}

	/* Check if data are contiguous */
	iv_dma = dma_map_single(jrdev, greq->giv, ivsize, DMA_TO_DEVICE);
	if (assoc_nents || sg_dma_address(req->assoc) + req->assoclen !=
	    iv_dma || src_nents || iv_dma + ivsize != sg_dma_address(req->src))
		contig &= ~GIV_SRC_CONTIG;
	if (dst_nents || iv_dma + ivsize != sg_dma_address(req->dst))
		contig &= ~GIV_DST_CONTIG;
	if (unlikely(req->src != req->dst)) {
		dst_nents = dst_nents ? : 1;
		link_tbl_len += 1;
	}
	if (!(contig & GIV_SRC_CONTIG)) {
		assoc_nents = assoc_nents ? : 1;
		src_nents = src_nents ? : 1;
		link_tbl_len += assoc_nents + 1 + src_nents;
		if (likely(req->src == req->dst))
			contig &= ~GIV_DST_CONTIG;
	}
	link_tbl_len += dst_nents;

	link_tbl_bytes = link_tbl_len * sizeof(struct link_tbl_entry);

	/* allocate space for base edesc and hw desc commands, link tables */
	edesc = kmalloc(sizeof(struct aead_edesc) + desc_bytes +
			link_tbl_bytes, GFP_DMA | flags);
	if (!edesc) {
		dev_err(jrdev, "could not allocate extended descriptor\n");
		return ERR_PTR(-ENOMEM);
	}

	edesc->assoc_nents = assoc_nents;
	edesc->src_nents = src_nents;
	edesc->dst_nents = dst_nents;
	edesc->iv_dma = iv_dma;
	edesc->link_tbl_bytes = link_tbl_bytes;
	edesc->link_tbl = (void *)edesc + sizeof(struct aead_edesc) +
			  desc_bytes;
	edesc->link_tbl_dma = dma_map_single(jrdev, edesc->link_tbl,
					     link_tbl_bytes, DMA_TO_DEVICE);
	*contig_ptr = contig;

	link_tbl_index = 0;
	if (!(contig & GIV_SRC_CONTIG)) {
		sg_to_link_tbl(req->assoc, assoc_nents,
			       edesc->link_tbl +
			       link_tbl_index, 0);
		link_tbl_index += assoc_nents;
		sg_to_link_tbl_one(edesc->link_tbl + link_tbl_index,
				   iv_dma, ivsize, 0);
		link_tbl_index += 1;
		sg_to_link_tbl_last(req->src, src_nents,
				    edesc->link_tbl +
				    link_tbl_index, 0);
		link_tbl_index += src_nents;
	}
	if (unlikely(req->src != req->dst && !(contig & GIV_DST_CONTIG))) {
		sg_to_link_tbl_one(edesc->link_tbl + link_tbl_index,
				   iv_dma, ivsize, 0);
		link_tbl_index += 1;
		sg_to_link_tbl_last(req->dst, dst_nents,
				    edesc->link_tbl + link_tbl_index, 0);
	}

	return edesc;
}
static int aead_givencrypt(struct aead_givcrypt_request *areq)
{
	struct aead_request *req = &areq->areq;
	struct aead_edesc *edesc;
	struct crypto_aead *aead = crypto_aead_reqtfm(req);
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	struct device *jrdev = ctx->jrdev;
	u32 contig;
	u32 *desc;
	int ret = 0;

	/* account for the ICV that will be appended to the output */
	req->cryptlen += ctx->authsize;

	/* allocate extended descriptor */
	edesc = aead_giv_edesc_alloc(areq, DESC_JOB_IO_LEN *
				     CAAM_CMD_SZ, &contig);

	if (IS_ERR(edesc))
		return PTR_ERR(edesc);

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "giv src@"xstr(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src),
		       req->cryptlen, 1);
#endif

	/* Create and submit job descriptor */
	init_aead_giv_job(ctx->sh_desc_givenc,
			  ctx->sh_desc_givenc_dma, edesc, req, contig);
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "aead jobdesc@"xstr(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc,
		       desc_bytes(edesc->hw_desc), 1);
#endif

	desc = edesc->hw_desc;
	ret = caam_jr_enqueue(jrdev, desc, aead_encrypt_done, req);
	if (!ret) {
		ret = -EINPROGRESS;
	} else {
		aead_unmap(jrdev, edesc, req);
		kfree(edesc);
	}

	return ret;
}
/*
 * allocate and map the ablkcipher extended descriptor for ablkcipher
 */
static struct ablkcipher_edesc *ablkcipher_edesc_alloc(struct ablkcipher_request
						       *req, int desc_bytes,
						       bool *iv_contig_out)
{
	struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
	struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
	struct device *jrdev = ctx->jrdev;
	gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
					  CRYPTO_TFM_REQ_MAY_SLEEP)) ?
		       GFP_KERNEL : GFP_ATOMIC;
	int src_nents, dst_nents = 0, link_tbl_bytes;
	struct ablkcipher_edesc *edesc;
	dma_addr_t iv_dma = 0;
	bool iv_contig = false;
	int sgc;
	int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
	int link_tbl_index;

	src_nents = sg_count(req->src, req->nbytes);

	if (unlikely(req->dst != req->src))
		dst_nents = sg_count(req->dst, req->nbytes);

	if (likely(req->src == req->dst)) {
		sgc = dma_map_sg(jrdev, req->src, src_nents ? : 1,
				 DMA_BIDIRECTIONAL);
	} else {
		sgc = dma_map_sg(jrdev, req->src, src_nents ? : 1,
				 DMA_TO_DEVICE);
		sgc = dma_map_sg(jrdev, req->dst, dst_nents ? : 1,
				 DMA_FROM_DEVICE);
	}

	/*
	 * Check if iv can be contiguous with source and destination.
	 * If so, include it. If not, create scatterlist.
	 */
	iv_dma = dma_map_single(jrdev, req->info, ivsize, DMA_TO_DEVICE);
	if (!src_nents && iv_dma + ivsize == sg_dma_address(req->src))
		iv_contig = true;
	else
		src_nents = src_nents ? : 1;
	link_tbl_bytes = ((iv_contig ? 0 : 1) + src_nents + dst_nents) *
			 sizeof(struct link_tbl_entry);
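	/*
	 * Worked example (sketch, assuming the 16-byte link_tbl_entry
	 * above): a non-contiguous IV plus a two-segment source with
	 * dst == src needs (1 + 2 + 0) entries, i.e. 48 bytes of table.
	 */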
	/* allocate space for base edesc and hw desc commands, link tables */
	edesc = kmalloc(sizeof(struct ablkcipher_edesc) + desc_bytes +
			link_tbl_bytes, GFP_DMA | flags);
	if (!edesc) {
		dev_err(jrdev, "could not allocate extended descriptor\n");
		return ERR_PTR(-ENOMEM);
	}

	edesc->src_nents = src_nents;
	edesc->dst_nents = dst_nents;
	edesc->link_tbl_bytes = link_tbl_bytes;
	edesc->link_tbl = (void *)edesc + sizeof(struct ablkcipher_edesc) +
			  desc_bytes;

	link_tbl_index = 0;
	if (!iv_contig) {
		sg_to_link_tbl_one(edesc->link_tbl, iv_dma, ivsize, 0);
		sg_to_link_tbl_last(req->src, src_nents,
				    edesc->link_tbl + 1, 0);
		link_tbl_index += 1 + src_nents;
	}

	if (unlikely(dst_nents)) {
		sg_to_link_tbl_last(req->dst, dst_nents,
				    edesc->link_tbl + link_tbl_index, 0);
	}

	edesc->link_tbl_dma = dma_map_single(jrdev, edesc->link_tbl,
					     link_tbl_bytes, DMA_TO_DEVICE);
	edesc->iv_dma = iv_dma;

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "ablkcipher link_tbl@"xstr(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, edesc->link_tbl,
		       link_tbl_bytes, 1);
#endif

	*iv_contig_out = iv_contig;
	return edesc;
}
static int ablkcipher_encrypt(struct ablkcipher_request *req)
{
	struct ablkcipher_edesc *edesc;
	struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
	struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
	struct device *jrdev = ctx->jrdev;
	bool iv_contig;
	u32 *desc;
	int ret = 0;

	/* allocate extended descriptor */
	edesc = ablkcipher_edesc_alloc(req, DESC_JOB_IO_LEN *
				       CAAM_CMD_SZ, &iv_contig);
	if (IS_ERR(edesc))
		return PTR_ERR(edesc);

	/* Create and submit job descriptor */
	init_ablkcipher_job(ctx->sh_desc_enc,
			    ctx->sh_desc_enc_dma, edesc, req, iv_contig);
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "ablkcipher jobdesc@"xstr(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc,
		       desc_bytes(edesc->hw_desc), 1);
#endif
	desc = edesc->hw_desc;
	ret = caam_jr_enqueue(jrdev, desc, ablkcipher_encrypt_done, req);

	if (!ret) {
		ret = -EINPROGRESS;
	} else {
		ablkcipher_unmap(jrdev, edesc, req);
		kfree(edesc);
	}

	return ret;
}
static int ablkcipher_decrypt(struct ablkcipher_request *req)
{
	struct ablkcipher_edesc *edesc;
	struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
	struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
	struct device *jrdev = ctx->jrdev;
	bool iv_contig;
	u32 *desc;
	int ret = 0;

	/* allocate extended descriptor */
	edesc = ablkcipher_edesc_alloc(req, DESC_JOB_IO_LEN *
				       CAAM_CMD_SZ, &iv_contig);
	if (IS_ERR(edesc))
		return PTR_ERR(edesc);

	/* Create and submit job descriptor */
	init_ablkcipher_job(ctx->sh_desc_dec,
			    ctx->sh_desc_dec_dma, edesc, req, iv_contig);
	desc = edesc->hw_desc;
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "ablkcipher jobdesc@"xstr(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc,
		       desc_bytes(edesc->hw_desc), 1);
#endif

	ret = caam_jr_enqueue(jrdev, desc, ablkcipher_decrypt_done, req);
	if (!ret) {
		ret = -EINPROGRESS;
	} else {
		ablkcipher_unmap(jrdev, edesc, req);
		kfree(edesc);
	}

	return ret;
}
#define template_aead		template_u.aead
#define template_ablkcipher	template_u.ablkcipher
struct caam_alg_template {
	char name[CRYPTO_MAX_ALG_NAME];
	char driver_name[CRYPTO_MAX_ALG_NAME];
	unsigned int blocksize;
	u32 type;
	union {
		struct ablkcipher_alg ablkcipher;
		struct aead_alg aead;
		struct blkcipher_alg blkcipher;
		struct cipher_alg cipher;
		struct compress_alg compress;
		struct rng_alg rng;
	} template_u;
	u32 class1_alg_type;
	u32 class2_alg_type;
	u32 alg_op;
};
static struct caam_alg_template driver_algs[] = {
	/* single-pass ipsec_esp descriptor */
	{
		.name = "authenc(hmac(sha1),cbc(aes))",
		.driver_name = "authenc-hmac-sha1-cbc-aes-caam",
		.blocksize = AES_BLOCK_SIZE,
		.type = CRYPTO_ALG_TYPE_AEAD,
		.template_aead = {
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.givencrypt = aead_givencrypt,
			.geniv = "<built-in>",
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = SHA1_DIGEST_SIZE,
			},
		.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
		.class2_alg_type = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC_PRECOMP,
		.alg_op = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC,
	},
	{
		.name = "authenc(hmac(sha256),cbc(aes))",
		.driver_name = "authenc-hmac-sha256-cbc-aes-caam",
		.blocksize = AES_BLOCK_SIZE,
		.type = CRYPTO_ALG_TYPE_AEAD,
		.template_aead = {
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.givencrypt = aead_givencrypt,
			.geniv = "<built-in>",
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = SHA256_DIGEST_SIZE,
			},
		.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
		.class2_alg_type = OP_ALG_ALGSEL_SHA256 |
				   OP_ALG_AAI_HMAC_PRECOMP,
		.alg_op = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC,
	},
	{
		.name = "authenc(hmac(sha512),cbc(aes))",
		.driver_name = "authenc-hmac-sha512-cbc-aes-caam",
		.blocksize = AES_BLOCK_SIZE,
		.type = CRYPTO_ALG_TYPE_AEAD,
		.template_aead = {
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.givencrypt = aead_givencrypt,
			.geniv = "<built-in>",
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = SHA512_DIGEST_SIZE,
			},
		.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
		.class2_alg_type = OP_ALG_ALGSEL_SHA512 |
				   OP_ALG_AAI_HMAC_PRECOMP,
		.alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC,
	},
	{
		.name = "authenc(hmac(sha1),cbc(des3_ede))",
		.driver_name = "authenc-hmac-sha1-cbc-des3_ede-caam",
		.blocksize = DES3_EDE_BLOCK_SIZE,
		.type = CRYPTO_ALG_TYPE_AEAD,
		.template_aead = {
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.givencrypt = aead_givencrypt,
			.geniv = "<built-in>",
			.ivsize = DES3_EDE_BLOCK_SIZE,
			.maxauthsize = SHA1_DIGEST_SIZE,
			},
		.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
		.class2_alg_type = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC_PRECOMP,
		.alg_op = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC,
	},
1887 .name = "authenc(hmac(sha256),cbc(des3_ede))",
1888 .driver_name = "authenc-hmac-sha256-cbc-des3_ede-caam",
1889 .blocksize = DES3_EDE_BLOCK_SIZE,
1890 .type = CRYPTO_ALG_TYPE_AEAD,
1891 .template_aead = {
1892 .setkey = aead_setkey,
1893 .setauthsize = aead_setauthsize,
1894 .encrypt = aead_encrypt,
1895 .decrypt = aead_decrypt,
1896 .givencrypt = aead_givencrypt,
1897 .geniv = "<built-in>",
1898 .ivsize = DES3_EDE_BLOCK_SIZE,
1899 .maxauthsize = SHA256_DIGEST_SIZE,
1901 .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
1902 .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
1903 OP_ALG_AAI_HMAC_PRECOMP,
1904 .alg_op = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC,
1907 .name = "authenc(hmac(sha512),cbc(des3_ede))",
1908 .driver_name = "authenc-hmac-sha512-cbc-des3_ede-caam",
1909 .blocksize = DES3_EDE_BLOCK_SIZE,
1910 .type = CRYPTO_ALG_TYPE_AEAD,
1911 .template_aead = {
1912 .setkey = aead_setkey,
1913 .setauthsize = aead_setauthsize,
1914 .encrypt = aead_encrypt,
1915 .decrypt = aead_decrypt,
1916 .givencrypt = aead_givencrypt,
1917 .geniv = "<built-in>",
1918 .ivsize = DES3_EDE_BLOCK_SIZE,
1919 .maxauthsize = SHA512_DIGEST_SIZE,
1921 .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
1922 .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
1923 OP_ALG_AAI_HMAC_PRECOMP,
1924 .alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC,
1927 .name = "authenc(hmac(sha1),cbc(des))",
1928 .driver_name = "authenc-hmac-sha1-cbc-des-caam",
1929 .blocksize = DES_BLOCK_SIZE,
1930 .type = CRYPTO_ALG_TYPE_AEAD,
1931 .template_aead = {
1932 .setkey = aead_setkey,
1933 .setauthsize = aead_setauthsize,
1934 .encrypt = aead_encrypt,
1935 .decrypt = aead_decrypt,
1936 .givencrypt = aead_givencrypt,
1937 .geniv = "<built-in>",
1938 .ivsize = DES_BLOCK_SIZE,
1939 .maxauthsize = SHA1_DIGEST_SIZE,
1941 .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
1942 .class2_alg_type = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC_PRECOMP,
1943 .alg_op = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC,
1946 .name = "authenc(hmac(sha256),cbc(des))",
1947 .driver_name = "authenc-hmac-sha256-cbc-des-caam",
1948 .blocksize = DES_BLOCK_SIZE,
1949 .type = CRYPTO_ALG_TYPE_AEAD,
1950 .template_aead = {
1951 .setkey = aead_setkey,
1952 .setauthsize = aead_setauthsize,
1953 .encrypt = aead_encrypt,
1954 .decrypt = aead_decrypt,
1955 .givencrypt = aead_givencrypt,
1956 .geniv = "<built-in>",
1957 .ivsize = DES_BLOCK_SIZE,
1958 .maxauthsize = SHA256_DIGEST_SIZE,
1960 .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
1961 .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
1962 OP_ALG_AAI_HMAC_PRECOMP,
1963 .alg_op = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC,
1966 .name = "authenc(hmac(sha512),cbc(des))",
1967 .driver_name = "authenc-hmac-sha512-cbc-des-caam",
1968 .blocksize = DES_BLOCK_SIZE,
1969 .type = CRYPTO_ALG_TYPE_AEAD,
1970 .template_aead = {
1971 .setkey = aead_setkey,
1972 .setauthsize = aead_setauthsize,
1973 .encrypt = aead_encrypt,
1974 .decrypt = aead_decrypt,
1975 .givencrypt = aead_givencrypt,
1976 .geniv = "<built-in>",
1977 .ivsize = DES_BLOCK_SIZE,
1978 .maxauthsize = SHA512_DIGEST_SIZE,
1980 .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
1981 .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
1982 OP_ALG_AAI_HMAC_PRECOMP,
1983 .alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC,
1985 /* ablkcipher descriptor */
1987 .name = "cbc(aes)",
1988 .driver_name = "cbc-aes-caam",
1989 .blocksize = AES_BLOCK_SIZE,
1990 .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
1991 .template_ablkcipher = {
1992 .setkey = ablkcipher_setkey,
1993 .encrypt = ablkcipher_encrypt,
1994 .decrypt = ablkcipher_decrypt,
1995 .geniv = "eseqiv",
1996 .min_keysize = AES_MIN_KEY_SIZE,
1997 .max_keysize = AES_MAX_KEY_SIZE,
1998 .ivsize = AES_BLOCK_SIZE,
2000 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
2003 .name = "cbc(des3_ede)",
2004 .driver_name = "cbc-3des-caam",
2005 .blocksize = DES3_EDE_BLOCK_SIZE,
2006 .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
2007 .template_ablkcipher = {
2008 .setkey = ablkcipher_setkey,
2009 .encrypt = ablkcipher_encrypt,
2010 .decrypt = ablkcipher_decrypt,
2011 .geniv = "eseqiv",
2012 .min_keysize = DES3_EDE_KEY_SIZE,
2013 .max_keysize = DES3_EDE_KEY_SIZE,
2014 .ivsize = DES3_EDE_BLOCK_SIZE,
2016 .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
2019 .name = "cbc(des)",
2020 .driver_name = "cbc-des-caam",
2021 .blocksize = DES_BLOCK_SIZE,
2022 .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
2023 .template_ablkcipher = {
2024 .setkey = ablkcipher_setkey,
2025 .encrypt = ablkcipher_encrypt,
2026 .decrypt = ablkcipher_decrypt,
2027 .geniv = "eseqiv",
2028 .min_keysize = DES_KEY_SIZE,
2029 .max_keysize = DES_KEY_SIZE,
2030 .ivsize = DES_BLOCK_SIZE,
2032 .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
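/*
 * Illustrative sketch (hypothetical helper, compiled out): once these
 * templates are registered, callers reach them through the generic
 * crypto API by algorithm name; with cra_priority at CAAM_CRA_PRIORITY
 * (3000) the CAAM implementation is normally preferred over software
 * fallbacks registered at lower priority.
 */
#if 0
static struct crypto_ablkcipher *example_get_cbc_aes(void)
{
	/* expected to resolve to "cbc-aes-caam" when CAAM is present */
	return crypto_alloc_ablkcipher("cbc(aes)", 0, 0);
}
#endif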
struct caam_crypto_alg {
	struct list_head entry;
	struct device *ctrldev;
	int class1_alg_type;
	int class2_alg_type;
	int alg_op;
	struct crypto_alg crypto_alg;
};
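/*
 * caam_crypto_alg pairs the crypto_alg handed to the crypto API with the
 * CAAM operation words needed to build descriptors, plus a list node so
 * caam_algapi_exit() can unregister and free every instance.
 */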
static int caam_cra_init(struct crypto_tfm *tfm)
{
	struct crypto_alg *alg = tfm->__crt_alg;
	struct caam_crypto_alg *caam_alg =
		 container_of(alg, struct caam_crypto_alg, crypto_alg);
	struct caam_ctx *ctx = crypto_tfm_ctx(tfm);
	struct caam_drv_private *priv = dev_get_drvdata(caam_alg->ctrldev);
	int tgt_jr = atomic_inc_return(&priv->tfm_count);

	/*
	 * distribute tfms across job rings to ensure in-order
	 * crypto request processing per tfm
	 */
	ctx->jrdev = priv->algapi_jr[(tgt_jr / 2) % priv->num_jrs_for_algapi];

	/* copy descriptor header template value */
	ctx->class1_alg_type = OP_TYPE_CLASS1_ALG | caam_alg->class1_alg_type;
	ctx->class2_alg_type = OP_TYPE_CLASS2_ALG | caam_alg->class2_alg_type;
	ctx->alg_op = OP_TYPE_CLASS2_ALG | caam_alg->alg_op;

	return 0;
}
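/*
 * Worked example of the ring selection above, assuming two job rings
 * (num_jrs_for_algapi == 2): tfm_count starts at -1, so successive tfms
 * observe tgt_jr = 0, 1, 2, 3, ... and (tgt_jr / 2) % 2 yields rings
 * 0, 0, 1, 1, 0, 0, ... - pairs of tfms share a ring, and each tfm's
 * requests stay on one ring so they complete in order.
 */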
static void caam_cra_exit(struct crypto_tfm *tfm)
{
	struct caam_ctx *ctx = crypto_tfm_ctx(tfm);

	/*
	 * Unmap whichever shared descriptors were left DMA-mapped by
	 * descriptor setup; each check guards against a descriptor that
	 * was never mapped or whose mapping failed.
	 */
	if (ctx->sh_desc_enc_dma &&
	    !dma_mapping_error(ctx->jrdev, ctx->sh_desc_enc_dma))
		dma_unmap_single(ctx->jrdev, ctx->sh_desc_enc_dma,
				 desc_bytes(ctx->sh_desc_enc), DMA_TO_DEVICE);
	if (ctx->sh_desc_dec_dma &&
	    !dma_mapping_error(ctx->jrdev, ctx->sh_desc_dec_dma))
		dma_unmap_single(ctx->jrdev, ctx->sh_desc_dec_dma,
				 desc_bytes(ctx->sh_desc_dec), DMA_TO_DEVICE);
	if (ctx->sh_desc_givenc_dma &&
	    !dma_mapping_error(ctx->jrdev, ctx->sh_desc_givenc_dma))
		dma_unmap_single(ctx->jrdev, ctx->sh_desc_givenc_dma,
				 desc_bytes(ctx->sh_desc_givenc),
				 DMA_TO_DEVICE);
}
static void __exit caam_algapi_exit(void)
{
	struct device_node *dev_node;
	struct platform_device *pdev;
	struct device *ctrldev;
	struct caam_drv_private *priv;
	struct caam_crypto_alg *t_alg, *n;
	int i, err;

	dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec-v4.0");
	if (!dev_node)
		return;

	pdev = of_find_device_by_node(dev_node);
	if (!pdev)
		return;

	ctrldev = &pdev->dev;
	of_node_put(dev_node);
	priv = dev_get_drvdata(ctrldev);

	/* nothing to undo if the alg list head was never initialized */
	if (!priv->alg_list.next)
		return;

	list_for_each_entry_safe(t_alg, n, &priv->alg_list, entry) {
		crypto_unregister_alg(&t_alg->crypto_alg);
		list_del(&t_alg->entry);
		kfree(t_alg);
	}

	for (i = 0; i < priv->total_jobrs; i++) {
		err = caam_jr_deregister(priv->algapi_jr[i]);
		if (err < 0)
			break;
	}

	kfree(priv->algapi_jr);
}
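/*
 * Note the teardown order above mirrors init: algorithms are
 * unregistered before the job rings they enqueue on are deregistered,
 * so no new requests can arrive while the rings are torn down.
 */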
static struct caam_crypto_alg *caam_alg_alloc(struct device *ctrldev,
					      struct caam_alg_template
					      *template)
{
	struct caam_crypto_alg *t_alg;
	struct crypto_alg *alg;

	t_alg = kzalloc(sizeof(struct caam_crypto_alg), GFP_KERNEL);
	if (!t_alg) {
		dev_err(ctrldev, "failed to allocate t_alg\n");
		return ERR_PTR(-ENOMEM);
	}

	alg = &t_alg->crypto_alg;

	snprintf(alg->cra_name, CRYPTO_MAX_ALG_NAME, "%s", template->name);
	snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
		 template->driver_name);
	alg->cra_module = THIS_MODULE;
	alg->cra_init = caam_cra_init;
	alg->cra_exit = caam_cra_exit;
	alg->cra_priority = CAAM_CRA_PRIORITY;
	alg->cra_blocksize = template->blocksize;
	alg->cra_alignmask = 0;
	alg->cra_ctxsize = sizeof(struct caam_ctx);
	alg->cra_flags = CRYPTO_ALG_ASYNC | template->type;
	switch (template->type) {
	case CRYPTO_ALG_TYPE_ABLKCIPHER:
		alg->cra_type = &crypto_ablkcipher_type;
		alg->cra_ablkcipher = template->template_ablkcipher;
		break;
	case CRYPTO_ALG_TYPE_AEAD:
		alg->cra_type = &crypto_aead_type;
		alg->cra_aead = template->template_aead;
		break;
	}

	t_alg->class1_alg_type = template->class1_alg_type;
	t_alg->class2_alg_type = template->class2_alg_type;
	t_alg->alg_op = template->alg_op;
	t_alg->ctrldev = ctrldev;

	return t_alg;
}
static int __init caam_algapi_init(void)
{
	struct device_node *dev_node;
	struct platform_device *pdev;
	struct device *ctrldev, **jrdev;
	struct caam_drv_private *priv;
	int i = 0, err = 0;

	dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec-v4.0");
	if (!dev_node)
		return -ENODEV;

	pdev = of_find_device_by_node(dev_node);
	if (!pdev)
		return -ENODEV;

	ctrldev = &pdev->dev;
	priv = dev_get_drvdata(ctrldev);
	of_node_put(dev_node);

	INIT_LIST_HEAD(&priv->alg_list);

	jrdev = kmalloc(sizeof(*jrdev) * priv->total_jobrs, GFP_KERNEL);
	if (!jrdev)
		return -ENOMEM;

	for (i = 0; i < priv->total_jobrs; i++) {
		err = caam_jr_register(ctrldev, &jrdev[i]);
		if (err < 0)
			break;
	}
	if (err < 0 && i == 0) {
		dev_err(ctrldev, "algapi error in job ring registration: %d\n",
			err);
		kfree(jrdev);
		return err;
	}

	priv->num_jrs_for_algapi = i;
	priv->algapi_jr = jrdev;
	atomic_set(&priv->tfm_count, -1);

	/* register crypto algorithms the device supports */
	for (i = 0; i < ARRAY_SIZE(driver_algs); i++) {
		/* TODO: check if h/w supports alg */
		struct caam_crypto_alg *t_alg;

		t_alg = caam_alg_alloc(ctrldev, &driver_algs[i]);
		if (IS_ERR(t_alg)) {
			err = PTR_ERR(t_alg);
			dev_warn(ctrldev, "%s alg allocation failed\n",
				 driver_algs[i].driver_name);
			continue;
		}

		err = crypto_register_alg(&t_alg->crypto_alg);
		if (err) {
			dev_warn(ctrldev, "%s alg registration failed\n",
				 t_alg->crypto_alg.cra_driver_name);
			kfree(t_alg);
		} else {
			list_add_tail(&t_alg->entry, &priv->alg_list);
			dev_info(ctrldev, "%s\n",
				 t_alg->crypto_alg.cra_driver_name);
		}
	}

	return err;
}
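/*
 * After a successful init, each registered implementation is visible to
 * userspace via /proc/crypto (driver name and priority included), which
 * is a convenient way to confirm the CAAM variants took precedence.
 */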
module_init(caam_algapi_init);
module_exit(caam_algapi_exit);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("FSL CAAM support for crypto API");
MODULE_AUTHOR("Freescale Semiconductor - NMG/STC");