/**
 * AES CCM routines supporting the Power 7+ Nest Accelerators driver
 *
 * Copyright (C) 2012 International Business Machines Inc.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; version 2 only.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 *
 * Author: Kent Yoder <yoder1@us.ibm.com>
 */

#include <crypto/internal/aead.h>
#include <crypto/aes.h>
#include <crypto/algapi.h>
#include <crypto/scatterwalk.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/crypto.h>

#include "nx_csbcpb.h"
#include "nx.h"

static int ccm_aes_nx_set_key(struct crypto_aead *tfm,
			      const u8           *in_key,
			      unsigned int        key_len)
{
	struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&tfm->base);
	struct nx_csbcpb *csbcpb = nx_ctx->csbcpb;
	struct nx_csbcpb *csbcpb_aead = nx_ctx->csbcpb_aead;

	nx_ctx_init(nx_ctx, HCOP_FC_AES);

	switch (key_len) {
	case AES_KEYSIZE_128:
		NX_CPB_SET_KEY_SIZE(csbcpb, NX_KS_AES_128);
		NX_CPB_SET_KEY_SIZE(csbcpb_aead, NX_KS_AES_128);
		nx_ctx->ap = &nx_ctx->props[NX_PROPS_AES_128];
		break;
	default:
		return -EINVAL;
	}

	csbcpb->cpb.hdr.mode = NX_MODE_AES_CCM;
	memcpy(csbcpb->cpb.aes_ccm.key, in_key, key_len);

	csbcpb_aead->cpb.hdr.mode = NX_MODE_AES_CCA;
	memcpy(csbcpb_aead->cpb.aes_cca.key, in_key, key_len);

	return 0;
}

static int ccm4309_aes_nx_set_key(struct crypto_aead *tfm,
				  const u8           *in_key,
				  unsigned int        key_len)
{
	struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&tfm->base);

	if (key_len < 3)
		return -EINVAL;

	key_len -= 3;

	memcpy(nx_ctx->priv.ccm.nonce, in_key + key_len, 3);

	return ccm_aes_nx_set_key(tfm, in_key, key_len);
}

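/* CCM allows MAC tag lengths of 4, 6, 8, 10, 12, 14 or 16 bytes
 * (NIST SP 800-38C); anything else is rejected here */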
static int ccm_aes_nx_setauthsize(struct crypto_aead *tfm,
				  unsigned int authsize)
{
	switch (authsize) {
	case 4:
	case 6:
	case 8:
	case 10:
	case 12:
	case 14:
	case 16:
		break;
	default:
		return -EINVAL;
	}

	crypto_aead_crt(tfm)->authsize = authsize;

	return 0;
}

static int ccm4309_aes_nx_setauthsize(struct crypto_aead *tfm,
				      unsigned int authsize)
{
	switch (authsize) {
	case 8:
	case 12:
	case 16:
		break;
	default:
		return -EINVAL;
	}

	crypto_aead_crt(tfm)->authsize = authsize;

	return 0;
}

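/* the length field: the last L bytes of B0 hold the message length,
 * big-endian; set_msg_len() writes it there and fails with -EOVERFLOW
 * if the length does not fit in L bytes */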
/* taken from crypto/ccm.c */
static int set_msg_len(u8 *block, unsigned int msglen, int csize)
{
	__be32 data;

	memset(block, 0, csize);
	block += csize;

	if (csize >= 4)
		csize = 4;
	else if (msglen > (unsigned int)(1 << (8 * csize)))
		return -EOVERFLOW;

	data = cpu_to_be32(msglen);
	memcpy(block - csize, (u8 *)&data + 4 - csize, csize);

	return 0;
}

/* taken from crypto/ccm.c */
static inline int crypto_ccm_check_iv(const u8 *iv)
{
	/* 2 <= L <= 8, so 1 <= L' <= 7. */
	if (1 > iv[0] || iv[0] > 7)
		return -EINVAL;

	return 0;
}

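/* B0 layout per RFC 3610 / NIST SP 800-38C:
 *   byte 0:         flags (bit 6: adata present, bits 3-5: (M-2)/2,
 *                   bits 0-2: L-1)
 *   bytes 1..15-L:  nonce
 *   last L bytes:   message length, big-endian
 * the caller's IV already carries the L-1 value and the nonce, so we
 * only OR in the remaining flag bits and append the length */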
/* based on code from crypto/ccm.c */
static int generate_b0(u8 *iv, unsigned int assoclen, unsigned int authsize,
		       unsigned int cryptlen, u8 *b0)
{
	unsigned int l, lp, m = authsize;
	int rc;

	memcpy(b0, iv, 16);

	lp = b0[0];
	l = lp + 1;

	/* set m, bits 3-5 */
	*b0 |= (8 * ((m - 2) / 2));

	/* set adata, bit 6, if associated data is used */
	if (assoclen)
		*b0 |= 64;

	rc = set_msg_len(b0 + 16 - l, cryptlen, l);

	return rc;
}

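/* compute the "partial authentication tag" (PAT): feed B0 (plus B1 and
 * any remaining AAD, when present) through the accelerator so the
 * CBC-MAC state handed back in "out" is ready for the data passes.
 * AAD of 14 bytes or less rides along in B1 through the CCM engine;
 * anything larger is hashed in chunks by the separate CCA engine */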
static int generate_pat(u8                   *iv,
			struct aead_request  *req,
			struct nx_crypto_ctx *nx_ctx,
			unsigned int          authsize,
			unsigned int          nbytes,
			u8                   *out)
{
	struct nx_sg *nx_insg = nx_ctx->in_sg;
	struct nx_sg *nx_outsg = nx_ctx->out_sg;
	unsigned int iauth_len = 0;
	unsigned int max_sg_len;
	u8 tmp[16], *b1 = NULL, *b0 = NULL, *result = NULL;
	int rc;

	/* zero the ctr value */
	memset(iv + 15 - iv[0], 0, iv[0] + 1);

	/* page 78 of nx_wb.pdf has,
	 * Note: RFC3610 allows the AAD data to be up to 2^64 - 1 bytes
	 * in length. If a full message is used, the AES CCA implementation
	 * restricts the maximum AAD length to 2^32 - 1 bytes.
	 * If partial messages are used, the implementation supports
	 * 2^64 - 1 bytes maximum AAD length.
	 *
	 * However, in the cryptoapi's aead_request structure,
	 * assoclen is an unsigned int, thus it cannot hold a length
	 * value greater than 2^32 - 1.
	 * Thus the AAD is further constrained by this and is never
	 * greater than 2^32.
	 */

	if (!req->assoclen) {
		b0 = nx_ctx->csbcpb->cpb.aes_ccm.in_pat_or_b0;
	} else if (req->assoclen <= 14) {
		/* if associated data is 14 bytes or less, we do 1 CCM
		 * operation on 2 AES blocks, B0 (stored in the csbcpb) and B1,
		 * which is fed in through the source buffers here */
		b0 = nx_ctx->csbcpb->cpb.aes_ccm.in_pat_or_b0;
		b1 = nx_ctx->priv.ccm.iauth_tag;
		iauth_len = req->assoclen;
	} else if (req->assoclen <= 65280) {
		/* if associated data is less than (2^16 - 2^8), we construct
		 * B1 differently and feed in the associated data to a CCA
		 * operation */
		b0 = nx_ctx->csbcpb_aead->cpb.aes_cca.b0;
		b1 = nx_ctx->csbcpb_aead->cpb.aes_cca.b1;
		iauth_len = 14;
	} else {
		b0 = nx_ctx->csbcpb_aead->cpb.aes_cca.b0;
		b1 = nx_ctx->csbcpb_aead->cpb.aes_cca.b1;
		iauth_len = 10;
	}

	/* generate B0 */
	rc = generate_b0(iv, req->assoclen, authsize, nbytes, b0);
	if (rc)
		return rc;
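
	/* RFC 3610 AAD length encoding: an AAD length below 0xff00 goes
	 * in the first two bytes of B1 directly; longer AAD uses the
	 * 0xfffe escape marker followed by a 4-byte length */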
	/* generate B1:
	 * add control info for associated data
	 * RFC 3610 and NIST Special Publication 800-38C
	 */
	if (b1) {
		memset(b1, 0, 16);
		if (req->assoclen <= 65280) {
			*(u16 *)b1 = (u16)req->assoclen;
			scatterwalk_map_and_copy(b1 + 2, req->assoc, 0,
						 iauth_len,
						 SCATTERWALK_FROM_SG);
		} else {
			*(u16 *)b1 = (u16)(0xfffe);
			*(u32 *)&b1[2] = (u32)req->assoclen;
			scatterwalk_map_and_copy(b1 + 6, req->assoc, 0,
						 iauth_len,
						 SCATTERWALK_FROM_SG);
		}
	}

	/* now copy any remaining AAD to scatterlist and call nx... */
	if (!req->assoclen) {
		return rc;
	} else if (req->assoclen <= 14) {
		nx_insg = nx_build_sg_list(nx_insg, b1, 16, nx_ctx->ap->sglen);
		nx_outsg = nx_build_sg_list(nx_outsg, tmp, 16,
					    nx_ctx->ap->sglen);

		/* inlen should be negative, indicating to phyp that it's a
		 * pointer to an sg list */
		nx_ctx->op.inlen = (nx_ctx->in_sg - nx_insg) *
					sizeof(struct nx_sg);
		nx_ctx->op.outlen = (nx_ctx->out_sg - nx_outsg) *
					sizeof(struct nx_sg);

		NX_CPB_FDM(nx_ctx->csbcpb) |= NX_FDM_ENDE_ENCRYPT;
		NX_CPB_FDM(nx_ctx->csbcpb) |= NX_FDM_INTERMEDIATE;

		result = nx_ctx->csbcpb->cpb.aes_ccm.out_pat_or_mac;

		rc = nx_hcall_sync(nx_ctx, &nx_ctx->op,
				   req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP);
		if (rc)
			return rc;

		atomic_inc(&(nx_ctx->stats->aes_ops));
		atomic64_add(req->assoclen, &(nx_ctx->stats->aes_bytes));
	} else {
		unsigned int processed = 0, to_process;

		/* page_limit: number of sg entries that fit on one page */
		max_sg_len = min_t(u32,
				   nx_driver.of.max_sg_len/sizeof(struct nx_sg),
				   nx_ctx->ap->sglen);

		processed += iauth_len;

		do {
			to_process = min_t(u32, req->assoclen - processed,
					   nx_ctx->ap->databytelen);
			to_process = min_t(u64, to_process,
					   NX_PAGE_SIZE * (max_sg_len - 1));

			if ((to_process + processed) < req->assoclen) {
				NX_CPB_FDM(nx_ctx->csbcpb_aead) |=
					NX_FDM_INTERMEDIATE;
			} else {
				NX_CPB_FDM(nx_ctx->csbcpb_aead) &=
					~NX_FDM_INTERMEDIATE;
			}

			nx_insg = nx_walk_and_build(nx_ctx->in_sg,
						    nx_ctx->ap->sglen,
						    req->assoc, processed,
						    &to_process);

			nx_ctx->op_aead.inlen = (nx_ctx->in_sg - nx_insg) *
						sizeof(struct nx_sg);

			result = nx_ctx->csbcpb_aead->cpb.aes_cca.out_pat_or_b0;

			rc = nx_hcall_sync(nx_ctx, &nx_ctx->op_aead,
					   req->base.flags &
					   CRYPTO_TFM_REQ_MAY_SLEEP);
			if (rc)
				return rc;

			memcpy(nx_ctx->csbcpb_aead->cpb.aes_cca.b0,
			       nx_ctx->csbcpb_aead->cpb.aes_cca.out_pat_or_b0,
			       AES_BLOCK_SIZE);

			NX_CPB_FDM(nx_ctx->csbcpb_aead) |= NX_FDM_CONTINUATION;

			atomic_inc(&(nx_ctx->stats->aes_ops));
			atomic64_add(req->assoclen,
				     &(nx_ctx->stats->aes_bytes));

			processed += to_process;
		} while (processed < req->assoclen);

		result = nx_ctx->csbcpb_aead->cpb.aes_cca.out_pat_or_b0;
	}

	memcpy(out, result, AES_BLOCK_SIZE);

	return rc;
}

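/* decrypt: recompute the CBC-MAC over the decrypted payload and compare
 * it with the tag copied out of req->src up front; a mismatch is
 * reported as -EBADMSG */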
static int ccm_nx_decrypt(struct aead_request   *req,
			  struct blkcipher_desc *desc)
{
	struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(req->base.tfm);
	struct nx_csbcpb *csbcpb = nx_ctx->csbcpb;
	unsigned int nbytes = req->cryptlen;
	unsigned int authsize = crypto_aead_authsize(crypto_aead_reqtfm(req));
	struct nx_ccm_priv *priv = &nx_ctx->priv.ccm;
	unsigned long irq_flags;
	unsigned int processed = 0, to_process;
	unsigned int max_sg_len;
	int rc = -1;
	spin_lock_irqsave(&nx_ctx->lock, irq_flags);

	nbytes -= authsize;

	/* copy out the auth tag to compare with later */
	scatterwalk_map_and_copy(priv->oauth_tag,
				 req->src, nbytes, authsize,
				 SCATTERWALK_FROM_SG);

	rc = generate_pat(desc->info, req, nx_ctx, authsize, nbytes,
			  csbcpb->cpb.aes_ccm.in_pat_or_b0);
	if (rc)
		goto out;

	/* page_limit: number of sg entries that fit on one page */
	max_sg_len = min_t(u32, nx_driver.of.max_sg_len/sizeof(struct nx_sg),
			   nx_ctx->ap->sglen);

	do {
		/* to_process: the AES_BLOCK_SIZE data chunk to process in this
		 * update. This value is bound by sg list limits.
		 */
		to_process = min_t(u64, nbytes - processed,
				   nx_ctx->ap->databytelen);
		to_process = min_t(u64, to_process,
				   NX_PAGE_SIZE * (max_sg_len - 1));

		if ((to_process + processed) < nbytes)
			NX_CPB_FDM(csbcpb) |= NX_FDM_INTERMEDIATE;
		else
			NX_CPB_FDM(csbcpb) &= ~NX_FDM_INTERMEDIATE;

		NX_CPB_FDM(nx_ctx->csbcpb) &= ~NX_FDM_ENDE_ENCRYPT;

		rc = nx_build_sg_lists(nx_ctx, desc, req->dst, req->src,
				       to_process, processed,
				       csbcpb->cpb.aes_ccm.iv_or_ctr);
		if (rc)
			goto out;

		rc = nx_hcall_sync(nx_ctx, &nx_ctx->op,
				   req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP);
		if (rc)
			goto out;

		/* for partial completion, copy following for next
		 * entry into loop...
		 */
		memcpy(desc->info, csbcpb->cpb.aes_ccm.out_ctr, AES_BLOCK_SIZE);
		memcpy(csbcpb->cpb.aes_ccm.in_pat_or_b0,
		       csbcpb->cpb.aes_ccm.out_pat_or_mac, AES_BLOCK_SIZE);
		memcpy(csbcpb->cpb.aes_ccm.in_s0,
		       csbcpb->cpb.aes_ccm.out_s0, AES_BLOCK_SIZE);

		NX_CPB_FDM(csbcpb) |= NX_FDM_CONTINUATION;

		atomic_inc(&(nx_ctx->stats->aes_ops));
		atomic64_add(csbcpb->csb.processed_byte_count,
			     &(nx_ctx->stats->aes_bytes));

		processed += to_process;
	} while (processed < nbytes);

	rc = memcmp(csbcpb->cpb.aes_ccm.out_pat_or_mac, priv->oauth_tag,
		    authsize) ? -EBADMSG : 0;
out:
	spin_unlock_irqrestore(&nx_ctx->lock, irq_flags);
	return rc;
}

static int ccm_nx_encrypt(struct aead_request   *req,
			  struct blkcipher_desc *desc)
{
	struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(req->base.tfm);
	struct nx_csbcpb *csbcpb = nx_ctx->csbcpb;
	unsigned int nbytes = req->cryptlen;
	unsigned int authsize = crypto_aead_authsize(crypto_aead_reqtfm(req));
	unsigned long irq_flags;
	unsigned int processed = 0, to_process;
	unsigned int max_sg_len;
	int rc = -1;

	spin_lock_irqsave(&nx_ctx->lock, irq_flags);

	rc = generate_pat(desc->info, req, nx_ctx, authsize, nbytes,
			  csbcpb->cpb.aes_ccm.in_pat_or_b0);
	if (rc)
		goto out;

	/* page_limit: number of sg entries that fit on one page */
	max_sg_len = min_t(u32, nx_driver.of.max_sg_len/sizeof(struct nx_sg),
			   nx_ctx->ap->sglen);

	do {
		/* to_process: the AES_BLOCK_SIZE data chunk to process in this
		 * update. This value is bound by sg list limits.
		 */
		to_process = min_t(u64, nbytes - processed,
				   nx_ctx->ap->databytelen);
		to_process = min_t(u64, to_process,
				   NX_PAGE_SIZE * (max_sg_len - 1));

		if ((to_process + processed) < nbytes)
			NX_CPB_FDM(csbcpb) |= NX_FDM_INTERMEDIATE;
		else
			NX_CPB_FDM(csbcpb) &= ~NX_FDM_INTERMEDIATE;

		NX_CPB_FDM(csbcpb) |= NX_FDM_ENDE_ENCRYPT;

		rc = nx_build_sg_lists(nx_ctx, desc, req->dst, req->src,
				       to_process, processed,
				       csbcpb->cpb.aes_ccm.iv_or_ctr);
		if (rc)
			goto out;

		rc = nx_hcall_sync(nx_ctx, &nx_ctx->op,
				   req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP);
		if (rc)
			goto out;

		/* for partial completion, copy following for next
		 * entry into loop...
		 */
		memcpy(desc->info, csbcpb->cpb.aes_ccm.out_ctr, AES_BLOCK_SIZE);
		memcpy(csbcpb->cpb.aes_ccm.in_pat_or_b0,
		       csbcpb->cpb.aes_ccm.out_pat_or_mac, AES_BLOCK_SIZE);
		memcpy(csbcpb->cpb.aes_ccm.in_s0,
		       csbcpb->cpb.aes_ccm.out_s0, AES_BLOCK_SIZE);

		NX_CPB_FDM(csbcpb) |= NX_FDM_CONTINUATION;

		atomic_inc(&(nx_ctx->stats->aes_ops));
		atomic64_add(csbcpb->csb.processed_byte_count,
			     &(nx_ctx->stats->aes_bytes));

		processed += to_process;
	} while (processed < nbytes);

	/* copy out the auth tag */
	scatterwalk_map_and_copy(csbcpb->cpb.aes_ccm.out_pat_or_mac,
				 req->dst, nbytes, authsize,
				 SCATTERWALK_TO_SG);

out:
	spin_unlock_irqrestore(&nx_ctx->lock, irq_flags);
	return rc;
}

static int ccm4309_aes_nx_encrypt(struct aead_request *req)
{
	struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(req->base.tfm);
	struct blkcipher_desc desc;
	u8 *iv = nx_ctx->priv.ccm.iv;

	iv[0] = 3;
	memcpy(iv + 1, nx_ctx->priv.ccm.nonce, 3);
	memcpy(iv + 4, req->iv, 8);

	desc.info = iv;
	desc.tfm = (struct crypto_blkcipher *)req->base.tfm;

	return ccm_nx_encrypt(req, &desc);
}

static int ccm_aes_nx_encrypt(struct aead_request *req)
{
	struct blkcipher_desc desc;
	int rc;

	desc.info = req->iv;
	desc.tfm = (struct crypto_blkcipher *)req->base.tfm;

	rc = crypto_ccm_check_iv(desc.info);
	if (rc)
		return rc;

	return ccm_nx_encrypt(req, &desc);
}

static int ccm4309_aes_nx_decrypt(struct aead_request *req)
{
	struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(req->base.tfm);
	struct blkcipher_desc desc;
	u8 *iv = nx_ctx->priv.ccm.iv;

	iv[0] = 3;
	memcpy(iv + 1, nx_ctx->priv.ccm.nonce, 3);
	memcpy(iv + 4, req->iv, 8);

	desc.info = iv;
	desc.tfm = (struct crypto_blkcipher *)req->base.tfm;

	return ccm_nx_decrypt(req, &desc);
}

static int ccm_aes_nx_decrypt(struct aead_request *req)
{
	struct blkcipher_desc desc;
	int rc;

	desc.info = req->iv;
	desc.tfm = (struct crypto_blkcipher *)req->base.tfm;

	rc = crypto_ccm_check_iv(desc.info);
	if (rc)
		return rc;

	return ccm_nx_decrypt(req, &desc);
}

/* tell the block cipher walk routines that this is a stream cipher by
 * setting cra_blocksize to 1. Even using blkcipher_walk_virt_block
 * during encrypt/decrypt doesn't solve this problem, because it calls
 * blkcipher_walk_done under the covers, which doesn't use walk->blocksize,
 * but instead uses this tfm->blocksize. */
struct crypto_alg nx_ccm_aes_alg = {
	.cra_name        = "ccm(aes)",
	.cra_driver_name = "ccm-aes-nx",
	.cra_priority    = 300,
	.cra_flags       = CRYPTO_ALG_TYPE_AEAD |
			   CRYPTO_ALG_NEED_FALLBACK,
	.cra_blocksize   = 1,
	.cra_ctxsize     = sizeof(struct nx_crypto_ctx),
	.cra_type        = &crypto_aead_type,
	.cra_module      = THIS_MODULE,
	.cra_init        = nx_crypto_ctx_aes_ccm_init,
	.cra_exit        = nx_crypto_ctx_exit,
	.cra_aead = {
		.ivsize      = AES_BLOCK_SIZE,
		.maxauthsize = AES_BLOCK_SIZE,
		.setkey      = ccm_aes_nx_set_key,
		.setauthsize = ccm_aes_nx_setauthsize,
		.encrypt     = ccm_aes_nx_encrypt,
		.decrypt     = ccm_aes_nx_decrypt,
	}
};

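/* rfc4309 variant for IPsec ESP: exposed as an nivaead with an 8-byte
 * wire IV (generated by seqiv on the encrypt side) and the 3-byte salt
 * carried in through setkey */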
struct crypto_alg nx_ccm4309_aes_alg = {
	.cra_name        = "rfc4309(ccm(aes))",
	.cra_driver_name = "rfc4309-ccm-aes-nx",
	.cra_priority    = 300,
	.cra_flags       = CRYPTO_ALG_TYPE_AEAD |
			   CRYPTO_ALG_NEED_FALLBACK,
	.cra_blocksize   = 1,
	.cra_ctxsize     = sizeof(struct nx_crypto_ctx),
	.cra_type        = &crypto_nivaead_type,
	.cra_module      = THIS_MODULE,
	.cra_init        = nx_crypto_ctx_aes_ccm_init,
	.cra_exit        = nx_crypto_ctx_exit,
	.cra_aead = {
		.ivsize      = 8,
		.maxauthsize = AES_BLOCK_SIZE,
		.setkey      = ccm4309_aes_nx_set_key,
		.setauthsize = ccm4309_aes_nx_setauthsize,
		.encrypt     = ccm4309_aes_nx_encrypt,
		.decrypt     = ccm4309_aes_nx_decrypt,
		.geniv       = "seqiv",
	}
};