/*
 * Block chaining cipher operations.
 *
 * Generic encrypt/decrypt wrapper for ciphers, handles operations across
 * multiple page boundaries by using temporary blocks.  In user context,
 * the kernel is given a chance to schedule us once per page.
 *
 * Copyright (c) 2006 Herbert Xu <herbert@gondor.apana.org.au>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 */

#include <crypto/internal/skcipher.h>
#include <crypto/scatterwalk.h>
#include <linux/errno.h>
#include <linux/hardirq.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/scatterlist.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/string.h>

#include "internal.h"
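/*
 * Typical use by a cipher implementation (an illustrative sketch only, not
 * part of this file's API): initialise a blkcipher_walk over the request's
 * scatterlists, process walk.nbytes bytes per step and report any
 * unprocessed remainder to blkcipher_walk_done().  The function name and
 * the bsize variable below are placeholders.
 *
 *	static int example_encrypt(struct blkcipher_desc *desc,
 *				   struct scatterlist *dst,
 *				   struct scatterlist *src,
 *				   unsigned int nbytes)
 *	{
 *		const unsigned int bsize = crypto_blkcipher_blocksize(desc->tfm);
 *		struct blkcipher_walk walk;
 *		int err;
 *
 *		blkcipher_walk_init(&walk, dst, src, nbytes);
 *		err = blkcipher_walk_virt(desc, &walk);
 *
 *		while ((nbytes = walk.nbytes)) {
 *			// encrypt walk.src.virt.addr into walk.dst.virt.addr,
 *			// nbytes - (nbytes % bsize) bytes, using walk.iv
 *			err = blkcipher_walk_done(desc, &walk, nbytes % bsize);
 *		}
 *
 *		return err;
 *	}
 */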
enum {
	BLKCIPHER_WALK_PHYS = 1 << 0,
	BLKCIPHER_WALK_SLOW = 1 << 1,
	BLKCIPHER_WALK_COPY = 1 << 2,
	BLKCIPHER_WALK_DIFF = 1 << 3,
};
static int blkcipher_walk_next(struct blkcipher_desc *desc,
			       struct blkcipher_walk *walk);
static int blkcipher_walk_first(struct blkcipher_desc *desc,
				struct blkcipher_walk *walk);
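/*
 * Helpers that map and unmap the current source/destination scatterlist
 * entry into the kernel's virtual address space; the second argument
 * distinguishes the input (0) from the output (1) side.
 */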
static inline void blkcipher_map_src(struct blkcipher_walk *walk)
{
	walk->src.virt.addr = scatterwalk_map(&walk->in, 0);
}

static inline void blkcipher_map_dst(struct blkcipher_walk *walk)
{
	walk->dst.virt.addr = scatterwalk_map(&walk->out, 1);
}

static inline void blkcipher_unmap_src(struct blkcipher_walk *walk)
{
	scatterwalk_unmap(walk->src.virt.addr, 0);
}

static inline void blkcipher_unmap_dst(struct blkcipher_walk *walk)
{
	scatterwalk_unmap(walk->dst.virt.addr, 1);
}
/* Get a spot of the specified length that does not straddle a page.
 * The caller needs to ensure that there is enough space for this operation.
 */
static inline u8 *blkcipher_get_spot(u8 *start, unsigned int len)
{
	u8 *end_page = (u8 *)(((unsigned long)(start + len - 1)) & PAGE_MASK);
	return max(start, end_page);
}
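/*
 * Slow-path completion: the block was processed in the aligned bounce
 * buffer, so copy the result back out to the destination scatterlist.
 * Returns the number of bytes consumed (one block).
 */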
static inline unsigned int blkcipher_done_slow(struct crypto_blkcipher *tfm,
					       struct blkcipher_walk *walk,
					       unsigned int bsize)
{
	u8 *addr;
	unsigned int alignmask = crypto_blkcipher_alignmask(tfm);

	addr = (u8 *)ALIGN((unsigned long)walk->buffer, alignmask + 1);
	addr = blkcipher_get_spot(addr, bsize);
	scatterwalk_copychunks(addr, &walk->out, bsize, 1);
	return bsize;
}
static inline unsigned int blkcipher_done_fast(struct blkcipher_walk *walk,
					       unsigned int n)
{
	if (walk->flags & BLKCIPHER_WALK_COPY) {
		blkcipher_map_dst(walk);
		memcpy(walk->dst.virt.addr, walk->page, n);
		blkcipher_unmap_dst(walk);
	} else if (!(walk->flags & BLKCIPHER_WALK_PHYS)) {
		blkcipher_unmap_src(walk);
		if (walk->flags & BLKCIPHER_WALK_DIFF)
			blkcipher_unmap_dst(walk);
	}

	scatterwalk_advance(&walk->in, n);
	scatterwalk_advance(&walk->out, n);

	return n;
}
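/*
 * Finish the current step of the walk: flush or unmap the data processed so
 * far, advance the scatterlists and either set up the next step or, when
 * the walk is complete, copy the (possibly bounced) IV back and release any
 * temporary buffers.  Cipher implementations call this once per iteration
 * with the number of bytes they left unprocessed.
 */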
int blkcipher_walk_done(struct blkcipher_desc *desc,
			struct blkcipher_walk *walk, int err)
{
	struct crypto_blkcipher *tfm = desc->tfm;
	unsigned int nbytes = 0;

	if (likely(err >= 0)) {
		unsigned int n = walk->nbytes - err;

		if (likely(!(walk->flags & BLKCIPHER_WALK_SLOW)))
			n = blkcipher_done_fast(walk, n);
		else if (WARN_ON(err)) {
			err = -EINVAL;
			n = 0;
		} else
			n = blkcipher_done_slow(tfm, walk, n);

		nbytes = walk->total - n;
		err = 0;
	}

	scatterwalk_done(&walk->in, 0, nbytes);
	scatterwalk_done(&walk->out, 1, nbytes);

	walk->total = nbytes;
	walk->nbytes = nbytes;

	if (nbytes) {
		crypto_yield(desc->flags);
		return blkcipher_walk_next(desc, walk);
	}

	if (walk->iv != desc->info)
		memcpy(desc->info, walk->iv, crypto_blkcipher_ivsize(tfm));
	if (walk->buffer != walk->page)
		kfree(walk->buffer);
	if (walk->page)
		free_page((unsigned long)walk->page);

	return err;
}
EXPORT_SYMBOL_GPL(blkcipher_walk_done);
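/*
 * Slow path: the remaining data in the current scatterlist entries is
 * smaller than one block, so gather a full block into an aligned bounce
 * buffer and let the cipher operate on that instead.
 */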
static inline int blkcipher_next_slow(struct blkcipher_desc *desc,
				      struct blkcipher_walk *walk,
				      unsigned int bsize,
				      unsigned int alignmask)
{
	unsigned int n;
	unsigned aligned_bsize = ALIGN(bsize, alignmask + 1);

	if (walk->buffer)
		goto ok;

	walk->buffer = walk->page;
	if (walk->buffer)
		goto ok;

	n = aligned_bsize * 3 - (alignmask + 1) +
	    (alignmask & ~(crypto_tfm_ctx_alignment() - 1));
	walk->buffer = kmalloc(n, GFP_ATOMIC);
	if (!walk->buffer)
		return blkcipher_walk_done(desc, walk, -ENOMEM);

ok:
	walk->dst.virt.addr = (u8 *)ALIGN((unsigned long)walk->buffer,
					  alignmask + 1);
	walk->dst.virt.addr = blkcipher_get_spot(walk->dst.virt.addr, bsize);
	walk->src.virt.addr = blkcipher_get_spot(walk->dst.virt.addr +
						 aligned_bsize, bsize);

	scatterwalk_copychunks(walk->src.virt.addr, &walk->in, bsize, 0);

	walk->nbytes = bsize;
	walk->flags |= BLKCIPHER_WALK_SLOW;

	return 0;
}
static inline int blkcipher_next_copy(struct blkcipher_walk *walk)
{
	u8 *tmp = walk->page;

	blkcipher_map_src(walk);
	memcpy(tmp, walk->src.virt.addr, walk->nbytes);
	blkcipher_unmap_src(walk);

	walk->src.virt.addr = tmp;
	walk->dst.virt.addr = tmp;

	return 0;
}
static inline int blkcipher_next_fast(struct blkcipher_desc *desc,
				      struct blkcipher_walk *walk)
{
	unsigned long diff;

	walk->src.phys.page = scatterwalk_page(&walk->in);
	walk->src.phys.offset = offset_in_page(walk->in.offset);
	walk->dst.phys.page = scatterwalk_page(&walk->out);
	walk->dst.phys.offset = offset_in_page(walk->out.offset);

	if (walk->flags & BLKCIPHER_WALK_PHYS)
		return 0;

	diff = walk->src.phys.offset - walk->dst.phys.offset;
	diff |= walk->src.virt.page - walk->dst.virt.page;

	blkcipher_map_src(walk);
	walk->dst.virt.addr = walk->src.virt.addr;

	if (diff) {
		walk->flags |= BLKCIPHER_WALK_DIFF;
		blkcipher_map_dst(walk);
	}

	return 0;
}
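/*
 * Set up the next step of the walk, picking one of three strategies:
 * fast (operate directly on the mapped scatterlist pages), copy (bounce
 * through a whole page to satisfy the alignment mask) or slow (bounce a
 * single block that straddles a scatterlist entry boundary).
 */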
static int blkcipher_walk_next(struct blkcipher_desc *desc,
			       struct blkcipher_walk *walk)
{
	struct crypto_blkcipher *tfm = desc->tfm;
	unsigned int alignmask = crypto_blkcipher_alignmask(tfm);
	unsigned int bsize;
	unsigned int n;
	int err;

	n = walk->total;
	if (unlikely(n < crypto_blkcipher_blocksize(tfm))) {
		desc->flags |= CRYPTO_TFM_RES_BAD_BLOCK_LEN;
		return blkcipher_walk_done(desc, walk, -EINVAL);
	}

	walk->flags &= ~(BLKCIPHER_WALK_SLOW | BLKCIPHER_WALK_COPY |
			 BLKCIPHER_WALK_DIFF);
	if (!scatterwalk_aligned(&walk->in, alignmask) ||
	    !scatterwalk_aligned(&walk->out, alignmask)) {
		walk->flags |= BLKCIPHER_WALK_COPY;
		if (!walk->page) {
			walk->page = (void *)__get_free_page(GFP_ATOMIC);
			if (!walk->page)
				n = 0;
		}
	}

	bsize = min(walk->blocksize, n);
	n = scatterwalk_clamp(&walk->in, n);
	n = scatterwalk_clamp(&walk->out, n);

	if (unlikely(n < bsize)) {
		err = blkcipher_next_slow(desc, walk, bsize, alignmask);
		goto set_phys_lowmem;
	}

	walk->nbytes = n;
	if (walk->flags & BLKCIPHER_WALK_COPY) {
		err = blkcipher_next_copy(walk);
		goto set_phys_lowmem;
	}

	return blkcipher_next_fast(desc, walk);

set_phys_lowmem:
	if (walk->flags & BLKCIPHER_WALK_PHYS) {
		walk->src.phys.page = virt_to_page(walk->src.virt.addr);
		walk->dst.phys.page = virt_to_page(walk->dst.virt.addr);
		walk->src.phys.offset &= PAGE_SIZE - 1;
		walk->dst.phys.offset &= PAGE_SIZE - 1;
	}

	return err;
}
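/*
 * The caller's IV is not aligned to the cipher's alignment mask: copy it
 * into a suitably aligned buffer (sized with scratch room that the slow
 * path can reuse) and point walk->iv at the copy.
 */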
static inline int blkcipher_copy_iv(struct blkcipher_walk *walk,
				    struct crypto_blkcipher *tfm,
				    unsigned int alignmask)
{
	unsigned bs = walk->blocksize;
	unsigned int ivsize = crypto_blkcipher_ivsize(tfm);
	unsigned aligned_bs = ALIGN(bs, alignmask + 1);
	unsigned int size = aligned_bs * 2 + ivsize + max(aligned_bs, ivsize) -
			    (alignmask + 1);
	u8 *iv;

	size += alignmask & ~(crypto_tfm_ctx_alignment() - 1);
	walk->buffer = kmalloc(size, GFP_ATOMIC);
	if (!walk->buffer)
		return -ENOMEM;

	iv = (u8 *)ALIGN((unsigned long)walk->buffer, alignmask + 1);
	iv = blkcipher_get_spot(iv, bs) + aligned_bs;
	iv = blkcipher_get_spot(iv, bs) + aligned_bs;
	iv = blkcipher_get_spot(iv, ivsize);

	walk->iv = memcpy(iv, walk->iv, ivsize);
	return 0;
}
int blkcipher_walk_virt(struct blkcipher_desc *desc,
			struct blkcipher_walk *walk)
{
	walk->flags &= ~BLKCIPHER_WALK_PHYS;
	walk->blocksize = crypto_blkcipher_blocksize(desc->tfm);
	return blkcipher_walk_first(desc, walk);
}
EXPORT_SYMBOL_GPL(blkcipher_walk_virt);
int blkcipher_walk_phys(struct blkcipher_desc *desc,
			struct blkcipher_walk *walk)
{
	walk->flags |= BLKCIPHER_WALK_PHYS;
	walk->blocksize = crypto_blkcipher_blocksize(desc->tfm);
	return blkcipher_walk_first(desc, walk);
}
EXPORT_SYMBOL_GPL(blkcipher_walk_phys);
static int blkcipher_walk_first(struct blkcipher_desc *desc,
				struct blkcipher_walk *walk)
{
	struct crypto_blkcipher *tfm = desc->tfm;
	unsigned int alignmask = crypto_blkcipher_alignmask(tfm);

	if (WARN_ON_ONCE(in_irq()))
		return -EDEADLK;

	walk->nbytes = walk->total;
	if (unlikely(!walk->total))
		return 0;

	walk->buffer = NULL;
	walk->iv = desc->info;
	if (unlikely(((unsigned long)walk->iv & alignmask))) {
		int err = blkcipher_copy_iv(walk, tfm, alignmask);
		if (err)
			return err;
	}

	scatterwalk_start(&walk->in, walk->in.sg);
	scatterwalk_start(&walk->out, walk->out.sg);
	walk->page = NULL;

	return blkcipher_walk_next(desc, walk);
}
int blkcipher_walk_virt_block(struct blkcipher_desc *desc,
			      struct blkcipher_walk *walk,
			      unsigned int blocksize)
{
	walk->flags &= ~BLKCIPHER_WALK_PHYS;
	walk->blocksize = blocksize;
	return blkcipher_walk_first(desc, walk);
}
EXPORT_SYMBOL_GPL(blkcipher_walk_virt_block);
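/*
 * The key is not aligned to the algorithm's alignment mask: copy it into a
 * temporary aligned buffer before handing it to the cipher's setkey(),
 * then wipe and free the copy.
 */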
static int setkey_unaligned(struct crypto_tfm *tfm, const u8 *key,
			    unsigned int keylen)
{
	struct blkcipher_alg *cipher = &tfm->__crt_alg->cra_blkcipher;
	unsigned long alignmask = crypto_tfm_alg_alignmask(tfm);
	int ret;
	u8 *buffer, *alignbuffer;
	unsigned long absize;

	absize = keylen + alignmask;
	buffer = kmalloc(absize, GFP_ATOMIC);
	if (!buffer)
		return -ENOMEM;

	alignbuffer = (u8 *)ALIGN((unsigned long)buffer, alignmask + 1);
	memcpy(alignbuffer, key, keylen);
	ret = cipher->setkey(tfm, alignbuffer, keylen);
	memset(alignbuffer, 0, keylen);
	kfree(buffer);
	return ret;
}
static int setkey(struct crypto_tfm *tfm, const u8 *key, unsigned int keylen)
{
	struct blkcipher_alg *cipher = &tfm->__crt_alg->cra_blkcipher;
	unsigned long alignmask = crypto_tfm_alg_alignmask(tfm);

	if (keylen < cipher->min_keysize || keylen > cipher->max_keysize) {
		tfm->crt_flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
		return -EINVAL;
	}

	if ((unsigned long)key & alignmask)
		return setkey_unaligned(tfm, key, keylen);

	return cipher->setkey(tfm, key, keylen);
}
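/*
 * Adapters that expose a synchronous blkcipher through the asynchronous
 * ablkcipher interface: build a blkcipher_desc on the stack from the
 * request and invoke the underlying algorithm directly.
 */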
static int async_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
			unsigned int keylen)
{
	return setkey(crypto_ablkcipher_tfm(tfm), key, keylen);
}
static int async_encrypt(struct ablkcipher_request *req)
{
	struct crypto_tfm *tfm = req->base.tfm;
	struct blkcipher_alg *alg = &tfm->__crt_alg->cra_blkcipher;
	struct blkcipher_desc desc = {
		.tfm = __crypto_blkcipher_cast(tfm),
		.info = req->info,
		.flags = req->base.flags,
	};

	return alg->encrypt(&desc, req->dst, req->src, req->nbytes);
}
static int async_decrypt(struct ablkcipher_request *req)
{
	struct crypto_tfm *tfm = req->base.tfm;
	struct blkcipher_alg *alg = &tfm->__crt_alg->cra_blkcipher;
	struct blkcipher_desc desc = {
		.tfm = __crypto_blkcipher_cast(tfm),
		.info = req->info,
		.flags = req->base.flags,
	};

	return alg->decrypt(&desc, req->dst, req->src, req->nbytes);
}
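/*
 * Context size for a blkcipher transform.  When the transform is
 * instantiated through the synchronous interface the IV lives right after
 * the algorithm's context, so reserve properly aligned room for it here.
 */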
static unsigned int crypto_blkcipher_ctxsize(struct crypto_alg *alg, u32 type,
					     u32 mask)
{
	struct blkcipher_alg *cipher = &alg->cra_blkcipher;
	unsigned int len = alg->cra_ctxsize;

	if ((mask & CRYPTO_ALG_TYPE_MASK) == CRYPTO_ALG_TYPE_MASK &&
	    cipher->ivsize) {
		len = ALIGN(len, (unsigned long)alg->cra_alignmask + 1);
		len += cipher->ivsize;
	}

	return len;
}
static int crypto_init_blkcipher_ops_async(struct crypto_tfm *tfm)
{
	struct ablkcipher_tfm *crt = &tfm->crt_ablkcipher;
	struct blkcipher_alg *alg = &tfm->__crt_alg->cra_blkcipher;

	crt->setkey = async_setkey;
	crt->encrypt = async_encrypt;
	crt->decrypt = async_decrypt;
	if (!alg->ivsize) {
		crt->givencrypt = skcipher_null_givencrypt;
		crt->givdecrypt = skcipher_null_givdecrypt;
	}
	crt->base = __crypto_ablkcipher_cast(tfm);
	crt->ivsize = alg->ivsize;

	return 0;
}
static int crypto_init_blkcipher_ops_sync(struct crypto_tfm *tfm)
{
	struct blkcipher_tfm *crt = &tfm->crt_blkcipher;
	struct blkcipher_alg *alg = &tfm->__crt_alg->cra_blkcipher;
	unsigned long align = crypto_tfm_alg_alignmask(tfm) + 1;
	unsigned long addr;

	crt->setkey = setkey;
	crt->encrypt = alg->encrypt;
	crt->decrypt = alg->decrypt;

	addr = (unsigned long)crypto_tfm_ctx(tfm);
	addr = ALIGN(addr, align);
	addr += ALIGN(tfm->__crt_alg->cra_ctxsize, align);
	crt->iv = (void *)addr;

	return 0;
}
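/*
 * Choose the frontend for this transform: a strict type mask selects the
 * synchronous blkcipher interface, anything else gets the ablkcipher
 * wrappers defined above.
 */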
static int crypto_init_blkcipher_ops(struct crypto_tfm *tfm, u32 type, u32 mask)
{
	struct blkcipher_alg *alg = &tfm->__crt_alg->cra_blkcipher;

	if (alg->ivsize > PAGE_SIZE / 8)
		return -EINVAL;

	if ((mask & CRYPTO_ALG_TYPE_MASK) == CRYPTO_ALG_TYPE_MASK)
		return crypto_init_blkcipher_ops_sync(tfm);
	else
		return crypto_init_blkcipher_ops_async(tfm);
}
static void crypto_blkcipher_show(struct seq_file *m, struct crypto_alg *alg)
	__attribute__ ((unused));
static void crypto_blkcipher_show(struct seq_file *m, struct crypto_alg *alg)
{
	seq_printf(m, "type         : blkcipher\n");
	seq_printf(m, "blocksize    : %u\n", alg->cra_blocksize);
	seq_printf(m, "min keysize  : %u\n", alg->cra_blkcipher.min_keysize);
	seq_printf(m, "max keysize  : %u\n", alg->cra_blkcipher.max_keysize);
	seq_printf(m, "ivsize       : %u\n", alg->cra_blkcipher.ivsize);
	seq_printf(m, "geniv        : %s\n", alg->cra_blkcipher.geniv ?:
					     "<default>");
}
const struct crypto_type crypto_blkcipher_type = {
	.ctxsize = crypto_blkcipher_ctxsize,
	.init = crypto_init_blkcipher_ops,
#ifdef CONFIG_PROC_FS
	.show = crypto_blkcipher_show,
#endif
};
EXPORT_SYMBOL_GPL(crypto_blkcipher_type);
static int crypto_grab_nivcipher(struct crypto_skcipher_spawn *spawn,
				 const char *name, u32 type, u32 mask)
{
	struct crypto_alg *alg;
	int err;

	type = crypto_skcipher_type(type);
	mask = crypto_skcipher_mask(mask) | CRYPTO_ALG_GENIV;

	alg = crypto_alg_mod_lookup(name, type, mask);
	if (IS_ERR(alg))
		return PTR_ERR(alg);

	err = crypto_init_spawn(&spawn->base, alg, spawn->base.inst, mask);
	crypto_mod_put(alg);
	return err;
}
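/*
 * Construct an IV-generator ("geniv") instance wrapped around an existing
 * block cipher algorithm.  The underlying algorithm may be either a
 * synchronous blkcipher or an ablkcipher; the balg structure below
 * normalises the two cases before the instance is filled in.
 */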
struct crypto_instance *skcipher_geniv_alloc(struct crypto_template *tmpl,
					     struct rtattr **tb, u32 type,
					     u32 mask)
{
	struct {
		int (*setkey)(struct crypto_ablkcipher *tfm, const u8 *key,
			      unsigned int keylen);
		int (*encrypt)(struct ablkcipher_request *req);
		int (*decrypt)(struct ablkcipher_request *req);

		unsigned int min_keysize;
		unsigned int max_keysize;
		unsigned int ivsize;

		const char *geniv;
	} balg;
	const char *name;
	struct crypto_skcipher_spawn *spawn;
	struct crypto_attr_type *algt;
	struct crypto_instance *inst;
	struct crypto_alg *alg;
	int err;

	algt = crypto_get_attr_type(tb);
	err = PTR_ERR(algt);
	if (IS_ERR(algt))
		return ERR_PTR(err);

	if ((algt->type ^ (CRYPTO_ALG_TYPE_GIVCIPHER | CRYPTO_ALG_GENIV)) &
	    algt->mask)
		return ERR_PTR(-EINVAL);

	name = crypto_attr_alg_name(tb[1]);
	err = PTR_ERR(name);
	if (IS_ERR(name))
		return ERR_PTR(err);

	inst = kzalloc(sizeof(*inst) + sizeof(*spawn), GFP_KERNEL);
	if (!inst)
		return ERR_PTR(-ENOMEM);

	spawn = crypto_instance_ctx(inst);

	/* Ignore async algorithms if necessary. */
	mask |= crypto_requires_sync(algt->type, algt->mask);

	crypto_set_skcipher_spawn(spawn, inst);
	err = crypto_grab_nivcipher(spawn, name, type, mask);
	if (err)
		goto err_free_inst;

	alg = crypto_skcipher_spawn_alg(spawn);

	if ((alg->cra_flags & CRYPTO_ALG_TYPE_MASK) ==
	    CRYPTO_ALG_TYPE_BLKCIPHER) {
		balg.ivsize = alg->cra_blkcipher.ivsize;
		balg.min_keysize = alg->cra_blkcipher.min_keysize;
		balg.max_keysize = alg->cra_blkcipher.max_keysize;

		balg.setkey = async_setkey;
		balg.encrypt = async_encrypt;
		balg.decrypt = async_decrypt;

		balg.geniv = alg->cra_blkcipher.geniv;
	} else {
		balg.ivsize = alg->cra_ablkcipher.ivsize;
		balg.min_keysize = alg->cra_ablkcipher.min_keysize;
		balg.max_keysize = alg->cra_ablkcipher.max_keysize;

		balg.setkey = alg->cra_ablkcipher.setkey;
		balg.encrypt = alg->cra_ablkcipher.encrypt;
		balg.decrypt = alg->cra_ablkcipher.decrypt;

		balg.geniv = alg->cra_ablkcipher.geniv;
	}

	err = -EINVAL;
	if (!balg.ivsize)
		goto err_drop_alg;

	/*
	 * This is only true if we're constructing an algorithm with its
	 * default IV generator.  For the default generator we elide the
	 * template name and double-check the IV generator.
	 */
	if (algt->mask & CRYPTO_ALG_GENIV) {
		if (!balg.geniv)
			balg.geniv = crypto_default_geniv(alg);
		err = -EAGAIN;
		if (strcmp(tmpl->name, balg.geniv))
			goto err_drop_alg;

		memcpy(inst->alg.cra_name, alg->cra_name, CRYPTO_MAX_ALG_NAME);
		memcpy(inst->alg.cra_driver_name, alg->cra_driver_name,
		       CRYPTO_MAX_ALG_NAME);
	} else {
		err = -ENAMETOOLONG;
		if (snprintf(inst->alg.cra_name, CRYPTO_MAX_ALG_NAME,
			     "%s(%s)", tmpl->name, alg->cra_name) >=
		    CRYPTO_MAX_ALG_NAME)
			goto err_drop_alg;
		if (snprintf(inst->alg.cra_driver_name, CRYPTO_MAX_ALG_NAME,
			     "%s(%s)", tmpl->name, alg->cra_driver_name) >=
		    CRYPTO_MAX_ALG_NAME)
			goto err_drop_alg;
	}

	inst->alg.cra_flags = CRYPTO_ALG_TYPE_GIVCIPHER | CRYPTO_ALG_GENIV;
	inst->alg.cra_flags |= alg->cra_flags & CRYPTO_ALG_ASYNC;
	inst->alg.cra_priority = alg->cra_priority;
	inst->alg.cra_blocksize = alg->cra_blocksize;
	inst->alg.cra_alignmask = alg->cra_alignmask;
	inst->alg.cra_type = &crypto_givcipher_type;

	inst->alg.cra_ablkcipher.ivsize = balg.ivsize;
	inst->alg.cra_ablkcipher.min_keysize = balg.min_keysize;
	inst->alg.cra_ablkcipher.max_keysize = balg.max_keysize;
	inst->alg.cra_ablkcipher.geniv = balg.geniv;

	inst->alg.cra_ablkcipher.setkey = balg.setkey;
	inst->alg.cra_ablkcipher.encrypt = balg.encrypt;
	inst->alg.cra_ablkcipher.decrypt = balg.decrypt;

out:
	return inst;

err_drop_alg:
	crypto_drop_skcipher(spawn);
err_free_inst:
	kfree(inst);
	inst = ERR_PTR(err);
	goto out;
}
EXPORT_SYMBOL_GPL(skcipher_geniv_alloc);
void skcipher_geniv_free(struct crypto_instance *inst)
{
	crypto_drop_skcipher(crypto_instance_ctx(inst));
	kfree(inst);
}
EXPORT_SYMBOL_GPL(skcipher_geniv_free);
int skcipher_geniv_init(struct crypto_tfm *tfm)
{
	struct crypto_instance *inst = (void *)tfm->__crt_alg;
	struct crypto_ablkcipher *cipher;

	cipher = crypto_spawn_skcipher(crypto_instance_ctx(inst));
	if (IS_ERR(cipher))
		return PTR_ERR(cipher);

	tfm->crt_ablkcipher.base = cipher;
	tfm->crt_ablkcipher.reqsize += crypto_ablkcipher_reqsize(cipher);

	return 0;
}
EXPORT_SYMBOL_GPL(skcipher_geniv_init);
void skcipher_geniv_exit(struct crypto_tfm *tfm)
{
	crypto_free_ablkcipher(tfm->crt_ablkcipher.base);
}
EXPORT_SYMBOL_GPL(skcipher_geniv_exit);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Generic block chaining cipher type");