/*
 * chainiv: Chain IV Generator
 *
 * Generate IVs simply by using the last block of the previous encryption.
 * This is mainly useful for CBC with a synchronous algorithm.
 *
 * Copyright (c) 2007 Herbert Xu <herbert@gondor.apana.org.au>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 */
#include <crypto/internal/skcipher.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/random.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include <linux/workqueue.h>
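/*
 * Per-transform state.  The synchronous variant needs only a lock and
 * the chaining IV; the asynchronous variant additionally tracks an
 * in-use bit, the last error, a queue of postponed requests and the
 * work item that drains that queue.
 */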
enum {
	CHAINIV_STATE_INUSE = 0,
};

struct chainiv_ctx {
	spinlock_t lock;
	char iv[];
};

struct async_chainiv_ctx {
	unsigned long state;

	spinlock_t lock;
	int err;

	struct crypto_queue queue;
	struct work_struct postponed;

	char iv[];
};
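/*
 * Synchronous givencrypt: under the context lock, hand out the current
 * chaining IV, run the underlying encryption, and save the final IV
 * block for the next request.  The subrequest must not sleep since we
 * hold a spinlock across the call.
 */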
static int chainiv_givencrypt(struct skcipher_givcrypt_request *req)
{
	struct crypto_ablkcipher *geniv = skcipher_givcrypt_reqtfm(req);
	struct chainiv_ctx *ctx = crypto_ablkcipher_ctx(geniv);
	struct ablkcipher_request *subreq = skcipher_givcrypt_reqctx(req);
	unsigned int ivsize;
	int err;

	ablkcipher_request_set_tfm(subreq, skcipher_geniv_cipher(geniv));
	ablkcipher_request_set_callback(subreq, req->creq.base.flags &
						~CRYPTO_TFM_REQ_MAY_SLEEP,
					req->creq.base.complete,
					req->creq.base.data);
	ablkcipher_request_set_crypt(subreq, req->creq.src, req->creq.dst,
				     req->creq.nbytes, req->creq.info);

	spin_lock_bh(&ctx->lock);

	ivsize = crypto_ablkcipher_ivsize(geniv);

	memcpy(req->giv, ctx->iv, ivsize);
	memcpy(subreq->info, ctx->iv, ivsize);

	err = crypto_ablkcipher_encrypt(subreq);
	if (err)
		goto unlock;

	memcpy(ctx->iv, subreq->info, ivsize);

unlock:
	spin_unlock_bh(&ctx->lock);

	return err;
}
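/*
 * First call on a fresh transform: seed the chaining IV with random
 * bytes exactly once, switch the givencrypt hook over to the fast
 * path, then fall through to the normal encryption path.
 */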
static int chainiv_givencrypt_first(struct skcipher_givcrypt_request *req)
{
	struct crypto_ablkcipher *geniv = skcipher_givcrypt_reqtfm(req);
	struct chainiv_ctx *ctx = crypto_ablkcipher_ctx(geniv);

	spin_lock_bh(&ctx->lock);
	if (crypto_ablkcipher_crt(geniv)->givencrypt !=
	    chainiv_givencrypt_first)
		goto unlock;

	crypto_ablkcipher_crt(geniv)->givencrypt = chainiv_givencrypt;
	get_random_bytes(ctx->iv, crypto_ablkcipher_ivsize(geniv));

unlock:
	spin_unlock_bh(&ctx->lock);

	return chainiv_givencrypt(req);
}
static int chainiv_init_common(struct crypto_tfm *tfm)
{
	tfm->crt_ablkcipher.reqsize = sizeof(struct ablkcipher_request);

	return skcipher_geniv_init(tfm);
}
static int chainiv_init(struct crypto_tfm *tfm)
{
	struct chainiv_ctx *ctx = crypto_tfm_ctx(tfm);

	spin_lock_init(&ctx->lock);

	return chainiv_init_common(tfm);
}
static int async_chainiv_schedule_work(struct async_chainiv_ctx *ctx)
{
	int queued;

	if (!ctx->queue.qlen) {
		smp_mb__before_clear_bit();
		clear_bit(CHAINIV_STATE_INUSE, &ctx->state);

		if (!ctx->queue.qlen ||
		    test_and_set_bit(CHAINIV_STATE_INUSE, &ctx->state))
			goto out;
	}

	queued = schedule_work(&ctx->postponed);
	BUG_ON(!queued);

out:
	return ctx->err;
}
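/*
 * Put a request on the postponed queue.  If the INUSE bit was already
 * set, the current owner will drain the queue; otherwise we have just
 * taken ownership and must schedule the worker ourselves.
 */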
static int async_chainiv_postpone_request(struct skcipher_givcrypt_request *req)
{
	struct crypto_ablkcipher *geniv = skcipher_givcrypt_reqtfm(req);
	struct async_chainiv_ctx *ctx = crypto_ablkcipher_ctx(geniv);
	int err;

	spin_lock_bh(&ctx->lock);
	err = skcipher_enqueue_givcrypt(&ctx->queue, req);
	spin_unlock_bh(&ctx->lock);

	if (test_and_set_bit(CHAINIV_STATE_INUSE, &ctx->state))
		return err;

	ctx->err = err;
	return async_chainiv_schedule_work(ctx);
}
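/*
 * Common tail for the asynchronous paths: the caller owns the INUSE
 * bit, so the chaining IV may be read and updated without taking the
 * lock.  Any error is parked in ctx->err for schedule_work to return.
 */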
static int async_chainiv_givencrypt_tail(struct skcipher_givcrypt_request *req)
{
	struct crypto_ablkcipher *geniv = skcipher_givcrypt_reqtfm(req);
	struct async_chainiv_ctx *ctx = crypto_ablkcipher_ctx(geniv);
	struct ablkcipher_request *subreq = skcipher_givcrypt_reqctx(req);
	unsigned int ivsize = crypto_ablkcipher_ivsize(geniv);

	memcpy(req->giv, ctx->iv, ivsize);
	memcpy(subreq->info, ctx->iv, ivsize);

	ctx->err = crypto_ablkcipher_encrypt(subreq);
	if (ctx->err)
		goto out;

	memcpy(ctx->iv, subreq->info, ivsize);

out:
	return async_chainiv_schedule_work(ctx);
}
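/*
 * Asynchronous givencrypt: try to grab the INUSE bit and process the
 * request directly.  If the generator is busy, or earlier requests are
 * still queued (to preserve ordering), postpone this one instead.
 */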
static int async_chainiv_givencrypt(struct skcipher_givcrypt_request *req)
{
	struct crypto_ablkcipher *geniv = skcipher_givcrypt_reqtfm(req);
	struct async_chainiv_ctx *ctx = crypto_ablkcipher_ctx(geniv);
	struct ablkcipher_request *subreq = skcipher_givcrypt_reqctx(req);

	ablkcipher_request_set_tfm(subreq, skcipher_geniv_cipher(geniv));
	ablkcipher_request_set_callback(subreq, req->creq.base.flags,
					req->creq.base.complete,
					req->creq.base.data);
	ablkcipher_request_set_crypt(subreq, req->creq.src, req->creq.dst,
				     req->creq.nbytes, req->creq.info);

	if (test_and_set_bit(CHAINIV_STATE_INUSE, &ctx->state))
		goto postpone;

	if (ctx->queue.qlen) {
		clear_bit(CHAINIV_STATE_INUSE, &ctx->state);
		goto postpone;
	}

	return async_chainiv_givencrypt_tail(req);

postpone:
	return async_chainiv_postpone_request(req);
}
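/*
 * First call on a fresh transform: the INUSE bit doubles as the lock
 * protecting the one-time IV seeding and the givencrypt hook switch.
 */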
static int async_chainiv_givencrypt_first(struct skcipher_givcrypt_request *req)
{
	struct crypto_ablkcipher *geniv = skcipher_givcrypt_reqtfm(req);
	struct async_chainiv_ctx *ctx = crypto_ablkcipher_ctx(geniv);

	if (test_and_set_bit(CHAINIV_STATE_INUSE, &ctx->state))
		goto out;

	if (crypto_ablkcipher_crt(geniv)->givencrypt !=
	    async_chainiv_givencrypt_first)
		goto unlock;

	crypto_ablkcipher_crt(geniv)->givencrypt = async_chainiv_givencrypt;
	get_random_bytes(ctx->iv, crypto_ablkcipher_ivsize(geniv));

unlock:
	clear_bit(CHAINIV_STATE_INUSE, &ctx->state);

out:
	return async_chainiv_givencrypt(req);
}
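/*
 * Work handler: dequeue a single postponed request and process it.
 * Running in process context, the subrequest is now allowed to sleep.
 */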
static void async_chainiv_do_postponed(struct work_struct *work)
{
	struct async_chainiv_ctx *ctx = container_of(work,
						     struct async_chainiv_ctx,
						     postponed);
	struct skcipher_givcrypt_request *req;
	struct ablkcipher_request *subreq;

	/* Only handle one request at a time to avoid hogging keventd. */
	spin_lock_bh(&ctx->lock);
	req = skcipher_dequeue_givcrypt(&ctx->queue);
	spin_unlock_bh(&ctx->lock);

	if (!req) {
		async_chainiv_schedule_work(ctx);
		return;
	}

	subreq = skcipher_givcrypt_reqctx(req);
	subreq->base.flags |= CRYPTO_TFM_REQ_MAY_SLEEP;

	async_chainiv_givencrypt_tail(req);
}
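/*
 * Async context setup: the postponed-request queue is capped at 100
 * entries (the second argument to crypto_init_queue).
 */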
static int async_chainiv_init(struct crypto_tfm *tfm)
{
	struct async_chainiv_ctx *ctx = crypto_tfm_ctx(tfm);

	spin_lock_init(&ctx->lock);

	crypto_init_queue(&ctx->queue, 100);
	INIT_WORK(&ctx->postponed, async_chainiv_do_postponed);

	return chainiv_init_common(tfm);
}
static void async_chainiv_exit(struct crypto_tfm *tfm)
{
	struct async_chainiv_ctx *ctx = crypto_tfm_ctx(tfm);

	BUG_ON(test_bit(CHAINIV_STATE_INUSE, &ctx->state) || ctx->queue.qlen);

	skcipher_geniv_exit(tfm);
}
static struct crypto_template chainiv_tmpl;
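/*
 * Build a chainiv instance around the underlying cipher.  The fully
 * synchronous generator is used only when the caller insists on a
 * synchronous algorithm; otherwise the async variant is chosen and the
 * instance is flagged CRYPTO_ALG_ASYNC.  The context size is extended
 * by ivsize so the flexible iv[] array at the end of the context has
 * room for one IV block.
 */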
static struct crypto_instance *chainiv_alloc(struct rtattr **tb)
{
	struct crypto_attr_type *algt;
	struct crypto_instance *inst;

	algt = crypto_get_attr_type(tb);
	if (IS_ERR(algt))
		return ERR_PTR(PTR_ERR(algt));

	inst = skcipher_geniv_alloc(&chainiv_tmpl, tb, 0, 0);
	if (IS_ERR(inst))
		goto out;

	inst->alg.cra_ablkcipher.givencrypt = chainiv_givencrypt_first;

	inst->alg.cra_init = chainiv_init;
	inst->alg.cra_exit = skcipher_geniv_exit;

	inst->alg.cra_ctxsize = sizeof(struct chainiv_ctx);

	if (!crypto_requires_sync(algt->type, algt->mask)) {
		inst->alg.cra_flags |= CRYPTO_ALG_ASYNC;

		inst->alg.cra_ablkcipher.givencrypt =
			async_chainiv_givencrypt_first;

		inst->alg.cra_init = async_chainiv_init;
		inst->alg.cra_exit = async_chainiv_exit;

		inst->alg.cra_ctxsize = sizeof(struct async_chainiv_ctx);
	}

	inst->alg.cra_ctxsize += inst->alg.cra_ablkcipher.ivsize;

out:
	return inst;
}
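/*
 * The template is instantiated by the skcipher geniv layer, e.g. as
 * "chainiv(cbc(aes))", to supply IVs for givencrypt requests.
 */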
static struct crypto_template chainiv_tmpl = {
	.name = "chainiv",
	.alloc = chainiv_alloc,
	.free = skcipher_geniv_free,
	.module = THIS_MODULE,
};
static int __init chainiv_module_init(void)
{
	return crypto_register_template(&chainiv_tmpl);
}
static void __exit chainiv_module_exit(void)
{
	crypto_unregister_template(&chainiv_tmpl);
}
module_init(chainiv_module_init);
module_exit(chainiv_module_exit);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Chain IV Generator");