/*
 * chainiv: Chain IV Generator
 *
 * Generate IVs simply by using the last block of the previous encryption.
 * This is mainly useful for CBC with a synchronous algorithm.
 *
 * Copyright (c) 2007 Herbert Xu <herbert@gondor.apana.org.au>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 *
 */
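
/*
 * Usage note: instances of this template are normally created
 * implicitly, as the default IV generator for synchronous block
 * ciphers; for illustration, a "chainiv(cbc(aes))" instance wraps
 * CBC-AES.
 */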

#include <crypto/internal/skcipher.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/random.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include <linux/workqueue.h>

enum {
	CHAINIV_STATE_INUSE = 0,
};

struct chainiv_ctx {
	spinlock_t lock;
	char iv[];
};

struct async_chainiv_ctx {
	unsigned long state;

	spinlock_t lock;
	int err;

	struct crypto_queue queue;
	struct work_struct postponed;

	char iv[];
};
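
/*
 * Synchronous path: the next IV is simply the last ciphertext block of
 * the previous request, so everything runs under ctx->lock and the
 * underlying cipher must not sleep (MAY_SLEEP is masked off below).
 */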

static int chainiv_givencrypt(struct skcipher_givcrypt_request *req)
{
	struct crypto_ablkcipher *geniv = skcipher_givcrypt_reqtfm(req);
	struct chainiv_ctx *ctx = crypto_ablkcipher_ctx(geniv);
	struct ablkcipher_request *subreq = skcipher_givcrypt_reqctx(req);
	unsigned int ivsize;
	int err;

	ablkcipher_request_set_tfm(subreq, skcipher_geniv_cipher(geniv));
	ablkcipher_request_set_callback(subreq, req->creq.base.flags &
						~CRYPTO_TFM_REQ_MAY_SLEEP,
					req->creq.base.complete,
					req->creq.base.data);
	ablkcipher_request_set_crypt(subreq, req->creq.src, req->creq.dst,
				     req->creq.nbytes, req->creq.info);

	spin_lock_bh(&ctx->lock);

	ivsize = crypto_ablkcipher_ivsize(geniv);

	memcpy(req->giv, ctx->iv, ivsize);
	memcpy(subreq->info, ctx->iv, ivsize);

	err = crypto_ablkcipher_encrypt(subreq);
	if (err)
		goto unlock;

	memcpy(ctx->iv, subreq->info, ivsize);

unlock:
	spin_unlock_bh(&ctx->lock);

	return err;
}
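
/*
 * First-use hook: seed ctx->iv with random bytes exactly once, then
 * replace ourselves with chainiv_givencrypt for all later requests.
 */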

static int chainiv_givencrypt_first(struct skcipher_givcrypt_request *req)
{
	struct crypto_ablkcipher *geniv = skcipher_givcrypt_reqtfm(req);
	struct chainiv_ctx *ctx = crypto_ablkcipher_ctx(geniv);

	spin_lock_bh(&ctx->lock);
	if (crypto_ablkcipher_crt(geniv)->givencrypt !=
	    chainiv_givencrypt_first)
		goto unlock;

	crypto_ablkcipher_crt(geniv)->givencrypt = chainiv_givencrypt;
	get_random_bytes(ctx->iv, crypto_ablkcipher_ivsize(geniv));

unlock:
	spin_unlock_bh(&ctx->lock);

	return chainiv_givencrypt(req);
}

static int chainiv_init_common(struct crypto_tfm *tfm)
{
	tfm->crt_ablkcipher.reqsize = sizeof(struct ablkcipher_request);

	return skcipher_geniv_init(tfm);
}

static int chainiv_init(struct crypto_tfm *tfm)
{
	struct chainiv_ctx *ctx = crypto_tfm_ctx(tfm);

	spin_lock_init(&ctx->lock);

	return chainiv_init_common(tfm);
}
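
/*
 * Release the INUSE bit once the queue is empty; if a request was
 * queued between the check and the clear, re-take the bit and kick
 * the worker.  Returns the error recorded in ctx->err.
 */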

static int async_chainiv_schedule_work(struct async_chainiv_ctx *ctx)
{
	int queued;
	int err = ctx->err;

	if (!ctx->queue.qlen) {
		smp_mb__before_clear_bit();
		clear_bit(CHAINIV_STATE_INUSE, &ctx->state);

		if (!ctx->queue.qlen ||
		    test_and_set_bit(CHAINIV_STATE_INUSE, &ctx->state))
			goto out;
	}

	queued = schedule_work(&ctx->postponed);
	BUG_ON(!queued);

out:
	return err;
}
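
/*
 * Park the request on the queue for the worker; if another owner
 * already holds the INUSE bit, scheduling is left to them.
 */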

static int async_chainiv_postpone_request(struct skcipher_givcrypt_request *req)
{
	struct crypto_ablkcipher *geniv = skcipher_givcrypt_reqtfm(req);
	struct async_chainiv_ctx *ctx = crypto_ablkcipher_ctx(geniv);
	int err;

	spin_lock_bh(&ctx->lock);
	err = skcipher_enqueue_givcrypt(&ctx->queue, req);
	spin_unlock_bh(&ctx->lock);

	if (test_and_set_bit(CHAINIV_STATE_INUSE, &ctx->state))
		return err;

	ctx->err = err;
	return async_chainiv_schedule_work(ctx);
}
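
/*
 * Common tail: hand out the current chain IV, encrypt, then save the
 * updated IV (for CBC, the last ciphertext block) for the next request.
 */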

static int async_chainiv_givencrypt_tail(struct skcipher_givcrypt_request *req)
{
	struct crypto_ablkcipher *geniv = skcipher_givcrypt_reqtfm(req);
	struct async_chainiv_ctx *ctx = crypto_ablkcipher_ctx(geniv);
	struct ablkcipher_request *subreq = skcipher_givcrypt_reqctx(req);
	unsigned int ivsize = crypto_ablkcipher_ivsize(geniv);

	memcpy(req->giv, ctx->iv, ivsize);
	memcpy(subreq->info, ctx->iv, ivsize);

	ctx->err = crypto_ablkcipher_encrypt(subreq);
	if (ctx->err)
		goto out;

	memcpy(ctx->iv, subreq->info, ivsize);

out:
	return async_chainiv_schedule_work(ctx);
}
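
/*
 * Fast path: handle the request inline if the INUSE bit is free and
 * nothing is queued ahead of us; otherwise postpone it so requests
 * stay ordered.
 */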

static int async_chainiv_givencrypt(struct skcipher_givcrypt_request *req)
{
	struct crypto_ablkcipher *geniv = skcipher_givcrypt_reqtfm(req);
	struct async_chainiv_ctx *ctx = crypto_ablkcipher_ctx(geniv);
	struct ablkcipher_request *subreq = skcipher_givcrypt_reqctx(req);

	ablkcipher_request_set_tfm(subreq, skcipher_geniv_cipher(geniv));
	ablkcipher_request_set_callback(subreq, req->creq.base.flags,
					req->creq.base.complete,
					req->creq.base.data);
	ablkcipher_request_set_crypt(subreq, req->creq.src, req->creq.dst,
				     req->creq.nbytes, req->creq.info);

	if (test_and_set_bit(CHAINIV_STATE_INUSE, &ctx->state))
		goto postpone;

	if (ctx->queue.qlen) {
		clear_bit(CHAINIV_STATE_INUSE, &ctx->state);
		goto postpone;
	}

	return async_chainiv_givencrypt_tail(req);

postpone:
	return async_chainiv_postpone_request(req);
}
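
/*
 * As in the sync variant, seed the IV once on first use and switch
 * the givencrypt hook over; the INUSE bit stands in for ctx->lock.
 */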

static int async_chainiv_givencrypt_first(struct skcipher_givcrypt_request *req)
{
	struct crypto_ablkcipher *geniv = skcipher_givcrypt_reqtfm(req);
	struct async_chainiv_ctx *ctx = crypto_ablkcipher_ctx(geniv);

	if (test_and_set_bit(CHAINIV_STATE_INUSE, &ctx->state))
		goto out;

	if (crypto_ablkcipher_crt(geniv)->givencrypt !=
	    async_chainiv_givencrypt_first)
		goto unlock;

	crypto_ablkcipher_crt(geniv)->givencrypt = async_chainiv_givencrypt;
	get_random_bytes(ctx->iv, crypto_ablkcipher_ivsize(geniv));

unlock:
	clear_bit(CHAINIV_STATE_INUSE, &ctx->state);

out:
	return async_chainiv_givencrypt(req);
}

static void async_chainiv_do_postponed(struct work_struct *work)
{
	struct async_chainiv_ctx *ctx = container_of(work,
						     struct async_chainiv_ctx,
						     postponed);
	struct skcipher_givcrypt_request *req;
	struct ablkcipher_request *subreq;
	int err;

	/* Only handle one request at a time to avoid hogging keventd. */
	spin_lock_bh(&ctx->lock);
	req = skcipher_dequeue_givcrypt(&ctx->queue);
	spin_unlock_bh(&ctx->lock);

	if (!req) {
		async_chainiv_schedule_work(ctx);
		return;
	}

	/* The request may sleep again now that it runs in process context. */
	subreq = skcipher_givcrypt_reqctx(req);
	subreq->base.flags |= CRYPTO_TFM_REQ_MAY_SLEEP;

	err = async_chainiv_givencrypt_tail(req);

	local_bh_disable();
	skcipher_givcrypt_complete(req, err);
	local_bh_enable();
}

static int async_chainiv_init(struct crypto_tfm *tfm)
{
	struct async_chainiv_ctx *ctx = crypto_tfm_ctx(tfm);

	spin_lock_init(&ctx->lock);

	crypto_init_queue(&ctx->queue, 100);
	INIT_WORK(&ctx->postponed, async_chainiv_do_postponed);

	return chainiv_init_common(tfm);
}

static void async_chainiv_exit(struct crypto_tfm *tfm)
{
	struct async_chainiv_ctx *ctx = crypto_tfm_ctx(tfm);

	BUG_ON(test_bit(CHAINIV_STATE_INUSE, &ctx->state) || ctx->queue.qlen);

	skcipher_geniv_exit(tfm);
}

static struct crypto_template chainiv_tmpl;
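
/*
 * Build a chainiv instance around the underlying cipher; callers that
 * do not require a synchronous algorithm get the async variant, which
 * serialises requests with the queue/worker machinery above.
 */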

static struct crypto_instance *chainiv_alloc(struct rtattr **tb)
{
	struct crypto_attr_type *algt;
	struct crypto_instance *inst;
	int err;

	algt = crypto_get_attr_type(tb);
	err = PTR_ERR(algt);
	if (IS_ERR(algt))
		return ERR_PTR(err);

	inst = skcipher_geniv_alloc(&chainiv_tmpl, tb, 0, 0);
	if (IS_ERR(inst))
		goto out;

	inst->alg.cra_ablkcipher.givencrypt = chainiv_givencrypt_first;

	inst->alg.cra_init = chainiv_init;
	inst->alg.cra_exit = skcipher_geniv_exit;

	inst->alg.cra_ctxsize = sizeof(struct chainiv_ctx);

	if (!crypto_requires_sync(algt->type, algt->mask)) {
		inst->alg.cra_flags |= CRYPTO_ALG_ASYNC;

		inst->alg.cra_ablkcipher.givencrypt =
			async_chainiv_givencrypt_first;

		inst->alg.cra_init = async_chainiv_init;
		inst->alg.cra_exit = async_chainiv_exit;

		inst->alg.cra_ctxsize = sizeof(struct async_chainiv_ctx);
	}

	/* Leave room for the IV itself at the end of the context. */
	inst->alg.cra_ctxsize += inst->alg.cra_ablkcipher.ivsize;

out:
	return inst;
}

static struct crypto_template chainiv_tmpl = {
	.name = "chainiv",
	.alloc = chainiv_alloc,
	.free = skcipher_geniv_free,
	.module = THIS_MODULE,
};

int __init chainiv_module_init(void)
{
	return crypto_register_template(&chainiv_tmpl);
}

void chainiv_module_exit(void)
{
	crypto_unregister_template(&chainiv_tmpl);
}