/*
 * eseqiv: Encrypted Sequence Number IV Generator
 *
 * This generator generates an IV based on a sequence number by xoring it
 * with a salt and then encrypting it with the same key as used to encrypt
 * the plain text.  This algorithm requires that the block size be equal
 * to the IV size.  It is mainly useful for CBC.
 *
 * Copyright (c) 2007 Herbert Xu <herbert@gondor.apana.org.au>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 */
#include <crypto/internal/skcipher.h>
#include <crypto/scatterwalk.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/random.h>
#include <linux/scatterlist.h>
#include <linux/spinlock.h>
#include <linux/string.h>
30 struct eseqiv_request_ctx
{
31 struct scatterlist src
[2];
32 struct scatterlist dst
[2];
42 static void eseqiv_complete2(struct skcipher_givcrypt_request
*req
)
44 struct crypto_ablkcipher
*geniv
= skcipher_givcrypt_reqtfm(req
);
45 struct eseqiv_request_ctx
*reqctx
= skcipher_givcrypt_reqctx(req
);
47 memcpy(req
->giv
, PTR_ALIGN((u8
*)reqctx
->tail
,
48 crypto_ablkcipher_alignmask(geniv
) + 1),
49 crypto_ablkcipher_ivsize(geniv
));
52 static void eseqiv_complete(struct crypto_async_request
*base
, int err
)
54 struct skcipher_givcrypt_request
*req
= base
->data
;
59 eseqiv_complete2(req
);
62 skcipher_givcrypt_complete(req
, err
);
65 static void eseqiv_chain(struct scatterlist
*head
, struct scatterlist
*sg
,
69 head
->length
+= sg
->length
;
70 sg
= scatterwalk_sg_next(sg
);
74 scatterwalk_sg_chain(head
, 2, sg
);
79 static int eseqiv_givencrypt(struct skcipher_givcrypt_request
*req
)
81 struct crypto_ablkcipher
*geniv
= skcipher_givcrypt_reqtfm(req
);
82 struct eseqiv_ctx
*ctx
= crypto_ablkcipher_ctx(geniv
);
83 struct eseqiv_request_ctx
*reqctx
= skcipher_givcrypt_reqctx(req
);
84 struct ablkcipher_request
*subreq
;
85 crypto_completion_t complete
;
87 struct scatterlist
*osrc
, *odst
;
88 struct scatterlist
*dst
;
99 subreq
= (void *)(reqctx
->tail
+ ctx
->reqoff
);
100 ablkcipher_request_set_tfm(subreq
, skcipher_geniv_cipher(geniv
));
103 complete
= req
->creq
.base
.complete
;
104 data
= req
->creq
.base
.data
;
106 osrc
= req
->creq
.src
;
107 odst
= req
->creq
.dst
;
108 srcp
= sg_page(osrc
);
109 dstp
= sg_page(odst
);
110 vsrc
= PageHighMem(srcp
) ? NULL
: page_address(srcp
) + osrc
->offset
;
111 vdst
= PageHighMem(dstp
) ? NULL
: page_address(dstp
) + odst
->offset
;
113 ivsize
= crypto_ablkcipher_ivsize(geniv
);
115 if (vsrc
!= giv
+ ivsize
&& vdst
!= giv
+ ivsize
) {
116 giv
= PTR_ALIGN((u8
*)reqctx
->tail
,
117 crypto_ablkcipher_alignmask(geniv
) + 1);
118 complete
= eseqiv_complete
;
122 ablkcipher_request_set_callback(subreq
, req
->creq
.base
.flags
, complete
,
125 sg_init_table(reqctx
->src
, 2);
126 sg_set_buf(reqctx
->src
, giv
, ivsize
);
127 eseqiv_chain(reqctx
->src
, osrc
, vsrc
== giv
+ ivsize
);
131 sg_init_table(reqctx
->dst
, 2);
132 sg_set_buf(reqctx
->dst
, giv
, ivsize
);
133 eseqiv_chain(reqctx
->dst
, odst
, vdst
== giv
+ ivsize
);
138 ablkcipher_request_set_crypt(subreq
, reqctx
->src
, dst
,
139 req
->creq
.nbytes
+ ivsize
,
142 memcpy(req
->creq
.info
, ctx
->salt
, ivsize
);
145 if (ivsize
> sizeof(u64
)) {
146 memset(req
->giv
, 0, ivsize
- sizeof(u64
));
149 seq
= cpu_to_be64(req
->seq
);
150 memcpy(req
->giv
+ ivsize
- len
, &seq
, len
);
152 err
= crypto_ablkcipher_encrypt(subreq
);
156 eseqiv_complete2(req
);
162 static int eseqiv_givencrypt_first(struct skcipher_givcrypt_request
*req
)
164 struct crypto_ablkcipher
*geniv
= skcipher_givcrypt_reqtfm(req
);
165 struct eseqiv_ctx
*ctx
= crypto_ablkcipher_ctx(geniv
);
167 spin_lock_bh(&ctx
->lock
);
168 if (crypto_ablkcipher_crt(geniv
)->givencrypt
!= eseqiv_givencrypt_first
)
171 crypto_ablkcipher_crt(geniv
)->givencrypt
= eseqiv_givencrypt
;
172 get_random_bytes(ctx
->salt
, crypto_ablkcipher_ivsize(geniv
));
175 spin_unlock_bh(&ctx
->lock
);
177 return eseqiv_givencrypt(req
);
180 static int eseqiv_init(struct crypto_tfm
*tfm
)
182 struct crypto_ablkcipher
*geniv
= __crypto_ablkcipher_cast(tfm
);
183 struct eseqiv_ctx
*ctx
= crypto_ablkcipher_ctx(geniv
);
184 unsigned long alignmask
;
185 unsigned int reqsize
;
187 spin_lock_init(&ctx
->lock
);
189 alignmask
= crypto_tfm_ctx_alignment() - 1;
190 reqsize
= sizeof(struct eseqiv_request_ctx
);
192 if (alignmask
& reqsize
) {
193 alignmask
&= reqsize
;
197 alignmask
= ~alignmask
;
198 alignmask
&= crypto_ablkcipher_alignmask(geniv
);
200 reqsize
+= alignmask
;
201 reqsize
+= crypto_ablkcipher_ivsize(geniv
);
202 reqsize
= ALIGN(reqsize
, crypto_tfm_ctx_alignment());
204 ctx
->reqoff
= reqsize
- sizeof(struct eseqiv_request_ctx
);
206 tfm
->crt_ablkcipher
.reqsize
= reqsize
+
207 sizeof(struct ablkcipher_request
);
209 return skcipher_geniv_init(tfm
);
/* Forward declaration: eseqiv_alloc() needs the template's address. */
static struct crypto_template eseqiv_tmpl;
214 static struct crypto_instance
*eseqiv_alloc(struct rtattr
**tb
)
216 struct crypto_instance
*inst
;
219 inst
= skcipher_geniv_alloc(&eseqiv_tmpl
, tb
, 0, 0);
224 if (inst
->alg
.cra_ablkcipher
.ivsize
!= inst
->alg
.cra_blocksize
)
227 inst
->alg
.cra_ablkcipher
.givencrypt
= eseqiv_givencrypt_first
;
229 inst
->alg
.cra_init
= eseqiv_init
;
230 inst
->alg
.cra_exit
= skcipher_geniv_exit
;
232 inst
->alg
.cra_ctxsize
= sizeof(struct eseqiv_ctx
);
233 inst
->alg
.cra_ctxsize
+= inst
->alg
.cra_ablkcipher
.ivsize
;
239 skcipher_geniv_free(inst
);
244 static struct crypto_template eseqiv_tmpl
= {
246 .alloc
= eseqiv_alloc
,
247 .free
= skcipher_geniv_free
,
248 .module
= THIS_MODULE
,
251 int __init
eseqiv_module_init(void)
253 return crypto_register_template(&eseqiv_tmpl
);
256 void __exit
eseqiv_module_exit(void)
258 crypto_unregister_template(&eseqiv_tmpl
);