/*
 * Copyright (C)2006 USAGI/WIDE Project
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
 *
 * Author:
 *	Kazunori Miyazawa <miyazawa@linux-ipv6.org>
 */

#include <linux/crypto.h>
#include <linux/err.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/rtnetlink.h>
#include <linux/slab.h>
#include <linux/scatterlist.h>
#include "internal.h"
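
/*
 * RFC 3566 constants: block-size runs of 0x01, 0x02 and 0x03 bytes.
 * Encrypting each run under the user-supplied key K yields the derived
 * keys K1, K2 and K3 used by the XCBC construction.
 */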
static u_int32_t ks[12] = {0x01010101, 0x01010101, 0x01010101, 0x01010101,
                           0x02020202, 0x02020202, 0x02020202, 0x02020202,
                           0x03030303, 0x03030303, 0x03030303, 0x03030303};

/*
 * +------------------------
 * | <parent tfm>
 * +------------------------
 * | crypto_xcbc_ctx
 * +------------------------
 * | odds (block size)
 * +------------------------
 * | prev (block size)
 * +------------------------
 * | key (block size)
 * +------------------------
 * | consts (block size * 3)
 * +------------------------
 */
struct crypto_xcbc_ctx {
        struct crypto_tfm *child;       /* underlying block cipher */
        u8 *odds;                       /* partial-block buffer */
        u8 *prev;                       /* CBC chaining value */
        u8 *key;                        /* original key K */
        u8 *consts;                     /* RFC 3566 const1..const3 */
        void (*xor)(u8 *a, const u8 *b, unsigned int bs);
        u_int32_t keylen;
        u_int32_t len;                  /* bytes buffered in odds */
};

static void xor_128(u8 *a, const u8 *b, unsigned int bs)
{
        ((u32 *)a)[0] ^= ((u32 *)b)[0];
        ((u32 *)a)[1] ^= ((u32 *)b)[1];
        ((u32 *)a)[2] ^= ((u32 *)b)[2];
        ((u32 *)a)[3] ^= ((u32 *)b)[3];
}
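
/*
 * Derive K1 = E_K(const1) and install it as the key of the underlying
 * cipher; message blocks are then chained through encryption under K1.
 */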
static int _crypto_xcbc_digest_setkey(struct crypto_hash *parent,
                                      struct crypto_xcbc_ctx *ctx)
{
        int bs = crypto_hash_blocksize(parent);
        int err = 0;
        u8 key1[bs];

        if ((err = crypto_cipher_setkey(ctx->child, ctx->key, ctx->keylen)))
                return err;

        ctx->child->__crt_alg->cra_cipher.cia_encrypt(ctx->child, key1,
                                                      ctx->consts);

        return crypto_cipher_setkey(ctx->child, key1, bs);
}

static int crypto_xcbc_digest_setkey(struct crypto_hash *parent,
                                     const u8 *inkey, unsigned int keylen)
{
        struct crypto_xcbc_ctx *ctx = crypto_hash_ctx_aligned(parent);

        if (keylen != crypto_tfm_alg_blocksize(ctx->child))
                return -EINVAL;

        ctx->keylen = keylen;
        memcpy(ctx->key, inkey, keylen);
        ctx->consts = (u8 *)ks;

        return _crypto_xcbc_digest_setkey(parent, ctx);
}

static int crypto_xcbc_digest_init(struct hash_desc *pdesc)
{
        struct crypto_xcbc_ctx *ctx = crypto_hash_ctx_aligned(pdesc->tfm);
        int bs = crypto_hash_blocksize(pdesc->tfm);

        ctx->len = 0;
        memset(ctx->odds, 0, bs);
        memset(ctx->prev, 0, bs);

        return 0;
}
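
/*
 * Walk the scatterlist page by page: buffer partial input in ->odds
 * until a full block is available, then fold each full block into
 * ->prev with XOR-then-encrypt (CBC-MAC chaining).  A trailing
 * partial block stays in ->odds for final().
 */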
static int crypto_xcbc_digest_update(struct hash_desc *pdesc,
                                     struct scatterlist *sg,
                                     unsigned int nbytes)
{
        struct crypto_hash *parent = pdesc->tfm;
        struct crypto_xcbc_ctx *ctx = crypto_hash_ctx_aligned(parent);
        struct crypto_tfm *tfm = ctx->child;
        int bs = crypto_hash_blocksize(parent);
        unsigned int i = 0;

        do {
                struct page *pg = sg[i].page;
                unsigned int offset = sg[i].offset;
                unsigned int slen = sg[i].length;

                while (slen > 0) {
                        unsigned int len = min(slen, ((unsigned int)(PAGE_SIZE)) - offset);
                        char *p = crypto_kmap(pg, 0) + offset;

                        /* check whether the data can fill the block */
                        if ((ctx->len + len) <= bs) {
                                memcpy(ctx->odds + ctx->len, p, len);
                                ctx->len += len;
                                slen -= len;

                                /* check the rest of the page */
                                if (len + offset >= PAGE_SIZE) {
                                        offset = 0;
                                        pg++;
                                } else
                                        offset += len;

                                crypto_kunmap(p, 0);
                                crypto_yield(tfm->crt_flags);
                                continue;
                        }

                        /* fill odds with new data and encrypt it */
                        memcpy(ctx->odds + ctx->len, p, bs - ctx->len);
                        len -= bs - ctx->len;
                        p += bs - ctx->len;

                        ctx->xor(ctx->prev, ctx->odds, bs);
                        tfm->__crt_alg->cra_cipher.cia_encrypt(tfm, ctx->prev, ctx->prev);

                        /* clear the length */
                        ctx->len = 0;

                        /* encrypt the rest of the data */
                        while (len > bs) {
                                ctx->xor(ctx->prev, p, bs);
                                tfm->__crt_alg->cra_cipher.cia_encrypt(tfm, ctx->prev, ctx->prev);
                                p += bs;
                                len -= bs;
                        }

                        /* keep the surplus of blocksize */
                        if (len) {
                                memcpy(ctx->odds, p, len);
                                ctx->len = len;
                        }

                        crypto_kunmap(p, 0);
                        crypto_yield(tfm->crt_flags);
                        slen -= min(slen, ((unsigned int)(PAGE_SIZE)) - offset);
                        offset = 0;
                        pg++;
                }

                nbytes -= sg[i].length;
                i++;
        } while (nbytes > 0);

        return 0;
}
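
/*
 * Finalization per RFC 3566: a complete last block is XORed with
 * K2 = E_K(const2); an incomplete one is padded with a single 0x80
 * byte plus zeros and XORed with K3 = E_K(const3).  One more
 * encryption of ->prev then produces the MAC.
 */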
static int crypto_xcbc_digest_final(struct hash_desc *pdesc, u8 *out)
{
        struct crypto_hash *parent = pdesc->tfm;
        struct crypto_xcbc_ctx *ctx = crypto_hash_ctx_aligned(parent);
        struct crypto_tfm *tfm = ctx->child;
        int bs = crypto_hash_blocksize(parent);
        int err = 0;

        if (ctx->len == bs) {
                u8 key2[bs];

                if ((err = crypto_cipher_setkey(tfm, ctx->key, ctx->keylen)) != 0)
                        return err;

                tfm->__crt_alg->cra_cipher.cia_encrypt(tfm, key2,
                                (const u8 *)(ctx->consts + bs));

                ctx->xor(ctx->prev, ctx->odds, bs);
                ctx->xor(ctx->prev, key2, bs);
                _crypto_xcbc_digest_setkey(parent, ctx);

                tfm->__crt_alg->cra_cipher.cia_encrypt(tfm, out, ctx->prev);
        } else {
                u8 key3[bs];
                unsigned int rlen;
                u8 *p = ctx->odds + ctx->len;

                *p = 0x80;
                p++;

                rlen = bs - ctx->len - 1;
                if (rlen)
                        memset(p, 0, rlen);

                if ((err = crypto_cipher_setkey(tfm, ctx->key, ctx->keylen)) != 0)
                        return err;

                tfm->__crt_alg->cra_cipher.cia_encrypt(tfm, key3,
                                (const u8 *)(ctx->consts + bs * 2));

                ctx->xor(ctx->prev, ctx->odds, bs);
                ctx->xor(ctx->prev, key3, bs);

                _crypto_xcbc_digest_setkey(parent, ctx);

                tfm->__crt_alg->cra_cipher.cia_encrypt(tfm, out, ctx->prev);
        }

        return 0;
}

static int crypto_xcbc_digest(struct hash_desc *pdesc,
                              struct scatterlist *sg,
                              unsigned int nbytes, u8 *out)
{
        crypto_xcbc_digest_init(pdesc);
        crypto_xcbc_digest_update(pdesc, sg, nbytes);
        return crypto_xcbc_digest_final(pdesc, out);
}

static int xcbc_init_tfm(struct crypto_tfm *tfm)
{
        struct crypto_instance *inst = (void *)tfm->__crt_alg;
        struct crypto_spawn *spawn = crypto_instance_ctx(inst);
        struct crypto_xcbc_ctx *ctx = crypto_hash_ctx_aligned(__crypto_hash_cast(tfm));
        int bs = crypto_hash_blocksize(__crypto_hash_cast(tfm));

        tfm = crypto_spawn_tfm(spawn);
        if (IS_ERR(tfm))
                return PTR_ERR(tfm);

        switch (bs) {
        case 16:
                ctx->xor = xor_128;
                break;
        default:
                return -EINVAL;
        }

        ctx->child = crypto_cipher_cast(tfm);
        ctx->odds = (u8 *)(ctx + 1);
        ctx->prev = ctx->odds + bs;
        ctx->key = ctx->prev + bs;

        return 0;
}

static void xcbc_exit_tfm(struct crypto_tfm *tfm)
{
        struct crypto_xcbc_ctx *ctx = crypto_hash_ctx_aligned(__crypto_hash_cast(tfm));
        crypto_free_cipher(ctx->child);
}
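
/*
 * Template constructor: "xcbc(cipher)" wraps a 128-bit block cipher
 * in the hash interface implemented above.
 */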
static struct crypto_instance *xcbc_alloc(void *param, unsigned int len)
{
        struct crypto_instance *inst;
        struct crypto_alg *alg;

        alg = crypto_get_attr_alg(param, len, CRYPTO_ALG_TYPE_CIPHER,
                                  CRYPTO_ALG_TYPE_HASH_MASK | CRYPTO_ALG_ASYNC);
        if (IS_ERR(alg))
                return ERR_PTR(PTR_ERR(alg));

        switch (alg->cra_blocksize) {
        case 16:
                break;
        default:
                /* only 128-bit block ciphers are supported (xor_128) */
                inst = ERR_PTR(-EINVAL);
                goto out_put_alg;
        }

        inst = crypto_alloc_instance("xcbc", alg);
        if (IS_ERR(inst))
                goto out_put_alg;

        inst->alg.cra_flags = CRYPTO_ALG_TYPE_HASH;
        inst->alg.cra_priority = alg->cra_priority;
        inst->alg.cra_blocksize = alg->cra_blocksize;
        inst->alg.cra_alignmask = alg->cra_alignmask;
        inst->alg.cra_type = &crypto_hash_type;

        inst->alg.cra_hash.digestsize =
                (alg->cra_flags & CRYPTO_ALG_TYPE_MASK) ==
                CRYPTO_ALG_TYPE_HASH ? alg->cra_hash.digestsize :
                                       alg->cra_blocksize;
        inst->alg.cra_ctxsize = sizeof(struct crypto_xcbc_ctx) +
                                ALIGN(inst->alg.cra_blocksize * 3,
                                      sizeof(void *));
        inst->alg.cra_init = xcbc_init_tfm;
        inst->alg.cra_exit = xcbc_exit_tfm;

        inst->alg.cra_hash.init = crypto_xcbc_digest_init;
        inst->alg.cra_hash.update = crypto_xcbc_digest_update;
        inst->alg.cra_hash.final = crypto_xcbc_digest_final;
        inst->alg.cra_hash.digest = crypto_xcbc_digest;
        inst->alg.cra_hash.setkey = crypto_xcbc_digest_setkey;

out_put_alg:
        crypto_mod_put(alg);
        return inst;
}

static void xcbc_free(struct crypto_instance *inst)
{
        crypto_drop_spawn(crypto_instance_ctx(inst));
        kfree(inst);
}

static struct crypto_template crypto_xcbc_tmpl = {
        .name = "xcbc",
        .alloc = xcbc_alloc,
        .free = xcbc_free,
        .module = THIS_MODULE,
};

static int __init crypto_xcbc_module_init(void)
{
        return crypto_register_template(&crypto_xcbc_tmpl);
}

static void __exit crypto_xcbc_module_exit(void)
{
        crypto_unregister_template(&crypto_xcbc_tmpl);
}

module_init(crypto_xcbc_module_init);
module_exit(crypto_xcbc_module_exit);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("XCBC keyed hash algorithm");
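
/*
 * Example usage (a sketch, not part of this module): once the template
 * is registered and a suitable cipher such as "aes" is available, a
 * MAC can be computed through the hash API of this kernel generation.
 * Error handling is abbreviated.
 *
 *	struct crypto_hash *tfm;
 *	struct hash_desc desc;
 *	struct scatterlist sg;
 *	u8 key[16], mac[16];
 *
 *	tfm = crypto_alloc_hash("xcbc(aes)", 0, CRYPTO_ALG_ASYNC);
 *	if (IS_ERR(tfm))
 *		return PTR_ERR(tfm);
 *	desc.tfm = tfm;
 *	desc.flags = 0;
 *	sg_init_one(&sg, data, datalen);
 *	crypto_hash_setkey(tfm, key, sizeof(key));
 *	crypto_hash_digest(&desc, &sg, datalen, mac);
 *	crypto_free_hash(tfm);
 */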