/*
 * Shared glue code for 128bit block ciphers
 *
 * Copyright (c) 2012 Jussi Kivilinna <jussi.kivilinna@mbnet.fi>
 *
 * CBC & ECB parts based on code (crypto/cbc.c,ecb.c) by:
 *   Copyright (c) 2006 Herbert Xu <herbert@gondor.apana.org.au>
 * CTR part based on code (crypto/ctr.c) by:
 *   (C) Copyright IBM Corp. 2007 - Joy Latten <latten@us.ibm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307
 * USA
 *
 */
#include <linux/module.h>
#include <crypto/b128ops.h>
#include <crypto/lrw.h>
#include <crypto/xts.h>
#include <asm/crypto/glue_helper.h>
#include <crypto/scatterwalk.h>
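
/*
 * For reference, the dispatch table these helpers consume is declared in
 * <asm/crypto/glue_helper.h>; paraphrased here for readability (see the
 * header for the authoritative definition):
 *
 *	struct common_glue_func_entry {
 *		unsigned int num_blocks;  (blocks processed per call)
 *		union {
 *			common_glue_func_t ecb;
 *			common_glue_cbc_func_t cbc;
 *			common_glue_ctr_func_t ctr;
 *		} fn_u;
 *	};
 *
 *	struct common_glue_ctx {
 *		unsigned int num_funcs;
 *		int fpu_blocks_limit;     (-1: FPU not needed at all)
 *		(first entry must have the largest num_blocks, last entry
 *		 must have num_blocks == 1)
 *		struct common_glue_func_entry funcs[];
 *	};
 */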

static int __glue_ecb_crypt_128bit(const struct common_glue_ctx *gctx,
				   struct blkcipher_desc *desc,
				   struct blkcipher_walk *walk)
{
	void *ctx = crypto_blkcipher_ctx(desc->tfm);
	const unsigned int bsize = 128 / 8;
	unsigned int nbytes, i, func_bytes;
	bool fpu_enabled = false;
	int err;

	err = blkcipher_walk_virt(desc, walk);

	while ((nbytes = walk->nbytes)) {
		u8 *wsrc = walk->src.virt.addr;
		u8 *wdst = walk->dst.virt.addr;

		fpu_enabled = glue_fpu_begin(bsize, gctx->fpu_blocks_limit,
					     desc, fpu_enabled, nbytes);

		/* Try the widest batch function first, fall through to
		 * progressively narrower ones for the remainder. */
		for (i = 0; i < gctx->num_funcs; i++) {
			func_bytes = bsize * gctx->funcs[i].num_blocks;

			/* Process multi-block batch */
			if (nbytes >= func_bytes) {
				do {
					gctx->funcs[i].fn_u.ecb(ctx, wdst,
								wsrc);

					wsrc += func_bytes;
					wdst += func_bytes;
					nbytes -= func_bytes;
				} while (nbytes >= func_bytes);

				if (nbytes < bsize)
					goto done;
			}
		}

done:
		err = blkcipher_walk_done(desc, walk, nbytes);
	}

	glue_fpu_end(fpu_enabled);
	return err;
}

int glue_ecb_crypt_128bit(const struct common_glue_ctx *gctx,
			  struct blkcipher_desc *desc, struct scatterlist *dst,
			  struct scatterlist *src, unsigned int nbytes)
{
	struct blkcipher_walk walk;

	blkcipher_walk_init(&walk, dst, src, nbytes);
	return __glue_ecb_crypt_128bit(gctx, desc, &walk);
}
EXPORT_SYMBOL_GPL(glue_ecb_crypt_128bit);
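
/*
 * Typical usage (a hypothetical sketch, not part of this file): a cipher
 * driver fills a common_glue_ctx with its widest SIMD routine first and a
 * plain one-block routine last, then forwards its blkcipher ecb handler
 * here. Names such as mycipher_enc_8way, __mycipher_encrypt and
 * MYCIPHER_PARALLEL_BLOCKS are illustrative only; the pattern is modeled
 * on the serpent-sse2/avx glue code.
 *
 *	static const struct common_glue_ctx mycipher_enc = {
 *		.num_funcs = 2,
 *		.fpu_blocks_limit = MYCIPHER_PARALLEL_BLOCKS,
 *
 *		.funcs = { {
 *			.num_blocks = MYCIPHER_PARALLEL_BLOCKS,
 *			.fn_u = { .ecb = GLUE_FUNC_CAST(mycipher_enc_8way) }
 *		}, {
 *			.num_blocks = 1,
 *			.fn_u = { .ecb = GLUE_FUNC_CAST(__mycipher_encrypt) }
 *		} }
 *	};
 *
 *	static int ecb_encrypt(struct blkcipher_desc *desc,
 *			       struct scatterlist *dst,
 *			       struct scatterlist *src, unsigned int nbytes)
 *	{
 *		return glue_ecb_crypt_128bit(&mycipher_enc, desc, dst, src,
 *					     nbytes);
 *	}
 */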

static unsigned int __glue_cbc_encrypt_128bit(const common_glue_func_t fn,
					      struct blkcipher_desc *desc,
					      struct blkcipher_walk *walk)
{
	void *ctx = crypto_blkcipher_ctx(desc->tfm);
	const unsigned int bsize = 128 / 8;
	unsigned int nbytes = walk->nbytes;
	u128 *src = (u128 *)walk->src.virt.addr;
	u128 *dst = (u128 *)walk->dst.virt.addr;
	u128 *iv = (u128 *)walk->iv;

	/* Encryption is sequential: each block chains on the previous
	 * ciphertext block. */
	do {
		u128_xor(dst, src, iv);
		fn(ctx, (u8 *)dst, (u8 *)dst);
		iv = dst;

		src += 1;
		dst += 1;
		nbytes -= bsize;
	} while (nbytes >= bsize);

	*(u128 *)walk->iv = *iv;
	return nbytes;
}

int glue_cbc_encrypt_128bit(const common_glue_func_t fn,
			    struct blkcipher_desc *desc,
			    struct scatterlist *dst,
			    struct scatterlist *src, unsigned int nbytes)
{
	struct blkcipher_walk walk;
	int err;

	blkcipher_walk_init(&walk, dst, src, nbytes);
	err = blkcipher_walk_virt(desc, &walk);

	while ((nbytes = walk.nbytes)) {
		nbytes = __glue_cbc_encrypt_128bit(fn, desc, &walk);
		err = blkcipher_walk_done(desc, &walk, nbytes);
	}

	return err;
}
EXPORT_SYMBOL_GPL(glue_cbc_encrypt_128bit);
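
/*
 * CBC encryption cannot be parallelized, which is why this entry point
 * takes a single one-block function rather than a common_glue_ctx and
 * never calls glue_fpu_begin(). A hypothetical wrapper, with
 * __mycipher_encrypt standing in for a real one-block routine:
 *
 *	static int cbc_encrypt(struct blkcipher_desc *desc,
 *			       struct scatterlist *dst,
 *			       struct scatterlist *src, unsigned int nbytes)
 *	{
 *		return glue_cbc_encrypt_128bit(
 *			GLUE_FUNC_CAST(__mycipher_encrypt), desc, dst, src,
 *			nbytes);
 *	}
 */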

static unsigned int
__glue_cbc_decrypt_128bit(const struct common_glue_ctx *gctx,
			  struct blkcipher_desc *desc,
			  struct blkcipher_walk *walk)
{
	void *ctx = crypto_blkcipher_ctx(desc->tfm);
	const unsigned int bsize = 128 / 8;
	unsigned int nbytes = walk->nbytes;
	u128 *src = (u128 *)walk->src.virt.addr;
	u128 *dst = (u128 *)walk->dst.virt.addr;
	u128 last_iv;
	unsigned int num_blocks, func_bytes;
	unsigned int i;

	/* Start of the last block. Walk backwards so that in-place
	 * decryption does not overwrite the previous ciphertext blocks
	 * that are still needed for chaining. */
	src += nbytes / bsize - 1;
	dst += nbytes / bsize - 1;

	last_iv = *src;

	for (i = 0; i < gctx->num_funcs; i++) {
		num_blocks = gctx->funcs[i].num_blocks;
		func_bytes = bsize * num_blocks;

		/* Process multi-block batch */
		if (nbytes >= func_bytes) {
			do {
				nbytes -= func_bytes - bsize;
				src -= num_blocks - 1;
				dst -= num_blocks - 1;

				gctx->funcs[i].fn_u.cbc(ctx, dst, src);

				nbytes -= bsize;
				if (nbytes < bsize)
					goto done;

				u128_xor(dst, dst, src - 1);
				src -= 1;
				dst -= 1;
			} while (nbytes >= func_bytes);

			if (nbytes < bsize)
				goto done;
		}
	}

done:
	u128_xor(dst, dst, (u128 *)walk->iv);
	*(u128 *)walk->iv = last_iv;

	return nbytes;
}

int glue_cbc_decrypt_128bit(const struct common_glue_ctx *gctx,
			    struct blkcipher_desc *desc,
			    struct scatterlist *dst,
			    struct scatterlist *src, unsigned int nbytes)
{
	const unsigned int bsize = 128 / 8;
	bool fpu_enabled = false;
	struct blkcipher_walk walk;
	int err;

	blkcipher_walk_init(&walk, dst, src, nbytes);
	err = blkcipher_walk_virt(desc, &walk);

	while ((nbytes = walk.nbytes)) {
		fpu_enabled = glue_fpu_begin(bsize, gctx->fpu_blocks_limit,
					     desc, fpu_enabled, nbytes);
		nbytes = __glue_cbc_decrypt_128bit(gctx, desc, &walk);
		err = blkcipher_walk_done(desc, &walk, nbytes);
	}

	glue_fpu_end(fpu_enabled);
	return err;
}
EXPORT_SYMBOL_GPL(glue_cbc_decrypt_128bit);
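
/*
 * Unlike encryption, CBC decryption parallelizes (every ciphertext block
 * needed for the XOR chain is already available), so this side does take
 * a common_glue_ctx. A hypothetical table, with mycipher_dec_cbc_8way and
 * __mycipher_decrypt as illustrative stand-ins:
 *
 *	static const struct common_glue_ctx mycipher_dec_cbc = {
 *		.num_funcs = 2,
 *		.fpu_blocks_limit = MYCIPHER_PARALLEL_BLOCKS,
 *
 *		.funcs = { {
 *			.num_blocks = MYCIPHER_PARALLEL_BLOCKS,
 *			.fn_u = { .cbc =
 *				GLUE_CBC_FUNC_CAST(mycipher_dec_cbc_8way) }
 *		}, {
 *			.num_blocks = 1,
 *			.fn_u = { .cbc =
 *				GLUE_CBC_FUNC_CAST(__mycipher_decrypt) }
 *		} }
 *	};
 */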

static void glue_ctr_crypt_final_128bit(const common_glue_ctr_func_t fn_ctr,
					struct blkcipher_desc *desc,
					struct blkcipher_walk *walk)
{
	void *ctx = crypto_blkcipher_ctx(desc->tfm);
	u8 *src = (u8 *)walk->src.virt.addr;
	u8 *dst = (u8 *)walk->dst.virt.addr;
	unsigned int nbytes = walk->nbytes;
	u128 ctrblk;
	u128 tmp;

	be128_to_u128(&ctrblk, (be128 *)walk->iv);

	/* Handle the final partial block via a full-block stack buffer, so
	 * the one-block CTR function never reads or writes past nbytes. */
	memcpy(&tmp, src, nbytes);
	fn_ctr(ctx, &tmp, &tmp, &ctrblk);
	memcpy(dst, &tmp, nbytes);

	u128_to_be128((be128 *)walk->iv, &ctrblk);
}

static unsigned int __glue_ctr_crypt_128bit(const struct common_glue_ctx *gctx,
					    struct blkcipher_desc *desc,
					    struct blkcipher_walk *walk)
{
	const unsigned int bsize = 128 / 8;
	void *ctx = crypto_blkcipher_ctx(desc->tfm);
	unsigned int nbytes = walk->nbytes;
	u128 *src = (u128 *)walk->src.virt.addr;
	u128 *dst = (u128 *)walk->dst.virt.addr;
	u128 ctrblk;
	unsigned int num_blocks, func_bytes;
	unsigned int i;

	be128_to_u128(&ctrblk, (be128 *)walk->iv);

	/* Process multi-block batch */
	for (i = 0; i < gctx->num_funcs; i++) {
		num_blocks = gctx->funcs[i].num_blocks;
		func_bytes = bsize * num_blocks;

		if (nbytes >= func_bytes) {
			do {
				gctx->funcs[i].fn_u.ctr(ctx, dst, src,
							&ctrblk);

				src += num_blocks;
				dst += num_blocks;
				nbytes -= func_bytes;
			} while (nbytes >= func_bytes);

			if (nbytes < bsize)
				goto done;
		}
	}

done:
	u128_to_be128((be128 *)walk->iv, &ctrblk);
	return nbytes;
}

int glue_ctr_crypt_128bit(const struct common_glue_ctx *gctx,
			  struct blkcipher_desc *desc, struct scatterlist *dst,
			  struct scatterlist *src, unsigned int nbytes)
{
	const unsigned int bsize = 128 / 8;
	bool fpu_enabled = false;
	struct blkcipher_walk walk;
	int err;

	blkcipher_walk_init(&walk, dst, src, nbytes);
	err = blkcipher_walk_virt_block(desc, &walk, bsize);

	while ((nbytes = walk.nbytes) >= bsize) {
		fpu_enabled = glue_fpu_begin(bsize, gctx->fpu_blocks_limit,
					     desc, fpu_enabled, nbytes);
		nbytes = __glue_ctr_crypt_128bit(gctx, desc, &walk);
		err = blkcipher_walk_done(desc, &walk, nbytes);
	}

	glue_fpu_end(fpu_enabled);

	/* Handle a trailing partial block with the one-block CTR function
	 * (the last funcs[] entry must have num_blocks == 1). */
	if (walk.nbytes) {
		glue_ctr_crypt_final_128bit(
			gctx->funcs[gctx->num_funcs - 1].fn_u.ctr, desc,
			&walk);
		err = blkcipher_walk_done(desc, &walk, 0);
	}

	return err;
}
EXPORT_SYMBOL_GPL(glue_ctr_crypt_128bit);
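
/*
 * CTR usage mirrors the ECB case; the last (num_blocks == 1) entry also
 * serves as the fallback for the final partial block above. A hypothetical
 * table, with mycipher_ctr_8way and mycipher_crypt_ctr as illustrative
 * stand-ins for a driver's SIMD and one-block CTR routines:
 *
 *	static const struct common_glue_ctx mycipher_ctr = {
 *		.num_funcs = 2,
 *		.fpu_blocks_limit = MYCIPHER_PARALLEL_BLOCKS,
 *
 *		.funcs = { {
 *			.num_blocks = MYCIPHER_PARALLEL_BLOCKS,
 *			.fn_u = { .ctr =
 *				GLUE_CTR_FUNC_CAST(mycipher_ctr_8way) }
 *		}, {
 *			.num_blocks = 1,
 *			.fn_u = { .ctr =
 *				GLUE_CTR_FUNC_CAST(mycipher_crypt_ctr) }
 *		} }
 *	};
 */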

MODULE_LICENSE("GPL");