/*
 * Cryptographic API.
 *
 * Cipher operations.
 *
 * Copyright (c) 2002 James Morris <jmorris@intercode.com.au>
 * Generic scatterwalk code by Adam J. Richter <adam@yggdrasil.com>.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 */
#include <linux/kernel.h>
#include <linux/crypto.h>
#include <linux/errno.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/pagemap.h>
#include <linux/highmem.h>
#include <asm/scatterlist.h>
#include "internal.h"
typedef void (cryptfn_t)(void *, u8 *, const u8 *);
typedef void (procfn_t)(struct crypto_tfm *, u8 *,
			u8 *, cryptfn_t, int enc, void *);
struct scatter_walk {
	struct scatterlist	*sg;
	struct page		*page;
	void			*data;
	unsigned int		len_this_page;
	unsigned int		len_this_segment;
	unsigned int		offset;
};

enum km_type crypto_km_types[] = {
	KM_USER0,
	KM_USER1,
	KM_SOFTIRQ0,
	KM_SOFTIRQ1,
};
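
/* Block-sized XOR helpers used for CBC chaining, one per supported
   cipher block size (64 and 128 bits). */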
static inline void xor_64(u8 *a, const u8 *b)
{
	((u32 *)a)[0] ^= ((u32 *)b)[0];
	((u32 *)a)[1] ^= ((u32 *)b)[1];
}
static inline void xor_128(u8 *a, const u8 *b)
{
	((u32 *)a)[0] ^= ((u32 *)b)[0];
	((u32 *)a)[1] ^= ((u32 *)b)[1];
	((u32 *)a)[2] ^= ((u32 *)b)[2];
	((u32 *)a)[3] ^= ((u32 *)b)[3];
}
/* sg_next is defined as an inline routine for now, in case we want to
   change scatterlist to a linked list later. */
static inline struct scatterlist *sg_next(struct scatterlist *sg)
{
	return sg + 1;
}
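
/* Return walk->data directly when the next nbytes lie entirely within
   the currently mapped page; otherwise hand back the caller's scratch
   block so the data can be assembled across the page boundary. */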
void *which_buf(struct scatter_walk *walk, unsigned int nbytes, void *scratch)
{
	if (nbytes <= walk->len_this_page &&
	    (((unsigned long)walk->data) & (PAGE_CACHE_SIZE - 1)) + nbytes <=
	    PAGE_CACHE_SIZE)
		return walk->data;
	else
		return scratch;
}
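
/* Copy between a linear buffer and mapped scatterlist data; a non-zero
   out copies towards the scatterlist. */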
static void memcpy_dir(void *buf, void *sgdata, size_t nbytes, int out)
{
	if (out)
		memcpy(sgdata, buf, nbytes);
	else
		memcpy(buf, sgdata, nbytes);
}
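
/* Begin walking a new scatterlist entry, clamping len_this_page to
   whatever remains of the entry's first page. */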
static void scatterwalk_start(struct scatter_walk *walk, struct scatterlist *sg)
{
	unsigned int rest_of_page;

	walk->sg = sg;

	walk->page = sg->page;
	walk->len_this_segment = sg->length;

	rest_of_page = PAGE_CACHE_SIZE - (sg->offset & (PAGE_CACHE_SIZE - 1));
	walk->len_this_page = min(sg->length, rest_of_page);
	walk->offset = sg->offset;
}
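
/* Map the current page and point walk->data at the walk's offset
   within it. */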
static void scatterwalk_map(struct scatter_walk *walk, int out)
{
	walk->data = crypto_kmap(walk->page, out) + walk->offset;
}
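
/* Finish with the current page: flush the dcache if we wrote to it,
   then advance to the next page of this segment or on to the next
   scatterlist entry. */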
static void scatter_page_done(struct scatter_walk *walk, int out,
			      unsigned int more)
{
	/* walk->data may be pointing at the first byte of the next page;
	   however, we know we transferred at least one byte, so
	   walk->data - 1 will be a virtual address in the mapped page. */

	if (out)
		flush_dcache_page(walk->page);

	if (more) {
		walk->len_this_segment -= walk->len_this_page;

		if (walk->len_this_segment) {
			walk->page++;
			walk->len_this_page = min(walk->len_this_segment,
						  (unsigned)PAGE_CACHE_SIZE);
			walk->offset = 0;
		} else
			scatterwalk_start(walk, sg_next(walk->sg));
	}
}
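
/* Unmap the current data pointer; when the page is exhausted or the
   walk is over, let scatter_page_done advance or flush it. */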
static void scatter_done(struct scatter_walk *walk, int out, int more)
{
	crypto_kunmap(walk->data, out);
	if (walk->len_this_page == 0 || !more)
		scatter_page_done(walk, out, more);
}
/*
 * Do not call this unless the total length of all of the fragments
 * has been verified as a multiple of the block size.
 */
static int copy_chunks(void *buf, struct scatter_walk *walk,
		       size_t nbytes, int out)
{
	if (buf != walk->data) {
		while (nbytes > walk->len_this_page) {
			memcpy_dir(buf, walk->data, walk->len_this_page, out);
			buf += walk->len_this_page;
			nbytes -= walk->len_this_page;

			crypto_kunmap(walk->data, out);
			scatter_page_done(walk, out, 1);
			scatterwalk_map(walk, out);
		}

		memcpy_dir(buf, walk->data, nbytes, out);
	}

	walk->offset += nbytes;
	walk->len_this_page -= nbytes;
	walk->len_this_segment -= nbytes;
	return 0;
}
/*
 * Generic encrypt/decrypt wrapper for ciphers: handles operations across
 * multiple page boundaries by using temporary blocks.  In user context,
 * the kernel is given a chance to schedule us once per block.
 */
static int crypt(struct crypto_tfm *tfm,
		 struct scatterlist *dst,
		 struct scatterlist *src,
		 unsigned int nbytes, cryptfn_t crfn,
		 procfn_t prfn, int enc, void *info)
{
	struct scatter_walk walk_in, walk_out;
	const unsigned int bsize = crypto_tfm_alg_blocksize(tfm);
	u8 tmp_src[nbytes > src->length ? bsize : 0];
	u8 tmp_dst[nbytes > dst->length ? bsize : 0];

	if (!nbytes)
		return 0;

	if (nbytes % bsize) {
		tfm->crt_flags |= CRYPTO_TFM_RES_BAD_BLOCK_LEN;
		return -EINVAL;
	}

	scatterwalk_start(&walk_in, src);
	scatterwalk_start(&walk_out, dst);

	for (;;) {
		u8 *src_p, *dst_p;

		scatterwalk_map(&walk_in, 0);
		scatterwalk_map(&walk_out, 1);
		src_p = which_buf(&walk_in, bsize, tmp_src);
		dst_p = which_buf(&walk_out, bsize, tmp_dst);

		nbytes -= bsize;

		copy_chunks(src_p, &walk_in, bsize, 0);

		prfn(tfm, dst_p, src_p, crfn, enc, info);

		scatter_done(&walk_in, 0, nbytes);

		copy_chunks(dst_p, &walk_out, bsize, 1);
		scatter_done(&walk_out, 1, nbytes);

		if (!nbytes)
			return 0;

		crypto_yield(tfm);
	}
}
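
/*
 * A minimal usage sketch of the wrapper above (hypothetical caller code:
 * it assumes a "des" cipher has been registered and that key, iv, buf and
 * len come from the caller; error handling is elided):
 *
 *	struct crypto_tfm *tfm = crypto_alloc_tfm("des", CRYPTO_TFM_MODE_CBC);
 *	struct scatterlist sg[1];
 *
 *	crypto_cipher_setkey(tfm, key, 8);
 *	crypto_cipher_set_iv(tfm, iv, crypto_tfm_alg_ivsize(tfm));
 *	sg[0].page = virt_to_page(buf);
 *	sg[0].offset = offset_in_page(buf);
 *	sg[0].length = len;	/* must be a multiple of the block size */
 *	crypto_cipher_encrypt(tfm, sg, sg, len);	/* in place */
 *	crypto_free_tfm(tfm);
 */

/* CBC: on encrypt, XOR the plaintext block into the IV, run the cipher,
   and keep the ciphertext as the next IV; on decrypt, run the cipher
   first, XOR the IV into the result, and keep the incoming ciphertext
   (staged on the stack when dst == src would clobber it) as the next
   IV. */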
static void cbc_process(struct crypto_tfm *tfm,
			u8 *dst, u8 *src, cryptfn_t fn, int enc, void *info)
{
	u8 *iv = info;

	/* Null encryption */
	if (!iv)
		return;

	if (enc) {
		tfm->crt_u.cipher.cit_xor_block(iv, src);
		fn(crypto_tfm_ctx(tfm), dst, iv);
		memcpy(iv, dst, crypto_tfm_alg_blocksize(tfm));
	} else {
		const int need_stack = (src == dst);
		u8 stack[need_stack ? crypto_tfm_alg_blocksize(tfm) : 0];
		u8 *buf = need_stack ? stack : dst;

		fn(crypto_tfm_ctx(tfm), buf, src);
		tfm->crt_u.cipher.cit_xor_block(buf, iv);
		memcpy(iv, src, crypto_tfm_alg_blocksize(tfm));
		if (buf != dst)
			memcpy(dst, buf, crypto_tfm_alg_blocksize(tfm));
	}
}
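
/* ECB processes each block independently; enc and info are unused. */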
static void ecb_process(struct crypto_tfm *tfm, u8 *dst, u8 *src,
			cryptfn_t fn, int enc, void *info)
{
	fn(crypto_tfm_ctx(tfm), dst, src);
}
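
/* Reject keys outside the algorithm's advertised size range before
   delegating to the cipher implementation's own setkey routine. */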
static int setkey(struct crypto_tfm *tfm, const u8 *key, unsigned int keylen)
{
	struct cipher_alg *cia = &tfm->__crt_alg->cra_cipher;

	if (keylen < cia->cia_min_keysize || keylen > cia->cia_max_keysize) {
		tfm->crt_flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
		return -EINVAL;
	} else
		return cia->cia_setkey(crypto_tfm_ctx(tfm), key, keylen,
				       &tfm->crt_flags);
}
static int ecb_encrypt(struct crypto_tfm *tfm,
		       struct scatterlist *dst,
		       struct scatterlist *src, unsigned int nbytes)
{
	return crypt(tfm, dst, src, nbytes,
		     tfm->__crt_alg->cra_cipher.cia_encrypt,
		     ecb_process, 1, NULL);
}
static int ecb_decrypt(struct crypto_tfm *tfm,
		       struct scatterlist *dst,
		       struct scatterlist *src,
		       unsigned int nbytes)
{
	return crypt(tfm, dst, src, nbytes,
		     tfm->__crt_alg->cra_cipher.cia_decrypt,
		     ecb_process, 1, NULL);
}
static int cbc_encrypt(struct crypto_tfm *tfm,
		       struct scatterlist *dst,
		       struct scatterlist *src,
		       unsigned int nbytes)
{
	return crypt(tfm, dst, src, nbytes,
		     tfm->__crt_alg->cra_cipher.cia_encrypt,
		     cbc_process, 1, tfm->crt_cipher.cit_iv);
}
static int cbc_encrypt_iv(struct crypto_tfm *tfm,
			  struct scatterlist *dst,
			  struct scatterlist *src,
			  unsigned int nbytes, u8 *iv)
{
	return crypt(tfm, dst, src, nbytes,
		     tfm->__crt_alg->cra_cipher.cia_encrypt,
		     cbc_process, 1, iv);
}
static int cbc_decrypt(struct crypto_tfm *tfm,
		       struct scatterlist *dst,
		       struct scatterlist *src,
		       unsigned int nbytes)
{
	return crypt(tfm, dst, src, nbytes,
		     tfm->__crt_alg->cra_cipher.cia_decrypt,
		     cbc_process, 0, tfm->crt_cipher.cit_iv);
}
static int cbc_decrypt_iv(struct crypto_tfm *tfm,
			  struct scatterlist *dst,
			  struct scatterlist *src,
			  unsigned int nbytes, u8 *iv)
{
	return crypt(tfm, dst, src, nbytes,
		     tfm->__crt_alg->cra_cipher.cia_decrypt,
		     cbc_process, 0, iv);
}
static int nocrypt(struct crypto_tfm *tfm,
		   struct scatterlist *dst,
		   struct scatterlist *src,
		   unsigned int nbytes)
{
	return -ENOSYS;
}
static int nocrypt_iv(struct crypto_tfm *tfm,
		      struct scatterlist *dst,
		      struct scatterlist *src,
		      unsigned int nbytes, u8 *iv)
{
	return -ENOSYS;
}
int crypto_init_cipher_flags(struct crypto_tfm *tfm, u32 flags)
{
	u32 mode = flags & CRYPTO_TFM_MODE_MASK;

	tfm->crt_cipher.cit_mode = mode ? mode : CRYPTO_TFM_MODE_ECB;
	if (flags & CRYPTO_TFM_REQ_WEAK_KEY)
		tfm->crt_flags |= CRYPTO_TFM_REQ_WEAK_KEY;

	return 0;
}
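
/* Wire up the mode-specific entry points for this transform; CFB and
   CTR are recognised but not yet implemented, so they get the nocrypt
   stubs.  CBC additionally needs an XOR helper and an IV buffer. */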
int crypto_init_cipher_ops(struct crypto_tfm *tfm)
{
	int ret = 0;
	struct cipher_tfm *ops = &tfm->crt_cipher;

	ops->cit_setkey = setkey;

	switch (tfm->crt_cipher.cit_mode) {
	case CRYPTO_TFM_MODE_ECB:
		ops->cit_encrypt = ecb_encrypt;
		ops->cit_decrypt = ecb_decrypt;
		break;

	case CRYPTO_TFM_MODE_CBC:
		ops->cit_encrypt = cbc_encrypt;
		ops->cit_decrypt = cbc_decrypt;
		ops->cit_encrypt_iv = cbc_encrypt_iv;
		ops->cit_decrypt_iv = cbc_decrypt_iv;
		break;

	case CRYPTO_TFM_MODE_CFB:
		ops->cit_encrypt = nocrypt;
		ops->cit_decrypt = nocrypt;
		ops->cit_encrypt_iv = nocrypt_iv;
		ops->cit_decrypt_iv = nocrypt_iv;
		break;

	case CRYPTO_TFM_MODE_CTR:
		ops->cit_encrypt = nocrypt;
		ops->cit_decrypt = nocrypt;
		ops->cit_encrypt_iv = nocrypt_iv;
		ops->cit_decrypt_iv = nocrypt_iv;
		break;

	default:
		BUG();
	}

	if (ops->cit_mode == CRYPTO_TFM_MODE_CBC) {

		switch (crypto_tfm_alg_blocksize(tfm)) {
		case 8:
			ops->cit_xor_block = xor_64;
			break;

		case 16:
			ops->cit_xor_block = xor_128;
			break;

		default:
			printk(KERN_WARNING "%s: block size %u not supported\n",
			       crypto_tfm_alg_name(tfm),
			       crypto_tfm_alg_blocksize(tfm));
			ret = -EINVAL;
			goto out;
		}

		ops->cit_ivsize = crypto_tfm_alg_blocksize(tfm);
		ops->cit_iv = kmalloc(ops->cit_ivsize, GFP_KERNEL);
		if (ops->cit_iv == NULL)
			ret = -ENOMEM;
	}

out:
	return ret;
}
void crypto_exit_cipher_ops(struct crypto_tfm *tfm)
{
	if (tfm->crt_cipher.cit_iv)
		kfree(tfm->crt_cipher.cit_iv);
}