Documented fix.
[cryptodev-linux.git] / cryptodev_cipher.c
blob 123baa1cf9edff02f032a0bc9a490d7e04a303dc
/*
 * Driver for /dev/crypto device (aka CryptoDev)
 *
 * Copyright (c) 2010 Nikos Mavrogiannopoulos <nmav@gnutls.org>
 *
 * This file is part of linux cryptodev.
 *
 * cryptodev is free software: you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation, either version 3 of the License, or
 * (at your option) any later version.
 *
 * cryptodev is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */
#include <linux/crypto.h>
#include <linux/mm.h>
#include <linux/highmem.h>
#include <linux/random.h>
#include <asm/uaccess.h>
#include <asm/ioctl.h>
#include <linux/scatterlist.h>
#include <crypto/algapi.h>
#include <crypto/hash.h>
#include "cryptodev.h"
#include "cryptodev_int.h"
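
/*
 * Completion bookkeeping for asynchronous crypto requests: the transform
 * invokes cryptodev_complete() when a request finishes, which records the
 * driver's status and wakes whoever is sleeping in waitfor() below.
 */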
struct cryptodev_result {
	struct completion completion;
	int err;
};

static void cryptodev_complete(struct crypto_async_request *req, int err)
{
	struct cryptodev_result *res = req->data;

	if (err == -EINPROGRESS)
		return;

	res->err = err;
	complete(&res->completion);
}
int cryptodev_cipher_init(struct cipher_data *out, const char *alg_name,
			  uint8_t *keyp, size_t keylen)
{
	struct ablkcipher_alg *alg;
	int ret;

	memset(out, 0, sizeof(*out));

	out->init = 1;

	out->async.s = crypto_alloc_ablkcipher(alg_name, 0, 0);
	if (unlikely(IS_ERR(out->async.s))) {
		dprintk(1, KERN_DEBUG, "%s: Failed to load cipher %s\n",
			__func__, alg_name);
		return -EINVAL;
	}

	alg = crypto_ablkcipher_alg(out->async.s);

	if (alg != NULL) {
		/* Was correct key length supplied? */
		if (alg->max_keysize > 0 &&
		    unlikely((keylen < alg->min_keysize) ||
			     (keylen > alg->max_keysize))) {
			dprintk(1, KERN_DEBUG,
				"Wrong keylen '%zu' for algorithm '%s'. Use %u to %u.\n",
				keylen, alg_name, alg->min_keysize,
				alg->max_keysize);
			ret = -EINVAL;
			goto error;
		}
	}

	ret = crypto_ablkcipher_setkey(out->async.s, keyp, keylen);
	if (unlikely(ret)) {
		dprintk(1, KERN_DEBUG, "Setting key failed for %s-%zu.\n",
			alg_name, keylen * 8);
		ret = -EINVAL;
		goto error;
	}

	out->blocksize = crypto_ablkcipher_blocksize(out->async.s);
	out->ivsize = crypto_ablkcipher_ivsize(out->async.s);

	out->async.result = kmalloc(sizeof(*out->async.result), GFP_KERNEL);
	if (unlikely(!out->async.result)) {
		ret = -ENOMEM;
		goto error;
	}

	memset(out->async.result, 0, sizeof(*out->async.result));
	init_completion(&out->async.result->completion);

	out->async.request = ablkcipher_request_alloc(out->async.s, GFP_KERNEL);
	if (unlikely(!out->async.request)) {
		dprintk(1, KERN_ERR, "error allocating async crypto request\n");
		ret = -ENOMEM;
		goto error;
	}

	ablkcipher_request_set_callback(out->async.request,
					CRYPTO_TFM_REQ_MAY_BACKLOG,
					cryptodev_complete, out->async.result);

	return 0;
error:
	crypto_free_ablkcipher(out->async.s);
	kfree(out->async.result);
	ablkcipher_request_free(out->async.request);

	return ret;
}
void cryptodev_cipher_deinit(struct cipher_data *cdata)
{
	crypto_free_ablkcipher(cdata->async.s);
	kfree(cdata->async.result);
	ablkcipher_request_free(cdata->async.request);

	cdata->init = 0;
}
void cryptodev_cipher_set_iv(struct cipher_data *cdata, void __user *iv,
			     size_t iv_size)
{
	memcpy(cdata->async.iv, iv, min(iv_size, sizeof(cdata->async.iv)));
}
static inline int waitfor(struct cryptodev_result *cr, ssize_t ret)
{
	switch (ret) {
	case 0:
		break;
	case -EINPROGRESS:
	case -EBUSY:
		wait_for_completion(&cr->completion);
		/* At this point we know for sure the request has finished,
		 * because wait_for_completion above was not interruptible.
		 * This is important because otherwise hardware or driver
		 * might try to access memory which will be freed or reused
		 * for another request. */

		if (unlikely(cr->err)) {
			dprintk(0, KERN_ERR, "error from async request: %d\n",
				cr->err);
			return cr->err;
		}

		break;
	default:
		return ret;
	}

	return 0;
}
ssize_t cryptodev_cipher_encrypt(struct cipher_data *cdata,
				 struct scatterlist *sg1,
				 struct scatterlist *sg2, size_t len)
{
	int ret;

	INIT_COMPLETION(cdata->async.result->completion);
	ablkcipher_request_set_crypt(cdata->async.request, sg1, sg2,
				     len, cdata->async.iv);
	ret = crypto_ablkcipher_encrypt(cdata->async.request);

	return waitfor(cdata->async.result, ret);
}
ssize_t cryptodev_cipher_decrypt(struct cipher_data *cdata,
				 struct scatterlist *sg1,
				 struct scatterlist *sg2, size_t len)
{
	int ret;

	INIT_COMPLETION(cdata->async.result->completion);
	ablkcipher_request_set_crypt(cdata->async.request, sg1, sg2,
				     len, cdata->async.iv);
	ret = crypto_ablkcipher_decrypt(cdata->async.request);

	return waitfor(cdata->async.result, ret);
}
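
/*
 * Illustrative sketch, not part of the original file: how a caller could
 * drive the cipher helpers above for one in-place encryption.  It assumes
 * the struct cipher_data layout declared in cryptodev_int.h; the algorithm
 * name, key, IV and buffer sizes are arbitrary example values.
 */
static int __maybe_unused example_cipher_usage(void)
{
	struct cipher_data cdata;
	struct scatterlist sg;
	uint8_t key[16] = { 0 };	/* example AES-128 key */
	uint8_t iv[16] = { 0 };		/* example IV */
	uint8_t buf[32] = { 0 };	/* two AES blocks of example data */
	ssize_t nbytes;
	int ret;

	ret = cryptodev_cipher_init(&cdata, "cbc(aes)", key, sizeof(key));
	if (ret)
		return ret;

	/* set_iv() only memcpy()s, so a kernel buffer works here despite
	 * the __user annotation on its prototype. */
	cryptodev_cipher_set_iv(&cdata, iv, sizeof(iv));

	/* Encrypt in place: the same scatterlist is passed as source and
	 * destination. */
	sg_init_one(&sg, buf, sizeof(buf));
	nbytes = cryptodev_cipher_encrypt(&cdata, &sg, &sg, sizeof(buf));

	cryptodev_cipher_deinit(&cdata);
	return nbytes < 0 ? (int)nbytes : 0;
}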
/* Hash functions */
int cryptodev_hash_init(struct hash_data *hdata, const char *alg_name,
			int hmac_mode, void *mackey, size_t mackeylen)
{
	int ret;

	hdata->init = 1;

	hdata->async.s = crypto_alloc_ahash(alg_name, 0, 0);
	if (unlikely(IS_ERR(hdata->async.s))) {
		dprintk(1, KERN_DEBUG, "%s: Failed to load transform for %s\n",
			__func__, alg_name);
		return -EINVAL;
	}

	/* Copy the key from user and set to TFM. */
	if (hmac_mode != 0) {
		ret = crypto_ahash_setkey(hdata->async.s, mackey, mackeylen);
		if (unlikely(ret)) {
			dprintk(1, KERN_DEBUG,
				"Setting hmac key failed for %s-%zu.\n",
				alg_name, mackeylen * 8);
			ret = -EINVAL;
			goto error;
		}
	}

	hdata->digestsize = crypto_ahash_digestsize(hdata->async.s);

	hdata->async.result = kmalloc(sizeof(*hdata->async.result), GFP_KERNEL);
	if (unlikely(!hdata->async.result)) {
		ret = -ENOMEM;
		goto error;
	}

	memset(hdata->async.result, 0, sizeof(*hdata->async.result));
	init_completion(&hdata->async.result->completion);

	hdata->async.request = ahash_request_alloc(hdata->async.s, GFP_KERNEL);
	if (unlikely(!hdata->async.request)) {
		dprintk(0, KERN_ERR, "error allocating async crypto request\n");
		kfree(hdata->async.result);
		ret = -ENOMEM;
		goto error;
	}

	ahash_request_set_callback(hdata->async.request,
				   CRYPTO_TFM_REQ_MAY_BACKLOG,
				   cryptodev_complete, hdata->async.result);

	return 0;

error:
	crypto_free_ahash(hdata->async.s);
	return ret;
}
void cryptodev_hash_deinit(struct hash_data *hdata)
{
	crypto_free_ahash(hdata->async.s);
	hdata->init = 0;
}
int cryptodev_hash_reset(struct hash_data *hdata)
{
	int ret;

	ret = crypto_ahash_init(hdata->async.request);
	if (unlikely(ret)) {
		dprintk(0, KERN_ERR, "error in crypto_hash_init()\n");
		return ret;
	}

	return 0;
}
ssize_t cryptodev_hash_update(struct hash_data *hdata,
			      struct scatterlist *sg, size_t len)
{
	int ret;

	INIT_COMPLETION(hdata->async.result->completion);
	ahash_request_set_crypt(hdata->async.request, sg, NULL, len);

	ret = crypto_ahash_update(hdata->async.request);

	return waitfor(hdata->async.result, ret);
}
int cryptodev_hash_final(struct hash_data *hdata, void *output)
{
	int ret;

	INIT_COMPLETION(hdata->async.result->completion);
	ahash_request_set_crypt(hdata->async.request, NULL, output, 0);

	ret = crypto_ahash_final(hdata->async.request);

	return waitfor(hdata->async.result, ret);
}
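
/*
 * Illustrative sketch, not part of the original file: a one-shot digest
 * using the hash helpers above.  It assumes the struct hash_data layout
 * declared in cryptodev_int.h; "sha1" and the buffer sizes are example
 * values.
 */
static int __maybe_unused example_hash_usage(void)
{
	struct hash_data hdata;
	struct scatterlist sg;
	uint8_t data[64] = { 0 };	/* example message */
	uint8_t digest[20];		/* SHA-1 digest is 20 bytes */
	int ret;

	ret = cryptodev_hash_init(&hdata, "sha1", 0, NULL, 0);
	if (ret)
		return ret;

	ret = cryptodev_hash_reset(&hdata);
	if (ret)
		goto out;

	sg_init_one(&sg, data, sizeof(data));
	ret = (int)cryptodev_hash_update(&hdata, &sg, sizeof(data));
	if (ret)
		goto out;

	ret = cryptodev_hash_final(&hdata, digest);
out:
	cryptodev_hash_deinit(&hdata);
	return ret;
}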