cryptodev-linux.git / cryptodev_cipher.c

/*
 * Driver for /dev/crypto device (aka CryptoDev)
 *
 * Copyright (c) 2010 Nikos Mavrogiannopoulos <nmav@gnutls.org>
 * Portions Copyright (c) 2010 Michael Weiser
 * Portions Copyright (c) 2010 Phil Sutter
 *
 * This file is part of linux cryptodev.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc.,
 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
 */

#include <linux/crypto.h>
#include <linux/mm.h>
#include <linux/highmem.h>
#include <linux/ioctl.h>
#include <linux/random.h>
#include <linux/scatterlist.h>
#include <linux/uaccess.h>
#include <crypto/algapi.h>
#include <crypto/hash.h>
#include "cryptodev.h"
#include "cryptodev_int.h"

struct cryptodev_result {
        struct completion completion;
        int err;
};

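/* Completion callback registered with the crypto API. -EINPROGRESS only
 * signals that a backlogged request has started processing, so it is
 * ignored; any other value is the final result and wakes the waiter. */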
static void cryptodev_complete(struct crypto_async_request *req, int err)
{
        struct cryptodev_result *res = req->data;

        if (err == -EINPROGRESS)
                return;

        res->err = err;
        complete(&res->completion);
}

int cryptodev_cipher_init(struct cipher_data *out, const char *alg_name,
                          uint8_t *keyp, size_t keylen)
{
        struct ablkcipher_alg *alg;
        int ret;

        memset(out, 0, sizeof(*out));

        out->async.s = crypto_alloc_ablkcipher(alg_name, 0, 0);
        if (unlikely(IS_ERR(out->async.s))) {
                dprintk(1, KERN_DEBUG, "%s: Failed to load cipher %s\n",
                        __func__, alg_name);
                return -EINVAL;
        }

        alg = crypto_ablkcipher_alg(out->async.s);

        if (alg != NULL) {
                /* Was correct key length supplied? */
                if (alg->max_keysize > 0 &&
                    unlikely((keylen < alg->min_keysize) ||
                             (keylen > alg->max_keysize))) {
                        dprintk(1, KERN_DEBUG,
                                "Wrong keylen '%zu' for algorithm '%s'. "
                                "Use %u to %u.\n",
                                keylen, alg_name, alg->min_keysize,
                                alg->max_keysize);
                        ret = -EINVAL;
                        goto error;
                }
        }

        ret = crypto_ablkcipher_setkey(out->async.s, keyp, keylen);
        if (unlikely(ret)) {
                dprintk(1, KERN_DEBUG, "Setting key failed for %s-%zu.\n",
                        alg_name, keylen*8);
                ret = -EINVAL;
                goto error;
        }

        out->blocksize = crypto_ablkcipher_blocksize(out->async.s);
        out->ivsize = crypto_ablkcipher_ivsize(out->async.s);
        out->alignmask = crypto_ablkcipher_alignmask(out->async.s);

        out->async.result = kmalloc(sizeof(*out->async.result), GFP_KERNEL);
        if (unlikely(!out->async.result)) {
                ret = -ENOMEM;
                goto error;
        }

        memset(out->async.result, 0, sizeof(*out->async.result));
        init_completion(&out->async.result->completion);

        out->async.request = ablkcipher_request_alloc(out->async.s, GFP_KERNEL);
        if (unlikely(!out->async.request)) {
                dprintk(1, KERN_ERR, "error allocating async crypto request\n");
                ret = -ENOMEM;
                goto error;
        }

        ablkcipher_request_set_callback(out->async.request,
                                        CRYPTO_TFM_REQ_MAY_BACKLOG,
                                        cryptodev_complete, out->async.result);

        out->init = 1;
        return 0;
error:
        if (out->async.request)
                ablkcipher_request_free(out->async.request);
        kfree(out->async.result);
        if (out->async.s)
                crypto_free_ablkcipher(out->async.s);

        return ret;
}

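/* Free the request, result and transform allocated by
 * cryptodev_cipher_init(); a no-op unless cdata->init is set. */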
void cryptodev_cipher_deinit(struct cipher_data *cdata)
{
        if (cdata->init) {
                if (cdata->async.request)
                        ablkcipher_request_free(cdata->async.request);
                kfree(cdata->async.result);
                if (cdata->async.s)
                        crypto_free_ablkcipher(cdata->async.s);

                cdata->init = 0;
        }
}

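/* Interpret the return value of an asynchronous crypto call: 0 means it
 * completed synchronously, -EINPROGRESS/-EBUSY mean we must sleep until
 * cryptodev_complete() signals the result, anything else is an error. */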
static inline int waitfor(struct cryptodev_result *cr, ssize_t ret)
{
        switch (ret) {
        case 0:
                break;
        case -EINPROGRESS:
        case -EBUSY:
                wait_for_completion(&cr->completion);
                /* At this point we know for sure the request has finished,
                 * because wait_for_completion above was not interruptible.
                 * This is important because otherwise hardware or driver
                 * might try to access memory which will be freed or reused
                 * for another request. */
                if (unlikely(cr->err)) {
                        dprintk(0, KERN_ERR, "error from async request: %d\n",
                                cr->err);
                        return cr->err;
                }
                break;
        default:
                return ret;
        }

        return 0;
}

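/* Encrypt len bytes from sg1 into sg2 with the IV currently held in
 * cdata->async.iv, blocking until the asynchronous request completes. */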
ssize_t cryptodev_cipher_encrypt(struct cipher_data *cdata,
                const const struct scatterlist *sg1, struct scatterlist *sg2,
                size_t len)
{
        int ret;

        INIT_COMPLETION(cdata->async.result->completion);
        ablkcipher_request_set_crypt(cdata->async.request,
                                     (struct scatterlist *)sg1, sg2,
                                     len, cdata->async.iv);
        ret = crypto_ablkcipher_encrypt(cdata->async.request);

        return waitfor(cdata->async.result, ret);
}

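/* Decrypt counterpart of cryptodev_cipher_encrypt(). */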
ssize_t cryptodev_cipher_decrypt(struct cipher_data *cdata,
                const struct scatterlist *sg1, struct scatterlist *sg2,
                size_t len)
{
        int ret;

        INIT_COMPLETION(cdata->async.result->completion);
        ablkcipher_request_set_crypt(cdata->async.request,
                                     (struct scatterlist *)sg1, sg2,
                                     len, cdata->async.iv);
        ret = crypto_ablkcipher_decrypt(cdata->async.request);

        return waitfor(cdata->async.result, ret);
}

/* Hash functions */

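/* Allocate an ahash transform for alg_name (setting the MAC key first when
 * hmac_mode is non-zero) and start a hash operation on it. */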
int cryptodev_hash_init(struct hash_data *hdata, const char *alg_name,
                        int hmac_mode, void *mackey, size_t mackeylen)
{
        int ret;

        hdata->async.s = crypto_alloc_ahash(alg_name, 0, 0);
        if (unlikely(IS_ERR(hdata->async.s))) {
                dprintk(1, KERN_DEBUG, "%s: Failed to load transform for %s\n",
                        __func__, alg_name);
                return -EINVAL;
        }

        /* Copy the key from user and set to TFM. */
        if (hmac_mode != 0) {
                ret = crypto_ahash_setkey(hdata->async.s, mackey, mackeylen);
                if (unlikely(ret)) {
                        dprintk(1, KERN_DEBUG,
                                "Setting hmac key failed for %s-%zu.\n",
                                alg_name, mackeylen*8);
                        ret = -EINVAL;
                        goto error;
                }
        }

        hdata->digestsize = crypto_ahash_digestsize(hdata->async.s);
        hdata->alignmask = crypto_ahash_alignmask(hdata->async.s);

        hdata->async.result = kmalloc(sizeof(*hdata->async.result), GFP_KERNEL);
        if (unlikely(!hdata->async.result)) {
                ret = -ENOMEM;
                goto error;
        }

        memset(hdata->async.result, 0, sizeof(*hdata->async.result));
        init_completion(&hdata->async.result->completion);

        hdata->async.request = ahash_request_alloc(hdata->async.s, GFP_KERNEL);
        if (unlikely(!hdata->async.request)) {
                dprintk(0, KERN_ERR, "error allocating async crypto request\n");
                ret = -ENOMEM;
                goto error;
        }

        ahash_request_set_callback(hdata->async.request,
                                   CRYPTO_TFM_REQ_MAY_BACKLOG,
                                   cryptodev_complete, hdata->async.result);

        ret = crypto_ahash_init(hdata->async.request);
        if (unlikely(ret)) {
                dprintk(0, KERN_ERR, "error in crypto_hash_init()\n");
                goto error_request;
        }

        hdata->init = 1;
        return 0;

error_request:
        ahash_request_free(hdata->async.request);
error:
        kfree(hdata->async.result);
        crypto_free_ahash(hdata->async.s);
        return ret;
}

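/* Free the resources allocated by cryptodev_hash_init(); a no-op unless
 * hdata->init is set. */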
void cryptodev_hash_deinit(struct hash_data *hdata)
{
        if (hdata->init) {
                if (hdata->async.request)
                        ahash_request_free(hdata->async.request);
                kfree(hdata->async.result);
                if (hdata->async.s)
                        crypto_free_ahash(hdata->async.s);
                hdata->init = 0;
        }
}

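/* Re-initialise the pending hash request so the transform can be reused
 * for a new message. */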
int cryptodev_hash_reset(struct hash_data *hdata)
{
        int ret;

        ret = crypto_ahash_init(hdata->async.request);
        if (unlikely(ret)) {
                dprintk(0, KERN_ERR, "error in crypto_hash_init()\n");
                return ret;
        }

        return 0;
}

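/* Feed len bytes from sg into the ongoing hash, blocking until the
 * asynchronous request completes. */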
ssize_t cryptodev_hash_update(struct hash_data *hdata,
                              struct scatterlist *sg, size_t len)
{
        int ret;

        INIT_COMPLETION(hdata->async.result->completion);
        ahash_request_set_crypt(hdata->async.request, sg, NULL, len);

        ret = crypto_ahash_update(hdata->async.request);

        return waitfor(hdata->async.result, ret);
}

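/* Write the final digest (hdata->digestsize bytes) to output, blocking
 * until the asynchronous request completes. */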
int cryptodev_hash_final(struct hash_data *hdata, void *output)
{
        int ret;

        INIT_COMPLETION(hdata->async.result->completion);
        ahash_request_set_crypt(hdata->async.request, NULL, output, 0);

        ret = crypto_ahash_final(hdata->async.request);

        return waitfor(hdata->async.result, ret);
}
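
/*
 * Illustrative call sequence (not part of this file): a caller such as the
 * cryptodev ioctl layer might use the cipher helpers roughly as
 *
 *	struct cipher_data cdata;
 *	int ret;
 *
 *	ret = cryptodev_cipher_init(&cdata, "cbc(aes)", key, keylen);
 *	if (ret == 0) {
 *		ret = cryptodev_cipher_encrypt(&cdata, src_sg, dst_sg, len);
 *		cryptodev_cipher_deinit(&cdata);
 *	}
 *
 * with cdata.async.iv filled in by the caller before each encrypt/decrypt
 * call; "cbc(aes)", key, keylen and the scatterlists are example values,
 * not definitions from this file.
 */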