/*
 * eng_cryptodev.c was modified in an openssl 0.9.8 friendly way.
 * [cryptodev-linux.git] / cryptlib.c
 * blob fad5ba6cb54306c25aa5b9f82e1cc89e1a9ebdb1
 */
1 /*
2 * Driver for /dev/crypto device (aka CryptoDev)
4 * Copyright (c) 2010,2011 Nikos Mavrogiannopoulos <nmav@gnutls.org>
5 * Portions Copyright (c) 2010 Michael Weiser
6 * Portions Copyright (c) 2010 Phil Sutter
8 * This file is part of linux cryptodev.
10 * This program is free software; you can redistribute it and/or
11 * modify it under the terms of the GNU General Public License
12 * as published by the Free Software Foundation; either version 2
13 * of the License, or (at your option) any later version.
15 * This program is distributed in the hope that it will be useful,
16 * but WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
18 * GNU General Public License for more details.
20 * You should have received a copy of the GNU General Public License
21 * along with this program; if not, write to the Free Software
22 * Foundation, Inc.,
 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
 */
26 #include <linux/crypto.h>
27 #include <linux/mm.h>
28 #include <linux/highmem.h>
29 #include <linux/ioctl.h>
30 #include <linux/random.h>
31 #include <linux/scatterlist.h>
32 #include <linux/uaccess.h>
33 #include <crypto/algapi.h>
34 #include <crypto/hash.h>
35 #include <crypto/cryptodev.h>
36 #include <crypto/aead.h>
37 #include "cryptodev_int.h"
/* Per-request completion context shared between the submitting thread
 * and the async callback: cryptodev_complete() records the final status
 * in 'err' and signals 'completion'; waitfor() blocks on it. */
struct cryptodev_result {
	struct completion completion;	/* signalled when the request finishes */
	int err;			/* final status reported by the driver */
};
45 static void cryptodev_complete(struct crypto_async_request *req, int err)
47 struct cryptodev_result *res = req->data;
49 if (err == -EINPROGRESS)
50 return;
52 res->err = err;
53 complete(&res->completion);
56 int cryptodev_cipher_init(struct cipher_data *out, const char *alg_name,
57 uint8_t *keyp, size_t keylen, int stream, int aead)
59 int ret;
61 memset(out, 0, sizeof(*out));
63 if (aead == 0) {
64 struct ablkcipher_alg *alg;
66 out->async.s = crypto_alloc_ablkcipher(alg_name, 0, 0);
67 if (unlikely(IS_ERR(out->async.s))) {
68 dprintk(1, KERN_DEBUG, "Failed to load cipher %s\n", alg_name);
69 return -EINVAL;
72 alg = crypto_ablkcipher_alg(out->async.s);
73 if (alg != NULL) {
74 /* Was correct key length supplied? */
75 if (alg->max_keysize > 0 &&
76 unlikely((keylen < alg->min_keysize) ||
77 (keylen > alg->max_keysize))) {
78 dprintk(1, KERN_DEBUG,
79 "Wrong keylen '%zu' for algorithm '%s'. \
80 Use %u to %u.\n",
81 keylen, alg_name, alg->min_keysize,
82 alg->max_keysize);
83 ret = -EINVAL;
84 goto error;
88 out->blocksize = crypto_ablkcipher_blocksize(out->async.s);
89 out->ivsize = crypto_ablkcipher_ivsize(out->async.s);
90 out->alignmask = crypto_ablkcipher_alignmask(out->async.s);
92 ret = crypto_ablkcipher_setkey(out->async.s, keyp, keylen);
93 } else {
94 out->async.as = crypto_alloc_aead(alg_name, 0, 0);
95 if (unlikely(IS_ERR(out->async.as))) {
96 dprintk(1, KERN_DEBUG, "Failed to load cipher %s\n", alg_name);
97 return -EINVAL;
100 out->blocksize = crypto_aead_blocksize(out->async.as);
101 out->ivsize = crypto_aead_ivsize(out->async.as);
102 out->alignmask = crypto_aead_alignmask(out->async.as);
104 ret = crypto_aead_setkey(out->async.as, keyp, keylen);
107 if (unlikely(ret)) {
108 dprintk(1, KERN_DEBUG, "Setting key failed for %s-%zu.\n",
109 alg_name, keylen*8);
110 ret = -EINVAL;
111 goto error;
114 out->stream = stream;
115 out->aead = aead;
117 out->async.result = kmalloc(sizeof(*out->async.result), GFP_KERNEL);
118 if (unlikely(!out->async.result)) {
119 ret = -ENOMEM;
120 goto error;
123 memset(out->async.result, 0, sizeof(*out->async.result));
124 init_completion(&out->async.result->completion);
126 if (aead == 0) {
127 out->async.request = ablkcipher_request_alloc(out->async.s, GFP_KERNEL);
128 if (unlikely(!out->async.request)) {
129 dprintk(1, KERN_ERR, "error allocating async crypto request\n");
130 ret = -ENOMEM;
131 goto error;
134 ablkcipher_request_set_callback(out->async.request,
135 CRYPTO_TFM_REQ_MAY_BACKLOG,
136 cryptodev_complete, out->async.result);
137 } else {
138 out->async.arequest = aead_request_alloc(out->async.as, GFP_KERNEL);
139 if (unlikely(!out->async.arequest)) {
140 dprintk(1, KERN_ERR, "error allocating async crypto request\n");
141 ret = -ENOMEM;
142 goto error;
145 aead_request_set_callback(out->async.arequest,
146 CRYPTO_TFM_REQ_MAY_BACKLOG,
147 cryptodev_complete, out->async.result);
150 out->init = 1;
151 return 0;
152 error:
153 if (aead == 0) {
154 if (out->async.request)
155 ablkcipher_request_free(out->async.request);
156 if (out->async.s)
157 crypto_free_ablkcipher(out->async.s);
158 } else {
159 if (out->async.arequest)
160 aead_request_free(out->async.arequest);
161 if (out->async.s)
162 crypto_free_aead(out->async.as);
164 kfree(out->async.result);
166 return ret;
169 void cryptodev_cipher_deinit(struct cipher_data *cdata)
171 if (cdata->init) {
172 if (cdata->aead == 0) {
173 if (cdata->async.request)
174 ablkcipher_request_free(cdata->async.request);
175 if (cdata->async.s)
176 crypto_free_ablkcipher(cdata->async.s);
177 } else {
178 if (cdata->async.arequest)
179 aead_request_free(cdata->async.arequest);
180 if (cdata->async.as)
181 crypto_free_aead(cdata->async.as);
184 kfree(cdata->async.result);
185 cdata->init = 0;
189 static inline int waitfor(struct cryptodev_result *cr, ssize_t ret)
191 switch (ret) {
192 case 0:
193 break;
194 case -EINPROGRESS:
195 case -EBUSY:
196 wait_for_completion(&cr->completion);
197 /* At this point we known for sure the request has finished,
198 * because wait_for_completion above was not interruptible.
199 * This is important because otherwise hardware or driver
200 * might try to access memory which will be freed or reused for
201 * another request. */
203 if (unlikely(cr->err)) {
204 dprintk(0, KERN_ERR, "error from async request: %d\n",
205 cr->err);
206 return cr->err;
209 break;
210 default:
211 return ret;
214 return 0;
217 ssize_t cryptodev_cipher_encrypt(struct cipher_data *cdata,
218 const struct scatterlist *src, struct scatterlist *dst,
219 size_t len)
221 int ret;
223 INIT_COMPLETION(cdata->async.result->completion);
225 if (cdata->aead == 0) {
226 ablkcipher_request_set_crypt(cdata->async.request,
227 (struct scatterlist *)src, dst,
228 len, cdata->async.iv);
229 ret = crypto_ablkcipher_encrypt(cdata->async.request);
230 } else {
231 aead_request_set_crypt(cdata->async.arequest,
232 (struct scatterlist *)src, dst,
233 len, cdata->async.iv);
234 ret = crypto_aead_encrypt(cdata->async.arequest);
237 return waitfor(cdata->async.result, ret);
240 ssize_t cryptodev_cipher_decrypt(struct cipher_data *cdata,
241 const struct scatterlist *src, struct scatterlist *dst,
242 size_t len)
244 int ret;
246 INIT_COMPLETION(cdata->async.result->completion);
247 if (cdata->aead == 0) {
248 ablkcipher_request_set_crypt(cdata->async.request,
249 (struct scatterlist *)src, dst,
250 len, cdata->async.iv);
251 ret = crypto_ablkcipher_decrypt(cdata->async.request);
252 } else {
253 aead_request_set_crypt(cdata->async.arequest,
254 (struct scatterlist *)src, dst,
255 len, cdata->async.iv);
256 ret = crypto_aead_decrypt(cdata->async.arequest);
259 return waitfor(cdata->async.result, ret);
262 /* Hash functions */
264 int cryptodev_hash_init(struct hash_data *hdata, const char *alg_name,
265 int hmac_mode, void *mackey, size_t mackeylen)
267 int ret;
269 hdata->async.s = crypto_alloc_ahash(alg_name, 0, 0);
270 if (unlikely(IS_ERR(hdata->async.s))) {
271 dprintk(1, KERN_DEBUG, "Failed to load transform for %s\n", alg_name);
272 return -EINVAL;
275 /* Copy the key from user and set to TFM. */
276 if (hmac_mode != 0) {
277 ret = crypto_ahash_setkey(hdata->async.s, mackey, mackeylen);
278 if (unlikely(ret)) {
279 dprintk(1, KERN_DEBUG,
280 "Setting hmac key failed for %s-%zu.\n",
281 alg_name, mackeylen*8);
282 ret = -EINVAL;
283 goto error;
287 hdata->digestsize = crypto_ahash_digestsize(hdata->async.s);
288 hdata->alignmask = crypto_ahash_alignmask(hdata->async.s);
290 hdata->async.result = kmalloc(sizeof(*hdata->async.result), GFP_KERNEL);
291 if (unlikely(!hdata->async.result)) {
292 ret = -ENOMEM;
293 goto error;
296 memset(hdata->async.result, 0, sizeof(*hdata->async.result));
297 init_completion(&hdata->async.result->completion);
299 hdata->async.request = ahash_request_alloc(hdata->async.s, GFP_KERNEL);
300 if (unlikely(!hdata->async.request)) {
301 dprintk(0, KERN_ERR, "error allocating async crypto request\n");
302 ret = -ENOMEM;
303 goto error;
306 ahash_request_set_callback(hdata->async.request,
307 CRYPTO_TFM_REQ_MAY_BACKLOG,
308 cryptodev_complete, hdata->async.result);
310 ret = crypto_ahash_init(hdata->async.request);
311 if (unlikely(ret)) {
312 dprintk(0, KERN_ERR, "error in crypto_hash_init()\n");
313 goto error_request;
316 hdata->init = 1;
317 return 0;
319 error_request:
320 ahash_request_free(hdata->async.request);
321 error:
322 kfree(hdata->async.result);
323 crypto_free_ahash(hdata->async.s);
324 return ret;
327 void cryptodev_hash_deinit(struct hash_data *hdata)
329 if (hdata->init) {
330 if (hdata->async.request)
331 ahash_request_free(hdata->async.request);
332 kfree(hdata->async.result);
333 if (hdata->async.s)
334 crypto_free_ahash(hdata->async.s);
335 hdata->init = 0;
339 int cryptodev_hash_reset(struct hash_data *hdata)
341 int ret;
343 ret = crypto_ahash_init(hdata->async.request);
344 if (unlikely(ret)) {
345 dprintk(0, KERN_ERR, "error in crypto_hash_init()\n");
346 return ret;
349 return 0;
353 ssize_t cryptodev_hash_update(struct hash_data *hdata,
354 struct scatterlist *sg, size_t len)
356 int ret;
358 INIT_COMPLETION(hdata->async.result->completion);
359 ahash_request_set_crypt(hdata->async.request, sg, NULL, len);
361 ret = crypto_ahash_update(hdata->async.request);
363 return waitfor(hdata->async.result, ret);
366 int cryptodev_hash_final(struct hash_data *hdata, void* output)
368 int ret;
370 INIT_COMPLETION(hdata->async.result->completion);
371 ahash_request_set_crypt(hdata->async.request, NULL, output, 0);
373 ret = crypto_ahash_final(hdata->async.request);
375 return waitfor(hdata->async.result, ret);