/*
 * Driver for /dev/crypto device (aka CryptoDev)
 *
 * Copyright (c) 2010,2011 Nikos Mavrogiannopoulos <nmav@gnutls.org>
 * Portions Copyright (c) 2010 Michael Weiser
 * Portions Copyright (c) 2010 Phil Sutter
 *
 * This file is part of linux cryptodev.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc.,
 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
 */
#include <linux/crypto.h>
#include <linux/mm.h>
#include <linux/highmem.h>
#include <linux/ioctl.h>
#include <linux/random.h>
#include <linux/scatterlist.h>
#include <linux/uaccess.h>
#include <crypto/algapi.h>
#include <crypto/hash.h>
#include <crypto/cryptodev.h>
#include <crypto/aead.h>
#include "cryptodev_int.h"
struct cryptodev_result {
        struct completion completion;
        int err;
};
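/* Completion callback attached to every async request below: ignore the
 * intermediate -EINPROGRESS notification, record the final status and
 * wake up the thread sleeping in waitfor(). */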
static void cryptodev_complete(struct crypto_async_request *req, int err)
{
        struct cryptodev_result *res = req->data;

        if (err == -EINPROGRESS)
                return;

        res->err = err;
        complete(&res->completion);
}
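/* Allocate a cipher (or AEAD) transform for alg_name, verify and set the
 * key, and set up the async request/completion pair used by the
 * encrypt/decrypt helpers below. Returns 0 or a negative errno, and
 * releases everything it allocated on failure.
 *
 * A minimal usage sketch (key, sg and len stand for caller-provided
 * data; the IV is expected in out->async.iv before encrypting):
 *
 *      struct cipher_data cdata;
 *
 *      if (cryptodev_cipher_init(&cdata, "cbc(aes)", key, 16, 0, 0) == 0) {
 *              cryptodev_cipher_encrypt(&cdata, sg, sg, len);
 *              cryptodev_cipher_deinit(&cdata);
 *      }
 */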
int cryptodev_cipher_init(struct cipher_data *out, const char *alg_name,
                          uint8_t *keyp, size_t keylen, int stream, int aead)
{
        int ret;

        memset(out, 0, sizeof(*out));

        if (aead == 0) {
                struct ablkcipher_alg *alg;

                out->async.s = crypto_alloc_ablkcipher(alg_name, 0, 0);
                if (unlikely(IS_ERR(out->async.s))) {
                        dprintk(1, KERN_DEBUG, "%s: Failed to load cipher %s\n",
                                __func__, alg_name);
                        return -EINVAL;
                }

                alg = crypto_ablkcipher_alg(out->async.s);
                if (alg != NULL) {
                        /* Was the correct key length supplied? */
                        if (alg->max_keysize > 0 &&
                            unlikely((keylen < alg->min_keysize) ||
                                     (keylen > alg->max_keysize))) {
                                dprintk(1, KERN_DEBUG,
                                        "Wrong keylen '%zu' for algorithm '%s'. "
                                        "Use %u to %u.\n",
                                        keylen, alg_name, alg->min_keysize,
                                        alg->max_keysize);
                                ret = -EINVAL;
                                goto error;
                        }
                }

                out->blocksize = crypto_ablkcipher_blocksize(out->async.s);
                out->ivsize = crypto_ablkcipher_ivsize(out->async.s);
                out->alignmask = crypto_ablkcipher_alignmask(out->async.s);

                ret = crypto_ablkcipher_setkey(out->async.s, keyp, keylen);
        } else {
                out->async.as = crypto_alloc_aead(alg_name, 0, 0);
                if (unlikely(IS_ERR(out->async.as))) {
                        dprintk(1, KERN_DEBUG, "%s: Failed to load cipher %s\n",
                                __func__, alg_name);
                        return -EINVAL;
                }

                out->blocksize = crypto_aead_blocksize(out->async.as);
                out->ivsize = crypto_aead_ivsize(out->async.as);
                out->alignmask = crypto_aead_alignmask(out->async.as);

                ret = crypto_aead_setkey(out->async.as, keyp, keylen);
        }
        if (unlikely(ret)) {
                dprintk(1, KERN_DEBUG, "Setting key failed for %s-%zu.\n",
                        alg_name, keylen*8);
                ret = -EINVAL;
                goto error;
        }

        out->stream = stream;
        out->aead = aead;

        out->async.result = kmalloc(sizeof(*out->async.result), GFP_KERNEL);
        if (unlikely(!out->async.result)) {
                ret = -ENOMEM;
                goto error;
        }

        memset(out->async.result, 0, sizeof(*out->async.result));
        init_completion(&out->async.result->completion);

        if (aead == 0) {
                out->async.request = ablkcipher_request_alloc(out->async.s, GFP_KERNEL);
                if (unlikely(!out->async.request)) {
                        dprintk(1, KERN_ERR, "error allocating async crypto request\n");
                        ret = -ENOMEM;
                        goto error;
                }

                ablkcipher_request_set_callback(out->async.request,
                                        CRYPTO_TFM_REQ_MAY_BACKLOG,
                                        cryptodev_complete, out->async.result);
        } else {
                out->async.arequest = aead_request_alloc(out->async.as, GFP_KERNEL);
                if (unlikely(!out->async.arequest)) {
                        dprintk(1, KERN_ERR, "error allocating async crypto request\n");
                        ret = -ENOMEM;
                        goto error;
                }

                aead_request_set_callback(out->async.arequest,
                                        CRYPTO_TFM_REQ_MAY_BACKLOG,
                                        cryptodev_complete, out->async.result);
        }

        out->init = 1;
        return 0;
error:
        if (aead == 0) {
                if (out->async.request)
                        ablkcipher_request_free(out->async.request);
                if (out->async.s)
                        crypto_free_ablkcipher(out->async.s);
        } else {
                if (out->async.arequest)
                        aead_request_free(out->async.arequest);
                if (out->async.as)
                        crypto_free_aead(out->async.as);
        }
        kfree(out->async.result);

        return ret;
}
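/* Undo cryptodev_cipher_init(): free the pending request and the
 * transform. The init flag makes this a no-op for data that was never
 * successfully initialized. */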
void cryptodev_cipher_deinit(struct cipher_data *cdata)
{
        if (cdata->init) {
                if (cdata->aead == 0) {
                        if (cdata->async.request)
                                ablkcipher_request_free(cdata->async.request);
                        if (cdata->async.s)
                                crypto_free_ablkcipher(cdata->async.s);
                } else {
                        if (cdata->async.arequest)
                                aead_request_free(cdata->async.arequest);
                        if (cdata->async.as)
                                crypto_free_aead(cdata->async.as);
                }

                kfree(cdata->async.result);
                cdata->init = 0;
        }
}
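/* Map the return value of an async crypto call to its final status:
 * 0 means the request completed synchronously; -EINPROGRESS and -EBUSY
 * mean it was queued (or backlogged, since CRYPTO_TFM_REQ_MAY_BACKLOG
 * is set) and we must sleep until cryptodev_complete() fires; anything
 * else is an immediate error. */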
static inline int waitfor(struct cryptodev_result *cr, ssize_t ret)
{
        switch (ret) {
        case 0:
                break;
        case -EINPROGRESS:
        case -EBUSY:
                wait_for_completion(&cr->completion);
                /* At this point we know for sure the request has finished,
                 * because wait_for_completion above was not interruptible.
                 * This is important because otherwise hardware or driver
                 * might try to access memory which will be freed or reused for
                 * another request. */
                if (unlikely(cr->err)) {
                        dprintk(0, KERN_ERR, "error from async request: %d\n",
                                cr->err);
                        return cr->err;
                }
                break;
        default:
                return ret;
        }

        return 0;
}
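/* Encrypt len bytes from src into dst with the IV previously placed in
 * cdata->async.iv, blocking until the request completes. */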
ssize_t cryptodev_cipher_encrypt(struct cipher_data *cdata,
                const struct scatterlist *src, struct scatterlist *dst,
                size_t len)
{
        int ret;

        INIT_COMPLETION(cdata->async.result->completion);

        if (cdata->aead == 0) {
                ablkcipher_request_set_crypt(cdata->async.request,
                                (struct scatterlist *)src, dst,
                                len, cdata->async.iv);
                ret = crypto_ablkcipher_encrypt(cdata->async.request);
        } else {
                aead_request_set_crypt(cdata->async.arequest,
                                (struct scatterlist *)src, dst,
                                len, cdata->async.iv);
                ret = crypto_aead_encrypt(cdata->async.arequest);
        }

        return waitfor(cdata->async.result, ret);
}
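/* Decrypt counterpart of cryptodev_cipher_encrypt(). */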
ssize_t cryptodev_cipher_decrypt(struct cipher_data *cdata,
                const struct scatterlist *src, struct scatterlist *dst,
                size_t len)
{
        int ret;

        INIT_COMPLETION(cdata->async.result->completion);

        if (cdata->aead == 0) {
                ablkcipher_request_set_crypt(cdata->async.request,
                                (struct scatterlist *)src, dst,
                                len, cdata->async.iv);
                ret = crypto_ablkcipher_decrypt(cdata->async.request);
        } else {
                aead_request_set_crypt(cdata->async.arequest,
                                (struct scatterlist *)src, dst,
                                len, cdata->async.iv);
                ret = crypto_aead_decrypt(cdata->async.arequest);
        }

        return waitfor(cdata->async.result, ret);
}
/* Hash functions */
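/* Allocate an async hash transform, key it when hmac_mode is set, and
 * prepare a request so that update/final calls can be issued against
 * it. Returns 0 or a negative errno. */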
int cryptodev_hash_init(struct hash_data *hdata, const char *alg_name,
                        int hmac_mode, void *mackey, size_t mackeylen)
{
        int ret;

        hdata->async.s = crypto_alloc_ahash(alg_name, 0, 0);
        if (unlikely(IS_ERR(hdata->async.s))) {
                dprintk(1, KERN_DEBUG, "%s: Failed to load transform for %s\n",
                        __func__, alg_name);
                return -EINVAL;
        }

        /* Copy the key from user and set to TFM. */
        if (hmac_mode != 0) {
                ret = crypto_ahash_setkey(hdata->async.s, mackey, mackeylen);
                if (unlikely(ret)) {
                        dprintk(1, KERN_DEBUG,
                                "Setting hmac key failed for %s-%zu.\n",
                                alg_name, mackeylen*8);
                        ret = -EINVAL;
                        goto error;
                }
        }

        hdata->digestsize = crypto_ahash_digestsize(hdata->async.s);
        hdata->alignmask = crypto_ahash_alignmask(hdata->async.s);

        hdata->async.result = kmalloc(sizeof(*hdata->async.result), GFP_KERNEL);
        if (unlikely(!hdata->async.result)) {
                ret = -ENOMEM;
                goto error;
        }

        memset(hdata->async.result, 0, sizeof(*hdata->async.result));
        init_completion(&hdata->async.result->completion);

        hdata->async.request = ahash_request_alloc(hdata->async.s, GFP_KERNEL);
        if (unlikely(!hdata->async.request)) {
                dprintk(0, KERN_ERR, "error allocating async crypto request\n");
                ret = -ENOMEM;
                goto error;
        }

        ahash_request_set_callback(hdata->async.request,
                        CRYPTO_TFM_REQ_MAY_BACKLOG,
                        cryptodev_complete, hdata->async.result);

        ret = crypto_ahash_init(hdata->async.request);
        if (unlikely(ret)) {
                dprintk(0, KERN_ERR, "error in crypto_hash_init()\n");
                goto error_request;
        }

        hdata->init = 1;
        return 0;

error_request:
        ahash_request_free(hdata->async.request);
error:
        kfree(hdata->async.result);
        crypto_free_ahash(hdata->async.s);
        return ret;
}
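/* Undo cryptodev_hash_init(); a no-op unless init was set. */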
void cryptodev_hash_deinit(struct hash_data *hdata)
{
        if (hdata->init) {
                if (hdata->async.request)
                        ahash_request_free(hdata->async.request);
                kfree(hdata->async.result);
                if (hdata->async.s)
                        crypto_free_ahash(hdata->async.s);
                hdata->init = 0;
        }
}
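/* Reinitialize the existing request so the same transform can hash a
 * new message from scratch. */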
int cryptodev_hash_reset(struct hash_data *hdata)
{
        int ret;

        ret = crypto_ahash_init(hdata->async.request);
        if (unlikely(ret)) {
                dprintk(0, KERN_ERR, "error in crypto_hash_init()\n");
                return ret;
        }

        return 0;
}
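/* Feed len bytes from the scatterlist into the running hash. */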
ssize_t cryptodev_hash_update(struct hash_data *hdata,
                              struct scatterlist *sg, size_t len)
{
        int ret;

        INIT_COMPLETION(hdata->async.result->completion);
        ahash_request_set_crypt(hdata->async.request, sg, NULL, len);

        ret = crypto_ahash_update(hdata->async.request);

        return waitfor(hdata->async.result, ret);
}
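/* Finish the hash and write the digest (hdata->digestsize bytes) to
 * output. */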
int cryptodev_hash_final(struct hash_data *hdata, void *output)
{
        int ret;

        INIT_COMPLETION(hdata->async.result->completion);
        ahash_request_set_crypt(hdata->async.request, NULL, output, 0);

        ret = crypto_ahash_final(hdata->async.request);

        return waitfor(hdata->async.result, ret);
}