[cryptodev-linux.git] / cryptodev_cipher.c
/*
 * Driver for /dev/crypto device (aka CryptoDev)
 *
 * Copyright (c) 2010 Nikos Mavrogiannopoulos <nmav@gnutls.org>
 * Portions Copyright (c) 2010 Michael Weiser
 * Portions Copyright (c) 2010 Phil Sutter
 *
 * This file is part of linux cryptodev.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc.,
 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
 */

#include <linux/crypto.h>
#include <linux/mm.h>
#include <linux/highmem.h>
#include <linux/ioctl.h>
#include <linux/random.h>
#include <linux/scatterlist.h>
#include <linux/uaccess.h>
#include <crypto/algapi.h>
#include <crypto/hash.h>
#include <crypto/cryptodev.h>
#include "cryptodev_int.h"
struct cryptodev_result {
	struct completion completion;
	int err;
};

static void cryptodev_complete(struct crypto_async_request *req, int err)
{
	struct cryptodev_result *res = req->data;

	/* -EINPROGRESS only signals that a backlogged request has been
	 * started; the final completion callback will follow later. */
	if (err == -EINPROGRESS)
		return;

	res->err = err;
	complete(&res->completion);
}
int cryptodev_cipher_init(struct cipher_data *out, const char *alg_name,
				uint8_t *keyp, size_t keylen, int stream)
{
	struct ablkcipher_alg *alg;
	int ret;

	memset(out, 0, sizeof(*out));

	out->async.s = crypto_alloc_ablkcipher(alg_name, 0, 0);
	if (unlikely(IS_ERR(out->async.s))) {
		dprintk(1, KERN_DEBUG, "%s: Failed to load cipher %s\n",
			__func__, alg_name);
		return -EINVAL;
	}

	alg = crypto_ablkcipher_alg(out->async.s);

	if (alg != NULL) {
		/* Was correct key length supplied? */
		if (alg->max_keysize > 0 &&
			unlikely((keylen < alg->min_keysize) ||
				 (keylen > alg->max_keysize))) {
			dprintk(1, KERN_DEBUG,
				"Wrong keylen '%zu' for algorithm '%s'. Use %u to %u.\n",
				keylen, alg_name, alg->min_keysize,
				alg->max_keysize);
			ret = -EINVAL;
			goto error;
		}
	}

	ret = crypto_ablkcipher_setkey(out->async.s, keyp, keylen);
	if (unlikely(ret)) {
		dprintk(1, KERN_DEBUG, "Setting key failed for %s-%zu.\n",
			alg_name, keylen*8);
		ret = -EINVAL;
		goto error;
	}

	out->stream = stream;
	out->blocksize = crypto_ablkcipher_blocksize(out->async.s);
	out->ivsize = crypto_ablkcipher_ivsize(out->async.s);
	out->alignmask = crypto_ablkcipher_alignmask(out->async.s);

	out->async.result = kmalloc(sizeof(*out->async.result), GFP_KERNEL);
	if (unlikely(!out->async.result)) {
		ret = -ENOMEM;
		goto error;
	}

	memset(out->async.result, 0, sizeof(*out->async.result));
	init_completion(&out->async.result->completion);

	out->async.request = ablkcipher_request_alloc(out->async.s, GFP_KERNEL);
	if (unlikely(!out->async.request)) {
		dprintk(1, KERN_ERR, "error allocating async crypto request\n");
		ret = -ENOMEM;
		goto error;
	}

	ablkcipher_request_set_callback(out->async.request,
					CRYPTO_TFM_REQ_MAY_BACKLOG,
					cryptodev_complete, out->async.result);

	out->init = 1;
	return 0;
error:
	if (out->async.request)
		ablkcipher_request_free(out->async.request);
	kfree(out->async.result);
	if (out->async.s)
		crypto_free_ablkcipher(out->async.s);

	return ret;
}
void cryptodev_cipher_deinit(struct cipher_data *cdata)
{
	if (cdata->init) {
		if (cdata->async.request)
			ablkcipher_request_free(cdata->async.request);
		kfree(cdata->async.result);
		if (cdata->async.s)
			crypto_free_ablkcipher(cdata->async.s);

		cdata->init = 0;
	}
}
static inline int waitfor(struct cryptodev_result *cr, ssize_t ret)
{
	switch (ret) {
	case 0:
		break;
	case -EINPROGRESS:
	case -EBUSY:
		wait_for_completion(&cr->completion);
		/* At this point we know for sure the request has finished,
		 * because wait_for_completion above was not interruptible.
		 * This is important because otherwise hardware or driver
		 * might try to access memory which will be freed or reused for
		 * another request. */

		if (unlikely(cr->err)) {
			dprintk(0, KERN_ERR, "error from async request: %d\n",
				cr->err);
			return cr->err;
		}

		break;
	default:
		return ret;
	}

	return 0;
}
ssize_t cryptodev_cipher_encrypt(struct cipher_data *cdata,
		const struct scatterlist *sg1, struct scatterlist *sg2,
		size_t len)
{
	int ret;

	INIT_COMPLETION(cdata->async.result->completion);
	ablkcipher_request_set_crypt(cdata->async.request,
			(struct scatterlist *)sg1, sg2,
			len, cdata->async.iv);
	ret = crypto_ablkcipher_encrypt(cdata->async.request);

	return waitfor(cdata->async.result, ret);
}

ssize_t cryptodev_cipher_decrypt(struct cipher_data *cdata,
		const struct scatterlist *sg1, struct scatterlist *sg2,
		size_t len)
{
	int ret;

	INIT_COMPLETION(cdata->async.result->completion);
	ablkcipher_request_set_crypt(cdata->async.request,
			(struct scatterlist *)sg1, sg2,
			len, cdata->async.iv);
	ret = crypto_ablkcipher_decrypt(cdata->async.request);

	return waitfor(cdata->async.result, ret);
}
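
/*
 * Illustrative usage sketch, not part of the original file: one way a caller
 * might drive the cipher helpers above for a single in-place encryption.
 * The function name, key, and buffer below are hypothetical, and error
 * handling is abbreviated; the block is compiled out so the driver itself
 * is unchanged.
 */
#if 0
static int cipher_usage_example(void)
{
	struct cipher_data cdata;
	struct scatterlist sg;
	uint8_t key[16] = { 0 };	/* hypothetical 128-bit AES key */
	uint8_t buf[64] = { 0 };	/* hypothetical plaintext, a multiple of the block size */
	ssize_t ret;

	/* cryptodev_cipher_init() memsets *out, so async.iv starts out
	 * all-zero unless it is set elsewhere. */
	ret = cryptodev_cipher_init(&cdata, "cbc(aes)", key, sizeof(key), 0);
	if (ret)
		return ret;

	sg_init_one(&sg, buf, sizeof(buf));

	/* Encrypt in place: source and destination scatterlists are the same. */
	ret = cryptodev_cipher_encrypt(&cdata, &sg, &sg, sizeof(buf));

	cryptodev_cipher_deinit(&cdata);
	return ret < 0 ? (int)ret : 0;
}
#endif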
/* Hash functions */

int cryptodev_hash_init(struct hash_data *hdata, const char *alg_name,
			int hmac_mode, void *mackey, size_t mackeylen)
{
	int ret;

	hdata->async.s = crypto_alloc_ahash(alg_name, 0, 0);
	if (unlikely(IS_ERR(hdata->async.s))) {
		dprintk(1, KERN_DEBUG, "%s: Failed to load transform for %s\n",
			__func__, alg_name);
		return -EINVAL;
	}

	/* Copy the key from user and set to TFM. */
	if (hmac_mode != 0) {
		ret = crypto_ahash_setkey(hdata->async.s, mackey, mackeylen);
		if (unlikely(ret)) {
			dprintk(1, KERN_DEBUG,
				"Setting hmac key failed for %s-%zu.\n",
				alg_name, mackeylen*8);
			ret = -EINVAL;
			goto error;
		}
	}

	hdata->digestsize = crypto_ahash_digestsize(hdata->async.s);
	hdata->alignmask = crypto_ahash_alignmask(hdata->async.s);

	hdata->async.result = kmalloc(sizeof(*hdata->async.result), GFP_KERNEL);
	if (unlikely(!hdata->async.result)) {
		ret = -ENOMEM;
		goto error;
	}

	memset(hdata->async.result, 0, sizeof(*hdata->async.result));
	init_completion(&hdata->async.result->completion);

	hdata->async.request = ahash_request_alloc(hdata->async.s, GFP_KERNEL);
	if (unlikely(!hdata->async.request)) {
		dprintk(0, KERN_ERR, "error allocating async crypto request\n");
		ret = -ENOMEM;
		goto error;
	}

	ahash_request_set_callback(hdata->async.request,
				   CRYPTO_TFM_REQ_MAY_BACKLOG,
				   cryptodev_complete, hdata->async.result);

	ret = crypto_ahash_init(hdata->async.request);
	if (unlikely(ret)) {
		dprintk(0, KERN_ERR, "error in crypto_hash_init()\n");
		goto error_request;
	}

	hdata->init = 1;
	return 0;

error_request:
	ahash_request_free(hdata->async.request);
error:
	kfree(hdata->async.result);
	crypto_free_ahash(hdata->async.s);
	return ret;
}
void cryptodev_hash_deinit(struct hash_data *hdata)
{
	if (hdata->init) {
		if (hdata->async.request)
			ahash_request_free(hdata->async.request);
		kfree(hdata->async.result);
		if (hdata->async.s)
			crypto_free_ahash(hdata->async.s);
		hdata->init = 0;
	}
}
int cryptodev_hash_reset(struct hash_data *hdata)
{
	int ret;

	ret = crypto_ahash_init(hdata->async.request);
	if (unlikely(ret)) {
		dprintk(0, KERN_ERR, "error in crypto_hash_init()\n");
		return ret;
	}

	return 0;
}
ssize_t cryptodev_hash_update(struct hash_data *hdata,
				struct scatterlist *sg, size_t len)
{
	int ret;

	INIT_COMPLETION(hdata->async.result->completion);
	ahash_request_set_crypt(hdata->async.request, sg, NULL, len);

	ret = crypto_ahash_update(hdata->async.request);

	return waitfor(hdata->async.result, ret);
}
int cryptodev_hash_final(struct hash_data *hdata, void *output)
{
	int ret;

	INIT_COMPLETION(hdata->async.result->completion);
	ahash_request_set_crypt(hdata->async.request, NULL, output, 0);

	ret = crypto_ahash_final(hdata->async.request);

	return waitfor(hdata->async.result, ret);
}
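
/*
 * Illustrative usage sketch, not part of the original file: hashing one
 * buffer with the ahash wrappers above.  The function name, buffer, and
 * digest size below are hypothetical, and error handling is abbreviated;
 * the block is compiled out so the driver itself is unchanged.
 */
#if 0
static int hash_usage_example(void)
{
	struct hash_data hdata;
	struct scatterlist sg;
	uint8_t buf[64] = { 0 };	/* hypothetical data to hash */
	uint8_t digest[20];		/* SHA-1 produces a 20-byte digest */
	ssize_t ret;

	/* Plain hash: hmac_mode == 0, so no key is set on the transform. */
	ret = cryptodev_hash_init(&hdata, "sha1", 0, NULL, 0);
	if (ret)
		return ret;

	sg_init_one(&sg, buf, sizeof(buf));

	ret = cryptodev_hash_update(&hdata, &sg, sizeof(buf));
	if (ret == 0)
		ret = cryptodev_hash_final(&hdata, digest);

	cryptodev_hash_deinit(&hdata);
	return ret < 0 ? (int)ret : 0;
}
#endif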