If kernel doesn't indicate the accelerated ciphers base the decision on known chips.
[cryptodev-linux.git] / zc.c
blob11491dc87bf556d839676a404e2ad8b6c3e77827
1 /*
2 * Driver for /dev/crypto device (aka CryptoDev)
4 * Copyright (c) 2009-2011 Nikos Mavrogiannopoulos <nmav@gnutls.org>
5 * Copyright (c) 2010 Phil Sutter
6 * Copyright (c) 2011, 2012 OpenSSL Software Foundation, Inc.
8 * This file is part of linux cryptodev.
10 * This program is free software; you can redistribute it and/or
11 * modify it under the terms of the GNU General Public License
12 * as published by the Free Software Foundation; either version 2
13 * of the License, or (at your option) any later version.
15 * This program is distributed in the hope that it will be useful,
16 * but WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
18 * GNU General Public License for more details.
20 * You should have received a copy of the GNU General Public License
21 * along with this program; if not, write to the Free Software
22 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
23 * 02110-1301, USA.
26 #include <crypto/hash.h>
27 #include <linux/crypto.h>
28 #include <linux/mm.h>
29 #include <linux/highmem.h>
30 #include <linux/ioctl.h>
31 #include <linux/random.h>
32 #include <linux/syscalls.h>
33 #include <linux/pagemap.h>
34 #include <linux/uaccess.h>
35 #include <crypto/scatterwalk.h>
36 #include <linux/scatterlist.h>
37 #include "cryptodev_int.h"
38 #include "zc.h"
39 #include "version.h"
41 /* Helper functions to assist zero copy.
42 * This needs to be redesigned and moved out of the session. --nmav
/* offset of buf in its first page; parenthesize the argument so that
 * expression arguments (e.g. buf + off) are cast as a whole */
#define PAGEOFFSET(buf) ((unsigned long)(buf) & ~PAGE_MASK)
48 /* fetch the pages addr resides in into pg and initialise sg with them */
49 int __get_userbuf(uint8_t __user *addr, uint32_t len, int write,
50 int pgcount, struct page **pg, struct scatterlist *sg,
51 struct task_struct *task, struct mm_struct *mm)
53 int ret, pglen, i = 0;
54 struct scatterlist *sgp;
56 down_write(&mm->mmap_sem);
57 ret = get_user_pages(task, mm,
58 (unsigned long)addr, pgcount, write, 0, pg, NULL);
59 up_write(&mm->mmap_sem);
60 if (ret != pgcount)
61 return -EINVAL;
63 sg_init_table(sg, pgcount);
65 pglen = min((ptrdiff_t)(PAGE_SIZE - PAGEOFFSET(addr)), (ptrdiff_t)len);
66 sg_set_page(sg, pg[i++], pglen, PAGEOFFSET(addr));
68 len -= pglen;
69 for (sgp = sg_next(sg); len; sgp = sg_next(sgp)) {
70 pglen = min((uint32_t)PAGE_SIZE, len);
71 sg_set_page(sgp, pg[i++], pglen, 0);
72 len -= pglen;
74 sg_mark_end(sg_last(sg, pgcount));
75 return 0;
78 int adjust_sg_array(struct csession * ses, int pagecount)
80 struct scatterlist *sg;
81 struct page **pages;
82 int array_size;
84 for (array_size = ses->array_size; array_size < pagecount;
85 array_size *= 2)
88 dprintk(2, KERN_DEBUG, "%s: reallocating to %d elements\n",
89 __func__, array_size);
90 pages = krealloc(ses->pages, array_size * sizeof(struct page *),
91 GFP_KERNEL);
92 if (unlikely(!pages))
93 return -ENOMEM;
94 ses->pages = pages;
95 sg = krealloc(ses->sg, array_size * sizeof(struct scatterlist),
96 GFP_KERNEL);
97 if (unlikely(!sg))
98 return -ENOMEM;
99 ses->sg = sg;
100 ses->array_size = array_size;
102 return 0;
105 void release_user_pages(struct csession *ses)
107 unsigned int i;
109 for (i=0;i<ses->used_pages;i++) {
110 if (!PageReserved(ses->pages[i]))
111 SetPageDirty(ses->pages[i]);
113 if (ses->readable_pages == 0)
114 flush_dcache_page(ses->pages[i]);
115 else
116 ses->readable_pages--;
118 page_cache_release(ses->pages[i]);
120 ses->used_pages = 0;
123 /* make src and dst available in scatterlists.
124 * dst might be the same as src.
126 int get_userbuf(struct csession *ses, void* __user src, int src_len,
127 void* __user dst, int dst_len,
128 struct task_struct *task, struct mm_struct *mm,
129 struct scatterlist **src_sg,
130 struct scatterlist **dst_sg)
132 int src_pagecount, dst_pagecount = 0, pagecount, write_src = 1;
133 int rc;
135 if (src == NULL)
136 return -EINVAL;
138 if (ses->alignmask && !IS_ALIGNED((unsigned long)src, ses->alignmask)) {
139 dprintk(2, KERN_WARNING, "%s: careful - source address %lx is not %d byte aligned\n",
140 __func__, (unsigned long)src, ses->alignmask + 1);
143 if (src == dst) {
144 /* dst == src */
145 src_len = max(src_len, dst_len);
146 dst_len = src_len;
149 src_pagecount = PAGECOUNT(src, src_len);
150 if (!ses->cdata.init) { /* hashing only */
151 write_src = 0;
152 } else if (src != dst) { /* non-in-situ transformation */
153 if (dst == NULL)
154 return -EINVAL;
156 dst_pagecount = PAGECOUNT(dst, dst_len);
157 write_src = 0;
159 if (ses->alignmask && !IS_ALIGNED((unsigned long)dst, ses->alignmask)) {
160 dprintk(2, KERN_WARNING, "%s: careful - destination address %lx is not %d byte aligned\n",
161 __func__, (unsigned long)dst, ses->alignmask + 1);
164 ses->used_pages = pagecount = src_pagecount + dst_pagecount;
166 if (write_src) ses->readable_pages = 0;
167 else ses->readable_pages = src_pagecount;
169 if (pagecount > ses->array_size) {
170 rc = adjust_sg_array(ses, pagecount);
171 if (rc)
172 return rc;
175 rc = __get_userbuf(src, src_len, write_src, src_pagecount,
176 ses->pages, ses->sg, task, mm);
177 if (unlikely(rc)) {
178 dprintk(1, KERN_ERR,
179 "failed to get user pages for data input\n");
180 return -EINVAL;
182 (*src_sg) = (*dst_sg) = ses->sg;
184 if (!dst_pagecount)
185 return 0;
187 (*dst_sg) = ses->sg + src_pagecount;
189 rc = __get_userbuf(dst, dst_len, 1, dst_pagecount,
190 ses->pages + src_pagecount, *dst_sg,
191 task, mm);
192 if (unlikely(rc)) {
193 dprintk(1, KERN_ERR,
194 "failed to get user pages for data output\n");
195 release_user_pages(ses);
196 return -EINVAL;
198 return 0;