Merge branch 'master' of git+ssh://openadk.org/git/openadk
[openadk.git] / target / linux / patches / 2.6.30.4 / ocf.patch
blob 64c5eeb0f198a48edf7fdcc2204c48ede9e54cdb
1 diff -Nur linux-2.6.30.orig/crypto/Kconfig linux-2.6.30/crypto/Kconfig
2 --- linux-2.6.30.orig/crypto/Kconfig 2009-06-10 05:05:27.000000000 +0200
3 +++ linux-2.6.30/crypto/Kconfig 2009-06-11 10:55:27.000000000 +0200
4 @@ -781,3 +781,5 @@
5 source "drivers/crypto/Kconfig"
7 endif # if CRYPTO
9 +source "crypto/ocf/Kconfig"
10 diff -Nur linux-2.6.30.orig/crypto/Makefile linux-2.6.30/crypto/Makefile
11 --- linux-2.6.30.orig/crypto/Makefile 2009-06-10 05:05:27.000000000 +0200
12 +++ linux-2.6.30/crypto/Makefile 2009-06-11 10:55:27.000000000 +0200
13 @@ -84,6 +84,8 @@
14 obj-$(CONFIG_CRYPTO_ANSI_CPRNG) += ansi_cprng.o
15 obj-$(CONFIG_CRYPTO_TEST) += tcrypt.o
17 +obj-$(CONFIG_OCF_OCF) += ocf/
20 # generic algorithms and the async_tx api
22 diff -Nur linux-2.6.30.orig/crypto/ocf/Config.in linux-2.6.30/crypto/ocf/Config.in
23 --- linux-2.6.30.orig/crypto/ocf/Config.in 1970-01-01 01:00:00.000000000 +0100
24 +++ linux-2.6.30/crypto/ocf/Config.in 2009-06-11 10:55:27.000000000 +0200
25 @@ -0,0 +1,34 @@
26 +#############################################################################
28 +mainmenu_option next_comment
29 +comment 'OCF Configuration'
   30 +tristate 'OCF (Open Cryptographic Framework)' CONFIG_OCF_OCF
31 +dep_mbool ' enable fips RNG checks (fips check on RNG data before use)' \
32 + CONFIG_OCF_FIPS $CONFIG_OCF_OCF
33 +dep_mbool ' enable harvesting entropy for /dev/random' \
34 + CONFIG_OCF_RANDOMHARVEST $CONFIG_OCF_OCF
35 +dep_tristate ' cryptodev (user space support)' \
36 + CONFIG_OCF_CRYPTODEV $CONFIG_OCF_OCF
37 +dep_tristate ' cryptosoft (software crypto engine)' \
38 + CONFIG_OCF_CRYPTOSOFT $CONFIG_OCF_OCF
39 +dep_tristate ' safenet (HW crypto engine)' \
40 + CONFIG_OCF_SAFE $CONFIG_OCF_OCF
41 +dep_tristate ' IXP4xx (HW crypto engine)' \
42 + CONFIG_OCF_IXP4XX $CONFIG_OCF_OCF
43 +dep_mbool ' Enable IXP4xx HW to perform SHA1 and MD5 hashing (very slow)' \
44 + CONFIG_OCF_IXP4XX_SHA1_MD5 $CONFIG_OCF_IXP4XX
45 +dep_tristate ' hifn (HW crypto engine)' \
46 + CONFIG_OCF_HIFN $CONFIG_OCF_OCF
47 +dep_tristate ' talitos (HW crypto engine)' \
48 + CONFIG_OCF_TALITOS $CONFIG_OCF_OCF
49 +dep_tristate ' pasemi (HW crypto engine)' \
50 + CONFIG_OCF_PASEMI $CONFIG_OCF_OCF
51 +dep_tristate ' ep80579 (HW crypto engine)' \
52 + CONFIG_OCF_EP80579 $CONFIG_OCF_OCF
53 +dep_tristate ' ocfnull (does no crypto)' \
54 + CONFIG_OCF_OCFNULL $CONFIG_OCF_OCF
55 +dep_tristate ' ocf-bench (HW crypto in-kernel benchmark)' \
56 + CONFIG_OCF_BENCH $CONFIG_OCF_OCF
57 +endmenu
59 +#############################################################################
60 diff -Nur linux-2.6.30.orig/crypto/ocf/criov.c linux-2.6.30/crypto/ocf/criov.c
61 --- linux-2.6.30.orig/crypto/ocf/criov.c 1970-01-01 01:00:00.000000000 +0100
62 +++ linux-2.6.30/crypto/ocf/criov.c 2009-06-11 10:55:27.000000000 +0200
63 @@ -0,0 +1,215 @@
64 +/* $OpenBSD: criov.c,v 1.9 2002/01/29 15:48:29 jason Exp $ */
66 +/*
67 + * Linux port done by David McCullough <david_mccullough@securecomputing.com>
68 + * Copyright (C) 2006-2007 David McCullough
69 + * Copyright (C) 2004-2005 Intel Corporation.
70 + * The license and original author are listed below.
71 + *
72 + * Copyright (c) 1999 Theo de Raadt
73 + *
74 + * Redistribution and use in source and binary forms, with or without
75 + * modification, are permitted provided that the following conditions
76 + * are met:
77 + *
78 + * 1. Redistributions of source code must retain the above copyright
79 + * notice, this list of conditions and the following disclaimer.
80 + * 2. Redistributions in binary form must reproduce the above copyright
81 + * notice, this list of conditions and the following disclaimer in the
82 + * documentation and/or other materials provided with the distribution.
83 + * 3. The name of the author may not be used to endorse or promote products
84 + * derived from this software without specific prior written permission.
85 + *
86 + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
87 + * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
88 + * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
89 + * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
90 + * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
91 + * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
92 + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
93 + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
94 + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
95 + * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
96 + *
97 +__FBSDID("$FreeBSD: src/sys/opencrypto/criov.c,v 1.5 2006/06/04 22:15:13 pjd Exp $");
98 + */
100 +#ifndef AUTOCONF_INCLUDED
101 +#include <linux/config.h>
102 +#endif
103 +#include <linux/module.h>
104 +#include <linux/init.h>
105 +#include <linux/slab.h>
106 +#include <linux/uio.h>
107 +#include <linux/skbuff.h>
108 +#include <linux/kernel.h>
109 +#include <linux/mm.h>
110 +#include <asm/io.h>
112 +#include <uio.h>
113 +#include <cryptodev.h>
116 + * This macro is only for avoiding code duplication, as we need to skip
  117 + * a given number of bytes in the same way in the three functions below.
118 + */
119 +#define CUIO_SKIP() do { \
120 + KASSERT(off >= 0, ("%s: off %d < 0", __func__, off)); \
121 + KASSERT(len >= 0, ("%s: len %d < 0", __func__, len)); \
122 + while (off > 0) { \
123 + KASSERT(iol >= 0, ("%s: empty in skip", __func__)); \
124 + if (off < iov->iov_len) \
125 + break; \
126 + off -= iov->iov_len; \
127 + iol--; \
128 + iov++; \
129 + } \
130 +} while (0)
132 +void
133 +cuio_copydata(struct uio* uio, int off, int len, caddr_t cp)
135 + struct iovec *iov = uio->uio_iov;
136 + int iol = uio->uio_iovcnt;
137 + unsigned count;
139 + CUIO_SKIP();
140 + while (len > 0) {
141 + KASSERT(iol >= 0, ("%s: empty", __func__));
142 + count = min((int)(iov->iov_len - off), len);
143 + memcpy(cp, ((caddr_t)iov->iov_base) + off, count);
144 + len -= count;
145 + cp += count;
146 + off = 0;
147 + iol--;
148 + iov++;
152 +void
153 +cuio_copyback(struct uio* uio, int off, int len, caddr_t cp)
155 + struct iovec *iov = uio->uio_iov;
156 + int iol = uio->uio_iovcnt;
157 + unsigned count;
159 + CUIO_SKIP();
160 + while (len > 0) {
161 + KASSERT(iol >= 0, ("%s: empty", __func__));
162 + count = min((int)(iov->iov_len - off), len);
163 + memcpy(((caddr_t)iov->iov_base) + off, cp, count);
164 + len -= count;
165 + cp += count;
166 + off = 0;
167 + iol--;
168 + iov++;
173 + * Return a pointer to iov/offset of location in iovec list.
174 + */
175 +struct iovec *
176 +cuio_getptr(struct uio *uio, int loc, int *off)
178 + struct iovec *iov = uio->uio_iov;
179 + int iol = uio->uio_iovcnt;
181 + while (loc >= 0) {
182 + /* Normal end of search */
183 + if (loc < iov->iov_len) {
184 + *off = loc;
185 + return (iov);
188 + loc -= iov->iov_len;
189 + if (iol == 0) {
190 + if (loc == 0) {
191 + /* Point at the end of valid data */
192 + *off = iov->iov_len;
193 + return (iov);
194 + } else
195 + return (NULL);
196 + } else {
197 + iov++, iol--;
201 + return (NULL);
204 +EXPORT_SYMBOL(cuio_copyback);
205 +EXPORT_SYMBOL(cuio_copydata);
206 +EXPORT_SYMBOL(cuio_getptr);
209 +static void
210 +skb_copy_bits_back(struct sk_buff *skb, int offset, caddr_t cp, int len)
212 + int i;
213 + if (offset < skb_headlen(skb)) {
214 + memcpy(skb->data + offset, cp, min_t(int, skb_headlen(skb), len));
215 + len -= skb_headlen(skb);
216 + cp += skb_headlen(skb);
218 + offset -= skb_headlen(skb);
219 + for (i = 0; len > 0 && i < skb_shinfo(skb)->nr_frags; i++) {
220 + if (offset < skb_shinfo(skb)->frags[i].size) {
221 + memcpy(page_address(skb_shinfo(skb)->frags[i].page) +
222 + skb_shinfo(skb)->frags[i].page_offset,
223 + cp, min_t(int, skb_shinfo(skb)->frags[i].size, len));
224 + len -= skb_shinfo(skb)->frags[i].size;
225 + cp += skb_shinfo(skb)->frags[i].size;
227 + offset -= skb_shinfo(skb)->frags[i].size;
231 +void
232 +crypto_copyback(int flags, caddr_t buf, int off, int size, caddr_t in)
235 + if ((flags & CRYPTO_F_SKBUF) != 0)
236 + skb_copy_bits_back((struct sk_buff *)buf, off, in, size);
237 + else if ((flags & CRYPTO_F_IOV) != 0)
238 + cuio_copyback((struct uio *)buf, off, size, in);
239 + else
240 + bcopy(in, buf + off, size);
243 +void
244 +crypto_copydata(int flags, caddr_t buf, int off, int size, caddr_t out)
247 + if ((flags & CRYPTO_F_SKBUF) != 0)
248 + skb_copy_bits((struct sk_buff *)buf, off, out, size);
249 + else if ((flags & CRYPTO_F_IOV) != 0)
250 + cuio_copydata((struct uio *)buf, off, size, out);
251 + else
252 + bcopy(buf + off, out, size);
255 +int
256 +crypto_apply(int flags, caddr_t buf, int off, int len,
257 + int (*f)(void *, void *, u_int), void *arg)
259 +#if 0
260 + int error;
262 + if ((flags & CRYPTO_F_SKBUF) != 0)
263 + error = XXXXXX((struct mbuf *)buf, off, len, f, arg);
264 + else if ((flags & CRYPTO_F_IOV) != 0)
265 + error = cuio_apply((struct uio *)buf, off, len, f, arg);
266 + else
267 + error = (*f)(arg, buf + off, len);
268 + return (error);
269 +#else
270 + KASSERT(0, ("crypto_apply not implemented!\n"));
271 +#endif
272 + return 0;
275 +EXPORT_SYMBOL(crypto_copyback);
276 +EXPORT_SYMBOL(crypto_copydata);
277 +EXPORT_SYMBOL(crypto_apply);
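/*
 * Illustrative sketch, not part of the patch: how a driver can use the
 * helpers above to move data in and out of a request buffer without caring
 * whether the caller handed it a linear buffer, an iovec (CRYPTO_F_IOV) or
 * an skbuff (CRYPTO_F_SKBUF).  The crp_flags/crp_buf/crd_skip/crd_len field
 * names are assumed from OCF's cryptodev.h; they are not defined in this hunk.
 */
static void example_transform(struct cryptop *crp, struct cryptodesc *crd)
{
	unsigned char tmp[64];
	int n = min_t(int, (int)sizeof(tmp), crd->crd_len);

	/* pull the input out of whatever container the request carries */
	crypto_copydata(crp->crp_flags, crp->crp_buf, crd->crd_skip, n, (caddr_t)tmp);

	/* ... transform tmp in place ... */

	/* write the result back into the same container */
	crypto_copyback(crp->crp_flags, crp->crp_buf, crd->crd_skip, n, (caddr_t)tmp);
}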
279 diff -Nur linux-2.6.30.orig/crypto/ocf/crypto.c linux-2.6.30/crypto/ocf/crypto.c
280 --- linux-2.6.30.orig/crypto/ocf/crypto.c 1970-01-01 01:00:00.000000000 +0100
281 +++ linux-2.6.30/crypto/ocf/crypto.c 2009-06-11 10:55:27.000000000 +0200
282 @@ -0,0 +1,1741 @@
283 +/*-
284 + * Linux port done by David McCullough <david_mccullough@securecomputing.com>
285 + * Copyright (C) 2006-2007 David McCullough
286 + * Copyright (C) 2004-2005 Intel Corporation.
287 + * The license and original author are listed below.
289 + * Redistribution and use in source and binary forms, with or without
290 + * Copyright (c) 2002-2006 Sam Leffler. All rights reserved.
292 + * modification, are permitted provided that the following conditions
293 + * are met:
294 + * 1. Redistributions of source code must retain the above copyright
295 + * notice, this list of conditions and the following disclaimer.
296 + * 2. Redistributions in binary form must reproduce the above copyright
297 + * notice, this list of conditions and the following disclaimer in the
298 + * documentation and/or other materials provided with the distribution.
300 + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
301 + * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
302 + * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
303 + * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
304 + * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
305 + * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
306 + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
307 + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
308 + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
309 + * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
310 + */
312 +#if 0
313 +#include <sys/cdefs.h>
314 +__FBSDID("$FreeBSD: src/sys/opencrypto/crypto.c,v 1.27 2007/03/21 03:42:51 sam Exp $");
315 +#endif
318 + * Cryptographic Subsystem.
  320 + * This code is derived from the OpenBSD Cryptographic Framework (OCF)
321 + * that has the copyright shown below. Very little of the original
322 + * code remains.
323 + */
324 +/*-
325 + * The author of this code is Angelos D. Keromytis (angelos@cis.upenn.edu)
327 + * This code was written by Angelos D. Keromytis in Athens, Greece, in
328 + * February 2000. Network Security Technologies Inc. (NSTI) kindly
329 + * supported the development of this code.
331 + * Copyright (c) 2000, 2001 Angelos D. Keromytis
333 + * Permission to use, copy, and modify this software with or without fee
334 + * is hereby granted, provided that this entire notice is included in
335 + * all source code copies of any software which is or includes a copy or
336 + * modification of this software.
338 + * THIS SOFTWARE IS BEING PROVIDED "AS IS", WITHOUT ANY EXPRESS OR
339 + * IMPLIED WARRANTY. IN PARTICULAR, NONE OF THE AUTHORS MAKES ANY
340 + * REPRESENTATION OR WARRANTY OF ANY KIND CONCERNING THE
341 + * MERCHANTABILITY OF THIS SOFTWARE OR ITS FITNESS FOR ANY PARTICULAR
342 + * PURPOSE.
344 +__FBSDID("$FreeBSD: src/sys/opencrypto/crypto.c,v 1.16 2005/01/07 02:29:16 imp Exp $");
345 + */
348 +#ifndef AUTOCONF_INCLUDED
349 +#include <linux/config.h>
350 +#endif
351 +#include <linux/module.h>
352 +#include <linux/init.h>
353 +#include <linux/list.h>
354 +#include <linux/slab.h>
355 +#include <linux/wait.h>
356 +#include <linux/sched.h>
357 +#include <linux/spinlock.h>
358 +#include <linux/version.h>
359 +#include <cryptodev.h>
362 + * keep track of whether or not we have been initialised, a big
363 + * issue if we are linked into the kernel and a driver gets started before
364 + * us
365 + */
366 +static int crypto_initted = 0;
369 + * Crypto drivers register themselves by allocating a slot in the
370 + * crypto_drivers table with crypto_get_driverid() and then registering
371 + * each algorithm they support with crypto_register() and crypto_kregister().
372 + */
375 + * lock on driver table
376 + * we track its state as spin_is_locked does not do anything on non-SMP boxes
377 + */
378 +static spinlock_t crypto_drivers_lock;
379 +static int crypto_drivers_locked; /* for non-SMP boxes */
381 +#define CRYPTO_DRIVER_LOCK() \
382 + ({ \
383 + spin_lock_irqsave(&crypto_drivers_lock, d_flags); \
384 + crypto_drivers_locked = 1; \
385 + dprintk("%s,%d: DRIVER_LOCK()\n", __FILE__, __LINE__); \
386 + })
387 +#define CRYPTO_DRIVER_UNLOCK() \
388 + ({ \
389 + dprintk("%s,%d: DRIVER_UNLOCK()\n", __FILE__, __LINE__); \
390 + crypto_drivers_locked = 0; \
391 + spin_unlock_irqrestore(&crypto_drivers_lock, d_flags); \
392 + })
393 +#define CRYPTO_DRIVER_ASSERT() \
394 + ({ \
395 + if (!crypto_drivers_locked) { \
396 + dprintk("%s,%d: DRIVER_ASSERT!\n", __FILE__, __LINE__); \
397 + } \
398 + })
401 + * Crypto device/driver capabilities structure.
403 + * Synchronization:
404 + * (d) - protected by CRYPTO_DRIVER_LOCK()
405 + * (q) - protected by CRYPTO_Q_LOCK()
406 + * Not tagged fields are read-only.
407 + */
408 +struct cryptocap {
409 + device_t cc_dev; /* (d) device/driver */
410 + u_int32_t cc_sessions; /* (d) # of sessions */
  411 +	u_int32_t	cc_koperations;	/* (d) # of asym operations */
412 + /*
413 + * Largest possible operator length (in bits) for each type of
414 + * encryption algorithm. XXX not used
415 + */
416 + u_int16_t cc_max_op_len[CRYPTO_ALGORITHM_MAX + 1];
417 + u_int8_t cc_alg[CRYPTO_ALGORITHM_MAX + 1];
418 + u_int8_t cc_kalg[CRK_ALGORITHM_MAX + 1];
420 + int cc_flags; /* (d) flags */
421 +#define CRYPTOCAP_F_CLEANUP 0x80000000 /* needs resource cleanup */
422 + int cc_qblocked; /* (q) symmetric q blocked */
423 + int cc_kqblocked; /* (q) asymmetric q blocked */
425 +static struct cryptocap *crypto_drivers = NULL;
426 +static int crypto_drivers_num = 0;
429 + * There are two queues for crypto requests; one for symmetric (e.g.
  430 + * cipher) operations and one for asymmetric (e.g. MOD) operations.
431 + * A single mutex is used to lock access to both queues. We could
432 + * have one per-queue but having one simplifies handling of block/unblock
433 + * operations.
434 + */
435 +static int crp_sleep = 0;
436 +static LIST_HEAD(crp_q); /* request queues */
437 +static LIST_HEAD(crp_kq);
439 +static spinlock_t crypto_q_lock;
441 +int crypto_all_qblocked = 0; /* protect with Q_LOCK */
442 +module_param(crypto_all_qblocked, int, 0444);
443 +MODULE_PARM_DESC(crypto_all_qblocked, "Are all crypto queues blocked");
445 +int crypto_all_kqblocked = 0; /* protect with Q_LOCK */
446 +module_param(crypto_all_kqblocked, int, 0444);
447 +MODULE_PARM_DESC(crypto_all_kqblocked, "Are all asym crypto queues blocked");
449 +#define CRYPTO_Q_LOCK() \
450 + ({ \
451 + spin_lock_irqsave(&crypto_q_lock, q_flags); \
452 + dprintk("%s,%d: Q_LOCK()\n", __FILE__, __LINE__); \
453 + })
454 +#define CRYPTO_Q_UNLOCK() \
455 + ({ \
456 + dprintk("%s,%d: Q_UNLOCK()\n", __FILE__, __LINE__); \
457 + spin_unlock_irqrestore(&crypto_q_lock, q_flags); \
458 + })
461 + * There are two queues for processing completed crypto requests; one
462 + * for the symmetric and one for the asymmetric ops. We only need one
463 + * but have two to avoid type futzing (cryptop vs. cryptkop). A single
464 + * mutex is used to lock access to both queues. Note that this lock
  465 + * must be separate from the lock on request queues to ensure driver
466 + * callbacks don't generate lock order reversals.
467 + */
468 +static LIST_HEAD(crp_ret_q); /* callback queues */
469 +static LIST_HEAD(crp_ret_kq);
471 +static spinlock_t crypto_ret_q_lock;
472 +#define CRYPTO_RETQ_LOCK() \
473 + ({ \
474 + spin_lock_irqsave(&crypto_ret_q_lock, r_flags); \
475 + dprintk("%s,%d: RETQ_LOCK\n", __FILE__, __LINE__); \
476 + })
477 +#define CRYPTO_RETQ_UNLOCK() \
478 + ({ \
479 + dprintk("%s,%d: RETQ_UNLOCK\n", __FILE__, __LINE__); \
480 + spin_unlock_irqrestore(&crypto_ret_q_lock, r_flags); \
481 + })
482 +#define CRYPTO_RETQ_EMPTY() (list_empty(&crp_ret_q) && list_empty(&crp_ret_kq))
484 +#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20)
485 +static kmem_cache_t *cryptop_zone;
486 +static kmem_cache_t *cryptodesc_zone;
487 +#else
488 +static struct kmem_cache *cryptop_zone;
489 +static struct kmem_cache *cryptodesc_zone;
490 +#endif
492 +#define debug crypto_debug
493 +int crypto_debug = 0;
494 +module_param(crypto_debug, int, 0644);
495 +MODULE_PARM_DESC(crypto_debug, "Enable debug");
496 +EXPORT_SYMBOL(crypto_debug);
499 + * Maximum number of outstanding crypto requests before we start
500 + * failing requests. We need this to prevent DOS when too many
501 + * requests are arriving for us to keep up. Otherwise we will
502 + * run the system out of memory. Since crypto is slow, we are
503 + * usually the bottleneck that needs to say, enough is enough.
505 + * We cannot print errors when this condition occurs, we are already too
506 + * slow, printing anything will just kill us
507 + */
509 +static int crypto_q_cnt = 0;
510 +module_param(crypto_q_cnt, int, 0444);
511 +MODULE_PARM_DESC(crypto_q_cnt,
512 + "Current number of outstanding crypto requests");
514 +static int crypto_q_max = 1000;
515 +module_param(crypto_q_max, int, 0644);
516 +MODULE_PARM_DESC(crypto_q_max,
517 + "Maximum number of outstanding crypto requests");
519 +#define bootverbose crypto_verbose
520 +static int crypto_verbose = 0;
521 +module_param(crypto_verbose, int, 0644);
522 +MODULE_PARM_DESC(crypto_verbose,
523 + "Enable verbose crypto startup");
525 +int crypto_usercrypto = 1; /* userland may do crypto reqs */
526 +module_param(crypto_usercrypto, int, 0644);
527 +MODULE_PARM_DESC(crypto_usercrypto,
528 + "Enable/disable user-mode access to crypto support");
530 +int crypto_userasymcrypto = 1; /* userland may do asym crypto reqs */
531 +module_param(crypto_userasymcrypto, int, 0644);
532 +MODULE_PARM_DESC(crypto_userasymcrypto,
533 + "Enable/disable user-mode access to asymmetric crypto support");
535 +int crypto_devallowsoft = 0; /* only use hardware crypto */
536 +module_param(crypto_devallowsoft, int, 0644);
537 +MODULE_PARM_DESC(crypto_devallowsoft,
538 + "Enable/disable use of software crypto support");
540 +static pid_t cryptoproc = (pid_t) -1;
541 +static struct completion cryptoproc_exited;
542 +static DECLARE_WAIT_QUEUE_HEAD(cryptoproc_wait);
543 +static pid_t cryptoretproc = (pid_t) -1;
544 +static struct completion cryptoretproc_exited;
545 +static DECLARE_WAIT_QUEUE_HEAD(cryptoretproc_wait);
547 +static int crypto_proc(void *arg);
548 +static int crypto_ret_proc(void *arg);
549 +static int crypto_invoke(struct cryptocap *cap, struct cryptop *crp, int hint);
550 +static int crypto_kinvoke(struct cryptkop *krp, int flags);
551 +static void crypto_exit(void);
552 +static int crypto_init(void);
554 +static struct cryptostats cryptostats;
556 +static struct cryptocap *
557 +crypto_checkdriver(u_int32_t hid)
559 + if (crypto_drivers == NULL)
560 + return NULL;
561 + return (hid >= crypto_drivers_num ? NULL : &crypto_drivers[hid]);
565 + * Compare a driver's list of supported algorithms against another
566 + * list; return non-zero if all algorithms are supported.
567 + */
568 +static int
569 +driver_suitable(const struct cryptocap *cap, const struct cryptoini *cri)
571 + const struct cryptoini *cr;
573 + /* See if all the algorithms are supported. */
574 + for (cr = cri; cr; cr = cr->cri_next)
575 + if (cap->cc_alg[cr->cri_alg] == 0)
576 + return 0;
577 + return 1;
581 + * Select a driver for a new session that supports the specified
582 + * algorithms and, optionally, is constrained according to the flags.
583 + * The algorithm we use here is pretty stupid; just use the
584 + * first driver that supports all the algorithms we need. If there
585 + * are multiple drivers we choose the driver with the fewest active
586 + * sessions. We prefer hardware-backed drivers to software ones.
588 + * XXX We need more smarts here (in real life too, but that's
589 + * XXX another story altogether).
590 + */
591 +static struct cryptocap *
592 +crypto_select_driver(const struct cryptoini *cri, int flags)
594 + struct cryptocap *cap, *best;
595 + int match, hid;
597 + CRYPTO_DRIVER_ASSERT();
599 + /*
600 + * Look first for hardware crypto devices if permitted.
601 + */
602 + if (flags & CRYPTOCAP_F_HARDWARE)
603 + match = CRYPTOCAP_F_HARDWARE;
604 + else
605 + match = CRYPTOCAP_F_SOFTWARE;
606 + best = NULL;
607 +again:
608 + for (hid = 0; hid < crypto_drivers_num; hid++) {
609 + cap = &crypto_drivers[hid];
610 + /*
611 + * If it's not initialized, is in the process of
612 + * going away, or is not appropriate (hardware
613 + * or software based on match), then skip.
614 + */
615 + if (cap->cc_dev == NULL ||
616 + (cap->cc_flags & CRYPTOCAP_F_CLEANUP) ||
617 + (cap->cc_flags & match) == 0)
618 + continue;
620 + /* verify all the algorithms are supported. */
621 + if (driver_suitable(cap, cri)) {
622 + if (best == NULL ||
623 + cap->cc_sessions < best->cc_sessions)
624 + best = cap;
627 + if (best != NULL)
628 + return best;
629 + if (match == CRYPTOCAP_F_HARDWARE && (flags & CRYPTOCAP_F_SOFTWARE)) {
630 + /* sort of an Algol 68-style for loop */
631 + match = CRYPTOCAP_F_SOFTWARE;
632 + goto again;
634 + return best;
638 + * Create a new session. The crid argument specifies a crypto
639 + * driver to use or constraints on a driver to select (hardware
640 + * only, software only, either). Whatever driver is selected
641 + * must be capable of the requested crypto algorithms.
642 + */
643 +int
644 +crypto_newsession(u_int64_t *sid, struct cryptoini *cri, int crid)
646 + struct cryptocap *cap;
647 + u_int32_t hid, lid;
648 + int err;
649 + unsigned long d_flags;
651 + CRYPTO_DRIVER_LOCK();
652 + if ((crid & (CRYPTOCAP_F_HARDWARE | CRYPTOCAP_F_SOFTWARE)) == 0) {
653 + /*
654 + * Use specified driver; verify it is capable.
655 + */
656 + cap = crypto_checkdriver(crid);
657 + if (cap != NULL && !driver_suitable(cap, cri))
658 + cap = NULL;
659 + } else {
660 + /*
661 + * No requested driver; select based on crid flags.
662 + */
663 + cap = crypto_select_driver(cri, crid);
664 + /*
665 + * if NULL then can't do everything in one session.
666 + * XXX Fix this. We need to inject a "virtual" session
667 + * XXX layer right about here.
668 + */
670 + if (cap != NULL) {
671 + /* Call the driver initialization routine. */
672 + hid = cap - crypto_drivers;
673 + lid = hid; /* Pass the driver ID. */
674 + cap->cc_sessions++;
675 + CRYPTO_DRIVER_UNLOCK();
676 + err = CRYPTODEV_NEWSESSION(cap->cc_dev, &lid, cri);
677 + CRYPTO_DRIVER_LOCK();
678 + if (err == 0) {
679 + (*sid) = (cap->cc_flags & 0xff000000)
680 + | (hid & 0x00ffffff);
681 + (*sid) <<= 32;
682 + (*sid) |= (lid & 0xffffffff);
683 + } else
684 + cap->cc_sessions--;
685 + } else
686 + err = EINVAL;
687 + CRYPTO_DRIVER_UNLOCK();
688 + return err;
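/*
 * Illustrative sketch, not part of the patch: the 64-bit session id built
 * above packs the driver capability flags into the top byte, the driver
 * index (hid) into the next 24 bits and the driver-local session id (lid)
 * into the low 32 bits.  The CRYPTO_SESID2HID/CRYPTO_SESID2CAPS macros used
 * later in this file are assumed to follow this layout.
 */
#define EXAMPLE_SESID2CAPS(sid)	(((sid) >> 32) & 0xff000000)	/* driver flags */
#define EXAMPLE_SESID2HID(sid)	(((sid) >> 32) & 0x00ffffff)	/* driver index */
#define EXAMPLE_SESID2LID(sid)	((sid) & 0xffffffff)		/* driver-local id */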
691 +static void
692 +crypto_remove(struct cryptocap *cap)
694 + CRYPTO_DRIVER_ASSERT();
695 + if (cap->cc_sessions == 0 && cap->cc_koperations == 0)
696 + bzero(cap, sizeof(*cap));
700 + * Delete an existing session (or a reserved session on an unregistered
701 + * driver).
702 + */
703 +int
704 +crypto_freesession(u_int64_t sid)
706 + struct cryptocap *cap;
707 + u_int32_t hid;
708 + int err = 0;
709 + unsigned long d_flags;
711 + dprintk("%s()\n", __FUNCTION__);
712 + CRYPTO_DRIVER_LOCK();
714 + if (crypto_drivers == NULL) {
715 + err = EINVAL;
716 + goto done;
719 + /* Determine two IDs. */
720 + hid = CRYPTO_SESID2HID(sid);
722 + if (hid >= crypto_drivers_num) {
723 + dprintk("%s - INVALID DRIVER NUM %d\n", __FUNCTION__, hid);
724 + err = ENOENT;
725 + goto done;
727 + cap = &crypto_drivers[hid];
729 + if (cap->cc_dev) {
730 + CRYPTO_DRIVER_UNLOCK();
731 + /* Call the driver cleanup routine, if available, unlocked. */
732 + err = CRYPTODEV_FREESESSION(cap->cc_dev, sid);
733 + CRYPTO_DRIVER_LOCK();
736 + if (cap->cc_sessions)
737 + cap->cc_sessions--;
739 + if (cap->cc_flags & CRYPTOCAP_F_CLEANUP)
740 + crypto_remove(cap);
742 +done:
743 + CRYPTO_DRIVER_UNLOCK();
744 + return err;
748 + * Return an unused driver id. Used by drivers prior to registering
749 + * support for the algorithms they handle.
750 + */
751 +int32_t
752 +crypto_get_driverid(device_t dev, int flags)
754 + struct cryptocap *newdrv;
755 + int i;
756 + unsigned long d_flags;
758 + if ((flags & (CRYPTOCAP_F_HARDWARE | CRYPTOCAP_F_SOFTWARE)) == 0) {
759 + printf("%s: no flags specified when registering driver\n",
760 + device_get_nameunit(dev));
761 + return -1;
764 + CRYPTO_DRIVER_LOCK();
766 + for (i = 0; i < crypto_drivers_num; i++) {
767 + if (crypto_drivers[i].cc_dev == NULL &&
768 + (crypto_drivers[i].cc_flags & CRYPTOCAP_F_CLEANUP) == 0) {
769 + break;
773 + /* Out of entries, allocate some more. */
774 + if (i == crypto_drivers_num) {
775 + /* Be careful about wrap-around. */
776 + if (2 * crypto_drivers_num <= crypto_drivers_num) {
777 + CRYPTO_DRIVER_UNLOCK();
778 + printk("crypto: driver count wraparound!\n");
779 + return -1;
782 + newdrv = kmalloc(2 * crypto_drivers_num * sizeof(struct cryptocap),
783 + GFP_KERNEL);
784 + if (newdrv == NULL) {
785 + CRYPTO_DRIVER_UNLOCK();
786 + printk("crypto: no space to expand driver table!\n");
787 + return -1;
790 + memcpy(newdrv, crypto_drivers,
791 + crypto_drivers_num * sizeof(struct cryptocap));
792 + memset(&newdrv[crypto_drivers_num], 0,
793 + crypto_drivers_num * sizeof(struct cryptocap));
795 + crypto_drivers_num *= 2;
797 + kfree(crypto_drivers);
798 + crypto_drivers = newdrv;
801 + /* NB: state is zero'd on free */
802 + crypto_drivers[i].cc_sessions = 1; /* Mark */
803 + crypto_drivers[i].cc_dev = dev;
804 + crypto_drivers[i].cc_flags = flags;
805 + if (bootverbose)
806 + printf("crypto: assign %s driver id %u, flags %u\n",
807 + device_get_nameunit(dev), i, flags);
809 + CRYPTO_DRIVER_UNLOCK();
811 + return i;
815 + * Lookup a driver by name. We match against the full device
816 + * name and unit, and against just the name. The latter gives
  817 + * us simple wildcarding by device name. On success return the
818 + * driver/hardware identifier; otherwise return -1.
819 + */
820 +int
821 +crypto_find_driver(const char *match)
823 + int i, len = strlen(match);
824 + unsigned long d_flags;
826 + CRYPTO_DRIVER_LOCK();
827 + for (i = 0; i < crypto_drivers_num; i++) {
828 + device_t dev = crypto_drivers[i].cc_dev;
829 + if (dev == NULL ||
830 + (crypto_drivers[i].cc_flags & CRYPTOCAP_F_CLEANUP))
831 + continue;
832 + if (strncmp(match, device_get_nameunit(dev), len) == 0 ||
833 + strncmp(match, device_get_name(dev), len) == 0)
834 + break;
836 + CRYPTO_DRIVER_UNLOCK();
837 + return i < crypto_drivers_num ? i : -1;
841 + * Return the device_t for the specified driver or NULL
842 + * if the driver identifier is invalid.
843 + */
844 +device_t
845 +crypto_find_device_byhid(int hid)
847 + struct cryptocap *cap = crypto_checkdriver(hid);
848 + return cap != NULL ? cap->cc_dev : NULL;
852 + * Return the device/driver capabilities.
853 + */
854 +int
855 +crypto_getcaps(int hid)
857 + struct cryptocap *cap = crypto_checkdriver(hid);
858 + return cap != NULL ? cap->cc_flags : 0;
862 + * Register support for a key-related algorithm. This routine
  863 + * is called once for each algorithm supported by a driver.
864 + */
865 +int
866 +crypto_kregister(u_int32_t driverid, int kalg, u_int32_t flags)
868 + struct cryptocap *cap;
869 + int err;
870 + unsigned long d_flags;
872 + dprintk("%s()\n", __FUNCTION__);
873 + CRYPTO_DRIVER_LOCK();
875 + cap = crypto_checkdriver(driverid);
876 + if (cap != NULL &&
877 + (CRK_ALGORITM_MIN <= kalg && kalg <= CRK_ALGORITHM_MAX)) {
878 + /*
879 + * XXX Do some performance testing to determine placing.
880 + * XXX We probably need an auxiliary data structure that
881 + * XXX describes relative performances.
882 + */
884 + cap->cc_kalg[kalg] = flags | CRYPTO_ALG_FLAG_SUPPORTED;
885 + if (bootverbose)
886 + printf("crypto: %s registers key alg %u flags %u\n"
887 + , device_get_nameunit(cap->cc_dev)
888 + , kalg
889 + , flags
890 + );
891 + err = 0;
892 + } else
893 + err = EINVAL;
895 + CRYPTO_DRIVER_UNLOCK();
896 + return err;
900 + * Register support for a non-key-related algorithm. This routine
901 + * is called once for each such algorithm supported by a driver.
902 + */
903 +int
904 +crypto_register(u_int32_t driverid, int alg, u_int16_t maxoplen,
905 + u_int32_t flags)
907 + struct cryptocap *cap;
908 + int err;
909 + unsigned long d_flags;
911 + dprintk("%s(id=0x%x, alg=%d, maxoplen=%d, flags=0x%x)\n", __FUNCTION__,
912 + driverid, alg, maxoplen, flags);
914 + CRYPTO_DRIVER_LOCK();
916 + cap = crypto_checkdriver(driverid);
917 + /* NB: algorithms are in the range [1..max] */
918 + if (cap != NULL &&
919 + (CRYPTO_ALGORITHM_MIN <= alg && alg <= CRYPTO_ALGORITHM_MAX)) {
920 + /*
921 + * XXX Do some performance testing to determine placing.
922 + * XXX We probably need an auxiliary data structure that
923 + * XXX describes relative performances.
924 + */
926 + cap->cc_alg[alg] = flags | CRYPTO_ALG_FLAG_SUPPORTED;
927 + cap->cc_max_op_len[alg] = maxoplen;
928 + if (bootverbose)
929 + printf("crypto: %s registers alg %u flags %u maxoplen %u\n"
930 + , device_get_nameunit(cap->cc_dev)
931 + , alg
932 + , flags
933 + , maxoplen
934 + );
935 + cap->cc_sessions = 0; /* Unmark */
936 + err = 0;
937 + } else
938 + err = EINVAL;
940 + CRYPTO_DRIVER_UNLOCK();
941 + return err;
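/*
 * Illustrative driver-side sketch, not part of the patch: a hardware engine
 * first allocates a driver id and then registers each algorithm it supports
 * using the routines above.  The CRYPTO_* algorithm constants are assumed
 * from OCF's cryptodev.h.
 */
static int32_t example_attach(device_t dev)
{
	int32_t id = crypto_get_driverid(dev, CRYPTOCAP_F_HARDWARE);

	if (id < 0)
		return -1;
	crypto_register(id, CRYPTO_DES_CBC, 0, 0);
	crypto_register(id, CRYPTO_3DES_CBC, 0, 0);
	crypto_register(id, CRYPTO_SHA1_HMAC, 0, 0);
	return id;
}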
944 +static void
945 +driver_finis(struct cryptocap *cap)
947 + u_int32_t ses, kops;
949 + CRYPTO_DRIVER_ASSERT();
951 + ses = cap->cc_sessions;
952 + kops = cap->cc_koperations;
953 + bzero(cap, sizeof(*cap));
954 + if (ses != 0 || kops != 0) {
955 + /*
956 + * If there are pending sessions,
957 + * just mark as invalid.
958 + */
959 + cap->cc_flags |= CRYPTOCAP_F_CLEANUP;
960 + cap->cc_sessions = ses;
961 + cap->cc_koperations = kops;
966 + * Unregister a crypto driver. If there are pending sessions using it,
967 + * leave enough information around so that subsequent calls using those
968 + * sessions will correctly detect the driver has been unregistered and
969 + * reroute requests.
970 + */
971 +int
972 +crypto_unregister(u_int32_t driverid, int alg)
974 + struct cryptocap *cap;
975 + int i, err;
976 + unsigned long d_flags;
978 + dprintk("%s()\n", __FUNCTION__);
979 + CRYPTO_DRIVER_LOCK();
981 + cap = crypto_checkdriver(driverid);
982 + if (cap != NULL &&
983 + (CRYPTO_ALGORITHM_MIN <= alg && alg <= CRYPTO_ALGORITHM_MAX) &&
984 + cap->cc_alg[alg] != 0) {
985 + cap->cc_alg[alg] = 0;
986 + cap->cc_max_op_len[alg] = 0;
988 + /* Was this the last algorithm ? */
989 + for (i = 1; i <= CRYPTO_ALGORITHM_MAX; i++)
990 + if (cap->cc_alg[i] != 0)
991 + break;
993 + if (i == CRYPTO_ALGORITHM_MAX + 1)
994 + driver_finis(cap);
995 + err = 0;
996 + } else
997 + err = EINVAL;
998 + CRYPTO_DRIVER_UNLOCK();
999 + return err;
1003 + * Unregister all algorithms associated with a crypto driver.
1004 + * If there are pending sessions using it, leave enough information
1005 + * around so that subsequent calls using those sessions will
1006 + * correctly detect the driver has been unregistered and reroute
1007 + * requests.
1008 + */
1009 +int
1010 +crypto_unregister_all(u_int32_t driverid)
1012 + struct cryptocap *cap;
1013 + int err;
1014 + unsigned long d_flags;
1016 + dprintk("%s()\n", __FUNCTION__);
1017 + CRYPTO_DRIVER_LOCK();
1018 + cap = crypto_checkdriver(driverid);
1019 + if (cap != NULL) {
1020 + driver_finis(cap);
1021 + err = 0;
1022 + } else
1023 + err = EINVAL;
1024 + CRYPTO_DRIVER_UNLOCK();
1026 + return err;
1030 + * Clear blockage on a driver. The what parameter indicates whether
1031 + * the driver is now ready for cryptop's and/or cryptokop's.
1032 + */
1033 +int
1034 +crypto_unblock(u_int32_t driverid, int what)
1036 + struct cryptocap *cap;
1037 + int err;
1038 + unsigned long q_flags;
1040 + CRYPTO_Q_LOCK();
1041 + cap = crypto_checkdriver(driverid);
1042 + if (cap != NULL) {
1043 + if (what & CRYPTO_SYMQ) {
1044 + cap->cc_qblocked = 0;
1045 + crypto_all_qblocked = 0;
1047 + if (what & CRYPTO_ASYMQ) {
1048 + cap->cc_kqblocked = 0;
1049 + crypto_all_kqblocked = 0;
1051 + if (crp_sleep)
1052 + wake_up_interruptible(&cryptoproc_wait);
1053 + err = 0;
1054 + } else
1055 + err = EINVAL;
1056 + CRYPTO_Q_UNLOCK(); //DAVIDM should this be a driver lock
1058 + return err;
1062 + * Add a crypto request to a queue, to be processed by the kernel thread.
1063 + */
1064 +int
1065 +crypto_dispatch(struct cryptop *crp)
1067 + struct cryptocap *cap;
1068 + int result = -1;
1069 + unsigned long q_flags;
1071 + dprintk("%s()\n", __FUNCTION__);
1073 + cryptostats.cs_ops++;
1075 + CRYPTO_Q_LOCK();
1076 + if (crypto_q_cnt >= crypto_q_max) {
1077 + CRYPTO_Q_UNLOCK();
1078 + cryptostats.cs_drops++;
1079 + return ENOMEM;
1081 + crypto_q_cnt++;
1083 + /*
1084 + * Caller marked the request to be processed immediately; dispatch
1085 + * it directly to the driver unless the driver is currently blocked.
1086 + */
1087 + if ((crp->crp_flags & CRYPTO_F_BATCH) == 0) {
1088 + int hid = CRYPTO_SESID2HID(crp->crp_sid);
1089 + cap = crypto_checkdriver(hid);
1090 + /* Driver cannot disappear when there is an active session. */
1091 + KASSERT(cap != NULL, ("%s: Driver disappeared.", __func__));
1092 + if (!cap->cc_qblocked) {
1093 + crypto_all_qblocked = 0;
1094 + crypto_drivers[hid].cc_qblocked = 1;
1095 + CRYPTO_Q_UNLOCK();
1096 + result = crypto_invoke(cap, crp, 0);
1097 + CRYPTO_Q_LOCK();
1098 + if (result != ERESTART)
1099 + crypto_drivers[hid].cc_qblocked = 0;
1102 + if (result == ERESTART) {
1103 + /*
1104 + * The driver ran out of resources, mark the
1105 + * driver ``blocked'' for cryptop's and put
1106 + * the request back in the queue. It would
 1107 +		 * be best to put the request back where we got
1108 + * it but that's hard so for now we put it
1109 + * at the front. This should be ok; putting
1110 + * it at the end does not work.
1111 + */
1112 + list_add(&crp->crp_next, &crp_q);
1113 + cryptostats.cs_blocks++;
1114 + } else if (result == -1) {
1115 + TAILQ_INSERT_TAIL(&crp_q, crp, crp_next);
1117 + if (crp_sleep)
1118 + wake_up_interruptible(&cryptoproc_wait);
1119 + CRYPTO_Q_UNLOCK();
1120 + return 0;
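/*
 * Illustrative caller-side sketch, not part of the patch: build a single
 * descriptor request on an existing session and hand it to crypto_dispatch().
 * The crp_sid/crp_ilen/crp_buf/crd_alg/crd_len fields and the CRYPTO_DES_CBC
 * constant are assumed from OCF's cryptodev.h; error handling is minimal.
 */
static int example_submit(u_int64_t sid, caddr_t buf, int len,
    int (*cb)(struct cryptop *))
{
	struct cryptop *crp = crypto_getreq(1);		/* one cryptodesc */

	if (crp == NULL)
		return ENOMEM;
	crp->crp_sid = sid;
	crp->crp_ilen = len;
	crp->crp_buf = buf;				/* plain linear buffer */
	crp->crp_flags = CRYPTO_F_CBIMM;		/* run callback immediately */
	crp->crp_callback = cb;
	crp->crp_desc->crd_alg = CRYPTO_DES_CBC;
	crp->crp_desc->crd_len = len;
	return crypto_dispatch(crp);			/* callback signals completion */
}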
 1124 + * Add an asymmetric crypto request to a queue,
1125 + * to be processed by the kernel thread.
1126 + */
1127 +int
1128 +crypto_kdispatch(struct cryptkop *krp)
1130 + int error;
1131 + unsigned long q_flags;
1133 + cryptostats.cs_kops++;
1135 + error = crypto_kinvoke(krp, krp->krp_crid);
1136 + if (error == ERESTART) {
1137 + CRYPTO_Q_LOCK();
1138 + TAILQ_INSERT_TAIL(&crp_kq, krp, krp_next);
1139 + if (crp_sleep)
1140 + wake_up_interruptible(&cryptoproc_wait);
1141 + CRYPTO_Q_UNLOCK();
1142 + error = 0;
1144 + return error;
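/*
 * Illustrative sketch, not part of the patch: submit an asymmetric (key)
 * operation.  The krp_op/krp_crid/krp_callback fields appear elsewhere in
 * this file; the CRK_MOD_EXP constant is assumed from OCF's cryptodev.h.
 */
static int example_ksubmit(struct cryptkop *krp, int (*cb)(struct cryptkop *))
{
	krp->krp_op = CRK_MOD_EXP;
	krp->krp_crid = CRYPTOCAP_F_HARDWARE | CRYPTOCAP_F_SOFTWARE;
	krp->krp_callback = cb;
	return crypto_kdispatch(krp);	/* runs now or is queued on ERESTART */
}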
1148 + * Verify a driver is suitable for the specified operation.
1149 + */
1150 +static __inline int
1151 +kdriver_suitable(const struct cryptocap *cap, const struct cryptkop *krp)
1153 + return (cap->cc_kalg[krp->krp_op] & CRYPTO_ALG_FLAG_SUPPORTED) != 0;
1157 + * Select a driver for an asym operation. The driver must
1158 + * support the necessary algorithm. The caller can constrain
1159 + * which device is selected with the flags parameter. The
1160 + * algorithm we use here is pretty stupid; just use the first
1161 + * driver that supports the algorithms we need. If there are
1162 + * multiple suitable drivers we choose the driver with the
1163 + * fewest active operations. We prefer hardware-backed
1164 + * drivers to software ones when either may be used.
1165 + */
1166 +static struct cryptocap *
1167 +crypto_select_kdriver(const struct cryptkop *krp, int flags)
1169 + struct cryptocap *cap, *best, *blocked;
1170 + int match, hid;
1172 + CRYPTO_DRIVER_ASSERT();
1174 + /*
1175 + * Look first for hardware crypto devices if permitted.
1176 + */
1177 + if (flags & CRYPTOCAP_F_HARDWARE)
1178 + match = CRYPTOCAP_F_HARDWARE;
1179 + else
1180 + match = CRYPTOCAP_F_SOFTWARE;
1181 + best = NULL;
1182 + blocked = NULL;
1183 +again:
1184 + for (hid = 0; hid < crypto_drivers_num; hid++) {
1185 + cap = &crypto_drivers[hid];
1186 + /*
1187 + * If it's not initialized, is in the process of
1188 + * going away, or is not appropriate (hardware
1189 + * or software based on match), then skip.
1190 + */
1191 + if (cap->cc_dev == NULL ||
1192 + (cap->cc_flags & CRYPTOCAP_F_CLEANUP) ||
1193 + (cap->cc_flags & match) == 0)
1194 + continue;
1196 + /* verify all the algorithms are supported. */
1197 + if (kdriver_suitable(cap, krp)) {
1198 + if (best == NULL ||
1199 + cap->cc_koperations < best->cc_koperations)
1200 + best = cap;
1203 + if (best != NULL)
1204 + return best;
1205 + if (match == CRYPTOCAP_F_HARDWARE && (flags & CRYPTOCAP_F_SOFTWARE)) {
1206 + /* sort of an Algol 68-style for loop */
1207 + match = CRYPTOCAP_F_SOFTWARE;
1208 + goto again;
1210 + return best;
 1214 + * Dispatch an asymmetric crypto request.
1215 + */
1216 +static int
1217 +crypto_kinvoke(struct cryptkop *krp, int crid)
1219 + struct cryptocap *cap = NULL;
1220 + int error;
1221 + unsigned long d_flags;
1223 + KASSERT(krp != NULL, ("%s: krp == NULL", __func__));
1224 + KASSERT(krp->krp_callback != NULL,
1225 + ("%s: krp->crp_callback == NULL", __func__));
1227 + CRYPTO_DRIVER_LOCK();
1228 + if ((crid & (CRYPTOCAP_F_HARDWARE | CRYPTOCAP_F_SOFTWARE)) == 0) {
1229 + cap = crypto_checkdriver(crid);
1230 + if (cap != NULL) {
1231 + /*
1232 + * Driver present, it must support the necessary
1233 + * algorithm and, if s/w drivers are excluded,
1234 + * it must be registered as hardware-backed.
1235 + */
1236 + if (!kdriver_suitable(cap, krp) ||
1237 + (!crypto_devallowsoft &&
1238 + (cap->cc_flags & CRYPTOCAP_F_HARDWARE) == 0))
1239 + cap = NULL;
1241 + } else {
1242 + /*
1243 + * No requested driver; select based on crid flags.
1244 + */
1245 + if (!crypto_devallowsoft) /* NB: disallow s/w drivers */
1246 + crid &= ~CRYPTOCAP_F_SOFTWARE;
1247 + cap = crypto_select_kdriver(krp, crid);
1249 + if (cap != NULL && !cap->cc_kqblocked) {
1250 + krp->krp_hid = cap - crypto_drivers;
1251 + cap->cc_koperations++;
1252 + CRYPTO_DRIVER_UNLOCK();
1253 + error = CRYPTODEV_KPROCESS(cap->cc_dev, krp, 0);
1254 + CRYPTO_DRIVER_LOCK();
1255 + if (error == ERESTART) {
1256 + cap->cc_koperations--;
1257 + CRYPTO_DRIVER_UNLOCK();
1258 + return (error);
1260 + /* return the actual device used */
1261 + krp->krp_crid = krp->krp_hid;
1262 + } else {
1263 + /*
1264 + * NB: cap is !NULL if device is blocked; in
1265 + * that case return ERESTART so the operation
1266 + * is resubmitted if possible.
1267 + */
1268 + error = (cap == NULL) ? ENODEV : ERESTART;
1270 + CRYPTO_DRIVER_UNLOCK();
1272 + if (error) {
1273 + krp->krp_status = error;
1274 + crypto_kdone(krp);
1276 + return 0;
1281 + * Dispatch a crypto request to the appropriate crypto devices.
1282 + */
1283 +static int
1284 +crypto_invoke(struct cryptocap *cap, struct cryptop *crp, int hint)
1286 + KASSERT(crp != NULL, ("%s: crp == NULL", __func__));
1287 + KASSERT(crp->crp_callback != NULL,
1288 + ("%s: crp->crp_callback == NULL", __func__));
1289 + KASSERT(crp->crp_desc != NULL, ("%s: crp->crp_desc == NULL", __func__));
1291 + dprintk("%s()\n", __FUNCTION__);
1293 +#ifdef CRYPTO_TIMING
1294 + if (crypto_timing)
1295 + crypto_tstat(&cryptostats.cs_invoke, &crp->crp_tstamp);
1296 +#endif
1297 + if (cap->cc_flags & CRYPTOCAP_F_CLEANUP) {
1298 + struct cryptodesc *crd;
1299 + u_int64_t nid;
1301 + /*
1302 + * Driver has unregistered; migrate the session and return
1303 + * an error to the caller so they'll resubmit the op.
1305 + * XXX: What if there are more already queued requests for this
1306 + * session?
1307 + */
1308 + crypto_freesession(crp->crp_sid);
1310 + for (crd = crp->crp_desc; crd->crd_next; crd = crd->crd_next)
1311 + crd->CRD_INI.cri_next = &(crd->crd_next->CRD_INI);
1313 + /* XXX propagate flags from initial session? */
1314 + if (crypto_newsession(&nid, &(crp->crp_desc->CRD_INI),
1315 + CRYPTOCAP_F_HARDWARE | CRYPTOCAP_F_SOFTWARE) == 0)
1316 + crp->crp_sid = nid;
1318 + crp->crp_etype = EAGAIN;
1319 + crypto_done(crp);
1320 + return 0;
1321 + } else {
1322 + /*
1323 + * Invoke the driver to process the request.
1324 + */
1325 + return CRYPTODEV_PROCESS(cap->cc_dev, crp, hint);
1330 + * Release a set of crypto descriptors.
1331 + */
1332 +void
1333 +crypto_freereq(struct cryptop *crp)
1335 + struct cryptodesc *crd;
1337 + if (crp == NULL)
1338 + return;
1340 +#ifdef DIAGNOSTIC
1342 + struct cryptop *crp2;
1343 + unsigned long q_flags;
1345 + CRYPTO_Q_LOCK();
1346 + TAILQ_FOREACH(crp2, &crp_q, crp_next) {
1347 + KASSERT(crp2 != crp,
1348 + ("Freeing cryptop from the crypto queue (%p).",
1349 + crp));
1351 + CRYPTO_Q_UNLOCK();
1352 + CRYPTO_RETQ_LOCK();
1353 + TAILQ_FOREACH(crp2, &crp_ret_q, crp_next) {
1354 + KASSERT(crp2 != crp,
1355 + ("Freeing cryptop from the return queue (%p).",
1356 + crp));
1358 + CRYPTO_RETQ_UNLOCK();
1360 +#endif
1362 + while ((crd = crp->crp_desc) != NULL) {
1363 + crp->crp_desc = crd->crd_next;
1364 + kmem_cache_free(cryptodesc_zone, crd);
1366 + kmem_cache_free(cryptop_zone, crp);
1370 + * Acquire a set of crypto descriptors.
1371 + */
1372 +struct cryptop *
1373 +crypto_getreq(int num)
1375 + struct cryptodesc *crd;
1376 + struct cryptop *crp;
1378 + crp = kmem_cache_alloc(cryptop_zone, SLAB_ATOMIC);
1379 + if (crp != NULL) {
1380 + memset(crp, 0, sizeof(*crp));
1381 + INIT_LIST_HEAD(&crp->crp_next);
1382 + init_waitqueue_head(&crp->crp_waitq);
1383 + while (num--) {
1384 + crd = kmem_cache_alloc(cryptodesc_zone, SLAB_ATOMIC);
1385 + if (crd == NULL) {
1386 + crypto_freereq(crp);
1387 + return NULL;
1389 + memset(crd, 0, sizeof(*crd));
1390 + crd->crd_next = crp->crp_desc;
1391 + crp->crp_desc = crd;
1394 + return crp;
1398 + * Invoke the callback on behalf of the driver.
1399 + */
1400 +void
1401 +crypto_done(struct cryptop *crp)
1403 + unsigned long q_flags;
1405 + dprintk("%s()\n", __FUNCTION__);
1406 + if ((crp->crp_flags & CRYPTO_F_DONE) == 0) {
1407 + crp->crp_flags |= CRYPTO_F_DONE;
1408 + CRYPTO_Q_LOCK();
1409 + crypto_q_cnt--;
1410 + CRYPTO_Q_UNLOCK();
1411 + } else
1412 + printk("crypto: crypto_done op already done, flags 0x%x",
1413 + crp->crp_flags);
1414 + if (crp->crp_etype != 0)
1415 + cryptostats.cs_errs++;
1416 + /*
1417 + * CBIMM means unconditionally do the callback immediately;
1418 + * CBIFSYNC means do the callback immediately only if the
1419 + * operation was done synchronously. Both are used to avoid
1420 + * doing extraneous context switches; the latter is mostly
1421 + * used with the software crypto driver.
1422 + */
1423 + if ((crp->crp_flags & CRYPTO_F_CBIMM) ||
1424 + ((crp->crp_flags & CRYPTO_F_CBIFSYNC) &&
1425 + (CRYPTO_SESID2CAPS(crp->crp_sid) & CRYPTOCAP_F_SYNC))) {
1426 + /*
1427 + * Do the callback directly. This is ok when the
1428 + * callback routine does very little (e.g. the
1429 + * /dev/crypto callback method just does a wakeup).
1430 + */
1431 + crp->crp_callback(crp);
1432 + } else {
1433 + unsigned long r_flags;
1434 + /*
1435 + * Normal case; queue the callback for the thread.
1436 + */
1437 + CRYPTO_RETQ_LOCK();
1438 + if (CRYPTO_RETQ_EMPTY())
1439 + wake_up_interruptible(&cryptoretproc_wait);/* shared wait channel */
1440 + TAILQ_INSERT_TAIL(&crp_ret_q, crp, crp_next);
1441 + CRYPTO_RETQ_UNLOCK();
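/*
 * Illustrative driver-side sketch, not part of the patch: when the hardware
 * finishes, the driver records the result in crp_etype and calls
 * crypto_done(); whether the callback then runs inline or from the
 * crypto_ret thread depends on the CRYPTO_F_CBIMM/CRYPTO_F_CBIFSYNC flags
 * the submitter set, as described above.
 */
static void example_complete(struct cryptop *crp, int error)
{
	crp->crp_etype = error;		/* 0 on success */
	crypto_done(crp);
}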
1446 + * Invoke the callback on behalf of the driver.
1447 + */
1448 +void
1449 +crypto_kdone(struct cryptkop *krp)
1451 + struct cryptocap *cap;
1452 + unsigned long d_flags;
1454 + if ((krp->krp_flags & CRYPTO_KF_DONE) != 0)
1455 + printk("crypto: crypto_kdone op already done, flags 0x%x",
1456 + krp->krp_flags);
1457 + krp->krp_flags |= CRYPTO_KF_DONE;
1458 + if (krp->krp_status != 0)
1459 + cryptostats.cs_kerrs++;
1461 + CRYPTO_DRIVER_LOCK();
1462 + /* XXX: What if driver is loaded in the meantime? */
1463 + if (krp->krp_hid < crypto_drivers_num) {
1464 + cap = &crypto_drivers[krp->krp_hid];
1465 + cap->cc_koperations--;
1466 + KASSERT(cap->cc_koperations >= 0, ("cc_koperations < 0"));
1467 + if (cap->cc_flags & CRYPTOCAP_F_CLEANUP)
1468 + crypto_remove(cap);
1470 + CRYPTO_DRIVER_UNLOCK();
1472 + /*
1473 + * CBIMM means unconditionally do the callback immediately;
1474 + * This is used to avoid doing extraneous context switches
1475 + */
1476 + if ((krp->krp_flags & CRYPTO_KF_CBIMM)) {
1477 + /*
1478 + * Do the callback directly. This is ok when the
1479 + * callback routine does very little (e.g. the
1480 + * /dev/crypto callback method just does a wakeup).
1481 + */
1482 + krp->krp_callback(krp);
1483 + } else {
1484 + unsigned long r_flags;
1485 + /*
1486 + * Normal case; queue the callback for the thread.
1487 + */
1488 + CRYPTO_RETQ_LOCK();
1489 + if (CRYPTO_RETQ_EMPTY())
1490 + wake_up_interruptible(&cryptoretproc_wait);/* shared wait channel */
1491 + TAILQ_INSERT_TAIL(&crp_ret_kq, krp, krp_next);
1492 + CRYPTO_RETQ_UNLOCK();
1496 +int
1497 +crypto_getfeat(int *featp)
1499 + int hid, kalg, feat = 0;
1500 + unsigned long d_flags;
1502 + CRYPTO_DRIVER_LOCK();
1503 + for (hid = 0; hid < crypto_drivers_num; hid++) {
1504 + const struct cryptocap *cap = &crypto_drivers[hid];
1506 + if ((cap->cc_flags & CRYPTOCAP_F_SOFTWARE) &&
1507 + !crypto_devallowsoft) {
1508 + continue;
1510 + for (kalg = 0; kalg < CRK_ALGORITHM_MAX; kalg++)
1511 + if (cap->cc_kalg[kalg] & CRYPTO_ALG_FLAG_SUPPORTED)
1512 + feat |= 1 << kalg;
1514 + CRYPTO_DRIVER_UNLOCK();
1515 + *featp = feat;
1516 + return (0);
1520 + * Crypto thread, dispatches crypto requests.
1521 + */
1522 +static int
1523 +crypto_proc(void *arg)
1525 + struct cryptop *crp, *submit;
1526 + struct cryptkop *krp, *krpp;
1527 + struct cryptocap *cap;
1528 + u_int32_t hid;
1529 + int result, hint;
1530 + unsigned long q_flags;
1532 + ocf_daemonize("crypto");
1534 + CRYPTO_Q_LOCK();
1535 + for (;;) {
1536 + /*
1537 + * we need to make sure we don't get into a busy loop with nothing
1538 + * to do, the two crypto_all_*blocked vars help us find out when
1539 + * we are all full and can do nothing on any driver or Q. If so we
1540 + * wait for an unblock.
1541 + */
1542 + crypto_all_qblocked = !list_empty(&crp_q);
1544 + /*
1545 + * Find the first element in the queue that can be
1546 + * processed and look-ahead to see if multiple ops
1547 + * are ready for the same driver.
1548 + */
1549 + submit = NULL;
1550 + hint = 0;
1551 + list_for_each_entry(crp, &crp_q, crp_next) {
1552 + hid = CRYPTO_SESID2HID(crp->crp_sid);
1553 + cap = crypto_checkdriver(hid);
1554 + /*
1555 + * Driver cannot disappear when there is an active
1556 + * session.
1557 + */
1558 + KASSERT(cap != NULL, ("%s:%u Driver disappeared.",
1559 + __func__, __LINE__));
1560 + if (cap == NULL || cap->cc_dev == NULL) {
1561 + /* Op needs to be migrated, process it. */
1562 + if (submit == NULL)
1563 + submit = crp;
1564 + break;
1566 + if (!cap->cc_qblocked) {
1567 + if (submit != NULL) {
1568 + /*
1569 + * We stop on finding another op,
 1570 +				 * regardless of whether it's for the same
1571 + * driver or not. We could keep
1572 + * searching the queue but it might be
1573 + * better to just use a per-driver
1574 + * queue instead.
1575 + */
1576 + if (CRYPTO_SESID2HID(submit->crp_sid) == hid)
1577 + hint = CRYPTO_HINT_MORE;
1578 + break;
1579 + } else {
1580 + submit = crp;
1581 + if ((submit->crp_flags & CRYPTO_F_BATCH) == 0)
1582 + break;
 1583 +				/* keep scanning for more that are q'd */
1587 + if (submit != NULL) {
1588 + hid = CRYPTO_SESID2HID(submit->crp_sid);
1589 + crypto_all_qblocked = 0;
1590 + list_del(&submit->crp_next);
1591 + crypto_drivers[hid].cc_qblocked = 1;
1592 + cap = crypto_checkdriver(hid);
1593 + CRYPTO_Q_UNLOCK();
1594 + KASSERT(cap != NULL, ("%s:%u Driver disappeared.",
1595 + __func__, __LINE__));
1596 + result = crypto_invoke(cap, submit, hint);
1597 + CRYPTO_Q_LOCK();
1598 + if (result == ERESTART) {
1599 + /*
1600 + * The driver ran out of resources, mark the
1601 + * driver ``blocked'' for cryptop's and put
1602 + * the request back in the queue. It would
 1603 +			 * be best to put the request back where we got
1604 + * it but that's hard so for now we put it
1605 + * at the front. This should be ok; putting
1606 + * it at the end does not work.
1607 + */
1608 + /* XXX validate sid again? */
1609 + list_add(&submit->crp_next, &crp_q);
1610 + cryptostats.cs_blocks++;
1611 + } else
1612 + crypto_drivers[hid].cc_qblocked=0;
1615 + crypto_all_kqblocked = !list_empty(&crp_kq);
1617 + /* As above, but for key ops */
1618 + krp = NULL;
1619 + list_for_each_entry(krpp, &crp_kq, krp_next) {
1620 + cap = crypto_checkdriver(krpp->krp_hid);
1621 + if (cap == NULL || cap->cc_dev == NULL) {
1622 + /*
1623 + * Operation needs to be migrated, invalidate
1624 + * the assigned device so it will reselect a
1625 + * new one below. Propagate the original
1626 + * crid selection flags if supplied.
1627 + */
 1628 +				krpp->krp_hid = krpp->krp_crid &
 1629 +				    (CRYPTOCAP_F_SOFTWARE|CRYPTOCAP_F_HARDWARE);
 1630 +				if (krpp->krp_hid == 0)
 1631 +					krpp->krp_hid =
 1632 +				    CRYPTOCAP_F_SOFTWARE|CRYPTOCAP_F_HARDWARE;
 1633 +				krp = krpp; break;	/* krp was still NULL here; select the migrated op */
1635 + if (!cap->cc_kqblocked) {
1636 + krp = krpp;
1637 + break;
1640 + if (krp != NULL) {
1641 + crypto_all_kqblocked = 0;
1642 + list_del(&krp->krp_next);
1643 + crypto_drivers[krp->krp_hid].cc_kqblocked = 1;
1644 + CRYPTO_Q_UNLOCK();
1645 + result = crypto_kinvoke(krp, krp->krp_hid);
1646 + CRYPTO_Q_LOCK();
1647 + if (result == ERESTART) {
1648 + /*
1649 + * The driver ran out of resources, mark the
1650 + * driver ``blocked'' for cryptkop's and put
1651 + * the request back in the queue. It would
 1652 +				 * be best to put the request back where we got
1653 + * it but that's hard so for now we put it
1654 + * at the front. This should be ok; putting
1655 + * it at the end does not work.
1656 + */
1657 + /* XXX validate sid again? */
1658 + list_add(&krp->krp_next, &crp_kq);
1659 + cryptostats.cs_kblocks++;
1660 + } else
1661 + crypto_drivers[krp->krp_hid].cc_kqblocked = 0;
1664 + if (submit == NULL && krp == NULL) {
1665 + /*
1666 + * Nothing more to be processed. Sleep until we're
1667 + * woken because there are more ops to process.
1668 + * This happens either by submission or by a driver
1669 + * becoming unblocked and notifying us through
1670 + * crypto_unblock. Note that when we wakeup we
1671 + * start processing each queue again from the
1672 + * front. It's not clear that it's important to
1673 + * preserve this ordering since ops may finish
1674 + * out of order if dispatched to different devices
1675 + * and some become blocked while others do not.
1676 + */
1677 + dprintk("%s - sleeping (qe=%d qb=%d kqe=%d kqb=%d)\n",
1678 + __FUNCTION__,
1679 + list_empty(&crp_q), crypto_all_qblocked,
1680 + list_empty(&crp_kq), crypto_all_kqblocked);
1681 + CRYPTO_Q_UNLOCK();
1682 + crp_sleep = 1;
1683 + wait_event_interruptible(cryptoproc_wait,
1684 + !(list_empty(&crp_q) || crypto_all_qblocked) ||
1685 + !(list_empty(&crp_kq) || crypto_all_kqblocked) ||
1686 + cryptoproc == (pid_t) -1);
1687 + crp_sleep = 0;
1688 + if (signal_pending (current)) {
1689 +#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0)
1690 + spin_lock_irq(&current->sigmask_lock);
1691 +#endif
1692 + flush_signals(current);
1693 +#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0)
1694 + spin_unlock_irq(&current->sigmask_lock);
1695 +#endif
1697 + CRYPTO_Q_LOCK();
1698 + dprintk("%s - awake\n", __FUNCTION__);
1699 + if (cryptoproc == (pid_t) -1)
1700 + break;
1701 + cryptostats.cs_intrs++;
1704 + CRYPTO_Q_UNLOCK();
1705 + complete_and_exit(&cryptoproc_exited, 0);
 1709 + * Crypto return thread; does callbacks for processed crypto requests.
1710 + * Callbacks are done here, rather than in the crypto drivers, because
1711 + * callbacks typically are expensive and would slow interrupt handling.
1712 + */
1713 +static int
1714 +crypto_ret_proc(void *arg)
1716 + struct cryptop *crpt;
1717 + struct cryptkop *krpt;
1718 + unsigned long r_flags;
1720 + ocf_daemonize("crypto_ret");
1722 + CRYPTO_RETQ_LOCK();
1723 + for (;;) {
1724 + /* Harvest return q's for completed ops */
1725 + crpt = NULL;
1726 + if (!list_empty(&crp_ret_q))
1727 + crpt = list_entry(crp_ret_q.next, typeof(*crpt), crp_next);
1728 + if (crpt != NULL)
1729 + list_del(&crpt->crp_next);
1731 + krpt = NULL;
1732 + if (!list_empty(&crp_ret_kq))
1733 + krpt = list_entry(crp_ret_kq.next, typeof(*krpt), krp_next);
1734 + if (krpt != NULL)
1735 + list_del(&krpt->krp_next);
1737 + if (crpt != NULL || krpt != NULL) {
1738 + CRYPTO_RETQ_UNLOCK();
1739 + /*
1740 + * Run callbacks unlocked.
1741 + */
1742 + if (crpt != NULL)
1743 + crpt->crp_callback(crpt);
1744 + if (krpt != NULL)
1745 + krpt->krp_callback(krpt);
1746 + CRYPTO_RETQ_LOCK();
1747 + } else {
1748 + /*
1749 + * Nothing more to be processed. Sleep until we're
1750 + * woken because there are more returns to process.
1751 + */
1752 + dprintk("%s - sleeping\n", __FUNCTION__);
1753 + CRYPTO_RETQ_UNLOCK();
1754 + wait_event_interruptible(cryptoretproc_wait,
1755 + cryptoretproc == (pid_t) -1 ||
1756 + !list_empty(&crp_ret_q) ||
1757 + !list_empty(&crp_ret_kq));
1758 + if (signal_pending (current)) {
1759 +#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0)
1760 + spin_lock_irq(&current->sigmask_lock);
1761 +#endif
1762 + flush_signals(current);
1763 +#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0)
1764 + spin_unlock_irq(&current->sigmask_lock);
1765 +#endif
1767 + CRYPTO_RETQ_LOCK();
1768 + dprintk("%s - awake\n", __FUNCTION__);
1769 + if (cryptoretproc == (pid_t) -1) {
1770 + dprintk("%s - EXITING!\n", __FUNCTION__);
1771 + break;
1773 + cryptostats.cs_rets++;
1776 + CRYPTO_RETQ_UNLOCK();
1777 + complete_and_exit(&cryptoretproc_exited, 0);
1781 +#if 0 /* should put this into /proc or something */
1782 +static void
1783 +db_show_drivers(void)
1785 + int hid;
1787 + db_printf("%12s %4s %4s %8s %2s %2s\n"
1788 + , "Device"
1789 + , "Ses"
1790 + , "Kops"
1791 + , "Flags"
1792 + , "QB"
1793 + , "KB"
1794 + );
1795 + for (hid = 0; hid < crypto_drivers_num; hid++) {
1796 + const struct cryptocap *cap = &crypto_drivers[hid];
1797 + if (cap->cc_dev == NULL)
1798 + continue;
1799 + db_printf("%-12s %4u %4u %08x %2u %2u\n"
1800 + , device_get_nameunit(cap->cc_dev)
1801 + , cap->cc_sessions
1802 + , cap->cc_koperations
1803 + , cap->cc_flags
1804 + , cap->cc_qblocked
1805 + , cap->cc_kqblocked
1806 + );
1810 +DB_SHOW_COMMAND(crypto, db_show_crypto)
1812 + struct cryptop *crp;
1814 + db_show_drivers();
1815 + db_printf("\n");
1817 + db_printf("%4s %8s %4s %4s %4s %4s %8s %8s\n",
1818 + "HID", "Caps", "Ilen", "Olen", "Etype", "Flags",
1819 + "Desc", "Callback");
1820 + TAILQ_FOREACH(crp, &crp_q, crp_next) {
1821 + db_printf("%4u %08x %4u %4u %4u %04x %8p %8p\n"
1822 + , (int) CRYPTO_SESID2HID(crp->crp_sid)
1823 + , (int) CRYPTO_SESID2CAPS(crp->crp_sid)
1824 + , crp->crp_ilen, crp->crp_olen
1825 + , crp->crp_etype
1826 + , crp->crp_flags
1827 + , crp->crp_desc
1828 + , crp->crp_callback
1829 + );
1831 + if (!TAILQ_EMPTY(&crp_ret_q)) {
1832 + db_printf("\n%4s %4s %4s %8s\n",
1833 + "HID", "Etype", "Flags", "Callback");
1834 + TAILQ_FOREACH(crp, &crp_ret_q, crp_next) {
1835 + db_printf("%4u %4u %04x %8p\n"
1836 + , (int) CRYPTO_SESID2HID(crp->crp_sid)
1837 + , crp->crp_etype
1838 + , crp->crp_flags
1839 + , crp->crp_callback
1840 + );
1845 +DB_SHOW_COMMAND(kcrypto, db_show_kcrypto)
1847 + struct cryptkop *krp;
1849 + db_show_drivers();
1850 + db_printf("\n");
1852 + db_printf("%4s %5s %4s %4s %8s %4s %8s\n",
1853 + "Op", "Status", "#IP", "#OP", "CRID", "HID", "Callback");
1854 + TAILQ_FOREACH(krp, &crp_kq, krp_next) {
1855 + db_printf("%4u %5u %4u %4u %08x %4u %8p\n"
1856 + , krp->krp_op
1857 + , krp->krp_status
1858 + , krp->krp_iparams, krp->krp_oparams
1859 + , krp->krp_crid, krp->krp_hid
1860 + , krp->krp_callback
1861 + );
1863 + if (!TAILQ_EMPTY(&crp_ret_q)) {
1864 + db_printf("%4s %5s %8s %4s %8s\n",
1865 + "Op", "Status", "CRID", "HID", "Callback");
1866 + TAILQ_FOREACH(krp, &crp_ret_kq, krp_next) {
1867 + db_printf("%4u %5u %08x %4u %8p\n"
1868 + , krp->krp_op
1869 + , krp->krp_status
1870 + , krp->krp_crid, krp->krp_hid
1871 + , krp->krp_callback
1872 + );
1876 +#endif
1879 +static int
1880 +crypto_init(void)
1882 + int error;
1884 + dprintk("%s(0x%x)\n", __FUNCTION__, (int) crypto_init);
1886 + if (crypto_initted)
1887 + return 0;
1888 + crypto_initted = 1;
1890 + spin_lock_init(&crypto_drivers_lock);
1891 + spin_lock_init(&crypto_q_lock);
1892 + spin_lock_init(&crypto_ret_q_lock);
1894 + cryptop_zone = kmem_cache_create("cryptop", sizeof(struct cryptop),
1895 + 0, SLAB_HWCACHE_ALIGN, NULL
1896 +#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,23)
1897 + , NULL
1898 +#endif
1899 + );
1901 + cryptodesc_zone = kmem_cache_create("cryptodesc", sizeof(struct cryptodesc),
1902 + 0, SLAB_HWCACHE_ALIGN, NULL
1903 +#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,23)
1904 + , NULL
1905 +#endif
1906 + );
1908 + if (cryptodesc_zone == NULL || cryptop_zone == NULL) {
1909 + printk("crypto: crypto_init cannot setup crypto zones\n");
1910 + error = ENOMEM;
1911 + goto bad;
1914 + crypto_drivers_num = CRYPTO_DRIVERS_INITIAL;
1915 + crypto_drivers = kmalloc(crypto_drivers_num * sizeof(struct cryptocap),
1916 + GFP_KERNEL);
1917 + if (crypto_drivers == NULL) {
1918 + printk("crypto: crypto_init cannot setup crypto drivers\n");
1919 + error = ENOMEM;
1920 + goto bad;
1923 + memset(crypto_drivers, 0, crypto_drivers_num * sizeof(struct cryptocap));
1925 + init_completion(&cryptoproc_exited);
1926 + init_completion(&cryptoretproc_exited);
1928 + cryptoproc = 0; /* to avoid race condition where proc runs first */
1929 + cryptoproc = kernel_thread(crypto_proc, NULL, CLONE_FS|CLONE_FILES);
1930 + if (cryptoproc < 0) {
1931 + error = cryptoproc;
1932 + printk("crypto: crypto_init cannot start crypto thread; error %d",
1933 + error);
1934 + goto bad;
1937 + cryptoretproc = 0; /* to avoid race condition where proc runs first */
1938 + cryptoretproc = kernel_thread(crypto_ret_proc, NULL, CLONE_FS|CLONE_FILES);
1939 + if (cryptoretproc < 0) {
1940 + error = cryptoretproc;
1941 + printk("crypto: crypto_init cannot start cryptoret thread; error %d",
1942 + error);
1943 + goto bad;
1946 + return 0;
1947 +bad:
1948 + crypto_exit();
1949 + return error;
1953 +static void
1954 +crypto_exit(void)
1956 + pid_t p;
1957 + unsigned long d_flags;
1959 + dprintk("%s()\n", __FUNCTION__);
1961 + /*
1962 + * Terminate any crypto threads.
1963 + */
1965 + CRYPTO_DRIVER_LOCK();
1966 + p = cryptoproc;
1967 + cryptoproc = (pid_t) -1;
1968 + kill_pid(p, SIGTERM, 1);
1969 + wake_up_interruptible(&cryptoproc_wait);
1970 + CRYPTO_DRIVER_UNLOCK();
1972 + wait_for_completion(&cryptoproc_exited);
1974 + CRYPTO_DRIVER_LOCK();
1975 + p = cryptoretproc;
1976 + cryptoretproc = (pid_t) -1;
1977 + kill_pid(p, SIGTERM, 1);
1978 + wake_up_interruptible(&cryptoretproc_wait);
1979 + CRYPTO_DRIVER_UNLOCK();
1981 + wait_for_completion(&cryptoretproc_exited);
1983 + /* XXX flush queues??? */
1985 + /*
1986 + * Reclaim dynamically allocated resources.
1987 + */
1988 + if (crypto_drivers != NULL)
1989 + kfree(crypto_drivers);
1991 + if (cryptodesc_zone != NULL)
1992 + kmem_cache_destroy(cryptodesc_zone);
1993 + if (cryptop_zone != NULL)
1994 + kmem_cache_destroy(cryptop_zone);
1998 +EXPORT_SYMBOL(crypto_newsession);
1999 +EXPORT_SYMBOL(crypto_freesession);
2000 +EXPORT_SYMBOL(crypto_get_driverid);
2001 +EXPORT_SYMBOL(crypto_kregister);
2002 +EXPORT_SYMBOL(crypto_register);
2003 +EXPORT_SYMBOL(crypto_unregister);
2004 +EXPORT_SYMBOL(crypto_unregister_all);
2005 +EXPORT_SYMBOL(crypto_unblock);
2006 +EXPORT_SYMBOL(crypto_dispatch);
2007 +EXPORT_SYMBOL(crypto_kdispatch);
2008 +EXPORT_SYMBOL(crypto_freereq);
2009 +EXPORT_SYMBOL(crypto_getreq);
2010 +EXPORT_SYMBOL(crypto_done);
2011 +EXPORT_SYMBOL(crypto_kdone);
2012 +EXPORT_SYMBOL(crypto_getfeat);
2013 +EXPORT_SYMBOL(crypto_userasymcrypto);
2014 +EXPORT_SYMBOL(crypto_getcaps);
2015 +EXPORT_SYMBOL(crypto_find_driver);
2016 +EXPORT_SYMBOL(crypto_find_device_byhid);
2018 +module_init(crypto_init);
2019 +module_exit(crypto_exit);
2021 +MODULE_LICENSE("BSD");
2022 +MODULE_AUTHOR("David McCullough <david_mccullough@securecomputing.com>");
2023 +MODULE_DESCRIPTION("OCF (OpenBSD Cryptographic Framework)");
2024 diff -Nur linux-2.6.30.orig/crypto/ocf/cryptodev.c linux-2.6.30/crypto/ocf/cryptodev.c
2025 --- linux-2.6.30.orig/crypto/ocf/cryptodev.c 1970-01-01 01:00:00.000000000 +0100
2026 +++ linux-2.6.30/crypto/ocf/cryptodev.c 2009-06-11 10:55:27.000000000 +0200
2027 @@ -0,0 +1,1048 @@
2028 +/* $OpenBSD: cryptodev.c,v 1.52 2002/06/19 07:22:46 deraadt Exp $ */
2030 +/*-
2031 + * Linux port done by David McCullough <david_mccullough@securecomputing.com>
2032 + * Copyright (C) 2006-2007 David McCullough
2033 + * Copyright (C) 2004-2005 Intel Corporation.
2034 + * The license and original author are listed below.
2036 + * Copyright (c) 2001 Theo de Raadt
2037 + * Copyright (c) 2002-2006 Sam Leffler, Errno Consulting
2039 + * Redistribution and use in source and binary forms, with or without
2040 + * modification, are permitted provided that the following conditions
2041 + * are met:
2043 + * 1. Redistributions of source code must retain the above copyright
2044 + * notice, this list of conditions and the following disclaimer.
2045 + * 2. Redistributions in binary form must reproduce the above copyright
2046 + * notice, this list of conditions and the following disclaimer in the
2047 + * documentation and/or other materials provided with the distribution.
2048 + * 3. The name of the author may not be used to endorse or promote products
2049 + * derived from this software without specific prior written permission.
2051 + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
2052 + * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
2053 + * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
2054 + * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
2055 + * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
2056 + * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
2057 + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
2058 + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
2059 + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
2060 + * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
2062 + * Effort sponsored in part by the Defense Advanced Research Projects
2063 + * Agency (DARPA) and Air Force Research Laboratory, Air Force
2064 + * Materiel Command, USAF, under agreement number F30602-01-2-0537.
2066 +__FBSDID("$FreeBSD: src/sys/opencrypto/cryptodev.c,v 1.34 2007/05/09 19:37:02 gnn Exp $");
2067 + */
2069 +#ifndef AUTOCONF_INCLUDED
2070 +#include <linux/config.h>
2071 +#endif
2072 +#include <linux/types.h>
2073 +#include <linux/time.h>
2074 +#include <linux/delay.h>
2075 +#include <linux/list.h>
2076 +#include <linux/init.h>
2077 +#include <linux/sched.h>
2078 +#include <linux/unistd.h>
2079 +#include <linux/module.h>
2080 +#include <linux/wait.h>
2081 +#include <linux/slab.h>
2082 +#include <linux/fs.h>
2083 +#include <linux/dcache.h>
2084 +#include <linux/file.h>
2085 +#include <linux/mount.h>
2086 +#include <linux/miscdevice.h>
2087 +#include <linux/version.h>
2088 +#include <asm/uaccess.h>
2090 +#include <cryptodev.h>
2091 +#include <uio.h>
2093 +extern asmlinkage long sys_dup(unsigned int fildes);
2095 +#define debug cryptodev_debug
2096 +int cryptodev_debug = 0;
2097 +module_param(cryptodev_debug, int, 0644);
2098 +MODULE_PARM_DESC(cryptodev_debug, "Enable cryptodev debug");
2100 +struct csession_info {
2101 + u_int16_t blocksize;
2102 + u_int16_t minkey, maxkey;
2104 + u_int16_t keysize;
2105 + /* u_int16_t hashsize; */
2106 + u_int16_t authsize;
2107 + /* u_int16_t ctxsize; */
2110 +struct csession {
2111 + struct list_head list;
2112 + u_int64_t sid;
2113 + u_int32_t ses;
2115 + wait_queue_head_t waitq;
2117 + u_int32_t cipher;
2119 + u_int32_t mac;
2121 + caddr_t key;
2122 + int keylen;
2123 + u_char tmp_iv[EALG_MAX_BLOCK_LEN];
2125 + caddr_t mackey;
2126 + int mackeylen;
2128 + struct csession_info info;
2130 + struct iovec iovec;
2131 + struct uio uio;
2132 + int error;
2135 +struct fcrypt {
2136 + struct list_head csessions;
2137 + int sesn;
2140 +static struct csession *csefind(struct fcrypt *, u_int);
2141 +static int csedelete(struct fcrypt *, struct csession *);
2142 +static struct csession *cseadd(struct fcrypt *, struct csession *);
2143 +static struct csession *csecreate(struct fcrypt *, u_int64_t,
2144 + struct cryptoini *crie, struct cryptoini *cria, struct csession_info *);
2145 +static int csefree(struct csession *);
2147 +static int cryptodev_op(struct csession *, struct crypt_op *);
2148 +static int cryptodev_key(struct crypt_kop *);
2149 +static int cryptodev_find(struct crypt_find_op *);
2151 +static int cryptodev_cb(void *);
2152 +static int cryptodev_open(struct inode *inode, struct file *filp);
2155 + * Check a crypto identifier to see if it requested
2156 + * a valid crid and its capabilities match.
2157 + */
2158 +static int
2159 +checkcrid(int crid)
2161 + int hid = crid & ~(CRYPTOCAP_F_SOFTWARE | CRYPTOCAP_F_HARDWARE);
2162 + int typ = crid & (CRYPTOCAP_F_SOFTWARE | CRYPTOCAP_F_HARDWARE);
2163 + int caps = 0;
2165 + /* if the user hasn't selected a driver, then just call newsession */
2166 + if (hid == 0 && typ != 0)
2167 + return 0;
2169 + caps = crypto_getcaps(hid);
2171 + /* didn't find anything with capabilities */
2172 + if (caps == 0) {
2173 + dprintk("%s: hid=%x typ=%x not matched\n", __FUNCTION__, hid, typ);
2174 + return EINVAL;
2177 + /* the user didn't specify SW or HW, so the driver is ok */
2178 + if (typ == 0)
2179 + return 0;
2181 + /* if the type specified didn't match */
2182 + if (typ != (caps & (CRYPTOCAP_F_SOFTWARE | CRYPTOCAP_F_HARDWARE))) {
2183 + dprintk("%s: hid=%x typ=%x caps=%x not matched\n", __FUNCTION__,
2184 + hid, typ, caps);
2185 + return EINVAL;
2188 + return 0;
2191 +static int
2192 +cryptodev_op(struct csession *cse, struct crypt_op *cop)
2194 + struct cryptop *crp = NULL;
2195 + struct cryptodesc *crde = NULL, *crda = NULL;
2196 + int error = 0;
2198 + dprintk("%s()\n", __FUNCTION__);
2199 + if (cop->len > CRYPTO_MAX_DATA_LEN) {
2200 + dprintk("%s: %d > %d\n", __FUNCTION__, cop->len, CRYPTO_MAX_DATA_LEN);
2201 + return (E2BIG);
2204 + if (cse->info.blocksize && (cop->len % cse->info.blocksize) != 0) {
2205 + dprintk("%s: blocksize=%d len=%d\n", __FUNCTION__, cse->info.blocksize,
2206 + cop->len);
2207 + return (EINVAL);
2210 + cse->uio.uio_iov = &cse->iovec;
2211 + cse->uio.uio_iovcnt = 1;
2212 + cse->uio.uio_offset = 0;
2213 +#if 0
2214 + cse->uio.uio_resid = cop->len;
2215 + cse->uio.uio_segflg = UIO_SYSSPACE;
2216 + cse->uio.uio_rw = UIO_WRITE;
2217 + cse->uio.uio_td = td;
2218 +#endif
2219 + cse->uio.uio_iov[0].iov_len = cop->len;
2220 + if (cse->info.authsize)
2221 + cse->uio.uio_iov[0].iov_len += cse->info.authsize;
2222 + cse->uio.uio_iov[0].iov_base = kmalloc(cse->uio.uio_iov[0].iov_len,
2223 + GFP_KERNEL);
2225 + if (cse->uio.uio_iov[0].iov_base == NULL) {
2226 + dprintk("%s: iov_base kmalloc(%d) failed\n", __FUNCTION__,
2227 + cse->uio.uio_iov[0].iov_len);
2228 + return (ENOMEM);
2231 + crp = crypto_getreq((cse->info.blocksize != 0) + (cse->info.authsize != 0));
2232 + if (crp == NULL) {
2233 + dprintk("%s: ENOMEM\n", __FUNCTION__);
2234 + error = ENOMEM;
2235 + goto bail;
2238 + if (cse->info.authsize) {
2239 + crda = crp->crp_desc;
2240 + if (cse->info.blocksize)
2241 + crde = crda->crd_next;
2242 + } else {
2243 + if (cse->info.blocksize)
2244 + crde = crp->crp_desc;
2245 + else {
2246 + dprintk("%s: bad request\n", __FUNCTION__);
2247 + error = EINVAL;
2248 + goto bail;
2252 + if ((error = copy_from_user(cse->uio.uio_iov[0].iov_base, cop->src,
2253 + cop->len))) {
2254 + dprintk("%s: bad copy\n", __FUNCTION__);
2255 + goto bail;
2258 + if (crda) {
2259 + crda->crd_skip = 0;
2260 + crda->crd_len = cop->len;
2261 + crda->crd_inject = cop->len;
2263 + crda->crd_alg = cse->mac;
2264 + crda->crd_key = cse->mackey;
2265 + crda->crd_klen = cse->mackeylen * 8;
2268 + if (crde) {
2269 + if (cop->op == COP_ENCRYPT)
2270 + crde->crd_flags |= CRD_F_ENCRYPT;
2271 + else
2272 + crde->crd_flags &= ~CRD_F_ENCRYPT;
2273 + crde->crd_len = cop->len;
2274 + crde->crd_inject = 0;
2276 + crde->crd_alg = cse->cipher;
2277 + crde->crd_key = cse->key;
2278 + crde->crd_klen = cse->keylen * 8;
2281 + crp->crp_ilen = cse->uio.uio_iov[0].iov_len;
2282 + crp->crp_flags = CRYPTO_F_IOV | CRYPTO_F_CBIMM
2283 + | (cop->flags & COP_F_BATCH);
2284 + crp->crp_buf = (caddr_t)&cse->uio;
2285 + crp->crp_callback = (int (*) (struct cryptop *)) cryptodev_cb;
2286 + crp->crp_sid = cse->sid;
2287 + crp->crp_opaque = (void *)cse;
2289 + if (cop->iv) {
2290 + if (crde == NULL) {
2291 + error = EINVAL;
2292 + dprintk("%s no crde\n", __FUNCTION__);
2293 + goto bail;
2295 + if (cse->cipher == CRYPTO_ARC4) { /* XXX use flag? */
2296 + error = EINVAL;
2297 + dprintk("%s arc4 with IV\n", __FUNCTION__);
2298 + goto bail;
2300 + if ((error = copy_from_user(cse->tmp_iv, cop->iv,
2301 + cse->info.blocksize))) {
2302 + dprintk("%s bad iv copy\n", __FUNCTION__);
2303 + goto bail;
2305 + memcpy(crde->crd_iv, cse->tmp_iv, cse->info.blocksize);
2306 + crde->crd_flags |= CRD_F_IV_EXPLICIT | CRD_F_IV_PRESENT;
2307 + crde->crd_skip = 0;
2308 + } else if (cse->cipher == CRYPTO_ARC4) { /* XXX use flag? */
2309 + crde->crd_skip = 0;
2310 + } else if (crde) {
2311 + crde->crd_flags |= CRD_F_IV_PRESENT;
2312 + crde->crd_skip = cse->info.blocksize;
2313 + crde->crd_len -= cse->info.blocksize;
2316 + if (cop->mac && crda == NULL) {
2317 + error = EINVAL;
2318 + dprintk("%s no crda\n", __FUNCTION__);
2319 + goto bail;
2322 + /*
2323 + * Let the dispatch run unlocked, then, interlock against the
2324 + * callback before checking if the operation completed and going
2325 + * to sleep. This ensures drivers don't inherit our lock which
2326 + * results in a lock order reversal between crypto_dispatch forced
2327 + * entry and the crypto_done callback into us.
2328 + */
2329 + error = crypto_dispatch(crp);
2330 + if (error == 0) {
2331 + dprintk("%s about to WAIT\n", __FUNCTION__);
2332 + /*
2333 + * we really need to wait for the driver to complete to maintain
2334 + * state; luckily interrupts will be remembered
2335 + */
2336 + do {
2337 + error = wait_event_interruptible(crp->crp_waitq,
2338 + ((crp->crp_flags & CRYPTO_F_DONE) != 0));
2339 + /*
2340 + * we can't break out of this loop or we will leave behind
2341 + * a huge mess, however, staying here means if your driver
2342 + * is broken user applications can hang and not be killed.
2343 + * The solution, fix your driver :-)
2344 + */
2345 + if (error) {
2346 + schedule();
2347 + error = 0;
2349 + } while ((crp->crp_flags & CRYPTO_F_DONE) == 0);
2350 + dprintk("%s finished WAITING error=%d\n", __FUNCTION__, error);
2353 + if (crp->crp_etype != 0) {
2354 + error = crp->crp_etype;
2355 + dprintk("%s error in crp processing\n", __FUNCTION__);
2356 + goto bail;
2359 + if (cse->error) {
2360 + error = cse->error;
2361 + dprintk("%s error in cse processing\n", __FUNCTION__);
2362 + goto bail;
2365 + if (cop->dst && (error = copy_to_user(cop->dst,
2366 + cse->uio.uio_iov[0].iov_base, cop->len))) {
2367 + dprintk("%s bad dst copy\n", __FUNCTION__);
2368 + goto bail;
2371 + if (cop->mac &&
2372 + (error=copy_to_user(cop->mac,
2373 + (caddr_t)cse->uio.uio_iov[0].iov_base + cop->len,
2374 + cse->info.authsize))) {
2375 + dprintk("%s bad mac copy\n", __FUNCTION__);
2376 + goto bail;
2379 +bail:
2380 + if (crp)
2381 + crypto_freereq(crp);
2382 + if (cse->uio.uio_iov[0].iov_base)
2383 + kfree(cse->uio.uio_iov[0].iov_base);
2385 + return (error);
2388 +static int
2389 +cryptodev_cb(void *op)
2391 + struct cryptop *crp = (struct cryptop *) op;
2392 + struct csession *cse = (struct csession *)crp->crp_opaque;
2393 + int error;
2395 + dprintk("%s()\n", __FUNCTION__);
2396 + error = crp->crp_etype;
2397 + if (error == EAGAIN) {
2398 + crp->crp_flags &= ~CRYPTO_F_DONE;
2399 +#ifdef NOTYET
2400 + /*
2401 + * DAVIDM I am fairly sure that we should turn this into a batch
2402 + * request to stop bad karma/lockup, revisit
2403 + */
2404 + crp->crp_flags |= CRYPTO_F_BATCH;
2405 +#endif
2406 + return crypto_dispatch(crp);
2408 + if (error != 0 || (crp->crp_flags & CRYPTO_F_DONE)) {
2409 + cse->error = error;
2410 + wake_up_interruptible(&crp->crp_waitq);
2412 + return (0);
2415 +static int
2416 +cryptodevkey_cb(void *op)
2418 + struct cryptkop *krp = (struct cryptkop *) op;
2419 + dprintk("%s()\n", __FUNCTION__);
2420 + wake_up_interruptible(&krp->krp_waitq);
2421 + return (0);
2424 +static int
2425 +cryptodev_key(struct crypt_kop *kop)
2427 + struct cryptkop *krp = NULL;
2428 + int error = EINVAL;
2429 + int in, out, size, i;
2431 + dprintk("%s()\n", __FUNCTION__);
2432 + if (kop->crk_iparams + kop->crk_oparams > CRK_MAXPARAM) {
2433 + dprintk("%s params too big\n", __FUNCTION__);
2434 + return (EFBIG);
2437 + in = kop->crk_iparams;
2438 + out = kop->crk_oparams;
2439 + switch (kop->crk_op) {
2440 + case CRK_MOD_EXP:
2441 + if (in == 3 && out == 1)
2442 + break;
2443 + return (EINVAL);
2444 + case CRK_MOD_EXP_CRT:
2445 + if (in == 6 && out == 1)
2446 + break;
2447 + return (EINVAL);
2448 + case CRK_DSA_SIGN:
2449 + if (in == 5 && out == 2)
2450 + break;
2451 + return (EINVAL);
2452 + case CRK_DSA_VERIFY:
2453 + if (in == 7 && out == 0)
2454 + break;
2455 + return (EINVAL);
2456 + case CRK_DH_COMPUTE_KEY:
2457 + if (in == 3 && out == 1)
2458 + break;
2459 + return (EINVAL);
2460 + default:
2461 + return (EINVAL);
2464 + krp = (struct cryptkop *)kmalloc(sizeof *krp, GFP_KERNEL);
2465 + if (!krp)
2466 + return (ENOMEM);
2467 + bzero(krp, sizeof *krp);
2468 + krp->krp_op = kop->crk_op;
2469 + krp->krp_status = kop->crk_status;
2470 + krp->krp_iparams = kop->crk_iparams;
2471 + krp->krp_oparams = kop->crk_oparams;
2472 + krp->krp_crid = kop->crk_crid;
2473 + krp->krp_status = 0;
2474 + krp->krp_flags = CRYPTO_KF_CBIMM;
2475 + krp->krp_callback = (int (*) (struct cryptkop *)) cryptodevkey_cb;
2476 + init_waitqueue_head(&krp->krp_waitq);
2478 + for (i = 0; i < CRK_MAXPARAM; i++)
2479 + krp->krp_param[i].crp_nbits = kop->crk_param[i].crp_nbits;
2480 + for (i = 0; i < krp->krp_iparams + krp->krp_oparams; i++) {
2481 + size = (krp->krp_param[i].crp_nbits + 7) / 8;
2482 + if (size == 0)
2483 + continue;
2484 + krp->krp_param[i].crp_p = (caddr_t) kmalloc(size, GFP_KERNEL);
2485 + if (i >= krp->krp_iparams)
2486 + continue;
2487 + error = copy_from_user(krp->krp_param[i].crp_p,
2488 + kop->crk_param[i].crp_p, size);
2489 + if (error)
2490 + goto fail;
2493 + error = crypto_kdispatch(krp);
2494 + if (error)
2495 + goto fail;
2497 + do {
2498 + error = wait_event_interruptible(krp->krp_waitq,
2499 + ((krp->krp_flags & CRYPTO_KF_DONE) != 0));
2500 + /*
2501 + * we can't break out of this loop or we will leave behind
2502 + * a huge mess, however, staying here means if your driver
2503 + * is broken user applications can hang and not be killed.
2504 + * The solution, fix your driver :-)
2505 + */
2506 + if (error) {
2507 + schedule();
2508 + error = 0;
2510 + } while ((krp->krp_flags & CRYPTO_KF_DONE) == 0);
2512 + dprintk("%s finished WAITING error=%d\n", __FUNCTION__, error);
2514 + kop->crk_crid = krp->krp_crid; /* device that did the work */
2515 + if (krp->krp_status != 0) {
2516 + error = krp->krp_status;
2517 + goto fail;
2520 + for (i = krp->krp_iparams; i < krp->krp_iparams + krp->krp_oparams; i++) {
2521 + size = (krp->krp_param[i].crp_nbits + 7) / 8;
2522 + if (size == 0)
2523 + continue;
2524 + error = copy_to_user(kop->crk_param[i].crp_p, krp->krp_param[i].crp_p,
2525 + size);
2526 + if (error)
2527 + goto fail;
2530 +fail:
2531 + if (krp) {
2532 + kop->crk_status = krp->krp_status;
2533 + for (i = 0; i < CRK_MAXPARAM; i++) {
2534 + if (krp->krp_param[i].crp_p)
2535 + kfree(krp->krp_param[i].crp_p);
2537 + kfree(krp);
2539 + return (error);
2542 +static int
2543 +cryptodev_find(struct crypt_find_op *find)
2545 + device_t dev;
2547 + if (find->crid != -1) {
2548 + dev = crypto_find_device_byhid(find->crid);
2549 + if (dev == NULL)
2550 + return (ENOENT);
2551 + strlcpy(find->name, device_get_nameunit(dev),
2552 + sizeof(find->name));
2553 + } else {
2554 + find->crid = crypto_find_driver(find->name);
2555 + if (find->crid == -1)
2556 + return (ENOENT);
2558 + return (0);
2561 +static struct csession *
2562 +csefind(struct fcrypt *fcr, u_int ses)
2564 + struct csession *cse;
2566 + dprintk("%s()\n", __FUNCTION__);
2567 + list_for_each_entry(cse, &fcr->csessions, list)
2568 + if (cse->ses == ses)
2569 + return (cse);
2570 + return (NULL);
2573 +static int
2574 +csedelete(struct fcrypt *fcr, struct csession *cse_del)
2576 + struct csession *cse;
2578 + dprintk("%s()\n", __FUNCTION__);
2579 + list_for_each_entry(cse, &fcr->csessions, list) {
2580 + if (cse == cse_del) {
2581 + list_del(&cse->list);
2582 + return (1);
2585 + return (0);
2588 +static struct csession *
2589 +cseadd(struct fcrypt *fcr, struct csession *cse)
2591 + dprintk("%s()\n", __FUNCTION__);
2592 + list_add_tail(&cse->list, &fcr->csessions);
2593 + cse->ses = fcr->sesn++;
2594 + return (cse);
2597 +static struct csession *
2598 +csecreate(struct fcrypt *fcr, u_int64_t sid, struct cryptoini *crie,
2599 + struct cryptoini *cria, struct csession_info *info)
2601 + struct csession *cse;
2603 + dprintk("%s()\n", __FUNCTION__);
2604 + cse = (struct csession *) kmalloc(sizeof(struct csession), GFP_KERNEL);
2605 + if (cse == NULL)
2606 + return NULL;
2607 + memset(cse, 0, sizeof(struct csession));
2609 + INIT_LIST_HEAD(&cse->list);
2610 + init_waitqueue_head(&cse->waitq);
2612 + cse->key = crie->cri_key;
2613 + cse->keylen = crie->cri_klen/8;
2614 + cse->mackey = cria->cri_key;
2615 + cse->mackeylen = cria->cri_klen/8;
2616 + cse->sid = sid;
2617 + cse->cipher = crie->cri_alg;
2618 + cse->mac = cria->cri_alg;
2619 + cse->info = *info;
2620 + cseadd(fcr, cse);
2621 + return (cse);
2624 +static int
2625 +csefree(struct csession *cse)
2627 + int error;
2629 + dprintk("%s()\n", __FUNCTION__);
2630 + error = crypto_freesession(cse->sid);
2631 + if (cse->key)
2632 + kfree(cse->key);
2633 + if (cse->mackey)
2634 + kfree(cse->mackey);
2635 + kfree(cse);
2636 + return(error);
2639 +static int
2640 +cryptodev_ioctl(
2641 + struct inode *inode,
2642 + struct file *filp,
2643 + unsigned int cmd,
2644 + unsigned long arg)
2646 + struct cryptoini cria, crie;
2647 + struct fcrypt *fcr = filp->private_data;
2648 + struct csession *cse;
2649 + struct csession_info info;
2650 + struct session2_op sop;
2651 + struct crypt_op cop;
2652 + struct crypt_kop kop;
2653 + struct crypt_find_op fop;
2654 + u_int64_t sid;
2655 + u_int32_t ses;
2656 + int feat, fd, error = 0, crid;
2657 + mm_segment_t fs;
2659 + dprintk("%s(cmd=%x arg=%lx)\n", __FUNCTION__, cmd, arg);
2661 + switch (cmd) {
2663 + case CRIOGET: {
2664 + dprintk("%s(CRIOGET)\n", __FUNCTION__);
2665 + fs = get_fs();
2666 + set_fs(get_ds());
2667 + for (fd = 0; fd < files_fdtable(current->files)->max_fds; fd++)
2668 + if (files_fdtable(current->files)->fd[fd] == filp)
2669 + break;
2670 + fd = sys_dup(fd);
2671 + set_fs(fs);
2672 + put_user(fd, (int *) arg);
2673 + return IS_ERR_VALUE(fd) ? fd : 0;
2676 +#define CIOCGSESSSTR (cmd == CIOCGSESSION ? "CIOCGSESSION" : "CIOCGSESSION2")
2677 + case CIOCGSESSION:
2678 + case CIOCGSESSION2:
2679 + dprintk("%s(%s)\n", __FUNCTION__, CIOCGSESSSTR);
2680 + memset(&crie, 0, sizeof(crie));
2681 + memset(&cria, 0, sizeof(cria));
2682 + memset(&info, 0, sizeof(info));
2683 + memset(&sop, 0, sizeof(sop));
2685 + if (copy_from_user(&sop, (void*)arg, (cmd == CIOCGSESSION) ?
2686 + sizeof(struct session_op) : sizeof(sop))) {
2687 + dprintk("%s(%s) - bad copy\n", __FUNCTION__, CIOCGSESSSTR);
2688 + error = EFAULT;
2689 + goto bail;
2692 + switch (sop.cipher) {
2693 + case 0:
2694 + dprintk("%s(%s) - no cipher\n", __FUNCTION__, CIOCGSESSSTR);
2695 + break;
2696 + case CRYPTO_NULL_CBC:
2697 + info.blocksize = NULL_BLOCK_LEN;
2698 + info.minkey = NULL_MIN_KEY_LEN;
2699 + info.maxkey = NULL_MAX_KEY_LEN;
2700 + break;
2701 + case CRYPTO_DES_CBC:
2702 + info.blocksize = DES_BLOCK_LEN;
2703 + info.minkey = DES_MIN_KEY_LEN;
2704 + info.maxkey = DES_MAX_KEY_LEN;
2705 + break;
2706 + case CRYPTO_3DES_CBC:
2707 + info.blocksize = DES3_BLOCK_LEN;
2708 + info.minkey = DES3_MIN_KEY_LEN;
2709 + info.maxkey = DES3_MAX_KEY_LEN;
2710 + break;
2711 + case CRYPTO_BLF_CBC:
2712 + info.blocksize = BLOWFISH_BLOCK_LEN;
2713 + info.minkey = BLOWFISH_MIN_KEY_LEN;
2714 + info.maxkey = BLOWFISH_MAX_KEY_LEN;
2715 + break;
2716 + case CRYPTO_CAST_CBC:
2717 + info.blocksize = CAST128_BLOCK_LEN;
2718 + info.minkey = CAST128_MIN_KEY_LEN;
2719 + info.maxkey = CAST128_MAX_KEY_LEN;
2720 + break;
2721 + case CRYPTO_SKIPJACK_CBC:
2722 + info.blocksize = SKIPJACK_BLOCK_LEN;
2723 + info.minkey = SKIPJACK_MIN_KEY_LEN;
2724 + info.maxkey = SKIPJACK_MAX_KEY_LEN;
2725 + break;
2726 + case CRYPTO_AES_CBC:
2727 + info.blocksize = AES_BLOCK_LEN;
2728 + info.minkey = AES_MIN_KEY_LEN;
2729 + info.maxkey = AES_MAX_KEY_LEN;
2730 + break;
2731 + case CRYPTO_ARC4:
2732 + info.blocksize = ARC4_BLOCK_LEN;
2733 + info.minkey = ARC4_MIN_KEY_LEN;
2734 + info.maxkey = ARC4_MAX_KEY_LEN;
2735 + break;
2736 + case CRYPTO_CAMELLIA_CBC:
2737 + info.blocksize = CAMELLIA_BLOCK_LEN;
2738 + info.minkey = CAMELLIA_MIN_KEY_LEN;
2739 + info.maxkey = CAMELLIA_MAX_KEY_LEN;
2740 + break;
2741 + default:
2742 + dprintk("%s(%s) - bad cipher\n", __FUNCTION__, CIOCGSESSSTR);
2743 + error = EINVAL;
2744 + goto bail;
2747 + switch (sop.mac) {
2748 + case 0:
2749 + dprintk("%s(%s) - no mac\n", __FUNCTION__, CIOCGSESSSTR);
2750 + break;
2751 + case CRYPTO_NULL_HMAC:
2752 + info.authsize = NULL_HASH_LEN;
2753 + break;
2754 + case CRYPTO_MD5:
2755 + info.authsize = MD5_HASH_LEN;
2756 + break;
2757 + case CRYPTO_SHA1:
2758 + info.authsize = SHA1_HASH_LEN;
2759 + break;
2760 + case CRYPTO_SHA2_256:
2761 + info.authsize = SHA2_256_HASH_LEN;
2762 + break;
2763 + case CRYPTO_SHA2_384:
2764 + info.authsize = SHA2_384_HASH_LEN;
2765 + break;
2766 + case CRYPTO_SHA2_512:
2767 + info.authsize = SHA2_512_HASH_LEN;
2768 + break;
2769 + case CRYPTO_RIPEMD160:
2770 + info.authsize = RIPEMD160_HASH_LEN;
2771 + break;
2772 + case CRYPTO_MD5_HMAC:
2773 + info.authsize = MD5_HASH_LEN;
2774 + break;
2775 + case CRYPTO_SHA1_HMAC:
2776 + info.authsize = SHA1_HASH_LEN;
2777 + break;
2778 + case CRYPTO_SHA2_256_HMAC:
2779 + info.authsize = SHA2_256_HASH_LEN;
2780 + break;
2781 + case CRYPTO_SHA2_384_HMAC:
2782 + info.authsize = SHA2_384_HASH_LEN;
2783 + break;
2784 + case CRYPTO_SHA2_512_HMAC:
2785 + info.authsize = SHA2_512_HASH_LEN;
2786 + break;
2787 + case CRYPTO_RIPEMD160_HMAC:
2788 + info.authsize = RIPEMD160_HASH_LEN;
2789 + break;
2790 + default:
2791 + dprintk("%s(%s) - bad mac\n", __FUNCTION__, CIOCGSESSSTR);
2792 + error = EINVAL;
2793 + goto bail;
2796 + if (info.blocksize) {
2797 + crie.cri_alg = sop.cipher;
2798 + crie.cri_klen = sop.keylen * 8;
2799 + if ((info.maxkey && sop.keylen > info.maxkey) ||
2800 + sop.keylen < info.minkey) {
2801 + dprintk("%s(%s) - bad key\n", __FUNCTION__, CIOCGSESSSTR);
2802 + error = EINVAL;
2803 + goto bail;
2806 + crie.cri_key = (u_int8_t *) kmalloc(crie.cri_klen/8+1, GFP_KERNEL);
2807 + if (copy_from_user(crie.cri_key, sop.key,
2808 + crie.cri_klen/8)) {
2809 + dprintk("%s(%s) - bad copy\n", __FUNCTION__, CIOCGSESSSTR);
2810 + error = EFAULT;
2811 + goto bail;
2813 + if (info.authsize)
2814 + crie.cri_next = &cria;
2817 + if (info.authsize) {
2818 + cria.cri_alg = sop.mac;
2819 + cria.cri_klen = sop.mackeylen * 8;
2820 + if ((info.maxkey && sop.mackeylen > info.maxkey) ||
2821 + sop.keylen < info.minkey) {
2822 + dprintk("%s(%s) - mackeylen %d\n", __FUNCTION__, CIOCGSESSSTR,
2823 + sop.mackeylen);
2824 + error = EINVAL;
2825 + goto bail;
2828 + if (cria.cri_klen) {
2829 + cria.cri_key = (u_int8_t *) kmalloc(cria.cri_klen/8,GFP_KERNEL);
2830 + if (copy_from_user(cria.cri_key, sop.mackey,
2831 + cria.cri_klen / 8)) {
2832 + dprintk("%s(%s) - bad copy\n", __FUNCTION__, CIOCGSESSSTR);
2833 + error = EFAULT;
2834 + goto bail;
2839 + /* NB: CIOCGSESSION2 has the crid */
2840 + if (cmd == CIOCGSESSION2) {
2841 + crid = sop.crid;
2842 + error = checkcrid(crid);
2843 + if (error) {
2844 + dprintk("%s(%s) - checkcrid %x\n", __FUNCTION__,
2845 + CIOCGSESSSTR, error);
2846 + goto bail;
2848 + } else {
2849 + /* allow either HW or SW to be used */
2850 + crid = CRYPTOCAP_F_HARDWARE | CRYPTOCAP_F_SOFTWARE;
2852 + error = crypto_newsession(&sid, (info.blocksize ? &crie : &cria), crid);
2853 + if (error) {
2854 + dprintk("%s(%s) - newsession %d\n",__FUNCTION__,CIOCGSESSSTR,error);
2855 + goto bail;
2858 + cse = csecreate(fcr, sid, &crie, &cria, &info);
2859 + if (cse == NULL) {
2860 + crypto_freesession(sid);
2861 + error = EINVAL;
2862 + dprintk("%s(%s) - csecreate failed\n", __FUNCTION__, CIOCGSESSSTR);
2863 + goto bail;
2865 + sop.ses = cse->ses;
2867 + if (cmd == CIOCGSESSION2) {
2868 + /* return hardware/driver id */
2869 + sop.crid = CRYPTO_SESID2HID(cse->sid);
2872 + if (copy_to_user((void*)arg, &sop, (cmd == CIOCGSESSION) ?
2873 + sizeof(struct session_op) : sizeof(sop))) {
2874 + dprintk("%s(%s) - bad copy\n", __FUNCTION__, CIOCGSESSSTR);
2875 + error = EFAULT;
2877 +bail:
2878 + if (error) {
2879 + dprintk("%s(%s) - bail %d\n", __FUNCTION__, CIOCGSESSSTR, error);
2880 + if (crie.cri_key)
2881 + kfree(crie.cri_key);
2882 + if (cria.cri_key)
2883 + kfree(cria.cri_key);
2885 + break;
2886 + case CIOCFSESSION:
2887 + dprintk("%s(CIOCFSESSION)\n", __FUNCTION__);
2888 + get_user(ses, (uint32_t*)arg);
2889 + cse = csefind(fcr, ses);
2890 + if (cse == NULL) {
2891 + error = EINVAL;
2892 + dprintk("%s(CIOCFSESSION) - Fail %d\n", __FUNCTION__, error);
2893 + break;
2895 + csedelete(fcr, cse);
2896 + error = csefree(cse);
2897 + break;
2898 + case CIOCCRYPT:
2899 + dprintk("%s(CIOCCRYPT)\n", __FUNCTION__);
2900 + if(copy_from_user(&cop, (void*)arg, sizeof(cop))) {
2901 + dprintk("%s(CIOCCRYPT) - bad copy\n", __FUNCTION__);
2902 + error = EFAULT;
2903 + goto bail;
2905 + cse = csefind(fcr, cop.ses);
2906 + if (cse == NULL) {
2907 + error = EINVAL;
2908 + dprintk("%s(CIOCCRYPT) - Fail %d\n", __FUNCTION__, error);
2909 + break;
2911 + error = cryptodev_op(cse, &cop);
2912 + if(copy_to_user((void*)arg, &cop, sizeof(cop))) {
2913 + dprintk("%s(CIOCCRYPT) - bad return copy\n", __FUNCTION__);
2914 + error = EFAULT;
2915 + goto bail;
2917 + break;
2918 + case CIOCKEY:
2919 + case CIOCKEY2:
2920 + dprintk("%s(CIOCKEY)\n", __FUNCTION__);
2921 + if (!crypto_userasymcrypto)
2922 + return (EPERM); /* XXX compat? */
2923 + if(copy_from_user(&kop, (void*)arg, sizeof(kop))) {
2924 + dprintk("%s(CIOCKEY) - bad copy\n", __FUNCTION__);
2925 + error = EFAULT;
2926 + goto bail;
2928 + if (cmd == CIOCKEY) {
2929 + /* NB: crypto core enforces s/w driver use */
2930 + kop.crk_crid =
2931 + CRYPTOCAP_F_HARDWARE | CRYPTOCAP_F_SOFTWARE;
2933 + error = cryptodev_key(&kop);
2934 + if(copy_to_user((void*)arg, &kop, sizeof(kop))) {
2935 + dprintk("%s(CIOCGKEY) - bad return copy\n", __FUNCTION__);
2936 + error = EFAULT;
2937 + goto bail;
2939 + break;
2940 + case CIOCASYMFEAT:
2941 + dprintk("%s(CIOCASYMFEAT)\n", __FUNCTION__);
2942 + if (!crypto_userasymcrypto) {
2943 + /*
2944 + * NB: if user asym crypto operations are
2945 + * not permitted return "no algorithms"
2946 + * so well-behaved applications will just
2947 + * fallback to doing them in software.
2948 + */
2949 + feat = 0;
2950 + } else
2951 + error = crypto_getfeat(&feat);
2952 + if (!error) {
2953 + error = copy_to_user((void*)arg, &feat, sizeof(feat));
2955 + break;
2956 + case CIOCFINDDEV:
2957 + if (copy_from_user(&fop, (void*)arg, sizeof(fop))) {
2958 + dprintk("%s(CIOCFINDDEV) - bad copy\n", __FUNCTION__);
2959 + error = EFAULT;
2960 + goto bail;
2962 + error = cryptodev_find(&fop);
2963 + if (copy_to_user((void*)arg, &fop, sizeof(fop))) {
2964 + dprintk("%s(CIOCFINDDEV) - bad return copy\n", __FUNCTION__);
2965 + error = EFAULT;
2966 + goto bail;
2968 + break;
2969 + default:
2970 + dprintk("%s(unknown ioctl 0x%x)\n", __FUNCTION__, cmd);
2971 + error = EINVAL;
2972 + break;
2974 + return(-error);
2977 +#ifdef HAVE_UNLOCKED_IOCTL
2978 +static long
2979 +cryptodev_unlocked_ioctl(
2980 + struct file *filp,
2981 + unsigned int cmd,
2982 + unsigned long arg)
2984 + return cryptodev_ioctl(NULL, filp, cmd, arg);
2986 +#endif
2988 +static int
2989 +cryptodev_open(struct inode *inode, struct file *filp)
2991 + struct fcrypt *fcr;
2993 + dprintk("%s()\n", __FUNCTION__);
2994 + if (filp->private_data) {
2995 + printk("cryptodev: Private data already exists !\n");
2996 + return(0);
2999 + fcr = kmalloc(sizeof(*fcr), GFP_KERNEL);
3000 + if (!fcr) {
3001 + dprintk("%s() - malloc failed\n", __FUNCTION__);
3002 + return(-ENOMEM);
3004 + memset(fcr, 0, sizeof(*fcr));
3006 + INIT_LIST_HEAD(&fcr->csessions);
3007 + filp->private_data = fcr;
3008 + return(0);
3011 +static int
3012 +cryptodev_release(struct inode *inode, struct file *filp)
3014 + struct fcrypt *fcr = filp->private_data;
3015 + struct csession *cse, *tmp;
3017 + dprintk("%s()\n", __FUNCTION__);
3018 + if (!fcr) {
3019 + printk("cryptodev: No private data on release\n");
3020 + return(0);
3023 + list_for_each_entry_safe(cse, tmp, &fcr->csessions, list) {
3024 + list_del(&cse->list);
3025 + (void)csefree(cse);
3027 + filp->private_data = NULL;
3028 + kfree(fcr);
3029 + return(0);
3032 +static struct file_operations cryptodev_fops = {
3033 + .owner = THIS_MODULE,
3034 + .open = cryptodev_open,
3035 + .release = cryptodev_release,
3036 + .ioctl = cryptodev_ioctl,
3037 +#ifdef HAVE_UNLOCKED_IOCTL
3038 + .unlocked_ioctl = cryptodev_unlocked_ioctl,
3039 +#endif
3042 +static struct miscdevice cryptodev = {
3043 + .minor = CRYPTODEV_MINOR,
3044 + .name = "crypto",
3045 + .fops = &cryptodev_fops,
3048 +static int __init
3049 +cryptodev_init(void)
3051 + int rc;
3053 + dprintk("%s(%p)\n", __FUNCTION__, cryptodev_init);
3054 + rc = misc_register(&cryptodev);
3055 + if (rc) {
3056 + printk(KERN_ERR "cryptodev: registration of /dev/crypto failed\n");
3057 + return(rc);
3060 + return(0);
3063 +static void __exit
3064 +cryptodev_exit(void)
3066 + dprintk("%s()\n", __FUNCTION__);
3067 + misc_deregister(&cryptodev);
3070 +module_init(cryptodev_init);
3071 +module_exit(cryptodev_exit);
3073 +MODULE_LICENSE("BSD");
3074 +MODULE_AUTHOR("David McCullough <david_mccullough@securecomputing.com>");
3075 +MODULE_DESCRIPTION("Cryptodev (user interface to OCF)");
3076 diff -Nur linux-2.6.30.orig/crypto/ocf/cryptodev.h linux-2.6.30/crypto/ocf/cryptodev.h
3077 --- linux-2.6.30.orig/crypto/ocf/cryptodev.h 1970-01-01 01:00:00.000000000 +0100
3078 +++ linux-2.6.30/crypto/ocf/cryptodev.h 2009-06-11 10:55:27.000000000 +0200
3079 @@ -0,0 +1,478 @@
3080 +/* $FreeBSD: src/sys/opencrypto/cryptodev.h,v 1.25 2007/05/09 19:37:02 gnn Exp $ */
3081 +/* $OpenBSD: cryptodev.h,v 1.31 2002/06/11 11:14:29 beck Exp $ */
3083 +/*-
3084 + * Linux port done by David McCullough <david_mccullough@securecomputing.com>
3085 + * Copyright (C) 2006-2007 David McCullough
3086 + * Copyright (C) 2004-2005 Intel Corporation.
3087 + * The license and original author are listed below.
3089 + * The author of this code is Angelos D. Keromytis (angelos@cis.upenn.edu)
3090 + * Copyright (c) 2002-2006 Sam Leffler, Errno Consulting
3092 + * This code was written by Angelos D. Keromytis in Athens, Greece, in
3093 + * February 2000. Network Security Technologies Inc. (NSTI) kindly
3094 + * supported the development of this code.
3096 + * Copyright (c) 2000 Angelos D. Keromytis
3098 + * Permission to use, copy, and modify this software with or without fee
3099 + * is hereby granted, provided that this entire notice is included in
3100 + * all source code copies of any software which is or includes a copy or
3101 + * modification of this software.
3103 + * THIS SOFTWARE IS BEING PROVIDED "AS IS", WITHOUT ANY EXPRESS OR
3104 + * IMPLIED WARRANTY. IN PARTICULAR, NONE OF THE AUTHORS MAKES ANY
3105 + * REPRESENTATION OR WARRANTY OF ANY KIND CONCERNING THE
3106 + * MERCHANTABILITY OF THIS SOFTWARE OR ITS FITNESS FOR ANY PARTICULAR
3107 + * PURPOSE.
3109 + * Copyright (c) 2001 Theo de Raadt
3111 + * Redistribution and use in source and binary forms, with or without
3112 + * modification, are permitted provided that the following conditions
3113 + * are met:
3115 + * 1. Redistributions of source code must retain the above copyright
3116 + * notice, this list of conditions and the following disclaimer.
3117 + * 2. Redistributions in binary form must reproduce the above copyright
3118 + * notice, this list of conditions and the following disclaimer in the
3119 + * documentation and/or other materials provided with the distribution.
3120 + * 3. The name of the author may not be used to endorse or promote products
3121 + * derived from this software without specific prior written permission.
3123 + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
3124 + * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
3125 + * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
3126 + * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
3127 + * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
3128 + * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
3129 + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
3130 + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
3131 + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
3132 + * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
3134 + * Effort sponsored in part by the Defense Advanced Research Projects
3135 + * Agency (DARPA) and Air Force Research Laboratory, Air Force
3136 + * Materiel Command, USAF, under agreement number F30602-01-2-0537.
3138 + */
3140 +#ifndef _CRYPTO_CRYPTO_H_
3141 +#define _CRYPTO_CRYPTO_H_
3143 +/* Some initial values */
3144 +#define CRYPTO_DRIVERS_INITIAL 4
3145 +#define CRYPTO_SW_SESSIONS 32
3147 +/* Hash values */
3148 +#define NULL_HASH_LEN 0
3149 +#define MD5_HASH_LEN 16
3150 +#define SHA1_HASH_LEN 20
3151 +#define RIPEMD160_HASH_LEN 20
3152 +#define SHA2_256_HASH_LEN 32
3153 +#define SHA2_384_HASH_LEN 48
3154 +#define SHA2_512_HASH_LEN 64
3155 +#define MD5_KPDK_HASH_LEN 16
3156 +#define SHA1_KPDK_HASH_LEN 20
3157 +/* Maximum hash algorithm result length */
3158 +#define HASH_MAX_LEN SHA2_512_HASH_LEN /* Keep this updated */
3160 +/* HMAC values */
3161 +#define NULL_HMAC_BLOCK_LEN 1
3162 +#define MD5_HMAC_BLOCK_LEN 64
3163 +#define SHA1_HMAC_BLOCK_LEN 64
3164 +#define RIPEMD160_HMAC_BLOCK_LEN 64
3165 +#define SHA2_256_HMAC_BLOCK_LEN 64
3166 +#define SHA2_384_HMAC_BLOCK_LEN 128
3167 +#define SHA2_512_HMAC_BLOCK_LEN 128
3168 +/* Maximum HMAC block length */
3169 +#define HMAC_MAX_BLOCK_LEN SHA2_512_HMAC_BLOCK_LEN /* Keep this updated */
3170 +#define HMAC_IPAD_VAL 0x36
3171 +#define HMAC_OPAD_VAL 0x5C
3173 +/* Encryption algorithm block sizes */
3174 +#define NULL_BLOCK_LEN 1
3175 +#define DES_BLOCK_LEN 8
3176 +#define DES3_BLOCK_LEN 8
3177 +#define BLOWFISH_BLOCK_LEN 8
3178 +#define SKIPJACK_BLOCK_LEN 8
3179 +#define CAST128_BLOCK_LEN 8
3180 +#define RIJNDAEL128_BLOCK_LEN 16
3181 +#define AES_BLOCK_LEN RIJNDAEL128_BLOCK_LEN
3182 +#define CAMELLIA_BLOCK_LEN 16
3183 +#define ARC4_BLOCK_LEN 1
3184 +#define EALG_MAX_BLOCK_LEN AES_BLOCK_LEN /* Keep this updated */
3186 +/* Encryption algorithm min and max key sizes */
3187 +#define NULL_MIN_KEY_LEN 0
3188 +#define NULL_MAX_KEY_LEN 0
3189 +#define DES_MIN_KEY_LEN 8
3190 +#define DES_MAX_KEY_LEN 8
3191 +#define DES3_MIN_KEY_LEN 24
3192 +#define DES3_MAX_KEY_LEN 24
3193 +#define BLOWFISH_MIN_KEY_LEN 4
3194 +#define BLOWFISH_MAX_KEY_LEN 56
3195 +#define SKIPJACK_MIN_KEY_LEN 10
3196 +#define SKIPJACK_MAX_KEY_LEN 10
3197 +#define CAST128_MIN_KEY_LEN 5
3198 +#define CAST128_MAX_KEY_LEN 16
3199 +#define RIJNDAEL128_MIN_KEY_LEN 16
3200 +#define RIJNDAEL128_MAX_KEY_LEN 32
3201 +#define AES_MIN_KEY_LEN RIJNDAEL128_MIN_KEY_LEN
3202 +#define AES_MAX_KEY_LEN RIJNDAEL128_MAX_KEY_LEN
3203 +#define CAMELLIA_MIN_KEY_LEN 16
3204 +#define CAMELLIA_MAX_KEY_LEN 32
3205 +#define ARC4_MIN_KEY_LEN 1
3206 +#define ARC4_MAX_KEY_LEN 256
3208 +/* Max size of data that can be processed */
3209 +#define CRYPTO_MAX_DATA_LEN (64*1024 - 1)
3211 +#define CRYPTO_ALGORITHM_MIN 1
3212 +#define CRYPTO_DES_CBC 1
3213 +#define CRYPTO_3DES_CBC 2
3214 +#define CRYPTO_BLF_CBC 3
3215 +#define CRYPTO_CAST_CBC 4
3216 +#define CRYPTO_SKIPJACK_CBC 5
3217 +#define CRYPTO_MD5_HMAC 6
3218 +#define CRYPTO_SHA1_HMAC 7
3219 +#define CRYPTO_RIPEMD160_HMAC 8
3220 +#define CRYPTO_MD5_KPDK 9
3221 +#define CRYPTO_SHA1_KPDK 10
3222 +#define CRYPTO_RIJNDAEL128_CBC 11 /* 128 bit blocksize */
3223 +#define CRYPTO_AES_CBC 11 /* 128 bit blocksize -- the same as above */
3224 +#define CRYPTO_ARC4 12
3225 +#define CRYPTO_MD5 13
3226 +#define CRYPTO_SHA1 14
3227 +#define CRYPTO_NULL_HMAC 15
3228 +#define CRYPTO_NULL_CBC 16
3229 +#define CRYPTO_DEFLATE_COMP 17 /* Deflate compression algorithm */
3230 +#define CRYPTO_SHA2_256_HMAC 18
3231 +#define CRYPTO_SHA2_384_HMAC 19
3232 +#define CRYPTO_SHA2_512_HMAC 20
3233 +#define CRYPTO_CAMELLIA_CBC 21
3234 +#define CRYPTO_SHA2_256 22
3235 +#define CRYPTO_SHA2_384 23
3236 +#define CRYPTO_SHA2_512 24
3237 +#define CRYPTO_RIPEMD160 25
3238 +#define CRYPTO_ALGORITHM_MAX 25 /* Keep updated - see below */
3240 +/* Algorithm flags */
3241 +#define CRYPTO_ALG_FLAG_SUPPORTED 0x01 /* Algorithm is supported */
3242 +#define CRYPTO_ALG_FLAG_RNG_ENABLE 0x02 /* Has HW RNG for DH/DSA */
3243 +#define CRYPTO_ALG_FLAG_DSA_SHA 0x04 /* Can do SHA on msg */
3246 + * Crypto driver/device flags. They can be set in the crid
3247 + * parameter when creating a session or submitting a key
3248 + * op to affect the device/driver assigned. If neither
3249 + * of these are specified then the crid is assumed to hold
3250 + * the driver id of an existing (and suitable) device that
3251 + * must be used to satisfy the request.
3252 + */
3253 +#define CRYPTO_FLAG_HARDWARE 0x01000000 /* hardware accelerated */
3254 +#define CRYPTO_FLAG_SOFTWARE 0x02000000 /* software implementation */
3256 +/* NB: deprecated */
3257 +struct session_op {
3258 + u_int32_t cipher; /* ie. CRYPTO_DES_CBC */
3259 + u_int32_t mac; /* ie. CRYPTO_MD5_HMAC */
3261 + u_int32_t keylen; /* cipher key */
3262 + caddr_t key;
3263 + int mackeylen; /* mac key */
3264 + caddr_t mackey;
3266 + u_int32_t ses; /* returns: session # */
3269 +struct session2_op {
3270 + u_int32_t cipher; /* ie. CRYPTO_DES_CBC */
3271 + u_int32_t mac; /* ie. CRYPTO_MD5_HMAC */
3273 + u_int32_t keylen; /* cipher key */
3274 + caddr_t key;
3275 + int mackeylen; /* mac key */
3276 + caddr_t mackey;
3278 + u_int32_t ses; /* returns: session # */
3279 + int crid; /* driver id + flags (rw) */
3280 + int pad[4]; /* for future expansion */
3283 +struct crypt_op {
3284 + u_int32_t ses;
3285 + u_int16_t op; /* i.e. COP_ENCRYPT */
3286 +#define COP_NONE 0
3287 +#define COP_ENCRYPT 1
3288 +#define COP_DECRYPT 2
3289 + u_int16_t flags;
3290 +#define COP_F_BATCH 0x0008 /* Batch op if possible */
3291 + u_int len;
3292 + caddr_t src, dst; /* become iov[] inside kernel */
3293 + caddr_t mac; /* must be big enough for chosen MAC */
3294 + caddr_t iv;
3298 + * Parameters for looking up a crypto driver/device by
3299 + * device name or by id. The latter are returned for
3300 + * created sessions (crid) and completed key operations.
3301 + */
3302 +struct crypt_find_op {
3303 + int crid; /* driver id + flags */
3304 + char name[32]; /* device/driver name */
3307 +/* bignum parameter, in packed bytes, ... */
3308 +struct crparam {
3309 + caddr_t crp_p;
3310 + u_int crp_nbits;
3313 +#define CRK_MAXPARAM 8
3315 +struct crypt_kop {
3316 + u_int crk_op; /* ie. CRK_MOD_EXP or other */
3317 + u_int crk_status; /* return status */
3318 + u_short crk_iparams; /* # of input parameters */
3319 + u_short crk_oparams; /* # of output parameters */
3320 + u_int crk_crid; /* NB: only used by CIOCKEY2 (rw) */
3321 + struct crparam crk_param[CRK_MAXPARAM];
3323 +#define CRK_ALGORITM_MIN 0
3324 +#define CRK_MOD_EXP 0
3325 +#define CRK_MOD_EXP_CRT 1
3326 +#define CRK_DSA_SIGN 2
3327 +#define CRK_DSA_VERIFY 3
3328 +#define CRK_DH_COMPUTE_KEY 4
3329 +#define CRK_ALGORITHM_MAX 4 /* Keep updated - see below */
3331 +#define CRF_MOD_EXP (1 << CRK_MOD_EXP)
3332 +#define CRF_MOD_EXP_CRT (1 << CRK_MOD_EXP_CRT)
3333 +#define CRF_DSA_SIGN (1 << CRK_DSA_SIGN)
3334 +#define CRF_DSA_VERIFY (1 << CRK_DSA_VERIFY)
3335 +#define CRF_DH_COMPUTE_KEY (1 << CRK_DH_COMPUTE_KEY)
3338 + * done against open of /dev/crypto, to get a cloned descriptor.
3339 + * Please use F_SETFD against the cloned descriptor.
3340 + */
3341 +#define CRIOGET _IOWR('c', 100, u_int32_t)
3342 +#define CRIOASYMFEAT CIOCASYMFEAT
3343 +#define CRIOFINDDEV CIOCFINDDEV
3345 +/* the following are done against the cloned descriptor */
3346 +#define CIOCGSESSION _IOWR('c', 101, struct session_op)
3347 +#define CIOCFSESSION _IOW('c', 102, u_int32_t)
3348 +#define CIOCCRYPT _IOWR('c', 103, struct crypt_op)
3349 +#define CIOCKEY _IOWR('c', 104, struct crypt_kop)
3350 +#define CIOCASYMFEAT _IOR('c', 105, u_int32_t)
3351 +#define CIOCGSESSION2 _IOWR('c', 106, struct session2_op)
3352 +#define CIOCKEY2 _IOWR('c', 107, struct crypt_kop)
3353 +#define CIOCFINDDEV _IOWR('c', 108, struct crypt_find_op)
3355 +struct cryptotstat {
3356 + struct timespec acc; /* total accumulated time */
3357 + struct timespec min; /* min time */
3358 + struct timespec max; /* max time */
3359 + u_int32_t count; /* number of observations */
3362 +struct cryptostats {
3363 + u_int32_t cs_ops; /* symmetric crypto ops submitted */
3364 + u_int32_t cs_errs; /* symmetric crypto ops that failed */
3365 + u_int32_t cs_kops; /* asymmetric/key ops submitted */
3366 + u_int32_t cs_kerrs; /* asymmetric/key ops that failed */
3367 + u_int32_t cs_intrs; /* crypto swi thread activations */
3368 + u_int32_t cs_rets; /* crypto return thread activations */
3369 + u_int32_t cs_blocks; /* symmetric op driver block */
3370 + u_int32_t cs_kblocks; /* asymmetric op driver block */
3371 + /*
3372 + * When CRYPTO_TIMING is defined at compile time and the
3373 + * sysctl debug.crypto is set to 1, the crypto system will
3374 + * accumulate statistics about how long it takes to process
3375 + * crypto requests at various points during processing.
3376 + */
3377 + struct cryptotstat cs_invoke; /* crypto_dispatch -> crypto_invoke */
3378 + struct cryptotstat cs_done; /* crypto_invoke -> crypto_done */
3379 + struct cryptotstat cs_cb; /* crypto_done -> callback */
3380 + struct cryptotstat cs_finis; /* callback -> callback return */
3382 + u_int32_t cs_drops; /* crypto ops dropped due to congestion */
3385 +#ifdef __KERNEL__
3387 +/* Standard initialization structure beginning */
3388 +struct cryptoini {
3389 + int cri_alg; /* Algorithm to use */
3390 + int cri_klen; /* Key length, in bits */
3391 + int cri_mlen; /* Number of bytes we want from the
3392 + entire hash. 0 means all. */
3393 + caddr_t cri_key; /* key to use */
3394 + u_int8_t cri_iv[EALG_MAX_BLOCK_LEN]; /* IV to use */
3395 + struct cryptoini *cri_next;
3398 +/* Describe boundaries of a single crypto operation */
3399 +struct cryptodesc {
3400 + int crd_skip; /* How many bytes to ignore from start */
3401 + int crd_len; /* How many bytes to process */
3402 + int crd_inject; /* Where to inject results, if applicable */
3403 + int crd_flags;
3405 +#define CRD_F_ENCRYPT 0x01 /* Set when doing encryption */
3406 +#define CRD_F_IV_PRESENT 0x02 /* When encrypting, IV is already in
3407 + place, so don't copy. */
3408 +#define CRD_F_IV_EXPLICIT 0x04 /* IV explicitly provided */
3409 +#define CRD_F_DSA_SHA_NEEDED 0x08 /* Compute SHA-1 of buffer for DSA */
3410 +#define CRD_F_KEY_EXPLICIT 0x10 /* Key explicitly provided */
3411 +#define CRD_F_COMP 0x0f /* Set when doing compression */
3413 + struct cryptoini CRD_INI; /* Initialization/context data */
3414 +#define crd_iv CRD_INI.cri_iv
3415 +#define crd_key CRD_INI.cri_key
3416 +#define crd_alg CRD_INI.cri_alg
3417 +#define crd_klen CRD_INI.cri_klen
3419 + struct cryptodesc *crd_next;
3422 +/* Structure describing complete operation */
3423 +struct cryptop {
3424 + struct list_head crp_next;
3425 + wait_queue_head_t crp_waitq;
3427 + u_int64_t crp_sid; /* Session ID */
3428 + int crp_ilen; /* Input data total length */
3429 + int crp_olen; /* Result total length */
3431 + int crp_etype; /*
3432 + * Error type (zero means no error).
3433 + * All error codes except EAGAIN
3434 + * indicate possible data corruption (as in,
3435 + * the data have been touched). On all
3436 + * errors, the crp_sid may have changed
3437 + * (reset to a new one), so the caller
3438 + * should always check and use the new
3439 + * value on future requests.
3440 + */
3441 + int crp_flags;
3443 +#define CRYPTO_F_SKBUF 0x0001 /* Input/output are skbuf chains */
3444 +#define CRYPTO_F_IOV 0x0002 /* Input/output are uio */
3445 +#define CRYPTO_F_REL 0x0004 /* Must return data in same place */
3446 +#define CRYPTO_F_BATCH 0x0008 /* Batch op if possible */
3447 +#define CRYPTO_F_CBIMM 0x0010 /* Do callback immediately */
3448 +#define CRYPTO_F_DONE 0x0020 /* Operation completed */
3449 +#define CRYPTO_F_CBIFSYNC 0x0040 /* Do CBIMM if op is synchronous */
3451 + caddr_t crp_buf; /* Data to be processed */
3452 + caddr_t crp_opaque; /* Opaque pointer, passed along */
3453 + struct cryptodesc *crp_desc; /* Linked list of processing descriptors */
3455 + int (*crp_callback)(struct cryptop *); /* Callback function */
3458 +#define CRYPTO_BUF_CONTIG 0x0
3459 +#define CRYPTO_BUF_IOV 0x1
3460 +#define CRYPTO_BUF_SKBUF 0x2
3462 +#define CRYPTO_OP_DECRYPT 0x0
3463 +#define CRYPTO_OP_ENCRYPT 0x1
3466 + * Hints passed to process methods.
3467 + */
3468 +#define CRYPTO_HINT_MORE 0x1 /* more ops coming shortly */
3470 +struct cryptkop {
3471 + struct list_head krp_next;
3472 + wait_queue_head_t krp_waitq;
3474 + int krp_flags;
3475 +#define CRYPTO_KF_DONE 0x0001 /* Operation completed */
3476 +#define CRYPTO_KF_CBIMM 0x0002 /* Do callback immediately */
3478 + u_int krp_op; /* ie. CRK_MOD_EXP or other */
3479 + u_int krp_status; /* return status */
3480 + u_short krp_iparams; /* # of input parameters */
3481 + u_short krp_oparams; /* # of output parameters */
3482 + u_int krp_crid; /* desired device, etc. */
3483 + u_int32_t krp_hid;
3484 + struct crparam krp_param[CRK_MAXPARAM]; /* kvm */
3485 + int (*krp_callback)(struct cryptkop *);
3488 +#include <ocf-compat.h>
3491 + * Session ids are 64 bits. The lower 32 bits contain a "local id" which
3492 + * is a driver-private session identifier. The upper 32 bits contain a
3493 + * "hardware id" used by the core crypto code to identify the driver and
3494 + * a copy of the driver's capabilities that can be used by client code to
3495 + * optimize operation.
3496 + */
3497 +#define CRYPTO_SESID2HID(_sid) (((_sid) >> 32) & 0x00ffffff)
3498 +#define CRYPTO_SESID2CAPS(_sid) (((_sid) >> 32) & 0xff000000)
3499 +#define CRYPTO_SESID2LID(_sid) (((u_int32_t) (_sid)) & 0xffffffff)
3501 +extern int crypto_newsession(u_int64_t *sid, struct cryptoini *cri, int hard);
3502 +extern int crypto_freesession(u_int64_t sid);
3503 +#define CRYPTOCAP_F_HARDWARE CRYPTO_FLAG_HARDWARE
3504 +#define CRYPTOCAP_F_SOFTWARE CRYPTO_FLAG_SOFTWARE
3505 +#define CRYPTOCAP_F_SYNC 0x04000000 /* operates synchronously */
3506 +extern int32_t crypto_get_driverid(device_t dev, int flags);
3507 +extern int crypto_find_driver(const char *);
3508 +extern device_t crypto_find_device_byhid(int hid);
3509 +extern int crypto_getcaps(int hid);
3510 +extern int crypto_register(u_int32_t driverid, int alg, u_int16_t maxoplen,
3511 + u_int32_t flags);
3512 +extern int crypto_kregister(u_int32_t, int, u_int32_t);
3513 +extern int crypto_unregister(u_int32_t driverid, int alg);
3514 +extern int crypto_unregister_all(u_int32_t driverid);
3515 +extern int crypto_dispatch(struct cryptop *crp);
3516 +extern int crypto_kdispatch(struct cryptkop *);
3517 +#define CRYPTO_SYMQ 0x1
3518 +#define CRYPTO_ASYMQ 0x2
3519 +extern int crypto_unblock(u_int32_t, int);
3520 +extern void crypto_done(struct cryptop *crp);
3521 +extern void crypto_kdone(struct cryptkop *);
3522 +extern int crypto_getfeat(int *);
3524 +extern void crypto_freereq(struct cryptop *crp);
3525 +extern struct cryptop *crypto_getreq(int num);
3527 +extern int crypto_usercrypto; /* userland may do crypto requests */
3528 +extern int crypto_userasymcrypto; /* userland may do asym crypto reqs */
3529 +extern int crypto_devallowsoft; /* only use hardware crypto */
3532 + * random number support, crypto_unregister_all will unregister
3533 + */
3534 +extern int crypto_rregister(u_int32_t driverid,
3535 + int (*read_random)(void *arg, u_int32_t *buf, int len), void *arg);
3536 +extern int crypto_runregister_all(u_int32_t driverid);
3539 + * Crypto-related utility routines used mainly by drivers.
3541 + * XXX these don't really belong here; but for now they're
3542 + * kept apart from the rest of the system.
3543 + */
3544 +struct uio;
3545 +extern void cuio_copydata(struct uio* uio, int off, int len, caddr_t cp);
3546 +extern void cuio_copyback(struct uio* uio, int off, int len, caddr_t cp);
3547 +extern struct iovec *cuio_getptr(struct uio *uio, int loc, int *off);
3549 +extern void crypto_copyback(int flags, caddr_t buf, int off, int size,
3550 + caddr_t in);
3551 +extern void crypto_copydata(int flags, caddr_t buf, int off, int size,
3552 + caddr_t out);
3553 +extern int crypto_apply(int flags, caddr_t buf, int off, int len,
3554 + int (*f)(void *, void *, u_int), void *arg);
3556 +#endif /* __KERNEL__ */
3557 +#endif /* _CRYPTO_CRYPTO_H_ */
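These ioctls and structures are what cryptodev.c above serves through /dev/crypto. A minimal user-space sketch of the intended flow (the <crypto/cryptodev.h> include path and the program itself are illustrative assumptions, not something shipped by this patch): clone a descriptor with CRIOGET, create a cipher-only session with CIOCGSESSION, submit a buffer with CIOCCRYPT, then release the session with CIOCFSESSION.

#include <crypto/cryptodev.h>	/* the patched header; the install path is an assumption */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/types.h>
#include <unistd.h>

int main(void)
{
	unsigned char key[16] = "0123456789abcdef";	/* AES-128 key */
	unsigned char iv[16] = { 0 };
	unsigned char buf[64] = "data encrypted in place";
	struct session_op sess;
	struct crypt_op op;
	int fd, cfd = -1;

	fd = open("/dev/crypto", O_RDWR);
	if (fd < 0 || ioctl(fd, CRIOGET, &cfd) < 0) {
		perror("/dev/crypto");
		return 1;
	}

	/* cipher-only session: no MAC algorithm requested */
	memset(&sess, 0, sizeof(sess));
	sess.cipher = CRYPTO_AES_CBC;
	sess.keylen = sizeof(key);
	sess.key = (caddr_t)key;
	if (ioctl(cfd, CIOCGSESSION, &sess) < 0) {
		perror("CIOCGSESSION");
		return 1;
	}

	/* len must be a multiple of the cipher block size (16 for AES-CBC) */
	memset(&op, 0, sizeof(op));
	op.ses = sess.ses;
	op.op = COP_ENCRYPT;
	op.len = sizeof(buf);
	op.src = (caddr_t)buf;
	op.dst = (caddr_t)buf;
	op.iv = (caddr_t)iv;
	if (ioctl(cfd, CIOCCRYPT, &op) < 0) {
		perror("CIOCCRYPT");
		return 1;
	}

	ioctl(cfd, CIOCFSESSION, &sess.ses);
	close(cfd);
	close(fd);
	return 0;
}

As the comment above CRIOGET notes, every session and crypt ioctl is issued against the cloned descriptor, not against the original /dev/crypto fd.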
3558 diff -Nur linux-2.6.30.orig/crypto/ocf/cryptosoft.c linux-2.6.30/crypto/ocf/cryptosoft.c
3559 --- linux-2.6.30.orig/crypto/ocf/cryptosoft.c 1970-01-01 01:00:00.000000000 +0100
3560 +++ linux-2.6.30/crypto/ocf/cryptosoft.c 2009-06-11 10:55:27.000000000 +0200
3561 @@ -0,0 +1,898 @@
3563 + * An OCF module that uses the linux kernel cryptoapi, based on the
3564 + * original cryptosoft for BSD by Angelos D. Keromytis (angelos@cis.upenn.edu)
3565 + * though it is now mostly unrecognisable.
3567 + * Written by David McCullough <david_mccullough@securecomputing.com>
3568 + * Copyright (C) 2004-2007 David McCullough
3569 + * Copyright (C) 2004-2005 Intel Corporation.
3571 + * LICENSE TERMS
3573 + * The free distribution and use of this software in both source and binary
3574 + * form is allowed (with or without changes) provided that:
3576 + * 1. distributions of this source code include the above copyright
3577 + * notice, this list of conditions and the following disclaimer;
3579 + * 2. distributions in binary form include the above copyright
3580 + * notice, this list of conditions and the following disclaimer
3581 + * in the documentation and/or other associated materials;
3583 + * 3. the copyright holder's name is not used to endorse products
3584 + * built using this software without specific written permission.
3586 + * ALTERNATIVELY, provided that this notice is retained in full, this product
3587 + * may be distributed under the terms of the GNU General Public License (GPL),
3588 + * in which case the provisions of the GPL apply INSTEAD OF those given above.
3590 + * DISCLAIMER
3592 + * This software is provided 'as is' with no explicit or implied warranties
3593 + * in respect of its properties, including, but not limited to, correctness
3594 + * and/or fitness for purpose.
3595 + * ---------------------------------------------------------------------------
3596 + */
3598 +#ifndef AUTOCONF_INCLUDED
3599 +#include <linux/config.h>
3600 +#endif
3601 +#include <linux/module.h>
3602 +#include <linux/init.h>
3603 +#include <linux/list.h>
3604 +#include <linux/slab.h>
3605 +#include <linux/sched.h>
3606 +#include <linux/wait.h>
3607 +#include <linux/crypto.h>
3608 +#include <linux/mm.h>
3609 +#include <linux/skbuff.h>
3610 +#include <linux/random.h>
3611 +#include <linux/scatterlist.h>
3613 +#include <cryptodev.h>
3614 +#include <uio.h>
3616 +struct {
3617 + softc_device_decl sc_dev;
3618 +} swcr_softc;
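+/* byte offset of a pointer within its page (the low, non-page-aligned address bits); used for the scatterlist setup below */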
3620 +#define offset_in_page(p) ((unsigned long)(p) & ~PAGE_MASK)
3622 +/* Software session entry */
3624 +#define SW_TYPE_CIPHER 0
3625 +#define SW_TYPE_HMAC 1
3626 +#define SW_TYPE_AUTH2 2
3627 +#define SW_TYPE_HASH 3
3628 +#define SW_TYPE_COMP 4
3629 +#define SW_TYPE_BLKCIPHER 5
3631 +struct swcr_data {
3632 + int sw_type;
3633 + int sw_alg;
3634 + struct crypto_tfm *sw_tfm;
3635 + union {
3636 + struct {
3637 + char *sw_key;
3638 + int sw_klen;
3639 + int sw_mlen;
3640 + } hmac;
3641 + void *sw_comp_buf;
3642 + } u;
3643 + struct swcr_data *sw_next;
3646 +#ifndef CRYPTO_TFM_MODE_CBC
3648 + * As of linux-2.6.21 this is no longer defined, and presumably no longer
3649 + * needs to be passed into the crypto core code.
3650 + */
3651 +#define CRYPTO_TFM_MODE_CBC 0
3652 +#define CRYPTO_TFM_MODE_ECB 0
3653 +#endif
3655 +#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19)
3656 + /*
3657 + * Linux 2.6.19 introduced a new Crypto API; set up macros to convert the
3658 + * new API into the old API.
3659 + */
3661 + /* Symmetric/Block Cipher */
3662 + struct blkcipher_desc
3664 + struct crypto_tfm *tfm;
3665 + void *info;
3666 + };
3667 + #define ecb(X) #X
3668 + #define cbc(X) #X
3669 + #define crypto_has_blkcipher(X, Y, Z) crypto_alg_available(X, 0)
3670 + #define crypto_blkcipher_cast(X) X
3671 + #define crypto_blkcipher_tfm(X) X
3672 + #define crypto_alloc_blkcipher(X, Y, Z) crypto_alloc_tfm(X, mode)
3673 + #define crypto_blkcipher_ivsize(X) crypto_tfm_alg_ivsize(X)
3674 + #define crypto_blkcipher_blocksize(X) crypto_tfm_alg_blocksize(X)
3675 + #define crypto_blkcipher_setkey(X, Y, Z) crypto_cipher_setkey(X, Y, Z)
3676 + #define crypto_blkcipher_encrypt_iv(W, X, Y, Z) \
3677 + crypto_cipher_encrypt_iv((W)->tfm, X, Y, Z, (u8 *)((W)->info))
3678 + #define crypto_blkcipher_decrypt_iv(W, X, Y, Z) \
3679 + crypto_cipher_decrypt_iv((W)->tfm, X, Y, Z, (u8 *)((W)->info))
3681 + /* Hash/HMAC/Digest */
3682 + struct hash_desc
3684 + struct crypto_tfm *tfm;
3685 + };
3686 + #define hmac(X) #X
3687 + #define crypto_has_hash(X, Y, Z) crypto_alg_available(X, 0)
3688 + #define crypto_hash_cast(X) X
3689 + #define crypto_hash_tfm(X) X
3690 + #define crypto_alloc_hash(X, Y, Z) crypto_alloc_tfm(X, mode)
3691 + #define crypto_hash_digestsize(X) crypto_tfm_alg_digestsize(X)
3692 + #define crypto_hash_digest(W, X, Y, Z) \
3693 + crypto_digest_digest((W)->tfm, X, sg_num, Z)
3695 + /* Asymmetric Cipher */
3696 + #define crypto_has_cipher(X, Y, Z) crypto_alg_available(X, 0)
3698 + /* Compression */
3699 + #define crypto_has_comp(X, Y, Z) crypto_alg_available(X, 0)
3700 + #define crypto_comp_tfm(X) X
3701 + #define crypto_comp_cast(X) X
3702 + #define crypto_alloc_comp(X, Y, Z) crypto_alloc_tfm(X, mode)
3703 +#else
3704 + #define ecb(X) "ecb(" #X ")"
3705 + #define cbc(X) "cbc(" #X ")"
3706 + #define hmac(X) "hmac(" #X ")"
3707 +#endif /* if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19) */
3709 +struct crypto_details
3711 + char *alg_name;
3712 + int mode;
3713 + int sw_type;
3717 + * This needs to be kept updated with CRYPTO_xxx list (cryptodev.h).
3718 + * If the Algorithm is not supported, then insert a {NULL, 0, 0} entry.
3720 + * IMPORTANT: The index to the array IS CRYPTO_xxx.
3721 + */
3722 +static struct crypto_details crypto_details[CRYPTO_ALGORITHM_MAX + 1] = {
3723 + { NULL, 0, 0 },
3724 + /* CRYPTO_xxx index starts at 1 */
3725 + { cbc(des), CRYPTO_TFM_MODE_CBC, SW_TYPE_BLKCIPHER },
3726 + { cbc(des3_ede), CRYPTO_TFM_MODE_CBC, SW_TYPE_BLKCIPHER },
3727 + { cbc(blowfish), CRYPTO_TFM_MODE_CBC, SW_TYPE_BLKCIPHER },
3728 + { cbc(cast5), CRYPTO_TFM_MODE_CBC, SW_TYPE_BLKCIPHER },
3729 + { cbc(skipjack), CRYPTO_TFM_MODE_CBC, SW_TYPE_BLKCIPHER },
3730 + { hmac(md5), 0, SW_TYPE_HMAC },
3731 + { hmac(sha1), 0, SW_TYPE_HMAC },
3732 + { hmac(ripemd160), 0, SW_TYPE_HMAC },
3733 + { "md5-kpdk??", 0, SW_TYPE_HASH },
3734 + { "sha1-kpdk??", 0, SW_TYPE_HASH },
3735 + { cbc(aes), CRYPTO_TFM_MODE_CBC, SW_TYPE_BLKCIPHER },
3736 + { ecb(arc4), CRYPTO_TFM_MODE_ECB, SW_TYPE_BLKCIPHER },
3737 + { "md5", 0, SW_TYPE_HASH },
3738 + { "sha1", 0, SW_TYPE_HASH },
3739 + { hmac(digest_null), 0, SW_TYPE_HMAC },
3740 + { cbc(cipher_null), CRYPTO_TFM_MODE_CBC, SW_TYPE_BLKCIPHER },
3741 + { "deflate", 0, SW_TYPE_COMP },
3742 + { hmac(sha256), 0, SW_TYPE_HMAC },
3743 + { hmac(sha384), 0, SW_TYPE_HMAC },
3744 + { hmac(sha512), 0, SW_TYPE_HMAC },
3745 + { cbc(camellia), CRYPTO_TFM_MODE_CBC, SW_TYPE_BLKCIPHER },
3746 + { "sha256", 0, SW_TYPE_HASH },
3747 + { "sha384", 0, SW_TYPE_HASH },
3748 + { "sha512", 0, SW_TYPE_HASH },
3749 + { "ripemd160", 0, SW_TYPE_HASH },
3752 +int32_t swcr_id = -1;
3753 +module_param(swcr_id, int, 0444);
3754 +MODULE_PARM_DESC(swcr_id, "Read-Only OCF ID for cryptosoft driver");
3756 +int swcr_fail_if_compression_grows = 1;
3757 +module_param(swcr_fail_if_compression_grows, int, 0644);
3758 +MODULE_PARM_DESC(swcr_fail_if_compression_grows,
3759 + "Treat compression that results in more data as a failure");
3761 +static struct swcr_data **swcr_sessions = NULL;
3762 +static u_int32_t swcr_sesnum = 0;
3764 +static int swcr_process(device_t, struct cryptop *, int);
3765 +static int swcr_newsession(device_t, u_int32_t *, struct cryptoini *);
3766 +static int swcr_freesession(device_t, u_int64_t);
3768 +static device_method_t swcr_methods = {
3769 + /* crypto device methods */
3770 + DEVMETHOD(cryptodev_newsession, swcr_newsession),
3771 + DEVMETHOD(cryptodev_freesession,swcr_freesession),
3772 + DEVMETHOD(cryptodev_process, swcr_process),
3775 +#define debug swcr_debug
3776 +int swcr_debug = 0;
3777 +module_param(swcr_debug, int, 0644);
3778 +MODULE_PARM_DESC(swcr_debug, "Enable debug");
3781 + * Generate a new software session.
3782 + */
3783 +static int
3784 +swcr_newsession(device_t dev, u_int32_t *sid, struct cryptoini *cri)
3786 + struct swcr_data **swd;
3787 + u_int32_t i;
3788 + int error;
3789 + char *algo;
3790 + int mode, sw_type;
3792 + dprintk("%s()\n", __FUNCTION__);
3793 + if (sid == NULL || cri == NULL) {
3794 + dprintk("%s,%d - EINVAL\n", __FILE__, __LINE__);
3795 + return EINVAL;
3798 + if (swcr_sessions) {
3799 + for (i = 1; i < swcr_sesnum; i++)
3800 + if (swcr_sessions[i] == NULL)
3801 + break;
3802 + } else
3803 + i = 1; /* NB: to silence compiler warning */
3805 + if (swcr_sessions == NULL || i == swcr_sesnum) {
3806 + if (swcr_sessions == NULL) {
3807 + i = 1; /* We leave swcr_sessions[0] empty */
3808 + swcr_sesnum = CRYPTO_SW_SESSIONS;
3809 + } else
3810 + swcr_sesnum *= 2;
3812 + swd = kmalloc(swcr_sesnum * sizeof(struct swcr_data *), SLAB_ATOMIC);
3813 + if (swd == NULL) {
3814 + /* Reset session number */
3815 + if (swcr_sesnum == CRYPTO_SW_SESSIONS)
3816 + swcr_sesnum = 0;
3817 + else
3818 + swcr_sesnum /= 2;
3819 + dprintk("%s,%d: ENOBUFS\n", __FILE__, __LINE__);
3820 + return ENOBUFS;
3822 + memset(swd, 0, swcr_sesnum * sizeof(struct swcr_data *));
3824 + /* Copy existing sessions */
3825 + if (swcr_sessions) {
3826 + memcpy(swd, swcr_sessions,
3827 + (swcr_sesnum / 2) * sizeof(struct swcr_data *));
3828 + kfree(swcr_sessions);
3831 + swcr_sessions = swd;
3834 + swd = &swcr_sessions[i];
3835 + *sid = i;
3837 + while (cri) {
3838 + *swd = (struct swcr_data *) kmalloc(sizeof(struct swcr_data),
3839 + SLAB_ATOMIC);
3840 + if (*swd == NULL) {
3841 + swcr_freesession(NULL, i);
3842 + dprintk("%s,%d: ENOBUFS\n", __FILE__, __LINE__);
3843 + return ENOBUFS;
3845 + memset(*swd, 0, sizeof(struct swcr_data));
3847 + if (cri->cri_alg > CRYPTO_ALGORITHM_MAX) {
3848 + printk("cryptosoft: Unknown algorithm 0x%x\n", cri->cri_alg);
3849 + swcr_freesession(NULL, i);
3850 + return EINVAL;
3853 + algo = crypto_details[cri->cri_alg].alg_name;
3854 + if (!algo || !*algo) {
3855 + printk("cryptosoft: Unsupported algorithm 0x%x\n", cri->cri_alg);
3856 + swcr_freesession(NULL, i);
3857 + return EINVAL;
3860 + mode = crypto_details[cri->cri_alg].mode;
3861 + sw_type = crypto_details[cri->cri_alg].sw_type;
3863 + /* Algorithm specific configuration */
3864 + switch (cri->cri_alg) {
3865 + case CRYPTO_NULL_CBC:
3866 + cri->cri_klen = 0; /* make it work with crypto API */
3867 + break;
3868 + default:
3869 + break;
3872 + if (sw_type == SW_TYPE_BLKCIPHER) {
3873 + dprintk("%s crypto_alloc_blkcipher(%s, 0x%x)\n", __FUNCTION__,
3874 + algo, mode);
3876 + (*swd)->sw_tfm = crypto_blkcipher_tfm(
3877 + crypto_alloc_blkcipher(algo, 0,
3878 + CRYPTO_ALG_ASYNC));
3879 + if (!(*swd)->sw_tfm) {
3880 + dprintk("cryptosoft: crypto_alloc_blkcipher failed(%s,0x%x)\n",
3881 + algo,mode);
3882 + swcr_freesession(NULL, i);
3883 + return EINVAL;
3886 + if (debug) {
3887 + dprintk("%s key:cri->cri_klen=%d,(cri->cri_klen + 7)/8=%d",
3888 + __FUNCTION__,cri->cri_klen,(cri->cri_klen + 7)/8);
3889 + for (i = 0; i < (cri->cri_klen + 7) / 8; i++)
3891 + dprintk("%s0x%x", (i % 8) ? " " : "\n ",cri->cri_key[i]);
3893 + dprintk("\n");
3895 + error = crypto_blkcipher_setkey(
3896 + crypto_blkcipher_cast((*swd)->sw_tfm), cri->cri_key,
3897 + (cri->cri_klen + 7) / 8);
3898 + if (error) {
3899 + printk("cryptosoft: setkey failed %d (crt_flags=0x%x)\n", error,
3900 + (*swd)->sw_tfm->crt_flags);
3901 + swcr_freesession(NULL, i);
3902 + return error;
3904 + } else if (sw_type == SW_TYPE_HMAC || sw_type == SW_TYPE_HASH) {
3905 + dprintk("%s crypto_alloc_hash(%s, 0x%x)\n", __FUNCTION__,
3906 + algo, mode);
3908 + (*swd)->sw_tfm = crypto_hash_tfm(
3909 + crypto_alloc_hash(algo, 0, CRYPTO_ALG_ASYNC));
3911 + if (!(*swd)->sw_tfm) {
3912 + dprintk("cryptosoft: crypto_alloc_hash failed(%s,0x%x)\n",
3913 + algo, mode);
3914 + swcr_freesession(NULL, i);
3915 + return EINVAL;
3918 + (*swd)->u.hmac.sw_klen = (cri->cri_klen + 7) / 8;
3919 + (*swd)->u.hmac.sw_key = (char *)kmalloc((*swd)->u.hmac.sw_klen,
3920 + SLAB_ATOMIC);
3921 + if ((*swd)->u.hmac.sw_key == NULL) {
3922 + swcr_freesession(NULL, i);
3923 + dprintk("%s,%d: ENOBUFS\n", __FILE__, __LINE__);
3924 + return ENOBUFS;
3926 + memcpy((*swd)->u.hmac.sw_key, cri->cri_key, (*swd)->u.hmac.sw_klen);
3927 + if (cri->cri_mlen) {
3928 + (*swd)->u.hmac.sw_mlen = cri->cri_mlen;
3929 + } else {
3930 + (*swd)->u.hmac.sw_mlen =
3931 + crypto_hash_digestsize(
3932 + crypto_hash_cast((*swd)->sw_tfm));
3934 + } else if (sw_type == SW_TYPE_COMP) {
3935 + (*swd)->sw_tfm = crypto_comp_tfm(
3936 + crypto_alloc_comp(algo, 0, CRYPTO_ALG_ASYNC));
3937 + if (!(*swd)->sw_tfm) {
3938 + dprintk("cryptosoft: crypto_alloc_comp failed(%s,0x%x)\n",
3939 + algo, mode);
3940 + swcr_freesession(NULL, i);
3941 + return EINVAL;
3943 + (*swd)->u.sw_comp_buf = kmalloc(CRYPTO_MAX_DATA_LEN, SLAB_ATOMIC);
3944 + if ((*swd)->u.sw_comp_buf == NULL) {
3945 + swcr_freesession(NULL, i);
3946 + dprintk("%s,%d: ENOBUFS\n", __FILE__, __LINE__);
3947 + return ENOBUFS;
3949 + } else {
3950 + printk("cryptosoft: Unhandled sw_type %d\n", sw_type);
3951 + swcr_freesession(NULL, i);
3952 + return EINVAL;
3955 + (*swd)->sw_alg = cri->cri_alg;
3956 + (*swd)->sw_type = sw_type;
3958 + cri = cri->cri_next;
3959 + swd = &((*swd)->sw_next);
3961 + return 0;
3965 + * Free a session.
3966 + */
3967 +static int
3968 +swcr_freesession(device_t dev, u_int64_t tid)
3970 + struct swcr_data *swd;
3971 + u_int32_t sid = CRYPTO_SESID2LID(tid);
3973 + dprintk("%s()\n", __FUNCTION__);
3974 + if (sid > swcr_sesnum || swcr_sessions == NULL ||
3975 + swcr_sessions[sid] == NULL) {
3976 + dprintk("%s,%d: EINVAL\n", __FILE__, __LINE__);
3977 + return(EINVAL);
3980 + /* Silently accept and return */
3981 + if (sid == 0)
3982 + return(0);
3984 + while ((swd = swcr_sessions[sid]) != NULL) {
3985 + swcr_sessions[sid] = swd->sw_next;
3986 + if (swd->sw_tfm)
3987 + crypto_free_tfm(swd->sw_tfm);
3988 + if (swd->sw_type == SW_TYPE_COMP) {
3989 + if (swd->u.sw_comp_buf)
3990 + kfree(swd->u.sw_comp_buf);
3991 + } else {
3992 + if (swd->u.hmac.sw_key)
3993 + kfree(swd->u.hmac.sw_key);
3995 + kfree(swd);
3997 + return 0;
4001 + * Process a software request.
4002 + */
4003 +static int
4004 +swcr_process(device_t dev, struct cryptop *crp, int hint)
4006 + struct cryptodesc *crd;
4007 + struct swcr_data *sw;
4008 + u_int32_t lid;
4009 +#define SCATTERLIST_MAX 16
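+ /* upper bound on scatterlist entries built per request; larger skb/iov chains are rejected below */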
4010 + struct scatterlist sg[SCATTERLIST_MAX];
4011 + int sg_num, sg_len, skip;
4012 + struct sk_buff *skb = NULL;
4013 + struct uio *uiop = NULL;
4015 + dprintk("%s()\n", __FUNCTION__);
4016 + /* Sanity check */
4017 + if (crp == NULL) {
4018 + dprintk("%s,%d: EINVAL\n", __FILE__, __LINE__);
4019 + return EINVAL;
4022 + crp->crp_etype = 0;
4024 + if (crp->crp_desc == NULL || crp->crp_buf == NULL) {
4025 + dprintk("%s,%d: EINVAL\n", __FILE__, __LINE__);
4026 + crp->crp_etype = EINVAL;
4027 + goto done;
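+ /* the local session id is the low 32 bits of the 64-bit crp_sid (cf. CRYPTO_SESID2LID in swcr_freesession) */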
4030 + lid = crp->crp_sid & 0xffffffff;
4031 + if (lid >= swcr_sesnum || lid == 0 || swcr_sessions == NULL ||
4032 + swcr_sessions[lid] == NULL) {
4033 + crp->crp_etype = ENOENT;
4034 + dprintk("%s,%d: ENOENT\n", __FILE__, __LINE__);
4035 + goto done;
4038 + /*
4039 + * do some error checking outside of the loop for SKB and IOV processing
4040 + * this leaves us with valid skb or uiop pointers for later
4041 + */
4042 + if (crp->crp_flags & CRYPTO_F_SKBUF) {
4043 + skb = (struct sk_buff *) crp->crp_buf;
4044 + if (skb_shinfo(skb)->nr_frags >= SCATTERLIST_MAX) {
4045 + printk("%s,%d: %d nr_frags > SCATTERLIST_MAX", __FILE__, __LINE__,
4046 + skb_shinfo(skb)->nr_frags);
4047 + goto done;
4049 + } else if (crp->crp_flags & CRYPTO_F_IOV) {
4050 + uiop = (struct uio *) crp->crp_buf;
4051 + if (uiop->uio_iovcnt > SCATTERLIST_MAX) {
4052 + printk("%s,%d: %d uio_iovcnt > SCATTERLIST_MAX", __FILE__, __LINE__,
4053 + uiop->uio_iovcnt);
4054 + goto done;
4058 + /* Go through crypto descriptors, processing as we go */
4059 + for (crd = crp->crp_desc; crd; crd = crd->crd_next) {
4060 + /*
4061 + * Find the crypto context.
4063 + * XXX Note that the logic here prevents us from having
4064 + * XXX the same algorithm multiple times in a session
4065 + * XXX (or rather, we can but it won't give us the right
4066 + * XXX results). To do that, we'd need some way of differentiating
4067 + * XXX between the various instances of an algorithm (so we can
4068 + * XXX locate the correct crypto context).
4069 + */
4070 + for (sw = swcr_sessions[lid]; sw && sw->sw_alg != crd->crd_alg;
4071 + sw = sw->sw_next)
4074 + /* No such context ? */
4075 + if (sw == NULL) {
4076 + crp->crp_etype = EINVAL;
4077 + dprintk("%s,%d: EINVAL\n", __FILE__, __LINE__);
4078 + goto done;
4081 + skip = crd->crd_skip;
4083 + /*
4084 + * setup the SG list skip from the start of the buffer
4085 + */
4086 + memset(sg, 0, sizeof(sg));
4087 + if (crp->crp_flags & CRYPTO_F_SKBUF) {
4088 + int i, len;
4090 + sg_num = 0;
4091 + sg_len = 0;
4093 + if (skip < skb_headlen(skb)) {
4094 + len = skb_headlen(skb) - skip;
4095 + if (len + sg_len > crd->crd_len)
4096 + len = crd->crd_len - sg_len;
4097 + sg_set_page(&sg[sg_num],
4098 + virt_to_page(skb->data + skip), len,
4099 + offset_in_page(skb->data + skip));
4100 + sg_len += len;
4101 + sg_num++;
4102 + skip = 0;
4103 + } else
4104 + skip -= skb_headlen(skb);
4106 + for (i = 0; sg_len < crd->crd_len &&
4107 + i < skb_shinfo(skb)->nr_frags &&
4108 + sg_num < SCATTERLIST_MAX; i++) {
4109 + if (skip < skb_shinfo(skb)->frags[i].size) {
4110 + len = skb_shinfo(skb)->frags[i].size - skip;
4111 + if (len + sg_len > crd->crd_len)
4112 + len = crd->crd_len - sg_len;
4113 + sg_set_page(&sg[sg_num],
4114 + skb_shinfo(skb)->frags[i].page,
4115 + len,
4116 + skb_shinfo(skb)->frags[i].page_offset + skip);
4117 + sg_len += len;
4118 + sg_num++;
4119 + skip = 0;
4120 + } else
4121 + skip -= skb_shinfo(skb)->frags[i].size;
4123 + } else if (crp->crp_flags & CRYPTO_F_IOV) {
4124 + int len;
4126 + sg_len = 0;
4127 + for (sg_num = 0; sg_len <= crd->crd_len &&
4128 + sg_num < uiop->uio_iovcnt &&
4129 + sg_num < SCATTERLIST_MAX; sg_num++) {
4130 + if (skip <= uiop->uio_iov[sg_num].iov_len) {
4131 + len = uiop->uio_iov[sg_num].iov_len - skip;
4132 + if (len + sg_len > crd->crd_len)
4133 + len = crd->crd_len - sg_len;
4134 + sg_set_page(&sg[sg_num],
4135 + virt_to_page(uiop->uio_iov[sg_num].iov_base+skip),
4136 + len,
4137 + offset_in_page(uiop->uio_iov[sg_num].iov_base+skip));
4138 + sg_len += len;
4139 + skip = 0;
4140 + } else
4141 + skip -= uiop->uio_iov[sg_num].iov_len;
4143 + } else {
4144 + sg_len = (crp->crp_ilen - skip);
4145 + if (sg_len > crd->crd_len)
4146 + sg_len = crd->crd_len;
4147 + sg_set_page(&sg[0], virt_to_page(crp->crp_buf + skip),
4148 + sg_len, offset_in_page(crp->crp_buf + skip));
4149 + sg_num = 1;
4153 + switch (sw->sw_type) {
4154 + case SW_TYPE_BLKCIPHER: {
4155 + unsigned char iv[EALG_MAX_BLOCK_LEN];
4156 + unsigned char *ivp = iv;
4157 + int ivsize =
4158 + crypto_blkcipher_ivsize(crypto_blkcipher_cast(sw->sw_tfm));
4159 + struct blkcipher_desc desc;
4161 + if (sg_len < crypto_blkcipher_blocksize(
4162 + crypto_blkcipher_cast(sw->sw_tfm))) {
4163 + crp->crp_etype = EINVAL;
4164 + dprintk("%s,%d: EINVAL len %d < %d\n", __FILE__, __LINE__,
4165 + sg_len, crypto_blkcipher_blocksize(
4166 + crypto_blkcipher_cast(sw->sw_tfm)));
4167 + goto done;
4170 + if (ivsize > sizeof(iv)) {
4171 + crp->crp_etype = EINVAL;
4172 + dprintk("%s,%d: EINVAL\n", __FILE__, __LINE__);
4173 + goto done;
4176 + if (crd->crd_flags & CRD_F_KEY_EXPLICIT) {
4177 + int i, error;
4179 + if (debug) {
4180 + dprintk("%s key:", __FUNCTION__);
4181 + for (i = 0; i < (crd->crd_klen + 7) / 8; i++)
4182 + dprintk("%s0x%x", (i % 8) ? " " : "\n ",
4183 + crd->crd_key[i]);
4184 + dprintk("\n");
4186 + error = crypto_blkcipher_setkey(
4187 + crypto_blkcipher_cast(sw->sw_tfm), crd->crd_key,
4188 + (crd->crd_klen + 7) / 8);
4189 + if (error) {
4190 + dprintk("cryptosoft: setkey failed %d (crt_flags=0x%x)\n",
4191 + error, sw->sw_tfm->crt_flags);
4192 + crp->crp_etype = -error;
4196 + memset(&desc, 0, sizeof(desc));
4197 + desc.tfm = crypto_blkcipher_cast(sw->sw_tfm);
4199 + if (crd->crd_flags & CRD_F_ENCRYPT) { /* encrypt */
4201 + if (crd->crd_flags & CRD_F_IV_EXPLICIT) {
4202 + ivp = crd->crd_iv;
4203 + } else {
4204 + get_random_bytes(ivp, ivsize);
4206 + /*
4207 + * do we have to copy the IV back to the buffer ?
4208 + */
4209 + if ((crd->crd_flags & CRD_F_IV_PRESENT) == 0) {
4210 + crypto_copyback(crp->crp_flags, crp->crp_buf,
4211 + crd->crd_inject, ivsize, (caddr_t)ivp);
4213 + desc.info = ivp;
4214 + crypto_blkcipher_encrypt_iv(&desc, sg, sg, sg_len);
4216 + } else { /*decrypt */
4218 + if (crd->crd_flags & CRD_F_IV_EXPLICIT) {
4219 + ivp = crd->crd_iv;
4220 + } else {
4221 + crypto_copydata(crp->crp_flags, crp->crp_buf,
4222 + crd->crd_inject, ivsize, (caddr_t)ivp);
4224 + desc.info = ivp;
4225 + crypto_blkcipher_decrypt_iv(&desc, sg, sg, sg_len);
4227 + } break;
4228 + case SW_TYPE_HMAC:
4229 + case SW_TYPE_HASH:
4231 + char result[HASH_MAX_LEN];
4232 + struct hash_desc desc;
4234 + /* check we have room for the result */
4235 + if (crp->crp_ilen - crd->crd_inject < sw->u.hmac.sw_mlen) {
4236 + dprintk(
4237 + "cryptosoft: EINVAL crp_ilen=%d, len=%d, inject=%d digestsize=%d\n",
4238 + crp->crp_ilen, crd->crd_skip + sg_len, crd->crd_inject,
4239 + sw->u.hmac.sw_mlen);
4240 + crp->crp_etype = EINVAL;
4241 + goto done;
4244 + memset(&desc, 0, sizeof(desc));
4245 + desc.tfm = crypto_hash_cast(sw->sw_tfm);
4247 + memset(result, 0, sizeof(result));
4249 + if (sw->sw_type == SW_TYPE_HMAC) {
4250 +#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19)
4251 + crypto_hmac(sw->sw_tfm, sw->u.hmac.sw_key, &sw->u.hmac.sw_klen,
4252 + sg, sg_num, result);
4253 +#else
4254 + crypto_hash_setkey(desc.tfm, sw->u.hmac.sw_key,
4255 + sw->u.hmac.sw_klen);
4256 + crypto_hash_digest(&desc, sg, sg_len, result);
4257 +#endif /* #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19) */
4259 + } else { /* SW_TYPE_HASH */
4260 + crypto_hash_digest(&desc, sg, sg_len, result);
4263 + crypto_copyback(crp->crp_flags, crp->crp_buf,
4264 + crd->crd_inject, sw->u.hmac.sw_mlen, result);
4266 + break;
4268 + case SW_TYPE_COMP: {
4269 + void *ibuf = NULL;
4270 + void *obuf = sw->u.sw_comp_buf;
4271 + int ilen = sg_len, olen = CRYPTO_MAX_DATA_LEN;
4272 + int ret = 0;
4274 + /*
4275 + * we need to use an additional copy if there is more than one
4276 + * input chunk since the kernel comp routines do not handle
4277 + * SG yet. Otherwise we just use the input buffer as is.
4278 + * Rather than allocate another buffer we just split the tmp
4279 + * buffer we already have.
4280 + * Perhaps we should just use zlib directly ?
4281 + */
4282 + if (sg_num > 1) {
4283 + int blk;
4285 + ibuf = obuf;
4286 + for (blk = 0; blk < sg_num; blk++) {
4287 + memcpy(obuf, sg_virt(&sg[blk]),
4288 + sg[blk].length);
4289 + obuf += sg[blk].length;
4291 + olen -= sg_len;
4292 + } else
4293 + ibuf = sg_virt(&sg[0]);
4295 + if (crd->crd_flags & CRD_F_ENCRYPT) { /* compress */
4296 + ret = crypto_comp_compress(crypto_comp_cast(sw->sw_tfm),
4297 + ibuf, ilen, obuf, &olen);
4298 + if (!ret && olen > crd->crd_len) {
4299 + dprintk("cryptosoft: ERANGE compress %d into %d\n",
4300 + crd->crd_len, olen);
4301 + if (swcr_fail_if_compression_grows)
4302 + ret = ERANGE;
4304 + } else { /* decompress */
4305 + ret = crypto_comp_decompress(crypto_comp_cast(sw->sw_tfm),
4306 + ibuf, ilen, obuf, &olen);
4307 + if (!ret && (olen + crd->crd_inject) > crp->crp_olen) {
4308 + dprintk("cryptosoft: ETOOSMALL decompress %d into %d, "
4309 + "space for %d,at offset %d\n",
4310 + crd->crd_len, olen, crp->crp_olen, crd->crd_inject);
4311 + ret = ETOOSMALL;
4314 + if (ret)
4315 + dprintk("%s,%d: ret = %d\n", __FILE__, __LINE__, ret);
4317 + /*
4318 + * on success copy result back,
4319 + * linux crypto API returns -errno, we need to fix that
4320 + */
4321 + crp->crp_etype = ret < 0 ? -ret : ret;
4322 + if (ret == 0) {
4323 + /* copy back the result and return its size */
4324 + crypto_copyback(crp->crp_flags, crp->crp_buf,
4325 + crd->crd_inject, olen, obuf);
4326 + crp->crp_olen = olen;
4330 + } break;
4332 + default:
4333 + /* Unknown/unsupported algorithm */
4334 + dprintk("%s,%d: EINVAL\n", __FILE__, __LINE__);
4335 + crp->crp_etype = EINVAL;
4336 + goto done;
4340 +done:
4341 + crypto_done(crp);
4342 + return 0;
4345 +static int
4346 +cryptosoft_init(void)
4348 + int i, sw_type, mode;
4349 + char *algo;
4351 + dprintk("%s(%p)\n", __FUNCTION__, cryptosoft_init);
4353 + softc_device_init(&swcr_softc, "cryptosoft", 0, swcr_methods);
4355 + swcr_id = crypto_get_driverid(softc_get_device(&swcr_softc),
4356 + CRYPTOCAP_F_SOFTWARE | CRYPTOCAP_F_SYNC);
4357 + if (swcr_id < 0) {
4358 + printk("Software crypto device cannot initialize!");
4359 + return -ENODEV;
4362 +#define REGISTER(alg) \
4363 + crypto_register(swcr_id, alg, 0,0);
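+/* register every supported entry of the crypto_details table with the OCF core under this driver id */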
4365 + for (i = CRYPTO_ALGORITHM_MIN; i <= CRYPTO_ALGORITHM_MAX; ++i)
4368 + algo = crypto_details[i].alg_name;
4369 + if (!algo || !*algo)
4371 + dprintk("%s:Algorithm %d not supported\n", __FUNCTION__, i);
4372 + continue;
4375 + mode = crypto_details[i].mode;
4376 + sw_type = crypto_details[i].sw_type;
4378 + switch (sw_type)
4380 + case SW_TYPE_CIPHER:
4381 + if (crypto_has_cipher(algo, 0, CRYPTO_ALG_ASYNC))
4383 + REGISTER(i);
4385 + else
4387 + dprintk("%s:CIPHER algorithm %d:'%s' not supported\n",
4388 + __FUNCTION__, i, algo);
4390 + break;
4391 + case SW_TYPE_HMAC:
4392 + if (crypto_has_hash(algo, 0, CRYPTO_ALG_ASYNC))
4394 + REGISTER(i);
4396 + else
4398 + dprintk("%s:HMAC algorithm %d:'%s' not supported\n",
4399 + __FUNCTION__, i, algo);
4401 + break;
4402 + case SW_TYPE_HASH:
4403 + if (crypto_has_hash(algo, 0, CRYPTO_ALG_ASYNC))
4405 + REGISTER(i);
4407 + else
4409 + dprintk("%s:HASH algorithm %d:'%s' not supported\n",
4410 + __FUNCTION__, i, algo);
4412 + break;
4413 + case SW_TYPE_COMP:
4414 + if (crypto_has_comp(algo, 0, CRYPTO_ALG_ASYNC))
4416 + REGISTER(i);
4418 + else
4420 + dprintk("%s:COMP algorithm %d:'%s' not supported\n",
4421 + __FUNCTION__, i, algo);
4423 + break;
4424 + case SW_TYPE_BLKCIPHER:
4425 + if (crypto_has_blkcipher(algo, 0, CRYPTO_ALG_ASYNC))
4427 + REGISTER(i);
4429 + else
4431 + dprintk("%s:BLKCIPHER algorithm %d:'%s' not supported\n",
4432 + __FUNCTION__, i, algo);
4434 + break;
4435 + default:
4436 + dprintk(
4437 + "%s:Algorithm Type %d not supported (algorithm %d:'%s')\n",
4438 + __FUNCTION__, sw_type, i, algo);
4439 + break;
4443 + return(0);
4446 +static void
4447 +cryptosoft_exit(void)
4449 + dprintk("%s()\n", __FUNCTION__);
4450 + crypto_unregister_all(swcr_id);
4451 + swcr_id = -1;
4454 +module_init(cryptosoft_init);
4455 +module_exit(cryptosoft_exit);
4457 +MODULE_LICENSE("Dual BSD/GPL");
4458 +MODULE_AUTHOR("David McCullough <david_mccullough@securecomputing.com>");
4459 +MODULE_DESCRIPTION("Cryptosoft (OCF module for kernel crypto)");
4460 diff -Nur linux-2.6.30.orig/crypto/ocf/ep80579/icp_asym.c linux-2.6.30/crypto/ocf/ep80579/icp_asym.c
4461 --- linux-2.6.30.orig/crypto/ocf/ep80579/icp_asym.c 1970-01-01 01:00:00.000000000 +0100
4462 +++ linux-2.6.30/crypto/ocf/ep80579/icp_asym.c 2009-06-11 10:55:27.000000000 +0200
4463 @@ -0,0 +1,1375 @@
4464 +/***************************************************************************
4466 + * This file is provided under a dual BSD/GPLv2 license. When using or
4467 + * redistributing this file, you may do so under either license.
4468 + *
4469 + * GPL LICENSE SUMMARY
4470 + *
4471 + * Copyright(c) 2007,2008 Intel Corporation. All rights reserved.
4472 + *
4473 + * This program is free software; you can redistribute it and/or modify
4474 + * it under the terms of version 2 of the GNU General Public License as
4475 + * published by the Free Software Foundation.
4476 + *
4477 + * This program is distributed in the hope that it will be useful, but
4478 + * WITHOUT ANY WARRANTY; without even the implied warranty of
4479 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
4480 + * General Public License for more details.
4481 + *
4482 + * You should have received a copy of the GNU General Public License
4483 + * along with this program; if not, write to the Free Software
4484 + * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
4485 + * The full GNU General Public License is included in this distribution
4486 + * in the file called LICENSE.GPL.
4487 + *
4488 + * Contact Information:
4489 + * Intel Corporation
4490 + *
4491 + * BSD LICENSE
4492 + *
4493 + * Copyright(c) 2007,2008 Intel Corporation. All rights reserved.
4494 + * All rights reserved.
4495 + *
4496 + * Redistribution and use in source and binary forms, with or without
4497 + * modification, are permitted provided that the following conditions
4498 + * are met:
4499 + *
4500 + * * Redistributions of source code must retain the above copyright
4501 + * notice, this list of conditions and the following disclaimer.
4502 + * * Redistributions in binary form must reproduce the above copyright
4503 + * notice, this list of conditions and the following disclaimer in
4504 + * the documentation and/or other materials provided with the
4505 + * distribution.
4506 + * * Neither the name of Intel Corporation nor the names of its
4507 + * contributors may be used to endorse or promote products derived
4508 + * from this software without specific prior written permission.
4509 + *
4510 + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
4511 + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
4512 + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
4513 + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
4514 + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
4515 + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
4516 + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
4517 + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
4518 + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
4519 + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
4520 + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
4521 + *
4522 + *
4523 + * version: Security.L.1.0.130
4525 + ***************************************************************************/
4527 +#include "icp_ocf.h"
4529 +/* The following define values (containing the word 'INDEX') are used to find
4530 +the index of each input buffer of the crypto_kop struct (see OCF cryptodev.h).
4531 +These values were found through analysis of the OCF OpenSSL patch. If the
4532 +calling program uses different input buffer positions, these defines will have
4533 +to be changed.*/
4535 +/*DIFFIE HELLMAN buffer index values*/
4536 +#define ICP_DH_KRP_PARAM_PRIME_INDEX (0)
4537 +#define ICP_DH_KRP_PARAM_BASE_INDEX (1)
4538 +#define ICP_DH_KRP_PARAM_PRIVATE_VALUE_INDEX (2)
4539 +#define ICP_DH_KRP_PARAM_RESULT_INDEX (3)
4541 +/*MOD EXP buffer index values*/
4542 +#define ICP_MOD_EXP_KRP_PARAM_BASE_INDEX (0)
4543 +#define ICP_MOD_EXP_KRP_PARAM_EXPONENT_INDEX (1)
4544 +#define ICP_MOD_EXP_KRP_PARAM_MODULUS_INDEX (2)
4545 +#define ICP_MOD_EXP_KRP_PARAM_RESULT_INDEX (3)
4547 +#define SINGLE_BYTE_VALUE (4)
4549 +/*MOD EXP CRT buffer index values*/
4550 +#define ICP_MOD_EXP_CRT_KRP_PARAM_PRIME_P_INDEX (0)
4551 +#define ICP_MOD_EXP_CRT_KRP_PARAM_PRIME_Q_INDEX (1)
4552 +#define ICP_MOD_EXP_CRT_KRP_PARAM_I_INDEX (2)
4553 +#define ICP_MOD_EXP_CRT_KRP_PARAM_EXPONENT_DP_INDEX (3)
4554 +#define ICP_MOD_EXP_CRT_KRP_PARAM_EXPONENT_DQ_INDEX (4)
4555 +#define ICP_MOD_EXP_CRT_KRP_PARAM_COEFF_QINV_INDEX (5)
4556 +#define ICP_MOD_EXP_CRT_KRP_PARAM_RESULT_INDEX (6)
4558 +/*DSA sign buffer index values*/
4559 +#define ICP_DSA_SIGN_KRP_PARAM_DGST_INDEX (0)
4560 +#define ICP_DSA_SIGN_KRP_PARAM_PRIME_P_INDEX (1)
4561 +#define ICP_DSA_SIGN_KRP_PARAM_PRIME_Q_INDEX (2)
4562 +#define ICP_DSA_SIGN_KRP_PARAM_G_INDEX (3)
4563 +#define ICP_DSA_SIGN_KRP_PARAM_X_INDEX (4)
4564 +#define ICP_DSA_SIGN_KRP_PARAM_R_RESULT_INDEX (5)
4565 +#define ICP_DSA_SIGN_KRP_PARAM_S_RESULT_INDEX (6)
4567 +/*DSA verify buffer index values*/
4568 +#define ICP_DSA_VERIFY_KRP_PARAM_DGST_INDEX (0)
4569 +#define ICP_DSA_VERIFY_KRP_PARAM_PRIME_P_INDEX (1)
4570 +#define ICP_DSA_VERIFY_KRP_PARAM_PRIME_Q_INDEX (2)
4571 +#define ICP_DSA_VERIFY_KRP_PARAM_G_INDEX (3)
4572 +#define ICP_DSA_VERIFY_KRP_PARAM_PUBKEY_INDEX (4)
4573 +#define ICP_DSA_VERIFY_KRP_PARAM_SIG_R_INDEX (5)
4574 +#define ICP_DSA_VERIFY_KRP_PARAM_SIG_S_INDEX (6)
4576 +/*DSA sign prime Q vs random number K size check values*/
4577 +#define DONT_RUN_LESS_THAN_CHECK (0)
4578 +#define FAIL_A_IS_GREATER_THAN_B (1)
4579 +#define FAIL_A_IS_EQUAL_TO_B (1)
4580 +#define SUCCESS_A_IS_LESS_THAN_B (0)
4581 +#define DSA_SIGN_RAND_GEN_VAL_CHECK_MAX_ITERATIONS (500)
4583 +/* We need to set a cryptkop success value just in case it is set or allocated
4584 + and not set to zero outside of this module */
4585 +#define CRYPTO_OP_SUCCESS (0)
4587 +static int icp_ocfDrvDHComputeKey(struct cryptkop *krp);
4589 +static int icp_ocfDrvModExp(struct cryptkop *krp);
4591 +static int icp_ocfDrvModExpCRT(struct cryptkop *krp);
4593 +static int
4594 +icp_ocfDrvCheckALessThanB(CpaFlatBuffer * pK, CpaFlatBuffer * pQ, int *doCheck);
4596 +static int icp_ocfDrvDsaSign(struct cryptkop *krp);
4598 +static int icp_ocfDrvDsaVerify(struct cryptkop *krp);
4600 +static void
4601 +icp_ocfDrvDhP1CallBack(void *callbackTag,
4602 + CpaStatus status,
4603 + void *pOpData, CpaFlatBuffer * pLocalOctetStringPV);
4605 +static void
4606 +icp_ocfDrvModExpCallBack(void *callbackTag,
4607 + CpaStatus status,
4608 + void *pOpData, CpaFlatBuffer * pResult);
4610 +static void
4611 +icp_ocfDrvModExpCRTCallBack(void *callbackTag,
4612 + CpaStatus status,
4613 + void *pOpData, CpaFlatBuffer * pOutputData);
4615 +static void
4616 +icp_ocfDrvDsaVerifyCallBack(void *callbackTag,
4617 + CpaStatus status,
4618 + void *pOpData, CpaBoolean verifyStatus);
4620 +static void
4621 +icp_ocfDrvDsaRSSignCallBack(void *callbackTag,
4622 + CpaStatus status,
4623 + void *pOpData,
4624 + CpaBoolean protocolStatus,
4625 + CpaFlatBuffer * pR, CpaFlatBuffer * pS);
4627 +/* Name : icp_ocfDrvPkeProcess
4629 + * Description : This function will choose which PKE process to follow
4630 + * based on the input arguments
4631 + */
4632 +int icp_ocfDrvPkeProcess(device_t dev, struct cryptkop *krp, int hint)
4634 + CpaStatus lacStatus = CPA_STATUS_SUCCESS;
4636 + if (NULL == krp) {
4637 + DPRINTK("%s(): Invalid input parameters, cryptkop = %p\n",
4638 + __FUNCTION__, krp);
4639 + return EINVAL;
4642 + if (CPA_TRUE == atomic_read(&icp_ocfDrvIsExiting)) {
4643 + krp->krp_status = ECANCELED;
4644 + return ECANCELED;
4647 + switch (krp->krp_op) {
4648 + case CRK_DH_COMPUTE_KEY:
4649 + DPRINTK("%s() doing DH_COMPUTE_KEY\n", __FUNCTION__);
4650 + lacStatus = icp_ocfDrvDHComputeKey(krp);
4651 + if (CPA_STATUS_SUCCESS != lacStatus) {
4652 + EPRINTK("%s(): icp_ocfDrvDHComputeKey failed "
4653 + "(%d).\n", __FUNCTION__, lacStatus);
4654 + krp->krp_status = ECANCELED;
4655 + return ECANCELED;
4658 + break;
4660 + case CRK_MOD_EXP:
4661 + DPRINTK("%s() doing MOD_EXP \n", __FUNCTION__);
4662 + lacStatus = icp_ocfDrvModExp(krp);
4663 + if (CPA_STATUS_SUCCESS != lacStatus) {
4664 + EPRINTK("%s(): icp_ocfDrvModExp failed (%d).\n",
4665 + __FUNCTION__, lacStatus);
4666 + krp->krp_status = ECANCELED;
4667 + return ECANCELED;
4670 + break;
4672 + case CRK_MOD_EXP_CRT:
4673 + DPRINTK("%s() doing MOD_EXP_CRT \n", __FUNCTION__);
4674 + lacStatus = icp_ocfDrvModExpCRT(krp);
4675 + if (CPA_STATUS_SUCCESS != lacStatus) {
4676 + EPRINTK("%s(): icp_ocfDrvModExpCRT "
4677 + "failed (%d).\n", __FUNCTION__, lacStatus);
4678 + krp->krp_status = ECANCELED;
4679 + return ECANCELED;
4682 + break;
4684 + case CRK_DSA_SIGN:
4685 + DPRINTK("%s() doing DSA_SIGN \n", __FUNCTION__);
4686 + lacStatus = icp_ocfDrvDsaSign(krp);
4687 + if (CPA_STATUS_SUCCESS != lacStatus) {
4688 + EPRINTK("%s(): icp_ocfDrvDsaSign "
4689 + "failed (%d).\n", __FUNCTION__, lacStatus);
4690 + krp->krp_status = ECANCELED;
4691 + return ECANCELED;
4694 + break;
4696 + case CRK_DSA_VERIFY:
4697 + DPRINTK("%s() doing DSA_VERIFY \n", __FUNCTION__);
4698 + lacStatus = icp_ocfDrvDsaVerify(krp);
4699 + if (CPA_STATUS_SUCCESS != lacStatus) {
4700 + EPRINTK("%s(): icp_ocfDrvDsaVerify "
4701 + "failed (%d).\n", __FUNCTION__, lacStatus);
4702 + krp->krp_status = ECANCELED;
4703 + return ECANCELED;
4706 + break;
4708 + default:
4709 + EPRINTK("%s(): Asymettric function not "
4710 + "supported (%d).\n", __FUNCTION__, krp->krp_op);
4711 + krp->krp_status = EOPNOTSUPP;
4712 + return EOPNOTSUPP;
4715 + return ICP_OCF_DRV_STATUS_SUCCESS;
4718 +/* Name : icp_ocfDrvSwapBytes
4720 + * Description : This function is used to swap the byte order of a buffer.
4721 + * It has been seen that in general we are passed little endian byte order
4722 + * buffers, but LAC only accepts big endian byte order buffers.
4723 + */
4724 +static inline void
4725 +icp_ocfDrvSwapBytes(u_int8_t * num, u_int32_t buff_len_bytes)
4728 + int i;
4729 + u_int8_t *end_ptr;
4730 + u_int8_t hold_val;
4732 + end_ptr = num + (buff_len_bytes - 1);
4733 + buff_len_bytes = buff_len_bytes >> 1;
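+ /* swap in place by walking inward from both ends, so only half the buffer is visited */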
4734 + for (i = 0; i < buff_len_bytes; i++) {
4735 + hold_val = *num;
4736 + *num = *end_ptr;
4737 + num++;
4738 + *end_ptr = hold_val;
4739 + end_ptr--;
4743 +/* Name : icp_ocfDrvDHComputeKey
4745 + * Description : This function will map Diffie Hellman calls from OCF
4746 + * to the LAC API. OCF uses this function for Diffie Hellman Phase1 and
4747 + * Phase2. LAC has a separate Diffie Hellman Phase2 call, however both phases
4748 + * break down to a modular exponentiation.
4749 + */
4750 +static int icp_ocfDrvDHComputeKey(struct cryptkop *krp)
4752 + CpaStatus lacStatus = CPA_STATUS_SUCCESS;
4753 + void *callbackTag = NULL;
4754 + CpaCyDhPhase1KeyGenOpData *pPhase1OpData = NULL;
4755 + CpaFlatBuffer *pLocalOctetStringPV = NULL;
4756 + uint32_t dh_prime_len_bytes = 0, dh_prime_len_bits = 0;
4758 + /* Input checks - check prime is a multiple of 8 bits to allow for
4759 + allocation later */
4760 + dh_prime_len_bits =
4761 + (krp->krp_param[ICP_DH_KRP_PARAM_PRIME_INDEX].crp_nbits);
4763 + /* LAC can reject prime lengths based on prime key sizes, we just
4764 + need to make sure we can allocate space for the base and
4765 + exponent buffers correctly */
4766 + if ((dh_prime_len_bits % NUM_BITS_IN_BYTE) != 0) {
4767 + APRINTK("%s(): Warning Prime number buffer size is not a "
4768 + "multiple of 8 bits\n", __FUNCTION__);
4771 + /* Result storage space should be the same size as the prime as this
4772 + value can take up the same amount of storage space */
4773 + if (dh_prime_len_bits !=
4774 + krp->krp_param[ICP_DH_KRP_PARAM_RESULT_INDEX].crp_nbits) {
4775 + DPRINTK("%s(): Return Buffer must be the same size "
4776 + "as the Prime buffer\n", __FUNCTION__);
4777 + krp->krp_status = EINVAL;
4778 + return EINVAL;
4780 + /* Switch to size in bytes */
4781 + BITS_TO_BYTES(dh_prime_len_bytes, dh_prime_len_bits);
4783 + callbackTag = krp;
4785 + pPhase1OpData = kmem_cache_zalloc(drvDH_zone, GFP_KERNEL);
4786 + if (NULL == pPhase1OpData) {
4787 + APRINTK("%s():Failed to get memory for key gen data\n",
4788 + __FUNCTION__);
4789 + krp->krp_status = ENOMEM;
4790 + return ENOMEM;
4793 + pLocalOctetStringPV = kmem_cache_zalloc(drvFlatBuffer_zone, GFP_KERNEL);
4794 + if (NULL == pLocalOctetStringPV) {
4795 + APRINTK("%s():Failed to get memory for pLocalOctetStringPV\n",
4796 + __FUNCTION__);
4797 + kmem_cache_free(drvDH_zone, pPhase1OpData);
4798 + krp->krp_status = ENOMEM;
4799 + return ENOMEM;
4802 + /* Link parameters */
4803 + pPhase1OpData->primeP.pData =
4804 + krp->krp_param[ICP_DH_KRP_PARAM_PRIME_INDEX].crp_p;
4806 + pPhase1OpData->primeP.dataLenInBytes = dh_prime_len_bytes;
4808 + icp_ocfDrvSwapBytes(pPhase1OpData->primeP.pData, dh_prime_len_bytes);
4810 + pPhase1OpData->baseG.pData =
4811 + krp->krp_param[ICP_DH_KRP_PARAM_BASE_INDEX].crp_p;
4813 + BITS_TO_BYTES(pPhase1OpData->baseG.dataLenInBytes,
4814 + krp->krp_param[ICP_DH_KRP_PARAM_BASE_INDEX].crp_nbits);
4816 + icp_ocfDrvSwapBytes(pPhase1OpData->baseG.pData,
4817 + pPhase1OpData->baseG.dataLenInBytes);
4819 + pPhase1OpData->privateValueX.pData =
4820 + krp->krp_param[ICP_DH_KRP_PARAM_PRIVATE_VALUE_INDEX].crp_p;
4822 + BITS_TO_BYTES(pPhase1OpData->privateValueX.dataLenInBytes,
4823 + krp->krp_param[ICP_DH_KRP_PARAM_PRIVATE_VALUE_INDEX].
4824 + crp_nbits);
4826 + icp_ocfDrvSwapBytes(pPhase1OpData->privateValueX.pData,
4827 + pPhase1OpData->privateValueX.dataLenInBytes);
4829 + /* Output parameters */
4830 + pLocalOctetStringPV->pData =
4831 + krp->krp_param[ICP_DH_KRP_PARAM_RESULT_INDEX].crp_p;
4833 + BITS_TO_BYTES(pLocalOctetStringPV->dataLenInBytes,
4834 + krp->krp_param[ICP_DH_KRP_PARAM_RESULT_INDEX].crp_nbits);
4836 + lacStatus = cpaCyDhKeyGenPhase1(CPA_INSTANCE_HANDLE_SINGLE,
4837 + icp_ocfDrvDhP1CallBack,
4838 + callbackTag, pPhase1OpData,
4839 + pLocalOctetStringPV);
4841 + if (CPA_STATUS_SUCCESS != lacStatus) {
4842 + EPRINTK("%s(): DH Phase 1 Key Gen failed (%d).\n",
4843 + __FUNCTION__, lacStatus);
4844 + icp_ocfDrvFreeFlatBuffer(pLocalOctetStringPV);
4845 + kmem_cache_free(drvDH_zone, pPhase1OpData);
4848 + return lacStatus;
4851 +/* Name : icp_ocfDrvModExp
4853 + * Description : This function will map ordinary Modular Exponentiation calls
4854 + * from OCF to the LAC API.
4856 + */
4857 +static int icp_ocfDrvModExp(struct cryptkop *krp)
4859 + CpaStatus lacStatus = CPA_STATUS_SUCCESS;
4860 + void *callbackTag = NULL;
4861 + CpaCyLnModExpOpData *pModExpOpData = NULL;
4862 + CpaFlatBuffer *pResult = NULL;
4864 + if ((krp->krp_param[ICP_MOD_EXP_KRP_PARAM_MODULUS_INDEX].crp_nbits %
4865 + NUM_BITS_IN_BYTE) != 0) {
4866 + DPRINTK("%s(): Warning - modulus buffer size (%d) is not a "
4867 + "multiple of 8 bits\n", __FUNCTION__,
4868 + krp->krp_param[ICP_MOD_EXP_KRP_PARAM_MODULUS_INDEX].
4869 + crp_nbits);
4872 + /* Result storage space should be at least the same size as the modulus as this
4873 + value can take up the same amount of storage space */
4874 + if (krp->krp_param[ICP_MOD_EXP_KRP_PARAM_MODULUS_INDEX].crp_nbits >
4875 + krp->krp_param[ICP_MOD_EXP_KRP_PARAM_RESULT_INDEX].crp_nbits) {
4876 + APRINTK("%s(): Return Buffer size must be the same or"
4877 + " greater than the Modulus buffer\n", __FUNCTION__);
4878 + krp->krp_status = EINVAL;
4879 + return EINVAL;
4882 + callbackTag = krp;
4884 + pModExpOpData = kmem_cache_zalloc(drvLnModExp_zone, GFP_KERNEL);
4885 + if (NULL == pModExpOpData) {
4886 + APRINTK("%s():Failed to get memory for key gen data\n",
4887 + __FUNCTION__);
4888 + krp->krp_status = ENOMEM;
4889 + return ENOMEM;
4892 + pResult = kmem_cache_zalloc(drvFlatBuffer_zone, GFP_KERNEL);
4893 + if (NULL == pResult) {
4894 + APRINTK("%s():Failed to get memory for ModExp result\n",
4895 + __FUNCTION__);
4896 + kmem_cache_free(drvLnModExp_zone, pModExpOpData);
4897 + krp->krp_status = ENOMEM;
4898 + return ENOMEM;
4901 + /* Link parameters */
4902 + pModExpOpData->modulus.pData =
4903 + krp->krp_param[ICP_MOD_EXP_KRP_PARAM_MODULUS_INDEX].crp_p;
4904 + BITS_TO_BYTES(pModExpOpData->modulus.dataLenInBytes,
4905 + krp->krp_param[ICP_MOD_EXP_KRP_PARAM_MODULUS_INDEX].
4906 + crp_nbits);
4908 + icp_ocfDrvSwapBytes(pModExpOpData->modulus.pData,
4909 + pModExpOpData->modulus.dataLenInBytes);
4911 + /* The OCF patch to Openswan Pluto regularly sends the base value as 2
4912 + bits in size. In this case it has been found better to use the
4913 + base-size memory space as the input buffer (if the size of the
4914 + number in bits is less than a byte, the number of bits itself is
4915 + the input value) */
4916 + if (krp->krp_param[ICP_MOD_EXP_KRP_PARAM_BASE_INDEX].crp_nbits <
4917 + NUM_BITS_IN_BYTE) {
4918 + DPRINTK("%s : base is small (%d)\n", __FUNCTION__, krp->
4919 + krp_param[ICP_MOD_EXP_KRP_PARAM_BASE_INDEX].crp_nbits);
4920 + pModExpOpData->base.dataLenInBytes = SINGLE_BYTE_VALUE;
4921 + pModExpOpData->base.pData =
4922 + (uint8_t *) & (krp->
4923 + krp_param[ICP_MOD_EXP_KRP_PARAM_BASE_INDEX].
4924 + crp_nbits);
4925 + *((uint32_t *) pModExpOpData->base.pData) =
4926 + htonl(*((uint32_t *) pModExpOpData->base.pData));
4928 + } else {
4930 + DPRINTK("%s : base is big (%d)\n", __FUNCTION__, krp->
4931 + krp_param[ICP_MOD_EXP_KRP_PARAM_BASE_INDEX].crp_nbits);
4932 + pModExpOpData->base.pData =
4933 + krp->krp_param[ICP_MOD_EXP_KRP_PARAM_BASE_INDEX].crp_p;
4934 + BITS_TO_BYTES(pModExpOpData->base.dataLenInBytes,
4935 + krp->krp_param[ICP_MOD_EXP_KRP_PARAM_BASE_INDEX].
4936 + crp_nbits);
4937 + icp_ocfDrvSwapBytes(pModExpOpData->base.pData,
4938 + pModExpOpData->base.dataLenInBytes);
4941 + pModExpOpData->exponent.pData =
4942 + krp->krp_param[ICP_MOD_EXP_KRP_PARAM_EXPONENT_INDEX].crp_p;
4943 + BITS_TO_BYTES(pModExpOpData->exponent.dataLenInBytes,
4944 + krp->krp_param[ICP_MOD_EXP_KRP_PARAM_EXPONENT_INDEX].
4945 + crp_nbits);
4947 + icp_ocfDrvSwapBytes(pModExpOpData->exponent.pData,
4948 + pModExpOpData->exponent.dataLenInBytes);
4949 + /* Output parameters */
4950 + pResult->pData =
4951 + krp->krp_param[ICP_MOD_EXP_KRP_PARAM_RESULT_INDEX].crp_p,
4952 + BITS_TO_BYTES(pResult->dataLenInBytes,
4953 + krp->krp_param[ICP_MOD_EXP_KRP_PARAM_RESULT_INDEX].
4954 + crp_nbits);
4956 + lacStatus = cpaCyLnModExp(CPA_INSTANCE_HANDLE_SINGLE,
4957 + icp_ocfDrvModExpCallBack,
4958 + callbackTag, pModExpOpData, pResult);
4960 + if (CPA_STATUS_SUCCESS != lacStatus) {
4961 + EPRINTK("%s(): Mod Exp Operation failed (%d).\n",
4962 + __FUNCTION__, lacStatus);
4963 + krp->krp_status = ECANCELED;
4964 + icp_ocfDrvFreeFlatBuffer(pResult);
4965 + kmem_cache_free(drvLnModExp_zone, pModExpOpData);
4968 + return lacStatus;
4971 +/* Name : icp_ocfDrvModExpCRT
4973 + * Description : This function will map ordinary Modular Exponentiation Chinese
4974 + * Remainder Theorem implementation calls from OCF to the LAC API.
4976 + * Note : Mod Exp CRT for this driver is accelerated through LAC RSA type 2
4977 + * decrypt operation. Therefore P and Q input values must always be prime
4978 + * numbers. Although basic primality checks are done in LAC, it is up to the
4979 + * user to do any correct prime number checking before passing the inputs.
4980 + */
4982 +static int icp_ocfDrvModExpCRT(struct cryptkop *krp)
4984 + CpaStatus lacStatus = CPA_STATUS_SUCCESS;
4985 + CpaCyRsaDecryptOpData *rsaDecryptOpData = NULL;
4986 + void *callbackTag = NULL;
4987 + CpaFlatBuffer *pOutputData = NULL;
4989 + /*Parameter input checks are all done by LAC, no need to repeat
4990 + them here. */
4991 + callbackTag = krp;
4993 + rsaDecryptOpData = kmem_cache_zalloc(drvRSADecrypt_zone, GFP_KERNEL);
4994 + if (NULL == rsaDecryptOpData) {
4995 + APRINTK("%s():Failed to get memory"
4996 + " for MOD EXP CRT Op data struct\n", __FUNCTION__);
4997 + krp->krp_status = ENOMEM;
4998 + return ENOMEM;
5001 + rsaDecryptOpData->pRecipientPrivateKey
5002 + = kmem_cache_zalloc(drvRSAPrivateKey_zone, GFP_KERNEL);
5003 + if (NULL == rsaDecryptOpData->pRecipientPrivateKey) {
5004 + APRINTK("%s():Failed to get memory for MOD EXP CRT"
5005 + " private key values struct\n", __FUNCTION__);
5006 + kmem_cache_free(drvRSADecrypt_zone, rsaDecryptOpData);
5007 + krp->krp_status = ENOMEM;
5008 + return ENOMEM;
5011 + rsaDecryptOpData->pRecipientPrivateKey->
5012 + version = CPA_CY_RSA_VERSION_TWO_PRIME;
5013 + rsaDecryptOpData->pRecipientPrivateKey->
5014 + privateKeyRepType = CPA_CY_RSA_PRIVATE_KEY_REP_TYPE_2;
5016 + pOutputData = kmem_cache_zalloc(drvFlatBuffer_zone, GFP_KERNEL);
5017 + if (NULL == pOutputData) {
5018 + APRINTK("%s():Failed to get memory"
5019 + " for MOD EXP CRT output data\n", __FUNCTION__);
5020 + kmem_cache_free(drvRSAPrivateKey_zone,
5021 + rsaDecryptOpData->pRecipientPrivateKey);
5022 + kmem_cache_free(drvRSADecrypt_zone, rsaDecryptOpData);
5023 + krp->krp_status = ENOMEM;
5024 + return ENOMEM;
5027 + rsaDecryptOpData->pRecipientPrivateKey->
5028 + version = CPA_CY_RSA_VERSION_TWO_PRIME;
5029 + rsaDecryptOpData->pRecipientPrivateKey->
5030 + privateKeyRepType = CPA_CY_RSA_PRIVATE_KEY_REP_TYPE_2;
5032 + /* Link parameters */
5033 + rsaDecryptOpData->inputData.pData =
5034 + krp->krp_param[ICP_MOD_EXP_CRT_KRP_PARAM_I_INDEX].crp_p;
5035 + BITS_TO_BYTES(rsaDecryptOpData->inputData.dataLenInBytes,
5036 + krp->krp_param[ICP_MOD_EXP_CRT_KRP_PARAM_I_INDEX].
5037 + crp_nbits);
5039 + icp_ocfDrvSwapBytes(rsaDecryptOpData->inputData.pData,
5040 + rsaDecryptOpData->inputData.dataLenInBytes);
5042 + rsaDecryptOpData->pRecipientPrivateKey->privateKeyRep2.prime1P.pData =
5043 + krp->krp_param[ICP_MOD_EXP_CRT_KRP_PARAM_PRIME_P_INDEX].crp_p;
5044 + BITS_TO_BYTES(rsaDecryptOpData->pRecipientPrivateKey->privateKeyRep2.
5045 + prime1P.dataLenInBytes,
5046 + krp->krp_param[ICP_MOD_EXP_CRT_KRP_PARAM_PRIME_P_INDEX].
5047 + crp_nbits);
5049 + icp_ocfDrvSwapBytes(rsaDecryptOpData->pRecipientPrivateKey->
5050 + privateKeyRep2.prime1P.pData,
5051 + rsaDecryptOpData->pRecipientPrivateKey->
5052 + privateKeyRep2.prime1P.dataLenInBytes);
5054 + rsaDecryptOpData->pRecipientPrivateKey->privateKeyRep2.prime2Q.pData =
5055 + krp->krp_param[ICP_MOD_EXP_CRT_KRP_PARAM_PRIME_Q_INDEX].crp_p;
5056 + BITS_TO_BYTES(rsaDecryptOpData->pRecipientPrivateKey->privateKeyRep2.
5057 + prime2Q.dataLenInBytes,
5058 + krp->krp_param[ICP_MOD_EXP_CRT_KRP_PARAM_PRIME_Q_INDEX].
5059 + crp_nbits);
5061 + icp_ocfDrvSwapBytes(rsaDecryptOpData->pRecipientPrivateKey->
5062 + privateKeyRep2.prime2Q.pData,
5063 + rsaDecryptOpData->pRecipientPrivateKey->
5064 + privateKeyRep2.prime2Q.dataLenInBytes);
5066 + rsaDecryptOpData->pRecipientPrivateKey->
5067 + privateKeyRep2.exponent1Dp.pData =
5068 + krp->krp_param[ICP_MOD_EXP_CRT_KRP_PARAM_EXPONENT_DP_INDEX].crp_p;
5069 + BITS_TO_BYTES(rsaDecryptOpData->pRecipientPrivateKey->privateKeyRep2.
5070 + exponent1Dp.dataLenInBytes,
5071 + krp->
5072 + krp_param[ICP_MOD_EXP_CRT_KRP_PARAM_EXPONENT_DP_INDEX].
5073 + crp_nbits);
5075 + icp_ocfDrvSwapBytes(rsaDecryptOpData->pRecipientPrivateKey->
5076 + privateKeyRep2.exponent1Dp.pData,
5077 + rsaDecryptOpData->pRecipientPrivateKey->
5078 + privateKeyRep2.exponent1Dp.dataLenInBytes);
5080 + rsaDecryptOpData->pRecipientPrivateKey->
5081 + privateKeyRep2.exponent2Dq.pData =
5082 + krp->krp_param[ICP_MOD_EXP_CRT_KRP_PARAM_EXPONENT_DQ_INDEX].crp_p;
5083 + BITS_TO_BYTES(rsaDecryptOpData->pRecipientPrivateKey->
5084 + privateKeyRep2.exponent2Dq.dataLenInBytes,
5085 + krp->
5086 + krp_param[ICP_MOD_EXP_CRT_KRP_PARAM_EXPONENT_DQ_INDEX].
5087 + crp_nbits);
5089 + icp_ocfDrvSwapBytes(rsaDecryptOpData->pRecipientPrivateKey->
5090 + privateKeyRep2.exponent2Dq.pData,
5091 + rsaDecryptOpData->pRecipientPrivateKey->
5092 + privateKeyRep2.exponent2Dq.dataLenInBytes);
5094 + rsaDecryptOpData->pRecipientPrivateKey->
5095 + privateKeyRep2.coefficientQInv.pData =
5096 + krp->krp_param[ICP_MOD_EXP_CRT_KRP_PARAM_COEFF_QINV_INDEX].crp_p;
5097 + BITS_TO_BYTES(rsaDecryptOpData->pRecipientPrivateKey->
5098 + privateKeyRep2.coefficientQInv.dataLenInBytes,
5099 + krp->
5100 + krp_param[ICP_MOD_EXP_CRT_KRP_PARAM_COEFF_QINV_INDEX].
5101 + crp_nbits);
5103 + icp_ocfDrvSwapBytes(rsaDecryptOpData->pRecipientPrivateKey->
5104 + privateKeyRep2.coefficientQInv.pData,
5105 + rsaDecryptOpData->pRecipientPrivateKey->
5106 + privateKeyRep2.coefficientQInv.dataLenInBytes);
5108 + /* Output Parameter */
5109 + pOutputData->pData =
5110 + krp->krp_param[ICP_MOD_EXP_CRT_KRP_PARAM_RESULT_INDEX].crp_p;
5111 + BITS_TO_BYTES(pOutputData->dataLenInBytes,
5112 + krp->krp_param[ICP_MOD_EXP_CRT_KRP_PARAM_RESULT_INDEX].
5113 + crp_nbits);
5115 + lacStatus = cpaCyRsaDecrypt(CPA_INSTANCE_HANDLE_SINGLE,
5116 + icp_ocfDrvModExpCRTCallBack,
5117 + callbackTag, rsaDecryptOpData, pOutputData);
5119 + if (CPA_STATUS_SUCCESS != lacStatus) {
5120 + EPRINTK("%s(): Mod Exp CRT Operation failed (%d).\n",
5121 + __FUNCTION__, lacStatus);
5122 + krp->krp_status = ECANCELED;
5123 + icp_ocfDrvFreeFlatBuffer(pOutputData);
5124 + kmem_cache_free(drvRSAPrivateKey_zone,
5125 + rsaDecryptOpData->pRecipientPrivateKey);
5126 + kmem_cache_free(drvRSADecrypt_zone, rsaDecryptOpData);
5129 + return lacStatus;
5132 +/* Name : icp_ocfDrvCheckALessThanB
5134 + * Description : This function will check whether the first argument is less
5135 + * than the second. It is used to check whether the DSA RS sign Random K
5136 + * value is less than the Prime Q value (as defined in the specification)
5138 + */
5139 +static int
5140 +icp_ocfDrvCheckALessThanB(CpaFlatBuffer * pK, CpaFlatBuffer * pQ, int *doCheck)
5143 + uint8_t *MSB_K = pK->pData;
5144 + uint8_t *MSB_Q = pQ->pData;
5145 + uint32_t buffer_lengths_in_bytes = pQ->dataLenInBytes;
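+ /* doCheck == 0 on the first call: report 'not less than' so the caller generates an initial K before any comparison */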
5147 + if (DONT_RUN_LESS_THAN_CHECK == *doCheck) {
5148 + return FAIL_A_IS_GREATER_THAN_B;
5151 +/*Check MSBs
5152 +if A == B, check next MSB
5153 +if A > B, return A_IS_GREATER_THAN_B
5154 +if A < B, return A_IS_LESS_THAN_B (success)
5156 + while (*MSB_K == *MSB_Q) {
5157 + MSB_K++;
5158 + MSB_Q++;
5160 + buffer_lengths_in_bytes--;
5161 + if (0 == buffer_lengths_in_bytes) {
5162 + DPRINTK("%s() Buffers have equal value!!\n",
5163 + __FUNCTION__);
5164 + return FAIL_A_IS_EQUAL_TO_B;
5169 + if (*MSB_K < *MSB_Q) {
5170 + return SUCCESS_A_IS_LESS_THAN_B;
5171 + } else {
5172 + return FAIL_A_IS_GREATER_THAN_B;
5177 +/* Name : icp_ocfDrvDsaSign
5179 + * Description : This function will map DSA RS Sign from OCF to the LAC API.
5181 + * NOTE: From looking at OCF patch to OpenSSL and even the number of input
5182 + * parameters, OCF expects us to generate the random seed value. This value
5183 + * is generated and passed to LAC; however, the number is discarded in the
5184 + * callback and not returned to the user.
5185 + */
5186 +static int icp_ocfDrvDsaSign(struct cryptkop *krp)
5188 + CpaStatus lacStatus = CPA_STATUS_SUCCESS;
5189 + CpaCyDsaRSSignOpData *dsaRsSignOpData = NULL;
5190 + void *callbackTag = NULL;
5191 + CpaCyRandGenOpData randGenOpData;
5192 + int primeQSizeInBytes = 0;
5193 + int doCheck = 0;
5194 + CpaFlatBuffer randData;
5195 + CpaBoolean protocolStatus = CPA_FALSE;
5196 + CpaFlatBuffer *pR = NULL;
5197 + CpaFlatBuffer *pS = NULL;
5199 + callbackTag = krp;
5201 + BITS_TO_BYTES(primeQSizeInBytes,
5202 + krp->krp_param[ICP_DSA_SIGN_KRP_PARAM_PRIME_Q_INDEX].
5203 + crp_nbits);
5205 + if (DSA_RS_SIGN_PRIMEQ_SIZE_IN_BYTES != primeQSizeInBytes) {
5206 + APRINTK("%s(): DSA PRIME Q size not equal to the "
5207 + "FIPS defined 20bytes, = %d\n",
5208 + __FUNCTION__, primeQSizeInBytes);
5209 + krp->krp_status = EDOM;
5210 + return EDOM;
5213 + dsaRsSignOpData = kmem_cache_zalloc(drvDSARSSign_zone, GFP_KERNEL);
5214 + if (NULL == dsaRsSignOpData) {
5215 + APRINTK("%s():Failed to get memory"
5216 + " for DSA RS Sign Op data struct\n", __FUNCTION__);
5217 + krp->krp_status = ENOMEM;
5218 + return ENOMEM;
5221 + dsaRsSignOpData->K.pData =
5222 + kmem_cache_alloc(drvDSARSSignKValue_zone, GFP_ATOMIC);
5224 + if (NULL == dsaRsSignOpData->K.pData) {
5225 + APRINTK("%s():Failed to get memory"
5226 + " for DSA RS Sign Op Random value\n", __FUNCTION__);
5227 + kmem_cache_free(drvDSARSSign_zone, dsaRsSignOpData);
5228 + krp->krp_status = ENOMEM;
5229 + return ENOMEM;
5232 + pR = kmem_cache_zalloc(drvFlatBuffer_zone, GFP_KERNEL);
5233 + if (NULL == pR) {
5234 + APRINTK("%s():Failed to get memory"
5235 + " for DSA signature R\n", __FUNCTION__);
5236 + kmem_cache_free(drvDSARSSignKValue_zone,
5237 + dsaRsSignOpData->K.pData);
5238 + kmem_cache_free(drvDSARSSign_zone, dsaRsSignOpData);
5239 + krp->krp_status = ENOMEM;
5240 + return ENOMEM;
5243 + pS = kmem_cache_zalloc(drvFlatBuffer_zone, GFP_KERNEL);
5244 + if (NULL == pS) {
5245 + APRINTK("%s():Failed to get memory"
5246 + " for DSA signature S\n", __FUNCTION__);
5247 + icp_ocfDrvFreeFlatBuffer(pR);
5248 + kmem_cache_free(drvDSARSSignKValue_zone,
5249 + dsaRsSignOpData->K.pData);
5250 + kmem_cache_free(drvDSARSSign_zone, dsaRsSignOpData);
5251 + krp->krp_status = ENOMEM;
5252 + return ENOMEM;
5255 + /*link prime number parameter for ease of processing */
5256 + dsaRsSignOpData->P.pData =
5257 + krp->krp_param[ICP_DSA_SIGN_KRP_PARAM_PRIME_P_INDEX].crp_p;
5258 + BITS_TO_BYTES(dsaRsSignOpData->P.dataLenInBytes,
5259 + krp->krp_param[ICP_DSA_SIGN_KRP_PARAM_PRIME_P_INDEX].
5260 + crp_nbits);
5262 + icp_ocfDrvSwapBytes(dsaRsSignOpData->P.pData,
5263 + dsaRsSignOpData->P.dataLenInBytes);
5265 + dsaRsSignOpData->Q.pData =
5266 + krp->krp_param[ICP_DSA_SIGN_KRP_PARAM_PRIME_Q_INDEX].crp_p;
5267 + BITS_TO_BYTES(dsaRsSignOpData->Q.dataLenInBytes,
5268 + krp->krp_param[ICP_DSA_SIGN_KRP_PARAM_PRIME_Q_INDEX].
5269 + crp_nbits);
5271 + icp_ocfDrvSwapBytes(dsaRsSignOpData->Q.pData,
5272 + dsaRsSignOpData->Q.dataLenInBytes);
5274 + /*generate random number with equal buffer size to Prime value Q,
5275 + but value less than Q */
5276 + dsaRsSignOpData->K.dataLenInBytes = dsaRsSignOpData->Q.dataLenInBytes;
5278 + randGenOpData.generateBits = CPA_TRUE;
5279 + randGenOpData.lenInBytes = dsaRsSignOpData->K.dataLenInBytes;
5281 + icp_ocfDrvPtrAndLenToFlatBuffer(dsaRsSignOpData->K.pData,
5282 + dsaRsSignOpData->K.dataLenInBytes,
5283 + &randData);
5285 + doCheck = 0;
5286 + while (icp_ocfDrvCheckALessThanB(&(dsaRsSignOpData->K),
5287 + &(dsaRsSignOpData->Q), &doCheck)) {
5289 + if (CPA_STATUS_SUCCESS
5290 + != cpaCyRandGen(CPA_INSTANCE_HANDLE_SINGLE,
5291 + NULL, NULL, &randGenOpData, &randData)) {
5292 + APRINTK("%s(): ERROR - Failed to generate DSA RS Sign K"
5293 + "value\n", __FUNCTION__);
5294 + icp_ocfDrvFreeFlatBuffer(pS);
5295 + icp_ocfDrvFreeFlatBuffer(pR);
5296 + kmem_cache_free(drvDSARSSignKValue_zone,
5297 + dsaRsSignOpData->K.pData);
5298 + kmem_cache_free(drvDSARSSign_zone, dsaRsSignOpData);
5299 + krp->krp_status = EAGAIN;
5300 + return EAGAIN;
5303 + doCheck++;
5304 + if (DSA_SIGN_RAND_GEN_VAL_CHECK_MAX_ITERATIONS == doCheck) {
5305 + APRINTK("%s(): ERROR - Failed to find DSA RS Sign K "
5306 + "value less than Q value\n", __FUNCTION__);
5307 + icp_ocfDrvFreeFlatBuffer(pS);
5308 + icp_ocfDrvFreeFlatBuffer(pR);
5309 + kmem_cache_free(drvDSARSSignKValue_zone,
5310 + dsaRsSignOpData->K.pData);
5311 + kmem_cache_free(drvDSARSSign_zone, dsaRsSignOpData);
5312 + krp->krp_status = EAGAIN;
5313 + return EAGAIN;
5317 + /*Rand Data - no need to swap bytes for pK */
5319 + /* Link parameters */
5320 + dsaRsSignOpData->G.pData =
5321 + krp->krp_param[ICP_DSA_SIGN_KRP_PARAM_G_INDEX].crp_p;
5322 + BITS_TO_BYTES(dsaRsSignOpData->G.dataLenInBytes,
5323 + krp->krp_param[ICP_DSA_SIGN_KRP_PARAM_G_INDEX].crp_nbits);
5325 + icp_ocfDrvSwapBytes(dsaRsSignOpData->G.pData,
5326 + dsaRsSignOpData->G.dataLenInBytes);
5328 + dsaRsSignOpData->X.pData =
5329 + krp->krp_param[ICP_DSA_SIGN_KRP_PARAM_X_INDEX].crp_p;
5330 + BITS_TO_BYTES(dsaRsSignOpData->X.dataLenInBytes,
5331 + krp->krp_param[ICP_DSA_SIGN_KRP_PARAM_X_INDEX].crp_nbits);
5332 + icp_ocfDrvSwapBytes(dsaRsSignOpData->X.pData,
5333 + dsaRsSignOpData->X.dataLenInBytes);
5335 + dsaRsSignOpData->M.pData =
5336 + krp->krp_param[ICP_DSA_SIGN_KRP_PARAM_DGST_INDEX].crp_p;
5337 + BITS_TO_BYTES(dsaRsSignOpData->M.dataLenInBytes,
5338 + krp->krp_param[ICP_DSA_SIGN_KRP_PARAM_DGST_INDEX].
5339 + crp_nbits);
5340 + icp_ocfDrvSwapBytes(dsaRsSignOpData->M.pData,
5341 + dsaRsSignOpData->M.dataLenInBytes);
5343 + /* Output Parameters */
5344 + pS->pData = krp->krp_param[ICP_DSA_SIGN_KRP_PARAM_S_RESULT_INDEX].crp_p;
5345 + BITS_TO_BYTES(pS->dataLenInBytes,
5346 + krp->krp_param[ICP_DSA_SIGN_KRP_PARAM_S_RESULT_INDEX].
5347 + crp_nbits);
5349 + pR->pData = krp->krp_param[ICP_DSA_SIGN_KRP_PARAM_R_RESULT_INDEX].crp_p;
5350 + BITS_TO_BYTES(pR->dataLenInBytes,
5351 + krp->krp_param[ICP_DSA_SIGN_KRP_PARAM_R_RESULT_INDEX].
5352 + crp_nbits);
5354 + lacStatus = cpaCyDsaSignRS(CPA_INSTANCE_HANDLE_SINGLE,
5355 + icp_ocfDrvDsaRSSignCallBack,
5356 + callbackTag, dsaRsSignOpData,
5357 + &protocolStatus, pR, pS);
5359 + if (CPA_STATUS_SUCCESS != lacStatus) {
5360 + EPRINTK("%s(): DSA RS Sign Operation failed (%d).\n",
5361 + __FUNCTION__, lacStatus);
5362 + krp->krp_status = ECANCELED;
5363 + icp_ocfDrvFreeFlatBuffer(pS);
5364 + icp_ocfDrvFreeFlatBuffer(pR);
5365 + kmem_cache_free(drvDSARSSignKValue_zone,
5366 + dsaRsSignOpData->K.pData);
5367 + kmem_cache_free(drvDSARSSign_zone, dsaRsSignOpData);
5370 + return lacStatus;
5373 +/* Name : icp_ocfDrvDsaVerify
5375 + * Description : This function will map DSA RS Verify from OCF to the LAC API.
5377 + */
5378 +static int icp_ocfDrvDsaVerify(struct cryptkop *krp)
5380 + CpaStatus lacStatus = CPA_STATUS_SUCCESS;
5381 + CpaCyDsaVerifyOpData *dsaVerifyOpData = NULL;
5382 + void *callbackTag = NULL;
5383 + CpaBoolean verifyStatus = CPA_FALSE;
5385 + callbackTag = krp;
5387 + dsaVerifyOpData = kmem_cache_zalloc(drvDSAVerify_zone, GFP_KERNEL);
5388 + if (NULL == dsaVerifyOpData) {
5389 + APRINTK("%s():Failed to get memory"
5390 + " for DSA Verify Op data struct\n", __FUNCTION__);
5391 + krp->krp_status = ENOMEM;
5392 + return ENOMEM;
5395 + /* Link parameters */
5396 + dsaVerifyOpData->P.pData =
5397 + krp->krp_param[ICP_DSA_VERIFY_KRP_PARAM_PRIME_P_INDEX].crp_p;
5398 + BITS_TO_BYTES(dsaVerifyOpData->P.dataLenInBytes,
5399 + krp->krp_param[ICP_DSA_VERIFY_KRP_PARAM_PRIME_P_INDEX].
5400 + crp_nbits);
5401 + icp_ocfDrvSwapBytes(dsaVerifyOpData->P.pData,
5402 + dsaVerifyOpData->P.dataLenInBytes);
5404 + dsaVerifyOpData->Q.pData =
5405 + krp->krp_param[ICP_DSA_VERIFY_KRP_PARAM_PRIME_Q_INDEX].crp_p;
5406 + BITS_TO_BYTES(dsaVerifyOpData->Q.dataLenInBytes,
5407 + krp->krp_param[ICP_DSA_VERIFY_KRP_PARAM_PRIME_Q_INDEX].
5408 + crp_nbits);
5409 + icp_ocfDrvSwapBytes(dsaVerifyOpData->Q.pData,
5410 + dsaVerifyOpData->Q.dataLenInBytes);
5412 + dsaVerifyOpData->G.pData =
5413 + krp->krp_param[ICP_DSA_VERIFY_KRP_PARAM_G_INDEX].crp_p;
5414 + BITS_TO_BYTES(dsaVerifyOpData->G.dataLenInBytes,
5415 + krp->krp_param[ICP_DSA_VERIFY_KRP_PARAM_G_INDEX].
5416 + crp_nbits);
5417 + icp_ocfDrvSwapBytes(dsaVerifyOpData->G.pData,
5418 + dsaVerifyOpData->G.dataLenInBytes);
5420 + dsaVerifyOpData->Y.pData =
5421 + krp->krp_param[ICP_DSA_VERIFY_KRP_PARAM_PUBKEY_INDEX].crp_p;
5422 + BITS_TO_BYTES(dsaVerifyOpData->Y.dataLenInBytes,
5423 + krp->krp_param[ICP_DSA_VERIFY_KRP_PARAM_PUBKEY_INDEX].
5424 + crp_nbits);
5425 + icp_ocfDrvSwapBytes(dsaVerifyOpData->Y.pData,
5426 + dsaVerifyOpData->Y.dataLenInBytes);
5428 + dsaVerifyOpData->M.pData =
5429 + krp->krp_param[ICP_DSA_VERIFY_KRP_PARAM_DGST_INDEX].crp_p;
5430 + BITS_TO_BYTES(dsaVerifyOpData->M.dataLenInBytes,
5431 + krp->krp_param[ICP_DSA_VERIFY_KRP_PARAM_DGST_INDEX].
5432 + crp_nbits);
5433 + icp_ocfDrvSwapBytes(dsaVerifyOpData->M.pData,
5434 + dsaVerifyOpData->M.dataLenInBytes);
5436 + dsaVerifyOpData->R.pData =
5437 + krp->krp_param[ICP_DSA_VERIFY_KRP_PARAM_SIG_R_INDEX].crp_p;
5438 + BITS_TO_BYTES(dsaVerifyOpData->R.dataLenInBytes,
5439 + krp->krp_param[ICP_DSA_VERIFY_KRP_PARAM_SIG_R_INDEX].
5440 + crp_nbits);
5441 + icp_ocfDrvSwapBytes(dsaVerifyOpData->R.pData,
5442 + dsaVerifyOpData->R.dataLenInBytes);
5444 + dsaVerifyOpData->S.pData =
5445 + krp->krp_param[ICP_DSA_VERIFY_KRP_PARAM_SIG_S_INDEX].crp_p;
5446 + BITS_TO_BYTES(dsaVerifyOpData->S.dataLenInBytes,
5447 + krp->krp_param[ICP_DSA_VERIFY_KRP_PARAM_SIG_S_INDEX].
5448 + crp_nbits);
5449 + icp_ocfDrvSwapBytes(dsaVerifyOpData->S.pData,
5450 + dsaVerifyOpData->S.dataLenInBytes);
5452 + lacStatus = cpaCyDsaVerify(CPA_INSTANCE_HANDLE_SINGLE,
5453 + icp_ocfDrvDsaVerifyCallBack,
5454 + callbackTag, dsaVerifyOpData, &verifyStatus);
5456 + if (CPA_STATUS_SUCCESS != lacStatus) {
5457 + EPRINTK("%s(): DSA Verify Operation failed (%d).\n",
5458 + __FUNCTION__, lacStatus);
5459 + kmem_cache_free(drvDSAVerify_zone, dsaVerifyOpData);
5460 + krp->krp_status = ECANCELED;
5463 + return lacStatus;
5466 +/* Name : icp_ocfDrvReadRandom
5468 + * Description : This function will map RNG functionality calls from OCF
5469 + * to the LAC API.
5470 + */
5471 +int icp_ocfDrvReadRandom(void *arg, uint32_t * buf, int maxwords)
5473 + CpaStatus lacStatus = CPA_STATUS_SUCCESS;
5474 + CpaCyRandGenOpData randGenOpData;
5475 + CpaFlatBuffer randData;
5477 + if (NULL == buf) {
5478 + APRINTK("%s(): Invalid input parameters\n", __FUNCTION__);
5479 + return EINVAL;
5482 + /* maxwords here is the number of 32-bit words to generate random data for */
5483 + randGenOpData.generateBits = CPA_TRUE;
5485 + randGenOpData.lenInBytes = maxwords * sizeof(uint32_t);
5487 + icp_ocfDrvPtrAndLenToFlatBuffer((Cpa8U *) buf,
5488 + randGenOpData.lenInBytes, &randData);
5490 + lacStatus = cpaCyRandGen(CPA_INSTANCE_HANDLE_SINGLE,
5491 + NULL, NULL, &randGenOpData, &randData);
5492 + if (CPA_STATUS_SUCCESS != lacStatus) {
5493 + EPRINTK("%s(): icp_LacSymRandGen failed (%d). \n",
5494 + __FUNCTION__, lacStatus);
5495 + return RETURN_RAND_NUM_GEN_FAILED;
5498 + return randGenOpData.lenInBytes / sizeof(uint32_t);
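+/* Illustrative sketch: how the RNG entry point above might be exercised
+ * once it has been registered with OCF via crypto_rregister(). The buffer
+ * size used here is an arbitrary example value, not taken from the driver.
+ */
+#if 0
+static void icp_ocfDrvReadRandomExample(void)
+{
+ uint32_t rand_words[8];
+ int words_filled = 0;
+
+ /* The first argument is unused by icp_ocfDrvReadRandom, so NULL is fine */
+ words_filled = icp_ocfDrvReadRandom(NULL, rand_words, 8);
+ if (8 != words_filled) {
+ DPRINTK("%s(): expected 8 random words, got %d\n",
+ __FUNCTION__, words_filled);
+ }
+}
+#endif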
5501 +/* Name : icp_ocfDrvDhP1Callback
5503 + * Description : When this function returns it signifies that the LAC
5504 + * component has completed the DH operation.
5505 + */
5506 +static void
5507 +icp_ocfDrvDhP1CallBack(void *callbackTag,
5508 + CpaStatus status,
5509 + void *pOpData, CpaFlatBuffer * pLocalOctetStringPV)
5511 + struct cryptkop *krp = NULL;
5512 + CpaCyDhPhase1KeyGenOpData *pPhase1OpData = NULL;
5514 + if (NULL == callbackTag) {
5515 + DPRINTK("%s(): Invalid input parameters - "
5516 + "callbackTag data is NULL\n", __FUNCTION__);
5517 + return;
5519 + krp = (struct cryptkop *)callbackTag;
5521 + if (NULL == pOpData) {
5522 + DPRINTK("%s(): Invalid input parameters - "
5523 + "Operation Data is NULL\n", __FUNCTION__);
5524 + krp->krp_status = ECANCELED;
5525 + crypto_kdone(krp);
5526 + return;
5528 + pPhase1OpData = (CpaCyDhPhase1KeyGenOpData *) pOpData;
5530 + if (NULL == pLocalOctetStringPV) {
5531 + DPRINTK("%s(): Invalid input parameters - "
5532 + "pLocalOctetStringPV Data is NULL\n", __FUNCTION__);
5533 + memset(pPhase1OpData, 0, sizeof(CpaCyDhPhase1KeyGenOpData));
5534 + kmem_cache_free(drvDH_zone, pPhase1OpData);
5535 + krp->krp_status = ECANCELED;
5536 + crypto_kdone(krp);
5537 + return;
5540 + if (CPA_STATUS_SUCCESS == status) {
5541 + krp->krp_status = CRYPTO_OP_SUCCESS;
5542 + } else {
5543 + APRINTK("%s(): Diffie Hellman Phase1 Key Gen failed - "
5544 + "Operation Status = %d\n", __FUNCTION__, status);
5545 + krp->krp_status = ECANCELED;
5548 + icp_ocfDrvSwapBytes(pLocalOctetStringPV->pData,
5549 + pLocalOctetStringPV->dataLenInBytes);
5551 + icp_ocfDrvFreeFlatBuffer(pLocalOctetStringPV);
5552 + memset(pPhase1OpData, 0, sizeof(CpaCyDhPhase1KeyGenOpData));
5553 + kmem_cache_free(drvDH_zone, pPhase1OpData);
5555 + crypto_kdone(krp);
5557 + return;
5560 +/* Name : icp_ocfDrvModExpCallBack
5562 + * Description : When this function returns it signifies that the LAC
5563 + * component has completed the Mod Exp operation.
5564 + */
5565 +static void
5566 +icp_ocfDrvModExpCallBack(void *callbackTag,
5567 + CpaStatus status,
5568 + void *pOpdata, CpaFlatBuffer * pResult)
5570 + struct cryptkop *krp = NULL;
5571 + CpaCyLnModExpOpData *pLnModExpOpData = NULL;
5573 + if (NULL == callbackTag) {
5574 + DPRINTK("%s(): Invalid input parameters - "
5575 + "callbackTag data is NULL\n", __FUNCTION__);
5576 + return;
5578 + krp = (struct cryptkop *)callbackTag;
5580 + if (NULL == pOpdata) {
5581 + DPRINTK("%s(): Invalid Mod Exp input parameters - "
5582 + "Operation Data is NULL\n", __FUNCTION__);
5583 + krp->krp_status = ECANCELED;
5584 + crypto_kdone(krp);
5585 + return;
5587 + pLnModExpOpData = (CpaCyLnModExpOpData *) pOpdata;
5589 + if (NULL == pResult) {
5590 + DPRINTK("%s(): Invalid input parameters - "
5591 + "pResult data is NULL\n", __FUNCTION__);
5592 + krp->krp_status = ECANCELED;
5593 + memset(pLnModExpOpData, 0, sizeof(CpaCyLnModExpOpData));
5594 + kmem_cache_free(drvLnModExp_zone, pLnModExpOpData);
5595 + crypto_kdone(krp);
5596 + return;
5599 + if (CPA_STATUS_SUCCESS == status) {
5600 + krp->krp_status = CRYPTO_OP_SUCCESS;
5601 + } else {
5602 + APRINTK("%s(): LAC Mod Exp Operation failed - "
5603 + "Operation Status = %d\n", __FUNCTION__, status);
5604 + krp->krp_status = ECANCELED;
5607 + icp_ocfDrvSwapBytes(pResult->pData, pResult->dataLenInBytes);
5609 + /*switch base size value back to original */
5610 + if (pLnModExpOpData->base.pData ==
5611 + (uint8_t *) & (krp->
5612 + krp_param[ICP_MOD_EXP_KRP_PARAM_BASE_INDEX].
5613 + crp_nbits)) {
5614 + *((uint32_t *) pLnModExpOpData->base.pData) =
5615 + ntohl(*((uint32_t *) pLnModExpOpData->base.pData));
5617 + icp_ocfDrvFreeFlatBuffer(pResult);
5618 + memset(pLnModExpOpData, 0, sizeof(CpaCyLnModExpOpData));
5619 + kmem_cache_free(drvLnModExp_zone, pLnModExpOpData);
5621 + crypto_kdone(krp);
5623 + return;
5627 +/* Name : icp_ocfDrvModExpCRTCallBack
5629 + * Description : When this function returns it signifies that the LAC
5630 + * component has completed the Mod Exp CRT operation.
5631 + */
5632 +static void
5633 +icp_ocfDrvModExpCRTCallBack(void *callbackTag,
5634 + CpaStatus status,
5635 + void *pOpData, CpaFlatBuffer * pOutputData)
5637 + struct cryptkop *krp = NULL;
5638 + CpaCyRsaDecryptOpData *pDecryptData = NULL;
5640 + if (NULL == callbackTag) {
5641 + DPRINTK("%s(): Invalid input parameters - "
5642 + "callbackTag data is NULL\n", __FUNCTION__);
5643 + return;
5646 + krp = (struct cryptkop *)callbackTag;
5648 + if (NULL == pOpData) {
5649 + DPRINTK("%s(): Invalid input parameters - "
5650 + "Operation Data is NULL\n", __FUNCTION__);
5651 + krp->krp_status = ECANCELED;
5652 + crypto_kdone(krp);
5653 + return;
5655 + pDecryptData = (CpaCyRsaDecryptOpData *) pOpData;
5657 + if (NULL == pOutputData) {
5658 + DPRINTK("%s(): Invalid input parameter - "
5659 + "pOutputData is NULL\n", __FUNCTION__);
5660 + memset(pDecryptData->pRecipientPrivateKey, 0,
5661 + sizeof(CpaCyRsaPrivateKey));
5662 + kmem_cache_free(drvRSAPrivateKey_zone,
5663 + pDecryptData->pRecipientPrivateKey);
5664 + memset(pDecryptData, 0, sizeof(CpaCyRsaDecryptOpData));
5665 + kmem_cache_free(drvRSADecrypt_zone, pDecryptData);
5666 + krp->krp_status = ECANCELED;
5667 + crypto_kdone(krp);
5668 + return;
5671 + if (CPA_STATUS_SUCCESS == status) {
5672 + krp->krp_status = CRYPTO_OP_SUCCESS;
5673 + } else {
5674 + APRINTK("%s(): LAC Mod Exp CRT operation failed - "
5675 + "Operation Status = %d\n", __FUNCTION__, status);
5676 + krp->krp_status = ECANCELED;
5679 + icp_ocfDrvSwapBytes(pOutputData->pData, pOutputData->dataLenInBytes);
5681 + icp_ocfDrvFreeFlatBuffer(pOutputData);
5682 + memset(pDecryptData->pRecipientPrivateKey, 0,
5683 + sizeof(CpaCyRsaPrivateKey));
5684 + kmem_cache_free(drvRSAPrivateKey_zone,
5685 + pDecryptData->pRecipientPrivateKey);
5686 + memset(pDecryptData, 0, sizeof(CpaCyRsaDecryptOpData));
5687 + kmem_cache_free(drvRSADecrypt_zone, pDecryptData);
5689 + crypto_kdone(krp);
5691 + return;
5694 +/* Name : icp_ocfDrvDsaRSSignCallBack
5696 + * Description : When this function returns it signifies that the LAC
5697 + * component has completed the DSA RS sign operation.
5698 + */
5699 +static void
5700 +icp_ocfDrvDsaRSSignCallBack(void *callbackTag,
5701 + CpaStatus status,
5702 + void *pOpData,
5703 + CpaBoolean protocolStatus,
5704 + CpaFlatBuffer * pR, CpaFlatBuffer * pS)
5706 + struct cryptkop *krp = NULL;
5707 + CpaCyDsaRSSignOpData *pSignData = NULL;
5709 + if (NULL == callbackTag) {
5710 + DPRINTK("%s(): Invalid input parameters - "
5711 + "callbackTag data is NULL\n", __FUNCTION__);
5712 + return;
5715 + krp = (struct cryptkop *)callbackTag;
5717 + if (NULL == pOpData) {
5718 + DPRINTK("%s(): Invalid input parameters - "
5719 + "Operation Data is NULL\n", __FUNCTION__);
5720 + krp->krp_status = ECANCELED;
5721 + crypto_kdone(krp);
5722 + return;
5724 + pSignData = (CpaCyDsaRSSignOpData *) pOpData;
5726 + if (NULL == pR) {
5727 + DPRINTK("%s(): Invalid input parameter - "
5728 + "pR sign is NULL\n", __FUNCTION__);
5729 + icp_ocfDrvFreeFlatBuffer(pS);
5730 + kmem_cache_free(drvDSARSSign_zone, pSignData);
5731 + krp->krp_status = ECANCELED;
5732 + crypto_kdone(krp);
5733 + return;
5736 + if (NULL == pS) {
5737 + DPRINTK("%s(): Invalid input parameter - "
5738 + "pS sign is NULL\n", __FUNCTION__);
5739 + icp_ocfDrvFreeFlatBuffer(pR);
5740 + kmem_cache_free(drvDSARSSign_zone, pSignData);
5741 + krp->krp_status = ECANCELED;
5742 + crypto_kdone(krp);
5743 + return;
5746 + if (CPA_STATUS_SUCCESS != status) {
5747 + APRINTK("%s(): LAC DSA RS Sign operation failed - "
5748 + "Operation Status = %d\n", __FUNCTION__, status);
5749 + krp->krp_status = ECANCELED;
5750 + } else {
5751 + krp->krp_status = CRYPTO_OP_SUCCESS;
5753 + if (CPA_TRUE != protocolStatus) {
5754 + DPRINTK("%s(): LAC DSA RS Sign operation failed due "
5755 + "to protocol error\n", __FUNCTION__);
5756 + krp->krp_status = EIO;
5760 + /* Swap bytes only when the callback status is successful and
5761 + protocolStatus is set to true */
5762 + if (CPA_STATUS_SUCCESS == status && CPA_TRUE == protocolStatus) {
5763 + icp_ocfDrvSwapBytes(pR->pData, pR->dataLenInBytes);
5764 + icp_ocfDrvSwapBytes(pS->pData, pS->dataLenInBytes);
5767 + icp_ocfDrvFreeFlatBuffer(pR);
5768 + icp_ocfDrvFreeFlatBuffer(pS);
5769 + memset(pSignData->K.pData, 0, pSignData->K.dataLenInBytes);
5770 + kmem_cache_free(drvDSARSSignKValue_zone, pSignData->K.pData);
5771 + memset(pSignData, 0, sizeof(CpaCyDsaRSSignOpData));
5772 + kmem_cache_free(drvDSARSSign_zone, pSignData);
5773 + crypto_kdone(krp);
5775 + return;
5778 +/* Name : icp_ocfDrvDsaVerifyCallback
5780 + * Description : When this function returns it signifies that the LAC
5781 + * component has completed the DSA Verify operation.
5782 + */
5783 +static void
5784 +icp_ocfDrvDsaVerifyCallBack(void *callbackTag,
5785 + CpaStatus status,
5786 + void *pOpData, CpaBoolean verifyStatus)
5789 + struct cryptkop *krp = NULL;
5790 + CpaCyDsaVerifyOpData *pVerData = NULL;
5792 + if (NULL == callbackTag) {
5793 + DPRINTK("%s(): Invalid input parameters - "
5794 + "callbackTag data is NULL\n", __FUNCTION__);
5795 + return;
5798 + krp = (struct cryptkop *)callbackTag;
5800 + if (NULL == pOpData) {
5801 + DPRINTK("%s(): Invalid input parameters - "
5802 + "Operation Data is NULL\n", __FUNCTION__);
5803 + krp->krp_status = ECANCELED;
5804 + crypto_kdone(krp);
5805 + return;
5807 + pVerData = (CpaCyDsaVerifyOpData *) pOpData;
5809 + if (CPA_STATUS_SUCCESS != status) {
5810 + APRINTK("%s(): LAC DSA Verify operation failed - "
5811 + "Operation Status = %d\n", __FUNCTION__, status);
5812 + krp->krp_status = ECANCELED;
5813 + } else {
5814 + krp->krp_status = CRYPTO_OP_SUCCESS;
5816 + if (CPA_TRUE != verifyStatus) {
5817 + DPRINTK("%s(): DSA signature invalid\n", __FUNCTION__);
5818 + krp->krp_status = EIO;
5822 + /* Swap bytes only when the callback status is successful and
5823 + verifyStatus is set to true */
5824 + /*Just swapping back the key values for now. Possibly all
5825 + swapped buffers need to be reverted */
5826 + if (CPA_STATUS_SUCCESS == status && CPA_TRUE == verifyStatus) {
5827 + icp_ocfDrvSwapBytes(pVerData->R.pData,
5828 + pVerData->R.dataLenInBytes);
5829 + icp_ocfDrvSwapBytes(pVerData->S.pData,
5830 + pVerData->S.dataLenInBytes);
5833 + memset(pVerData, 0, sizeof(CpaCyDsaVerifyOpData));
5834 + kmem_cache_free(drvDSAVerify_zone, pVerData);
5835 + crypto_kdone(krp);
5837 + return;
5839 diff -Nur linux-2.6.30.orig/crypto/ocf/ep80579/icp_common.c linux-2.6.30/crypto/ocf/ep80579/icp_common.c
5840 --- linux-2.6.30.orig/crypto/ocf/ep80579/icp_common.c 1970-01-01 01:00:00.000000000 +0100
5841 +++ linux-2.6.30/crypto/ocf/ep80579/icp_common.c 2009-06-11 10:55:27.000000000 +0200
5842 @@ -0,0 +1,891 @@
5843 +/***************************************************************************
5845 + * This file is provided under a dual BSD/GPLv2 license. When using or
5846 + * redistributing this file, you may do so under either license.
5847 + *
5848 + * GPL LICENSE SUMMARY
5849 + *
5850 + * Copyright(c) 2007,2008 Intel Corporation. All rights reserved.
5851 + *
5852 + * This program is free software; you can redistribute it and/or modify
5853 + * it under the terms of version 2 of the GNU General Public License as
5854 + * published by the Free Software Foundation.
5855 + *
5856 + * This program is distributed in the hope that it will be useful, but
5857 + * WITHOUT ANY WARRANTY; without even the implied warranty of
5858 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
5859 + * General Public License for more details.
5860 + *
5861 + * You should have received a copy of the GNU General Public License
5862 + * along with this program; if not, write to the Free Software
5863 + * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
5864 + * The full GNU General Public License is included in this distribution
5865 + * in the file called LICENSE.GPL.
5866 + *
5867 + * Contact Information:
5868 + * Intel Corporation
5869 + *
5870 + * BSD LICENSE
5871 + *
5872 + * Copyright(c) 2007,2008 Intel Corporation. All rights reserved.
5873 + * All rights reserved.
5874 + *
5875 + * Redistribution and use in source and binary forms, with or without
5876 + * modification, are permitted provided that the following conditions
5877 + * are met:
5878 + *
5879 + * * Redistributions of source code must retain the above copyright
5880 + * notice, this list of conditions and the following disclaimer.
5881 + * * Redistributions in binary form must reproduce the above copyright
5882 + * notice, this list of conditions and the following disclaimer in
5883 + * the documentation and/or other materials provided with the
5884 + * distribution.
5885 + * * Neither the name of Intel Corporation nor the names of its
5886 + * contributors may be used to endorse or promote products derived
5887 + * from this software without specific prior written permission.
5888 + *
5889 + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
5890 + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
5891 + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
5892 + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
5893 + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
5894 + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
5895 + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
5896 + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
5897 + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
5898 + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
5899 + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
5900 + *
5901 + *
5902 + * version: Security.L.1.0.130
5904 + ***************************************************************************/
5907 + * An OCF module that uses Intel® QuickAssist Integrated Accelerator to do the
5908 + * crypto.
5910 + * This driver requires the ICP Access Library that is available from Intel in
5911 + * order to operate.
5912 + */
5914 +#include "icp_ocf.h"
5916 +#define ICP_OCF_COMP_NAME "ICP_OCF"
5917 +#define ICP_OCF_VER_MAIN (2)
5918 +#define ICP_OCF_VER_MJR (0)
5919 +#define ICP_OCF_VER_MNR (0)
5921 +#define MAX_DEREG_RETRIES (100)
5922 +#define DEFAULT_DEREG_RETRIES (10)
5923 +#define DEFAULT_DEREG_DELAY_IN_JIFFIES (10)
5925 +/* This defines the maximum number of sessions possible between OCF
5926 + and the OCF Tolapai Driver. If set to zero, there is no limit. */
5927 +#define DEFAULT_OCF_TO_DRV_MAX_SESSION_COUNT (0)
5928 +#define NUM_SUPPORTED_CAPABILITIES (21)
5930 +/*Slab zones*/
5931 +struct kmem_cache *drvSessionData_zone = NULL;
5932 +struct kmem_cache *drvOpData_zone = NULL;
5933 +struct kmem_cache *drvDH_zone = NULL;
5934 +struct kmem_cache *drvLnModExp_zone = NULL;
5935 +struct kmem_cache *drvRSADecrypt_zone = NULL;
5936 +struct kmem_cache *drvRSAPrivateKey_zone = NULL;
5937 +struct kmem_cache *drvDSARSSign_zone = NULL;
5938 +struct kmem_cache *drvDSARSSignKValue_zone = NULL;
5939 +struct kmem_cache *drvDSAVerify_zone = NULL;
5941 +/*Slab zones for flatbuffers and bufferlist*/
5942 +struct kmem_cache *drvFlatBuffer_zone = NULL;
5944 +static int icp_ocfDrvInit(void);
5945 +static void icp_ocfDrvExit(void);
5946 +static void icp_ocfDrvFreeCaches(void);
5947 +static void icp_ocfDrvDeferedFreeLacSessionProcess(void *arg);
5949 +int32_t icp_ocfDrvDriverId = INVALID_DRIVER_ID;
5951 +/* Module parameter - gives the number of times LAC deregistration shall be
5952 + re-tried */
5953 +int num_dereg_retries = DEFAULT_DEREG_RETRIES;
5955 +/* Module parameter - gives the delay time in jiffies before a LAC session
5956 + shall be attempted to be deregistered again */
5957 +int dereg_retry_delay_in_jiffies = DEFAULT_DEREG_DELAY_IN_JIFFIES;
5959 +/* Module parameter - gives the maximum number of sessions possible between
5960 + OCF and the OCF Tolapai Driver. If set to zero, there is no limit.*/
5961 +int max_sessions = DEFAULT_OCF_TO_DRV_MAX_SESSION_COUNT;
5963 +/* This is set when the module is removed from the system, no further
5964 + processing can take place if this is set */
5965 +atomic_t icp_ocfDrvIsExiting = ATOMIC_INIT(0);
5967 +/* This is used to show how many lac sessions were not deregistered*/
5968 +atomic_t lac_session_failed_dereg_count = ATOMIC_INIT(0);
5970 +/* This is used to track the number of registered sessions between OCF and
5971 + * the OCF Tolapai driver, when max_session is set to a value other than
5972 + * zero. This ensures that the max_session set for the OCF and the driver
5973 + * is equal to the LAC registered sessions */
5974 +atomic_t num_ocf_to_drv_registered_sessions = ATOMIC_INIT(0);
5976 +/* Head of linked list used to store session data */
5977 +struct list_head icp_ocfDrvGlobalSymListHead;
5978 +struct list_head icp_ocfDrvGlobalSymListHead_FreeMemList;
5980 +spinlock_t icp_ocfDrvSymSessInfoListSpinlock = SPIN_LOCK_UNLOCKED;
5981 +rwlock_t icp_kmem_cache_destroy_alloc_lock = RW_LOCK_UNLOCKED;
5983 +struct workqueue_struct *icp_ocfDrvFreeLacSessionWorkQ;
5985 +struct icp_drvBuffListInfo defBuffListInfo;
5987 +static struct {
5988 + softc_device_decl sc_dev;
5989 +} icpDev;
5991 +static device_method_t icp_methods = {
5992 + /* crypto device methods */
5993 + DEVMETHOD(cryptodev_newsession, icp_ocfDrvNewSession),
5994 + DEVMETHOD(cryptodev_freesession, icp_ocfDrvFreeLACSession),
5995 + DEVMETHOD(cryptodev_process, icp_ocfDrvSymProcess),
5996 + DEVMETHOD(cryptodev_kprocess, icp_ocfDrvPkeProcess),
5999 +module_param(num_dereg_retries, int, S_IRUGO);
6000 +module_param(dereg_retry_delay_in_jiffies, int, S_IRUGO);
6001 +module_param(max_sessions, int, S_IRUGO);
6003 +MODULE_PARM_DESC(num_dereg_retries,
6004 + "Number of times to retry LAC Sym Session Deregistration. "
6005 + "Default 10, Max 100");
6006 +MODULE_PARM_DESC(dereg_retry_delay_in_jiffies, "Delay in jiffies "
6007 + "(added to a schedule() function call) before a LAC Sym "
6008 + "Session Dereg is retried. Default 10");
6009 +MODULE_PARM_DESC(max_sessions, "This sets the maximum number of sessions "
6010 + "between OCF and this driver. If this value is set to zero, "
6011 + "max session count checking is disabled. Default is zero(0)");
6013 +/* Name : icp_ocfDrvInit
6015 + * Description : This function will register all the symmetric and asymmetric
6016 + * functionality that will be accelerated by the hardware. It will also
6017 + * get a unique driver ID from the OCF and initialise all slab caches
6018 + */
6019 +static int __init icp_ocfDrvInit(void)
6021 + int ocfStatus = 0;
6023 + IPRINTK("=== %s ver %d.%d.%d ===\n", ICP_OCF_COMP_NAME,
6024 + ICP_OCF_VER_MAIN, ICP_OCF_VER_MJR, ICP_OCF_VER_MNR);
6026 + if (MAX_DEREG_RETRIES < num_dereg_retries) {
6027 + EPRINTK("Session deregistration retry count set to greater "
6028 + "than %d", MAX_DEREG_RETRIES);
6029 + return -1;
6032 + /* Initialize and Start the Cryptographic component */
6033 + if (CPA_STATUS_SUCCESS !=
6034 + cpaCyStartInstance(CPA_INSTANCE_HANDLE_SINGLE)) {
6035 + EPRINTK("Failed to initialize and start the instance "
6036 + "of the Cryptographic component.\n");
6037 + return -1;
6040 + /* Set the default size of BufferList to allocate */
6041 + memset(&defBuffListInfo, 0, sizeof(struct icp_drvBuffListInfo));
6042 + if (ICP_OCF_DRV_STATUS_SUCCESS !=
6043 + icp_ocfDrvBufferListMemInfo(ICP_OCF_DRV_DEFAULT_BUFFLIST_ARRAYS,
6044 + &defBuffListInfo)) {
6045 + EPRINTK("Failed to get bufferlist memory info.\n");
6046 + return -1;
6049 + /*Register OCF Tolapai Driver with OCF */
6050 + memset(&icpDev, 0, sizeof(icpDev));
6051 + softc_device_init(&icpDev, "icp", 0, icp_methods);
6053 + icp_ocfDrvDriverId = crypto_get_driverid(softc_get_device(&icpDev),
6054 + CRYPTOCAP_F_HARDWARE);
6056 + if (icp_ocfDrvDriverId < 0) {
6057 + EPRINTK("%s : ICP driver failed to register with OCF!\n",
6058 + __FUNCTION__);
6059 + return -ENODEV;
6062 + /*Create all the slab caches used by the OCF Tolapai Driver */
6063 + drvSessionData_zone =
6064 + ICP_CACHE_CREATE("ICP Session Data", struct icp_drvSessionData);
6065 + ICP_CACHE_NULL_CHECK(drvSessionData_zone);
6067 + /*
6068 + * Allocation of the OpData includes the space for its meta data.
6069 + * The memory after the opData structure is reserved for this meta data.
6070 + */
6071 + drvOpData_zone =
6072 + kmem_cache_create("ICP Op Data", sizeof(struct icp_drvOpData) +
6073 + defBuffListInfo.metaSize, 0, SLAB_HWCACHE_ALIGN, NULL, NULL);
6076 + ICP_CACHE_NULL_CHECK(drvOpData_zone);
6078 + drvDH_zone = ICP_CACHE_CREATE("ICP DH data", CpaCyDhPhase1KeyGenOpData);
6079 + ICP_CACHE_NULL_CHECK(drvDH_zone);
6081 + drvLnModExp_zone =
6082 + ICP_CACHE_CREATE("ICP ModExp data", CpaCyLnModExpOpData);
6083 + ICP_CACHE_NULL_CHECK(drvLnModExp_zone);
6085 + drvRSADecrypt_zone =
6086 + ICP_CACHE_CREATE("ICP RSA decrypt data", CpaCyRsaDecryptOpData);
6087 + ICP_CACHE_NULL_CHECK(drvRSADecrypt_zone);
6089 + drvRSAPrivateKey_zone =
6090 + ICP_CACHE_CREATE("ICP RSA private key data", CpaCyRsaPrivateKey);
6091 + ICP_CACHE_NULL_CHECK(drvRSAPrivateKey_zone);
6093 + drvDSARSSign_zone =
6094 + ICP_CACHE_CREATE("ICP DSA Sign", CpaCyDsaRSSignOpData);
6095 + ICP_CACHE_NULL_CHECK(drvDSARSSign_zone);
6097 + /*too awkward to use a macro here */
6098 + drvDSARSSignKValue_zone =
6099 + kmem_cache_create("ICP DSA Sign Rand Val",
6100 + DSA_RS_SIGN_PRIMEQ_SIZE_IN_BYTES, 0,
6101 + SLAB_HWCACHE_ALIGN, NULL, NULL);
6102 + ICP_CACHE_NULL_CHECK(drvDSARSSignKValue_zone);
6104 + drvDSAVerify_zone =
6105 + ICP_CACHE_CREATE("ICP DSA Verify", CpaCyDsaVerifyOpData);
6106 + ICP_CACHE_NULL_CHECK(drvDSAVerify_zone);
6108 + drvFlatBuffer_zone =
6109 + ICP_CACHE_CREATE("ICP Flat Buffers", CpaFlatBuffer);
6110 + ICP_CACHE_NULL_CHECK(drvFlatBuffer_zone);
6112 + /* Register the ICP symmetric crypto support. */
6113 + ICP_REGISTER_SYM_FUNCTIONALITY_WITH_OCF(CRYPTO_NULL_CBC);
6114 + ICP_REGISTER_SYM_FUNCTIONALITY_WITH_OCF(CRYPTO_DES_CBC);
6115 + ICP_REGISTER_SYM_FUNCTIONALITY_WITH_OCF(CRYPTO_3DES_CBC);
6116 + ICP_REGISTER_SYM_FUNCTIONALITY_WITH_OCF(CRYPTO_AES_CBC);
6117 + ICP_REGISTER_SYM_FUNCTIONALITY_WITH_OCF(CRYPTO_ARC4);
6118 + ICP_REGISTER_SYM_FUNCTIONALITY_WITH_OCF(CRYPTO_MD5);
6119 + ICP_REGISTER_SYM_FUNCTIONALITY_WITH_OCF(CRYPTO_MD5_HMAC);
6120 + ICP_REGISTER_SYM_FUNCTIONALITY_WITH_OCF(CRYPTO_SHA1);
6121 + ICP_REGISTER_SYM_FUNCTIONALITY_WITH_OCF(CRYPTO_SHA1_HMAC);
6122 + ICP_REGISTER_SYM_FUNCTIONALITY_WITH_OCF(CRYPTO_SHA2_256);
6123 + ICP_REGISTER_SYM_FUNCTIONALITY_WITH_OCF(CRYPTO_SHA2_256_HMAC);
6124 + ICP_REGISTER_SYM_FUNCTIONALITY_WITH_OCF(CRYPTO_SHA2_384);
6125 + ICP_REGISTER_SYM_FUNCTIONALITY_WITH_OCF(CRYPTO_SHA2_384_HMAC);
6126 + ICP_REGISTER_SYM_FUNCTIONALITY_WITH_OCF(CRYPTO_SHA2_512);
6127 + ICP_REGISTER_SYM_FUNCTIONALITY_WITH_OCF(CRYPTO_SHA2_512_HMAC);
6129 + /* Register the ICP asymmetric algorithm support */
6130 + ICP_REGISTER_ASYM_FUNCTIONALITY_WITH_OCF(CRK_DH_COMPUTE_KEY);
6131 + ICP_REGISTER_ASYM_FUNCTIONALITY_WITH_OCF(CRK_MOD_EXP);
6132 + ICP_REGISTER_ASYM_FUNCTIONALITY_WITH_OCF(CRK_MOD_EXP_CRT);
6133 + ICP_REGISTER_ASYM_FUNCTIONALITY_WITH_OCF(CRK_DSA_SIGN);
6134 + ICP_REGISTER_ASYM_FUNCTIONALITY_WITH_OCF(CRK_DSA_VERIFY);
6136 + /* Register the ICP random number generator support */
6137 + if (OCF_REGISTRATION_STATUS_SUCCESS ==
6138 + crypto_rregister(icp_ocfDrvDriverId, icp_ocfDrvReadRandom, NULL)) {
6139 + ocfStatus++;
6142 + if (OCF_ZERO_FUNCTIONALITY_REGISTERED == ocfStatus) {
6143 + DPRINTK("%s: Failed to register any device capabilities\n",
6144 + __FUNCTION__);
6145 + icp_ocfDrvFreeCaches();
6146 + icp_ocfDrvDriverId = INVALID_DRIVER_ID;
6147 + return -ECANCELED;
6150 + DPRINTK("%s: Registered %d of %d device capabilities\n",
6151 + __FUNCTION__, ocfStatus, NUM_SUPPORTED_CAPABILITIES);
6153 +/*Session data linked list used during module exit*/
6154 + INIT_LIST_HEAD(&icp_ocfDrvGlobalSymListHead);
6155 + INIT_LIST_HEAD(&icp_ocfDrvGlobalSymListHead_FreeMemList);
6157 + icp_ocfDrvFreeLacSessionWorkQ =
6158 + create_singlethread_workqueue("ocfLacDeregWorkQueue");
6160 + return 0;
6163 +/* Name : icp_ocfDrvExit
6165 + * Description : This function will deregister all the symmetric sessions
6166 + * registered with the LAC component. It will also deregister all symmetric
6167 + * and asymmetric functionality that can be accelerated by the hardware via OCF
6168 + * and random number generation if it is enabled.
6169 + */
6170 +static void icp_ocfDrvExit(void)
6172 + CpaStatus lacStatus = CPA_STATUS_SUCCESS;
6173 + struct icp_drvSessionData *sessionData = NULL;
6174 + struct icp_drvSessionData *tempSessionData = NULL;
6175 + int i, remaining_delay_time_in_jiffies = 0;
6176 + /* There is a possibility of a process or new session command being */
6177 + /* sent before this variable is incremented. The aim of this variable */
6178 + /* is to stop a loop of calls creating a deadlock situation which */
6179 + /* would prevent the driver from exiting. */
6181 + atomic_inc(&icp_ocfDrvIsExiting);
6183 + /*Existing sessions will be routed to another driver after these calls */
6184 + crypto_unregister_all(icp_ocfDrvDriverId);
6185 + crypto_runregister_all(icp_ocfDrvDriverId);
6187 + /*If any sessions are waiting to be deregistered, do that. This also
6188 + flushes the work queue */
6189 + destroy_workqueue(icp_ocfDrvFreeLacSessionWorkQ);
6191 + /*ENTER CRITICAL SECTION */
6192 + spin_lock_bh(&icp_ocfDrvSymSessInfoListSpinlock);
6193 + list_for_each_entry_safe(tempSessionData, sessionData,
6194 + &icp_ocfDrvGlobalSymListHead, listNode) {
6195 + for (i = 0; i < num_dereg_retries; i++) {
6196 + /*No harm if bad input - LAC will handle error cases */
6197 + if (ICP_SESSION_RUNNING == tempSessionData->inUse) {
6198 + lacStatus =
6199 + cpaCySymRemoveSession
6200 + (CPA_INSTANCE_HANDLE_SINGLE,
6201 + tempSessionData->sessHandle);
6202 + if (CPA_STATUS_SUCCESS == lacStatus) {
6203 + /* Successfully deregistered */
6204 + break;
6205 + } else if (CPA_STATUS_RETRY != lacStatus) {
6206 + atomic_inc
6207 + (&lac_session_failed_dereg_count);
6208 + break;
6211 + /*schedule_timeout returns the time left for completion if
6212 + * this task is set to TASK_INTERRUPTIBLE */
6213 + remaining_delay_time_in_jiffies =
6214 + dereg_retry_delay_in_jiffies;
6215 + while (0 < remaining_delay_time_in_jiffies) {
6216 + set_current_state(TASK_INTERRUPTIBLE);
6217 + remaining_delay_time_in_jiffies =
6218 + schedule_timeout(remaining_delay_time_in_jiffies);
6221 + DPRINTK
6222 + ("%s(): Retry %d to deregistrate the session\n",
6223 + __FUNCTION__, i);
6227 + /*remove from current list */
6228 + list_del(&(tempSessionData->listNode));
6229 + /*add to free mem linked list */
6230 + list_add(&(tempSessionData->listNode),
6231 + &icp_ocfDrvGlobalSymListHead_FreeMemList);
6235 + /*EXIT CRITICAL SECTION */
6236 + spin_unlock_bh(&icp_ocfDrvSymSessInfoListSpinlock);
6238 + /*set back to initial values */
6239 + sessionData = NULL;
6240 + /*still have a reference in our list! */
6241 + tempSessionData = NULL;
6242 + /*free memory */
6243 + list_for_each_entry_safe(tempSessionData, sessionData,
6244 + &icp_ocfDrvGlobalSymListHead_FreeMemList,
6245 + listNode) {
6247 + list_del(&(tempSessionData->listNode));
6248 + /* Free allocated CpaCySymSessionCtx */
6249 + if (NULL != tempSessionData->sessHandle) {
6250 + kfree(tempSessionData->sessHandle);
6252 + memset(tempSessionData, 0, sizeof(struct icp_drvSessionData));
6253 + kmem_cache_free(drvSessionData_zone, tempSessionData);
6256 + if (0 != atomic_read(&lac_session_failed_dereg_count)) {
6257 + DPRINTK("%s(): %d LAC sessions were not deregistered "
6258 + "correctly. This is not a clean exit! \n",
6259 + __FUNCTION__,
6260 + atomic_read(&lac_session_failed_dereg_count));
6263 + icp_ocfDrvFreeCaches();
6264 + icp_ocfDrvDriverId = INVALID_DRIVER_ID;
6266 + /* Shutdown the Cryptographic component */
6267 + lacStatus = cpaCyStopInstance(CPA_INSTANCE_HANDLE_SINGLE);
6268 + if (CPA_STATUS_SUCCESS != lacStatus) {
6269 + DPRINTK("%s(): Failed to stop instance of the "
6270 + "Cryptographic component.(status == %d)\n",
6271 + __FUNCTION__, lacStatus);
6276 +/* Name : icp_ocfDrvFreeCaches
6278 + * Description : This function deregisters all slab caches
6279 + */
6280 +static void icp_ocfDrvFreeCaches(void)
6282 + if (atomic_read(&icp_ocfDrvIsExiting) != CPA_TRUE) {
6283 + atomic_set(&icp_ocfDrvIsExiting, 1);
6286 + /*Sym Zones */
6287 + ICP_CACHE_DESTROY(drvSessionData_zone);
6288 + ICP_CACHE_DESTROY(drvOpData_zone);
6290 + /*Asym zones */
6291 + ICP_CACHE_DESTROY(drvDH_zone);
6292 + ICP_CACHE_DESTROY(drvLnModExp_zone);
6293 + ICP_CACHE_DESTROY(drvRSADecrypt_zone);
6294 + ICP_CACHE_DESTROY(drvRSAPrivateKey_zone);
6295 + ICP_CACHE_DESTROY(drvDSARSSignKValue_zone);
6296 + ICP_CACHE_DESTROY(drvDSARSSign_zone);
6297 + ICP_CACHE_DESTROY(drvDSAVerify_zone);
6299 + /*FlatBuffer and BufferList Zones */
6300 + ICP_CACHE_DESTROY(drvFlatBuffer_zone);
6304 +/* Name : icp_ocfDrvDeregRetry
6306 + * Description : This function will try to farm the session deregistration
6307 + * off to a work queue. If it fails, nothing more can be done and it
6308 + * returns an error
6309 + */
6311 +int icp_ocfDrvDeregRetry(CpaCySymSessionCtx sessionToDeregister)
6313 + struct icp_ocfDrvFreeLacSession *workstore = NULL;
6315 + DPRINTK("%s(): Retry - Deregistering session (%p)\n",
6316 + __FUNCTION__, sessionToDeregister);
6318 + /*make sure the session is not available to be allocated during this
6319 + process */
6320 + atomic_inc(&lac_session_failed_dereg_count);
6322 + /*Farm off to work queue */
6323 + workstore =
6324 + kmalloc(sizeof(struct icp_ocfDrvFreeLacSession), GFP_ATOMIC);
6325 + if (NULL == workstore) {
6326 + DPRINTK("%s(): unable to free session - no memory available "
6327 + "for work queue\n", __FUNCTION__);
6328 + return ENOMEM;
6331 + workstore->sessionToDeregister = sessionToDeregister;
6333 + INIT_WORK(&(workstore->work), icp_ocfDrvDeferedFreeLacSessionProcess,
6334 + workstore);
6335 + queue_work(icp_ocfDrvFreeLacSessionWorkQ, &(workstore->work));
6337 + return ICP_OCF_DRV_STATUS_SUCCESS;
6341 +/* Name : icp_ocfDrvDeferedFreeLacSessionProcess
6343 + * Description : This function will retry (module input parameter)
6344 + * 'num_dereg_retries' times to deregister any symmetric session that receives a
6345 + * CPA_STATUS_RETRY message from the LAC component. This function runs in
6346 + * thread context because it is called from a worker thread
6347 + */
6348 +static void icp_ocfDrvDeferedFreeLacSessionProcess(void *arg)
6350 + struct icp_ocfDrvFreeLacSession *workstore = NULL;
6351 + CpaCySymSessionCtx sessionToDeregister = NULL;
6352 + int i = 0;
6353 + int remaining_delay_time_in_jiffies = 0;
6354 + CpaStatus lacStatus = CPA_STATUS_SUCCESS;
6356 + workstore = (struct icp_ocfDrvFreeLacSession *)arg;
6357 + if (NULL == workstore) {
6358 + DPRINTK("%s() function called with null parameter \n",
6359 + __FUNCTION__);
6360 + return;
6363 + sessionToDeregister = workstore->sessionToDeregister;
6364 + kfree(workstore);
6366 + /*if exiting, give deregistration one more blast only */
6367 + if (atomic_read(&icp_ocfDrvIsExiting) == CPA_TRUE) {
6368 + lacStatus = cpaCySymRemoveSession(CPA_INSTANCE_HANDLE_SINGLE,
6369 + sessionToDeregister);
6371 + if (lacStatus != CPA_STATUS_SUCCESS) {
6372 + DPRINTK("%s() Failed to Dereg LAC session %p "
6373 + "during module exit\n", __FUNCTION__,
6374 + sessionToDeregister);
6375 + return;
6378 + atomic_dec(&lac_session_failed_dereg_count);
6379 + return;
6382 + for (i = 0; i <= num_dereg_retries; i++) {
6383 + lacStatus = cpaCySymRemoveSession(CPA_INSTANCE_HANDLE_SINGLE,
6384 + sessionToDeregister);
6386 + if (lacStatus == CPA_STATUS_SUCCESS) {
6387 + atomic_dec(&lac_session_failed_dereg_count);
6388 + return;
6390 + if (lacStatus != CPA_STATUS_RETRY) {
6391 + DPRINTK("%s() Failed to deregister session - lacStatus "
6392 + " = %d", __FUNCTION__, lacStatus);
6393 + break;
6396 + /*schedule_timeout returns the time left for completion if this
6397 + task is set to TASK_INTERRUPTIBLE */
6398 + remaining_delay_time_in_jiffies = dereg_retry_delay_in_jiffies;
6399 + while (0 < remaining_delay_time_in_jiffies) {
6400 + set_current_state(TASK_INTERRUPTIBLE);
6401 + remaining_delay_time_in_jiffies =
6402 + schedule_timeout(remaining_delay_time_in_jiffies);
6406 + DPRINTK("%s(): Unable to deregister session\n", __FUNCTION__);
6407 + DPRINTK("%s(): Number of unavailable LAC sessions = %d\n", __FUNCTION__,
6408 + atomic_read(&lac_session_failed_dereg_count));
6411 +/* Name : icp_ocfDrvPtrAndLenToFlatBuffer
6413 + * Description : This function converts a "pointer and length" buffer
6414 + * structure to Fredericksburg Flat Buffer (CpaFlatBuffer) format.
6416 + * This function assumes that the data passed in are valid.
6417 + */
6418 +inline void
6419 +icp_ocfDrvPtrAndLenToFlatBuffer(void *pData, uint32_t len,
6420 + CpaFlatBuffer * pFlatBuffer)
6422 + pFlatBuffer->pData = pData;
6423 + pFlatBuffer->dataLenInBytes = len;
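+/* Minimal usage sketch (illustrative only): wrapping a contiguous buffer in
+ * a CpaFlatBuffer before handing it to LAC. The digest buffer below is an
+ * example assumption, not taken from the driver.
+ */
+#if 0
+{
+ uint8_t digest[ICP_SHA1_DIGEST_SIZE_IN_BYTES];
+ CpaFlatBuffer flatDigest;
+
+ icp_ocfDrvPtrAndLenToFlatBuffer(digest, sizeof(digest), &flatDigest);
+ /* flatDigest.pData now points at digest and
+ flatDigest.dataLenInBytes == sizeof(digest) */
+}
+#endif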
6426 +/* Name : icp_ocfDrvSingleSkBuffToFlatBuffer
6428 + * Description : This function converts a single socket buffer (sk_buff)
6429 + * structure to a Fredericksburg Flat Buffer (CpaFlatBuffer) format.
6431 + * This function assumes that the data passed in are valid.
6432 + */
6433 +static inline void
6434 +icp_ocfDrvSingleSkBuffToFlatBuffer(struct sk_buff *pSkb,
6435 + CpaFlatBuffer * pFlatBuffer)
6437 + pFlatBuffer->pData = pSkb->data;
6438 + pFlatBuffer->dataLenInBytes = skb_headlen(pSkb);
6441 +/* Name : icp_ocfDrvSkBuffToBufferList
6443 + * Description : This function converts a socket buffer (sk_buff) structure to
6444 + * Fredericksburg Scatter/Gather (CpaBufferList) buffer format.
6446 + * This function assumes that the bufferlist has been allocated with the correct
6447 + * number of buffer arrays.
6448 + *
6449 + */
6450 +inline int
6451 +icp_ocfDrvSkBuffToBufferList(struct sk_buff *pSkb, CpaBufferList * bufferList)
6453 + CpaFlatBuffer *curFlatBuffer = NULL;
6454 + char *skbuffPageAddr = NULL;
6455 + struct sk_buff *pCurFrag = NULL;
6456 + struct skb_shared_info *pShInfo = NULL;
6457 + uint32_t page_offset = 0, i = 0;
6459 + DPRINTK("%s(): Entry Point\n", __FUNCTION__);
6461 + /*
6462 + * In all cases, the first skb needs to be translated to FlatBuffer.
6463 + * Perform a buffer translation for the first skbuff
6464 + */
6465 + curFlatBuffer = bufferList->pBuffers;
6466 + icp_ocfDrvSingleSkBuffToFlatBuffer(pSkb, curFlatBuffer);
6468 + /* Set the userData to point to the original sk_buff */
6469 + bufferList->pUserData = (void *)pSkb;
6471 + /* We now know we'll have at least one element in the SGL */
6472 + bufferList->numBuffers = 1;
6474 + if (0 == skb_is_nonlinear(pSkb)) {
6475 + /* Is a linear buffer - therefore it's a single skbuff */
6476 + DPRINTK("%s(): Exit Point\n", __FUNCTION__);
6477 + return ICP_OCF_DRV_STATUS_SUCCESS;
6480 + curFlatBuffer++;
6481 + pShInfo = skb_shinfo(pSkb);
6482 + if (pShInfo->frag_list != NULL && pShInfo->nr_frags != 0) {
6483 + EPRINTK("%s():"
6484 + "Translation for a combination of frag_list "
6485 + "and frags[] array not supported!\n", __FUNCTION__);
6486 + return ICP_OCF_DRV_STATUS_FAIL;
6487 + } else if (pShInfo->frag_list != NULL) {
6488 + /*
6489 + * Non linear skbuff supported through frag_list
6490 + * Perform translation for each fragment (sk_buff)
6491 + * in the frag_list of the first sk_buff.
6492 + */
6493 + for (pCurFrag = pShInfo->frag_list;
6494 + pCurFrag != NULL; pCurFrag = pCurFrag->next) {
6495 + icp_ocfDrvSingleSkBuffToFlatBuffer(pCurFrag,
6496 + curFlatBuffer);
6497 + curFlatBuffer++;
6498 + bufferList->numBuffers++;
6500 + } else if (pShInfo->nr_frags != 0) {
6501 + /*
6502 + * Perform translation for each fragment in frags array
6503 + * and add to the BufferList
6504 + */
6505 + for (i = 0; i < pShInfo->nr_frags; i++) {
6506 + /* Get the page address and offset of this frag */
6507 + skbuffPageAddr = (char *)pShInfo->frags[i].page;
6508 + page_offset = pShInfo->frags[i].page_offset;
6510 + /* Convert a pointer and length to a flat buffer */
6511 + icp_ocfDrvPtrAndLenToFlatBuffer(skbuffPageAddr +
6512 + page_offset,
6513 + pShInfo->frags[i].size,
6514 + curFlatBuffer);
6515 + curFlatBuffer++;
6516 + bufferList->numBuffers++;
6518 + } else {
6519 + EPRINTK("%s():" "Could not recognize skbuff fragments!\n",
6520 + __FUNCTION__);
6521 + return ICP_OCF_DRV_STATUS_FAIL;
6524 + DPRINTK("%s(): Exit Point\n", __FUNCTION__);
6525 + return ICP_OCF_DRV_STATUS_SUCCESS;
6528 +/* Name : icp_ocfDrvBufferListToSkBuff
6530 + * Description : This function converts a Fredericksburg Scatter/Gather
6531 + * (CpaBufferList) buffer format to socket buffer structure.
6532 + */
6533 +inline int
6534 +icp_ocfDrvBufferListToSkBuff(CpaBufferList * bufferList, struct sk_buff **skb)
6536 + DPRINTK("%s(): Entry Point\n", __FUNCTION__);
6538 + /* Retrieve the original skbuff */
6539 + *skb = (struct sk_buff *)bufferList->pUserData;
6540 + if (NULL == *skb) {
6541 + EPRINTK("%s():"
6542 + "Error on converting from a BufferList. "
6543 + "The BufferList does not contain an sk_buff.\n",
6544 + __FUNCTION__);
6545 + return ICP_OCF_DRV_STATUS_FAIL;
6547 + DPRINTK("%s(): Exit Point\n", __FUNCTION__);
6548 + return ICP_OCF_DRV_STATUS_SUCCESS;
6551 +/* Name : icp_ocfDrvPtrAndLenToBufferList
6553 + * Description : This function converts a "pointer and length" buffer
6554 + * structure to Fredericksburg Scatter/Gather Buffer (CpaBufferList) format.
6556 + * This function assumes that the data passed in are valid.
6557 + */
6558 +inline void
6559 +icp_ocfDrvPtrAndLenToBufferList(void *pDataIn, uint32_t length,
6560 + CpaBufferList * pBufferList)
6562 + pBufferList->numBuffers = 1;
6563 + pBufferList->pBuffers->pData = pDataIn;
6564 + pBufferList->pBuffers->dataLenInBytes = length;
6567 +/* Name : icp_ocfDrvBufferListToPtrAndLen
6569 + * Description : This function converts Fredericksburg Scatter/Gather Buffer
6570 + * (CpaBufferList) format to a "pointer and length" buffer structure.
6572 + * This function assumes that the data passed in are valid.
6573 + */
6574 +inline void
6575 +icp_ocfDrvBufferListToPtrAndLen(CpaBufferList * pBufferList,
6576 + void **ppDataOut, uint32_t * pLength)
6578 + *ppDataOut = pBufferList->pBuffers->pData;
6579 + *pLength = pBufferList->pBuffers->dataLenInBytes;
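+/* Illustrative round-trip sketch for the two single-buffer helpers above.
+ * The payload size and the stack-allocated CpaBufferList/CpaFlatBuffer pair
+ * are example assumptions only.
+ */
+#if 0
+{
+ uint8_t payload[64];
+ CpaFlatBuffer flat;
+ CpaBufferList list;
+ void *pOut = NULL;
+ uint32_t outLen = 0;
+
+ list.pBuffers = &flat;
+ icp_ocfDrvPtrAndLenToBufferList(payload, sizeof(payload), &list);
+ icp_ocfDrvBufferListToPtrAndLen(&list, &pOut, &outLen);
+ /* pOut == payload and outLen == sizeof(payload) after the round trip */
+}
+#endif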
6582 +/* Name : icp_ocfDrvBufferListMemInfo
6584 + * Description : This function will set the number of flat buffers in the
6585 + * bufferlist and the size of memory to allocate for the pPrivateMetaData
6586 + * member of the CpaBufferList.
6587 + */
6588 +int
6589 +icp_ocfDrvBufferListMemInfo(uint16_t numBuffers,
6590 + struct icp_drvBuffListInfo *buffListInfo)
6592 + buffListInfo->numBuffers = numBuffers;
6594 + if (CPA_STATUS_SUCCESS !=
6595 + cpaCyBufferListGetMetaSize(CPA_INSTANCE_HANDLE_SINGLE,
6596 + buffListInfo->numBuffers,
6597 + &(buffListInfo->metaSize))) {
6598 + EPRINTK("%s() Failed to get buffer list meta size.\n",
6599 + __FUNCTION__);
6600 + return ICP_OCF_DRV_STATUS_FAIL;
6603 + return ICP_OCF_DRV_STATUS_SUCCESS;
6606 +/* Name : icp_ocfDrvGetSkBuffFrags
6608 + * Description : This function will determine the number of
6609 + * fragments in a socket buffer(sk_buff).
6610 + */
6611 +inline uint16_t icp_ocfDrvGetSkBuffFrags(struct sk_buff * pSkb)
6613 + uint16_t numFrags = 0;
6614 + struct sk_buff *pCurFrag = NULL;
6615 + struct skb_shared_info *pShInfo = NULL;
6617 + if (NULL == pSkb)
6618 + return 0;
6620 + numFrags = 1;
6621 + if (0 == skb_is_nonlinear(pSkb)) {
6622 + /* Linear buffer - it's a single skbuff */
6623 + return numFrags;
6626 + pShInfo = skb_shinfo(pSkb);
6627 + if (NULL != pShInfo->frag_list && 0 != pShInfo->nr_frags) {
6628 + EPRINTK("%s(): Combination of frag_list "
6629 + "and frags[] array not supported!\n", __FUNCTION__);
6630 + return 0;
6631 + } else if (0 != pShInfo->nr_frags) {
6632 + numFrags += pShInfo->nr_frags;
6633 + return numFrags;
6634 + } else if (NULL != pShInfo->frag_list) {
6635 + for (pCurFrag = pShInfo->frag_list;
6636 + pCurFrag != NULL; pCurFrag = pCurFrag->next) {
6637 + numFrags++;
6639 + return numFrags;
6640 + } else {
6641 + return 0;
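+/* Illustrative sketch: using the fragment count above to check that an
+ * sk_buff fits the driver's default flat buffer array before calling
+ * icp_ocfDrvSkBuffToBufferList(). The sk_buff pointer and the stack-based
+ * CpaBufferList are assumptions made for the example only.
+ */
+#if 0
+{
+ struct sk_buff *pSkb = NULL; /* hypothetical, supplied by a caller */
+ CpaFlatBuffer flatArray[ICP_OCF_DRV_DEFAULT_BUFFLIST_ARRAYS];
+ CpaBufferList list;
+ uint16_t numFrags = icp_ocfDrvGetSkBuffFrags(pSkb);
+
+ if (0 != numFrags && ICP_OCF_DRV_DEFAULT_BUFFLIST_ARRAYS >= numFrags) {
+ list.pBuffers = flatArray;
+ if (ICP_OCF_DRV_STATUS_SUCCESS !=
+ icp_ocfDrvSkBuffToBufferList(pSkb, &list)) {
+ EPRINTK("%s(): skb translation failed\n", __FUNCTION__);
+ }
+ }
+}
+#endif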
6645 +/* Name : icp_ocfDrvFreeFlatBuffer
6647 + * Description : This function will deallocate flat buffer.
6648 + */
6649 +inline void icp_ocfDrvFreeFlatBuffer(CpaFlatBuffer * pFlatBuffer)
6651 + if (pFlatBuffer != NULL) {
6652 + memset(pFlatBuffer, 0, sizeof(CpaFlatBuffer));
6653 + kmem_cache_free(drvFlatBuffer_zone, pFlatBuffer);
6657 +/* Name : icp_ocfDrvAllocMetaData
6659 + * Description : This function will allocate memory for the
6660 + * pPrivateMetaData member of CpaBufferList.
6661 + */
6662 +inline int
6663 +icp_ocfDrvAllocMetaData(CpaBufferList * pBufferList,
6664 + const struct icp_drvOpData *pOpData)
6666 + Cpa32U metaSize = 0;
6668 + if (pBufferList->numBuffers <= ICP_OCF_DRV_DEFAULT_BUFFLIST_ARRAYS){
6669 + void *pOpDataStartAddr = (void *)pOpData;
6671 + if (0 == defBuffListInfo.metaSize) {
6672 + pBufferList->pPrivateMetaData = NULL;
6673 + return ICP_OCF_DRV_STATUS_SUCCESS;
6675 + /*
6676 + * The meta data allocation has been included as part of the
6677 + * op data. It has been pre-allocated in memory just after the
6678 + * icp_drvOpData structure.
6679 + */
6680 + pBufferList->pPrivateMetaData = pOpDataStartAddr +
6681 + sizeof(struct icp_drvOpData);
6682 + } else {
6683 + if (CPA_STATUS_SUCCESS !=
6684 + cpaCyBufferListGetMetaSize(CPA_INSTANCE_HANDLE_SINGLE,
6685 + pBufferList->numBuffers,
6686 + &metaSize)) {
6687 + EPRINTK("%s() Failed to get buffer list meta size.\n",
6688 + __FUNCTION__);
6689 + return ICP_OCF_DRV_STATUS_FAIL;
6692 + if (0 == metaSize) {
6693 + pBufferList->pPrivateMetaData = NULL;
6694 + return ICP_OCF_DRV_STATUS_SUCCESS;
6697 + pBufferList->pPrivateMetaData = kmalloc(metaSize, GFP_ATOMIC);
6699 + if (NULL == pBufferList->pPrivateMetaData) {
6700 + EPRINTK("%s() Failed to allocate pPrivateMetaData.\n",
6701 + __FUNCTION__);
6702 + return ICP_OCF_DRV_STATUS_FAIL;
6705 + return ICP_OCF_DRV_STATUS_SUCCESS;
6708 +/* Name : icp_ocfDrvFreeMetaData
6710 + * Description : This function will deallocate pPrivateMetaData memory.
6711 + */
6712 +inline void icp_ocfDrvFreeMetaData(CpaBufferList * pBufferList)
6714 + if (NULL == pBufferList->pPrivateMetaData) {
6715 + return;
6718 + /*
6719 + * Only free the meta data if the BufferList has more than
6720 + * ICP_OCF_DRV_DEFAULT_BUFFLIST_ARRAYS number of buffers.
6721 + * Otherwise, the meta data shall be freed when the icp_drvOpData is
6722 + * freed.
6723 + */
6724 + if (ICP_OCF_DRV_DEFAULT_BUFFLIST_ARRAYS < pBufferList->numBuffers){
6725 + kfree(pBufferList->pPrivateMetaData);
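+/* Illustrative pairing of the two helpers above (assumptions: the op data
+ * is taken from drvOpData_zone and carries a single flat buffer). Meta data
+ * set up by icp_ocfDrvAllocMetaData() is released with
+ * icp_ocfDrvFreeMetaData(); for small buffer lists the meta data lives just
+ * after the op data and is freed together with it.
+ */
+#if 0
+{
+ struct icp_drvOpData *pOpData =
+ kmem_cache_zalloc(drvOpData_zone, GFP_ATOMIC);
+
+ if (NULL != pOpData) {
+ pOpData->srcBuffer.pBuffers = pOpData->bufferListArray;
+ pOpData->srcBuffer.numBuffers = 1;
+ if (ICP_OCF_DRV_STATUS_SUCCESS ==
+ icp_ocfDrvAllocMetaData(&pOpData->srcBuffer, pOpData)) {
+ /* ... hand pOpData->srcBuffer to LAC here ... */
+ icp_ocfDrvFreeMetaData(&pOpData->srcBuffer);
+ }
+ kmem_cache_free(drvOpData_zone, pOpData);
+ }
+}
+#endif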
6729 +module_init(icp_ocfDrvInit);
6730 +module_exit(icp_ocfDrvExit);
6731 +MODULE_LICENSE("Dual BSD/GPL");
6732 +MODULE_AUTHOR("Intel");
6733 +MODULE_DESCRIPTION("OCF Driver for Intel Quick Assist crypto acceleration");
6734 diff -Nur linux-2.6.30.orig/crypto/ocf/ep80579/icp_ocf.h linux-2.6.30/crypto/ocf/ep80579/icp_ocf.h
6735 --- linux-2.6.30.orig/crypto/ocf/ep80579/icp_ocf.h 1970-01-01 01:00:00.000000000 +0100
6736 +++ linux-2.6.30/crypto/ocf/ep80579/icp_ocf.h 2009-06-11 10:55:27.000000000 +0200
6737 @@ -0,0 +1,363 @@
6738 +/***************************************************************************
6740 + * This file is provided under a dual BSD/GPLv2 license. When using or
6741 + * redistributing this file, you may do so under either license.
6742 + *
6743 + * GPL LICENSE SUMMARY
6744 + *
6745 + * Copyright(c) 2007,2008 Intel Corporation. All rights reserved.
6746 + *
6747 + * This program is free software; you can redistribute it and/or modify
6748 + * it under the terms of version 2 of the GNU General Public License as
6749 + * published by the Free Software Foundation.
6750 + *
6751 + * This program is distributed in the hope that it will be useful, but
6752 + * WITHOUT ANY WARRANTY; without even the implied warranty of
6753 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
6754 + * General Public License for more details.
6755 + *
6756 + * You should have received a copy of the GNU General Public License
6757 + * along with this program; if not, write to the Free Software
6758 + * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
6759 + * The full GNU General Public License is included in this distribution
6760 + * in the file called LICENSE.GPL.
6761 + *
6762 + * Contact Information:
6763 + * Intel Corporation
6764 + *
6765 + * BSD LICENSE
6766 + *
6767 + * Copyright(c) 2007,2008 Intel Corporation. All rights reserved.
6768 + * All rights reserved.
6769 + *
6770 + * Redistribution and use in source and binary forms, with or without
6771 + * modification, are permitted provided that the following conditions
6772 + * are met:
6773 + *
6774 + * * Redistributions of source code must retain the above copyright
6775 + * notice, this list of conditions and the following disclaimer.
6776 + * * Redistributions in binary form must reproduce the above copyright
6777 + * notice, this list of conditions and the following disclaimer in
6778 + * the documentation and/or other materials provided with the
6779 + * distribution.
6780 + * * Neither the name of Intel Corporation nor the names of its
6781 + * contributors may be used to endorse or promote products derived
6782 + * from this software without specific prior written permission.
6783 + *
6784 + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
6785 + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
6786 + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
6787 + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
6788 + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
6789 + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
6790 + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
6791 + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
6792 + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
6793 + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
6794 + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
6795 + *
6796 + *
6797 + * version: Security.L.1.0.130
6799 + ***************************************************************************/
6802 + * OCF drv driver header file for the Intel ICP processor.
6803 + */
6805 +#ifndef ICP_OCF_H
6806 +#define ICP_OCF_H
6808 +#include <linux/crypto.h>
6809 +#include <linux/delay.h>
6810 +#include <linux/skbuff.h>
6812 +#include "cryptodev.h"
6813 +#include "uio.h"
6815 +#include "cpa.h"
6816 +#include "cpa_cy_im.h"
6817 +#include "cpa_cy_sym.h"
6818 +#include "cpa_cy_rand.h"
6819 +#include "cpa_cy_dh.h"
6820 +#include "cpa_cy_rsa.h"
6821 +#include "cpa_cy_ln.h"
6822 +#include "cpa_cy_common.h"
6823 +#include "cpa_cy_dsa.h"
6825 +#define NUM_BITS_IN_BYTE (8)
6826 +#define NUM_BITS_IN_BYTE_MINUS_ONE (NUM_BITS_IN_BYTE -1)
6827 +#define INVALID_DRIVER_ID (-1)
6828 +#define RETURN_RAND_NUM_GEN_FAILED (-1)
6830 +/*This define means only one operation can be chained to another
6831 +(resulting in one chain of two operations)*/
6832 +#define MAX_NUM_OF_CHAINED_OPS (1)
6833 +/*This is the maximum block cipher initialisation vector length in bytes*/
6834 +#define MAX_IV_LEN_IN_BYTES (20)
6835 +/*This is used to check whether the OCF to this driver session limit has
6836 + been disabled*/
6837 +#define NO_OCF_TO_DRV_MAX_SESSIONS (0)
6839 +/*OCF values mapped here*/
6840 +#define ICP_SHA1_DIGEST_SIZE_IN_BYTES (SHA1_HASH_LEN)
6841 +#define ICP_SHA256_DIGEST_SIZE_IN_BYTES (SHA2_256_HASH_LEN)
6842 +#define ICP_SHA384_DIGEST_SIZE_IN_BYTES (SHA2_384_HASH_LEN)
6843 +#define ICP_SHA512_DIGEST_SIZE_IN_BYTES (SHA2_512_HASH_LEN)
6844 +#define ICP_MD5_DIGEST_SIZE_IN_BYTES (MD5_HASH_LEN)
6845 +#define ARC4_COUNTER_LEN (ARC4_BLOCK_LEN)
6847 +#define OCF_REGISTRATION_STATUS_SUCCESS (0)
6848 +#define OCF_ZERO_FUNCTIONALITY_REGISTERED (0)
6849 +#define ICP_OCF_DRV_NO_CRYPTO_PROCESS_ERROR (0)
6850 +#define ICP_OCF_DRV_STATUS_SUCCESS (0)
6851 +#define ICP_OCF_DRV_STATUS_FAIL (1)
6853 +/*Turn on/off debug options*/
6854 +#define ICP_OCF_PRINT_DEBUG_MESSAGES (0)
6855 +#define ICP_OCF_PRINT_KERN_ALERT (1)
6856 +#define ICP_OCF_PRINT_KERN_ERRS (1)
6858 +/*DSA Prime Q size in bytes (as defined in the standard) */
6859 +#define DSA_RS_SIGN_PRIMEQ_SIZE_IN_BYTES (20)
6861 +/*MACRO DEFINITIONS*/
6863 +#define BITS_TO_BYTES(bytes, bits) \
6864 + bytes = (bits + NUM_BITS_IN_BYTE_MINUS_ONE) / NUM_BITS_IN_BYTE
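+/* Example (illustrative): BITS_TO_BYTES rounds up to whole bytes, e.g.
+ * BITS_TO_BYTES(len, 1024) sets len to 128, while
+ * BITS_TO_BYTES(len, 1025) sets len to 129. */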
6866 +#define ICP_CACHE_CREATE(cache_ID, cache_name) \
6867 + kmem_cache_create(cache_ID, sizeof(cache_name),0, \
6868 + SLAB_HWCACHE_ALIGN, NULL, NULL);
6870 +#define ICP_CACHE_NULL_CHECK(slab_zone) \
6871 +{ \
6872 + if(NULL == slab_zone){ \
6873 + icp_ocfDrvFreeCaches(); \
6874 + EPRINTK("%s() line %d: Not enough memory!\n", \
6875 + __FUNCTION__, __LINE__); \
6876 + return ENOMEM; \
6877 + } \
6880 +#define ICP_CACHE_DESTROY(slab_zone) \
6881 +{ \
6882 + if(NULL != slab_zone){ \
6883 + kmem_cache_destroy(slab_zone); \
6884 + slab_zone = NULL; \
6885 + } \
6888 +#define ICP_REGISTER_SYM_FUNCTIONALITY_WITH_OCF(alg) \
6889 +{ \
6890 + if(OCF_REGISTRATION_STATUS_SUCCESS == \
6891 + crypto_register(icp_ocfDrvDriverId, \
6892 + alg, \
6893 + 0, \
6894 + 0)) { \
6895 + ocfStatus++; \
6896 + } \
6899 +#define ICP_REGISTER_ASYM_FUNCTIONALITY_WITH_OCF(alg) \
6900 +{ \
6901 + if(OCF_REGISTRATION_STATUS_SUCCESS == \
6902 + crypto_kregister(icp_ocfDrvDriverId, \
6903 + alg, \
6904 + 0)){ \
6905 + ocfStatus++; \
6906 + } \
6909 +#if ICP_OCF_PRINT_DEBUG_MESSAGES == 1
6910 +#define DPRINTK(args...) \
6911 +{ \
6912 + printk(args); \
6915 +#else //ICP_OCF_PRINT_DEBUG_MESSAGES == 1
6917 +#define DPRINTK(args...)
6919 +#endif //ICP_OCF_PRINT_DEBUG_MESSAGES == 1
6921 +#if ICP_OCF_PRINT_KERN_ALERT == 1
6922 +#define APRINTK(args...) \
6923 +{ \
6924 + printk(KERN_ALERT args); \
6927 +#else //ICP_OCF_PRINT_KERN_ALERT == 1
6929 +#define APRINTK(args...)
6931 +#endif //ICP_OCF_PRINT_KERN_ALERT == 1
6933 +#if ICP_OCF_PRINT_KERN_ERRS == 1
6934 +#define EPRINTK(args...) \
6935 +{ \
6936 + printk(KERN_ERR args); \
6939 +#else //ICP_OCF_PRINT_KERN_ERRS == 1
6941 +#define EPRINTK(args...)
6943 +#endif //ICP_OCF_PRINT_KERN_ERRS == 1
6945 +#define IPRINTK(args...) \
6946 +{ \
6947 + printk(KERN_INFO args); \
6950 +/*END OF MACRO DEFINITIONS*/
6952 +typedef enum {
6953 + ICP_OCF_DRV_ALG_CIPHER = 0,
6954 + ICP_OCF_DRV_ALG_HASH
6955 +} icp_ocf_drv_alg_type_t;
6957 +/* These are all defined in icp_common.c */
6958 +extern atomic_t lac_session_failed_dereg_count;
6959 +extern atomic_t icp_ocfDrvIsExiting;
6960 +extern atomic_t num_ocf_to_drv_registered_sessions;
6962 +/*These are user inputs used in icp_sym.c and icp_common.c.
6963 + They are instantiated in icp_common.c*/
6964 +extern int max_sessions;
6966 +extern int32_t icp_ocfDrvDriverId;
6967 +extern struct list_head icp_ocfDrvGlobalSymListHead;
6968 +extern struct list_head icp_ocfDrvGlobalSymListHead_FreeMemList;
6969 +extern struct workqueue_struct *icp_ocfDrvFreeLacSessionWorkQ;
6970 +extern spinlock_t icp_ocfDrvSymSessInfoListSpinlock;
6971 +extern rwlock_t icp_kmem_cache_destroy_alloc_lock;
6973 +/*Slab zones for symmetric functionality, instantiated in icp_common.c*/
6974 +extern struct kmem_cache *drvSessionData_zone;
6975 +extern struct kmem_cache *drvOpData_zone;
6977 +/*Slab zones for asymmetric functionality, instantiated in icp_common.c*/
6978 +extern struct kmem_cache *drvDH_zone;
6979 +extern struct kmem_cache *drvLnModExp_zone;
6980 +extern struct kmem_cache *drvRSADecrypt_zone;
6981 +extern struct kmem_cache *drvRSAPrivateKey_zone;
6982 +extern struct kmem_cache *drvDSARSSign_zone;
6983 +extern struct kmem_cache *drvDSARSSignKValue_zone;
6984 +extern struct kmem_cache *drvDSAVerify_zone;
6986 +/*Slab zones for flatbuffers and bufferlist*/
6987 +extern struct kmem_cache *drvFlatBuffer_zone;
6989 +#define ICP_OCF_DRV_DEFAULT_BUFFLIST_ARRAYS (16)
6991 +struct icp_drvBuffListInfo {
6992 + Cpa16U numBuffers;
6993 + Cpa32U metaSize;
6994 + Cpa32U metaOffset;
6995 + Cpa32U buffListSize;
6997 +extern struct icp_drvBuffListInfo defBuffListInfo;
7000 +* This struct is used to keep a reference to the relevant node in the list
7001 +* of sessionData structs, to the buffer type required by OCF and to the OCF
7002 +* provided crp struct that needs to be returned. All this info is needed in
7003 +* the callback function.
7005 +* IV can sometimes be stored in non-contiguous memory (e.g. an skbuff
7006 +* linked/frag list), therefore a contiguous memory space for the IV data must
7007 +* be created and passed to LAC
7010 +struct icp_drvOpData {
7011 + CpaCySymOpData lacOpData;
7012 + uint32_t digestSizeInBytes;
7013 + struct cryptop *crp;
7014 + uint8_t bufferType;
7015 + uint8_t ivData[MAX_IV_LEN_IN_BYTES];
7016 + uint16_t numBufferListArray;
7017 + CpaBufferList srcBuffer;
7018 + CpaFlatBuffer bufferListArray[ICP_OCF_DRV_DEFAULT_BUFFLIST_ARRAYS];
7019 + CpaBoolean verifyResult;
7021 +/*Values used to reduce the risk of perform requests being executed against
7022 +deregistered sessions (for which the slab page has been reclaimed).
7023 +This is not a complete fix - since page frames are reclaimed from a slab, one
7024 +cannot rely on that memory not being re-used by another app.*/
7025 +typedef enum {
7026 + ICP_SESSION_INITIALISED = 0x5C5C5C,
7027 + ICP_SESSION_RUNNING = 0x005C00,
7028 + ICP_SESSION_DEREGISTERED = 0xC5C5C5
7029 +} usage_derisk;
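+/* A minimal sketch of the guard that the perform path applies with these
+   sentinel values (the real check lives in icp_ocfDrvSymProcess in icp_sym.c): */
+#if 0
+	if (ICP_SESSION_DEREGISTERED == sessionData->inUse) {
+		crp->crp_etype = EFAULT;	/* stale session, refuse the request */
+		return EFAULT;
+	}
+#endif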
7032 +This is the OCF<->OCF_DRV session object:
7034 +1.The first member is a listNode. These session objects are added to a linked
7035 + list in order to make it easier to remove them all at session exit time.
7036 +2.The second member is used to give the session object state and derisk the
7037 + possibility of OCF batch calls executing against a deregistered session (as
7038 + described above).
7039 +3.The third member is a LAC<->OCF_DRV session handle (initialised with the first
7040 + perform request for that session).
7041 +4.The fourth is the LAC session context. All the parameters for this structure
7042 + are only known when the first perform request for this session occurs. That is
7043 + why the OCF Tolapai Driver only registers a new LAC session at perform time
7045 +struct icp_drvSessionData {
7046 + struct list_head listNode;
7047 + usage_derisk inUse;
7048 + CpaCySymSessionCtx sessHandle;
7049 + CpaCySymSessionSetupData lacSessCtx;
7052 +/* This struct is required for deferred session
7053 + deregistration as a work queue function can
7054 + only have one argument*/
7055 +struct icp_ocfDrvFreeLacSession {
7056 + CpaCySymSessionCtx sessionToDeregister;
7057 + struct work_struct work;
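+/* Because a work queue handler only receives the embedded work_struct, the
+   enclosing object is recovered with container_of. A sketch of that pattern
+   (the handler name is illustrative and retry handling is omitted): */
+#if 0
+static void icp_ocfDrvDeferredDereg(struct work_struct *work)
+{
+	struct icp_ocfDrvFreeLacSession *freeSess =
+	    container_of(work, struct icp_ocfDrvFreeLacSession, work);
+
+	cpaCySymRemoveSession(CPA_INSTANCE_HANDLE_SINGLE,
+			      freeSess->sessionToDeregister);
+}
+#endif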
7060 +int icp_ocfDrvNewSession(device_t dev, uint32_t * sild, struct cryptoini *cri);
7062 +int icp_ocfDrvFreeLACSession(device_t dev, uint64_t sid);
7064 +int icp_ocfDrvSymProcess(device_t dev, struct cryptop *crp, int hint);
7066 +int icp_ocfDrvPkeProcess(device_t dev, struct cryptkop *krp, int hint);
7068 +int icp_ocfDrvReadRandom(void *arg, uint32_t * buf, int maxwords);
7070 +int icp_ocfDrvDeregRetry(CpaCySymSessionCtx sessionToDeregister);
7072 +int icp_ocfDrvSkBuffToBufferList(struct sk_buff *skb,
7073 + CpaBufferList * bufferList);
7075 +int icp_ocfDrvBufferListToSkBuff(CpaBufferList * bufferList,
7076 + struct sk_buff **skb);
7078 +void icp_ocfDrvPtrAndLenToFlatBuffer(void *pData, uint32_t len,
7079 + CpaFlatBuffer * pFlatBuffer);
7081 +void icp_ocfDrvPtrAndLenToBufferList(void *pDataIn, uint32_t length,
7082 + CpaBufferList * pBufferList);
7084 +void icp_ocfDrvBufferListToPtrAndLen(CpaBufferList * pBufferList,
7085 + void **ppDataOut, uint32_t * pLength);
7087 +int icp_ocfDrvBufferListMemInfo(uint16_t numBuffers,
7088 + struct icp_drvBuffListInfo *buffListInfo);
7090 +uint16_t icp_ocfDrvGetSkBuffFrags(struct sk_buff *pSkb);
7092 +void icp_ocfDrvFreeFlatBuffer(CpaFlatBuffer * pFlatBuffer);
7094 +int icp_ocfDrvAllocMetaData(CpaBufferList * pBufferList,
7095 + const struct icp_drvOpData *pOpData);
7097 +void icp_ocfDrvFreeMetaData(CpaBufferList * pBufferList);
7099 +#endif
7100 +/* ICP_OCF_H */
7101 diff -Nur linux-2.6.30.orig/crypto/ocf/ep80579/icp_sym.c linux-2.6.30/crypto/ocf/ep80579/icp_sym.c
7102 --- linux-2.6.30.orig/crypto/ocf/ep80579/icp_sym.c 1970-01-01 01:00:00.000000000 +0100
7103 +++ linux-2.6.30/crypto/ocf/ep80579/icp_sym.c 2009-06-11 10:55:27.000000000 +0200
7104 @@ -0,0 +1,1382 @@
7105 +/***************************************************************************
7107 + * This file is provided under a dual BSD/GPLv2 license. When using or
7108 + * redistributing this file, you may do so under either license.
7109 + *
7110 + * GPL LICENSE SUMMARY
7111 + *
7112 + * Copyright(c) 2007,2008 Intel Corporation. All rights reserved.
7113 + *
7114 + * This program is free software; you can redistribute it and/or modify
7115 + * it under the terms of version 2 of the GNU General Public License as
7116 + * published by the Free Software Foundation.
7117 + *
7118 + * This program is distributed in the hope that it will be useful, but
7119 + * WITHOUT ANY WARRANTY; without even the implied warranty of
7120 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
7121 + * General Public License for more details.
7122 + *
7123 + * You should have received a copy of the GNU General Public License
7124 + * along with this program; if not, write to the Free Software
7125 + * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
7126 + * The full GNU General Public License is included in this distribution
7127 + * in the file called LICENSE.GPL.
7128 + *
7129 + * Contact Information:
7130 + * Intel Corporation
7131 + *
7132 + * BSD LICENSE
7133 + *
7134 + * Copyright(c) 2007,2008 Intel Corporation. All rights reserved.
7135 + * All rights reserved.
7136 + *
7137 + * Redistribution and use in source and binary forms, with or without
7138 + * modification, are permitted provided that the following conditions
7139 + * are met:
7140 + *
7141 + * * Redistributions of source code must retain the above copyright
7142 + * notice, this list of conditions and the following disclaimer.
7143 + * * Redistributions in binary form must reproduce the above copyright
7144 + * notice, this list of conditions and the following disclaimer in
7145 + * the documentation and/or other materials provided with the
7146 + * distribution.
7147 + * * Neither the name of Intel Corporation nor the names of its
7148 + * contributors may be used to endorse or promote products derived
7149 + * from this software without specific prior written permission.
7150 + *
7151 + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
7152 + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
7153 + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
7154 + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
7155 + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
7156 + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
7157 + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
7158 + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
7159 + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
7160 + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
7161 + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
7162 + *
7163 + *
7164 + * version: Security.L.1.0.130
7166 + ***************************************************************************/
7168 + * An OCF module that uses the API for Intel® QuickAssist Technology to do the
7169 + * cryptography.
7171 + * This driver requires the ICP Access Library that is available from Intel in
7172 + * order to operate.
7173 + */
7175 +#include "icp_ocf.h"
7177 +/*This is the call back function for all symmetric cryptographic processes.
7178 + Its main functionality is to free the driver crypto operation structure and to
7179 + call back to OCF*/
7180 +static void
7181 +icp_ocfDrvSymCallBack(void *callbackTag,
7182 + CpaStatus status,
7183 + const CpaCySymOp operationType,
7184 + void *pOpData,
7185 + CpaBufferList * pDstBuffer, CpaBoolean verifyResult);
7187 +/*This function is used to extract crypto processing information from the OCF
7188 + inputs, so that it may be passed on to LAC*/
7189 +static int
7190 +icp_ocfDrvProcessDataSetup(struct icp_drvOpData *drvOpData,
7191 + struct cryptodesc *crp_desc);
7193 +/*This function checks whether the crp_desc argument pertains to a digest or a
7194 + cipher operation*/
7195 +static int icp_ocfDrvAlgCheck(struct cryptodesc *crp_desc);
7197 +/*This function copies all the passed in session context information and stores
7198 + it in a LAC context structure*/
7199 +static int
7200 +icp_ocfDrvAlgorithmSetup(struct cryptoini *cri,
7201 + CpaCySymSessionSetupData * lacSessCtx);
7203 +/*This top level function is used to find a pointer to where a digest is
7204 + stored/needs to be inserted. */
7205 +static uint8_t *icp_ocfDrvDigestPointerFind(struct icp_drvOpData *drvOpData,
7206 + struct cryptodesc *crp_desc);
7208 +/*This function is called when a digest pointer has to be found within a
7209 + SKBUFF.*/
7210 +static inline uint8_t *icp_ocfDrvSkbuffDigestPointerFind(struct icp_drvOpData
7211 + *drvOpData,
7212 + int offsetInBytes,
7213 + uint32_t
7214 + digestSizeInBytes);
7216 +/*The following two functions are called if the SKBUFF digest pointer is not
7217 + positioned in the linear portion of the buffer (i.e. it is in a linked SKBUFF
7218 + or page fragment).*/
7219 +/*This function takes care of the page fragment case.*/
7220 +static inline uint8_t *icp_ocfDrvDigestSkbNRFragsCheck(struct sk_buff *skb,
7221 + struct skb_shared_info
7222 + *skb_shared,
7223 + int offsetInBytes,
7224 + uint32_t
7225 + digestSizeInBytes);
7227 +/*This function takes care of the linked list case.*/
7228 +static inline uint8_t *icp_ocfDrvDigestSkbFragListCheck(struct sk_buff *skb,
7229 + struct skb_shared_info
7230 + *skb_shared,
7231 + int offsetInBytes,
7232 + uint32_t
7233 + digestSizeInBytes);
7235 +/*This function is used to free an OCF->OCF_DRV session object*/
7236 +static void icp_ocfDrvFreeOCFSession(struct icp_drvSessionData *sessionData);
7238 +/*max IOV buffs supported in a UIO structure*/
7239 +#define NUM_IOV_SUPPORTED (1)
7241 +/* Name : icp_ocfDrvSymCallBack
7243 + * Description : When this function returns it signifies that the LAC
7244 + * component has completed the relevant symmetric operation.
7246 + * Notes : The callbackTag is a pointer to an icp_drvOpData. This memory
7247 + * object was passed to LAC for the cryptographic processing and contains all
7248 + * the relevant information for cleaning up buffer handles etc. so that the
7249 + * OCF Tolapai Driver portion of this crypto operation can be fully completed.
7250 + */
7251 +static void
7252 +icp_ocfDrvSymCallBack(void *callbackTag,
7253 + CpaStatus status,
7254 + const CpaCySymOp operationType,
7255 + void *pOpData,
7256 + CpaBufferList * pDstBuffer, CpaBoolean verifyResult)
7258 + struct cryptop *crp = NULL;
7259 + struct icp_drvOpData *temp_drvOpData =
7260 + (struct icp_drvOpData *)callbackTag;
7261 + uint64_t *tempBasePtr = NULL;
7262 + uint32_t tempLen = 0;
7264 + if (NULL == temp_drvOpData) {
7265 + DPRINTK("%s(): The callback from the LAC component"
7266 + " has failed due to Null userOpaque data"
7267 + "(status == %d).\n", __FUNCTION__, status);
7268 + DPRINTK("%s(): Unable to call OCF back! \n", __FUNCTION__);
7269 + return;
7272 + crp = temp_drvOpData->crp;
7273 + crp->crp_etype = ICP_OCF_DRV_NO_CRYPTO_PROCESS_ERROR;
7275 + if (NULL == pOpData) {
7276 + DPRINTK("%s(): The callback from the LAC component"
7277 + " has failed due to Null Symmetric Op data"
7278 + "(status == %d).\n", __FUNCTION__, status);
7279 + crp->crp_etype = ECANCELED;
7280 + crypto_done(crp);
7281 + return;
7284 + if (NULL == pDstBuffer) {
7285 + DPRINTK("%s(): The callback from the LAC component"
7286 + " has failed due to Null Dst Bufferlist data"
7287 + "(status == %d).\n", __FUNCTION__, status);
7288 + crp->crp_etype = ECANCELED;
7289 + crypto_done(crp);
7290 + return;
7293 + if (CPA_STATUS_SUCCESS == status) {
7295 + if (temp_drvOpData->bufferType == CRYPTO_F_SKBUF) {
7296 + if (ICP_OCF_DRV_STATUS_SUCCESS !=
7297 + icp_ocfDrvBufferListToSkBuff(pDstBuffer,
7298 + (struct sk_buff **)
7299 + &(crp->crp_buf))) {
7300 + EPRINTK("%s(): BufferList to SkBuff "
7301 + "conversion error.\n", __FUNCTION__);
7302 + crp->crp_etype = EPERM;
7304 + } else {
7305 + icp_ocfDrvBufferListToPtrAndLen(pDstBuffer,
7306 + (void **)&tempBasePtr,
7307 + &tempLen);
7308 + crp->crp_olen = (int)tempLen;
7311 + } else {
7312 + DPRINTK("%s(): The callback from the LAC component has failed"
7313 + "(status == %d).\n", __FUNCTION__, status);
7315 + crp->crp_etype = ECANCELED;
7318 + if (temp_drvOpData->numBufferListArray >
7319 + ICP_OCF_DRV_DEFAULT_BUFFLIST_ARRAYS) {
7320 + kfree(pDstBuffer->pBuffers);
7322 + icp_ocfDrvFreeMetaData(pDstBuffer);
7323 + kmem_cache_free(drvOpData_zone, temp_drvOpData);
7325 + /* Invoke the OCF callback function */
7326 + crypto_done(crp);
7328 + return;
7331 +/* Name : icp_ocfDrvNewSession
7333 + * Description : This function will create a new Driver<->OCF session
7335 + * Notes : LAC session registration happens during the first perform call.
7336 + * That is the first time we know all information about a given session.
7337 + */
7338 +int icp_ocfDrvNewSession(device_t dev, uint32_t * sid, struct cryptoini *cri)
7340 + struct icp_drvSessionData *sessionData = NULL;
7341 + uint32_t delete_session = 0;
7343 + /* The SID passed in should be our driver ID. We can return the */
7344 + /* local ID (LID) which is a unique identifier which we can use */
7345 + /* to differentiate between the encrypt/decrypt LAC session handles */
7346 + if (NULL == sid) {
7347 + EPRINTK("%s(): Invalid input parameters - NULL sid.\n",
7348 + __FUNCTION__);
7349 + return EINVAL;
7352 + if (NULL == cri) {
7353 + EPRINTK("%s(): Invalid input parameters - NULL cryptoini.\n",
7354 + __FUNCTION__);
7355 + return EINVAL;
7358 + if (icp_ocfDrvDriverId != *sid) {
7359 + EPRINTK("%s(): Invalid input parameters - bad driver ID\n",
7360 + __FUNCTION__);
7361 + EPRINTK("\t sid = 0x08%p \n \t cri = 0x08%p \n", sid, cri);
7362 + return EINVAL;
7365 + sessionData = kmem_cache_zalloc(drvSessionData_zone, GFP_ATOMIC);
7366 + if (NULL == sessionData) {
7367 + DPRINTK("%s():No memory for Session Data\n", __FUNCTION__);
7368 + return ENOMEM;
7371 + /*ENTER CRITICAL SECTION */
7372 + spin_lock_bh(&icp_ocfDrvSymSessInfoListSpinlock);
7373 + /*put this check in the spinlock so no new sessions can be added to the
7374 + linked list when we are exiting */
7375 + if (CPA_TRUE == atomic_read(&icp_ocfDrvIsExiting)) {
7376 + delete_session++;
7378 + } else if (NO_OCF_TO_DRV_MAX_SESSIONS != max_sessions) {
7379 + if (atomic_read(&num_ocf_to_drv_registered_sessions) >=
7380 + (max_sessions -
7381 + atomic_read(&lac_session_failed_dereg_count))) {
7382 + delete_session++;
7383 + } else {
7384 + atomic_inc(&num_ocf_to_drv_registered_sessions);
7385 + /* Add to session data linked list */
7386 + list_add(&(sessionData->listNode),
7387 + &icp_ocfDrvGlobalSymListHead);
7390 + } else if (NO_OCF_TO_DRV_MAX_SESSIONS == max_sessions) {
7391 + list_add(&(sessionData->listNode),
7392 + &icp_ocfDrvGlobalSymListHead);
7395 + sessionData->inUse = ICP_SESSION_INITIALISED;
7397 + /*EXIT CRITICAL SECTION */
7398 + spin_unlock_bh(&icp_ocfDrvSymSessInfoListSpinlock);
7400 + if (delete_session) {
7401 + DPRINTK("%s():No Session handles available\n", __FUNCTION__);
7402 + kmem_cache_free(drvSessionData_zone, sessionData);
7403 + return EPERM;
7406 + if (ICP_OCF_DRV_STATUS_SUCCESS !=
7407 + icp_ocfDrvAlgorithmSetup(cri, &(sessionData->lacSessCtx))) {
7408 + DPRINTK("%s():algorithm not supported\n", __FUNCTION__);
7409 + icp_ocfDrvFreeOCFSession(sessionData);
7410 + return EINVAL;
7413 + if (cri->cri_next) {
7414 + if (cri->cri_next->cri_next != NULL) {
7415 + DPRINTK("%s():only two chained algorithms supported\n",
7416 + __FUNCTION__);
7417 + icp_ocfDrvFreeOCFSession(sessionData);
7418 + return EPERM;
7421 + if (ICP_OCF_DRV_STATUS_SUCCESS !=
7422 + icp_ocfDrvAlgorithmSetup(cri->cri_next,
7423 + &(sessionData->lacSessCtx))) {
7424 + DPRINTK("%s():second algorithm not supported\n",
7425 + __FUNCTION__);
7426 + icp_ocfDrvFreeOCFSession(sessionData);
7427 + return EINVAL;
7430 + sessionData->lacSessCtx.symOperation =
7431 + CPA_CY_SYM_OP_ALGORITHM_CHAINING;
7434 + *sid = (uint32_t) sessionData;
7436 + return ICP_OCF_DRV_STATUS_SUCCESS;
7439 +/* Name : icp_ocfDrvAlgorithmSetup
7441 + * Description : This function builds the session context data from the
7442 + * information supplied through OCF. Algorithm chain order and whether the
7443 + * session is Encrypt/Decrypt can only be found out at perform time however, so
7444 + * the session is registered with LAC at that time.
7445 + */
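+/* For reference, a sketch of the kind of cryptoini chain this function is
+   handed by OCF, e.g. AES-CBC chained with HMAC-SHA1 (cipher_key and auth_key
+   are placeholder buffers): */
+#if 0
+	struct cryptoini crie, cria;
+
+	memset(&crie, 0, sizeof(crie));
+	memset(&cria, 0, sizeof(cria));
+	crie.cri_alg = CRYPTO_AES_CBC;
+	crie.cri_klen = 128;		/* key length in bits */
+	crie.cri_key = cipher_key;
+	cria.cri_alg = CRYPTO_SHA1_HMAC;
+	cria.cri_klen = 160;		/* key length in bits */
+	cria.cri_key = auth_key;
+	crie.cri_next = &cria;		/* at most two chained algorithms */
+#endif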
7446 +static int
7447 +icp_ocfDrvAlgorithmSetup(struct cryptoini *cri,
7448 + CpaCySymSessionSetupData * lacSessCtx)
7451 + lacSessCtx->sessionPriority = CPA_CY_PRIORITY_NORMAL;
7453 + switch (cri->cri_alg) {
7455 + case CRYPTO_NULL_CBC:
7456 + DPRINTK("%s(): NULL CBC\n", __FUNCTION__);
7457 + lacSessCtx->symOperation = CPA_CY_SYM_OP_CIPHER;
7458 + lacSessCtx->cipherSetupData.cipherAlgorithm =
7459 + CPA_CY_SYM_CIPHER_NULL;
7460 + lacSessCtx->cipherSetupData.cipherKeyLenInBytes =
7461 + cri->cri_klen / NUM_BITS_IN_BYTE;
7462 + lacSessCtx->cipherSetupData.pCipherKey = cri->cri_key;
7463 + break;
7465 + case CRYPTO_DES_CBC:
7466 + DPRINTK("%s(): DES CBC\n", __FUNCTION__);
7467 + lacSessCtx->symOperation = CPA_CY_SYM_OP_CIPHER;
7468 + lacSessCtx->cipherSetupData.cipherAlgorithm =
7469 + CPA_CY_SYM_CIPHER_DES_CBC;
7470 + lacSessCtx->cipherSetupData.cipherKeyLenInBytes =
7471 + cri->cri_klen / NUM_BITS_IN_BYTE;
7472 + lacSessCtx->cipherSetupData.pCipherKey = cri->cri_key;
7473 + break;
7475 + case CRYPTO_3DES_CBC:
7476 + DPRINTK("%s(): 3DES CBC\n", __FUNCTION__);
7477 + lacSessCtx->symOperation = CPA_CY_SYM_OP_CIPHER;
7478 + lacSessCtx->cipherSetupData.cipherAlgorithm =
7479 + CPA_CY_SYM_CIPHER_3DES_CBC;
7480 + lacSessCtx->cipherSetupData.cipherKeyLenInBytes =
7481 + cri->cri_klen / NUM_BITS_IN_BYTE;
7482 + lacSessCtx->cipherSetupData.pCipherKey = cri->cri_key;
7483 + break;
7485 + case CRYPTO_AES_CBC:
7486 + DPRINTK("%s(): AES CBC\n", __FUNCTION__);
7487 + lacSessCtx->symOperation = CPA_CY_SYM_OP_CIPHER;
7488 + lacSessCtx->cipherSetupData.cipherAlgorithm =
7489 + CPA_CY_SYM_CIPHER_AES_CBC;
7490 + lacSessCtx->cipherSetupData.cipherKeyLenInBytes =
7491 + cri->cri_klen / NUM_BITS_IN_BYTE;
7492 + lacSessCtx->cipherSetupData.pCipherKey = cri->cri_key;
7493 + break;
7495 + case CRYPTO_ARC4:
7496 + DPRINTK("%s(): ARC4\n", __FUNCTION__);
7497 + lacSessCtx->symOperation = CPA_CY_SYM_OP_CIPHER;
7498 + lacSessCtx->cipherSetupData.cipherAlgorithm =
7499 + CPA_CY_SYM_CIPHER_ARC4;
7500 + lacSessCtx->cipherSetupData.cipherKeyLenInBytes =
7501 + cri->cri_klen / NUM_BITS_IN_BYTE;
7502 + lacSessCtx->cipherSetupData.pCipherKey = cri->cri_key;
7503 + break;
7505 + case CRYPTO_SHA1:
7506 + DPRINTK("%s(): SHA1\n", __FUNCTION__);
7507 + lacSessCtx->symOperation = CPA_CY_SYM_OP_HASH;
7508 + lacSessCtx->hashSetupData.hashAlgorithm = CPA_CY_SYM_HASH_SHA1;
7509 + lacSessCtx->hashSetupData.hashMode = CPA_CY_SYM_HASH_MODE_PLAIN;
7510 + lacSessCtx->hashSetupData.digestResultLenInBytes =
7511 + (cri->cri_mlen ?
7512 + cri->cri_mlen : ICP_SHA1_DIGEST_SIZE_IN_BYTES);
7514 + break;
7516 + case CRYPTO_SHA1_HMAC:
7517 + DPRINTK("%s(): SHA1_HMAC\n", __FUNCTION__);
7518 + lacSessCtx->symOperation = CPA_CY_SYM_OP_HASH;
7519 + lacSessCtx->hashSetupData.hashAlgorithm = CPA_CY_SYM_HASH_SHA1;
7520 + lacSessCtx->hashSetupData.hashMode = CPA_CY_SYM_HASH_MODE_AUTH;
7521 + lacSessCtx->hashSetupData.digestResultLenInBytes =
7522 + (cri->cri_mlen ?
7523 + cri->cri_mlen : ICP_SHA1_DIGEST_SIZE_IN_BYTES);
7524 + lacSessCtx->hashSetupData.authModeSetupData.authKey =
7525 + cri->cri_key;
7526 + lacSessCtx->hashSetupData.authModeSetupData.authKeyLenInBytes =
7527 + cri->cri_klen / NUM_BITS_IN_BYTE;
7528 + lacSessCtx->hashSetupData.authModeSetupData.aadLenInBytes = 0;
7530 + break;
7532 + case CRYPTO_SHA2_256:
7533 + DPRINTK("%s(): SHA256\n", __FUNCTION__);
7534 + lacSessCtx->symOperation = CPA_CY_SYM_OP_HASH;
7535 + lacSessCtx->hashSetupData.hashAlgorithm =
7536 + CPA_CY_SYM_HASH_SHA256;
7537 + lacSessCtx->hashSetupData.hashMode = CPA_CY_SYM_HASH_MODE_PLAIN;
7538 + lacSessCtx->hashSetupData.digestResultLenInBytes =
7539 + (cri->cri_mlen ?
7540 + cri->cri_mlen : ICP_SHA256_DIGEST_SIZE_IN_BYTES);
7542 + break;
7544 + case CRYPTO_SHA2_256_HMAC:
7545 + DPRINTK("%s(): SHA256_HMAC\n", __FUNCTION__);
7546 + lacSessCtx->symOperation = CPA_CY_SYM_OP_HASH;
7547 + lacSessCtx->hashSetupData.hashAlgorithm =
7548 + CPA_CY_SYM_HASH_SHA256;
7549 + lacSessCtx->hashSetupData.hashMode = CPA_CY_SYM_HASH_MODE_AUTH;
7550 + lacSessCtx->hashSetupData.digestResultLenInBytes =
7551 + (cri->cri_mlen ?
7552 + cri->cri_mlen : ICP_SHA256_DIGEST_SIZE_IN_BYTES);
7553 + lacSessCtx->hashSetupData.authModeSetupData.authKey =
7554 + cri->cri_key;
7555 + lacSessCtx->hashSetupData.authModeSetupData.authKeyLenInBytes =
7556 + cri->cri_klen / NUM_BITS_IN_BYTE;
7557 + lacSessCtx->hashSetupData.authModeSetupData.aadLenInBytes = 0;
7559 + break;
7561 + case CRYPTO_SHA2_384:
7562 + DPRINTK("%s(): SHA384\n", __FUNCTION__);
7563 + lacSessCtx->symOperation = CPA_CY_SYM_OP_HASH;
7564 + lacSessCtx->hashSetupData.hashAlgorithm =
7565 + CPA_CY_SYM_HASH_SHA384;
7566 + lacSessCtx->hashSetupData.hashMode = CPA_CY_SYM_HASH_MODE_PLAIN;
7567 + lacSessCtx->hashSetupData.digestResultLenInBytes =
7568 + (cri->cri_mlen ?
7569 + cri->cri_mlen : ICP_SHA384_DIGEST_SIZE_IN_BYTES);
7571 + break;
7573 + case CRYPTO_SHA2_384_HMAC:
7574 + DPRINTK("%s(): SHA384_HMAC\n", __FUNCTION__);
7575 + lacSessCtx->symOperation = CPA_CY_SYM_OP_HASH;
7576 + lacSessCtx->hashSetupData.hashAlgorithm =
7577 + CPA_CY_SYM_HASH_SHA384;
7578 + lacSessCtx->hashSetupData.hashMode = CPA_CY_SYM_HASH_MODE_AUTH;
7579 + lacSessCtx->hashSetupData.digestResultLenInBytes =
7580 + (cri->cri_mlen ?
7581 + cri->cri_mlen : ICP_SHA384_DIGEST_SIZE_IN_BYTES);
7582 + lacSessCtx->hashSetupData.authModeSetupData.authKey =
7583 + cri->cri_key;
7584 + lacSessCtx->hashSetupData.authModeSetupData.authKeyLenInBytes =
7585 + cri->cri_klen / NUM_BITS_IN_BYTE;
7586 + lacSessCtx->hashSetupData.authModeSetupData.aadLenInBytes = 0;
7588 + break;
7590 + case CRYPTO_SHA2_512:
7591 + DPRINTK("%s(): SHA512\n", __FUNCTION__);
7592 + lacSessCtx->symOperation = CPA_CY_SYM_OP_HASH;
7593 + lacSessCtx->hashSetupData.hashAlgorithm =
7594 + CPA_CY_SYM_HASH_SHA512;
7595 + lacSessCtx->hashSetupData.hashMode = CPA_CY_SYM_HASH_MODE_PLAIN;
7596 + lacSessCtx->hashSetupData.digestResultLenInBytes =
7597 + (cri->cri_mlen ?
7598 + cri->cri_mlen : ICP_SHA512_DIGEST_SIZE_IN_BYTES);
7600 + break;
7602 + case CRYPTO_SHA2_512_HMAC:
7603 + DPRINTK("%s(): SHA512_HMAC\n", __FUNCTION__);
7604 + lacSessCtx->symOperation = CPA_CY_SYM_OP_HASH;
7605 + lacSessCtx->hashSetupData.hashAlgorithm =
7606 + CPA_CY_SYM_HASH_SHA512;
7607 + lacSessCtx->hashSetupData.hashMode = CPA_CY_SYM_HASH_MODE_AUTH;
7608 + lacSessCtx->hashSetupData.digestResultLenInBytes =
7609 + (cri->cri_mlen ?
7610 + cri->cri_mlen : ICP_SHA512_DIGEST_SIZE_IN_BYTES);
7611 + lacSessCtx->hashSetupData.authModeSetupData.authKey =
7612 + cri->cri_key;
7613 + lacSessCtx->hashSetupData.authModeSetupData.authKeyLenInBytes =
7614 + cri->cri_klen / NUM_BITS_IN_BYTE;
7615 + lacSessCtx->hashSetupData.authModeSetupData.aadLenInBytes = 0;
7617 + break;
7619 + case CRYPTO_MD5:
7620 + DPRINTK("%s(): MD5\n", __FUNCTION__);
7621 + lacSessCtx->symOperation = CPA_CY_SYM_OP_HASH;
7622 + lacSessCtx->hashSetupData.hashAlgorithm = CPA_CY_SYM_HASH_MD5;
7623 + lacSessCtx->hashSetupData.hashMode = CPA_CY_SYM_HASH_MODE_PLAIN;
7624 + lacSessCtx->hashSetupData.digestResultLenInBytes =
7625 + (cri->cri_mlen ?
7626 + cri->cri_mlen : ICP_MD5_DIGEST_SIZE_IN_BYTES);
7628 + break;
7630 + case CRYPTO_MD5_HMAC:
7631 + DPRINTK("%s(): MD5_HMAC\n", __FUNCTION__);
7632 + lacSessCtx->symOperation = CPA_CY_SYM_OP_HASH;
7633 + lacSessCtx->hashSetupData.hashAlgorithm = CPA_CY_SYM_HASH_MD5;
7634 + lacSessCtx->hashSetupData.hashMode = CPA_CY_SYM_HASH_MODE_AUTH;
7635 + lacSessCtx->hashSetupData.digestResultLenInBytes =
7636 + (cri->cri_mlen ?
7637 + cri->cri_mlen : ICP_MD5_DIGEST_SIZE_IN_BYTES);
7638 + lacSessCtx->hashSetupData.authModeSetupData.authKey =
7639 + cri->cri_key;
7640 + lacSessCtx->hashSetupData.authModeSetupData.authKeyLenInBytes =
7641 + cri->cri_klen / NUM_BITS_IN_BYTE;
7642 + lacSessCtx->hashSetupData.authModeSetupData.aadLenInBytes = 0;
7644 + break;
7646 + default:
7647 + DPRINTK("%s(): ALG Setup FAIL\n", __FUNCTION__);
7648 + return ICP_OCF_DRV_STATUS_FAIL;
7651 + return ICP_OCF_DRV_STATUS_SUCCESS;
7654 +/* Name : icp_ocfDrvFreeOCFSession
7656 + * Description : This function deletes all existing Session data representing
7657 + * the Cryptographic session established between OCF and this driver. This
7658 + * also includes freeing the memory allocated for the session context. The
7659 + * session object is also removed from the session linked list.
7660 + */
7661 +static void icp_ocfDrvFreeOCFSession(struct icp_drvSessionData *sessionData)
7664 + sessionData->inUse = ICP_SESSION_DEREGISTERED;
7666 + /*ENTER CRITICAL SECTION */
7667 + spin_lock_bh(&icp_ocfDrvSymSessInfoListSpinlock);
7669 + if (CPA_TRUE == atomic_read(&icp_ocfDrvIsExiting)) {
7670 + /*If the Driver is exiting, allow that process to
7671 + handle any deletions */
7672 + /*EXIT CRITICAL SECTION */
7673 + spin_unlock_bh(&icp_ocfDrvSymSessInfoListSpinlock);
7674 + return;
7677 + atomic_dec(&num_ocf_to_drv_registered_sessions);
7679 + list_del(&(sessionData->listNode));
7681 + /*EXIT CRITICAL SECTION */
7682 + spin_unlock_bh(&icp_ocfDrvSymSessInfoListSpinlock);
7684 + if (NULL != sessionData->sessHandle) {
7685 + kfree(sessionData->sessHandle);
7687 + kmem_cache_free(drvSessionData_zone, sessionData);
7690 +/* Name : icp_ocfDrvFreeLACSession
7692 + * Description : This attempts to deregister a LAC session. If it fails, the
7693 + * deregistration retry function is called.
7694 + */
7695 +int icp_ocfDrvFreeLACSession(device_t dev, uint64_t sid)
7697 + CpaCySymSessionCtx sessionToDeregister = NULL;
7698 + struct icp_drvSessionData *sessionData = NULL;
7699 + CpaStatus lacStatus = CPA_STATUS_SUCCESS;
7700 + int retval = 0;
7702 + sessionData = (struct icp_drvSessionData *)CRYPTO_SESID2LID(sid);
7703 + if (NULL == sessionData) {
7704 + EPRINTK("%s(): OCF Free session called with Null Session ID.\n",
7705 + __FUNCTION__);
7706 + return EINVAL;
7709 + sessionToDeregister = sessionData->sessHandle;
7711 + if (ICP_SESSION_INITIALISED == sessionData->inUse) {
7712 + DPRINTK("%s() Session not registered with LAC\n", __FUNCTION__);
7713 + } else if (NULL == sessionData->sessHandle) {
7714 + EPRINTK
7715 + ("%s(): OCF Free session called with Null Session Handle.\n",
7716 + __FUNCTION__);
7717 + return EINVAL;
7718 + } else {
7719 + lacStatus = cpaCySymRemoveSession(CPA_INSTANCE_HANDLE_SINGLE,
7720 + sessionToDeregister);
7721 + if (CPA_STATUS_RETRY == lacStatus) {
7722 + if (ICP_OCF_DRV_STATUS_SUCCESS !=
7723 + icp_ocfDrvDeregRetry(&sessionToDeregister)) {
7724 + /* the retry function increments the
7725 + dereg failed count */
7726 + DPRINTK("%s(): LAC failed to deregister the "
7727 + "session. (localSessionId= %p)\n",
7728 + __FUNCTION__, sessionToDeregister);
7729 + retval = EPERM;
7732 + } else if (CPA_STATUS_SUCCESS != lacStatus) {
7733 + DPRINTK("%s(): LAC failed to deregister the session. "
7734 + "localSessionId= %p, lacStatus = %d\n",
7735 + __FUNCTION__, sessionToDeregister, lacStatus);
7736 + atomic_inc(&lac_session_failed_dereg_count);
7737 + retval = EPERM;
7741 + icp_ocfDrvFreeOCFSession(sessionData);
7742 + return retval;
7746 +/* Name : icp_ocfDrvAlgCheck
7748 + * Description : This function checks whether the cryptodesc argument pertains
7749 + * to a cipher or a hash operation
7750 + */
7751 +static int icp_ocfDrvAlgCheck(struct cryptodesc *crp_desc)
7754 + if (crp_desc->crd_alg == CRYPTO_3DES_CBC ||
7755 + crp_desc->crd_alg == CRYPTO_AES_CBC ||
7756 + crp_desc->crd_alg == CRYPTO_DES_CBC ||
7757 + crp_desc->crd_alg == CRYPTO_NULL_CBC ||
7758 + crp_desc->crd_alg == CRYPTO_ARC4) {
7759 + return ICP_OCF_DRV_ALG_CIPHER;
7762 + return ICP_OCF_DRV_ALG_HASH;
7765 +/* Name : icp_ocfDrvSymProcess
7767 + * Description : This function will map symmetric functionality calls from OCF
7768 + * to the LAC API. It will also allocate memory to store the session context.
7769 + *
7770 + * Notes: If it is the first perform call for a given session, then a LAC
7771 + * session is registered. After the session is registered, no checks as
7772 + * to whether session parameters have changed (e.g. alg chain order) are
7773 + * done.
7774 + */
7775 +int icp_ocfDrvSymProcess(device_t dev, struct cryptop *crp, int hint)
7777 + struct icp_drvSessionData *sessionData = NULL;
7778 + struct icp_drvOpData *drvOpData = NULL;
7779 + CpaStatus lacStatus = CPA_STATUS_SUCCESS;
7780 + Cpa32U sessionCtxSizeInBytes = 0;
7781 + uint16_t numBufferListArray = 0;
7783 + if (NULL == crp) {
7784 + DPRINTK("%s(): Invalid input parameters, cryptop is NULL\n",
7785 + __FUNCTION__);
7786 + return EINVAL;
7789 + if (NULL == crp->crp_desc) {
7790 + DPRINTK("%s(): Invalid input parameters, no crp_desc attached "
7791 + "to crp\n", __FUNCTION__);
7792 + crp->crp_etype = EINVAL;
7793 + return EINVAL;
7796 + if (NULL == crp->crp_buf) {
7797 + DPRINTK("%s(): Invalid input parameters, no buffer attached "
7798 + "to crp\n", __FUNCTION__);
7799 + crp->crp_etype = EINVAL;
7800 + return EINVAL;
7803 + if (CPA_TRUE == atomic_read(&icp_ocfDrvIsExiting)) {
7804 + crp->crp_etype = EFAULT;
7805 + return EFAULT;
7808 + sessionData = (struct icp_drvSessionData *)
7809 + (CRYPTO_SESID2LID(crp->crp_sid));
7810 + if (NULL == sessionData) {
7811 + DPRINTK("%s(): Invalid input parameters, Null Session ID \n",
7812 + __FUNCTION__);
7813 + crp->crp_etype = EINVAL;
7814 + return EINVAL;
7817 +/*If we get a request against a deregistered session, cancel the operation*/
7818 + if (ICP_SESSION_DEREGISTERED == sessionData->inUse) {
7819 + DPRINTK("%s(): Session ID %d was deregistered \n",
7820 + __FUNCTION__, (int)(CRYPTO_SESID2LID(crp->crp_sid)));
7821 + crp->crp_etype = EFAULT;
7822 + return EFAULT;
7825 +/*If none of the session states are set, then the session structure was either
7826 + not initialised properly or we are reading from a freed memory area (possible
7827 + due to OCF batch mode not removing queued requests against deregistered
7828 + sessions)*/
7829 + if (ICP_SESSION_INITIALISED != sessionData->inUse &&
7830 + ICP_SESSION_RUNNING != sessionData->inUse) {
7831 + DPRINTK("%s(): Session - ID %d - not properly initialised or "
7832 + "memory freed back to the kernel \n",
7833 + __FUNCTION__, (int)(CRYPTO_SESID2LID(crp->crp_sid)));
7834 + crp->crp_etype = EINVAL;
7835 + return EINVAL;
7838 + /*For the below checks, remember error checking is already done in LAC.
7839 + We're not validating inputs subsequent to registration */
7840 + if (sessionData->inUse == ICP_SESSION_INITIALISED) {
7841 + DPRINTK("%s(): Initialising session\n", __FUNCTION__);
7843 + if (NULL != crp->crp_desc->crd_next) {
7844 + if (ICP_OCF_DRV_ALG_CIPHER ==
7845 + icp_ocfDrvAlgCheck(crp->crp_desc)) {
7847 + sessionData->lacSessCtx.algChainOrder =
7848 + CPA_CY_SYM_ALG_CHAIN_ORDER_CIPHER_THEN_HASH;
7850 + if (crp->crp_desc->crd_flags & CRD_F_ENCRYPT) {
7851 + sessionData->lacSessCtx.cipherSetupData.
7852 + cipherDirection =
7853 + CPA_CY_SYM_CIPHER_DIRECTION_ENCRYPT;
7854 + } else {
7855 + sessionData->lacSessCtx.cipherSetupData.
7856 + cipherDirection =
7857 + CPA_CY_SYM_CIPHER_DIRECTION_DECRYPT;
7859 + } else {
7860 + sessionData->lacSessCtx.algChainOrder =
7861 + CPA_CY_SYM_ALG_CHAIN_ORDER_HASH_THEN_CIPHER;
7863 + if (crp->crp_desc->crd_next->crd_flags &
7864 + CRD_F_ENCRYPT) {
7865 + sessionData->lacSessCtx.cipherSetupData.
7866 + cipherDirection =
7867 + CPA_CY_SYM_CIPHER_DIRECTION_ENCRYPT;
7868 + } else {
7869 + sessionData->lacSessCtx.cipherSetupData.
7870 + cipherDirection =
7871 + CPA_CY_SYM_CIPHER_DIRECTION_DECRYPT;
7876 + } else if (ICP_OCF_DRV_ALG_CIPHER ==
7877 + icp_ocfDrvAlgCheck(crp->crp_desc)) {
7878 + if (crp->crp_desc->crd_flags & CRD_F_ENCRYPT) {
7879 + sessionData->lacSessCtx.cipherSetupData.
7880 + cipherDirection =
7881 + CPA_CY_SYM_CIPHER_DIRECTION_ENCRYPT;
7882 + } else {
7883 + sessionData->lacSessCtx.cipherSetupData.
7884 + cipherDirection =
7885 + CPA_CY_SYM_CIPHER_DIRECTION_DECRYPT;
7890 + /*No action required for standalone Auth here */
7892 + /* Allocate memory for SymSessionCtx before the Session Registration */
7893 + lacStatus =
7894 + cpaCySymSessionCtxGetSize(CPA_INSTANCE_HANDLE_SINGLE,
7895 + &(sessionData->lacSessCtx),
7896 + &sessionCtxSizeInBytes);
7897 + if (CPA_STATUS_SUCCESS != lacStatus) {
7898 + EPRINTK("%s(): cpaCySymSessionCtxGetSize failed - %d\n",
7899 + __FUNCTION__, lacStatus);
7900 + return EINVAL;
7902 + sessionData->sessHandle =
7903 + kmalloc(sessionCtxSizeInBytes, GFP_ATOMIC);
7904 + if (NULL == sessionData->sessHandle) {
7905 + EPRINTK
7906 + ("%s(): Failed to get memory for SymSessionCtx\n",
7907 + __FUNCTION__);
7908 + return ENOMEM;
7911 + lacStatus = cpaCySymInitSession(CPA_INSTANCE_HANDLE_SINGLE,
7912 + icp_ocfDrvSymCallBack,
7913 + &(sessionData->lacSessCtx),
7914 + sessionData->sessHandle);
7916 + if (CPA_STATUS_SUCCESS != lacStatus) {
7917 + EPRINTK("%s(): cpaCySymInitSession failed -%d \n",
7918 + __FUNCTION__, lacStatus);
7919 + return EFAULT;
7922 + sessionData->inUse = ICP_SESSION_RUNNING;
7925 + drvOpData = kmem_cache_zalloc(drvOpData_zone, GFP_ATOMIC);
7926 + if (NULL == drvOpData) {
7927 + EPRINTK("%s():Failed to get memory for drvOpData\n",
7928 + __FUNCTION__);
7929 + crp->crp_etype = ENOMEM;
7930 + return ENOMEM;
7933 + drvOpData->lacOpData.pSessionCtx = sessionData->sessHandle;
7934 + drvOpData->digestSizeInBytes = sessionData->lacSessCtx.hashSetupData.
7935 + digestResultLenInBytes;
7936 + drvOpData->crp = crp;
7938 + /* Set the default buffer list array memory allocation */
7939 + drvOpData->srcBuffer.pBuffers = drvOpData->bufferListArray;
7940 + drvOpData->numBufferListArray = ICP_OCF_DRV_DEFAULT_BUFFLIST_ARRAYS;
7942 + /*
7943 +	 * Allocate a larger buffer list array if the number of
7944 +	 * data fragments is more than the default allocation
7945 + */
7946 + if (crp->crp_flags & CRYPTO_F_SKBUF) {
7947 + numBufferListArray = icp_ocfDrvGetSkBuffFrags((struct sk_buff *)
7948 + crp->crp_buf);
7949 + if (ICP_OCF_DRV_DEFAULT_BUFFLIST_ARRAYS < numBufferListArray) {
7950 + DPRINTK("%s() numBufferListArray more than default\n",
7951 + __FUNCTION__);
7952 + drvOpData->srcBuffer.pBuffers = NULL;
7953 + drvOpData->srcBuffer.pBuffers =
7954 + kmalloc(numBufferListArray *
7955 + sizeof(CpaFlatBuffer), GFP_ATOMIC);
7956 + if (NULL == drvOpData->srcBuffer.pBuffers) {
7957 + EPRINTK("%s() Failed to get memory for "
7958 + "pBuffers\n", __FUNCTION__);
7959 + kmem_cache_free(drvOpData_zone, drvOpData);
7960 + crp->crp_etype = ENOMEM;
7961 + return ENOMEM;
7963 + drvOpData->numBufferListArray = numBufferListArray;
7967 + /*
7968 + * Check the type of buffer structure we got and convert it into
7969 + * CpaBufferList format.
7970 + */
7971 + if (crp->crp_flags & CRYPTO_F_SKBUF) {
7972 + if (ICP_OCF_DRV_STATUS_SUCCESS !=
7973 + icp_ocfDrvSkBuffToBufferList((struct sk_buff *)crp->crp_buf,
7974 + &(drvOpData->srcBuffer))) {
7975 + EPRINTK("%s():Failed to translate from SK_BUF "
7976 + "to bufferlist\n", __FUNCTION__);
7977 + crp->crp_etype = EINVAL;
7978 + goto err;
7981 + drvOpData->bufferType = CRYPTO_F_SKBUF;
7982 + } else if (crp->crp_flags & CRYPTO_F_IOV) {
7983 + /* OCF only supports IOV of one entry. */
7984 + if (NUM_IOV_SUPPORTED ==
7985 + ((struct uio *)(crp->crp_buf))->uio_iovcnt) {
7987 + icp_ocfDrvPtrAndLenToBufferList(((struct uio *)(crp->
7988 + crp_buf))->
7989 + uio_iov[0].iov_base,
7990 + ((struct uio *)(crp->
7991 + crp_buf))->
7992 + uio_iov[0].iov_len,
7993 + &(drvOpData->
7994 + srcBuffer));
7996 + drvOpData->bufferType = CRYPTO_F_IOV;
7998 + } else {
7999 + DPRINTK("%s():Unable to handle IOVs with lengths of "
8000 + "greater than one!\n", __FUNCTION__);
8001 + crp->crp_etype = EINVAL;
8002 + goto err;
8005 + } else {
8006 + icp_ocfDrvPtrAndLenToBufferList(crp->crp_buf,
8007 + crp->crp_ilen,
8008 + &(drvOpData->srcBuffer));
8010 + drvOpData->bufferType = CRYPTO_BUF_CONTIG;
8013 + if (ICP_OCF_DRV_STATUS_SUCCESS !=
8014 + icp_ocfDrvProcessDataSetup(drvOpData, drvOpData->crp->crp_desc)) {
8015 + crp->crp_etype = EINVAL;
8016 + goto err;
8019 + if (drvOpData->crp->crp_desc->crd_next != NULL) {
8020 + if (icp_ocfDrvProcessDataSetup(drvOpData, drvOpData->crp->
8021 + crp_desc->crd_next)) {
8022 + crp->crp_etype = EINVAL;
8023 + goto err;
8028 + /* Allocate srcBuffer's private meta data */
8029 + if (ICP_OCF_DRV_STATUS_SUCCESS !=
8030 + icp_ocfDrvAllocMetaData(&(drvOpData->srcBuffer), drvOpData)) {
8031 + EPRINTK("%s() icp_ocfDrvAllocMetaData failed\n", __FUNCTION__);
8032 + memset(&(drvOpData->lacOpData), 0, sizeof(CpaCySymOpData));
8033 + crp->crp_etype = EINVAL;
8034 + goto err;
8037 + /* Perform "in-place" crypto operation */
8038 + lacStatus = cpaCySymPerformOp(CPA_INSTANCE_HANDLE_SINGLE,
8039 + (void *)drvOpData,
8040 + &(drvOpData->lacOpData),
8041 + &(drvOpData->srcBuffer),
8042 + &(drvOpData->srcBuffer),
8043 + &(drvOpData->verifyResult));
8044 + if (CPA_STATUS_RETRY == lacStatus) {
8045 + DPRINTK("%s(): cpaCySymPerformOp retry, lacStatus = %d\n",
8046 + __FUNCTION__, lacStatus);
8047 + memset(&(drvOpData->lacOpData), 0, sizeof(CpaCySymOpData));
8048 + crp->crp_etype = EINVAL;
8049 + goto err;
8051 + if (CPA_STATUS_SUCCESS != lacStatus) {
8052 + EPRINTK("%s(): cpaCySymPerformOp failed, lacStatus = %d\n",
8053 + __FUNCTION__, lacStatus);
8054 + memset(&(drvOpData->lacOpData), 0, sizeof(CpaCySymOpData));
8055 + crp->crp_etype = EINVAL;
8056 + goto err;
8059 + return 0; //OCF success status value
8061 + err:
8062 + if (drvOpData->numBufferListArray > ICP_OCF_DRV_DEFAULT_BUFFLIST_ARRAYS) {
8063 + kfree(drvOpData->srcBuffer.pBuffers);
8065 + icp_ocfDrvFreeMetaData(&(drvOpData->srcBuffer));
8066 + kmem_cache_free(drvOpData_zone, drvOpData);
8068 + return crp->crp_etype;
8071 +/* Name : icp_ocfDrvProcessDataSetup
8073 + * Description : This function will setup all the cryptographic operation data
8074 + * that is required by LAC to execute the operation.
8075 + */
8076 +static int icp_ocfDrvProcessDataSetup(struct icp_drvOpData *drvOpData,
8077 + struct cryptodesc *crp_desc)
8079 + CpaCyRandGenOpData randGenOpData;
8080 + CpaFlatBuffer randData;
8082 + drvOpData->lacOpData.packetType = CPA_CY_SYM_PACKET_TYPE_FULL;
8084 + /* Convert from the cryptop to the ICP LAC crypto parameters */
8085 + switch (crp_desc->crd_alg) {
8086 + case CRYPTO_NULL_CBC:
8087 + drvOpData->lacOpData.
8088 + cryptoStartSrcOffsetInBytes = crp_desc->crd_skip;
8089 + drvOpData->lacOpData.
8090 + messageLenToCipherInBytes = crp_desc->crd_len;
8091 + drvOpData->verifyResult = CPA_FALSE;
8092 + drvOpData->lacOpData.ivLenInBytes = NULL_BLOCK_LEN;
8093 + break;
8094 + case CRYPTO_DES_CBC:
8095 + drvOpData->lacOpData.
8096 + cryptoStartSrcOffsetInBytes = crp_desc->crd_skip;
8097 + drvOpData->lacOpData.
8098 + messageLenToCipherInBytes = crp_desc->crd_len;
8099 + drvOpData->verifyResult = CPA_FALSE;
8100 + drvOpData->lacOpData.ivLenInBytes = DES_BLOCK_LEN;
8101 + break;
8102 + case CRYPTO_3DES_CBC:
8103 + drvOpData->lacOpData.
8104 + cryptoStartSrcOffsetInBytes = crp_desc->crd_skip;
8105 + drvOpData->lacOpData.
8106 + messageLenToCipherInBytes = crp_desc->crd_len;
8107 + drvOpData->verifyResult = CPA_FALSE;
8108 + drvOpData->lacOpData.ivLenInBytes = DES3_BLOCK_LEN;
8109 + break;
8110 + case CRYPTO_ARC4:
8111 + drvOpData->lacOpData.
8112 + cryptoStartSrcOffsetInBytes = crp_desc->crd_skip;
8113 + drvOpData->lacOpData.
8114 + messageLenToCipherInBytes = crp_desc->crd_len;
8115 + drvOpData->verifyResult = CPA_FALSE;
8116 + drvOpData->lacOpData.ivLenInBytes = ARC4_COUNTER_LEN;
8117 + break;
8118 + case CRYPTO_AES_CBC:
8119 + drvOpData->lacOpData.
8120 + cryptoStartSrcOffsetInBytes = crp_desc->crd_skip;
8121 + drvOpData->lacOpData.
8122 + messageLenToCipherInBytes = crp_desc->crd_len;
8123 + drvOpData->verifyResult = CPA_FALSE;
8124 + drvOpData->lacOpData.ivLenInBytes = RIJNDAEL128_BLOCK_LEN;
8125 + break;
8126 + case CRYPTO_SHA1:
8127 + case CRYPTO_SHA1_HMAC:
8128 + case CRYPTO_SHA2_256:
8129 + case CRYPTO_SHA2_256_HMAC:
8130 + case CRYPTO_SHA2_384:
8131 + case CRYPTO_SHA2_384_HMAC:
8132 + case CRYPTO_SHA2_512:
8133 + case CRYPTO_SHA2_512_HMAC:
8134 + case CRYPTO_MD5:
8135 + case CRYPTO_MD5_HMAC:
8136 + drvOpData->lacOpData.
8137 + hashStartSrcOffsetInBytes = crp_desc->crd_skip;
8138 + drvOpData->lacOpData.
8139 + messageLenToHashInBytes = crp_desc->crd_len;
8140 + drvOpData->lacOpData.
8141 + pDigestResult =
8142 + icp_ocfDrvDigestPointerFind(drvOpData, crp_desc);
8144 + if (NULL == drvOpData->lacOpData.pDigestResult) {
8145 + DPRINTK("%s(): ERROR - could not calculate "
8146 + "Digest Result memory address\n", __FUNCTION__);
8147 + return ICP_OCF_DRV_STATUS_FAIL;
8150 + drvOpData->lacOpData.digestVerify = CPA_FALSE;
8151 + break;
8152 + default:
8153 + DPRINTK("%s(): Crypto process error - algorithm not "
8154 + "found \n", __FUNCTION__);
8155 + return ICP_OCF_DRV_STATUS_FAIL;
8158 + /* Figure out what the IV is supposed to be */
8159 + if ((crp_desc->crd_alg == CRYPTO_DES_CBC) ||
8160 + (crp_desc->crd_alg == CRYPTO_3DES_CBC) ||
8161 + (crp_desc->crd_alg == CRYPTO_AES_CBC)) {
8162 + /*ARC4 doesn't use an IV */
8163 + if (crp_desc->crd_flags & CRD_F_IV_EXPLICIT) {
8164 + /* Explicit IV provided to OCF */
8165 + drvOpData->lacOpData.pIv = crp_desc->crd_iv;
8166 + } else {
8167 + /* IV is not explicitly provided to OCF */
8169 + /* Point the LAC OP Data IV pointer to our allocated
8170 + storage location for this session. */
8171 + drvOpData->lacOpData.pIv = drvOpData->ivData;
8173 + if ((crp_desc->crd_flags & CRD_F_ENCRYPT) &&
8174 + ((crp_desc->crd_flags & CRD_F_IV_PRESENT) == 0)) {
8176 + /* Encrypting - need to create IV */
8177 + randGenOpData.generateBits = CPA_TRUE;
8178 + randGenOpData.lenInBytes = MAX_IV_LEN_IN_BYTES;
8180 + icp_ocfDrvPtrAndLenToFlatBuffer((Cpa8U *)
8181 + drvOpData->
8182 + ivData,
8183 + MAX_IV_LEN_IN_BYTES,
8184 + &randData);
8186 + if (CPA_STATUS_SUCCESS !=
8187 + cpaCyRandGen(CPA_INSTANCE_HANDLE_SINGLE,
8188 + NULL, NULL,
8189 + &randGenOpData, &randData)) {
8190 + DPRINTK("%s(): ERROR - Failed to"
8191 + " generate"
8192 + " Initialisation Vector\n",
8193 + __FUNCTION__);
8194 + return ICP_OCF_DRV_STATUS_FAIL;
8197 + crypto_copyback(drvOpData->crp->
8198 + crp_flags,
8199 + drvOpData->crp->crp_buf,
8200 + crp_desc->crd_inject,
8201 + drvOpData->lacOpData.
8202 + ivLenInBytes,
8203 + (caddr_t) (drvOpData->lacOpData.
8204 + pIv));
8205 + } else {
8206 + /* Reading IV from buffer */
8207 + crypto_copydata(drvOpData->crp->
8208 + crp_flags,
8209 + drvOpData->crp->crp_buf,
8210 + crp_desc->crd_inject,
8211 + drvOpData->lacOpData.
8212 + ivLenInBytes,
8213 + (caddr_t) (drvOpData->lacOpData.
8214 + pIv));
8221 + return ICP_OCF_DRV_STATUS_SUCCESS;
8224 +/* Name : icp_ocfDrvDigestPointerFind
8226 + * Description : This function is used to find the memory address where the
8227 + * digest information shall be stored. Input buffer types are an skbuff, iov
8228 + * or flat buffer. The address is found using the buffer data start address and
8229 + * an offset.
8231 + * Note: In the case of a linux skbuff, the digest address may exist within
8232 + * a memory space linked to from the start buffer. These linked memory spaces
8233 + * must be traversed by the data length offset in order to find the digest start
8234 + * address. Whether there is enough space for the digest must also be checked.
8235 + */
8237 +static uint8_t *icp_ocfDrvDigestPointerFind(struct icp_drvOpData *drvOpData,
8238 + struct cryptodesc *crp_desc)
8241 + int offsetInBytes = crp_desc->crd_inject;
8242 + uint32_t digestSizeInBytes = drvOpData->digestSizeInBytes;
8243 + uint8_t *flat_buffer_base = NULL;
8244 + int flat_buffer_length = 0;
8245 + struct sk_buff *skb;
8247 + if (drvOpData->crp->crp_flags & CRYPTO_F_SKBUF) {
8248 + /*check if enough overall space to store hash */
8249 + skb = (struct sk_buff *)(drvOpData->crp->crp_buf);
8251 + if (skb->len < (offsetInBytes + digestSizeInBytes)) {
8252 + DPRINTK("%s() Not enough space for Digest"
8253 + " payload after the offset (%d), "
8254 + "digest size (%d) \n", __FUNCTION__,
8255 + offsetInBytes, digestSizeInBytes);
8256 + return NULL;
8259 + return icp_ocfDrvSkbuffDigestPointerFind(drvOpData,
8260 + offsetInBytes,
8261 + digestSizeInBytes);
8263 + } else {
8264 + /* IOV or flat buffer */
8265 + if (drvOpData->crp->crp_flags & CRYPTO_F_IOV) {
8266 + /*single IOV check has already been done */
8267 + flat_buffer_base = ((struct uio *)
8268 + (drvOpData->crp->crp_buf))->
8269 + uio_iov[0].iov_base;
8270 + flat_buffer_length = ((struct uio *)
8271 + (drvOpData->crp->crp_buf))->
8272 + uio_iov[0].iov_len;
8273 + } else {
8274 + flat_buffer_base = (uint8_t *) drvOpData->crp->crp_buf;
8275 + flat_buffer_length = drvOpData->crp->crp_ilen;
8278 + if (flat_buffer_length < (offsetInBytes + digestSizeInBytes)) {
8279 + DPRINTK("%s() Not enough space for Digest "
8280 + "(IOV/Flat Buffer) \n", __FUNCTION__);
8281 + return NULL;
8282 + } else {
8283 + return (uint8_t *) (flat_buffer_base + offsetInBytes);
8286 + DPRINTK("%s() Should not reach this point\n", __FUNCTION__);
8287 + return NULL;
8290 +/* Name : icp_ocfDrvSkbuffDigestPointerFind
8292 + * Description : This function is used by icp_ocfDrvDigestPointerFind to locate
8293 + * the digest pointer within an skbuff. It checks the linear portion first, then
8294 + * dispatches to the page-fragment or frag-list helpers for non-linear data
8295 + */
8296 +static inline uint8_t *icp_ocfDrvSkbuffDigestPointerFind(struct icp_drvOpData
8297 + *drvOpData,
8298 + int offsetInBytes,
8299 + uint32_t
8300 + digestSizeInBytes)
8303 + struct sk_buff *skb = NULL;
8304 + struct skb_shared_info *skb_shared = NULL;
8306 + uint32_t skbuffisnonlinear = 0;
8308 + uint32_t skbheadlen = 0;
8310 + skb = (struct sk_buff *)(drvOpData->crp->crp_buf);
8311 + skbuffisnonlinear = skb_is_nonlinear(skb);
8313 + skbheadlen = skb_headlen(skb);
8315 + /*Linear skb checks */
8316 + if (skbheadlen > offsetInBytes) {
8318 + if (skbheadlen >= (offsetInBytes + digestSizeInBytes)) {
8319 + return (uint8_t *) (skb->data + offsetInBytes);
8320 + } else {
8321 + DPRINTK("%s() Auth payload stretches "
8322 +				"across contiguous memory\n", __FUNCTION__);
8323 + return NULL;
8325 + } else {
8326 + if (skbuffisnonlinear) {
8327 + offsetInBytes -= skbheadlen;
8328 + } else {
8329 + DPRINTK("%s() Offset outside of buffer boundaries\n",
8330 + __FUNCTION__);
8331 + return NULL;
8335 + /*Non Linear checks */
8336 + skb_shared = (struct skb_shared_info *)(skb->end);
8337 + if (unlikely(NULL == skb_shared)) {
8338 +		DPRINTK("%s() skbuff shared info structure is NULL! \n",
8339 + __FUNCTION__);
8340 + return NULL;
8341 + } else if ((0 != skb_shared->nr_frags) &&
8342 + (skb_shared->frag_list != NULL)) {
8343 + DPRINTK("%s() skbuff nr_frags AND "
8344 + "frag_list not supported \n", __FUNCTION__);
8345 + return NULL;
8348 + /*TCP segmentation more likely than IP fragmentation */
8349 + if (likely(0 != skb_shared->nr_frags)) {
8350 + return icp_ocfDrvDigestSkbNRFragsCheck(skb, skb_shared,
8351 + offsetInBytes,
8352 + digestSizeInBytes);
8353 + } else if (skb_shared->frag_list != NULL) {
8354 + return icp_ocfDrvDigestSkbFragListCheck(skb, skb_shared,
8355 + offsetInBytes,
8356 + digestSizeInBytes);
8357 + } else {
8358 + DPRINTK("%s() skbuff is non-linear but does not show any "
8359 + "linked data\n", __FUNCTION__);
8360 + return NULL;
8365 +/* Name : icp_ocfDrvDigestSkbNRFragsCheck
8367 + * Description : This function is used by icp_ocfDrvSkbuffDigestPointerFind to
8368 + * process the non-linear portion of the skbuff, if the fragmentation type is
8369 + * page fragments
8370 + */
8371 +static inline uint8_t *icp_ocfDrvDigestSkbNRFragsCheck(struct sk_buff *skb,
8372 + struct skb_shared_info
8373 + *skb_shared,
8374 + int offsetInBytes,
8375 + uint32_t
8376 + digestSizeInBytes)
8378 + int i = 0;
8379 + /*nr_frags starts from 1 */
8380 + if (MAX_SKB_FRAGS < skb_shared->nr_frags) {
8381 + DPRINTK("%s error processing skbuff "
8382 + "page frame -- MAX FRAGS exceeded \n", __FUNCTION__);
8383 + return NULL;
8386 + for (i = 0; i < skb_shared->nr_frags; i++) {
8388 + if (offsetInBytes >= skb_shared->frags[i].size) {
8389 + /*offset still greater than data position */
8390 + offsetInBytes -= skb_shared->frags[i].size;
8391 + } else {
8392 + /* found the page containing start of hash */
8394 + if (NULL == skb_shared->frags[i].page) {
8395 + DPRINTK("%s() Linked page is NULL!\n",
8396 + __FUNCTION__);
8397 + return NULL;
8400 + if (offsetInBytes + digestSizeInBytes >
8401 + skb_shared->frags[i].size) {
8402 +				DPRINTK("%s() Auth payload stretches across "
8403 + "contiguous memory\n", __FUNCTION__);
8404 + return NULL;
8405 + } else {
8406 + return (uint8_t *) (skb_shared->frags[i].page +
8407 + skb_shared->frags[i].
8408 + page_offset +
8409 + offsetInBytes);
8412 + /*only possible if internal page sizes are set wrong */
8413 + if (offsetInBytes < 0) {
8414 + DPRINTK("%s error processing skbuff page frame "
8415 + "-- offset calculation \n", __FUNCTION__);
8416 + return NULL;
8419 + /*only possible if internal page sizes are set wrong */
8420 + DPRINTK("%s error processing skbuff page frame "
8421 + "-- ran out of page fragments, remaining offset = %d \n",
8422 + __FUNCTION__, offsetInBytes);
8423 + return NULL;
8427 +/* Name : icp_ocfDrvDigestSkbFragListCheck
8429 + * Description : This function is used by icp_ocfDrvSkbuffDigestPointerFind to
8430 + * process the non-linear portion of the skbuff, if the fragmentation type is
8431 + * a linked list
8432 + *
8433 + */
8434 +static inline uint8_t *icp_ocfDrvDigestSkbFragListCheck(struct sk_buff *skb,
8435 + struct skb_shared_info
8436 + *skb_shared,
8437 + int offsetInBytes,
8438 + uint32_t
8439 + digestSizeInBytes)
8442 + struct sk_buff *skb_list = skb_shared->frag_list;
8443 + /*check added for readability */
8444 + if (NULL == skb_list) {
8445 + DPRINTK("%s error processing skbuff "
8446 + "-- no more list! \n", __FUNCTION__);
8447 + return NULL;
8450 + for (; skb_list; skb_list = skb_list->next) {
8451 + if (NULL == skb_list) {
8452 + DPRINTK("%s error processing skbuff "
8453 + "-- no more list! \n", __FUNCTION__);
8454 + return NULL;
8457 + if (offsetInBytes >= skb_list->len) {
8458 + offsetInBytes -= skb_list->len;
8460 + } else {
8461 + if (offsetInBytes + digestSizeInBytes > skb_list->len) {
8462 +				DPRINTK("%s() Auth payload stretches across "
8463 + "contiguous memory\n", __FUNCTION__);
8464 + return NULL;
8465 + } else {
8466 + return (uint8_t *)
8467 + (skb_list->data + offsetInBytes);
8472 + /*This check is only needed if internal skb_list length values
8473 + are set wrong. */
8474 + if (0 > offsetInBytes) {
8475 + DPRINTK("%s() error processing skbuff object -- offset "
8476 + "calculation \n", __FUNCTION__);
8477 + return NULL;
8482 + /*catch all for unusual for-loop exit.
8483 + This code should never be reached */
8484 + DPRINTK("%s() Catch-All hit! Process error.\n", __FUNCTION__);
8485 + return NULL;
8487 diff -Nur linux-2.6.30.orig/crypto/ocf/ep80579/Makefile linux-2.6.30/crypto/ocf/ep80579/Makefile
8488 --- linux-2.6.30.orig/crypto/ocf/ep80579/Makefile 1970-01-01 01:00:00.000000000 +0100
8489 +++ linux-2.6.30/crypto/ocf/ep80579/Makefile 2009-06-11 10:55:27.000000000 +0200
8490 @@ -0,0 +1,107 @@
8491 +#########################################################################
8493 +# Targets supported
8494 +# all - builds everything and installs
8495 +# install - identical to all
8496 +# depend - build dependencies
8497 +# clean - clears derived objects except the .depend files
8498 +# distclean - clears all derived objects and the .depend file
8500 +# @par
8501 +# This file is provided under a dual BSD/GPLv2 license. When using or
8502 +# redistributing this file, you may do so under either license.
8504 +# GPL LICENSE SUMMARY
8506 +# Copyright(c) 2007,2008 Intel Corporation. All rights reserved.
8508 +# This program is free software; you can redistribute it and/or modify
8509 +# it under the terms of version 2 of the GNU General Public License as
8510 +# published by the Free Software Foundation.
8512 +# This program is distributed in the hope that it will be useful, but
8513 +# WITHOUT ANY WARRANTY; without even the implied warranty of
8514 +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
8515 +# General Public License for more details.
8517 +# You should have received a copy of the GNU General Public License
8518 +# along with this program; if not, write to the Free Software
8519 +# Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
8520 +# The full GNU General Public License is included in this distribution
8521 +# in the file called LICENSE.GPL.
8523 +# Contact Information:
8524 +# Intel Corporation
8526 +# BSD LICENSE
8528 +# Copyright(c) 2007,2008 Intel Corporation. All rights reserved.
8529 +# All rights reserved.
8531 +# Redistribution and use in source and binary forms, with or without
8532 +# modification, are permitted provided that the following conditions
8533 +# are met:
8535 +# * Redistributions of source code must retain the above copyright
8536 +# notice, this list of conditions and the following disclaimer.
8537 +# * Redistributions in binary form must reproduce the above copyright
8538 +# notice, this list of conditions and the following disclaimer in
8539 +# the documentation and/or other materials provided with the
8540 +# distribution.
8541 +# * Neither the name of Intel Corporation nor the names of its
8542 +# contributors may be used to endorse or promote products derived
8543 +# from this software without specific prior written permission.
8545 +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
8546 +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
8547 +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
8548 +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
8549 +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
8550 +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
8551 +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
8552 +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
8553 +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
8554 +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
8555 +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
8558 +# version: Security.L.1.0.130
8559 +############################################################################
8562 +####################Common variables and definitions########################
8564 +# Ensure the ICP_ENV_DIR environment variable is defined.
8565 +ifndef ICP_ENV_DIR
8566 +$(error ICP_ENV_DIR is undefined. Please set the path to your environment makefile \
8567 + "-> setenv ICP_ENV_DIR <path>")
8568 +endif
8570 +#Add your project environment Makefile
8571 +include $(ICP_ENV_DIR)/environment.mk
8573 +#include the makefile with all the default and common Make variable definitions
8574 +include $(ICP_BUILDSYSTEM_PATH)/build_files/common.mk
8576 +#Add the name for the executable, Library or Module output definitions
8577 +OUTPUT_NAME= icp_ocf
8579 +# List of Source Files to be compiled
8580 +SOURCES= icp_common.c icp_sym.c icp_asym.c
8582 +#common includes between all supported OSes
8583 +INCLUDES= -I $(ICP_API_DIR) -I$(ICP_LAC_API) \
8584 +-I$(ICP_OCF_SRC_DIR)
8586 +# The location of the OS-level makefile may need to be changed for your setup.
8587 +include $(ICP_ENV_DIR)/$(ICP_OS)_$(ICP_OS_LEVEL).mk
8589 +# On the line directly below, list the outputs you wish to build,
8590 +# e.g. "lib_static lib_shared exe module", as shown below.
8591 +install: module
8593 +###################Include rules makefiles########################
8594 +include $(ICP_BUILDSYSTEM_PATH)/build_files/rules.mk
8595 +###################End of Rules inclusion#########################
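+# Example invocation (illustrative only -- the paths are placeholders,
+# not part of this tree):
+#   make ICP_ENV_DIR=/path/to/env ICP_BUILDSYSTEM_PATH=/path/to/build install
+# compiles icp_common.c, icp_sym.c and icp_asym.c into the icp_ocf module
+# selected by the "install: module" target above.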
8598 diff -Nur linux-2.6.30.orig/crypto/ocf/hifn/hifn7751.c linux-2.6.30/crypto/ocf/hifn/hifn7751.c
8599 --- linux-2.6.30.orig/crypto/ocf/hifn/hifn7751.c 1970-01-01 01:00:00.000000000 +0100
8600 +++ linux-2.6.30/crypto/ocf/hifn/hifn7751.c 2009-06-11 10:55:27.000000000 +0200
8601 @@ -0,0 +1,2970 @@
8602 +/* $OpenBSD: hifn7751.c,v 1.120 2002/05/17 00:33:34 deraadt Exp $ */
8604 +/*-
8605 + * Invertex AEON / Hifn 7751 driver
8606 + * Copyright (c) 1999 Invertex Inc. All rights reserved.
8607 + * Copyright (c) 1999 Theo de Raadt
8608 + * Copyright (c) 2000-2001 Network Security Technologies, Inc.
8609 + * http://www.netsec.net
8610 + * Copyright (c) 2003 Hifn Inc.
8612 + * This driver is based on a previous driver by Invertex, for which they
8613 + * requested: Please send any comments, feedback, bug-fixes, or feature
8614 + * requests to software@invertex.com.
8616 + * Redistribution and use in source and binary forms, with or without
8617 + * modification, are permitted provided that the following conditions
8618 + * are met:
8620 + * 1. Redistributions of source code must retain the above copyright
8621 + * notice, this list of conditions and the following disclaimer.
8622 + * 2. Redistributions in binary form must reproduce the above copyright
8623 + * notice, this list of conditions and the following disclaimer in the
8624 + * documentation and/or other materials provided with the distribution.
8625 + * 3. The name of the author may not be used to endorse or promote products
8626 + * derived from this software without specific prior written permission.
8628 + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
8629 + * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
8630 + * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
8631 + * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
8632 + * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
8633 + * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
8634 + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
8635 + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
8636 + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
8637 + * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
8639 + * Effort sponsored in part by the Defense Advanced Research Projects
8640 + * Agency (DARPA) and Air Force Research Laboratory, Air Force
8641 + * Materiel Command, USAF, under agreement number F30602-01-2-0537.
8644 +__FBSDID("$FreeBSD: src/sys/dev/hifn/hifn7751.c,v 1.40 2007/03/21 03:42:49 sam Exp $");
8645 + */
8648 + * Driver for various Hifn encryption processors.
8649 + */
8650 +#ifndef AUTOCONF_INCLUDED
8651 +#include <linux/config.h>
8652 +#endif
8653 +#include <linux/module.h>
8654 +#include <linux/init.h>
8655 +#include <linux/list.h>
8656 +#include <linux/slab.h>
8657 +#include <linux/wait.h>
8658 +#include <linux/sched.h>
8659 +#include <linux/pci.h>
8660 +#include <linux/delay.h>
8661 +#include <linux/interrupt.h>
8662 +#include <linux/spinlock.h>
8663 +#include <linux/random.h>
8664 +#include <linux/version.h>
8665 +#include <linux/skbuff.h>
8666 +#include <asm/io.h>
8668 +#include <cryptodev.h>
8669 +#include <uio.h>
8670 +#include <hifn/hifn7751reg.h>
8671 +#include <hifn/hifn7751var.h>
8673 +#if 1
8674 +#define DPRINTF(a...) if (hifn_debug) { \
8675 + printk("%s: ", sc ? \
8676 + device_get_nameunit(sc->sc_dev) : "hifn"); \
8677 + printk(a); \
8678 + } else
8679 +#else
8680 +#define DPRINTF(a...)
8681 +#endif
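+/*
+ * DPRINTF() only produces output when the hifn_debug module parameter
+ * (defined below) is non-zero; it prefixes each message with the device
+ * name taken from the local sc pointer, or "hifn" when sc is NULL.
+ */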
8683 +static inline int
8684 +pci_get_revid(struct pci_dev *dev)
8686 + u8 rid = 0;
8687 + pci_read_config_byte(dev, PCI_REVISION_ID, &rid);
8688 + return rid;
8691 +static struct hifn_stats hifnstats;
8693 +#define debug hifn_debug
8694 +int hifn_debug = 0;
8695 +module_param(hifn_debug, int, 0644);
8696 +MODULE_PARM_DESC(hifn_debug, "Enable debug");
8698 +int hifn_maxbatch = 1;
8699 +module_param(hifn_maxbatch, int, 0644);
8700 +MODULE_PARM_DESC(hifn_maxbatch, "max ops to batch w/o interrupt");
8702 +#ifdef MODULE_PARM
8703 +char *hifn_pllconfig = NULL;
8704 +MODULE_PARM(hifn_pllconfig, "s");
8705 +#else
8706 +char hifn_pllconfig[32]; /* This setting is RO after loading */
8707 +module_param_string(hifn_pllconfig, hifn_pllconfig, 32, 0444);
8708 +#endif
8709 +MODULE_PARM_DESC(hifn_pllconfig, "PLL config, e.g., pci66, ext33, ...");
8711 +#ifdef HIFN_VULCANDEV
8712 +#include <sys/conf.h>
8713 +#include <sys/uio.h>
8715 +static struct cdevsw vulcanpk_cdevsw; /* forward declaration */
8716 +#endif
8719 + * Prototypes and count for the pci_device structure
8720 + */
8721 +static int hifn_probe(struct pci_dev *dev, const struct pci_device_id *ent);
8722 +static void hifn_remove(struct pci_dev *dev);
8724 +static int hifn_newsession(device_t, u_int32_t *, struct cryptoini *);
8725 +static int hifn_freesession(device_t, u_int64_t);
8726 +static int hifn_process(device_t, struct cryptop *, int);
8728 +static device_method_t hifn_methods = {
8729 + /* crypto device methods */
8730 + DEVMETHOD(cryptodev_newsession, hifn_newsession),
8731 + DEVMETHOD(cryptodev_freesession,hifn_freesession),
8732 + DEVMETHOD(cryptodev_process, hifn_process),
8735 +static void hifn_reset_board(struct hifn_softc *, int);
8736 +static void hifn_reset_puc(struct hifn_softc *);
8737 +static void hifn_puc_wait(struct hifn_softc *);
8738 +static int hifn_enable_crypto(struct hifn_softc *);
8739 +static void hifn_set_retry(struct hifn_softc *sc);
8740 +static void hifn_init_dma(struct hifn_softc *);
8741 +static void hifn_init_pci_registers(struct hifn_softc *);
8742 +static int hifn_sramsize(struct hifn_softc *);
8743 +static int hifn_dramsize(struct hifn_softc *);
8744 +static int hifn_ramtype(struct hifn_softc *);
8745 +static void hifn_sessions(struct hifn_softc *);
8746 +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,19)
8747 +static irqreturn_t hifn_intr(int irq, void *arg);
8748 +#else
8749 +static irqreturn_t hifn_intr(int irq, void *arg, struct pt_regs *regs);
8750 +#endif
8751 +static u_int hifn_write_command(struct hifn_command *, u_int8_t *);
8752 +static u_int32_t hifn_next_signature(u_int32_t a, u_int cnt);
8753 +static void hifn_callback(struct hifn_softc *, struct hifn_command *, u_int8_t *);
8754 +static int hifn_crypto(struct hifn_softc *, struct hifn_command *, struct cryptop *, int);
8755 +static int hifn_readramaddr(struct hifn_softc *, int, u_int8_t *);
8756 +static int hifn_writeramaddr(struct hifn_softc *, int, u_int8_t *);
8757 +static int hifn_dmamap_load_src(struct hifn_softc *, struct hifn_command *);
8758 +static int hifn_dmamap_load_dst(struct hifn_softc *, struct hifn_command *);
8759 +static int hifn_init_pubrng(struct hifn_softc *);
8760 +static void hifn_tick(unsigned long arg);
8761 +static void hifn_abort(struct hifn_softc *);
8762 +static void hifn_alloc_slot(struct hifn_softc *, int *, int *, int *, int *);
8764 +static void hifn_write_reg_0(struct hifn_softc *, bus_size_t, u_int32_t);
8765 +static void hifn_write_reg_1(struct hifn_softc *, bus_size_t, u_int32_t);
8767 +#ifdef CONFIG_OCF_RANDOMHARVEST
8768 +static int hifn_read_random(void *arg, u_int32_t *buf, int len);
8769 +#endif
8771 +#define HIFN_MAX_CHIPS 8
8772 +static struct hifn_softc *hifn_chip_idx[HIFN_MAX_CHIPS];
8774 +static __inline u_int32_t
8775 +READ_REG_0(struct hifn_softc *sc, bus_size_t reg)
8777 + u_int32_t v = readl(sc->sc_bar0 + reg);
8778 + sc->sc_bar0_lastreg = (bus_size_t) -1;
8779 + return (v);
8781 +#define WRITE_REG_0(sc, reg, val) hifn_write_reg_0(sc, reg, val)
8783 +static __inline u_int32_t
8784 +READ_REG_1(struct hifn_softc *sc, bus_size_t reg)
8786 + u_int32_t v = readl(sc->sc_bar1 + reg);
8787 + sc->sc_bar1_lastreg = (bus_size_t) -1;
8788 + return (v);
8790 +#define WRITE_REG_1(sc, reg, val) hifn_write_reg_1(sc, reg, val)
8793 + * map in a given buffer (great on some arches :-)
8794 + */
8796 +static int
8797 +pci_map_uio(struct hifn_softc *sc, struct hifn_operand *buf, struct uio *uio)
8799 + struct iovec *iov = uio->uio_iov;
8801 + DPRINTF("%s()\n", __FUNCTION__);
8803 + buf->mapsize = 0;
8804 + for (buf->nsegs = 0; buf->nsegs < uio->uio_iovcnt; ) {
8805 + buf->segs[buf->nsegs].ds_addr = pci_map_single(sc->sc_pcidev,
8806 + iov->iov_base, iov->iov_len,
8807 + PCI_DMA_BIDIRECTIONAL);
8808 + buf->segs[buf->nsegs].ds_len = iov->iov_len;
8809 + buf->mapsize += iov->iov_len;
8810 + iov++;
8811 + buf->nsegs++;
8813 + /* identify this buffer by the first segment */
8814 + buf->map = (void *) buf->segs[0].ds_addr;
8815 + return(0);
8819 + * map in a given sk_buff
8820 + */
8822 +static int
8823 +pci_map_skb(struct hifn_softc *sc,struct hifn_operand *buf,struct sk_buff *skb)
8825 + int i;
8827 + DPRINTF("%s()\n", __FUNCTION__);
8829 + buf->mapsize = 0;
8831 + buf->segs[0].ds_addr = pci_map_single(sc->sc_pcidev,
8832 + skb->data, skb_headlen(skb), PCI_DMA_BIDIRECTIONAL);
8833 + buf->segs[0].ds_len = skb_headlen(skb);
8834 + buf->mapsize += buf->segs[0].ds_len;
8836 + buf->nsegs = 1;
8838 + for (i = 0; i < skb_shinfo(skb)->nr_frags; ) {
8839 + buf->segs[buf->nsegs].ds_len = skb_shinfo(skb)->frags[i].size;
8840 + buf->segs[buf->nsegs].ds_addr = pci_map_single(sc->sc_pcidev,
8841 + page_address(skb_shinfo(skb)->frags[i].page) +
8842 + skb_shinfo(skb)->frags[i].page_offset,
8843 + buf->segs[buf->nsegs].ds_len, PCI_DMA_BIDIRECTIONAL);
8844 + buf->mapsize += buf->segs[buf->nsegs].ds_len;
8845 + buf->nsegs++;
8848 + /* identify this buffer by the first segment */
8849 + buf->map = (void *) buf->segs[0].ds_addr;
8850 + return(0);
8854 + * map in a given contiguous buffer
8855 + */
8857 +static int
8858 +pci_map_buf(struct hifn_softc *sc,struct hifn_operand *buf, void *b, int len)
8860 + DPRINTF("%s()\n", __FUNCTION__);
8862 + buf->mapsize = 0;
8863 + buf->segs[0].ds_addr = pci_map_single(sc->sc_pcidev,
8864 + b, len, PCI_DMA_BIDIRECTIONAL);
8865 + buf->segs[0].ds_len = len;
8866 + buf->mapsize += buf->segs[0].ds_len;
8867 + buf->nsegs = 1;
8869 + /* identify this buffer by the first segment */
8870 + buf->map = (void *) buf->segs[0].ds_addr;
8871 + return(0);
8874 +#if 0 /* not needed at this time */
8875 +static void
8876 +pci_sync_iov(struct hifn_softc *sc, struct hifn_operand *buf)
8878 + int i;
8880 + DPRINTF("%s()\n", __FUNCTION__);
8881 + for (i = 0; i < buf->nsegs; i++)
8882 + pci_dma_sync_single_for_cpu(sc->sc_pcidev, buf->segs[i].ds_addr,
8883 + buf->segs[i].ds_len, PCI_DMA_BIDIRECTIONAL);
8885 +#endif
8887 +static void
8888 +pci_unmap_buf(struct hifn_softc *sc, struct hifn_operand *buf)
8890 + int i;
8891 + DPRINTF("%s()\n", __FUNCTION__);
8892 + for (i = 0; i < buf->nsegs; i++) {
8893 + pci_unmap_single(sc->sc_pcidev, buf->segs[i].ds_addr,
8894 + buf->segs[i].ds_len, PCI_DMA_BIDIRECTIONAL);
8895 + buf->segs[i].ds_addr = 0;
8896 + buf->segs[i].ds_len = 0;
8898 + buf->nsegs = 0;
8899 + buf->mapsize = 0;
8900 + buf->map = 0;
8903 +static const char*
8904 +hifn_partname(struct hifn_softc *sc)
8906 + /* XXX sprintf numbers when not decoded */
8907 + switch (pci_get_vendor(sc->sc_pcidev)) {
8908 + case PCI_VENDOR_HIFN:
8909 + switch (pci_get_device(sc->sc_pcidev)) {
8910 + case PCI_PRODUCT_HIFN_6500: return "Hifn 6500";
8911 + case PCI_PRODUCT_HIFN_7751: return "Hifn 7751";
8912 + case PCI_PRODUCT_HIFN_7811: return "Hifn 7811";
8913 + case PCI_PRODUCT_HIFN_7951: return "Hifn 7951";
8914 + case PCI_PRODUCT_HIFN_7955: return "Hifn 7955";
8915 + case PCI_PRODUCT_HIFN_7956: return "Hifn 7956";
8917 + return "Hifn unknown-part";
8918 + case PCI_VENDOR_INVERTEX:
8919 + switch (pci_get_device(sc->sc_pcidev)) {
8920 + case PCI_PRODUCT_INVERTEX_AEON: return "Invertex AEON";
8922 + return "Invertex unknown-part";
8923 + case PCI_VENDOR_NETSEC:
8924 + switch (pci_get_device(sc->sc_pcidev)) {
8925 + case PCI_PRODUCT_NETSEC_7751: return "NetSec 7751";
8927 + return "NetSec unknown-part";
8929 + return "Unknown-vendor unknown-part";
8932 +static u_int
8933 +checkmaxmin(struct pci_dev *dev, const char *what, u_int v, u_int min, u_int max)
8935 + struct hifn_softc *sc = pci_get_drvdata(dev);
8936 + if (v > max) {
8937 + device_printf(sc->sc_dev, "Warning, %s %u out of range, "
8938 + "using max %u\n", what, v, max);
8939 + v = max;
8940 + } else if (v < min) {
8941 + device_printf(sc->sc_dev, "Warning, %s %u out of range, "
8942 + "using min %u\n", what, v, min);
8943 + v = min;
8945 + return v;
8949 + * Select PLL configuration for 795x parts. This is complicated in
8950 + * that we cannot determine the optimal parameters without user input.
8951 + * The reference clock is derived from an external clock through a
8952 + * multiplier. The external clock is either the host bus (i.e. PCI)
8953 + * or an external clock generator. When using the PCI bus we assume
8954 + * the clock is either 33 or 66 MHz; for an external source we cannot
8955 + * tell the speed.
8957 + * PLL configuration is done with a string: "pci" for PCI bus, or "ext"
8958 + * for an external source, followed by the frequency. We calculate
8959 + * the appropriate multiplier and PLL register contents accordingly.
8960 + * When no configuration is given we default to "pci66" since that
8961 + * always will allow the card to work. If a card is using the PCI
8962 + * bus clock and in a 33MHz slot then it will be operating at half
8963 + * speed until the correct information is provided.
8965 + * We use a default setting of "ext66" because according to Mike Ham
8966 + * of HiFn, almost every board in existence has an external crystal
8967 + * populated at 66 MHz. Using PCI can be a problem on modern motherboards,
8968 + * because PCI33 can have clocks from 0 to 33 MHz, and some have
8969 + * non-PCI-compliant spread-spectrum clocks, which can confuse the PLL.
8970 + */
8971 +static void
8972 +hifn_getpllconfig(struct pci_dev *dev, u_int *pll)
8974 + const char *pllspec = hifn_pllconfig;
8975 + u_int freq, mul, fl, fh;
8976 + u_int32_t pllconfig;
8977 + char *nxt;
8979 + if (pllspec == NULL)
8980 + pllspec = "ext66";
8981 + fl = 33, fh = 66;
8982 + pllconfig = 0;
8983 + if (strncmp(pllspec, "ext", 3) == 0) {
8984 + pllspec += 3;
8985 + pllconfig |= HIFN_PLL_REF_SEL;
8986 + switch (pci_get_device(dev)) {
8987 + case PCI_PRODUCT_HIFN_7955:
8988 + case PCI_PRODUCT_HIFN_7956:
8989 + fl = 20, fh = 100;
8990 + break;
8991 +#ifdef notyet
8992 + case PCI_PRODUCT_HIFN_7954:
8993 + fl = 20, fh = 66;
8994 + break;
8995 +#endif
8997 + } else if (strncmp(pllspec, "pci", 3) == 0)
8998 + pllspec += 3;
8999 + freq = strtoul(pllspec, &nxt, 10);
9000 + if (nxt == pllspec)
9001 + freq = 66;
9002 + else
9003 + freq = checkmaxmin(dev, "frequency", freq, fl, fh);
9004 + /*
9005 + * Calculate multiplier. We target a Fck of 266 MHz,
9006 + * allowing only even values, possibly rounded down.
9007 + * Multipliers > 8 must set the charge pump current.
9008 + */
9009 + mul = checkmaxmin(dev, "PLL divisor", (266 / freq) &~ 1, 2, 12);
9010 + pllconfig |= (mul / 2 - 1) << HIFN_PLL_ND_SHIFT;
9011 + if (mul > 8)
9012 + pllconfig |= HIFN_PLL_IS;
9013 + *pll = pllconfig;
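+ /*
+ * For example, "ext66" gives freq = 66, so (266 / 66) &~ 1 = 4,
+ * the ND field becomes 4/2 - 1 = 1 and, since the multiplier is
+ * not above 8, HIFN_PLL_IS stays clear.
+ */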
9017 + * Attach an interface that successfully probed.
9018 + */
9019 +static int
9020 +hifn_probe(struct pci_dev *dev, const struct pci_device_id *ent)
9022 + struct hifn_softc *sc = NULL;
9023 + char rbase;
9024 + u_int16_t ena, rev;
9025 + int rseg, rc;
9026 + unsigned long mem_start, mem_len;
9027 + static int num_chips = 0;
9029 + DPRINTF("%s()\n", __FUNCTION__);
9031 + if (pci_enable_device(dev) < 0)
9032 + return(-ENODEV);
9034 + if (pci_set_mwi(dev))
9035 + return(-ENODEV);
9037 + if (!dev->irq) {
9038 + printk("hifn: found device with no IRQ assigned, check BIOS settings!\n");
9039 + pci_disable_device(dev);
9040 + return(-ENODEV);
9043 + sc = (struct hifn_softc *) kmalloc(sizeof(*sc), GFP_KERNEL);
9044 + if (!sc)
9045 + return(-ENOMEM);
9046 + memset(sc, 0, sizeof(*sc));
9048 + softc_device_init(sc, "hifn", num_chips, hifn_methods);
9050 + sc->sc_pcidev = dev;
9051 + sc->sc_irq = -1;
9052 + sc->sc_cid = -1;
9053 + sc->sc_num = num_chips++;
9054 + if (sc->sc_num < HIFN_MAX_CHIPS)
9055 + hifn_chip_idx[sc->sc_num] = sc;
9057 + pci_set_drvdata(sc->sc_pcidev, sc);
9059 + spin_lock_init(&sc->sc_mtx);
9061 + /* XXX handle power management */
9063 + /*
9064 + * The 7951 and 795x have a random number generator and
9065 + * public key support; note this.
9066 + */
9067 + if (pci_get_vendor(dev) == PCI_VENDOR_HIFN &&
9068 + (pci_get_device(dev) == PCI_PRODUCT_HIFN_7951 ||
9069 + pci_get_device(dev) == PCI_PRODUCT_HIFN_7955 ||
9070 + pci_get_device(dev) == PCI_PRODUCT_HIFN_7956))
9071 + sc->sc_flags = HIFN_HAS_RNG | HIFN_HAS_PUBLIC;
9072 + /*
9073 + * The 7811 has a random number generator and
9074 + * we also note its identity because of some quirks.
9075 + */
9076 + if (pci_get_vendor(dev) == PCI_VENDOR_HIFN &&
9077 + pci_get_device(dev) == PCI_PRODUCT_HIFN_7811)
9078 + sc->sc_flags |= HIFN_IS_7811 | HIFN_HAS_RNG;
9080 + /*
9081 + * The 795x parts support AES.
9082 + */
9083 + if (pci_get_vendor(dev) == PCI_VENDOR_HIFN &&
9084 + (pci_get_device(dev) == PCI_PRODUCT_HIFN_7955 ||
9085 + pci_get_device(dev) == PCI_PRODUCT_HIFN_7956)) {
9086 + sc->sc_flags |= HIFN_IS_7956 | HIFN_HAS_AES;
9087 + /*
9088 + * Select PLL configuration. This depends on the
9089 + * bus and board design and must be manually configured
9090 + * if the default setting is unacceptable.
9091 + */
9092 + hifn_getpllconfig(dev, &sc->sc_pllconfig);
9095 + /*
9096 + * Set up PCI resources. Note that we record the bus
9097 + * tag and handle for each register mapping; this is
9098 + * used by the READ_REG_0, WRITE_REG_0, READ_REG_1,
9099 + * and WRITE_REG_1 macros throughout the driver.
9100 + */
9101 + mem_start = pci_resource_start(sc->sc_pcidev, 0);
9102 + mem_len = pci_resource_len(sc->sc_pcidev, 0);
9103 + sc->sc_bar0 = (ocf_iomem_t) ioremap(mem_start, mem_len);
9104 + if (!sc->sc_bar0) {
9105 + device_printf(sc->sc_dev, "cannot map bar%d register space\n", 0);
9106 + goto fail;
9108 + sc->sc_bar0_lastreg = (bus_size_t) -1;
9110 + mem_start = pci_resource_start(sc->sc_pcidev, 1);
9111 + mem_len = pci_resource_len(sc->sc_pcidev, 1);
9112 + sc->sc_bar1 = (ocf_iomem_t) ioremap(mem_start, mem_len);
9113 + if (!sc->sc_bar1) {
9114 + device_printf(sc->sc_dev, "cannot map bar%d register space\n", 1);
9115 + goto fail;
9117 + sc->sc_bar1_lastreg = (bus_size_t) -1;
9119 + /* fix up the bus size */
9120 + if (pci_set_dma_mask(dev, DMA_32BIT_MASK)) {
9121 + device_printf(sc->sc_dev, "No usable DMA configuration, aborting.\n");
9122 + goto fail;
9124 + if (pci_set_consistent_dma_mask(dev, DMA_32BIT_MASK)) {
9125 + device_printf(sc->sc_dev,
9126 + "No usable consistent DMA configuration, aborting.\n");
9127 + goto fail;
9130 + hifn_set_retry(sc);
9132 + /*
9133 + * Set up the area where the Hifn DMA descriptors
9134 + * and associated data structures live.
9135 + */
9136 + sc->sc_dma = (struct hifn_dma *) pci_alloc_consistent(dev,
9137 + sizeof(*sc->sc_dma),
9138 + &sc->sc_dma_physaddr);
9139 + if (!sc->sc_dma) {
9140 + device_printf(sc->sc_dev, "cannot alloc sc_dma\n");
9141 + goto fail;
9143 + bzero(sc->sc_dma, sizeof(*sc->sc_dma));
9145 + /*
9146 + * Reset the board and do the ``secret handshake''
9147 + * to enable the crypto support. Then complete the
9148 + * initialization procedure by setting up the interrupt
9149 + * and hooking in to the system crypto support so we'll
9150 + * get used for system services like the crypto device,
9151 + * IPsec, RNG device, etc.
9152 + */
9153 + hifn_reset_board(sc, 0);
9155 + if (hifn_enable_crypto(sc) != 0) {
9156 + device_printf(sc->sc_dev, "crypto enabling failed\n");
9157 + goto fail;
9159 + hifn_reset_puc(sc);
9161 + hifn_init_dma(sc);
9162 + hifn_init_pci_registers(sc);
9164 + pci_set_master(sc->sc_pcidev);
9166 + /* XXX can't dynamically determine ram type for 795x; force dram */
9167 + if (sc->sc_flags & HIFN_IS_7956)
9168 + sc->sc_drammodel = 1;
9169 + else if (hifn_ramtype(sc))
9170 + goto fail;
9172 + if (sc->sc_drammodel == 0)
9173 + hifn_sramsize(sc);
9174 + else
9175 + hifn_dramsize(sc);
9177 + /*
9178 + * Workaround for NetSec 7751 rev A: halve the ram size because two
9179 + * of the address lines were left floating
9180 + */
9181 + if (pci_get_vendor(dev) == PCI_VENDOR_NETSEC &&
9182 + pci_get_device(dev) == PCI_PRODUCT_NETSEC_7751 &&
9183 + pci_get_revid(dev) == 0x61) /*XXX???*/
9184 + sc->sc_ramsize >>= 1;
9186 + /*
9187 + * Arrange the interrupt line.
9188 + */
9189 + rc = request_irq(dev->irq, hifn_intr, IRQF_SHARED, "hifn", sc);
9190 + if (rc) {
9191 + device_printf(sc->sc_dev, "could not map interrupt: %d\n", rc);
9192 + goto fail;
9194 + sc->sc_irq = dev->irq;
9196 + hifn_sessions(sc);
9198 + /*
9199 + * NB: Keep only the low 16 bits; this masks the chip id
9200 + * from the 7951.
9201 + */
9202 + rev = READ_REG_1(sc, HIFN_1_REVID) & 0xffff;
9204 + rseg = sc->sc_ramsize / 1024;
9205 + rbase = 'K';
9206 + if (sc->sc_ramsize >= (1024 * 1024)) {
9207 + rbase = 'M';
9208 + rseg /= 1024;
9210 + device_printf(sc->sc_dev, "%s, rev %u, %d%cB %cram",
9211 + hifn_partname(sc), rev,
9212 + rseg, rbase, sc->sc_drammodel ? 'd' : 's');
9213 + if (sc->sc_flags & HIFN_IS_7956)
9214 + printf(", pll=0x%x<%s clk, %ux mult>",
9215 + sc->sc_pllconfig,
9216 + sc->sc_pllconfig & HIFN_PLL_REF_SEL ? "ext" : "pci",
9217 + 2 + 2*((sc->sc_pllconfig & HIFN_PLL_ND) >> 11));
9218 + printf("\n");
9220 + sc->sc_cid = crypto_get_driverid(softc_get_device(sc),CRYPTOCAP_F_HARDWARE);
9221 + if (sc->sc_cid < 0) {
9222 + device_printf(sc->sc_dev, "could not get crypto driver id\n");
9223 + goto fail;
9226 + WRITE_REG_0(sc, HIFN_0_PUCNFG,
9227 + READ_REG_0(sc, HIFN_0_PUCNFG) | HIFN_PUCNFG_CHIPID);
9228 + ena = READ_REG_0(sc, HIFN_0_PUSTAT) & HIFN_PUSTAT_CHIPENA;
9230 + switch (ena) {
9231 + case HIFN_PUSTAT_ENA_2:
9232 + crypto_register(sc->sc_cid, CRYPTO_3DES_CBC, 0, 0);
9233 + crypto_register(sc->sc_cid, CRYPTO_ARC4, 0, 0);
9234 + if (sc->sc_flags & HIFN_HAS_AES)
9235 + crypto_register(sc->sc_cid, CRYPTO_AES_CBC, 0, 0);
9236 + /*FALLTHROUGH*/
9237 + case HIFN_PUSTAT_ENA_1:
9238 + crypto_register(sc->sc_cid, CRYPTO_MD5, 0, 0);
9239 + crypto_register(sc->sc_cid, CRYPTO_SHA1, 0, 0);
9240 + crypto_register(sc->sc_cid, CRYPTO_MD5_HMAC, 0, 0);
9241 + crypto_register(sc->sc_cid, CRYPTO_SHA1_HMAC, 0, 0);
9242 + crypto_register(sc->sc_cid, CRYPTO_DES_CBC, 0, 0);
9243 + break;
9246 + if (sc->sc_flags & (HIFN_HAS_PUBLIC | HIFN_HAS_RNG))
9247 + hifn_init_pubrng(sc);
9249 + init_timer(&sc->sc_tickto);
9250 + sc->sc_tickto.function = hifn_tick;
9251 + sc->sc_tickto.data = (unsigned long) sc->sc_num;
9252 + mod_timer(&sc->sc_tickto, jiffies + HZ);
9254 + return (0);
9256 +fail:
9257 + if (sc->sc_cid >= 0)
9258 + crypto_unregister_all(sc->sc_cid);
9259 + if (sc->sc_irq != -1)
9260 + free_irq(sc->sc_irq, sc);
9261 + if (sc->sc_dma) {
9262 + /* Turn off DMA polling */
9263 + WRITE_REG_1(sc, HIFN_1_DMA_CNFG, HIFN_DMACNFG_MSTRESET |
9264 + HIFN_DMACNFG_DMARESET | HIFN_DMACNFG_MODE);
9266 + pci_free_consistent(sc->sc_pcidev,
9267 + sizeof(*sc->sc_dma),
9268 + sc->sc_dma, sc->sc_dma_physaddr);
9270 + kfree(sc);
9271 + return (-ENXIO);
9275 + * Detach an interface that successfully probed.
9276 + */
9277 +static void
9278 +hifn_remove(struct pci_dev *dev)
9280 + struct hifn_softc *sc = pci_get_drvdata(dev);
9281 + unsigned long l_flags;
9283 + DPRINTF("%s()\n", __FUNCTION__);
9285 + KASSERT(sc != NULL, ("hifn_detach: null software carrier!"));
9287 + /* disable interrupts */
9288 + HIFN_LOCK(sc);
9289 + WRITE_REG_1(sc, HIFN_1_DMA_IER, 0);
9290 + HIFN_UNLOCK(sc);
9292 + /*XXX other resources */
9293 + del_timer_sync(&sc->sc_tickto);
9295 + /* Turn off DMA polling */
9296 + WRITE_REG_1(sc, HIFN_1_DMA_CNFG, HIFN_DMACNFG_MSTRESET |
9297 + HIFN_DMACNFG_DMARESET | HIFN_DMACNFG_MODE);
9299 + crypto_unregister_all(sc->sc_cid);
9301 + free_irq(sc->sc_irq, sc);
9303 + pci_free_consistent(sc->sc_pcidev, sizeof(*sc->sc_dma),
9304 + sc->sc_dma, sc->sc_dma_physaddr);
9308 +static int
9309 +hifn_init_pubrng(struct hifn_softc *sc)
9311 + int i;
9313 + DPRINTF("%s()\n", __FUNCTION__);
9315 + if ((sc->sc_flags & HIFN_IS_7811) == 0) {
9316 + /* Reset 7951 public key/rng engine */
9317 + WRITE_REG_1(sc, HIFN_1_PUB_RESET,
9318 + READ_REG_1(sc, HIFN_1_PUB_RESET) | HIFN_PUBRST_RESET);
9320 + for (i = 0; i < 100; i++) {
9321 + DELAY(1000);
9322 + if ((READ_REG_1(sc, HIFN_1_PUB_RESET) &
9323 + HIFN_PUBRST_RESET) == 0)
9324 + break;
9327 + if (i == 100) {
9328 + device_printf(sc->sc_dev, "public key init failed\n");
9329 + return (1);
9333 + /* Enable the rng, if available */
9334 +#ifdef CONFIG_OCF_RANDOMHARVEST
9335 + if (sc->sc_flags & HIFN_HAS_RNG) {
9336 + if (sc->sc_flags & HIFN_IS_7811) {
9337 + u_int32_t r;
9338 + r = READ_REG_1(sc, HIFN_1_7811_RNGENA);
9339 + if (r & HIFN_7811_RNGENA_ENA) {
9340 + r &= ~HIFN_7811_RNGENA_ENA;
9341 + WRITE_REG_1(sc, HIFN_1_7811_RNGENA, r);
9343 + WRITE_REG_1(sc, HIFN_1_7811_RNGCFG,
9344 + HIFN_7811_RNGCFG_DEFL);
9345 + r |= HIFN_7811_RNGENA_ENA;
9346 + WRITE_REG_1(sc, HIFN_1_7811_RNGENA, r);
9347 + } else
9348 + WRITE_REG_1(sc, HIFN_1_RNG_CONFIG,
9349 + READ_REG_1(sc, HIFN_1_RNG_CONFIG) |
9350 + HIFN_RNGCFG_ENA);
9352 + sc->sc_rngfirst = 1;
9353 + crypto_rregister(sc->sc_cid, hifn_read_random, sc);
9355 +#endif
9357 + /* Enable public key engine, if available */
9358 + if (sc->sc_flags & HIFN_HAS_PUBLIC) {
9359 + WRITE_REG_1(sc, HIFN_1_PUB_IEN, HIFN_PUBIEN_DONE);
9360 + sc->sc_dmaier |= HIFN_DMAIER_PUBDONE;
9361 + WRITE_REG_1(sc, HIFN_1_DMA_IER, sc->sc_dmaier);
9362 +#ifdef HIFN_VULCANDEV
9363 + sc->sc_pkdev = make_dev(&vulcanpk_cdevsw, 0,
9364 + UID_ROOT, GID_WHEEL, 0666,
9365 + "vulcanpk");
9366 + sc->sc_pkdev->si_drv1 = sc;
9367 +#endif
9370 + return (0);
9373 +#ifdef CONFIG_OCF_RANDOMHARVEST
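+/*
+ * Pull random words from the on-chip RNG into buf.  On the 7811 the
+ * FIFO is drained two words at a time while the ready bit is set;
+ * other parts return a single word per call.  The first value read
+ * after enabling the RNG is discarded (sc_rngfirst) rather than being
+ * handed to the entropy pool.
+ */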
9374 +static int
9375 +hifn_read_random(void *arg, u_int32_t *buf, int len)
9377 + struct hifn_softc *sc = (struct hifn_softc *) arg;
9378 + u_int32_t sts;
9379 + int i, rc = 0;
9381 + if (len <= 0)
9382 + return rc;
9384 + if (sc->sc_flags & HIFN_IS_7811) {
9385 + /* ONLY VALID ON 7811!!!! */
9386 + for (i = 0; i < 5; i++) {
9387 + sts = READ_REG_1(sc, HIFN_1_7811_RNGSTS);
9388 + if (sts & HIFN_7811_RNGSTS_UFL) {
9389 + device_printf(sc->sc_dev,
9390 + "RNG underflow: disabling\n");
9391 + /* DAVIDM perhaps return -1 */
9392 + break;
9394 + if ((sts & HIFN_7811_RNGSTS_RDY) == 0)
9395 + break;
9397 + /*
9398 + * There are at least two words in the RNG FIFO
9399 + * at this point.
9400 + */
9401 + if (rc < len)
9402 + buf[rc++] = READ_REG_1(sc, HIFN_1_7811_RNGDAT);
9403 + if (rc < len)
9404 + buf[rc++] = READ_REG_1(sc, HIFN_1_7811_RNGDAT);
9406 + } else
9407 + buf[rc++] = READ_REG_1(sc, HIFN_1_RNG_DATA);
9409 + /* NB: discard first data read */
9410 + if (sc->sc_rngfirst) {
9411 + sc->sc_rngfirst = 0;
9412 + rc = 0;
9415 + return(rc);
9417 +#endif /* CONFIG_OCF_RANDOMHARVEST */
9419 +static void
9420 +hifn_puc_wait(struct hifn_softc *sc)
9422 + int i;
9423 + int reg = HIFN_0_PUCTRL;
9425 + if (sc->sc_flags & HIFN_IS_7956) {
9426 + reg = HIFN_0_PUCTRL2;
9429 + for (i = 5000; i > 0; i--) {
9430 + DELAY(1);
9431 + if (!(READ_REG_0(sc, reg) & HIFN_PUCTRL_RESET))
9432 + break;
9434 + if (!i)
9435 + device_printf(sc->sc_dev, "proc unit did not reset(0x%x)\n",
9436 + READ_REG_0(sc, HIFN_0_PUCTRL));
9440 + * Reset the processing unit.
9441 + */
9442 +static void
9443 +hifn_reset_puc(struct hifn_softc *sc)
9445 + /* Reset processing unit */
9446 + int reg = HIFN_0_PUCTRL;
9448 + if (sc->sc_flags & HIFN_IS_7956) {
9449 + reg = HIFN_0_PUCTRL2;
9451 + WRITE_REG_0(sc, reg, HIFN_PUCTRL_DMAENA);
9453 + hifn_puc_wait(sc);
9457 + * Set the Retry and TRDY registers; note that we set them to
9458 + * zero because the 7811 locks up when forced to retry (section
9459 + * 3.6 of "Specification Update SU-0014-04"). Not clear if we
9460 + * should do this for all Hifn parts, but it doesn't seem to hurt.
9461 + */
9462 +static void
9463 +hifn_set_retry(struct hifn_softc *sc)
9465 + DPRINTF("%s()\n", __FUNCTION__);
9466 + /* NB: RETRY only responds to 8-bit reads/writes */
9467 + pci_write_config_byte(sc->sc_pcidev, HIFN_RETRY_TIMEOUT, 0);
9468 + pci_write_config_dword(sc->sc_pcidev, HIFN_TRDY_TIMEOUT, 0);
9472 + * Resets the board. Values in the registers are left as-is
9473 + * from the reset (i.e. initial values are assigned elsewhere).
9474 + */
9475 +static void
9476 +hifn_reset_board(struct hifn_softc *sc, int full)
9478 + u_int32_t reg;
9480 + DPRINTF("%s()\n", __FUNCTION__);
9481 + /*
9482 + * Set polling in the DMA configuration register to zero. 0x7 avoids
9483 + * resetting the board and zeros out the other fields.
9484 + */
9485 + WRITE_REG_1(sc, HIFN_1_DMA_CNFG, HIFN_DMACNFG_MSTRESET |
9486 + HIFN_DMACNFG_DMARESET | HIFN_DMACNFG_MODE);
9488 + /*
9489 + * Now that polling has been disabled, we have to wait 1 ms
9490 + * before resetting the board.
9491 + */
9492 + DELAY(1000);
9494 + /* Reset the DMA unit */
9495 + if (full) {
9496 + WRITE_REG_1(sc, HIFN_1_DMA_CNFG, HIFN_DMACNFG_MODE);
9497 + DELAY(1000);
9498 + } else {
9499 + WRITE_REG_1(sc, HIFN_1_DMA_CNFG,
9500 + HIFN_DMACNFG_MODE | HIFN_DMACNFG_MSTRESET);
9501 + hifn_reset_puc(sc);
9504 + KASSERT(sc->sc_dma != NULL, ("hifn_reset_board: null DMA tag!"));
9505 + bzero(sc->sc_dma, sizeof(*sc->sc_dma));
9507 + /* Bring dma unit out of reset */
9508 + WRITE_REG_1(sc, HIFN_1_DMA_CNFG, HIFN_DMACNFG_MSTRESET |
9509 + HIFN_DMACNFG_DMARESET | HIFN_DMACNFG_MODE);
9511 + hifn_puc_wait(sc);
9512 + hifn_set_retry(sc);
9514 + if (sc->sc_flags & HIFN_IS_7811) {
9515 + for (reg = 0; reg < 1000; reg++) {
9516 + if (READ_REG_1(sc, HIFN_1_7811_MIPSRST) &
9517 + HIFN_MIPSRST_CRAMINIT)
9518 + break;
9519 + DELAY(1000);
9521 + if (reg == 1000)
9522 + device_printf(sc->sc_dev, ": cram init timeout\n");
9523 + } else {
9524 + /* set up DMA configuration register #2 */
9525 + /* turn off all PK and BAR0 swaps */
9526 + WRITE_REG_1(sc, HIFN_1_DMA_CNFG2,
9527 + (3 << HIFN_DMACNFG2_INIT_WRITE_BURST_SHIFT)|
9528 + (3 << HIFN_DMACNFG2_INIT_READ_BURST_SHIFT)|
9529 + (2 << HIFN_DMACNFG2_TGT_WRITE_BURST_SHIFT)|
9530 + (2 << HIFN_DMACNFG2_TGT_READ_BURST_SHIFT));
9534 +static u_int32_t
9535 +hifn_next_signature(u_int32_t a, u_int cnt)
9537 + int i;
9538 + u_int32_t v;
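+ /*
+ * Each iteration masks `a' with 0x80080125, folds the selected
+ * bits down to a single parity bit with the xor cascade below,
+ * and shifts that bit into the low end of `a' -- effectively one
+ * step of a linear feedback shift register.
+ */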
9540 + for (i = 0; i < cnt; i++) {
9542 + /* get the parity */
9543 + v = a & 0x80080125;
9544 + v ^= v >> 16;
9545 + v ^= v >> 8;
9546 + v ^= v >> 4;
9547 + v ^= v >> 2;
9548 + v ^= v >> 1;
9550 + a = (v & 1) ^ (a << 1);
9553 + return a;
9558 + * Checks to see if crypto is already enabled. If crypto isn't enabled,
9559 + * "hifn_enable_crypto" is called to enable it. The check is important,
9560 + * as enabling crypto twice will lock the board.
9561 + */
9562 +static int
9563 +hifn_enable_crypto(struct hifn_softc *sc)
9565 + u_int32_t dmacfg, ramcfg, encl, addr, i;
9566 + char offtbl[] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
9567 + 0x00, 0x00, 0x00, 0x00 };
9569 + DPRINTF("%s()\n", __FUNCTION__);
9571 + ramcfg = READ_REG_0(sc, HIFN_0_PUCNFG);
9572 + dmacfg = READ_REG_1(sc, HIFN_1_DMA_CNFG);
9574 + /*
9575 + * The RAM config register's encrypt level bit needs to be set before
9576 + * every read performed on the encryption level register.
9577 + */
9578 + WRITE_REG_0(sc, HIFN_0_PUCNFG, ramcfg | HIFN_PUCNFG_CHIPID);
9580 + encl = READ_REG_0(sc, HIFN_0_PUSTAT) & HIFN_PUSTAT_CHIPENA;
9582 + /*
9583 + * Make sure we don't re-unlock. Two unlocks kill the chip until the
9584 + * next reboot.
9585 + */
9586 + if (encl == HIFN_PUSTAT_ENA_1 || encl == HIFN_PUSTAT_ENA_2) {
9587 +#ifdef HIFN_DEBUG
9588 + if (hifn_debug)
9589 + device_printf(sc->sc_dev,
9590 + "Strong crypto already enabled!\n");
9591 +#endif
9592 + goto report;
9595 + if (encl != 0 && encl != HIFN_PUSTAT_ENA_0) {
9596 +#ifdef HIFN_DEBUG
9597 + if (hifn_debug)
9598 + device_printf(sc->sc_dev,
9599 + "Unknown encryption level 0x%x\n", encl);
9600 +#endif
9601 + return 1;
9604 + WRITE_REG_1(sc, HIFN_1_DMA_CNFG, HIFN_DMACNFG_UNLOCK |
9605 + HIFN_DMACNFG_MSTRESET | HIFN_DMACNFG_DMARESET | HIFN_DMACNFG_MODE);
9606 + DELAY(1000);
9607 + addr = READ_REG_1(sc, HIFN_UNLOCK_SECRET1);
9608 + DELAY(1000);
9609 + WRITE_REG_1(sc, HIFN_UNLOCK_SECRET2, 0);
9610 + DELAY(1000);
9612 + for (i = 0; i <= 12; i++) {
9613 + addr = hifn_next_signature(addr, offtbl[i] + 0x101);
9614 + WRITE_REG_1(sc, HIFN_UNLOCK_SECRET2, addr);
9616 + DELAY(1000);
9619 + WRITE_REG_0(sc, HIFN_0_PUCNFG, ramcfg | HIFN_PUCNFG_CHIPID);
9620 + encl = READ_REG_0(sc, HIFN_0_PUSTAT) & HIFN_PUSTAT_CHIPENA;
9622 +#ifdef HIFN_DEBUG
9623 + if (hifn_debug) {
9624 + if (encl != HIFN_PUSTAT_ENA_1 && encl != HIFN_PUSTAT_ENA_2)
9625 + device_printf(sc->sc_dev, "Engine is permanently "
9626 + "locked until next system reset!\n");
9627 + else
9628 + device_printf(sc->sc_dev, "Engine enabled "
9629 + "successfully!\n");
9631 +#endif
9633 +report:
9634 + WRITE_REG_0(sc, HIFN_0_PUCNFG, ramcfg);
9635 + WRITE_REG_1(sc, HIFN_1_DMA_CNFG, dmacfg);
9637 + switch (encl) {
9638 + case HIFN_PUSTAT_ENA_1:
9639 + case HIFN_PUSTAT_ENA_2:
9640 + break;
9641 + case HIFN_PUSTAT_ENA_0:
9642 + default:
9643 + device_printf(sc->sc_dev, "disabled\n");
9644 + break;
9647 + return 0;
9651 + * Give initial values to the registers listed in the "Register Space"
9652 + * section of the HIFN Software Development reference manual.
9653 + */
9654 +static void
9655 +hifn_init_pci_registers(struct hifn_softc *sc)
9657 + DPRINTF("%s()\n", __FUNCTION__);
9659 + /* write fixed values needed by the Initialization registers */
9660 + WRITE_REG_0(sc, HIFN_0_PUCTRL, HIFN_PUCTRL_DMAENA);
9661 + WRITE_REG_0(sc, HIFN_0_FIFOCNFG, HIFN_FIFOCNFG_THRESHOLD);
9662 + WRITE_REG_0(sc, HIFN_0_PUIER, HIFN_PUIER_DSTOVER);
9664 + /* write all 4 ring address registers */
9665 + WRITE_REG_1(sc, HIFN_1_DMA_CRAR, sc->sc_dma_physaddr +
9666 + offsetof(struct hifn_dma, cmdr[0]));
9667 + WRITE_REG_1(sc, HIFN_1_DMA_SRAR, sc->sc_dma_physaddr +
9668 + offsetof(struct hifn_dma, srcr[0]));
9669 + WRITE_REG_1(sc, HIFN_1_DMA_DRAR, sc->sc_dma_physaddr +
9670 + offsetof(struct hifn_dma, dstr[0]));
9671 + WRITE_REG_1(sc, HIFN_1_DMA_RRAR, sc->sc_dma_physaddr +
9672 + offsetof(struct hifn_dma, resr[0]));
9674 + DELAY(2000);
9676 + /* write status register */
9677 + WRITE_REG_1(sc, HIFN_1_DMA_CSR,
9678 + HIFN_DMACSR_D_CTRL_DIS | HIFN_DMACSR_R_CTRL_DIS |
9679 + HIFN_DMACSR_S_CTRL_DIS | HIFN_DMACSR_C_CTRL_DIS |
9680 + HIFN_DMACSR_D_ABORT | HIFN_DMACSR_D_DONE | HIFN_DMACSR_D_LAST |
9681 + HIFN_DMACSR_D_WAIT | HIFN_DMACSR_D_OVER |
9682 + HIFN_DMACSR_R_ABORT | HIFN_DMACSR_R_DONE | HIFN_DMACSR_R_LAST |
9683 + HIFN_DMACSR_R_WAIT | HIFN_DMACSR_R_OVER |
9684 + HIFN_DMACSR_S_ABORT | HIFN_DMACSR_S_DONE | HIFN_DMACSR_S_LAST |
9685 + HIFN_DMACSR_S_WAIT |
9686 + HIFN_DMACSR_C_ABORT | HIFN_DMACSR_C_DONE | HIFN_DMACSR_C_LAST |
9687 + HIFN_DMACSR_C_WAIT |
9688 + HIFN_DMACSR_ENGINE |
9689 + ((sc->sc_flags & HIFN_HAS_PUBLIC) ?
9690 + HIFN_DMACSR_PUBDONE : 0) |
9691 + ((sc->sc_flags & HIFN_IS_7811) ?
9692 + HIFN_DMACSR_ILLW | HIFN_DMACSR_ILLR : 0));
9694 + sc->sc_d_busy = sc->sc_r_busy = sc->sc_s_busy = sc->sc_c_busy = 0;
9695 + sc->sc_dmaier |= HIFN_DMAIER_R_DONE | HIFN_DMAIER_C_ABORT |
9696 + HIFN_DMAIER_D_OVER | HIFN_DMAIER_R_OVER |
9697 + HIFN_DMAIER_S_ABORT | HIFN_DMAIER_D_ABORT | HIFN_DMAIER_R_ABORT |
9698 + ((sc->sc_flags & HIFN_IS_7811) ?
9699 + HIFN_DMAIER_ILLW | HIFN_DMAIER_ILLR : 0);
9700 + sc->sc_dmaier &= ~HIFN_DMAIER_C_WAIT;
9701 + WRITE_REG_1(sc, HIFN_1_DMA_IER, sc->sc_dmaier);
9704 + if (sc->sc_flags & HIFN_IS_7956) {
9705 + u_int32_t pll;
9707 + WRITE_REG_0(sc, HIFN_0_PUCNFG, HIFN_PUCNFG_COMPSING |
9708 + HIFN_PUCNFG_TCALLPHASES |
9709 + HIFN_PUCNFG_TCDRVTOTEM | HIFN_PUCNFG_BUS32);
9711 + /* turn off the clocks and ensure bypass is set */
9712 + pll = READ_REG_1(sc, HIFN_1_PLL);
9713 + pll = (pll &~ (HIFN_PLL_PK_CLK_SEL | HIFN_PLL_PE_CLK_SEL))
9714 + | HIFN_PLL_BP | HIFN_PLL_MBSET;
9715 + WRITE_REG_1(sc, HIFN_1_PLL, pll);
9716 + DELAY(10*1000); /* 10ms */
9718 + /* change configuration */
9719 + pll = (pll &~ HIFN_PLL_CONFIG) | sc->sc_pllconfig;
9720 + WRITE_REG_1(sc, HIFN_1_PLL, pll);
9721 + DELAY(10*1000); /* 10ms */
9723 + /* disable bypass */
9724 + pll &= ~HIFN_PLL_BP;
9725 + WRITE_REG_1(sc, HIFN_1_PLL, pll);
9726 + /* enable clocks with new configuration */
9727 + pll |= HIFN_PLL_PK_CLK_SEL | HIFN_PLL_PE_CLK_SEL;
9728 + WRITE_REG_1(sc, HIFN_1_PLL, pll);
9729 + } else {
9730 + WRITE_REG_0(sc, HIFN_0_PUCNFG, HIFN_PUCNFG_COMPSING |
9731 + HIFN_PUCNFG_DRFR_128 | HIFN_PUCNFG_TCALLPHASES |
9732 + HIFN_PUCNFG_TCDRVTOTEM | HIFN_PUCNFG_BUS32 |
9733 + (sc->sc_drammodel ? HIFN_PUCNFG_DRAM : HIFN_PUCNFG_SRAM));
9736 + WRITE_REG_0(sc, HIFN_0_PUISR, HIFN_PUISR_DSTOVER);
9737 + WRITE_REG_1(sc, HIFN_1_DMA_CNFG, HIFN_DMACNFG_MSTRESET |
9738 + HIFN_DMACNFG_DMARESET | HIFN_DMACNFG_MODE | HIFN_DMACNFG_LAST |
9739 + ((HIFN_POLL_FREQUENCY << 16 ) & HIFN_DMACNFG_POLLFREQ) |
9740 + ((HIFN_POLL_SCALAR << 8) & HIFN_DMACNFG_POLLINVAL));
9744 + * The maximum number of sessions supported by the card
9745 + * is dependent on the amount of context ram, which
9746 + * encryption algorithms are enabled, and how compression
9747 + * is configured. This should be configured before this
9748 + * routine is called.
9749 + */
9750 +static void
9751 +hifn_sessions(struct hifn_softc *sc)
9753 + u_int32_t pucnfg;
9754 + int ctxsize;
9756 + DPRINTF("%s()\n", __FUNCTION__);
9758 + pucnfg = READ_REG_0(sc, HIFN_0_PUCNFG);
9760 + if (pucnfg & HIFN_PUCNFG_COMPSING) {
9761 + if (pucnfg & HIFN_PUCNFG_ENCCNFG)
9762 + ctxsize = 128;
9763 + else
9764 + ctxsize = 512;
9765 + /*
9766 + * 7955/7956 has internal context memory of 32K
9767 + */
9768 + if (sc->sc_flags & HIFN_IS_7956)
9769 + sc->sc_maxses = 32768 / ctxsize;
9770 + else
9771 + sc->sc_maxses = 1 +
9772 + ((sc->sc_ramsize - 32768) / ctxsize);
9773 + } else
9774 + sc->sc_maxses = sc->sc_ramsize / 16384;
9776 + if (sc->sc_maxses > 2048)
9777 + sc->sc_maxses = 2048;
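+ /*
+ * For example, a 7955/7956 with HIFN_PUCNFG_COMPSING set and
+ * 512-byte contexts gets 32768 / 512 = 64 sessions; the 2048
+ * clamp above only matters for boards with large external ram.
+ */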
9781 + * Determine ram type (sram or dram). Board should be just out of a reset
9782 + * state when this is called.
9783 + */
9784 +static int
9785 +hifn_ramtype(struct hifn_softc *sc)
9787 + u_int8_t data[8], dataexpect[8];
9788 + int i;
9790 + for (i = 0; i < sizeof(data); i++)
9791 + data[i] = dataexpect[i] = 0x55;
9792 + if (hifn_writeramaddr(sc, 0, data))
9793 + return (-1);
9794 + if (hifn_readramaddr(sc, 0, data))
9795 + return (-1);
9796 + if (bcmp(data, dataexpect, sizeof(data)) != 0) {
9797 + sc->sc_drammodel = 1;
9798 + return (0);
9801 + for (i = 0; i < sizeof(data); i++)
9802 + data[i] = dataexpect[i] = 0xaa;
9803 + if (hifn_writeramaddr(sc, 0, data))
9804 + return (-1);
9805 + if (hifn_readramaddr(sc, 0, data))
9806 + return (-1);
9807 + if (bcmp(data, dataexpect, sizeof(data)) != 0) {
9808 + sc->sc_drammodel = 1;
9809 + return (0);
9812 + return (0);
9815 +#define HIFN_SRAM_MAX (32 << 20)
9816 +#define HIFN_SRAM_STEP_SIZE 16384
9817 +#define HIFN_SRAM_GRANULARITY (HIFN_SRAM_MAX / HIFN_SRAM_STEP_SIZE)
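+/*
+ * With the values above the probe covers at most 32 MB of sram in
+ * 32MB / 16KB = 2048 steps of 16 KB; hifn_sramsize() below writes a
+ * marker at each step and records the end of the last step that still
+ * reads back intact.
+ */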
9819 +static int
9820 +hifn_sramsize(struct hifn_softc *sc)
9822 + u_int32_t a;
9823 + u_int8_t data[8];
9824 + u_int8_t dataexpect[sizeof(data)];
9825 + int32_t i;
9827 + for (i = 0; i < sizeof(data); i++)
9828 + data[i] = dataexpect[i] = i ^ 0x5a;
9830 + for (i = HIFN_SRAM_GRANULARITY - 1; i >= 0; i--) {
9831 + a = i * HIFN_SRAM_STEP_SIZE;
9832 + bcopy(&i, data, sizeof(i));
9833 + hifn_writeramaddr(sc, a, data);
9836 + for (i = 0; i < HIFN_SRAM_GRANULARITY; i++) {
9837 + a = i * HIFN_SRAM_STEP_SIZE;
9838 + bcopy(&i, dataexpect, sizeof(i));
9839 + if (hifn_readramaddr(sc, a, data) < 0)
9840 + return (0);
9841 + if (bcmp(data, dataexpect, sizeof(data)) != 0)
9842 + return (0);
9843 + sc->sc_ramsize = a + HIFN_SRAM_STEP_SIZE;
9846 + return (0);
9850 + * XXX For dram boards, one should really try all of the
9851 + * HIFN_PUCNFG_DSZ_*'s. This just assumes that PUCNFG
9852 + * is already set up correctly.
9853 + */
9854 +static int
9855 +hifn_dramsize(struct hifn_softc *sc)
9857 + u_int32_t cnfg;
9859 + if (sc->sc_flags & HIFN_IS_7956) {
9860 + /*
9861 + * 7955/7956 have a fixed internal ram of only 32K.
9862 + */
9863 + sc->sc_ramsize = 32768;
9864 + } else {
9865 + cnfg = READ_REG_0(sc, HIFN_0_PUCNFG) &
9866 + HIFN_PUCNFG_DRAMMASK;
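+ /*
+ * The DRAMMASK field encodes the size as a power of two:
+ * a field value of 0 gives 1 << 18 = 256 KB and each
+ * increment doubles it.
+ */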
9867 + sc->sc_ramsize = 1 << ((cnfg >> 13) + 18);
9869 + return (0);
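+/*
+ * Reserve the next slot in each of the four descriptor rings.  Each
+ * ring has one extra descriptor at index *_RSIZE that acts as a JUMP
+ * back to entry 0; when an index reaches *_RSIZE the code arms that
+ * jump descriptor and wraps the index around to 0.
+ */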
9872 +static void
9873 +hifn_alloc_slot(struct hifn_softc *sc, int *cmdp, int *srcp, int *dstp, int *resp)
9875 + struct hifn_dma *dma = sc->sc_dma;
9877 + DPRINTF("%s()\n", __FUNCTION__);
9879 + if (dma->cmdi == HIFN_D_CMD_RSIZE) {
9880 + dma->cmdi = 0;
9881 + dma->cmdr[HIFN_D_CMD_RSIZE].l = htole32(HIFN_D_JUMP|HIFN_D_MASKDONEIRQ);
9882 + wmb();
9883 + dma->cmdr[HIFN_D_CMD_RSIZE].l |= htole32(HIFN_D_VALID);
9884 + HIFN_CMDR_SYNC(sc, HIFN_D_CMD_RSIZE,
9885 + BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
9887 + *cmdp = dma->cmdi++;
9888 + dma->cmdk = dma->cmdi;
9890 + if (dma->srci == HIFN_D_SRC_RSIZE) {
9891 + dma->srci = 0;
9892 + dma->srcr[HIFN_D_SRC_RSIZE].l = htole32(HIFN_D_JUMP|HIFN_D_MASKDONEIRQ);
9893 + wmb();
9894 + dma->srcr[HIFN_D_SRC_RSIZE].l |= htole32(HIFN_D_VALID);
9895 + HIFN_SRCR_SYNC(sc, HIFN_D_SRC_RSIZE,
9896 + BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
9898 + *srcp = dma->srci++;
9899 + dma->srck = dma->srci;
9901 + if (dma->dsti == HIFN_D_DST_RSIZE) {
9902 + dma->dsti = 0;
9903 + dma->dstr[HIFN_D_DST_RSIZE].l = htole32(HIFN_D_JUMP|HIFN_D_MASKDONEIRQ);
9904 + wmb();
9905 + dma->dstr[HIFN_D_DST_RSIZE].l |= htole32(HIFN_D_VALID);
9906 + HIFN_DSTR_SYNC(sc, HIFN_D_DST_RSIZE,
9907 + BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
9909 + *dstp = dma->dsti++;
9910 + dma->dstk = dma->dsti;
9912 + if (dma->resi == HIFN_D_RES_RSIZE) {
9913 + dma->resi = 0;
9914 + dma->resr[HIFN_D_RES_RSIZE].l = htole32(HIFN_D_JUMP|HIFN_D_MASKDONEIRQ);
9915 + wmb();
9916 + dma->resr[HIFN_D_RES_RSIZE].l |= htole32(HIFN_D_VALID);
9917 + HIFN_RESR_SYNC(sc, HIFN_D_RES_RSIZE,
9918 + BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
9920 + *resp = dma->resi++;
9921 + dma->resk = dma->resi;
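+/*
+ * Write (and, in hifn_readramaddr() below, read back) an 8-byte test
+ * pattern at a given external-ram address.  The address is split the
+ * way the command format expects: the high bits go into session_num
+ * (addr >> 14) and the low 14 bits into the count field (addr & 0x3fff).
+ */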
9924 +static int
9925 +hifn_writeramaddr(struct hifn_softc *sc, int addr, u_int8_t *data)
9927 + struct hifn_dma *dma = sc->sc_dma;
9928 + hifn_base_command_t wc;
9929 + const u_int32_t masks = HIFN_D_VALID | HIFN_D_LAST | HIFN_D_MASKDONEIRQ;
9930 + int r, cmdi, resi, srci, dsti;
9932 + DPRINTF("%s()\n", __FUNCTION__);
9934 + wc.masks = htole16(3 << 13);
9935 + wc.session_num = htole16(addr >> 14);
9936 + wc.total_source_count = htole16(8);
9937 + wc.total_dest_count = htole16(addr & 0x3fff);
9939 + hifn_alloc_slot(sc, &cmdi, &srci, &dsti, &resi);
9941 + WRITE_REG_1(sc, HIFN_1_DMA_CSR,
9942 + HIFN_DMACSR_C_CTRL_ENA | HIFN_DMACSR_S_CTRL_ENA |
9943 + HIFN_DMACSR_D_CTRL_ENA | HIFN_DMACSR_R_CTRL_ENA);
9945 + /* build write command */
9946 + bzero(dma->command_bufs[cmdi], HIFN_MAX_COMMAND);
9947 + *(hifn_base_command_t *)dma->command_bufs[cmdi] = wc;
9948 + bcopy(data, &dma->test_src, sizeof(dma->test_src));
9950 + dma->srcr[srci].p = htole32(sc->sc_dma_physaddr
9951 + + offsetof(struct hifn_dma, test_src));
9952 + dma->dstr[dsti].p = htole32(sc->sc_dma_physaddr
9953 + + offsetof(struct hifn_dma, test_dst));
9955 + dma->cmdr[cmdi].l = htole32(16 | masks);
9956 + dma->srcr[srci].l = htole32(8 | masks);
9957 + dma->dstr[dsti].l = htole32(4 | masks);
9958 + dma->resr[resi].l = htole32(4 | masks);
9960 + for (r = 10000; r >= 0; r--) {
9961 + DELAY(10);
9962 + if ((dma->resr[resi].l & htole32(HIFN_D_VALID)) == 0)
9963 + break;
9965 + if (r == 0) {
9966 + device_printf(sc->sc_dev, "writeramaddr -- "
9967 + "result[%d](addr %d) still valid\n", resi, addr);
9968 + r = -1;
9969 + return (-1);
9970 + } else
9971 + r = 0;
9973 + WRITE_REG_1(sc, HIFN_1_DMA_CSR,
9974 + HIFN_DMACSR_C_CTRL_DIS | HIFN_DMACSR_S_CTRL_DIS |
9975 + HIFN_DMACSR_D_CTRL_DIS | HIFN_DMACSR_R_CTRL_DIS);
9977 + return (r);
9980 +static int
9981 +hifn_readramaddr(struct hifn_softc *sc, int addr, u_int8_t *data)
9983 + struct hifn_dma *dma = sc->sc_dma;
9984 + hifn_base_command_t rc;
9985 + const u_int32_t masks = HIFN_D_VALID | HIFN_D_LAST | HIFN_D_MASKDONEIRQ;
9986 + int r, cmdi, srci, dsti, resi;
9988 + DPRINTF("%s()\n", __FUNCTION__);
9990 + rc.masks = htole16(2 << 13);
9991 + rc.session_num = htole16(addr >> 14);
9992 + rc.total_source_count = htole16(addr & 0x3fff);
9993 + rc.total_dest_count = htole16(8);
9995 + hifn_alloc_slot(sc, &cmdi, &srci, &dsti, &resi);
9997 + WRITE_REG_1(sc, HIFN_1_DMA_CSR,
9998 + HIFN_DMACSR_C_CTRL_ENA | HIFN_DMACSR_S_CTRL_ENA |
9999 + HIFN_DMACSR_D_CTRL_ENA | HIFN_DMACSR_R_CTRL_ENA);
10001 + bzero(dma->command_bufs[cmdi], HIFN_MAX_COMMAND);
10002 + *(hifn_base_command_t *)dma->command_bufs[cmdi] = rc;
10004 + dma->srcr[srci].p = htole32(sc->sc_dma_physaddr +
10005 + offsetof(struct hifn_dma, test_src));
10006 + dma->test_src = 0;
10007 + dma->dstr[dsti].p = htole32(sc->sc_dma_physaddr +
10008 + offsetof(struct hifn_dma, test_dst));
10009 + dma->test_dst = 0;
10010 + dma->cmdr[cmdi].l = htole32(8 | masks);
10011 + dma->srcr[srci].l = htole32(8 | masks);
10012 + dma->dstr[dsti].l = htole32(8 | masks);
10013 + dma->resr[resi].l = htole32(HIFN_MAX_RESULT | masks);
10015 + for (r = 10000; r >= 0; r--) {
10016 + DELAY(10);
10017 + if ((dma->resr[resi].l & htole32(HIFN_D_VALID)) == 0)
10018 + break;
10020 + if (r == 0) {
10021 + device_printf(sc->sc_dev, "readramaddr -- "
10022 + "result[%d](addr %d) still valid\n", resi, addr);
10023 + r = -1;
10024 + } else {
10025 + r = 0;
10026 + bcopy(&dma->test_dst, data, sizeof(dma->test_dst));
10029 + WRITE_REG_1(sc, HIFN_1_DMA_CSR,
10030 + HIFN_DMACSR_C_CTRL_DIS | HIFN_DMACSR_S_CTRL_DIS |
10031 + HIFN_DMACSR_D_CTRL_DIS | HIFN_DMACSR_R_CTRL_DIS);
10033 + return (r);
10037 + * Initialize the descriptor rings.
10038 + */
10039 +static void
10040 +hifn_init_dma(struct hifn_softc *sc)
10042 + struct hifn_dma *dma = sc->sc_dma;
10043 + int i;
10045 + DPRINTF("%s()\n", __FUNCTION__);
10047 + hifn_set_retry(sc);
10049 + /* initialize static pointer values */
10050 + for (i = 0; i < HIFN_D_CMD_RSIZE; i++)
10051 + dma->cmdr[i].p = htole32(sc->sc_dma_physaddr +
10052 + offsetof(struct hifn_dma, command_bufs[i][0]));
10053 + for (i = 0; i < HIFN_D_RES_RSIZE; i++)
10054 + dma->resr[i].p = htole32(sc->sc_dma_physaddr +
10055 + offsetof(struct hifn_dma, result_bufs[i][0]));
10057 + dma->cmdr[HIFN_D_CMD_RSIZE].p =
10058 + htole32(sc->sc_dma_physaddr + offsetof(struct hifn_dma, cmdr[0]));
10059 + dma->srcr[HIFN_D_SRC_RSIZE].p =
10060 + htole32(sc->sc_dma_physaddr + offsetof(struct hifn_dma, srcr[0]));
10061 + dma->dstr[HIFN_D_DST_RSIZE].p =
10062 + htole32(sc->sc_dma_physaddr + offsetof(struct hifn_dma, dstr[0]));
10063 + dma->resr[HIFN_D_RES_RSIZE].p =
10064 + htole32(sc->sc_dma_physaddr + offsetof(struct hifn_dma, resr[0]));
10066 + dma->cmdu = dma->srcu = dma->dstu = dma->resu = 0;
10067 + dma->cmdi = dma->srci = dma->dsti = dma->resi = 0;
10068 + dma->cmdk = dma->srck = dma->dstk = dma->resk = 0;
10072 + * Writes out the raw command buffer space. Returns the
10073 + * command buffer size.
10074 + */
10075 +static u_int
10076 +hifn_write_command(struct hifn_command *cmd, u_int8_t *buf)
10078 + struct hifn_softc *sc = NULL;
10079 + u_int8_t *buf_pos;
10080 + hifn_base_command_t *base_cmd;
10081 + hifn_mac_command_t *mac_cmd;
10082 + hifn_crypt_command_t *cry_cmd;
10083 + int using_mac, using_crypt, len, ivlen;
10084 + u_int32_t dlen, slen;
10086 + DPRINTF("%s()\n", __FUNCTION__);
10088 + buf_pos = buf;
10089 + using_mac = cmd->base_masks & HIFN_BASE_CMD_MAC;
10090 + using_crypt = cmd->base_masks & HIFN_BASE_CMD_CRYPT;
10092 + base_cmd = (hifn_base_command_t *)buf_pos;
10093 + base_cmd->masks = htole16(cmd->base_masks);
10094 + slen = cmd->src_mapsize;
10095 + if (cmd->sloplen)
10096 + dlen = cmd->dst_mapsize - cmd->sloplen + sizeof(u_int32_t);
10097 + else
10098 + dlen = cmd->dst_mapsize;
10099 + base_cmd->total_source_count = htole16(slen & HIFN_BASE_CMD_LENMASK_LO);
10100 + base_cmd->total_dest_count = htole16(dlen & HIFN_BASE_CMD_LENMASK_LO);
10101 + dlen >>= 16;
10102 + slen >>= 16;
10103 + base_cmd->session_num = htole16(
10104 + ((slen << HIFN_BASE_CMD_SRCLEN_S) & HIFN_BASE_CMD_SRCLEN_M) |
10105 + ((dlen << HIFN_BASE_CMD_DSTLEN_S) & HIFN_BASE_CMD_DSTLEN_M));
10106 + buf_pos += sizeof(hifn_base_command_t);
10108 + if (using_mac) {
10109 + mac_cmd = (hifn_mac_command_t *)buf_pos;
10110 + dlen = cmd->maccrd->crd_len;
10111 + mac_cmd->source_count = htole16(dlen & 0xffff);
10112 + dlen >>= 16;
10113 + mac_cmd->masks = htole16(cmd->mac_masks |
10114 + ((dlen << HIFN_MAC_CMD_SRCLEN_S) & HIFN_MAC_CMD_SRCLEN_M));
10115 + mac_cmd->header_skip = htole16(cmd->maccrd->crd_skip);
10116 + mac_cmd->reserved = 0;
10117 + buf_pos += sizeof(hifn_mac_command_t);
10120 + if (using_crypt) {
10121 + cry_cmd = (hifn_crypt_command_t *)buf_pos;
10122 + dlen = cmd->enccrd->crd_len;
10123 + cry_cmd->source_count = htole16(dlen & 0xffff);
10124 + dlen >>= 16;
10125 + cry_cmd->masks = htole16(cmd->cry_masks |
10126 + ((dlen << HIFN_CRYPT_CMD_SRCLEN_S) & HIFN_CRYPT_CMD_SRCLEN_M));
10127 + cry_cmd->header_skip = htole16(cmd->enccrd->crd_skip);
10128 + cry_cmd->reserved = 0;
10129 + buf_pos += sizeof(hifn_crypt_command_t);
10132 + if (using_mac && cmd->mac_masks & HIFN_MAC_CMD_NEW_KEY) {
10133 + bcopy(cmd->mac, buf_pos, HIFN_MAC_KEY_LENGTH);
10134 + buf_pos += HIFN_MAC_KEY_LENGTH;
10137 + if (using_crypt && cmd->cry_masks & HIFN_CRYPT_CMD_NEW_KEY) {
10138 + switch (cmd->cry_masks & HIFN_CRYPT_CMD_ALG_MASK) {
10139 + case HIFN_CRYPT_CMD_ALG_3DES:
10140 + bcopy(cmd->ck, buf_pos, HIFN_3DES_KEY_LENGTH);
10141 + buf_pos += HIFN_3DES_KEY_LENGTH;
10142 + break;
10143 + case HIFN_CRYPT_CMD_ALG_DES:
10144 + bcopy(cmd->ck, buf_pos, HIFN_DES_KEY_LENGTH);
10145 + buf_pos += HIFN_DES_KEY_LENGTH;
10146 + break;
10147 + case HIFN_CRYPT_CMD_ALG_RC4:
10148 + len = 256;
10149 + do {
10150 + int clen;
10152 + clen = MIN(cmd->cklen, len);
10153 + bcopy(cmd->ck, buf_pos, clen);
10154 + len -= clen;
10155 + buf_pos += clen;
10156 + } while (len > 0);
10157 + bzero(buf_pos, 4);
10158 + buf_pos += 4;
10159 + break;
10160 + case HIFN_CRYPT_CMD_ALG_AES:
10161 + /*
10162 + * AES keys are variable 128, 192 and
10163 + * 256 bits (16, 24 and 32 bytes).
10164 + */
10165 + bcopy(cmd->ck, buf_pos, cmd->cklen);
10166 + buf_pos += cmd->cklen;
10167 + break;
10171 + if (using_crypt && cmd->cry_masks & HIFN_CRYPT_CMD_NEW_IV) {
10172 + switch (cmd->cry_masks & HIFN_CRYPT_CMD_ALG_MASK) {
10173 + case HIFN_CRYPT_CMD_ALG_AES:
10174 + ivlen = HIFN_AES_IV_LENGTH;
10175 + break;
10176 + default:
10177 + ivlen = HIFN_IV_LENGTH;
10178 + break;
10180 + bcopy(cmd->iv, buf_pos, ivlen);
10181 + buf_pos += ivlen;
10184 + if ((cmd->base_masks & (HIFN_BASE_CMD_MAC|HIFN_BASE_CMD_CRYPT)) == 0) {
10185 + bzero(buf_pos, 8);
10186 + buf_pos += 8;
10189 + return (buf_pos - buf);
10192 +static int
10193 +hifn_dmamap_aligned(struct hifn_operand *op)
10195 + struct hifn_softc *sc = NULL;
10196 + int i;
10198 + DPRINTF("%s()\n", __FUNCTION__);
10200 + for (i = 0; i < op->nsegs; i++) {
10201 + if (op->segs[i].ds_addr & 3)
10202 + return (0);
10203 + if ((i != (op->nsegs - 1)) && (op->segs[i].ds_len & 3))
10204 + return (0);
10206 + return (1);
10209 +static __inline int
10210 +hifn_dmamap_dstwrap(struct hifn_softc *sc, int idx)
10212 + struct hifn_dma *dma = sc->sc_dma;
10214 + if (++idx == HIFN_D_DST_RSIZE) {
10215 + dma->dstr[idx].l = htole32(HIFN_D_VALID | HIFN_D_JUMP |
10216 + HIFN_D_MASKDONEIRQ);
10217 + HIFN_DSTR_SYNC(sc, idx,
10218 + BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
10219 + idx = 0;
10221 + return (idx);
10224 +static int
10225 +hifn_dmamap_load_dst(struct hifn_softc *sc, struct hifn_command *cmd)
10227 + struct hifn_dma *dma = sc->sc_dma;
10228 + struct hifn_operand *dst = &cmd->dst;
10229 + u_int32_t p, l;
10230 + int idx, used = 0, i;
10232 + DPRINTF("%s()\n", __FUNCTION__);
10234 + idx = dma->dsti;
10235 + for (i = 0; i < dst->nsegs - 1; i++) {
10236 + dma->dstr[idx].p = htole32(dst->segs[i].ds_addr);
10237 + dma->dstr[idx].l = htole32(HIFN_D_MASKDONEIRQ | dst->segs[i].ds_len);
10238 + wmb();
10239 + dma->dstr[idx].l |= htole32(HIFN_D_VALID);
10240 + HIFN_DSTR_SYNC(sc, idx,
10241 + BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
10242 + used++;
10244 + idx = hifn_dmamap_dstwrap(sc, idx);
10247 + if (cmd->sloplen == 0) {
10248 + p = dst->segs[i].ds_addr;
10249 + l = HIFN_D_MASKDONEIRQ | HIFN_D_LAST |
10250 + dst->segs[i].ds_len;
10251 + } else {
10252 + p = sc->sc_dma_physaddr +
10253 + offsetof(struct hifn_dma, slop[cmd->slopidx]);
10254 + l = HIFN_D_MASKDONEIRQ | HIFN_D_LAST |
10255 + sizeof(u_int32_t);
10257 + if ((dst->segs[i].ds_len - cmd->sloplen) != 0) {
10258 + dma->dstr[idx].p = htole32(dst->segs[i].ds_addr);
10259 + dma->dstr[idx].l = htole32(HIFN_D_MASKDONEIRQ |
10260 + (dst->segs[i].ds_len - cmd->sloplen));
10261 + wmb();
10262 + dma->dstr[idx].l |= htole32(HIFN_D_VALID);
10263 + HIFN_DSTR_SYNC(sc, idx,
10264 + BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
10265 + used++;
10267 + idx = hifn_dmamap_dstwrap(sc, idx);
10270 + dma->dstr[idx].p = htole32(p);
10271 + dma->dstr[idx].l = htole32(l);
10272 + wmb();
10273 + dma->dstr[idx].l |= htole32(HIFN_D_VALID);
10274 + HIFN_DSTR_SYNC(sc, idx, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
10275 + used++;
10277 + idx = hifn_dmamap_dstwrap(sc, idx);
10279 + dma->dsti = idx;
10280 + dma->dstu += used;
10281 + return (idx);
10284 +static __inline int
10285 +hifn_dmamap_srcwrap(struct hifn_softc *sc, int idx)
10287 + struct hifn_dma *dma = sc->sc_dma;
10289 + if (++idx == HIFN_D_SRC_RSIZE) {
10290 + dma->srcr[idx].l = htole32(HIFN_D_VALID |
10291 + HIFN_D_JUMP | HIFN_D_MASKDONEIRQ);
10292 + HIFN_SRCR_SYNC(sc, HIFN_D_SRC_RSIZE,
10293 + BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
10294 + idx = 0;
10296 + return (idx);
10299 +static int
10300 +hifn_dmamap_load_src(struct hifn_softc *sc, struct hifn_command *cmd)
10302 + struct hifn_dma *dma = sc->sc_dma;
10303 + struct hifn_operand *src = &cmd->src;
10304 + int idx, i;
10305 + u_int32_t last = 0;
10307 + DPRINTF("%s()\n", __FUNCTION__);
10309 + idx = dma->srci;
10310 + for (i = 0; i < src->nsegs; i++) {
10311 + if (i == src->nsegs - 1)
10312 + last = HIFN_D_LAST;
10314 + dma->srcr[idx].p = htole32(src->segs[i].ds_addr);
10315 + dma->srcr[idx].l = htole32(src->segs[i].ds_len |
10316 + HIFN_D_MASKDONEIRQ | last);
10317 + wmb();
10318 + dma->srcr[idx].l |= htole32(HIFN_D_VALID);
10319 + HIFN_SRCR_SYNC(sc, idx,
10320 + BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
10322 + idx = hifn_dmamap_srcwrap(sc, idx);
10324 + dma->srci = idx;
10325 + dma->srcu += src->nsegs;
10326 + return (idx);
10330 +static int
10331 +hifn_crypto(
10332 + struct hifn_softc *sc,
10333 + struct hifn_command *cmd,
10334 + struct cryptop *crp,
10335 + int hint)
10337 + struct hifn_dma *dma = sc->sc_dma;
10338 + u_int32_t cmdlen, csr;
10339 + int cmdi, resi, err = 0;
10340 + unsigned long l_flags;
10342 + DPRINTF("%s()\n", __FUNCTION__);
10344 + /*
10345 + * need 1 cmd, and 1 res
10347 + * NB: check this first since it's easy.
10348 + */
10349 + HIFN_LOCK(sc);
10350 + if ((dma->cmdu + 1) > HIFN_D_CMD_RSIZE ||
10351 + (dma->resu + 1) > HIFN_D_RES_RSIZE) {
10352 +#ifdef HIFN_DEBUG
10353 + if (hifn_debug) {
10354 + device_printf(sc->sc_dev,
10355 + "cmd/result exhaustion, cmdu %u resu %u\n",
10356 + dma->cmdu, dma->resu);
10358 +#endif
10359 + hifnstats.hst_nomem_cr++;
10360 + sc->sc_needwakeup |= CRYPTO_SYMQ;
10361 + HIFN_UNLOCK(sc);
10362 + return (ERESTART);
10365 + if (crp->crp_flags & CRYPTO_F_SKBUF) {
10366 + if (pci_map_skb(sc, &cmd->src, cmd->src_skb)) {
10367 + hifnstats.hst_nomem_load++;
10368 + err = ENOMEM;
10369 + goto err_srcmap1;
10371 + } else if (crp->crp_flags & CRYPTO_F_IOV) {
10372 + if (pci_map_uio(sc, &cmd->src, cmd->src_io)) {
10373 + hifnstats.hst_nomem_load++;
10374 + err = ENOMEM;
10375 + goto err_srcmap1;
10377 + } else {
10378 + if (pci_map_buf(sc, &cmd->src, cmd->src_buf, crp->crp_ilen)) {
10379 + hifnstats.hst_nomem_load++;
10380 + err = ENOMEM;
10381 + goto err_srcmap1;
10385 + if (hifn_dmamap_aligned(&cmd->src)) {
10386 + cmd->sloplen = cmd->src_mapsize & 3;
10387 + cmd->dst = cmd->src;
10388 + } else {
10389 + if (crp->crp_flags & CRYPTO_F_IOV) {
10390 + DPRINTF("%s,%d: %s - EINVAL\n",__FILE__,__LINE__,__FUNCTION__);
10391 + err = EINVAL;
10392 + goto err_srcmap;
10393 + } else if (crp->crp_flags & CRYPTO_F_SKBUF) {
10394 +#ifdef NOTYET
10395 + int totlen, len;
10396 + struct mbuf *m, *m0, *mlast;
10398 + KASSERT(cmd->dst_m == cmd->src_m,
10399 + ("hifn_crypto: dst_m initialized improperly"));
10400 + hifnstats.hst_unaligned++;
10401 + /*
10402 + * Source is not aligned on a longword boundary.
10403 + * Copy the data to insure alignment. If we fail
10404 + * to allocate mbufs or clusters while doing this
10405 + * we return ERESTART so the operation is requeued
10406 + * at the crypto layer, but only if there are
10407 + * ops already posted to the hardware; otherwise we
10408 + * have no guarantee that we'll be re-entered.
10409 + */
10410 + totlen = cmd->src_mapsize;
10411 + if (cmd->src_m->m_flags & M_PKTHDR) {
10412 + len = MHLEN;
10413 + MGETHDR(m0, M_DONTWAIT, MT_DATA);
10414 + if (m0 && !m_dup_pkthdr(m0, cmd->src_m, M_DONTWAIT)) {
10415 + m_free(m0);
10416 + m0 = NULL;
10418 + } else {
10419 + len = MLEN;
10420 + MGET(m0, M_DONTWAIT, MT_DATA);
10422 + if (m0 == NULL) {
10423 + hifnstats.hst_nomem_mbuf++;
10424 + err = dma->cmdu ? ERESTART : ENOMEM;
10425 + goto err_srcmap;
10427 + if (totlen >= MINCLSIZE) {
10428 + MCLGET(m0, M_DONTWAIT);
10429 + if ((m0->m_flags & M_EXT) == 0) {
10430 + hifnstats.hst_nomem_mcl++;
10431 + err = dma->cmdu ? ERESTART : ENOMEM;
10432 + m_freem(m0);
10433 + goto err_srcmap;
10435 + len = MCLBYTES;
10437 + totlen -= len;
10438 + m0->m_pkthdr.len = m0->m_len = len;
10439 + mlast = m0;
10441 + while (totlen > 0) {
10442 + MGET(m, M_DONTWAIT, MT_DATA);
10443 + if (m == NULL) {
10444 + hifnstats.hst_nomem_mbuf++;
10445 + err = dma->cmdu ? ERESTART : ENOMEM;
10446 + m_freem(m0);
10447 + goto err_srcmap;
10449 + len = MLEN;
10450 + if (totlen >= MINCLSIZE) {
10451 + MCLGET(m, M_DONTWAIT);
10452 + if ((m->m_flags & M_EXT) == 0) {
10453 + hifnstats.hst_nomem_mcl++;
10454 + err = dma->cmdu ? ERESTART : ENOMEM;
10455 + mlast->m_next = m;
10456 + m_freem(m0);
10457 + goto err_srcmap;
10459 + len = MCLBYTES;
10462 + m->m_len = len;
10463 + m0->m_pkthdr.len += len;
10464 + totlen -= len;
10466 + mlast->m_next = m;
10467 + mlast = m;
10469 + cmd->dst_m = m0;
10470 +#else
10471 + device_printf(sc->sc_dev,
10472 + "%s,%d: CRYPTO_F_SKBUF unaligned not implemented\n",
10473 + __FILE__, __LINE__);
10474 + err = EINVAL;
10475 + goto err_srcmap;
10476 +#endif
10477 + } else {
10478 + device_printf(sc->sc_dev,
10479 + "%s,%d: unaligned contig buffers not implemented\n",
10480 + __FILE__, __LINE__);
10481 + err = EINVAL;
10482 + goto err_srcmap;
10486 + if (cmd->dst_map == NULL) {
10487 + if (crp->crp_flags & CRYPTO_F_SKBUF) {
10488 + if (pci_map_skb(sc, &cmd->dst, cmd->dst_skb)) {
10489 + hifnstats.hst_nomem_map++;
10490 + err = ENOMEM;
10491 + goto err_dstmap1;
10493 + } else if (crp->crp_flags & CRYPTO_F_IOV) {
10494 + if (pci_map_uio(sc, &cmd->dst, cmd->dst_io)) {
10495 + hifnstats.hst_nomem_load++;
10496 + err = ENOMEM;
10497 + goto err_dstmap1;
10499 + } else {
10500 + if (pci_map_buf(sc, &cmd->dst, cmd->dst_buf, crp->crp_ilen)) {
10501 + hifnstats.hst_nomem_load++;
10502 + err = ENOMEM;
10503 + goto err_dstmap1;
10508 +#ifdef HIFN_DEBUG
10509 + if (hifn_debug) {
10510 + device_printf(sc->sc_dev,
10511 + "Entering cmd: stat %8x ien %8x u %d/%d/%d/%d n %d/%d\n",
10512 + READ_REG_1(sc, HIFN_1_DMA_CSR),
10513 + READ_REG_1(sc, HIFN_1_DMA_IER),
10514 + dma->cmdu, dma->srcu, dma->dstu, dma->resu,
10515 + cmd->src_nsegs, cmd->dst_nsegs);
10517 +#endif
10519 +#if 0
10520 + if (cmd->src_map == cmd->dst_map) {
10521 + bus_dmamap_sync(sc->sc_dmat, cmd->src_map,
10522 + BUS_DMASYNC_PREWRITE|BUS_DMASYNC_PREREAD);
10523 + } else {
10524 + bus_dmamap_sync(sc->sc_dmat, cmd->src_map,
10525 + BUS_DMASYNC_PREWRITE);
10526 + bus_dmamap_sync(sc->sc_dmat, cmd->dst_map,
10527 + BUS_DMASYNC_PREREAD);
10529 +#endif
10531 + /*
10532 + * need N src, and N dst
10533 + */
10534 + if ((dma->srcu + cmd->src_nsegs) > HIFN_D_SRC_RSIZE ||
10535 + (dma->dstu + cmd->dst_nsegs + 1) > HIFN_D_DST_RSIZE) {
10536 +#ifdef HIFN_DEBUG
10537 + if (hifn_debug) {
10538 + device_printf(sc->sc_dev,
10539 + "src/dst exhaustion, srcu %u+%u dstu %u+%u\n",
10540 + dma->srcu, cmd->src_nsegs,
10541 + dma->dstu, cmd->dst_nsegs);
10543 +#endif
10544 + hifnstats.hst_nomem_sd++;
10545 + err = ERESTART;
10546 + goto err_dstmap;
10549 + if (dma->cmdi == HIFN_D_CMD_RSIZE) {
10550 + dma->cmdi = 0;
10551 + dma->cmdr[HIFN_D_CMD_RSIZE].l = htole32(HIFN_D_JUMP|HIFN_D_MASKDONEIRQ);
10552 + wmb();
10553 + dma->cmdr[HIFN_D_CMD_RSIZE].l |= htole32(HIFN_D_VALID);
10554 + HIFN_CMDR_SYNC(sc, HIFN_D_CMD_RSIZE,
10555 + BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
10557 + cmdi = dma->cmdi++;
10558 + cmdlen = hifn_write_command(cmd, dma->command_bufs[cmdi]);
10559 + HIFN_CMD_SYNC(sc, cmdi, BUS_DMASYNC_PREWRITE);
10561 + /* .p for command/result already set */
10562 + dma->cmdr[cmdi].l = htole32(cmdlen | HIFN_D_LAST |
10563 + HIFN_D_MASKDONEIRQ);
10564 + wmb();
10565 + dma->cmdr[cmdi].l |= htole32(HIFN_D_VALID);
10566 + HIFN_CMDR_SYNC(sc, cmdi,
10567 + BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
10568 + dma->cmdu++;
10570 + /*
10571 + * We don't worry about missing an interrupt (which a "command wait"
10572 + * interrupt salvages us from), unless there is more than one command
10573 + * in the queue.
10574 + */
10575 + if (dma->cmdu > 1) {
10576 + sc->sc_dmaier |= HIFN_DMAIER_C_WAIT;
10577 + WRITE_REG_1(sc, HIFN_1_DMA_IER, sc->sc_dmaier);
10580 + hifnstats.hst_ipackets++;
10581 + hifnstats.hst_ibytes += cmd->src_mapsize;
10583 + hifn_dmamap_load_src(sc, cmd);
10585 + /*
10586 + * Unlike other descriptors, we don't mask done interrupt from
10587 + * result descriptor.
10588 + */
10589 +#ifdef HIFN_DEBUG
10590 + if (hifn_debug)
10591 + device_printf(sc->sc_dev, "load res\n");
10592 +#endif
10593 + if (dma->resi == HIFN_D_RES_RSIZE) {
10594 + dma->resi = 0;
10595 + dma->resr[HIFN_D_RES_RSIZE].l = htole32(HIFN_D_JUMP|HIFN_D_MASKDONEIRQ);
10596 + wmb();
10597 + dma->resr[HIFN_D_RES_RSIZE].l |= htole32(HIFN_D_VALID);
10598 + HIFN_RESR_SYNC(sc, HIFN_D_RES_RSIZE,
10599 + BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
10601 + resi = dma->resi++;
10602 + KASSERT(dma->hifn_commands[resi] == NULL,
10603 + ("hifn_crypto: command slot %u busy", resi));
10604 + dma->hifn_commands[resi] = cmd;
10605 + HIFN_RES_SYNC(sc, resi, BUS_DMASYNC_PREREAD);
10606 + if ((hint & CRYPTO_HINT_MORE) && sc->sc_curbatch < hifn_maxbatch) {
10607 + dma->resr[resi].l = htole32(HIFN_MAX_RESULT |
10608 + HIFN_D_LAST | HIFN_D_MASKDONEIRQ);
10609 + wmb();
10610 + dma->resr[resi].l |= htole32(HIFN_D_VALID);
10611 + sc->sc_curbatch++;
10612 + if (sc->sc_curbatch > hifnstats.hst_maxbatch)
10613 + hifnstats.hst_maxbatch = sc->sc_curbatch;
10614 + hifnstats.hst_totbatch++;
10615 + } else {
10616 + dma->resr[resi].l = htole32(HIFN_MAX_RESULT | HIFN_D_LAST);
10617 + wmb();
10618 + dma->resr[resi].l |= htole32(HIFN_D_VALID);
10619 + sc->sc_curbatch = 0;
10621 + HIFN_RESR_SYNC(sc, resi,
10622 + BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
10623 + dma->resu++;
10625 + if (cmd->sloplen)
10626 + cmd->slopidx = resi;
10628 + hifn_dmamap_load_dst(sc, cmd);
10630 + csr = 0;
10631 + if (sc->sc_c_busy == 0) {
10632 + csr |= HIFN_DMACSR_C_CTRL_ENA;
10633 + sc->sc_c_busy = 1;
10635 + if (sc->sc_s_busy == 0) {
10636 + csr |= HIFN_DMACSR_S_CTRL_ENA;
10637 + sc->sc_s_busy = 1;
10639 + if (sc->sc_r_busy == 0) {
10640 + csr |= HIFN_DMACSR_R_CTRL_ENA;
10641 + sc->sc_r_busy = 1;
10643 + if (sc->sc_d_busy == 0) {
10644 + csr |= HIFN_DMACSR_D_CTRL_ENA;
10645 + sc->sc_d_busy = 1;
10647 + if (csr)
10648 + WRITE_REG_1(sc, HIFN_1_DMA_CSR, csr);
10650 +#ifdef HIFN_DEBUG
10651 + if (hifn_debug) {
10652 + device_printf(sc->sc_dev, "command: stat %8x ier %8x\n",
10653 + READ_REG_1(sc, HIFN_1_DMA_CSR),
10654 + READ_REG_1(sc, HIFN_1_DMA_IER));
10656 +#endif
10658 + sc->sc_active = 5;
10659 + HIFN_UNLOCK(sc);
10660 + KASSERT(err == 0, ("hifn_crypto: success with error %u", err));
10661 + return (err); /* success */
10663 +err_dstmap:
10664 + if (cmd->src_map != cmd->dst_map)
10665 + pci_unmap_buf(sc, &cmd->dst);
10666 +err_dstmap1:
10667 +err_srcmap:
10668 + if (crp->crp_flags & CRYPTO_F_SKBUF) {
10669 + if (cmd->src_skb != cmd->dst_skb)
10670 +#ifdef NOTYET
10671 + m_freem(cmd->dst_m);
10672 +#else
10673 + device_printf(sc->sc_dev,
10674 + "%s,%d: CRYPTO_F_SKBUF src != dst not implemented\n",
10675 + __FILE__, __LINE__);
10676 +#endif
10678 + pci_unmap_buf(sc, &cmd->src);
10679 +err_srcmap1:
10680 + HIFN_UNLOCK(sc);
10681 + return (err);
10684 +static void
10685 +hifn_tick(unsigned long arg)
10687 + struct hifn_softc *sc;
10688 + unsigned long l_flags;
10690 + if (arg >= HIFN_MAX_CHIPS)
10691 + return;
10692 + sc = hifn_chip_idx[arg];
10693 + if (!sc)
10694 + return;
10696 + HIFN_LOCK(sc);
10697 + if (sc->sc_active == 0) {
10698 + struct hifn_dma *dma = sc->sc_dma;
10699 + u_int32_t r = 0;
10701 + if (dma->cmdu == 0 && sc->sc_c_busy) {
10702 + sc->sc_c_busy = 0;
10703 + r |= HIFN_DMACSR_C_CTRL_DIS;
10705 + if (dma->srcu == 0 && sc->sc_s_busy) {
10706 + sc->sc_s_busy = 0;
10707 + r |= HIFN_DMACSR_S_CTRL_DIS;
10709 + if (dma->dstu == 0 && sc->sc_d_busy) {
10710 + sc->sc_d_busy = 0;
10711 + r |= HIFN_DMACSR_D_CTRL_DIS;
10713 + if (dma->resu == 0 && sc->sc_r_busy) {
10714 + sc->sc_r_busy = 0;
10715 + r |= HIFN_DMACSR_R_CTRL_DIS;
10717 + if (r)
10718 + WRITE_REG_1(sc, HIFN_1_DMA_CSR, r);
10719 + } else
10720 + sc->sc_active--;
10721 + HIFN_UNLOCK(sc);
10722 + mod_timer(&sc->sc_tickto, jiffies + HZ);
10725 +static irqreturn_t
10726 +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,19)
10727 +hifn_intr(int irq, void *arg)
10728 +#else
10729 +hifn_intr(int irq, void *arg, struct pt_regs *regs)
10730 +#endif
10732 + struct hifn_softc *sc = arg;
10733 + struct hifn_dma *dma;
10734 + u_int32_t dmacsr, restart;
10735 + int i, u;
10736 + unsigned long l_flags;
10738 + dmacsr = READ_REG_1(sc, HIFN_1_DMA_CSR);
10740 + /* Nothing in the DMA unit interrupted */
10741 + if ((dmacsr & sc->sc_dmaier) == 0)
10742 + return IRQ_NONE;
10744 + HIFN_LOCK(sc);
10746 + dma = sc->sc_dma;
10748 +#ifdef HIFN_DEBUG
10749 + if (hifn_debug) {
10750 + device_printf(sc->sc_dev,
10751 + "irq: stat %08x ien %08x damier %08x i %d/%d/%d/%d k %d/%d/%d/%d u %d/%d/%d/%d\n",
10752 + dmacsr, READ_REG_1(sc, HIFN_1_DMA_IER), sc->sc_dmaier,
10753 + dma->cmdi, dma->srci, dma->dsti, dma->resi,
10754 + dma->cmdk, dma->srck, dma->dstk, dma->resk,
10755 + dma->cmdu, dma->srcu, dma->dstu, dma->resu);
10757 +#endif
10759 + WRITE_REG_1(sc, HIFN_1_DMA_CSR, dmacsr & sc->sc_dmaier);
10761 + if ((sc->sc_flags & HIFN_HAS_PUBLIC) &&
10762 + (dmacsr & HIFN_DMACSR_PUBDONE))
10763 + WRITE_REG_1(sc, HIFN_1_PUB_STATUS,
10764 + READ_REG_1(sc, HIFN_1_PUB_STATUS) | HIFN_PUBSTS_DONE);
10766 + restart = dmacsr & (HIFN_DMACSR_D_OVER | HIFN_DMACSR_R_OVER);
10767 + if (restart)
10768 + device_printf(sc->sc_dev, "overrun %x\n", dmacsr);
10770 + if (sc->sc_flags & HIFN_IS_7811) {
10771 + if (dmacsr & HIFN_DMACSR_ILLR)
10772 + device_printf(sc->sc_dev, "illegal read\n");
10773 + if (dmacsr & HIFN_DMACSR_ILLW)
10774 + device_printf(sc->sc_dev, "illegal write\n");
10777 + restart = dmacsr & (HIFN_DMACSR_C_ABORT | HIFN_DMACSR_S_ABORT |
10778 + HIFN_DMACSR_D_ABORT | HIFN_DMACSR_R_ABORT);
10779 + if (restart) {
10780 + device_printf(sc->sc_dev, "abort, resetting.\n");
10781 + hifnstats.hst_abort++;
10782 + hifn_abort(sc);
10783 + HIFN_UNLOCK(sc);
10784 + return IRQ_HANDLED;
10787 + if ((dmacsr & HIFN_DMACSR_C_WAIT) && (dma->cmdu == 0)) {
10788 + /*
10789 + * If no slots to process and we receive a "waiting on
10790 + * command" interrupt, we disable the "waiting on command"
10791 + * (by clearing it).
10792 + */
10793 + sc->sc_dmaier &= ~HIFN_DMAIER_C_WAIT;
10794 + WRITE_REG_1(sc, HIFN_1_DMA_IER, sc->sc_dmaier);
10797 + /* clear the rings */
10798 + i = dma->resk; u = dma->resu;
10799 + while (u != 0) {
10800 + HIFN_RESR_SYNC(sc, i,
10801 + BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
10802 + if (dma->resr[i].l & htole32(HIFN_D_VALID)) {
10803 + HIFN_RESR_SYNC(sc, i,
10804 + BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
10805 + break;
10808 + if (i != HIFN_D_RES_RSIZE) {
10809 + struct hifn_command *cmd;
10810 + u_int8_t *macbuf = NULL;
10812 + HIFN_RES_SYNC(sc, i, BUS_DMASYNC_POSTREAD);
10813 + cmd = dma->hifn_commands[i];
10814 + KASSERT(cmd != NULL,
10815 + ("hifn_intr: null command slot %u", i));
10816 + dma->hifn_commands[i] = NULL;
10818 + if (cmd->base_masks & HIFN_BASE_CMD_MAC) {
10819 + macbuf = dma->result_bufs[i];
10820 + macbuf += 12;
10823 + hifn_callback(sc, cmd, macbuf);
10824 + hifnstats.hst_opackets++;
10825 + u--;
10828 + if (++i == (HIFN_D_RES_RSIZE + 1))
10829 + i = 0;
10831 + dma->resk = i; dma->resu = u;
10833 + i = dma->srck; u = dma->srcu;
10834 + while (u != 0) {
10835 + if (i == HIFN_D_SRC_RSIZE)
10836 + i = 0;
10837 + HIFN_SRCR_SYNC(sc, i,
10838 + BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
10839 + if (dma->srcr[i].l & htole32(HIFN_D_VALID)) {
10840 + HIFN_SRCR_SYNC(sc, i,
10841 + BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
10842 + break;
10844 + i++, u--;
10846 + dma->srck = i; dma->srcu = u;
10848 + i = dma->cmdk; u = dma->cmdu;
10849 + while (u != 0) {
10850 + HIFN_CMDR_SYNC(sc, i,
10851 + BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
10852 + if (dma->cmdr[i].l & htole32(HIFN_D_VALID)) {
10853 + HIFN_CMDR_SYNC(sc, i,
10854 + BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
10855 + break;
10857 + if (i != HIFN_D_CMD_RSIZE) {
10858 + u--;
10859 + HIFN_CMD_SYNC(sc, i, BUS_DMASYNC_POSTWRITE);
10861 + if (++i == (HIFN_D_CMD_RSIZE + 1))
10862 + i = 0;
10864 + dma->cmdk = i; dma->cmdu = u;
10866 + HIFN_UNLOCK(sc);
10868 + if (sc->sc_needwakeup) { /* XXX check high watermark */
10869 + int wakeup = sc->sc_needwakeup & (CRYPTO_SYMQ|CRYPTO_ASYMQ);
10870 +#ifdef HIFN_DEBUG
10871 + if (hifn_debug)
10872 + device_printf(sc->sc_dev,
10873 + "wakeup crypto (%x) u %d/%d/%d/%d\n",
10874 + sc->sc_needwakeup,
10875 + dma->cmdu, dma->srcu, dma->dstu, dma->resu);
10876 +#endif
10877 + sc->sc_needwakeup &= ~wakeup;
10878 + crypto_unblock(sc->sc_cid, wakeup);
10881 + return IRQ_HANDLED;
10885 + * Allocate a new 'session' and return an encoded session id. 'sidp'
10886 + * contains our registration id, and should contain an encoded session
10887 + * id on successful allocation.
10888 + */
10889 +static int
10890 +hifn_newsession(device_t dev, u_int32_t *sidp, struct cryptoini *cri)
10892 + struct hifn_softc *sc = device_get_softc(dev);
10893 + struct cryptoini *c;
10894 + int mac = 0, cry = 0, sesn;
10895 + struct hifn_session *ses = NULL;
10896 + unsigned long l_flags;
10898 + DPRINTF("%s()\n", __FUNCTION__);
10900 + KASSERT(sc != NULL, ("hifn_newsession: null softc"));
10901 + if (sidp == NULL || cri == NULL || sc == NULL) {
10902 + DPRINTF("%s,%d: %s - EINVAL\n", __FILE__, __LINE__, __FUNCTION__);
10903 + return (EINVAL);
10906 + HIFN_LOCK(sc);
10907 + if (sc->sc_sessions == NULL) {
10908 + ses = sc->sc_sessions = (struct hifn_session *)kmalloc(sizeof(*ses),
10909 + SLAB_ATOMIC);
10910 + if (ses == NULL) {
10911 + HIFN_UNLOCK(sc);
10912 + return (ENOMEM);
10914 + sesn = 0;
10915 + sc->sc_nsessions = 1;
10916 + } else {
10917 + for (sesn = 0; sesn < sc->sc_nsessions; sesn++) {
10918 + if (!sc->sc_sessions[sesn].hs_used) {
10919 + ses = &sc->sc_sessions[sesn];
10920 + break;
10924 + if (ses == NULL) {
10925 + sesn = sc->sc_nsessions;
10926 + ses = (struct hifn_session *)kmalloc((sesn + 1) * sizeof(*ses),
10927 + SLAB_ATOMIC);
10928 + if (ses == NULL) {
10929 + HIFN_UNLOCK(sc);
10930 + return (ENOMEM);
10932 + bcopy(sc->sc_sessions, ses, sesn * sizeof(*ses));
10933 + bzero(sc->sc_sessions, sesn * sizeof(*ses));
10934 + kfree(sc->sc_sessions);
10935 + sc->sc_sessions = ses;
10936 + ses = &sc->sc_sessions[sesn];
10937 + sc->sc_nsessions++;
10940 + HIFN_UNLOCK(sc);
10942 + bzero(ses, sizeof(*ses));
10943 + ses->hs_used = 1;
10945 + for (c = cri; c != NULL; c = c->cri_next) {
10946 + switch (c->cri_alg) {
10947 + case CRYPTO_MD5:
10948 + case CRYPTO_SHA1:
10949 + case CRYPTO_MD5_HMAC:
10950 + case CRYPTO_SHA1_HMAC:
10951 + if (mac) {
10952 + DPRINTF("%s,%d: %s - EINVAL\n",__FILE__,__LINE__,__FUNCTION__);
10953 + return (EINVAL);
10955 + mac = 1;
10956 + ses->hs_mlen = c->cri_mlen;
10957 + if (ses->hs_mlen == 0) {
10958 + switch (c->cri_alg) {
10959 + case CRYPTO_MD5:
10960 + case CRYPTO_MD5_HMAC:
10961 + ses->hs_mlen = 16;
10962 + break;
10963 + case CRYPTO_SHA1:
10964 + case CRYPTO_SHA1_HMAC:
10965 + ses->hs_mlen = 20;
10966 + break;
10969 + break;
10970 + case CRYPTO_DES_CBC:
10971 + case CRYPTO_3DES_CBC:
10972 + case CRYPTO_AES_CBC:
10973 + /* XXX this may read fewer, does it matter? */
10974 + read_random(ses->hs_iv,
10975 + c->cri_alg == CRYPTO_AES_CBC ?
10976 + HIFN_AES_IV_LENGTH : HIFN_IV_LENGTH);
10977 + /*FALLTHROUGH*/
10978 + case CRYPTO_ARC4:
10979 + if (cry) {
10980 + DPRINTF("%s,%d: %s - EINVAL\n",__FILE__,__LINE__,__FUNCTION__);
10981 + return (EINVAL);
10983 + cry = 1;
10984 + break;
10985 + default:
10986 + DPRINTF("%s,%d: %s - EINVAL\n",__FILE__,__LINE__,__FUNCTION__);
10987 + return (EINVAL);
10990 + if (mac == 0 && cry == 0) {
10991 + DPRINTF("%s,%d: %s - EINVAL\n",__FILE__,__LINE__,__FUNCTION__);
10992 + return (EINVAL);
10995 + *sidp = HIFN_SID(device_get_unit(sc->sc_dev), sesn);
10997 + return (0);
11001 + * Deallocate a session.
11002 + * XXX this routine should run a zero'd mac/encrypt key into context ram.
11003 + * XXX to blow away any keys already stored there.
11004 + */
11005 +static int
11006 +hifn_freesession(device_t dev, u_int64_t tid)
11008 + struct hifn_softc *sc = device_get_softc(dev);
11009 + int session, error;
11010 + u_int32_t sid = CRYPTO_SESID2LID(tid);
11011 + unsigned long l_flags;
11013 + DPRINTF("%s()\n", __FUNCTION__);
11015 + KASSERT(sc != NULL, ("hifn_freesession: null softc"));
11016 + if (sc == NULL) {
11017 + DPRINTF("%s,%d: %s - EINVAL\n",__FILE__,__LINE__,__FUNCTION__);
11018 + return (EINVAL);
11021 + HIFN_LOCK(sc);
11022 + session = HIFN_SESSION(sid);
11023 + if (session < sc->sc_nsessions) {
11024 + bzero(&sc->sc_sessions[session], sizeof(struct hifn_session));
11025 + error = 0;
11026 + } else {
11027 + DPRINTF("%s,%d: %s - EINVAL\n",__FILE__,__LINE__,__FUNCTION__);
11028 + error = EINVAL;
11030 + HIFN_UNLOCK(sc);
11032 + return (error);
11035 +static int
11036 +hifn_process(device_t dev, struct cryptop *crp, int hint)
11038 + struct hifn_softc *sc = device_get_softc(dev);
11039 + struct hifn_command *cmd = NULL;
11040 + int session, err, ivlen;
11041 + struct cryptodesc *crd1, *crd2, *maccrd, *enccrd;
11043 + DPRINTF("%s()\n", __FUNCTION__);
11045 + if (crp == NULL || crp->crp_callback == NULL) {
11046 + hifnstats.hst_invalid++;
11047 + DPRINTF("%s,%d: %s - EINVAL\n",__FILE__,__LINE__,__FUNCTION__);
11048 + return (EINVAL);
11050 + session = HIFN_SESSION(crp->crp_sid);
11052 + if (sc == NULL || session >= sc->sc_nsessions) {
11053 + DPRINTF("%s,%d: %s - EINVAL\n",__FILE__,__LINE__,__FUNCTION__);
11054 + err = EINVAL;
11055 + goto errout;
11058 + cmd = kmalloc(sizeof(struct hifn_command), SLAB_ATOMIC);
11059 + if (cmd == NULL) {
11060 + hifnstats.hst_nomem++;
11061 + err = ENOMEM;
11062 + goto errout;
11064 + memset(cmd, 0, sizeof(*cmd));
11066 + if (crp->crp_flags & CRYPTO_F_SKBUF) {
11067 + cmd->src_skb = (struct sk_buff *)crp->crp_buf;
11068 + cmd->dst_skb = (struct sk_buff *)crp->crp_buf;
11069 + } else if (crp->crp_flags & CRYPTO_F_IOV) {
11070 + cmd->src_io = (struct uio *)crp->crp_buf;
11071 + cmd->dst_io = (struct uio *)crp->crp_buf;
11072 + } else {
11073 + cmd->src_buf = crp->crp_buf;
11074 + cmd->dst_buf = crp->crp_buf;
11077 + crd1 = crp->crp_desc;
11078 + if (crd1 == NULL) {
11079 + DPRINTF("%s,%d: %s - EINVAL\n",__FILE__,__LINE__,__FUNCTION__);
11080 + err = EINVAL;
11081 + goto errout;
11083 + crd2 = crd1->crd_next;
11085 + if (crd2 == NULL) {
11086 + if (crd1->crd_alg == CRYPTO_MD5_HMAC ||
11087 + crd1->crd_alg == CRYPTO_SHA1_HMAC ||
11088 + crd1->crd_alg == CRYPTO_SHA1 ||
11089 + crd1->crd_alg == CRYPTO_MD5) {
11090 + maccrd = crd1;
11091 + enccrd = NULL;
11092 + } else if (crd1->crd_alg == CRYPTO_DES_CBC ||
11093 + crd1->crd_alg == CRYPTO_3DES_CBC ||
11094 + crd1->crd_alg == CRYPTO_AES_CBC ||
11095 + crd1->crd_alg == CRYPTO_ARC4) {
11096 + if ((crd1->crd_flags & CRD_F_ENCRYPT) == 0)
11097 + cmd->base_masks |= HIFN_BASE_CMD_DECODE;
11098 + maccrd = NULL;
11099 + enccrd = crd1;
11100 + } else {
11101 + DPRINTF("%s,%d: %s - EINVAL\n",__FILE__,__LINE__,__FUNCTION__);
11102 + err = EINVAL;
11103 + goto errout;
11105 + } else {
11106 + if ((crd1->crd_alg == CRYPTO_MD5_HMAC ||
11107 + crd1->crd_alg == CRYPTO_SHA1_HMAC ||
11108 + crd1->crd_alg == CRYPTO_MD5 ||
11109 + crd1->crd_alg == CRYPTO_SHA1) &&
11110 + (crd2->crd_alg == CRYPTO_DES_CBC ||
11111 + crd2->crd_alg == CRYPTO_3DES_CBC ||
11112 + crd2->crd_alg == CRYPTO_AES_CBC ||
11113 + crd2->crd_alg == CRYPTO_ARC4) &&
11114 + ((crd2->crd_flags & CRD_F_ENCRYPT) == 0)) {
11115 + cmd->base_masks = HIFN_BASE_CMD_DECODE;
11116 + maccrd = crd1;
11117 + enccrd = crd2;
11118 + } else if ((crd1->crd_alg == CRYPTO_DES_CBC ||
11119 + crd1->crd_alg == CRYPTO_ARC4 ||
11120 + crd1->crd_alg == CRYPTO_3DES_CBC ||
11121 + crd1->crd_alg == CRYPTO_AES_CBC) &&
11122 + (crd2->crd_alg == CRYPTO_MD5_HMAC ||
11123 + crd2->crd_alg == CRYPTO_SHA1_HMAC ||
11124 + crd2->crd_alg == CRYPTO_MD5 ||
11125 + crd2->crd_alg == CRYPTO_SHA1) &&
11126 + (crd1->crd_flags & CRD_F_ENCRYPT)) {
11127 + enccrd = crd1;
11128 + maccrd = crd2;
11129 + } else {
11130 + /*
11131 + * We cannot order the 7751 as requested
11132 + */
11133 + DPRINTF("%s,%d: %s %d,%d,%d - EINVAL\n",__FILE__,__LINE__,__FUNCTION__, crd1->crd_alg, crd2->crd_alg, crd1->crd_flags & CRD_F_ENCRYPT);
11134 + err = EINVAL;
11135 + goto errout;
11139 + if (enccrd) {
11140 + cmd->enccrd = enccrd;
11141 + cmd->base_masks |= HIFN_BASE_CMD_CRYPT;
11142 + switch (enccrd->crd_alg) {
11143 + case CRYPTO_ARC4:
11144 + cmd->cry_masks |= HIFN_CRYPT_CMD_ALG_RC4;
11145 + break;
11146 + case CRYPTO_DES_CBC:
11147 + cmd->cry_masks |= HIFN_CRYPT_CMD_ALG_DES |
11148 + HIFN_CRYPT_CMD_MODE_CBC |
11149 + HIFN_CRYPT_CMD_NEW_IV;
11150 + break;
11151 + case CRYPTO_3DES_CBC:
11152 + cmd->cry_masks |= HIFN_CRYPT_CMD_ALG_3DES |
11153 + HIFN_CRYPT_CMD_MODE_CBC |
11154 + HIFN_CRYPT_CMD_NEW_IV;
11155 + break;
11156 + case CRYPTO_AES_CBC:
11157 + cmd->cry_masks |= HIFN_CRYPT_CMD_ALG_AES |
11158 + HIFN_CRYPT_CMD_MODE_CBC |
11159 + HIFN_CRYPT_CMD_NEW_IV;
11160 + break;
11161 + default:
11162 + DPRINTF("%s,%d: %s - EINVAL\n",__FILE__,__LINE__,__FUNCTION__);
11163 + err = EINVAL;
11164 + goto errout;
11166 + if (enccrd->crd_alg != CRYPTO_ARC4) {
11167 + ivlen = ((enccrd->crd_alg == CRYPTO_AES_CBC) ?
11168 + HIFN_AES_IV_LENGTH : HIFN_IV_LENGTH);
11169 + if (enccrd->crd_flags & CRD_F_ENCRYPT) {
11170 + if (enccrd->crd_flags & CRD_F_IV_EXPLICIT)
11171 + bcopy(enccrd->crd_iv, cmd->iv, ivlen);
11172 + else
11173 + bcopy(sc->sc_sessions[session].hs_iv,
11174 + cmd->iv, ivlen);
11176 + if ((enccrd->crd_flags & CRD_F_IV_PRESENT)
11177 + == 0) {
11178 + crypto_copyback(crp->crp_flags,
11179 + crp->crp_buf, enccrd->crd_inject,
11180 + ivlen, cmd->iv);
11182 + } else {
11183 + if (enccrd->crd_flags & CRD_F_IV_EXPLICIT)
11184 + bcopy(enccrd->crd_iv, cmd->iv, ivlen);
11185 + else {
11186 + crypto_copydata(crp->crp_flags,
11187 + crp->crp_buf, enccrd->crd_inject,
11188 + ivlen, cmd->iv);
11193 + if (enccrd->crd_flags & CRD_F_KEY_EXPLICIT)
11194 + cmd->cry_masks |= HIFN_CRYPT_CMD_NEW_KEY;
11195 + cmd->ck = enccrd->crd_key;
11196 + cmd->cklen = enccrd->crd_klen >> 3;
11197 + cmd->cry_masks |= HIFN_CRYPT_CMD_NEW_KEY;
11199 + /*
11200 + * Need to specify the size for the AES key in the masks.
11201 + */
11202 + if ((cmd->cry_masks & HIFN_CRYPT_CMD_ALG_MASK) ==
11203 + HIFN_CRYPT_CMD_ALG_AES) {
11204 + switch (cmd->cklen) {
11205 + case 16:
11206 + cmd->cry_masks |= HIFN_CRYPT_CMD_KSZ_128;
11207 + break;
11208 + case 24:
11209 + cmd->cry_masks |= HIFN_CRYPT_CMD_KSZ_192;
11210 + break;
11211 + case 32:
11212 + cmd->cry_masks |= HIFN_CRYPT_CMD_KSZ_256;
11213 + break;
11214 + default:
11215 + DPRINTF("%s,%d: %s - EINVAL\n",__FILE__,__LINE__,__FUNCTION__);
11216 + err = EINVAL;
11217 + goto errout;
11222 + if (maccrd) {
11223 + cmd->maccrd = maccrd;
11224 + cmd->base_masks |= HIFN_BASE_CMD_MAC;
11226 + switch (maccrd->crd_alg) {
11227 + case CRYPTO_MD5:
11228 + cmd->mac_masks |= HIFN_MAC_CMD_ALG_MD5 |
11229 + HIFN_MAC_CMD_RESULT | HIFN_MAC_CMD_MODE_HASH |
11230 + HIFN_MAC_CMD_POS_IPSEC;
11231 + break;
11232 + case CRYPTO_MD5_HMAC:
11233 + cmd->mac_masks |= HIFN_MAC_CMD_ALG_MD5 |
11234 + HIFN_MAC_CMD_RESULT | HIFN_MAC_CMD_MODE_HMAC |
11235 + HIFN_MAC_CMD_POS_IPSEC | HIFN_MAC_CMD_TRUNC;
11236 + break;
11237 + case CRYPTO_SHA1:
11238 + cmd->mac_masks |= HIFN_MAC_CMD_ALG_SHA1 |
11239 + HIFN_MAC_CMD_RESULT | HIFN_MAC_CMD_MODE_HASH |
11240 + HIFN_MAC_CMD_POS_IPSEC;
11241 + break;
11242 + case CRYPTO_SHA1_HMAC:
11243 + cmd->mac_masks |= HIFN_MAC_CMD_ALG_SHA1 |
11244 + HIFN_MAC_CMD_RESULT | HIFN_MAC_CMD_MODE_HMAC |
11245 + HIFN_MAC_CMD_POS_IPSEC | HIFN_MAC_CMD_TRUNC;
11246 + break;
11249 + if (maccrd->crd_alg == CRYPTO_SHA1_HMAC ||
11250 + maccrd->crd_alg == CRYPTO_MD5_HMAC) {
11251 + cmd->mac_masks |= HIFN_MAC_CMD_NEW_KEY;
11252 + bcopy(maccrd->crd_key, cmd->mac, maccrd->crd_klen >> 3);
11253 + bzero(cmd->mac + (maccrd->crd_klen >> 3),
11254 + HIFN_MAC_KEY_LENGTH - (maccrd->crd_klen >> 3));
11258 + cmd->crp = crp;
11259 + cmd->session_num = session;
11260 + cmd->softc = sc;
11262 + err = hifn_crypto(sc, cmd, crp, hint);
11263 + if (!err) {
11264 + return 0;
11265 + } else if (err == ERESTART) {
11266 + /*
11267 + * There weren't enough resources to dispatch the request
11268 + * to the part. Notify the caller so they'll requeue this
11269 + * request and resubmit it again soon.
11270 + */
11271 +#ifdef HIFN_DEBUG
11272 + if (hifn_debug)
11273 + device_printf(sc->sc_dev, "requeue request\n");
11274 +#endif
11275 + kfree(cmd);
11276 + sc->sc_needwakeup |= CRYPTO_SYMQ;
11277 + return (err);
11280 +errout:
11281 + if (cmd != NULL)
11282 + kfree(cmd);
11283 + if (err == EINVAL)
11284 + hifnstats.hst_invalid++;
11285 + else
11286 + hifnstats.hst_nomem++;
11287 + crp->crp_etype = err;
11288 + crypto_done(crp);
11289 + return (err);
11292 +static void
11293 +hifn_abort(struct hifn_softc *sc)
11295 + struct hifn_dma *dma = sc->sc_dma;
11296 + struct hifn_command *cmd;
11297 + struct cryptop *crp;
11298 + int i, u;
11300 + DPRINTF("%s()\n", __FUNCTION__);
11302 + i = dma->resk; u = dma->resu;
11303 + while (u != 0) {
11304 + cmd = dma->hifn_commands[i];
11305 + KASSERT(cmd != NULL, ("hifn_abort: null command slot %u", i));
11306 + dma->hifn_commands[i] = NULL;
11307 + crp = cmd->crp;
11309 + if ((dma->resr[i].l & htole32(HIFN_D_VALID)) == 0) {
11310 + /* Salvage what we can. */
11311 + u_int8_t *macbuf;
11313 + if (cmd->base_masks & HIFN_BASE_CMD_MAC) {
11314 + macbuf = dma->result_bufs[i];
11315 + macbuf += 12;
11316 + } else
11317 + macbuf = NULL;
11318 + hifnstats.hst_opackets++;
11319 + hifn_callback(sc, cmd, macbuf);
11320 + } else {
11321 +#if 0
11322 + if (cmd->src_map == cmd->dst_map) {
11323 + bus_dmamap_sync(sc->sc_dmat, cmd->src_map,
11324 + BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
11325 + } else {
11326 + bus_dmamap_sync(sc->sc_dmat, cmd->src_map,
11327 + BUS_DMASYNC_POSTWRITE);
11328 + bus_dmamap_sync(sc->sc_dmat, cmd->dst_map,
11329 + BUS_DMASYNC_POSTREAD);
11331 +#endif
11333 + if (cmd->src_skb != cmd->dst_skb) {
11334 +#ifdef NOTYET
11335 + m_freem(cmd->src_m);
11336 + crp->crp_buf = (caddr_t)cmd->dst_m;
11337 +#else
11338 + device_printf(sc->sc_dev,
11339 + "%s,%d: CRYPTO_F_SKBUF src != dst not implemented\n",
11340 + __FILE__, __LINE__);
11341 +#endif
11344 + /* non-shared buffers cannot be restarted */
11345 + if (cmd->src_map != cmd->dst_map) {
11346 + /*
11347 + * XXX should be EAGAIN, delayed until
11348 + * after the reset.
11349 + */
11350 + crp->crp_etype = ENOMEM;
11351 + pci_unmap_buf(sc, &cmd->dst);
11352 + } else
11353 + crp->crp_etype = ENOMEM;
11355 + pci_unmap_buf(sc, &cmd->src);
11357 + kfree(cmd);
11358 + if (crp->crp_etype != EAGAIN)
11359 + crypto_done(crp);
11362 + if (++i == HIFN_D_RES_RSIZE)
11363 + i = 0;
11364 + u--;
11366 + dma->resk = i; dma->resu = u;
11368 + hifn_reset_board(sc, 1);
11369 + hifn_init_dma(sc);
11370 + hifn_init_pci_registers(sc);
11373 +static void
11374 +hifn_callback(struct hifn_softc *sc, struct hifn_command *cmd, u_int8_t *macbuf)
11376 + struct hifn_dma *dma = sc->sc_dma;
11377 + struct cryptop *crp = cmd->crp;
11378 + struct cryptodesc *crd;
11379 + int i, u, ivlen;
11381 + DPRINTF("%s()\n", __FUNCTION__);
11383 +#if 0
11384 + if (cmd->src_map == cmd->dst_map) {
11385 + bus_dmamap_sync(sc->sc_dmat, cmd->src_map,
11386 + BUS_DMASYNC_POSTWRITE | BUS_DMASYNC_POSTREAD);
11387 + } else {
11388 + bus_dmamap_sync(sc->sc_dmat, cmd->src_map,
11389 + BUS_DMASYNC_POSTWRITE);
11390 + bus_dmamap_sync(sc->sc_dmat, cmd->dst_map,
11391 + BUS_DMASYNC_POSTREAD);
11393 +#endif
11395 + if (crp->crp_flags & CRYPTO_F_SKBUF) {
11396 + if (cmd->src_skb != cmd->dst_skb) {
11397 +#ifdef NOTYET
11398 + crp->crp_buf = (caddr_t)cmd->dst_m;
11399 + totlen = cmd->src_mapsize;
11400 + for (m = cmd->dst_m; m != NULL; m = m->m_next) {
11401 + if (totlen < m->m_len) {
11402 + m->m_len = totlen;
11403 + totlen = 0;
11404 + } else
11405 + totlen -= m->m_len;
11407 + cmd->dst_m->m_pkthdr.len = cmd->src_m->m_pkthdr.len;
11408 + m_freem(cmd->src_m);
11409 +#else
11410 + device_printf(sc->sc_dev,
11411 + "%s,%d: CRYPTO_F_SKBUF src != dst not implemented\n",
11412 + __FILE__, __LINE__);
11413 +#endif
11417 + if (cmd->sloplen != 0) {
11418 + crypto_copyback(crp->crp_flags, crp->crp_buf,
11419 + cmd->src_mapsize - cmd->sloplen, cmd->sloplen,
11420 + (caddr_t)&dma->slop[cmd->slopidx]);
11423 + i = dma->dstk; u = dma->dstu;
11424 + while (u != 0) {
11425 + if (i == HIFN_D_DST_RSIZE)
11426 + i = 0;
11427 +#if 0
11428 + bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap,
11429 + BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
11430 +#endif
11431 + if (dma->dstr[i].l & htole32(HIFN_D_VALID)) {
11432 +#if 0
11433 + bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap,
11434 + BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
11435 +#endif
11436 + break;
11438 + i++, u--;
11440 + dma->dstk = i; dma->dstu = u;
11442 + hifnstats.hst_obytes += cmd->dst_mapsize;
11444 + if ((cmd->base_masks & (HIFN_BASE_CMD_CRYPT | HIFN_BASE_CMD_DECODE)) ==
11445 + HIFN_BASE_CMD_CRYPT) {
11446 + for (crd = crp->crp_desc; crd; crd = crd->crd_next) {
11447 + if (crd->crd_alg != CRYPTO_DES_CBC &&
11448 + crd->crd_alg != CRYPTO_3DES_CBC &&
11449 + crd->crd_alg != CRYPTO_AES_CBC)
11450 + continue;
11451 + ivlen = ((crd->crd_alg == CRYPTO_AES_CBC) ?
11452 + HIFN_AES_IV_LENGTH : HIFN_IV_LENGTH);
11453 + crypto_copydata(crp->crp_flags, crp->crp_buf,
11454 + crd->crd_skip + crd->crd_len - ivlen, ivlen,
11455 + cmd->softc->sc_sessions[cmd->session_num].hs_iv);
11456 + break;
11460 + if (macbuf != NULL) {
11461 + for (crd = crp->crp_desc; crd; crd = crd->crd_next) {
11462 + int len;
11464 + if (crd->crd_alg != CRYPTO_MD5 &&
11465 + crd->crd_alg != CRYPTO_SHA1 &&
11466 + crd->crd_alg != CRYPTO_MD5_HMAC &&
11467 + crd->crd_alg != CRYPTO_SHA1_HMAC) {
11468 + continue;
11470 + len = cmd->softc->sc_sessions[cmd->session_num].hs_mlen;
11471 + crypto_copyback(crp->crp_flags, crp->crp_buf,
11472 + crd->crd_inject, len, macbuf);
11473 + break;
11477 + if (cmd->src_map != cmd->dst_map)
11478 + pci_unmap_buf(sc, &cmd->dst);
11479 + pci_unmap_buf(sc, &cmd->src);
11480 + kfree(cmd);
11481 + crypto_done(crp);
11485 + * 7811 PB3 rev/2 parts lock-up on burst writes to Group 0
11486 + * and Group 1 registers; avoid conditions that could create
11487 + * burst writes by doing a read in between the writes.
11489 + * NB: The read we interpose is always to the same register;
11490 + * we do this because reading from an arbitrary (e.g. last)
11491 + * register may not always work.
11492 + */
11493 +static void
11494 +hifn_write_reg_0(struct hifn_softc *sc, bus_size_t reg, u_int32_t val)
11496 + if (sc->sc_flags & HIFN_IS_7811) {
11497 + if (sc->sc_bar0_lastreg == reg - 4)
11498 + readl(sc->sc_bar0 + HIFN_0_PUCNFG);
11499 + sc->sc_bar0_lastreg = reg;
11501 + writel(val, sc->sc_bar0 + reg);
11504 +static void
11505 +hifn_write_reg_1(struct hifn_softc *sc, bus_size_t reg, u_int32_t val)
11507 + if (sc->sc_flags & HIFN_IS_7811) {
11508 + if (sc->sc_bar1_lastreg == reg - 4)
11509 + readl(sc->sc_bar1 + HIFN_1_REVID);
11510 + sc->sc_bar1_lastreg = reg;
11512 + writel(val, sc->sc_bar1 + reg);
11516 +static struct pci_device_id hifn_pci_tbl[] = {
11517 + { PCI_VENDOR_HIFN, PCI_PRODUCT_HIFN_7951,
11518 + PCI_ANY_ID, PCI_ANY_ID, 0, 0, },
11519 + { PCI_VENDOR_HIFN, PCI_PRODUCT_HIFN_7955,
11520 + PCI_ANY_ID, PCI_ANY_ID, 0, 0, },
11521 + { PCI_VENDOR_HIFN, PCI_PRODUCT_HIFN_7956,
11522 + PCI_ANY_ID, PCI_ANY_ID, 0, 0, },
11523 + { PCI_VENDOR_NETSEC, PCI_PRODUCT_NETSEC_7751,
11524 + PCI_ANY_ID, PCI_ANY_ID, 0, 0, },
11525 + { PCI_VENDOR_INVERTEX, PCI_PRODUCT_INVERTEX_AEON,
11526 + PCI_ANY_ID, PCI_ANY_ID, 0, 0, },
11527 + { PCI_VENDOR_HIFN, PCI_PRODUCT_HIFN_7811,
11528 + PCI_ANY_ID, PCI_ANY_ID, 0, 0, },
11529 + /*
11530 + * Other vendors share this PCI ID as well, such as
11531 + * http://www.powercrypt.com, and obviously they also
11532 + * use the same key.
11533 + */
11534 + { PCI_VENDOR_HIFN, PCI_PRODUCT_HIFN_7751,
11535 + PCI_ANY_ID, PCI_ANY_ID, 0, 0, },
11536 + { 0, 0, 0, 0, 0, 0, }
11538 +MODULE_DEVICE_TABLE(pci, hifn_pci_tbl);
11540 +static struct pci_driver hifn_driver = {
11541 + .name = "hifn",
11542 + .id_table = hifn_pci_tbl,
11543 + .probe = hifn_probe,
11544 + .remove = hifn_remove,
11545 + /* add PM stuff here one day */
11548 +static int __init hifn_init (void)
11550 + struct hifn_softc *sc = NULL;
11551 + int rc;
11553 + DPRINTF("%s(%p)\n", __FUNCTION__, hifn_init);
11555 + rc = pci_register_driver(&hifn_driver);
11556 + pci_register_driver_compat(&hifn_driver, rc);
11558 + return rc;
11561 +static void __exit hifn_exit (void)
11563 + pci_unregister_driver(&hifn_driver);
11566 +module_init(hifn_init);
11567 +module_exit(hifn_exit);
11569 +MODULE_LICENSE("BSD");
11570 +MODULE_AUTHOR("David McCullough <david_mccullough@securecomputing.com>");
11571 +MODULE_DESCRIPTION("OCF driver for hifn PCI crypto devices");
11572 diff -Nur linux-2.6.30.orig/crypto/ocf/hifn/hifn7751reg.h linux-2.6.30/crypto/ocf/hifn/hifn7751reg.h
11573 --- linux-2.6.30.orig/crypto/ocf/hifn/hifn7751reg.h 1970-01-01 01:00:00.000000000 +0100
11574 +++ linux-2.6.30/crypto/ocf/hifn/hifn7751reg.h 2009-06-11 10:55:27.000000000 +0200
11575 @@ -0,0 +1,540 @@
11576 +/* $FreeBSD: src/sys/dev/hifn/hifn7751reg.h,v 1.7 2007/03/21 03:42:49 sam Exp $ */
11577 +/* $OpenBSD: hifn7751reg.h,v 1.35 2002/04/08 17:49:42 jason Exp $ */
11579 +/*-
11580 + * Invertex AEON / Hifn 7751 driver
11581 + * Copyright (c) 1999 Invertex Inc. All rights reserved.
11582 + * Copyright (c) 1999 Theo de Raadt
11583 + * Copyright (c) 2000-2001 Network Security Technologies, Inc.
11584 + * http://www.netsec.net
11586 + * Please send any comments, feedback, bug-fixes, or feature requests to
11587 + * software@invertex.com.
11589 + * Redistribution and use in source and binary forms, with or without
11590 + * modification, are permitted provided that the following conditions
11591 + * are met:
11593 + * 1. Redistributions of source code must retain the above copyright
11594 + * notice, this list of conditions and the following disclaimer.
11595 + * 2. Redistributions in binary form must reproduce the above copyright
11596 + * notice, this list of conditions and the following disclaimer in the
11597 + * documentation and/or other materials provided with the distribution.
11598 + * 3. The name of the author may not be used to endorse or promote products
11599 + * derived from this software without specific prior written permission.
11602 + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
11603 + * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
11604 + * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
11605 + * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
11606 + * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
11607 + * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
11608 + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
11609 + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
11610 + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
11611 + * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
11613 + * Effort sponsored in part by the Defense Advanced Research Projects
11614 + * Agency (DARPA) and Air Force Research Laboratory, Air Force
11615 + * Materiel Command, USAF, under agreement number F30602-01-2-0537.
11617 + */
11618 +#ifndef __HIFN_H__
11619 +#define __HIFN_H__
11622 + * Some PCI configuration space offset defines. The names were made
11623 + * identical to the names used by the Linux kernel.
11624 + */
11625 +#define HIFN_BAR0 PCIR_BAR(0) /* PUC register map */
11626 +#define HIFN_BAR1 PCIR_BAR(1) /* DMA register map */
11627 +#define HIFN_TRDY_TIMEOUT 0x40
11628 +#define HIFN_RETRY_TIMEOUT 0x41
11631 + * PCI vendor and device identifiers
11632 + * (the names are preserved from their OpenBSD source).
11633 + */
11634 +#define PCI_VENDOR_HIFN 0x13a3 /* Hifn */
11635 +#define PCI_PRODUCT_HIFN_7751 0x0005 /* 7751 */
11636 +#define PCI_PRODUCT_HIFN_6500 0x0006 /* 6500 */
11637 +#define PCI_PRODUCT_HIFN_7811 0x0007 /* 7811 */
11638 +#define PCI_PRODUCT_HIFN_7855 0x001f /* 7855 */
11639 +#define PCI_PRODUCT_HIFN_7951 0x0012 /* 7951 */
11640 +#define PCI_PRODUCT_HIFN_7955 0x0020 /* 7954/7955 */
11641 +#define PCI_PRODUCT_HIFN_7956 0x001d /* 7956 */
11643 +#define PCI_VENDOR_INVERTEX 0x14e1 /* Invertex */
11644 +#define PCI_PRODUCT_INVERTEX_AEON 0x0005 /* AEON */
11646 +#define PCI_VENDOR_NETSEC 0x1660 /* NetSec */
11647 +#define PCI_PRODUCT_NETSEC_7751 0x7751 /* 7751 */
11650 + * The values below should be multiples of 4 -- and be large enough to handle
11651 + * any command the driver implements.
11653 + * MAX_COMMAND = base command + mac command + encrypt command +
11654 + * mac-key + rc4-key
11655 + * MAX_RESULT = base result + mac result + mac + encrypt result
11656 + *
11658 + */
11659 +#define HIFN_MAX_COMMAND (8 + 8 + 8 + 64 + 260)
11660 +#define HIFN_MAX_RESULT (8 + 4 + 20 + 4)
11663 + * hifn_desc_t
11665 + * Holds an individual descriptor for any of the rings.
11666 + */
11667 +typedef struct hifn_desc {
11668 + volatile u_int32_t l; /* length and status bits */
11669 + volatile u_int32_t p;
11670 +} hifn_desc_t;
11673 + * Masks for the "length" field of struct hifn_desc.
11674 + */
11675 +#define HIFN_D_LENGTH 0x0000ffff /* length bit mask */
11676 +#define HIFN_D_MASKDONEIRQ 0x02000000 /* mask the done interrupt */
11677 +#define HIFN_D_DESTOVER 0x04000000 /* destination overflow */
11678 +#define HIFN_D_OVER 0x08000000 /* overflow */
11679 +#define HIFN_D_LAST 0x20000000 /* last descriptor in chain */
11680 +#define HIFN_D_JUMP 0x40000000 /* jump descriptor */
11681 +#define HIFN_D_VALID 0x80000000 /* valid bit */
11685 + * Processing Unit Registers (offset from BASEREG0)
11686 + */
11687 +#define HIFN_0_PUDATA 0x00 /* Processing Unit Data */
11688 +#define HIFN_0_PUCTRL 0x04 /* Processing Unit Control */
11689 +#define HIFN_0_PUISR 0x08 /* Processing Unit Interrupt Status */
11690 +#define HIFN_0_PUCNFG 0x0c /* Processing Unit Configuration */
11691 +#define HIFN_0_PUIER 0x10 /* Processing Unit Interrupt Enable */
11692 +#define HIFN_0_PUSTAT 0x14 /* Processing Unit Status/Chip ID */
11693 +#define HIFN_0_FIFOSTAT 0x18 /* FIFO Status */
11694 +#define HIFN_0_FIFOCNFG 0x1c /* FIFO Configuration */
11695 +#define HIFN_0_PUCTRL2 0x28 /* Processing Unit Control (2nd map) */
11696 +#define HIFN_0_MUTE1 0x80
11697 +#define HIFN_0_MUTE2 0x90
11698 +#define HIFN_0_SPACESIZE 0x100 /* Register space size */
11700 +/* Processing Unit Control Register (HIFN_0_PUCTRL) */
11701 +#define HIFN_PUCTRL_CLRSRCFIFO 0x0010 /* clear source fifo */
11702 +#define HIFN_PUCTRL_STOP 0x0008 /* stop pu */
11703 +#define HIFN_PUCTRL_LOCKRAM 0x0004 /* lock ram */
11704 +#define HIFN_PUCTRL_DMAENA 0x0002 /* enable dma */
11705 +#define HIFN_PUCTRL_RESET 0x0001 /* Reset processing unit */
11707 +/* Processing Unit Interrupt Status Register (HIFN_0_PUISR) */
11708 +#define HIFN_PUISR_CMDINVAL 0x8000 /* Invalid command interrupt */
11709 +#define HIFN_PUISR_DATAERR 0x4000 /* Data error interrupt */
11710 +#define HIFN_PUISR_SRCFIFO 0x2000 /* Source FIFO ready interrupt */
11711 +#define HIFN_PUISR_DSTFIFO 0x1000 /* Destination FIFO ready interrupt */
11712 +#define HIFN_PUISR_DSTOVER 0x0200 /* Destination overrun interrupt */
11713 +#define HIFN_PUISR_SRCCMD 0x0080 /* Source command interrupt */
11714 +#define HIFN_PUISR_SRCCTX 0x0040 /* Source context interrupt */
11715 +#define HIFN_PUISR_SRCDATA 0x0020 /* Source data interrupt */
11716 +#define HIFN_PUISR_DSTDATA 0x0010 /* Destination data interrupt */
11717 +#define HIFN_PUISR_DSTRESULT 0x0004 /* Destination result interrupt */
11719 +/* Processing Unit Configuration Register (HIFN_0_PUCNFG) */
11720 +#define HIFN_PUCNFG_DRAMMASK 0xe000 /* DRAM size mask */
11721 +#define HIFN_PUCNFG_DSZ_256K 0x0000 /* 256k dram */
11722 +#define HIFN_PUCNFG_DSZ_512K 0x2000 /* 512k dram */
11723 +#define HIFN_PUCNFG_DSZ_1M 0x4000 /* 1m dram */
11724 +#define HIFN_PUCNFG_DSZ_2M 0x6000 /* 2m dram */
11725 +#define HIFN_PUCNFG_DSZ_4M 0x8000 /* 4m dram */
11726 +#define HIFN_PUCNFG_DSZ_8M 0xa000 /* 8m dram */
11727 +#define HIFN_PUNCFG_DSZ_16M 0xc000 /* 16m dram */
11728 +#define HIFN_PUCNFG_DSZ_32M 0xe000 /* 32m dram */
11729 +#define HIFN_PUCNFG_DRAMREFRESH 0x1800 /* DRAM refresh rate mask */
11730 +#define HIFN_PUCNFG_DRFR_512 0x0000 /* 512 divisor of ECLK */
11731 +#define HIFN_PUCNFG_DRFR_256 0x0800 /* 256 divisor of ECLK */
11732 +#define HIFN_PUCNFG_DRFR_128 0x1000 /* 128 divisor of ECLK */
11733 +#define HIFN_PUCNFG_TCALLPHASES 0x0200 /* your guess is as good as mine... */
11734 +#define HIFN_PUCNFG_TCDRVTOTEM 0x0100 /* your guess is as good as mine... */
11735 +#define HIFN_PUCNFG_BIGENDIAN 0x0080 /* DMA big endian mode */
11736 +#define HIFN_PUCNFG_BUS32 0x0040 /* Bus width 32bits */
11737 +#define HIFN_PUCNFG_BUS16 0x0000 /* Bus width 16 bits */
11738 +#define HIFN_PUCNFG_CHIPID 0x0020 /* Allow chipid from PUSTAT */
11739 +#define HIFN_PUCNFG_DRAM 0x0010 /* Context RAM is DRAM */
11740 +#define HIFN_PUCNFG_SRAM 0x0000 /* Context RAM is SRAM */
11741 +#define HIFN_PUCNFG_COMPSING 0x0004 /* Enable single compression context */
11742 +#define HIFN_PUCNFG_ENCCNFG 0x0002 /* Encryption configuration */
11744 +/* Processing Unit Interrupt Enable Register (HIFN_0_PUIER) */
11745 +#define HIFN_PUIER_CMDINVAL 0x8000 /* Invalid command interrupt */
11746 +#define HIFN_PUIER_DATAERR 0x4000 /* Data error interrupt */
11747 +#define HIFN_PUIER_SRCFIFO 0x2000 /* Source FIFO ready interrupt */
11748 +#define HIFN_PUIER_DSTFIFO 0x1000 /* Destination FIFO ready interrupt */
11749 +#define HIFN_PUIER_DSTOVER 0x0200 /* Destination overrun interrupt */
11750 +#define HIFN_PUIER_SRCCMD 0x0080 /* Source command interrupt */
11751 +#define HIFN_PUIER_SRCCTX 0x0040 /* Source context interrupt */
11752 +#define HIFN_PUIER_SRCDATA 0x0020 /* Source data interrupt */
11753 +#define HIFN_PUIER_DSTDATA 0x0010 /* Destination data interrupt */
11754 +#define HIFN_PUIER_DSTRESULT 0x0004 /* Destination result interrupt */
11756 +/* Processing Unit Status Register/Chip ID (HIFN_0_PUSTAT) */
11757 +#define HIFN_PUSTAT_CMDINVAL 0x8000 /* Invalid command interrupt */
11758 +#define HIFN_PUSTAT_DATAERR 0x4000 /* Data error interrupt */
11759 +#define HIFN_PUSTAT_SRCFIFO 0x2000 /* Source FIFO ready interrupt */
11760 +#define HIFN_PUSTAT_DSTFIFO 0x1000 /* Destination FIFO ready interrupt */
11761 +#define HIFN_PUSTAT_DSTOVER 0x0200 /* Destination overrun interrupt */
11762 +#define HIFN_PUSTAT_SRCCMD 0x0080 /* Source command interrupt */
11763 +#define HIFN_PUSTAT_SRCCTX 0x0040 /* Source context interrupt */
11764 +#define HIFN_PUSTAT_SRCDATA 0x0020 /* Source data interrupt */
11765 +#define HIFN_PUSTAT_DSTDATA 0x0010 /* Destination data interrupt */
11766 +#define HIFN_PUSTAT_DSTRESULT 0x0004 /* Destination result interrupt */
11767 +#define HIFN_PUSTAT_CHIPREV 0x00ff /* Chip revision mask */
11768 +#define HIFN_PUSTAT_CHIPENA 0xff00 /* Chip enabled mask */
11769 +#define HIFN_PUSTAT_ENA_2 0x1100 /* Level 2 enabled */
11770 +#define HIFN_PUSTAT_ENA_1 0x1000 /* Level 1 enabled */
11771 +#define HIFN_PUSTAT_ENA_0 0x3000 /* Level 0 enabled */
11772 +#define HIFN_PUSTAT_REV_2 0x0020 /* 7751 PT6/2 */
11773 +#define HIFN_PUSTAT_REV_3 0x0030 /* 7751 PT6/3 */
11775 +/* FIFO Status Register (HIFN_0_FIFOSTAT) */
11776 +#define HIFN_FIFOSTAT_SRC 0x7f00 /* Source FIFO available */
11777 +#define HIFN_FIFOSTAT_DST 0x007f /* Destination FIFO available */
11779 +/* FIFO Configuration Register (HIFN_0_FIFOCNFG) */
11780 +#define HIFN_FIFOCNFG_THRESHOLD 0x0400 /* must be written as this value */
11783 + * DMA Interface Registers (offset from BASEREG1)
11784 + */
11785 +#define HIFN_1_DMA_CRAR 0x0c /* DMA Command Ring Address */
11786 +#define HIFN_1_DMA_SRAR 0x1c /* DMA Source Ring Address */
11787 +#define HIFN_1_DMA_RRAR 0x2c /* DMA Result Ring Address */
11788 +#define HIFN_1_DMA_DRAR 0x3c /* DMA Destination Ring Address */
11789 +#define HIFN_1_DMA_CSR 0x40 /* DMA Status and Control */
11790 +#define HIFN_1_DMA_IER 0x44 /* DMA Interrupt Enable */
11791 +#define HIFN_1_DMA_CNFG 0x48 /* DMA Configuration */
11792 +#define HIFN_1_PLL 0x4c /* 7955/7956: PLL config */
11793 +#define HIFN_1_7811_RNGENA 0x60 /* 7811: rng enable */
11794 +#define HIFN_1_7811_RNGCFG 0x64 /* 7811: rng config */
11795 +#define HIFN_1_7811_RNGDAT 0x68 /* 7811: rng data */
11796 +#define HIFN_1_7811_RNGSTS 0x6c /* 7811: rng status */
11797 +#define HIFN_1_DMA_CNFG2 0x6c /* 7955/7956: dma config #2 */
11798 +#define HIFN_1_7811_MIPSRST 0x94 /* 7811: MIPS reset */
11799 +#define HIFN_1_REVID 0x98 /* Revision ID */
11801 +#define HIFN_1_PUB_RESET 0x204 /* Public/RNG Reset */
11802 +#define HIFN_1_PUB_BASE 0x300 /* Public Base Address */
11803 +#define HIFN_1_PUB_OPLEN 0x304 /* 7951-compat Public Operand Length */
11804 +#define HIFN_1_PUB_OP 0x308 /* 7951-compat Public Operand */
11805 +#define HIFN_1_PUB_STATUS 0x30c /* 7951-compat Public Status */
11806 +#define HIFN_1_PUB_IEN 0x310 /* Public Interrupt enable */
11807 +#define HIFN_1_RNG_CONFIG 0x314 /* RNG config */
11808 +#define HIFN_1_RNG_DATA 0x318 /* RNG data */
11809 +#define HIFN_1_PUB_MODE 0x320 /* PK mode */
11810 +#define HIFN_1_PUB_FIFO_OPLEN 0x380 /* first element of oplen fifo */
11811 +#define HIFN_1_PUB_FIFO_OP 0x384 /* first element of op fifo */
11812 +#define HIFN_1_PUB_MEM 0x400 /* start of Public key memory */
11813 +#define HIFN_1_PUB_MEMEND 0xbff /* end of Public key memory */
11815 +/* DMA Status and Control Register (HIFN_1_DMA_CSR) */
11816 +#define HIFN_DMACSR_D_CTRLMASK 0xc0000000 /* Destination Ring Control */
11817 +#define HIFN_DMACSR_D_CTRL_NOP 0x00000000 /* Dest. Control: no-op */
11818 +#define HIFN_DMACSR_D_CTRL_DIS 0x40000000 /* Dest. Control: disable */
11819 +#define HIFN_DMACSR_D_CTRL_ENA 0x80000000 /* Dest. Control: enable */
11820 +#define HIFN_DMACSR_D_ABORT 0x20000000 /* Destination Ring PCI Abort */
11821 +#define HIFN_DMACSR_D_DONE 0x10000000 /* Destination Ring Done */
11822 +#define HIFN_DMACSR_D_LAST 0x08000000 /* Destination Ring Last */
11823 +#define HIFN_DMACSR_D_WAIT 0x04000000 /* Destination Ring Waiting */
11824 +#define HIFN_DMACSR_D_OVER 0x02000000 /* Destination Ring Overflow */
11825 +#define HIFN_DMACSR_R_CTRL 0x00c00000 /* Result Ring Control */
11826 +#define HIFN_DMACSR_R_CTRL_NOP 0x00000000 /* Result Control: no-op */
11827 +#define HIFN_DMACSR_R_CTRL_DIS 0x00400000 /* Result Control: disable */
11828 +#define HIFN_DMACSR_R_CTRL_ENA 0x00800000 /* Result Control: enable */
11829 +#define HIFN_DMACSR_R_ABORT 0x00200000 /* Result Ring PCI Abort */
11830 +#define HIFN_DMACSR_R_DONE 0x00100000 /* Result Ring Done */
11831 +#define HIFN_DMACSR_R_LAST 0x00080000 /* Result Ring Last */
11832 +#define HIFN_DMACSR_R_WAIT 0x00040000 /* Result Ring Waiting */
11833 +#define HIFN_DMACSR_R_OVER 0x00020000 /* Result Ring Overflow */
11834 +#define HIFN_DMACSR_S_CTRL 0x0000c000 /* Source Ring Control */
11835 +#define HIFN_DMACSR_S_CTRL_NOP 0x00000000 /* Source Control: no-op */
11836 +#define HIFN_DMACSR_S_CTRL_DIS 0x00004000 /* Source Control: disable */
11837 +#define HIFN_DMACSR_S_CTRL_ENA 0x00008000 /* Source Control: enable */
11838 +#define HIFN_DMACSR_S_ABORT 0x00002000 /* Source Ring PCI Abort */
11839 +#define HIFN_DMACSR_S_DONE 0x00001000 /* Source Ring Done */
11840 +#define HIFN_DMACSR_S_LAST 0x00000800 /* Source Ring Last */
11841 +#define HIFN_DMACSR_S_WAIT 0x00000400 /* Source Ring Waiting */
11842 +#define HIFN_DMACSR_ILLW 0x00000200 /* Illegal write (7811 only) */
11843 +#define HIFN_DMACSR_ILLR 0x00000100 /* Illegal read (7811 only) */
11844 +#define HIFN_DMACSR_C_CTRL 0x000000c0 /* Command Ring Control */
11845 +#define HIFN_DMACSR_C_CTRL_NOP 0x00000000 /* Command Control: no-op */
11846 +#define HIFN_DMACSR_C_CTRL_DIS 0x00000040 /* Command Control: disable */
11847 +#define HIFN_DMACSR_C_CTRL_ENA 0x00000080 /* Command Control: enable */
11848 +#define HIFN_DMACSR_C_ABORT 0x00000020 /* Command Ring PCI Abort */
11849 +#define HIFN_DMACSR_C_DONE 0x00000010 /* Command Ring Done */
11850 +#define HIFN_DMACSR_C_LAST 0x00000008 /* Command Ring Last */
11851 +#define HIFN_DMACSR_C_WAIT 0x00000004 /* Command Ring Waiting */
11852 +#define HIFN_DMACSR_PUBDONE 0x00000002 /* Public op done (7951 only) */
11853 +#define HIFN_DMACSR_ENGINE 0x00000001 /* Command Ring Engine IRQ */
11855 +/* DMA Interrupt Enable Register (HIFN_1_DMA_IER) */
11856 +#define HIFN_DMAIER_D_ABORT 0x20000000 /* Destination Ring PCIAbort */
11857 +#define HIFN_DMAIER_D_DONE 0x10000000 /* Destination Ring Done */
11858 +#define HIFN_DMAIER_D_LAST 0x08000000 /* Destination Ring Last */
11859 +#define HIFN_DMAIER_D_WAIT 0x04000000 /* Destination Ring Waiting */
11860 +#define HIFN_DMAIER_D_OVER 0x02000000 /* Destination Ring Overflow */
11861 +#define HIFN_DMAIER_R_ABORT 0x00200000 /* Result Ring PCI Abort */
11862 +#define HIFN_DMAIER_R_DONE 0x00100000 /* Result Ring Done */
11863 +#define HIFN_DMAIER_R_LAST 0x00080000 /* Result Ring Last */
11864 +#define HIFN_DMAIER_R_WAIT 0x00040000 /* Result Ring Waiting */
11865 +#define HIFN_DMAIER_R_OVER 0x00020000 /* Result Ring Overflow */
11866 +#define HIFN_DMAIER_S_ABORT 0x00002000 /* Source Ring PCI Abort */
11867 +#define HIFN_DMAIER_S_DONE 0x00001000 /* Source Ring Done */
11868 +#define HIFN_DMAIER_S_LAST 0x00000800 /* Source Ring Last */
11869 +#define HIFN_DMAIER_S_WAIT 0x00000400 /* Source Ring Waiting */
11870 +#define HIFN_DMAIER_ILLW 0x00000200 /* Illegal write (7811 only) */
11871 +#define HIFN_DMAIER_ILLR 0x00000100 /* Illegal read (7811 only) */
11872 +#define HIFN_DMAIER_C_ABORT 0x00000020 /* Command Ring PCI Abort */
11873 +#define HIFN_DMAIER_C_DONE 0x00000010 /* Command Ring Done */
11874 +#define HIFN_DMAIER_C_LAST 0x00000008 /* Command Ring Last */
11875 +#define HIFN_DMAIER_C_WAIT 0x00000004 /* Command Ring Waiting */
11876 +#define HIFN_DMAIER_PUBDONE 0x00000002 /* public op done (7951 only) */
11877 +#define HIFN_DMAIER_ENGINE 0x00000001 /* Engine IRQ */
11879 +/* DMA Configuration Register (HIFN_1_DMA_CNFG) */
11880 +#define HIFN_DMACNFG_BIGENDIAN 0x10000000 /* big endian mode */
11881 +#define HIFN_DMACNFG_POLLFREQ 0x00ff0000 /* Poll frequency mask */
11882 +#define HIFN_DMACNFG_UNLOCK 0x00000800
11883 +#define HIFN_DMACNFG_POLLINVAL 0x00000700 /* Invalid Poll Scalar */
11884 +#define HIFN_DMACNFG_LAST 0x00000010 /* Host control LAST bit */
11885 +#define HIFN_DMACNFG_MODE 0x00000004 /* DMA mode */
11886 +#define HIFN_DMACNFG_DMARESET 0x00000002 /* DMA Reset # */
11887 +#define HIFN_DMACNFG_MSTRESET 0x00000001 /* Master Reset # */
11889 +/* DMA Configuration Register (HIFN_1_DMA_CNFG2) */
11890 +#define HIFN_DMACNFG2_PKSWAP32 (1 << 19) /* swap the OPLEN/OP reg */
11891 +#define HIFN_DMACNFG2_PKSWAP8 (1 << 18) /* swap the bits of OPLEN/OP */
11892 +#define HIFN_DMACNFG2_BAR0_SWAP32 (1<<17) /* swap the bytes of BAR0 */
11893 +#define HIFN_DMACNFG2_BAR1_SWAP8 (1<<16) /* swap the bits of BAR0 */
11894 +#define HIFN_DMACNFG2_INIT_WRITE_BURST_SHIFT 12
11895 +#define HIFN_DMACNFG2_INIT_READ_BURST_SHIFT 8
11896 +#define HIFN_DMACNFG2_TGT_WRITE_BURST_SHIFT 4
11897 +#define HIFN_DMACNFG2_TGT_READ_BURST_SHIFT 0
11899 +/* 7811 RNG Enable Register (HIFN_1_7811_RNGENA) */
11900 +#define HIFN_7811_RNGENA_ENA 0x00000001 /* enable RNG */
11902 +/* 7811 RNG Config Register (HIFN_1_7811_RNGCFG) */
11903 +#define HIFN_7811_RNGCFG_PRE1 0x00000f00 /* first prescalar */
11904 +#define HIFN_7811_RNGCFG_OPRE 0x00000080 /* output prescalar */
11905 +#define HIFN_7811_RNGCFG_DEFL 0x00000f80 /* 2 words/ 1/100 sec */
11907 +/* 7811 RNG Status Register (HIFN_1_7811_RNGSTS) */
11908 +#define HIFN_7811_RNGSTS_RDY 0x00004000 /* two numbers in FIFO */
11909 +#define HIFN_7811_RNGSTS_UFL 0x00001000 /* rng underflow */
11911 +/* 7811 MIPS Reset Register (HIFN_1_7811_MIPSRST) */
11912 +#define HIFN_MIPSRST_BAR2SIZE 0xffff0000 /* sdram size */
11913 +#define HIFN_MIPSRST_GPRAMINIT 0x00008000 /* gpram can be accessed */
11914 +#define HIFN_MIPSRST_CRAMINIT 0x00004000 /* ctxram can be accessed */
11915 +#define HIFN_MIPSRST_LED2 0x00000400 /* external LED2 */
11916 +#define HIFN_MIPSRST_LED1 0x00000200 /* external LED1 */
11917 +#define HIFN_MIPSRST_LED0 0x00000100 /* external LED0 */
11918 +#define HIFN_MIPSRST_MIPSDIS 0x00000004 /* disable MIPS */
11919 +#define HIFN_MIPSRST_MIPSRST 0x00000002 /* warm reset MIPS */
11920 +#define HIFN_MIPSRST_MIPSCOLD 0x00000001 /* cold reset MIPS */
11922 +/* Public key reset register (HIFN_1_PUB_RESET) */
11923 +#define HIFN_PUBRST_RESET 0x00000001 /* reset public/rng unit */
11925 +/* Public operation register (HIFN_1_PUB_OP) */
11926 +#define HIFN_PUBOP_AOFFSET 0x0000003e /* A offset */
11927 +#define HIFN_PUBOP_BOFFSET 0x00000fc0 /* B offset */
11928 +#define HIFN_PUBOP_MOFFSET 0x0003f000 /* M offset */
11929 +#define HIFN_PUBOP_OP_MASK 0x003c0000 /* Opcode: */
11930 +#define HIFN_PUBOP_OP_NOP 0x00000000 /* NOP */
11931 +#define HIFN_PUBOP_OP_ADD 0x00040000 /* ADD */
11932 +#define HIFN_PUBOP_OP_ADDC 0x00080000 /* ADD w/carry */
11933 +#define HIFN_PUBOP_OP_SUB 0x000c0000 /* SUB */
11934 +#define HIFN_PUBOP_OP_SUBC 0x00100000 /* SUB w/carry */
11935 +#define HIFN_PUBOP_OP_MODADD 0x00140000 /* Modular ADD */
11936 +#define HIFN_PUBOP_OP_MODSUB 0x00180000 /* Modular SUB */
11937 +#define HIFN_PUBOP_OP_INCA 0x001c0000 /* INC A */
11938 +#define HIFN_PUBOP_OP_DECA 0x00200000 /* DEC A */
11939 +#define HIFN_PUBOP_OP_MULT 0x00240000 /* MULT */
11940 +#define HIFN_PUBOP_OP_MODMULT 0x00280000 /* Modular MULT */
11941 +#define HIFN_PUBOP_OP_MODRED 0x002c0000 /* Modular Red */
11942 +#define HIFN_PUBOP_OP_MODEXP 0x00300000 /* Modular Exp */
11944 +/* Public operand length register (HIFN_1_PUB_OPLEN) */
11945 +#define HIFN_PUBOPLEN_MODLEN 0x0000007f
11946 +#define HIFN_PUBOPLEN_EXPLEN 0x0003ff80
11947 +#define HIFN_PUBOPLEN_REDLEN 0x003c0000
11949 +/* Public status register (HIFN_1_PUB_STATUS) */
11950 +#define HIFN_PUBSTS_DONE 0x00000001 /* operation done */
11951 +#define HIFN_PUBSTS_CARRY 0x00000002 /* carry */
11952 +#define HIFN_PUBSTS_FIFO_EMPTY 0x00000100 /* fifo empty */
11953 +#define HIFN_PUBSTS_FIFO_FULL 0x00000200 /* fifo full */
11954 +#define HIFN_PUBSTS_FIFO_OVFL 0x00000400 /* fifo overflow */
11955 +#define HIFN_PUBSTS_FIFO_WRITE 0x000f0000 /* fifo write */
11956 +#define HIFN_PUBSTS_FIFO_READ 0x0f000000 /* fifo read */
11958 +/* Public interrupt enable register (HIFN_1_PUB_IEN) */
11959 +#define HIFN_PUBIEN_DONE 0x00000001 /* operation done interrupt */
11961 +/* Random number generator config register (HIFN_1_RNG_CONFIG) */
11962 +#define HIFN_RNGCFG_ENA 0x00000001 /* enable rng */
11965 + * Register offsets in register set 1
11966 + */
11968 +#define HIFN_UNLOCK_SECRET1 0xf4
11969 +#define HIFN_UNLOCK_SECRET2 0xfc
11972 + * PLL config register
11974 + * This register is present only on 7954/7955/7956 parts. It must be
11975 + * programmed according to the bus interface method used by the h/w.
11976 + * Note that the parts require a stable clock. Since the PCI clock
11977 + * may vary, the reference clock must usually be used. To avoid
11978 + * overclocking the core logic, setup must be done carefully, refer
11979 + * to the driver for details. The exact multiplier required varies
11980 + * by part and system configuration; refer to the Hifn documentation.
11981 + */
11982 +#define HIFN_PLL_REF_SEL 0x00000001 /* REF/HBI clk selection */
11983 +#define HIFN_PLL_BP 0x00000002 /* bypass (used during setup) */
11984 +/* bit 2 reserved */
11985 +#define HIFN_PLL_PK_CLK_SEL 0x00000008 /* public key clk select */
11986 +#define HIFN_PLL_PE_CLK_SEL 0x00000010 /* packet engine clk select */
11987 +/* bits 5-9 reserved */
11988 +#define HIFN_PLL_MBSET 0x00000400 /* must be set to 1 */
11989 +#define HIFN_PLL_ND 0x00003800 /* Fpll_ref multiplier select */
11990 +#define HIFN_PLL_ND_SHIFT 11
11991 +#define HIFN_PLL_ND_2 0x00000000 /* 2x */
11992 +#define HIFN_PLL_ND_4 0x00000800 /* 4x */
11993 +#define HIFN_PLL_ND_6 0x00001000 /* 6x */
11994 +#define HIFN_PLL_ND_8 0x00001800 /* 8x */
11995 +#define HIFN_PLL_ND_10 0x00002000 /* 10x */
11996 +#define HIFN_PLL_ND_12 0x00002800 /* 12x */
11997 +/* bits 14-15 reserved */
11998 +#define HIFN_PLL_IS 0x00010000 /* charge pump current select */
11999 +/* bits 17-31 reserved */
12002 + * Board configuration specifies only these bits.
12003 + */
12004 +#define HIFN_PLL_CONFIG (HIFN_PLL_IS|HIFN_PLL_ND|HIFN_PLL_REF_SEL)
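+/*
+ * Illustrative decoding only (uses the multiplier formula the drivers print,
+ * 2 + 2*((pll & HIFN_PLL_ND) >> HIFN_PLL_ND_SHIFT)): a value with
+ * HIFN_PLL_REF_SEL set and ND = HIFN_PLL_ND_4 (0x0800) selects the external
+ * reference clock and a 4x core multiplier.
+ */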
12007 + * Public Key Engine Mode Register
12008 + */
12009 +#define HIFN_PKMODE_HOSTINVERT (1 << 0) /* HOST INVERT */
12010 +#define HIFN_PKMODE_ENHANCED (1 << 1) /* Enable enhanced mode */
12013 +/*********************************************************************
12014 + * Structs for board commands
12016 + *********************************************************************/
12019 + * Structure to help build up the command data structure.
12020 + */
12021 +typedef struct hifn_base_command {
12022 + volatile u_int16_t masks;
12023 + volatile u_int16_t session_num;
12024 + volatile u_int16_t total_source_count;
12025 + volatile u_int16_t total_dest_count;
12026 +} hifn_base_command_t;
12028 +#define HIFN_BASE_CMD_MAC 0x0400
12029 +#define HIFN_BASE_CMD_CRYPT 0x0800
12030 +#define HIFN_BASE_CMD_DECODE 0x2000
12031 +#define HIFN_BASE_CMD_SRCLEN_M 0xc000
12032 +#define HIFN_BASE_CMD_SRCLEN_S 14
12033 +#define HIFN_BASE_CMD_DSTLEN_M 0x3000
12034 +#define HIFN_BASE_CMD_DSTLEN_S 12
12035 +#define HIFN_BASE_CMD_LENMASK_HI 0x30000
12036 +#define HIFN_BASE_CMD_LENMASK_LO 0x0ffff
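+/*
+ * Illustrative length split (derived from the masks above, not from driver
+ * code): a source length of 0x2abcd keeps its low 16 bits (0xabcd,
+ * HIFN_BASE_CMD_LENMASK_LO) in total_source_count, while the high bits
+ * (0x2, from HIFN_BASE_CMD_LENMASK_HI) are shifted left by
+ * HIFN_BASE_CMD_SRCLEN_S into the HIFN_BASE_CMD_SRCLEN_M (0xc000) field.
+ */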
12039 + * Structure to help build up the command data structure.
12040 + */
12041 +typedef struct hifn_crypt_command {
12042 + volatile u_int16_t masks;
12043 + volatile u_int16_t header_skip;
12044 + volatile u_int16_t source_count;
12045 + volatile u_int16_t reserved;
12046 +} hifn_crypt_command_t;
12048 +#define HIFN_CRYPT_CMD_ALG_MASK 0x0003 /* algorithm: */
12049 +#define HIFN_CRYPT_CMD_ALG_DES 0x0000 /* DES */
12050 +#define HIFN_CRYPT_CMD_ALG_3DES 0x0001 /* 3DES */
12051 +#define HIFN_CRYPT_CMD_ALG_RC4 0x0002 /* RC4 */
12052 +#define HIFN_CRYPT_CMD_ALG_AES 0x0003 /* AES */
12053 +#define HIFN_CRYPT_CMD_MODE_MASK 0x0018 /* Encrypt mode: */
12054 +#define HIFN_CRYPT_CMD_MODE_ECB 0x0000 /* ECB */
12055 +#define HIFN_CRYPT_CMD_MODE_CBC 0x0008 /* CBC */
12056 +#define HIFN_CRYPT_CMD_MODE_CFB 0x0010 /* CFB */
12057 +#define HIFN_CRYPT_CMD_MODE_OFB 0x0018 /* OFB */
12058 +#define HIFN_CRYPT_CMD_CLR_CTX 0x0040 /* clear context */
12059 +#define HIFN_CRYPT_CMD_NEW_KEY 0x0800 /* expect new key */
12060 +#define HIFN_CRYPT_CMD_NEW_IV 0x1000 /* expect new iv */
12062 +#define HIFN_CRYPT_CMD_SRCLEN_M 0xc000
12063 +#define HIFN_CRYPT_CMD_SRCLEN_S 14
12065 +#define HIFN_CRYPT_CMD_KSZ_MASK 0x0600 /* AES key size: */
12066 +#define HIFN_CRYPT_CMD_KSZ_128 0x0000 /* 128 bit */
12067 +#define HIFN_CRYPT_CMD_KSZ_192 0x0200 /* 192 bit */
12068 +#define HIFN_CRYPT_CMD_KSZ_256 0x0400 /* 256 bit */
12071 + * Structure to help build up the command data structure.
12072 + */
12073 +typedef struct hifn_mac_command {
12074 + volatile u_int16_t masks;
12075 + volatile u_int16_t header_skip;
12076 + volatile u_int16_t source_count;
12077 + volatile u_int16_t reserved;
12078 +} hifn_mac_command_t;
12080 +#define HIFN_MAC_CMD_ALG_MASK 0x0001
12081 +#define HIFN_MAC_CMD_ALG_SHA1 0x0000
12082 +#define HIFN_MAC_CMD_ALG_MD5 0x0001
12083 +#define HIFN_MAC_CMD_MODE_MASK 0x000c
12084 +#define HIFN_MAC_CMD_MODE_HMAC 0x0000
12085 +#define HIFN_MAC_CMD_MODE_SSL_MAC 0x0004
12086 +#define HIFN_MAC_CMD_MODE_HASH 0x0008
12087 +#define HIFN_MAC_CMD_MODE_FULL 0x0004
12088 +#define HIFN_MAC_CMD_TRUNC 0x0010
12089 +#define HIFN_MAC_CMD_RESULT 0x0020
12090 +#define HIFN_MAC_CMD_APPEND 0x0040
12091 +#define HIFN_MAC_CMD_SRCLEN_M 0xc000
12092 +#define HIFN_MAC_CMD_SRCLEN_S 14
12095 + * MAC POS IPsec initiates authentication after encryption on encodes
12096 + * and before decryption on decodes.
12097 + */
12098 +#define HIFN_MAC_CMD_POS_IPSEC 0x0200
12099 +#define HIFN_MAC_CMD_NEW_KEY 0x0800
12102 + * The poll frequency and poll scalar defines are unshifted values used
12103 + * to set fields in the DMA Configuration Register.
12104 + */
12105 +#ifndef HIFN_POLL_FREQUENCY
12106 +#define HIFN_POLL_FREQUENCY 0x1
12107 +#endif
12109 +#ifndef HIFN_POLL_SCALAR
12110 +#define HIFN_POLL_SCALAR 0x0
12111 +#endif
12113 +#define HIFN_MAX_SEGLEN 0xffff /* maximum dma segment len */
12114 +#define HIFN_MAX_DMALEN 0x3ffff /* maximum dma length */
12115 +#endif /* __HIFN_H__ */
12116 diff -Nur linux-2.6.30.orig/crypto/ocf/hifn/hifn7751var.h linux-2.6.30/crypto/ocf/hifn/hifn7751var.h
12117 --- linux-2.6.30.orig/crypto/ocf/hifn/hifn7751var.h 1970-01-01 01:00:00.000000000 +0100
12118 +++ linux-2.6.30/crypto/ocf/hifn/hifn7751var.h 2009-06-11 10:55:27.000000000 +0200
12119 @@ -0,0 +1,369 @@
12120 +/* $FreeBSD: src/sys/dev/hifn/hifn7751var.h,v 1.9 2007/03/21 03:42:49 sam Exp $ */
12121 +/* $OpenBSD: hifn7751var.h,v 1.42 2002/04/08 17:49:42 jason Exp $ */
12123 +/*-
12124 + * Invertex AEON / Hifn 7751 driver
12125 + * Copyright (c) 1999 Invertex Inc. All rights reserved.
12126 + * Copyright (c) 1999 Theo de Raadt
12127 + * Copyright (c) 2000-2001 Network Security Technologies, Inc.
12128 + * http://www.netsec.net
12130 + * Please send any comments, feedback, bug-fixes, or feature requests to
12131 + * software@invertex.com.
12133 + * Redistribution and use in source and binary forms, with or without
12134 + * modification, are permitted provided that the following conditions
12135 + * are met:
12137 + * 1. Redistributions of source code must retain the above copyright
12138 + * notice, this list of conditions and the following disclaimer.
12139 + * 2. Redistributions in binary form must reproduce the above copyright
12140 + * notice, this list of conditions and the following disclaimer in the
12141 + * documentation and/or other materials provided with the distribution.
12142 + * 3. The name of the author may not be used to endorse or promote products
12143 + * derived from this software without specific prior written permission.
12146 + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
12147 + * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
12148 + * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
12149 + * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
12150 + * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
12151 + * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
12152 + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
12153 + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
12154 + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
12155 + * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
12157 + * Effort sponsored in part by the Defense Advanced Research Projects
12158 + * Agency (DARPA) and Air Force Research Laboratory, Air Force
12159 + * Materiel Command, USAF, under agreement number F30602-01-2-0537.
12161 + */
12163 +#ifndef __HIFN7751VAR_H__
12164 +#define __HIFN7751VAR_H__
12166 +#ifdef __KERNEL__
12169 + * Some configurable values for the driver. By default command+result
12170 + * descriptor rings are the same size. The src+dst descriptor rings
12171 + * are sized at 3.5x the number of potential commands. Slower parts
12172 + * (e.g. 7951) tend to run out of src descriptors; faster parts (7811)
12173 + * run out of src+cmd/result descriptors. It's not clear that increasing the size
12174 + * of the descriptor rings helps performance significantly as other
12175 + * factors tend to come into play (e.g. copying misaligned packets).
12176 + */
12177 +#define HIFN_D_CMD_RSIZE 24 /* command descriptors */
12178 +#define HIFN_D_SRC_RSIZE ((HIFN_D_CMD_RSIZE * 7) / 2) /* source descriptors */
12179 +#define HIFN_D_RES_RSIZE HIFN_D_CMD_RSIZE /* result descriptors */
12180 +#define HIFN_D_DST_RSIZE HIFN_D_SRC_RSIZE /* destination descriptors */
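+/* With the defaults above: 24 command/result descriptors and 84 ((24*7)/2) source/destination descriptors. */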
12183 + * Length values for cryptography
12184 + */
12185 +#define HIFN_DES_KEY_LENGTH 8
12186 +#define HIFN_3DES_KEY_LENGTH 24
12187 +#define HIFN_MAX_CRYPT_KEY_LENGTH HIFN_3DES_KEY_LENGTH
12188 +#define HIFN_IV_LENGTH 8
12189 +#define HIFN_AES_IV_LENGTH 16
12190 +#define HIFN_MAX_IV_LENGTH HIFN_AES_IV_LENGTH
12193 + * Length values for authentication
12194 + */
12195 +#define HIFN_MAC_KEY_LENGTH 64
12196 +#define HIFN_MD5_LENGTH 16
12197 +#define HIFN_SHA1_LENGTH 20
12198 +#define HIFN_MAC_TRUNC_LENGTH 12
12200 +#define MAX_SCATTER 64
12203 + * Data structure to hold all 4 rings and any other ring related data.
12204 + */
12205 +struct hifn_dma {
12206 + /*
12207 +  * Descriptor rings. We add +1 to the size to accommodate the
12208 + * jump descriptor.
12209 + */
12210 + struct hifn_desc cmdr[HIFN_D_CMD_RSIZE+1];
12211 + struct hifn_desc srcr[HIFN_D_SRC_RSIZE+1];
12212 + struct hifn_desc dstr[HIFN_D_DST_RSIZE+1];
12213 + struct hifn_desc resr[HIFN_D_RES_RSIZE+1];
12215 + struct hifn_command *hifn_commands[HIFN_D_RES_RSIZE];
12217 + u_char command_bufs[HIFN_D_CMD_RSIZE][HIFN_MAX_COMMAND];
12218 + u_char result_bufs[HIFN_D_CMD_RSIZE][HIFN_MAX_RESULT];
12219 + u_int32_t slop[HIFN_D_CMD_RSIZE];
12221 + u_int64_t test_src, test_dst;
12223 + /*
12224 +  * Our current positions for insertion and removal from the descriptor
12225 + * rings.
12226 + */
12227 + int cmdi, srci, dsti, resi;
12228 + volatile int cmdu, srcu, dstu, resu;
12229 + int cmdk, srck, dstk, resk;
12232 +struct hifn_session {
12233 + int hs_used;
12234 + int hs_mlen;
12235 + u_int8_t hs_iv[HIFN_MAX_IV_LENGTH];
12238 +#define HIFN_RING_SYNC(sc, r, i, f) \
12239 + /* DAVIDM bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_dmamap, (f)) */
12241 +#define HIFN_CMDR_SYNC(sc, i, f) HIFN_RING_SYNC((sc), cmdr, (i), (f))
12242 +#define HIFN_RESR_SYNC(sc, i, f) HIFN_RING_SYNC((sc), resr, (i), (f))
12243 +#define HIFN_SRCR_SYNC(sc, i, f) HIFN_RING_SYNC((sc), srcr, (i), (f))
12244 +#define HIFN_DSTR_SYNC(sc, i, f) HIFN_RING_SYNC((sc), dstr, (i), (f))
12246 +#define HIFN_CMD_SYNC(sc, i, f) \
12247 + /* DAVIDM bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_dmamap, (f)) */
12249 +#define HIFN_RES_SYNC(sc, i, f) \
12250 + /* DAVIDM bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_dmamap, (f)) */
12252 +typedef int bus_size_t;
12255 + * Holds data specific to a single HIFN board.
12256 + */
12257 +struct hifn_softc {
12258 + softc_device_decl sc_dev;
12260 + struct pci_dev *sc_pcidev; /* PCI device pointer */
12261 + spinlock_t sc_mtx; /* per-instance lock */
12263 + int sc_num; /* for multiple devs */
12265 + ocf_iomem_t sc_bar0;
12266 + bus_size_t sc_bar0_lastreg;/* bar0 last reg written */
12267 + ocf_iomem_t sc_bar1;
12268 + bus_size_t sc_bar1_lastreg;/* bar1 last reg written */
12270 + int sc_irq;
12272 + u_int32_t sc_dmaier;
12273 + u_int32_t sc_drammodel; /* 1=dram, 0=sram */
12274 + u_int32_t sc_pllconfig; /* 7954/7955/7956 PLL config */
12276 + struct hifn_dma *sc_dma;
12277 + dma_addr_t sc_dma_physaddr;/* physical address of sc_dma */
12279 + int sc_dmansegs;
12280 + int32_t sc_cid;
12281 + int sc_maxses;
12282 + int sc_nsessions;
12283 + struct hifn_session *sc_sessions;
12284 + int sc_ramsize;
12285 + int sc_flags;
12286 +#define HIFN_HAS_RNG 0x1 /* includes random number generator */
12287 +#define HIFN_HAS_PUBLIC 0x2 /* includes public key support */
12288 +#define HIFN_HAS_AES 0x4 /* includes AES support */
12289 +#define HIFN_IS_7811 0x8 /* Hifn 7811 part */
12290 +#define HIFN_IS_7956 0x10 /* Hifn 7956/7955 don't have SDRAM */
12292 + struct timer_list sc_tickto; /* for managing DMA */
12294 + int sc_rngfirst;
12295 + int sc_rnghz; /* RNG polling frequency */
12297 + int sc_c_busy; /* command ring busy */
12298 + int sc_s_busy; /* source data ring busy */
12299 + int sc_d_busy; /* destination data ring busy */
12300 + int sc_r_busy; /* result ring busy */
12301 + int sc_active; /* for initial countdown */
12302 + int sc_needwakeup; /* ops q'd waiting on resources */
12303 + int sc_curbatch; /* # ops submitted w/o int */
12304 + int sc_suspended;
12305 +#ifdef HIFN_VULCANDEV
12306 + struct cdev *sc_pkdev;
12307 +#endif
12310 +#define HIFN_LOCK(_sc) spin_lock_irqsave(&(_sc)->sc_mtx, l_flags)
12311 +#define HIFN_UNLOCK(_sc) spin_unlock_irqrestore(&(_sc)->sc_mtx, l_flags)
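+/* NB: both lock macros expand to spin_lock_irqsave()/spin_unlock_irqrestore() and assume the caller declares a local "unsigned long l_flags". */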
12314 + * hifn_command_t
12316 + * This is the control structure used to pass commands to hifn_encrypt().
12318 + * flags
12319 + * -----
12320 + * Flags is the bitwise "or" values for command configuration. A single
12321 + * encrypt direction needs to be set:
12323 + * HIFN_ENCODE or HIFN_DECODE
12325 + * To use cryptography, a single crypto algorithm must be included:
12327 + * HIFN_CRYPT_3DES or HIFN_CRYPT_DES
12329 + * To use authentication, a single MAC algorithm must be included:
12331 + * HIFN_MAC_MD5 or HIFN_MAC_SHA1
12333 + * By default MD5 uses a 16 byte hash and SHA-1 uses a 20 byte hash.
12334 + * If the value below is set, hash values are truncated or assumed
12335 + * truncated to 12 bytes:
12337 + * HIFN_MAC_TRUNC
12339 + * Keys for encryption and authentication can be sent as part of a command,
12340 + * or the last key value used with a particular session can be retrieved
12341 + * and used again if either of these flags are not specified.
12343 + * HIFN_CRYPT_NEW_KEY, HIFN_MAC_NEW_KEY
12345 + * session_num
12346 + * -----------
12347 + * A number between 0 and 2048 (for DRAM models) or a number between
12348 + * 0 and 768 (for SRAM models). Those who don't want to use session
12349 + * numbers should leave value at zero and send a new crypt key and/or
12350 + * new MAC key on every command. If you use session numbers and
12351 + * don't send a key with a command, the last key sent for that same
12352 + * session number will be used.
12354 + * Warning: Using session numbers and multiboard at the same time
12355 + * is currently broken.
12357 + * mbuf
12358 + * ----
12359 + * Either fill in the mbuf pointer and npa=0 or
12360 + * fill packp[] and packl[] and set npa to > 0
12361 + *
12362 + * mac_header_skip
12363 + * ---------------
12364 + * The number of bytes of the source_buf that are skipped over before
12365 + * authentication begins. This must be a number between 0 and 2^16-1
12366 + * and can be used by IPsec implementers to skip over IP headers.
12367 + * *** Value ignored if authentication not used ***
12369 + * crypt_header_skip
12370 + * -----------------
12371 + * The number of bytes of the source_buf that are skipped over before
12372 + * the cryptographic operation begins. This must be a number between 0
12373 + * and 2^16-1. For IPsec, this number will always be 8 bytes larger
12374 + * than the auth_header_skip (to skip over the ESP header).
12375 + * *** Value ignored if cryptography not used ***
12377 + */
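+/*
+ * Illustrative only (not taken from the driver code): an encrypt-direction
+ * 3DES-CBC + HMAC-SHA1 request could be described with something like
+ *
+ *   cmd->base_masks = HIFN_BASE_CMD_CRYPT | HIFN_BASE_CMD_MAC;
+ *   cmd->cry_masks  = HIFN_CRYPT_CMD_ALG_3DES | HIFN_CRYPT_CMD_MODE_CBC;
+ *   cmd->mac_masks  = HIFN_MAC_CMD_ALG_SHA1 | HIFN_MAC_CMD_MODE_HMAC |
+ *                     HIFN_MAC_CMD_RESULT | HIFN_MAC_CMD_TRUNC;
+ *
+ * with HIFN_BASE_CMD_DECODE added to base_masks for the decrypt direction.
+ */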
12378 +struct hifn_operand {
12379 + union {
12380 + struct sk_buff *skb;
12381 + struct uio *io;
12382 + unsigned char *buf;
12383 + } u;
12384 + void *map;
12385 + bus_size_t mapsize;
12386 + int nsegs;
12387 + struct {
12388 + dma_addr_t ds_addr;
12389 + int ds_len;
12390 + } segs[MAX_SCATTER];
12393 +struct hifn_command {
12394 + u_int16_t session_num;
12395 + u_int16_t base_masks, cry_masks, mac_masks;
12396 + u_int8_t iv[HIFN_MAX_IV_LENGTH], *ck, mac[HIFN_MAC_KEY_LENGTH];
12397 + int cklen;
12398 + int sloplen, slopidx;
12400 + struct hifn_operand src;
12401 + struct hifn_operand dst;
12403 + struct hifn_softc *softc;
12404 + struct cryptop *crp;
12405 + struct cryptodesc *enccrd, *maccrd;
12408 +#define src_skb src.u.skb
12409 +#define src_io src.u.io
12410 +#define src_map src.map
12411 +#define src_mapsize src.mapsize
12412 +#define src_segs src.segs
12413 +#define src_nsegs src.nsegs
12414 +#define src_buf src.u.buf
12416 +#define dst_skb dst.u.skb
12417 +#define dst_io dst.u.io
12418 +#define dst_map dst.map
12419 +#define dst_mapsize dst.mapsize
12420 +#define dst_segs dst.segs
12421 +#define dst_nsegs dst.nsegs
12422 +#define dst_buf dst.u.buf
12425 + * Return values for hifn_crypto()
12426 + */
12427 +#define HIFN_CRYPTO_SUCCESS 0
12428 +#define HIFN_CRYPTO_BAD_INPUT (-1)
12429 +#define HIFN_CRYPTO_RINGS_FULL (-2)
12431 +/**************************************************************************
12433 + * Function: hifn_crypto
12435 + * Purpose: Called by external drivers to begin an encryption on the
12436 + * HIFN board.
12438 + * Blocking/Non-blocking Issues
12439 + * ============================
12440 + * The driver cannot block in hifn_crypto (no calls to tsleep) currently.
12441 + * hifn_crypto() returns HIFN_CRYPTO_RINGS_FULL if there is not enough
12442 + * room in any of the rings for the request to proceed.
12444 + * Return Values
12445 + * =============
12446 + * 0 for success, negative values on error
12448 + * Defines for negative error codes are:
12449 + *
12450 + * HIFN_CRYPTO_BAD_INPUT : The passed in command had invalid settings.
12451 + * HIFN_CRYPTO_RINGS_FULL : All DMA rings were full and non-blocking
12452 + * behaviour was requested.
12454 + *************************************************************************/
12457 + * Convert back and forth from 'sid' to 'card' and 'session'
12458 + */
12459 +#define HIFN_CARD(sid) (((sid) & 0xf0000000) >> 28)
12460 +#define HIFN_SESSION(sid) ((sid) & 0x000007ff)
12461 +#define HIFN_SID(crd,ses) (((crd) << 28) | ((ses) & 0x7ff))
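+/* Example: HIFN_SID(1, 5) == 0x10000005, from which HIFN_CARD() recovers 1 and HIFN_SESSION() recovers 5. */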
12463 +#endif /* _KERNEL */
12465 +struct hifn_stats {
12466 + u_int64_t hst_ibytes;
12467 + u_int64_t hst_obytes;
12468 + u_int32_t hst_ipackets;
12469 + u_int32_t hst_opackets;
12470 + u_int32_t hst_invalid;
12471 + u_int32_t hst_nomem; /* malloc or one of hst_nomem_* */
12472 + u_int32_t hst_abort;
12473 + u_int32_t hst_noirq; /* IRQ for no reason */
12474 + u_int32_t hst_totbatch; /* ops submitted w/o interrupt */
12475 + u_int32_t hst_maxbatch; /* max ops submitted together */
12476 + u_int32_t hst_unaligned; /* unaligned src caused copy */
12477 + /*
12478 + * The following divides hst_nomem into more specific buckets.
12479 + */
12480 + u_int32_t hst_nomem_map; /* bus_dmamap_create failed */
12481 + u_int32_t hst_nomem_load; /* bus_dmamap_load_* failed */
12482 + u_int32_t hst_nomem_mbuf; /* MGET* failed */
12483 + u_int32_t hst_nomem_mcl; /* MCLGET* failed */
12484 + u_int32_t hst_nomem_cr; /* out of command/result descriptor */
12485 + u_int32_t hst_nomem_sd; /* out of src/dst descriptors */
12488 +#endif /* __HIFN7751VAR_H__ */
12489 diff -Nur linux-2.6.30.orig/crypto/ocf/hifn/hifnHIPP.c linux-2.6.30/crypto/ocf/hifn/hifnHIPP.c
12490 --- linux-2.6.30.orig/crypto/ocf/hifn/hifnHIPP.c 1970-01-01 01:00:00.000000000 +0100
12491 +++ linux-2.6.30/crypto/ocf/hifn/hifnHIPP.c 2009-06-11 10:55:27.000000000 +0200
12492 @@ -0,0 +1,420 @@
12493 +/*-
12494 + * Driver for Hifn HIPP-I/II chipset
12495 + * Copyright (c) 2006 Michael Richardson <mcr@xelerance.com>
12497 + * Redistribution and use in source and binary forms, with or without
12498 + * modification, are permitted provided that the following conditions
12499 + * are met:
12501 + * 1. Redistributions of source code must retain the above copyright
12502 + * notice, this list of conditions and the following disclaimer.
12503 + * 2. Redistributions in binary form must reproduce the above copyright
12504 + * notice, this list of conditions and the following disclaimer in the
12505 + * documentation and/or other materials provided with the distribution.
12506 + * 3. The name of the author may not be used to endorse or promote products
12507 + * derived from this software without specific prior written permission.
12509 + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
12510 + * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
12511 + * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
12512 + * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
12513 + * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
12514 + * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
12515 + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
12516 + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
12517 + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
12518 + * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
12520 + * Effort sponsored by Hifn Inc.
12522 + */
12525 + * Driver for various Hifn encryption processors.
12526 + */
12527 +#ifndef AUTOCONF_INCLUDED
12528 +#include <linux/config.h>
12529 +#endif
12530 +#include <linux/module.h>
12531 +#include <linux/init.h>
12532 +#include <linux/list.h>
12533 +#include <linux/slab.h>
12534 +#include <linux/wait.h>
12535 +#include <linux/sched.h>
12536 +#include <linux/pci.h>
12537 +#include <linux/delay.h>
12538 +#include <linux/interrupt.h>
12539 +#include <linux/spinlock.h>
12540 +#include <linux/random.h>
12541 +#include <linux/version.h>
12542 +#include <linux/skbuff.h>
12543 +#include <linux/uio.h>
12544 +#include <linux/sysfs.h>
12545 +#include <linux/miscdevice.h>
12546 +#include <asm/io.h>
12548 +#include <cryptodev.h>
12550 +#include "hifnHIPPreg.h"
12551 +#include "hifnHIPPvar.h"
12553 +#if 1
12554 +#define DPRINTF(a...) if (hipp_debug) { \
12555 + printk("%s: ", sc ? \
12556 + device_get_nameunit(sc->sc_dev) : "hifn"); \
12557 + printk(a); \
12558 + } else
12559 +#else
12560 +#define DPRINTF(a...)
12561 +#endif
12563 +typedef int bus_size_t;
12565 +static inline int
12566 +pci_get_revid(struct pci_dev *dev)
12568 + u8 rid = 0;
12569 + pci_read_config_byte(dev, PCI_REVISION_ID, &rid);
12570 + return rid;
12573 +#define debug hipp_debug
12574 +int hipp_debug = 0;
12575 +module_param(hipp_debug, int, 0644);
12576 +MODULE_PARM_DESC(hipp_debug, "Enable debug");
12578 +int hipp_maxbatch = 1;
12579 +module_param(hipp_maxbatch, int, 0644);
12580 +MODULE_PARM_DESC(hipp_maxbatch, "max ops to batch w/o interrupt");
12582 +static int hipp_probe(struct pci_dev *dev, const struct pci_device_id *ent);
12583 +static void hipp_remove(struct pci_dev *dev);
12584 +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,19)
12585 +static irqreturn_t hipp_intr(int irq, void *arg);
12586 +#else
12587 +static irqreturn_t hipp_intr(int irq, void *arg, struct pt_regs *regs);
12588 +#endif
12590 +static int hipp_num_chips = 0;
12591 +static struct hipp_softc *hipp_chip_idx[HIPP_MAX_CHIPS];
12593 +static int hipp_newsession(device_t, u_int32_t *, struct cryptoini *);
12594 +static int hipp_freesession(device_t, u_int64_t);
12595 +static int hipp_process(device_t, struct cryptop *, int);
12597 +static device_method_t hipp_methods = {
12598 + /* crypto device methods */
12599 + DEVMETHOD(cryptodev_newsession, hipp_newsession),
12600 + DEVMETHOD(cryptodev_freesession,hipp_freesession),
12601 + DEVMETHOD(cryptodev_process, hipp_process),
12604 +static __inline u_int32_t
12605 +READ_REG(struct hipp_softc *sc, unsigned int barno, bus_size_t reg)
12607 + u_int32_t v = readl(sc->sc_bar[barno] + reg);
12608 + //sc->sc_bar0_lastreg = (bus_size_t) -1;
12609 + return (v);
12611 +static __inline void
12612 +WRITE_REG(struct hipp_softc *sc, unsigned int barno, bus_size_t reg, u_int32_t val)
12614 + writel(val, sc->sc_bar[barno] + reg);
12617 +#define READ_REG_0(sc, reg) READ_REG(sc, 0, reg)
12618 +#define WRITE_REG_0(sc, reg, val) WRITE_REG(sc,0, reg, val)
12619 +#define READ_REG_1(sc, reg) READ_REG(sc, 1, reg)
12620 +#define WRITE_REG_1(sc, reg, val) WRITE_REG(sc,1, reg, val)
12622 +static int
12623 +hipp_newsession(device_t dev, u_int32_t *sidp, struct cryptoini *cri)
12625 + return EINVAL;
12628 +static int
12629 +hipp_freesession(device_t dev, u_int64_t tid)
12631 + return EINVAL;
12634 +static int
12635 +hipp_process(device_t dev, struct cryptop *crp, int hint)
12637 + return EINVAL;
12640 +static const char*
12641 +hipp_partname(struct hipp_softc *sc, char buf[128], size_t blen)
12643 + char *n = NULL;
12645 + switch (pci_get_vendor(sc->sc_pcidev)) {
12646 + case PCI_VENDOR_HIFN:
12647 + switch (pci_get_device(sc->sc_pcidev)) {
12648 + case PCI_PRODUCT_HIFN_7855: n = "Hifn 7855"; break;
12649 + case PCI_PRODUCT_HIFN_8155: n = "Hifn 8155"; break;
12650 + case PCI_PRODUCT_HIFN_6500: n = "Hifn 6500"; break;
12654 + if(n==NULL) {
12655 + snprintf(buf, blen, "VID=%02x,PID=%02x",
12656 + pci_get_vendor(sc->sc_pcidev),
12657 + pci_get_device(sc->sc_pcidev));
12658 + } else {
12659 + buf[0]='\0';
12660 + strncat(buf, n, blen);
12662 + return buf;
12665 +struct hipp_fs_entry {
12666 + struct attribute attr;
12667 + /* other stuff */
12671 +static ssize_t
12672 +cryptoid_show(struct device *dev,
12673 + struct device_attribute *attr,
12674 + char *buf)
12676 + struct hipp_softc *sc;
12678 + sc = pci_get_drvdata(to_pci_dev (dev));
12679 + return sprintf (buf, "%d\n", sc->sc_cid);
12682 +struct device_attribute hipp_dev_cryptoid = __ATTR_RO(cryptoid);
12685 + * Attach an interface that successfully probed.
12686 + */
12687 +static int
12688 +hipp_probe(struct pci_dev *dev, const struct pci_device_id *ent)
12690 + struct hipp_softc *sc = NULL;
12691 + int i;
12692 + //char rbase;
12693 + //u_int16_t ena;
12694 + int rev;
12695 + //int rseg;
12696 + int rc;
12698 + DPRINTF("%s()\n", __FUNCTION__);
12700 + if (pci_enable_device(dev) < 0)
12701 + return(-ENODEV);
12703 + if (pci_set_mwi(dev))
12704 + return(-ENODEV);
12706 + if (!dev->irq) {
12707 + printk("hifn: found device with no IRQ assigned. check BIOS settings!");
12708 + pci_disable_device(dev);
12709 + return(-ENODEV);
12712 + sc = (struct hipp_softc *) kmalloc(sizeof(*sc), GFP_KERNEL);
12713 + if (!sc)
12714 + return(-ENOMEM);
12715 + memset(sc, 0, sizeof(*sc));
12717 + softc_device_init(sc, "hifn-hipp", hipp_num_chips, hipp_methods);
12719 + sc->sc_pcidev = dev;
12720 + sc->sc_irq = -1;
12721 + sc->sc_cid = -1;
12722 + sc->sc_num = hipp_num_chips++;
12724 + if (sc->sc_num < HIPP_MAX_CHIPS)
12725 + hipp_chip_idx[sc->sc_num] = sc;
12727 + pci_set_drvdata(sc->sc_pcidev, sc);
12729 + spin_lock_init(&sc->sc_mtx);
12731 + /*
12732 + * Setup PCI resources.
12733 + * The READ_REG_0, WRITE_REG_0, READ_REG_1,
12734 + * and WRITE_REG_1 macros throughout the driver are used
12735 + * to permit better debugging.
12736 + */
12737 + for(i=0; i<4; i++) {
12738 + unsigned long mem_start, mem_len;
12739 + mem_start = pci_resource_start(sc->sc_pcidev, i);
12740 + mem_len = pci_resource_len(sc->sc_pcidev, i);
12741 + sc->sc_barphy[i] = (caddr_t)mem_start;
12742 + sc->sc_bar[i] = (ocf_iomem_t) ioremap(mem_start, mem_len);
12743 + if (!sc->sc_bar[i]) {
12744 + device_printf(sc->sc_dev, "cannot map bar%d register space\n", i);
12745 + goto fail;
12749 + //hipp_reset_board(sc, 0);
12750 + pci_set_master(sc->sc_pcidev);
12752 + /*
12753 + * Arrange the interrupt line.
12754 + */
12755 + rc = request_irq(dev->irq, hipp_intr, IRQF_SHARED, "hifn", sc);
12756 + if (rc) {
12757 + device_printf(sc->sc_dev, "could not map interrupt: %d\n", rc);
12758 + goto fail;
12760 + sc->sc_irq = dev->irq;
12762 + rev = READ_REG_1(sc, HIPP_1_REVID) & 0xffff;
12765 + char b[32];
12766 + device_printf(sc->sc_dev, "%s, rev %u",
12767 + hipp_partname(sc, b, sizeof(b)), rev);
12770 +#if 0
12771 + if (sc->sc_flags & HIFN_IS_7956)
12772 + printf(", pll=0x%x<%s clk, %ux mult>",
12773 + sc->sc_pllconfig,
12774 + sc->sc_pllconfig & HIFN_PLL_REF_SEL ? "ext" : "pci",
12775 + 2 + 2*((sc->sc_pllconfig & HIFN_PLL_ND) >> 11));
12776 +#endif
12777 + printf("\n");
12779 + sc->sc_cid = crypto_get_driverid(softc_get_device(sc),CRYPTOCAP_F_HARDWARE);
12780 + if (sc->sc_cid < 0) {
12781 + device_printf(sc->sc_dev, "could not get crypto driver id\n");
12782 + goto fail;
12785 +#if 0 /* cannot work with a non-GPL module */
12786 + /* make a sysfs entry to let the world know what entry we got */
12787 + sysfs_create_file(&sc->sc_pcidev->dev.kobj, &hipp_dev_cryptoid.attr);
12788 +#endif
12790 +#if 0
12791 + init_timer(&sc->sc_tickto);
12792 + sc->sc_tickto.function = hifn_tick;
12793 + sc->sc_tickto.data = (unsigned long) sc->sc_num;
12794 + mod_timer(&sc->sc_tickto, jiffies + HZ);
12795 +#endif
12797 +#if 0 /* no code here yet ?? */
12798 + crypto_register(sc->sc_cid, CRYPTO_3DES_CBC, 0, 0);
12799 +#endif
12801 + return (0);
12803 +fail:
12804 + if (sc->sc_cid >= 0)
12805 + crypto_unregister_all(sc->sc_cid);
12806 + if (sc->sc_irq != -1)
12807 + free_irq(sc->sc_irq, sc);
12809 +#if 0
12810 + if (sc->sc_dma) {
12811 + /* Turn off DMA polling */
12812 + WRITE_REG_1(sc, HIFN_1_DMA_CNFG, HIFN_DMACNFG_MSTRESET |
12813 + HIFN_DMACNFG_DMARESET | HIFN_DMACNFG_MODE);
12815 + pci_free_consistent(sc->sc_pcidev,
12816 + sizeof(*sc->sc_dma),
12817 + sc->sc_dma, sc->sc_dma_physaddr);
12819 +#endif
12820 + kfree(sc);
12821 + return (-ENXIO);
12825 + * Detach an interface that successfully probed.
12826 + */
12827 +static void
12828 +hipp_remove(struct pci_dev *dev)
12830 + struct hipp_softc *sc = pci_get_drvdata(dev);
12831 + unsigned long l_flags;
12833 + DPRINTF("%s()\n", __FUNCTION__);
12835 + /* disable interrupts */
12836 + HIPP_LOCK(sc);
12838 +#if 0
12839 + WRITE_REG_1(sc, HIFN_1_DMA_IER, 0);
12840 + HIFN_UNLOCK(sc);
12842 + /*XXX other resources */
12843 + del_timer_sync(&sc->sc_tickto);
12845 + /* Turn off DMA polling */
12846 + WRITE_REG_1(sc, HIFN_1_DMA_CNFG, HIFN_DMACNFG_MSTRESET |
12847 + HIFN_DMACNFG_DMARESET | HIFN_DMACNFG_MODE);
12848 +#endif
12850 + crypto_unregister_all(sc->sc_cid);
12852 + free_irq(sc->sc_irq, sc);
12854 +#if 0
12855 + pci_free_consistent(sc->sc_pcidev, sizeof(*sc->sc_dma),
12856 + sc->sc_dma, sc->sc_dma_physaddr);
12857 +#endif
12860 +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,19)
12861 +static irqreturn_t hipp_intr(int irq, void *arg)
12862 +#else
12863 +static irqreturn_t hipp_intr(int irq, void *arg, struct pt_regs *regs)
12864 +#endif
12866 + struct hipp_softc *sc = arg;
12868 + sc = sc; /* shut up compiler */
12870 + return IRQ_HANDLED;
12873 +static struct pci_device_id hipp_pci_tbl[] = {
12874 + { PCI_VENDOR_HIFN, PCI_PRODUCT_HIFN_7855,
12875 + PCI_ANY_ID, PCI_ANY_ID, 0, 0, },
12876 + { PCI_VENDOR_HIFN, PCI_PRODUCT_HIFN_8155,
12877 + PCI_ANY_ID, PCI_ANY_ID, 0, 0, },
12879 +MODULE_DEVICE_TABLE(pci, hipp_pci_tbl);
12881 +static struct pci_driver hipp_driver = {
12882 + .name = "hipp",
12883 + .id_table = hipp_pci_tbl,
12884 + .probe = hipp_probe,
12885 + .remove = hipp_remove,
12886 + /* add PM stuff here one day */
12889 +static int __init hipp_init (void)
12891 + struct hipp_softc *sc = NULL;
12892 + int rc;
12894 + DPRINTF("%s(%p)\n", __FUNCTION__, hipp_init);
12896 + rc = pci_register_driver(&hipp_driver);
12897 + pci_register_driver_compat(&hipp_driver, rc);
12899 + return rc;
12902 +static void __exit hipp_exit (void)
12904 + pci_unregister_driver(&hipp_driver);
12907 +module_init(hipp_init);
12908 +module_exit(hipp_exit);
12910 +MODULE_LICENSE("BSD");
12911 +MODULE_AUTHOR("Michael Richardson <mcr@xelerance.com>");
12912 +MODULE_DESCRIPTION("OCF driver for hifn HIPP-I/II PCI crypto devices");
12913 diff -Nur linux-2.6.30.orig/crypto/ocf/hifn/hifnHIPPreg.h linux-2.6.30/crypto/ocf/hifn/hifnHIPPreg.h
12914 --- linux-2.6.30.orig/crypto/ocf/hifn/hifnHIPPreg.h 1970-01-01 01:00:00.000000000 +0100
12915 +++ linux-2.6.30/crypto/ocf/hifn/hifnHIPPreg.h 2009-06-11 10:55:27.000000000 +0200
12916 @@ -0,0 +1,46 @@
12917 +/*-
12918 + * Hifn HIPP-I/HIPP-II (7855/8155) driver.
12919 + * Copyright (c) 2006 Michael Richardson <mcr@xelerance.com>
12921 + * Redistribution and use in source and binary forms, with or without
12922 + * modification, are permitted provided that the following conditions
12923 + * are met:
12925 + * 1. Redistributions of source code must retain the above copyright
12926 + * notice, this list of conditions and the following disclaimer.
12927 + * 2. Redistributions in binary form must reproduce the above copyright
12928 + * notice, this list of conditions and the following disclaimer in the
12929 + * documentation and/or other materials provided with the distribution.
12930 + * 3. The name of the author may not be used to endorse or promote products
12931 + * derived from this software without specific prior written permission.
12934 + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
12935 + * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
12936 + * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
12937 + * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
12938 + * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
12939 + * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
12940 + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
12941 + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
12942 + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
12943 + * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
12945 + * Effort sponsored by Hifn inc.
12947 + */
12949 +#ifndef __HIFNHIPP_H__
12950 +#define __HIFNHIPP_H__
12953 + * PCI vendor and device identifiers
12954 + */
12955 +#define PCI_VENDOR_HIFN 0x13a3 /* Hifn */
12956 +#define PCI_PRODUCT_HIFN_6500 0x0006 /* 6500 */
12957 +#define PCI_PRODUCT_HIFN_7855 0x001f /* 7855 */
12958 +#define PCI_PRODUCT_HIFN_8155 0x999 /* XXX 8155 */
12960 +#define HIPP_1_REVID 0x01 /* BOGUS */
12962 +#endif /* __HIPP_H__ */
12963 diff -Nur linux-2.6.30.orig/crypto/ocf/hifn/hifnHIPPvar.h linux-2.6.30/crypto/ocf/hifn/hifnHIPPvar.h
12964 --- linux-2.6.30.orig/crypto/ocf/hifn/hifnHIPPvar.h 1970-01-01 01:00:00.000000000 +0100
12965 +++ linux-2.6.30/crypto/ocf/hifn/hifnHIPPvar.h 2009-06-11 10:55:27.000000000 +0200
12966 @@ -0,0 +1,93 @@
12968 + * Hifn HIPP-I/HIPP-II (7855/8155) driver.
12969 + * Copyright (c) 2006 Michael Richardson <mcr@xelerance.com> *
12971 + * Redistribution and use in source and binary forms, with or without
12972 + * modification, are permitted provided that the following conditions
12973 + * are met:
12975 + * 1. Redistributions of source code must retain the above copyright
12976 + * notice, this list of conditions and the following disclaimer.
12977 + * 2. Redistributions in binary form must reproduce the above copyright
12978 + * notice, this list of conditions and the following disclaimer in the
12979 + * documentation and/or other materials provided with the distribution.
12980 + * 3. The name of the author may not be used to endorse or promote products
12981 + * derived from this software without specific prior written permission.
12984 + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
12985 + * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
12986 + * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
12987 + * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
12988 + * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
12989 + * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
12990 + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
12991 + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
12992 + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
12993 + * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
12995 + * Effort sponsored by Hifn inc.
12997 + */
12999 +#ifndef __HIFNHIPPVAR_H__
13000 +#define __HIFNHIPPVAR_H__
13002 +#define HIPP_MAX_CHIPS 8
13005 + * Holds data specific to a single Hifn HIPP-I board.
13006 + */
13007 +struct hipp_softc {
13008 + softc_device_decl sc_dev;
13010 + struct pci_dev *sc_pcidev; /* device backpointer */
13011 + ocf_iomem_t sc_bar[5];
13012 + caddr_t sc_barphy[5]; /* physical address */
13013 + int sc_num; /* for multiple devs */
13014 + spinlock_t sc_mtx; /* per-instance lock */
13015 + int32_t sc_cid;
13016 + int sc_irq;
13018 +#if 0
13020 + u_int32_t sc_dmaier;
13021 + u_int32_t sc_drammodel; /* 1=dram, 0=sram */
13022 + u_int32_t sc_pllconfig; /* 7954/7955/7956 PLL config */
13024 + struct hifn_dma *sc_dma;
13025 + dma_addr_t sc_dma_physaddr;/* physical address of sc_dma */
13027 + int sc_dmansegs;
13028 + int sc_maxses;
13029 + int sc_nsessions;
13030 + struct hifn_session *sc_sessions;
13031 + int sc_ramsize;
13032 + int sc_flags;
13033 +#define HIFN_HAS_RNG 0x1 /* includes random number generator */
13034 +#define HIFN_HAS_PUBLIC 0x2 /* includes public key support */
13035 +#define HIFN_HAS_AES 0x4 /* includes AES support */
13036 +#define HIFN_IS_7811 0x8 /* Hifn 7811 part */
13037 +#define HIFN_IS_7956 0x10 /* Hifn 7956/7955 don't have SDRAM */
13039 + struct timer_list sc_tickto; /* for managing DMA */
13041 + int sc_rngfirst;
13042 + int sc_rnghz; /* RNG polling frequency */
13044 + int sc_c_busy; /* command ring busy */
13045 + int sc_s_busy; /* source data ring busy */
13046 + int sc_d_busy; /* destination data ring busy */
13047 + int sc_r_busy; /* result ring busy */
13048 + int sc_active; /* for initial countdown */
13049 + int sc_needwakeup; /* ops q'd waiting on resources */
13050 + int sc_curbatch; /* # ops submitted w/o int */
13051 + int sc_suspended;
13052 + struct miscdevice sc_miscdev;
13053 +#endif
13056 +#define HIPP_LOCK(_sc) spin_lock_irqsave(&(_sc)->sc_mtx, l_flags)
13057 +#define HIPP_UNLOCK(_sc) spin_unlock_irqrestore(&(_sc)->sc_mtx, l_flags)
13059 +#endif /* __HIFNHIPPVAR_H__ */
13060 diff -Nur linux-2.6.30.orig/crypto/ocf/hifn/Makefile linux-2.6.30/crypto/ocf/hifn/Makefile
13061 --- linux-2.6.30.orig/crypto/ocf/hifn/Makefile 1970-01-01 01:00:00.000000000 +0100
13062 +++ linux-2.6.30/crypto/ocf/hifn/Makefile 2009-06-11 10:55:27.000000000 +0200
13063 @@ -0,0 +1,13 @@
13064 +# for SGlinux builds
13065 +-include $(ROOTDIR)/modules/.config
13067 +obj-$(CONFIG_OCF_HIFN) += hifn7751.o
13068 +obj-$(CONFIG_OCF_HIFNHIPP) += hifnHIPP.o
13070 +obj ?= .
13071 +EXTRA_CFLAGS += -I$(obj)/.. -I$(obj)/
13073 +ifdef TOPDIR
13074 +-include $(TOPDIR)/Rules.make
13075 +endif
13077 diff -Nur linux-2.6.30.orig/crypto/ocf/ixp4xx/ixp4xx.c linux-2.6.30/crypto/ocf/ixp4xx/ixp4xx.c
13078 --- linux-2.6.30.orig/crypto/ocf/ixp4xx/ixp4xx.c 1970-01-01 01:00:00.000000000 +0100
13079 +++ linux-2.6.30/crypto/ocf/ixp4xx/ixp4xx.c 2009-06-11 10:55:27.000000000 +0200
13080 @@ -0,0 +1,1328 @@
13082 + * An OCF module that uses Intels IXP CryptACC API to do the crypto.
13083 + * This driver requires the IXP400 Access Library that is available
13084 + * from Intel in order to operate (or compile).
13086 + * Written by David McCullough <david_mccullough@securecomputing.com>
13087 + * Copyright (C) 2006-2007 David McCullough
13088 + * Copyright (C) 2004-2005 Intel Corporation.
13090 + * LICENSE TERMS
13092 + * The free distribution and use of this software in both source and binary
13093 + * form is allowed (with or without changes) provided that:
13095 + * 1. distributions of this source code include the above copyright
13096 + * notice, this list of conditions and the following disclaimer;
13098 + * 2. distributions in binary form include the above copyright
13099 + * notice, this list of conditions and the following disclaimer
13100 + * in the documentation and/or other associated materials;
13102 + * 3. the copyright holder's name is not used to endorse products
13103 + * built using this software without specific written permission.
13105 + * ALTERNATIVELY, provided that this notice is retained in full, this product
13106 + * may be distributed under the terms of the GNU General Public License (GPL),
13107 + * in which case the provisions of the GPL apply INSTEAD OF those given above.
13109 + * DISCLAIMER
13111 + * This software is provided 'as is' with no explicit or implied warranties
13112 + * in respect of its properties, including, but not limited to, correctness
13113 + * and/or fitness for purpose.
13114 + */
13116 +#ifndef AUTOCONF_INCLUDED
13117 +#include <linux/config.h>
13118 +#endif
13119 +#include <linux/module.h>
13120 +#include <linux/init.h>
13121 +#include <linux/list.h>
13122 +#include <linux/slab.h>
13123 +#include <linux/sched.h>
13124 +#include <linux/wait.h>
13125 +#include <linux/crypto.h>
13126 +#include <linux/interrupt.h>
13127 +#include <asm/scatterlist.h>
13129 +#include <IxTypes.h>
13130 +#include <IxOsBuffMgt.h>
13131 +#include <IxNpeDl.h>
13132 +#include <IxCryptoAcc.h>
13133 +#include <IxQMgr.h>
13134 +#include <IxOsServices.h>
13135 +#include <IxOsCacheMMU.h>
13137 +#include <cryptodev.h>
13138 +#include <uio.h>
13140 +#ifndef IX_MBUF_PRIV
13141 +#define IX_MBUF_PRIV(x) ((x)->priv)
13142 +#endif
13144 +struct ixp_data;
13146 +struct ixp_q {
13147 + struct list_head ixp_q_list;
13148 + struct ixp_data *ixp_q_data;
13149 + struct cryptop *ixp_q_crp;
13150 + struct cryptodesc *ixp_q_ccrd;
13151 + struct cryptodesc *ixp_q_acrd;
13152 + IX_MBUF ixp_q_mbuf;
13153 + UINT8 *ixp_hash_dest; /* Location for hash in client buffer */
13154 + UINT8 *ixp_hash_src; /* Location of hash in internal buffer */
13155 + unsigned char ixp_q_iv_data[IX_CRYPTO_ACC_MAX_CIPHER_IV_LENGTH];
13156 + unsigned char *ixp_q_iv;
13159 +struct ixp_data {
13160 + int ixp_registered; /* is the context registered */
13161 + int ixp_crd_flags; /* detect direction changes */
13163 + int ixp_cipher_alg;
13164 + int ixp_auth_alg;
13166 + UINT32 ixp_ctx_id;
13167 + UINT32 ixp_hash_key_id; /* used when hashing */
13168 + IxCryptoAccCtx ixp_ctx;
13169 + IX_MBUF ixp_pri_mbuf;
13170 + IX_MBUF ixp_sec_mbuf;
13172 + struct work_struct ixp_pending_work;
13173 + struct work_struct ixp_registration_work;
13174 + struct list_head ixp_q; /* unprocessed requests */
13177 +#ifdef __ixp46X
13179 +#define MAX_IOP_SIZE 64 /* words */
13180 +#define MAX_OOP_SIZE 128
13182 +#define MAX_PARAMS 3
13184 +struct ixp_pkq {
13185 + struct list_head pkq_list;
13186 + struct cryptkop *pkq_krp;
13188 + IxCryptoAccPkeEauInOperands pkq_op;
13189 + IxCryptoAccPkeEauOpResult pkq_result;
13191 + UINT32 pkq_ibuf0[MAX_IOP_SIZE];
13192 + UINT32 pkq_ibuf1[MAX_IOP_SIZE];
13193 + UINT32 pkq_ibuf2[MAX_IOP_SIZE];
13194 + UINT32 pkq_obuf[MAX_OOP_SIZE];
13197 +static LIST_HEAD(ixp_pkq); /* current PK wait list */
13198 +static struct ixp_pkq *ixp_pk_cur;
13199 +static spinlock_t ixp_pkq_lock;
13201 +#endif /* __ixp46X */
13203 +static int ixp_blocked = 0;
13205 +static int32_t ixp_id = -1;
13206 +static struct ixp_data **ixp_sessions = NULL;
13207 +static u_int32_t ixp_sesnum = 0;
13209 +static int ixp_process(device_t, struct cryptop *, int);
13210 +static int ixp_newsession(device_t, u_int32_t *, struct cryptoini *);
13211 +static int ixp_freesession(device_t, u_int64_t);
13212 +#ifdef __ixp46X
13213 +static int ixp_kprocess(device_t, struct cryptkop *krp, int hint);
13214 +#endif
13216 +#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20)
13217 +static kmem_cache_t *qcache;
13218 +#else
13219 +static struct kmem_cache *qcache;
13220 +#endif
13222 +#define debug ixp_debug
13223 +static int ixp_debug = 0;
13224 +module_param(ixp_debug, int, 0644);
13225 +MODULE_PARM_DESC(ixp_debug, "Enable debug");
13227 +static int ixp_init_crypto = 1;
13228 +module_param(ixp_init_crypto, int, 0444); /* RO after load/boot */
13229 +MODULE_PARM_DESC(ixp_init_crypto, "Call ixCryptoAccInit (default is 1)");
13231 +static void ixp_process_pending(void *arg);
13232 +static void ixp_registration(void *arg);
13233 +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,20)
13234 +static void ixp_process_pending_wq(struct work_struct *work);
13235 +static void ixp_registration_wq(struct work_struct *work);
13236 +#endif
13239 + * dummy device structure
13240 + */
13242 +static struct {
13243 + softc_device_decl sc_dev;
13244 +} ixpdev;
13246 +static device_method_t ixp_methods = {
13247 + /* crypto device methods */
13248 + DEVMETHOD(cryptodev_newsession, ixp_newsession),
13249 + DEVMETHOD(cryptodev_freesession,ixp_freesession),
13250 + DEVMETHOD(cryptodev_process, ixp_process),
13251 +#ifdef __ixp46X
13252 + DEVMETHOD(cryptodev_kprocess, ixp_kprocess),
13253 +#endif
13257 + * Generate a new software session.
13258 + */
13259 +static int
13260 +ixp_newsession(device_t dev, u_int32_t *sid, struct cryptoini *cri)
13262 + struct ixp_data *ixp;
13263 + u_int32_t i;
13264 +#define AUTH_LEN(cri, def) \
13265 + (cri->cri_mlen ? cri->cri_mlen : (def))
13267 + dprintk("%s():alg %d\n", __FUNCTION__,cri->cri_alg);
13268 + if (sid == NULL || cri == NULL) {
13269 + dprintk("%s,%d - EINVAL\n", __FILE__, __LINE__);
13270 + return EINVAL;
13273 + if (ixp_sessions) {
13274 + for (i = 1; i < ixp_sesnum; i++)
13275 + if (ixp_sessions[i] == NULL)
13276 + break;
13277 + } else
13278 + i = 1; /* NB: to silence compiler warning */
13280 + if (ixp_sessions == NULL || i == ixp_sesnum) {
13281 + struct ixp_data **ixpd;
13283 + if (ixp_sessions == NULL) {
13284 + i = 1; /* We leave ixp_sessions[0] empty */
13285 + ixp_sesnum = CRYPTO_SW_SESSIONS;
13286 + } else
13287 + ixp_sesnum *= 2;
13289 + ixpd = kmalloc(ixp_sesnum * sizeof(struct ixp_data *), SLAB_ATOMIC);
13290 + if (ixpd == NULL) {
13291 + /* Reset session number */
13292 + if (ixp_sesnum == CRYPTO_SW_SESSIONS)
13293 + ixp_sesnum = 0;
13294 + else
13295 + ixp_sesnum /= 2;
13296 + dprintk("%s,%d: ENOBUFS\n", __FILE__, __LINE__);
13297 + return ENOBUFS;
13299 + memset(ixpd, 0, ixp_sesnum * sizeof(struct ixp_data *));
13301 + /* Copy existing sessions */
13302 + if (ixp_sessions) {
13303 + memcpy(ixpd, ixp_sessions,
13304 + (ixp_sesnum / 2) * sizeof(struct ixp_data *));
13305 + kfree(ixp_sessions);
13308 + ixp_sessions = ixpd;
13311 + ixp_sessions[i] = (struct ixp_data *) kmalloc(sizeof(struct ixp_data),
13312 + SLAB_ATOMIC);
13313 + if (ixp_sessions[i] == NULL) {
13314 + ixp_freesession(NULL, i);
13315 +  dprintk("%s,%d: ENOBUFS\n", __FILE__, __LINE__);
13316 + return ENOBUFS;
13319 + *sid = i;
13321 + ixp = ixp_sessions[i];
13322 + memset(ixp, 0, sizeof(*ixp));
13324 + ixp->ixp_cipher_alg = -1;
13325 + ixp->ixp_auth_alg = -1;
13326 + ixp->ixp_ctx_id = -1;
13327 + INIT_LIST_HEAD(&ixp->ixp_q);
13329 + ixp->ixp_ctx.useDifferentSrcAndDestMbufs = 0;
13331 + while (cri) {
13332 + switch (cri->cri_alg) {
13333 + case CRYPTO_DES_CBC:
13334 + ixp->ixp_cipher_alg = cri->cri_alg;
13335 + ixp->ixp_ctx.cipherCtx.cipherAlgo = IX_CRYPTO_ACC_CIPHER_DES;
13336 + ixp->ixp_ctx.cipherCtx.cipherMode = IX_CRYPTO_ACC_MODE_CBC;
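+ /* cri_klen is given in bits; round up to bytes for the IxCryptoAcc context */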
13337 + ixp->ixp_ctx.cipherCtx.cipherKeyLen = (cri->cri_klen + 7) / 8;
13338 + ixp->ixp_ctx.cipherCtx.cipherBlockLen = IX_CRYPTO_ACC_DES_BLOCK_64;
13339 + ixp->ixp_ctx.cipherCtx.cipherInitialVectorLen =
13340 + IX_CRYPTO_ACC_DES_IV_64;
13341 + memcpy(ixp->ixp_ctx.cipherCtx.key.cipherKey,
13342 + cri->cri_key, (cri->cri_klen + 7) / 8);
13343 + break;
13345 + case CRYPTO_3DES_CBC:
13346 + ixp->ixp_cipher_alg = cri->cri_alg;
13347 + ixp->ixp_ctx.cipherCtx.cipherAlgo = IX_CRYPTO_ACC_CIPHER_3DES;
13348 + ixp->ixp_ctx.cipherCtx.cipherMode = IX_CRYPTO_ACC_MODE_CBC;
13349 + ixp->ixp_ctx.cipherCtx.cipherKeyLen = (cri->cri_klen + 7) / 8;
13350 + ixp->ixp_ctx.cipherCtx.cipherBlockLen = IX_CRYPTO_ACC_DES_BLOCK_64;
13351 + ixp->ixp_ctx.cipherCtx.cipherInitialVectorLen =
13352 + IX_CRYPTO_ACC_DES_IV_64;
13353 + memcpy(ixp->ixp_ctx.cipherCtx.key.cipherKey,
13354 + cri->cri_key, (cri->cri_klen + 7) / 8);
13355 + break;
13357 + case CRYPTO_RIJNDAEL128_CBC:
13358 + ixp->ixp_cipher_alg = cri->cri_alg;
13359 + ixp->ixp_ctx.cipherCtx.cipherAlgo = IX_CRYPTO_ACC_CIPHER_AES;
13360 + ixp->ixp_ctx.cipherCtx.cipherMode = IX_CRYPTO_ACC_MODE_CBC;
13361 + ixp->ixp_ctx.cipherCtx.cipherKeyLen = (cri->cri_klen + 7) / 8;
13362 + ixp->ixp_ctx.cipherCtx.cipherBlockLen = 16;
13363 + ixp->ixp_ctx.cipherCtx.cipherInitialVectorLen = 16;
13364 + memcpy(ixp->ixp_ctx.cipherCtx.key.cipherKey,
13365 + cri->cri_key, (cri->cri_klen + 7) / 8);
13366 + break;
13368 + case CRYPTO_MD5:
13369 + case CRYPTO_MD5_HMAC:
13370 + ixp->ixp_auth_alg = cri->cri_alg;
13371 + ixp->ixp_ctx.authCtx.authAlgo = IX_CRYPTO_ACC_AUTH_MD5;
13372 + ixp->ixp_ctx.authCtx.authDigestLen = AUTH_LEN(cri, MD5_HASH_LEN);
13373 + ixp->ixp_ctx.authCtx.aadLen = 0;
13374 + /* Only MD5_HMAC needs a key */
13375 + if (cri->cri_alg == CRYPTO_MD5_HMAC) {
13376 + ixp->ixp_ctx.authCtx.authKeyLen = (cri->cri_klen + 7) / 8;
13377 + if (ixp->ixp_ctx.authCtx.authKeyLen >
13378 + sizeof(ixp->ixp_ctx.authCtx.key.authKey)) {
13379 + printk(
13380 + "ixp4xx: Invalid key length for MD5_HMAC - %d bits\n",
13381 + cri->cri_klen);
13382 + ixp_freesession(NULL, i);
13383 + return EINVAL;
13385 + memcpy(ixp->ixp_ctx.authCtx.key.authKey,
13386 + cri->cri_key, (cri->cri_klen + 7) / 8);
13388 + break;
13390 + case CRYPTO_SHA1:
13391 + case CRYPTO_SHA1_HMAC:
13392 + ixp->ixp_auth_alg = cri->cri_alg;
13393 + ixp->ixp_ctx.authCtx.authAlgo = IX_CRYPTO_ACC_AUTH_SHA1;
13394 + ixp->ixp_ctx.authCtx.authDigestLen = AUTH_LEN(cri, SHA1_HASH_LEN);
13395 + ixp->ixp_ctx.authCtx.aadLen = 0;
13396 + /* Only SHA1_HMAC needs a key */
13397 + if (cri->cri_alg == CRYPTO_SHA1_HMAC) {
13398 + ixp->ixp_ctx.authCtx.authKeyLen = (cri->cri_klen + 7) / 8;
13399 + if (ixp->ixp_ctx.authCtx.authKeyLen >
13400 + sizeof(ixp->ixp_ctx.authCtx.key.authKey)) {
13401 + printk(
13402 + "ixp4xx: Invalid key length for SHA1_HMAC - %d bits\n",
13403 + cri->cri_klen);
13404 + ixp_freesession(NULL, i);
13405 + return EINVAL;
13407 + memcpy(ixp->ixp_ctx.authCtx.key.authKey,
13408 + cri->cri_key, (cri->cri_klen + 7) / 8);
13410 + break;
13412 + default:
13413 + printk("ixp: unknown algo 0x%x\n", cri->cri_alg);
13414 + ixp_freesession(NULL, i);
13415 + return EINVAL;
13417 + cri = cri->cri_next;
13420 +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,20)
13421 + INIT_WORK(&ixp->ixp_pending_work, ixp_process_pending_wq);
13422 + INIT_WORK(&ixp->ixp_registration_work, ixp_registration_wq);
13423 +#else
13424 + INIT_WORK(&ixp->ixp_pending_work, ixp_process_pending, ixp);
13425 + INIT_WORK(&ixp->ixp_registration_work, ixp_registration, ixp);
13426 +#endif
13428 + return 0;
13433 + * Free a session.
13434 + */
13435 +static int
13436 +ixp_freesession(device_t dev, u_int64_t tid)
13438 + u_int32_t sid = CRYPTO_SESID2LID(tid);
13440 + dprintk("%s()\n", __FUNCTION__);
13441 + if (sid >= ixp_sesnum || ixp_sessions == NULL ||
13442 + ixp_sessions[sid] == NULL) {
13443 + dprintk("%s,%d: EINVAL\n", __FILE__, __LINE__);
13444 + return EINVAL;
13447 + /* Silently accept and return */
13448 + if (sid == 0)
13449 + return 0;
13451 + if (ixp_sessions[sid]) {
13452 + if (ixp_sessions[sid]->ixp_ctx_id != -1) {
13453 + ixCryptoAccCtxUnregister(ixp_sessions[sid]->ixp_ctx_id);
13454 + ixp_sessions[sid]->ixp_ctx_id = -1;
13457 + flush_scheduled_work();
13459 + kfree(ixp_sessions[sid]);
13461 + ixp_sessions[sid] = NULL;
13462 + if (ixp_blocked) {
13463 + ixp_blocked = 0;
13464 + crypto_unblock(ixp_id, CRYPTO_SYMQ);
13466 + return 0;
13471 + * callback for when hash processing is complete
13472 + */
13474 +static void
13475 +ixp_hash_perform_cb(
13476 + UINT32 hash_key_id,
13477 + IX_MBUF *bufp,
13478 + IxCryptoAccStatus status)
13480 + struct ixp_q *q;
13482 + dprintk("%s(%u, %p, 0x%x)\n", __FUNCTION__, hash_key_id, bufp, status);
13484 + if (bufp == NULL) {
13485 + printk("ixp: NULL buf in %s\n", __FUNCTION__);
13486 + return;
13489 + q = IX_MBUF_PRIV(bufp);
13490 + if (q == NULL) {
13491 + printk("ixp: NULL priv in %s\n", __FUNCTION__);
13492 + return;
13495 + if (status == IX_CRYPTO_ACC_STATUS_SUCCESS) {
13496 + /* On success, need to copy hash back into original client buffer */
13497 + memcpy(q->ixp_hash_dest, q->ixp_hash_src,
13498 + (q->ixp_q_data->ixp_auth_alg == CRYPTO_SHA1) ?
13499 + SHA1_HASH_LEN : MD5_HASH_LEN);
13501 + else {
13502 + printk("ixp: hash perform failed status=%d\n", status);
13503 + q->ixp_q_crp->crp_etype = EINVAL;
13506 + /* Free internal buffer used for hashing */
13507 + kfree(IX_MBUF_MDATA(&q->ixp_q_mbuf));
13509 + crypto_done(q->ixp_q_crp);
13510 + kmem_cache_free(qcache, q);
13514 + * setup a request and perform it
13515 + */
13516 +static void
13517 +ixp_q_process(struct ixp_q *q)
13519 + IxCryptoAccStatus status;
13520 + struct ixp_data *ixp = q->ixp_q_data;
13521 + int auth_off = 0;
13522 + int auth_len = 0;
13523 + int crypt_off = 0;
13524 + int crypt_len = 0;
13525 + int icv_off = 0;
13526 + char *crypt_func;
13528 + dprintk("%s(%p)\n", __FUNCTION__, q);
13530 + if (q->ixp_q_ccrd) {
13531 + if (q->ixp_q_ccrd->crd_flags & CRD_F_IV_EXPLICIT) {
13532 + q->ixp_q_iv = q->ixp_q_ccrd->crd_iv;
13533 + } else {
13534 + q->ixp_q_iv = q->ixp_q_iv_data;
13535 + crypto_copydata(q->ixp_q_crp->crp_flags, q->ixp_q_crp->crp_buf,
13536 + q->ixp_q_ccrd->crd_inject,
13537 + ixp->ixp_ctx.cipherCtx.cipherInitialVectorLen,
13538 + (caddr_t) q->ixp_q_iv);
13541 + if (q->ixp_q_acrd) {
13542 + auth_off = q->ixp_q_acrd->crd_skip;
13543 + auth_len = q->ixp_q_acrd->crd_len;
13544 + icv_off = q->ixp_q_acrd->crd_inject;
13547 + crypt_off = q->ixp_q_ccrd->crd_skip;
13548 + crypt_len = q->ixp_q_ccrd->crd_len;
13549 + } else { /* if (q->ixp_q_acrd) */
13550 + auth_off = q->ixp_q_acrd->crd_skip;
13551 + auth_len = q->ixp_q_acrd->crd_len;
13552 + icv_off = q->ixp_q_acrd->crd_inject;
13555 + if (q->ixp_q_crp->crp_flags & CRYPTO_F_SKBUF) {
13556 + struct sk_buff *skb = (struct sk_buff *) q->ixp_q_crp->crp_buf;
13557 + if (skb_shinfo(skb)->nr_frags) {
13558 + /*
13559 + * DAVIDM fix this limitation one day by using
13560 + * a buffer pool and chaining, it is not currently
13561 + * needed for current user/kernel space acceleration
13562 + */
13563 + printk("ixp: Cannot handle fragmented skb's yet !\n");
13564 + q->ixp_q_crp->crp_etype = ENOENT;
13565 + goto done;
13567 + IX_MBUF_MLEN(&q->ixp_q_mbuf) =
13568 + IX_MBUF_PKT_LEN(&q->ixp_q_mbuf) = skb->len;
13569 + IX_MBUF_MDATA(&q->ixp_q_mbuf) = skb->data;
13570 + } else if (q->ixp_q_crp->crp_flags & CRYPTO_F_IOV) {
13571 + struct uio *uiop = (struct uio *) q->ixp_q_crp->crp_buf;
13572 + if (uiop->uio_iovcnt != 1) {
13573 + /*
13574 + * DAVIDM fix this limitation one day by using
13575 + * a buffer pool and chaining, it is not currently
13576 + * needed for current user/kernel space acceleration
13577 + */
13578 + printk("ixp: Cannot handle more than 1 iovec yet !\n");
13579 + q->ixp_q_crp->crp_etype = ENOENT;
13580 + goto done;
13582 + IX_MBUF_MLEN(&q->ixp_q_mbuf) =
13583 + IX_MBUF_PKT_LEN(&q->ixp_q_mbuf) = uiop->uio_iov[0].iov_len;
13584 + IX_MBUF_MDATA(&q->ixp_q_mbuf) = uiop->uio_iov[0].iov_base;
13585 + } else /* contig buffer */ {
13586 + IX_MBUF_MLEN(&q->ixp_q_mbuf) =
13587 + IX_MBUF_PKT_LEN(&q->ixp_q_mbuf) = q->ixp_q_crp->crp_ilen;
13588 + IX_MBUF_MDATA(&q->ixp_q_mbuf) = q->ixp_q_crp->crp_buf;
13591 + IX_MBUF_PRIV(&q->ixp_q_mbuf) = q;
13593 + if (ixp->ixp_auth_alg == CRYPTO_SHA1 || ixp->ixp_auth_alg == CRYPTO_MD5) {
13594 + /*
13595 + * For SHA1 and MD5 hash, need to create an internal buffer that is big
13596 + * enough to hold the original data + the appropriate padding for the
13597 + * hash algorithm.
13598 + */
13599 + UINT8 *tbuf = NULL;
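+  /*
+   * Illustrative reading of the size calculation below: 72 bits is the
+   * minimum MD5/SHA1 padding (a 0x80 byte plus a 64-bit length word), and
+   * with the +511 round-up the expression works out to len + 72 bytes,
+   * which also covers the worst-case padding.
+   */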
13601 + IX_MBUF_MLEN(&q->ixp_q_mbuf) = IX_MBUF_PKT_LEN(&q->ixp_q_mbuf) =
13602 + ((IX_MBUF_MLEN(&q->ixp_q_mbuf) * 8) + 72 + 511) / 8;
13603 + tbuf = kmalloc(IX_MBUF_MLEN(&q->ixp_q_mbuf), SLAB_ATOMIC);
13605 +  if (tbuf == NULL) {
13606 + printk("ixp: kmalloc(%u, SLAB_ATOMIC) failed\n",
13607 + IX_MBUF_MLEN(&q->ixp_q_mbuf));
13608 + q->ixp_q_crp->crp_etype = ENOMEM;
13609 + goto done;
13611 + memcpy(tbuf, &(IX_MBUF_MDATA(&q->ixp_q_mbuf))[auth_off], auth_len);
13613 + /* Set location in client buffer to copy hash into */
13614 + q->ixp_hash_dest =
13615 + &(IX_MBUF_MDATA(&q->ixp_q_mbuf))[auth_off + auth_len];
13617 + IX_MBUF_MDATA(&q->ixp_q_mbuf) = tbuf;
13619 + /* Set location in internal buffer for where hash starts */
13620 + q->ixp_hash_src = &(IX_MBUF_MDATA(&q->ixp_q_mbuf))[auth_len];
13622 + crypt_func = "ixCryptoAccHashPerform";
13623 + status = ixCryptoAccHashPerform(ixp->ixp_ctx.authCtx.authAlgo,
13624 + &q->ixp_q_mbuf, ixp_hash_perform_cb, 0, auth_len, auth_len,
13625 + &ixp->ixp_hash_key_id);
13627 + else {
13628 + crypt_func = "ixCryptoAccAuthCryptPerform";
13629 + status = ixCryptoAccAuthCryptPerform(ixp->ixp_ctx_id, &q->ixp_q_mbuf,
13630 + NULL, auth_off, auth_len, crypt_off, crypt_len, icv_off,
13631 + q->ixp_q_iv);
13634 + if (IX_CRYPTO_ACC_STATUS_SUCCESS == status)
13635 + return;
13637 + if (IX_CRYPTO_ACC_STATUS_QUEUE_FULL == status) {
13638 + q->ixp_q_crp->crp_etype = ENOMEM;
13639 + goto done;
13642 + printk("ixp: %s failed %u\n", crypt_func, status);
13643 + q->ixp_q_crp->crp_etype = EINVAL;
13645 +done:
13646 + crypto_done(q->ixp_q_crp);
13647 + kmem_cache_free(qcache, q);
13652 + * because we cannot process the Q from the Register callback,
13653 + * we do it here on a task Q.
13654 + */
13656 +static void
13657 +ixp_process_pending(void *arg)
13659 + struct ixp_data *ixp = arg;
13660 + struct ixp_q *q = NULL;
13662 + dprintk("%s(%p)\n", __FUNCTION__, arg);
13664 + if (!ixp)
13665 + return;
13667 + while (!list_empty(&ixp->ixp_q)) {
13668 + q = list_entry(ixp->ixp_q.next, struct ixp_q, ixp_q_list);
13669 + list_del(&q->ixp_q_list);
13670 + ixp_q_process(q);
13674 +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,20)
13675 +static void
13676 +ixp_process_pending_wq(struct work_struct *work)
13678 + struct ixp_data *ixp = container_of(work, struct ixp_data,
13679 + ixp_pending_work);
13680 + ixp_process_pending(ixp);
13682 +#endif
13685 + * callback for when context registration is complete
13686 + */
13688 +static void
13689 +ixp_register_cb(UINT32 ctx_id, IX_MBUF *bufp, IxCryptoAccStatus status)
13691 + int i;
13692 + struct ixp_data *ixp;
13693 + struct ixp_q *q;
13695 + dprintk("%s(%d, %p, %d)\n", __FUNCTION__, ctx_id, bufp, status);
13697 + /*
13698 + * free any buffer passed in to this routine
13699 + */
13700 + if (bufp) {
13701 + IX_MBUF_MLEN(bufp) = IX_MBUF_PKT_LEN(bufp) = 0;
13702 + kfree(IX_MBUF_MDATA(bufp));
13703 + IX_MBUF_MDATA(bufp) = NULL;
13706 + for (i = 0; i < ixp_sesnum; i++) {
13707 + ixp = ixp_sessions[i];
13708 + if (ixp && ixp->ixp_ctx_id == ctx_id)
13709 + break;
13711 + if (i >= ixp_sesnum) {
13712 + printk("ixp: invalid context id %d\n", ctx_id);
13713 + return;
13716 + if (IX_CRYPTO_ACC_STATUS_WAIT == status) {
13717 +		/* this is normal - we were just called to free the first of two buffers */
13718 + dprintk("ixp: register not finished yet.\n");
13719 + return;
13722 + if (IX_CRYPTO_ACC_STATUS_SUCCESS != status) {
13723 + printk("ixp: register failed 0x%x\n", status);
13724 + while (!list_empty(&ixp->ixp_q)) {
13725 + q = list_entry(ixp->ixp_q.next, struct ixp_q, ixp_q_list);
13726 + list_del(&q->ixp_q_list);
13727 + q->ixp_q_crp->crp_etype = EINVAL;
13728 + crypto_done(q->ixp_q_crp);
13729 + kmem_cache_free(qcache, q);
13731 + return;
13734 + /*
13735 +	 * we are now registered, but we cannot start processing the Q here
13736 + * or we get strange errors with AES (DES/3DES seem to be ok).
13737 + */
13738 + ixp->ixp_registered = 1;
13739 + schedule_work(&ixp->ixp_pending_work);
13744 + * callback for when data processing is complete
13745 + */
13747 +static void
13748 +ixp_perform_cb(
13749 + UINT32 ctx_id,
13750 + IX_MBUF *sbufp,
13751 + IX_MBUF *dbufp,
13752 + IxCryptoAccStatus status)
13754 + struct ixp_q *q;
13756 + dprintk("%s(%d, %p, %p, 0x%x)\n", __FUNCTION__, ctx_id, sbufp,
13757 + dbufp, status);
13759 + if (sbufp == NULL) {
13760 + printk("ixp: NULL sbuf in ixp_perform_cb\n");
13761 + return;
13764 + q = IX_MBUF_PRIV(sbufp);
13765 + if (q == NULL) {
13766 + printk("ixp: NULL priv in ixp_perform_cb\n");
13767 + return;
13770 + if (status != IX_CRYPTO_ACC_STATUS_SUCCESS) {
13771 + printk("ixp: perform failed status=%d\n", status);
13772 + q->ixp_q_crp->crp_etype = EINVAL;
13775 + crypto_done(q->ixp_q_crp);
13776 + kmem_cache_free(qcache, q);
13781 + * registration is not callable at IRQ time, so we defer
13782 + * to a task queue; this routine completes the registration for us
13783 + * when the task queue runs
13785 + * Unfortunately this means we cannot tell OCF that the driver is blocked;
13786 + * we do that on the next request.
13787 + */
13789 +static void
13790 +ixp_registration(void *arg)
13792 + struct ixp_data *ixp = arg;
13793 + struct ixp_q *q = NULL;
13794 + IX_MBUF *pri = NULL, *sec = NULL;
13795 + int status = IX_CRYPTO_ACC_STATUS_SUCCESS;
13797 + if (!ixp) {
13798 + printk("ixp: ixp_registration with no arg\n");
13799 + return;
13802 + if (ixp->ixp_ctx_id != -1) {
13803 + ixCryptoAccCtxUnregister(ixp->ixp_ctx_id);
13804 + ixp->ixp_ctx_id = -1;
13807 + if (list_empty(&ixp->ixp_q)) {
13808 + printk("ixp: ixp_registration with no Q\n");
13809 + return;
13812 + /*
13813 + * setup the primary and secondary buffers
13814 + */
13815 + q = list_entry(ixp->ixp_q.next, struct ixp_q, ixp_q_list);
13816 + if (q->ixp_q_acrd) {
13817 + pri = &ixp->ixp_pri_mbuf;
13818 + sec = &ixp->ixp_sec_mbuf;
13819 + IX_MBUF_MLEN(pri) = IX_MBUF_PKT_LEN(pri) = 128;
13820 + IX_MBUF_MDATA(pri) = (unsigned char *) kmalloc(128, SLAB_ATOMIC);
13821 + IX_MBUF_MLEN(sec) = IX_MBUF_PKT_LEN(sec) = 128;
13822 + IX_MBUF_MDATA(sec) = (unsigned char *) kmalloc(128, SLAB_ATOMIC);
13825 + /* Only need to register if a crypt op or HMAC op */
13826 + if (!(ixp->ixp_auth_alg == CRYPTO_SHA1 ||
13827 + ixp->ixp_auth_alg == CRYPTO_MD5)) {
13828 + status = ixCryptoAccCtxRegister(
13829 + &ixp->ixp_ctx,
13830 + pri, sec,
13831 + ixp_register_cb,
13832 + ixp_perform_cb,
13833 + &ixp->ixp_ctx_id);
13835 + else {
13836 + /* Otherwise we start processing pending q */
13837 + schedule_work(&ixp->ixp_pending_work);
13840 + if (IX_CRYPTO_ACC_STATUS_SUCCESS == status)
13841 + return;
13843 + if (IX_CRYPTO_ACC_STATUS_EXCEED_MAX_TUNNELS == status) {
13844 + printk("ixp: ixCryptoAccCtxRegister failed (out of tunnels)\n");
13845 + ixp_blocked = 1;
13846 +		/* perhaps we should return EAGAIN on queued ops? */
13847 + return;
13850 + printk("ixp: ixCryptoAccCtxRegister failed %d\n", status);
13851 + ixp->ixp_ctx_id = -1;
13853 + /*
13854 + * everything waiting is toasted
13855 + */
13856 + while (!list_empty(&ixp->ixp_q)) {
13857 + q = list_entry(ixp->ixp_q.next, struct ixp_q, ixp_q_list);
13858 + list_del(&q->ixp_q_list);
13859 + q->ixp_q_crp->crp_etype = ENOENT;
13860 + crypto_done(q->ixp_q_crp);
13861 + kmem_cache_free(qcache, q);
13865 +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,20)
13866 +static void
13867 +ixp_registration_wq(struct work_struct *work)
13869 + struct ixp_data *ixp = container_of(work, struct ixp_data,
13870 + ixp_registration_work);
13871 + ixp_registration(ixp);
13873 +#endif
13876 + * Process a request.
13877 + */
13878 +static int
13879 +ixp_process(device_t dev, struct cryptop *crp, int hint)
13881 + struct ixp_data *ixp;
13882 + unsigned int lid;
13883 + struct ixp_q *q = NULL;
13884 + int status;
13886 + dprintk("%s()\n", __FUNCTION__);
13888 + /* Sanity check */
13889 + if (crp == NULL) {
13890 + dprintk("%s,%d: EINVAL\n", __FILE__, __LINE__);
13891 + return EINVAL;
13894 + crp->crp_etype = 0;
13896 + if (ixp_blocked)
13897 + return ERESTART;
13899 + if (crp->crp_desc == NULL || crp->crp_buf == NULL) {
13900 + dprintk("%s,%d: EINVAL\n", __FILE__, __LINE__);
13901 + crp->crp_etype = EINVAL;
13902 + goto done;
13905 + /*
13906 + * find the session we are using
13907 + */
13909 + lid = crp->crp_sid & 0xffffffff;
13910 + if (lid >= ixp_sesnum || lid == 0 || ixp_sessions == NULL ||
13911 + ixp_sessions[lid] == NULL) {
13912 + crp->crp_etype = ENOENT;
13913 + dprintk("%s,%d: ENOENT\n", __FILE__, __LINE__);
13914 + goto done;
13916 + ixp = ixp_sessions[lid];
13918 + /*
13919 + * setup a new request ready for queuing
13920 + */
13921 + q = kmem_cache_alloc(qcache, SLAB_ATOMIC);
13922 + if (q == NULL) {
13923 + dprintk("%s,%d: ENOMEM\n", __FILE__, __LINE__);
13924 + crp->crp_etype = ENOMEM;
13925 + goto done;
13927 + /*
13928 + * save some cycles by only zeroing the important bits
13929 + */
13930 + memset(&q->ixp_q_mbuf, 0, sizeof(q->ixp_q_mbuf));
13931 + q->ixp_q_ccrd = NULL;
13932 + q->ixp_q_acrd = NULL;
13933 + q->ixp_q_crp = crp;
13934 + q->ixp_q_data = ixp;
13936 + /*
13937 + * point the cipher and auth descriptors appropriately
13938 + * check that we have something to do
13939 + */
13940 + if (crp->crp_desc->crd_alg == ixp->ixp_cipher_alg)
13941 + q->ixp_q_ccrd = crp->crp_desc;
13942 + else if (crp->crp_desc->crd_alg == ixp->ixp_auth_alg)
13943 + q->ixp_q_acrd = crp->crp_desc;
13944 + else {
13945 + crp->crp_etype = ENOENT;
13946 + dprintk("%s,%d: bad desc match: ENOENT\n", __FILE__, __LINE__);
13947 + goto done;
13949 + if (crp->crp_desc->crd_next) {
13950 + if (crp->crp_desc->crd_next->crd_alg == ixp->ixp_cipher_alg)
13951 + q->ixp_q_ccrd = crp->crp_desc->crd_next;
13952 + else if (crp->crp_desc->crd_next->crd_alg == ixp->ixp_auth_alg)
13953 + q->ixp_q_acrd = crp->crp_desc->crd_next;
13954 + else {
13955 + crp->crp_etype = ENOENT;
13956 + dprintk("%s,%d: bad desc match: ENOENT\n", __FILE__, __LINE__);
13957 + goto done;
13961 + /*
13962 + * If there is a direction change for this context then we mark it as
13963 +	 * unregistered and re-register it for the new direction. This is not
13964 + * a very expensive operation and currently only tends to happen when
13965 +	 * user-space applications are doing benchmarks
13967 + * DM - we should be checking for pending requests before unregistering.
13968 + */
13969 + if (q->ixp_q_ccrd && ixp->ixp_registered &&
13970 + ixp->ixp_crd_flags != (q->ixp_q_ccrd->crd_flags & CRD_F_ENCRYPT)) {
13971 + dprintk("%s - detected direction change on session\n", __FUNCTION__);
13972 + ixp->ixp_registered = 0;
13975 + /*
13976 + * if we are registered, call straight into the perform code
13977 + */
13978 + if (ixp->ixp_registered) {
13979 + ixp_q_process(q);
13980 + return 0;
13983 + /*
13984 +	 * the only parts of the context not set in newsession are the direction
13985 + * dependent parts
13986 + */
13987 + if (q->ixp_q_ccrd) {
13988 + ixp->ixp_crd_flags = (q->ixp_q_ccrd->crd_flags & CRD_F_ENCRYPT);
13989 + if (q->ixp_q_ccrd->crd_flags & CRD_F_ENCRYPT) {
13990 + ixp->ixp_ctx.operation = q->ixp_q_acrd ?
13991 + IX_CRYPTO_ACC_OP_ENCRYPT_AUTH : IX_CRYPTO_ACC_OP_ENCRYPT;
13992 + } else {
13993 + ixp->ixp_ctx.operation = q->ixp_q_acrd ?
13994 + IX_CRYPTO_ACC_OP_AUTH_DECRYPT : IX_CRYPTO_ACC_OP_DECRYPT;
13996 + } else {
13997 + /* q->ixp_q_acrd must be set if we are here */
13998 + ixp->ixp_ctx.operation = IX_CRYPTO_ACC_OP_AUTH_CALC;
14001 + status = list_empty(&ixp->ixp_q);
14002 + list_add_tail(&q->ixp_q_list, &ixp->ixp_q);
14003 + if (status)
14004 + schedule_work(&ixp->ixp_registration_work);
14005 + return 0;
14007 +done:
14008 + if (q)
14009 + kmem_cache_free(qcache, q);
14010 + crypto_done(crp);
14011 + return 0;
14015 +#ifdef __ixp46X
14017 + * key processing support for the ixp465
14018 + */
14022 + * copy a BN (LE) into a buffer (BE) and fill out the op appropriately
14023 + * assume zeroed and only copy bits that are significant
14024 + */
14026 +static int
14027 +ixp_copy_ibuf(struct crparam *p, IxCryptoAccPkeEauOperand *op, UINT32 *buf)
14029 + unsigned char *src = (unsigned char *) p->crp_p;
14030 + unsigned char *dst;
14031 + int len, bits = p->crp_nbits;
14033 + dprintk("%s()\n", __FUNCTION__);
14035 + if (bits > MAX_IOP_SIZE * sizeof(UINT32) * 8) {
14036 + dprintk("%s - ibuf too big (%d > %d)\n", __FUNCTION__,
14037 + bits, MAX_IOP_SIZE * sizeof(UINT32) * 8);
14038 + return -1;
14041 +	len = (bits + 31) / 32; /* the number of UINT32s needed */
14043 + dst = (unsigned char *) &buf[len];
14044 + dst--;
14046 + while (bits > 0) {
14047 + *dst-- = *src++;
14048 + bits -= 8;
14051 +#if 0 /* no need to zero remaining bits as it is done during request alloc */
14052 + while (dst > (unsigned char *) buf)
14053 + *dst-- = '\0';
14054 +#endif
14056 + op->pData = buf;
14057 + op->dataLen = len;
14058 + return 0;
14062 + * copy out the result; be as forgiving as we can about small output buffers
14063 + */
14065 +static int
14066 +ixp_copy_obuf(struct crparam *p, IxCryptoAccPkeEauOpResult *op, UINT32 *buf)
14068 + unsigned char *dst = (unsigned char *) p->crp_p;
14069 + unsigned char *src = (unsigned char *) buf;
14070 + int len, z, bits = p->crp_nbits;
14072 + dprintk("%s()\n", __FUNCTION__);
14074 + len = op->dataLen * sizeof(UINT32);
14076 + /* skip leading zeroes to be small buffer friendly */
14077 + z = 0;
14078 + while (z < len && src[z] == '\0')
14079 + z++;
14081 + src += len;
14082 + src--;
14083 + len -= z;
14085 + while (len > 0 && bits > 0) {
14086 + *dst++ = *src--;
14087 + len--;
14088 + bits -= 8;
14091 + while (bits > 0) {
14092 + *dst++ = '\0';
14093 + bits -= 8;
14096 + if (len > 0) {
14097 + dprintk("%s - obuf is %d (z=%d, ob=%d) bytes too small\n",
14098 + __FUNCTION__, len, z, p->crp_nbits / 8);
14099 + return -1;
14102 + return 0;
14107 + * the parameter offsets for exp_mod
14108 + */
14110 +#define IXP_PARAM_BASE 0
14111 +#define IXP_PARAM_EXP 1
14112 +#define IXP_PARAM_MOD 2
14113 +#define IXP_PARAM_RES 3
14116 + * key processing complete callback; also used to start processing
14117 + * by passing a NULL for pResult
14118 + */
14120 +static void
14121 +ixp_kperform_cb(
14122 + IxCryptoAccPkeEauOperation operation,
14123 + IxCryptoAccPkeEauOpResult *pResult,
14124 + BOOL carryOrBorrow,
14125 + IxCryptoAccStatus status)
14127 + struct ixp_pkq *q, *tmp;
14128 + unsigned long flags;
14130 + dprintk("%s(0x%x, %p, %d, 0x%x)\n", __FUNCTION__, operation, pResult,
14131 + carryOrBorrow, status);
14133 + /* handle a completed request */
14134 + if (pResult) {
14135 + if (ixp_pk_cur && &ixp_pk_cur->pkq_result == pResult) {
14136 + q = ixp_pk_cur;
14137 + if (status != IX_CRYPTO_ACC_STATUS_SUCCESS) {
14138 + dprintk("%s() - op failed 0x%x\n", __FUNCTION__, status);
14139 + q->pkq_krp->krp_status = ERANGE; /* could do better */
14140 + } else {
14141 + /* copy out the result */
14142 + if (ixp_copy_obuf(&q->pkq_krp->krp_param[IXP_PARAM_RES],
14143 + &q->pkq_result, q->pkq_obuf))
14144 + q->pkq_krp->krp_status = ERANGE;
14146 + crypto_kdone(q->pkq_krp);
14147 + kfree(q);
14148 + ixp_pk_cur = NULL;
14149 + } else
14150 + printk("%s - callback with invalid result pointer\n", __FUNCTION__);
14153 + spin_lock_irqsave(&ixp_pkq_lock, flags);
14154 + if (ixp_pk_cur || list_empty(&ixp_pkq)) {
14155 + spin_unlock_irqrestore(&ixp_pkq_lock, flags);
14156 + return;
14159 + list_for_each_entry_safe(q, tmp, &ixp_pkq, pkq_list) {
14161 + list_del(&q->pkq_list);
14162 + ixp_pk_cur = q;
14164 + spin_unlock_irqrestore(&ixp_pkq_lock, flags);
14166 + status = ixCryptoAccPkeEauPerform(
14167 + IX_CRYPTO_ACC_OP_EAU_MOD_EXP,
14168 + &q->pkq_op,
14169 + ixp_kperform_cb,
14170 + &q->pkq_result);
14172 + if (status == IX_CRYPTO_ACC_STATUS_SUCCESS) {
14173 + dprintk("%s() - ixCryptoAccPkeEauPerform SUCCESS\n", __FUNCTION__);
14174 +			return; /* the completion callback will re-enter this routine */
14175 + } else if (status == IX_CRYPTO_ACC_STATUS_RETRY) {
14176 + printk("%s() - ixCryptoAccPkeEauPerform RETRY\n", __FUNCTION__);
14177 + } else {
14178 + printk("%s() - ixCryptoAccPkeEauPerform failed %d\n",
14179 + __FUNCTION__, status);
14181 + q->pkq_krp->krp_status = ERANGE; /* could do better */
14182 + crypto_kdone(q->pkq_krp);
14183 + kfree(q);
14184 + spin_lock_irqsave(&ixp_pkq_lock, flags);
14186 + spin_unlock_irqrestore(&ixp_pkq_lock, flags);
14190 +static int
14191 +ixp_kprocess(device_t dev, struct cryptkop *krp, int hint)
14193 + struct ixp_pkq *q;
14194 + int rc = 0;
14195 + unsigned long flags;
14197 + dprintk("%s l1=%d l2=%d l3=%d l4=%d\n", __FUNCTION__,
14198 + krp->krp_param[IXP_PARAM_BASE].crp_nbits,
14199 + krp->krp_param[IXP_PARAM_EXP].crp_nbits,
14200 + krp->krp_param[IXP_PARAM_MOD].crp_nbits,
14201 + krp->krp_param[IXP_PARAM_RES].crp_nbits);
14204 + if (krp->krp_op != CRK_MOD_EXP) {
14205 + krp->krp_status = EOPNOTSUPP;
14206 + goto err;
14209 + q = (struct ixp_pkq *) kmalloc(sizeof(*q), GFP_KERNEL);
14210 + if (q == NULL) {
14211 + krp->krp_status = ENOMEM;
14212 + goto err;
14215 + /*
14216 + * The PKE engine does not appear to zero the output buffer
14217 + * appropriately, so we need to do it all here.
14218 + */
14219 + memset(q, 0, sizeof(*q));
14221 + q->pkq_krp = krp;
14222 + INIT_LIST_HEAD(&q->pkq_list);
14224 + if (ixp_copy_ibuf(&krp->krp_param[IXP_PARAM_BASE], &q->pkq_op.modExpOpr.M,
14225 + q->pkq_ibuf0))
14226 + rc = 1;
14227 + if (!rc && ixp_copy_ibuf(&krp->krp_param[IXP_PARAM_EXP],
14228 + &q->pkq_op.modExpOpr.e, q->pkq_ibuf1))
14229 + rc = 2;
14230 + if (!rc && ixp_copy_ibuf(&krp->krp_param[IXP_PARAM_MOD],
14231 + &q->pkq_op.modExpOpr.N, q->pkq_ibuf2))
14232 + rc = 3;
14234 + if (rc) {
14235 + kfree(q);
14236 + krp->krp_status = ERANGE;
14237 + goto err;
14240 + q->pkq_result.pData = q->pkq_obuf;
14241 + q->pkq_result.dataLen =
14242 + (krp->krp_param[IXP_PARAM_RES].crp_nbits + 31) / 32;
14244 + spin_lock_irqsave(&ixp_pkq_lock, flags);
14245 + list_add_tail(&q->pkq_list, &ixp_pkq);
14246 + spin_unlock_irqrestore(&ixp_pkq_lock, flags);
14248 + if (!ixp_pk_cur)
14249 + ixp_kperform_cb(0, NULL, 0, 0);
14250 + return (0);
14252 +err:
14253 + crypto_kdone(krp);
14254 + return (0);
14259 +#ifdef CONFIG_OCF_RANDOMHARVEST
14261 + * We run the random number generator output through SHA so that it
14262 + * is FIPS compliant.
14263 + */
14265 +static volatile int sha_done = 0;
14266 +static unsigned char sha_digest[20];
14268 +static void
14269 +ixp_hash_cb(UINT8 *digest, IxCryptoAccStatus status)
14271 + dprintk("%s(%p, %d)\n", __FUNCTION__, digest, status);
14272 + if (sha_digest != digest)
14273 + printk("digest error\n");
14274 + if (IX_CRYPTO_ACC_STATUS_SUCCESS == status)
14275 + sha_done = 1;
14276 + else
14277 + sha_done = -status;
14280 +static int
14281 +ixp_read_random(void *arg, u_int32_t *buf, int maxwords)
14283 + IxCryptoAccStatus status;
14284 + int i, n, rc;
14286 + dprintk("%s(%p, %d)\n", __FUNCTION__, buf, maxwords);
14287 + memset(buf, 0, maxwords * sizeof(*buf));
14288 + status = ixCryptoAccPkePseudoRandomNumberGet(maxwords, buf);
14289 + if (status != IX_CRYPTO_ACC_STATUS_SUCCESS) {
14290 + dprintk("%s: ixCryptoAccPkePseudoRandomNumberGet failed %d\n",
14291 + __FUNCTION__, status);
14292 + return 0;
14295 + /*
14296 + * run the random data through SHA to make it look more random
14297 + */
14299 + n = sizeof(sha_digest); /* process digest bytes at a time */
14301 + rc = 0;
14302 + for (i = 0; i < maxwords; i += n / sizeof(*buf)) {
14303 + if ((maxwords - i) * sizeof(*buf) < n)
14304 + n = (maxwords - i) * sizeof(*buf);
14305 + sha_done = 0;
14306 + status = ixCryptoAccPkeHashPerform(IX_CRYPTO_ACC_AUTH_SHA1,
14307 + (UINT8 *) &buf[i], n, ixp_hash_cb, sha_digest);
14308 + if (status != IX_CRYPTO_ACC_STATUS_SUCCESS) {
14309 + dprintk("ixCryptoAccPkeHashPerform failed %d\n", status);
14310 + return -EIO;
14312 + while (!sha_done)
14313 + schedule();
14314 + if (sha_done < 0) {
14315 + dprintk("ixCryptoAccPkeHashPerform failed CB %d\n", -sha_done);
14316 + return 0;
14318 + memcpy(&buf[i], sha_digest, n);
14319 +		rc += n / sizeof(*buf);
14322 + return rc;
14324 +#endif /* CONFIG_OCF_RANDOMHARVEST */
14326 +#endif /* __ixp46X */
14331 + * our driver startup and shutdown routines
14332 + */
14334 +static int
14335 +ixp_init(void)
14337 + dprintk("%s(%p)\n", __FUNCTION__, ixp_init);
14339 + if (ixp_init_crypto && ixCryptoAccInit() != IX_CRYPTO_ACC_STATUS_SUCCESS)
14340 + printk("ixCryptoAccInit failed, assuming already initialised!\n");
14342 + qcache = kmem_cache_create("ixp4xx_q", sizeof(struct ixp_q), 0,
14343 + SLAB_HWCACHE_ALIGN, NULL
14344 +#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,23)
14345 + , NULL
14346 +#endif
14347 + );
14348 + if (!qcache) {
14349 + printk("failed to create Qcache\n");
14350 + return -ENOENT;
14353 + memset(&ixpdev, 0, sizeof(ixpdev));
14354 + softc_device_init(&ixpdev, "ixp4xx", 0, ixp_methods);
14356 + ixp_id = crypto_get_driverid(softc_get_device(&ixpdev),
14357 + CRYPTOCAP_F_HARDWARE);
14358 + if (ixp_id < 0)
14359 + panic("IXP/OCF crypto device cannot initialize!");
14361 +#define REGISTER(alg) \
14362 + crypto_register(ixp_id,alg,0,0)
14364 + REGISTER(CRYPTO_DES_CBC);
14365 + REGISTER(CRYPTO_3DES_CBC);
14366 + REGISTER(CRYPTO_RIJNDAEL128_CBC);
14367 +#ifdef CONFIG_OCF_IXP4XX_SHA1_MD5
14368 + REGISTER(CRYPTO_MD5);
14369 + REGISTER(CRYPTO_SHA1);
14370 +#endif
14371 + REGISTER(CRYPTO_MD5_HMAC);
14372 + REGISTER(CRYPTO_SHA1_HMAC);
14373 +#undef REGISTER
14375 +#ifdef __ixp46X
14376 + spin_lock_init(&ixp_pkq_lock);
14377 + /*
14378 +	 * we do not enable the go-fast options here as they can potentially
14379 +	 * allow timing-based attacks
14381 + * http://www.openssl.org/news/secadv_20030219.txt
14382 + */
14383 + ixCryptoAccPkeEauExpConfig(0, 0);
14384 + crypto_kregister(ixp_id, CRK_MOD_EXP, 0);
14385 +#ifdef CONFIG_OCF_RANDOMHARVEST
14386 + crypto_rregister(ixp_id, ixp_read_random, NULL);
14387 +#endif
14388 +#endif
14390 + return 0;
14393 +static void
14394 +ixp_exit(void)
14396 + dprintk("%s()\n", __FUNCTION__);
14397 + crypto_unregister_all(ixp_id);
14398 + ixp_id = -1;
14399 + kmem_cache_destroy(qcache);
14400 + qcache = NULL;
14403 +module_init(ixp_init);
14404 +module_exit(ixp_exit);
14406 +MODULE_LICENSE("Dual BSD/GPL");
14407 +MODULE_AUTHOR("David McCullough <dmccullough@cyberguard.com>");
14408 +MODULE_DESCRIPTION("ixp (OCF module for IXP4xx crypto)");
14409 diff -Nur linux-2.6.30.orig/crypto/ocf/ixp4xx/Makefile linux-2.6.30/crypto/ocf/ixp4xx/Makefile
14410 --- linux-2.6.30.orig/crypto/ocf/ixp4xx/Makefile 1970-01-01 01:00:00.000000000 +0100
14411 +++ linux-2.6.30/crypto/ocf/ixp4xx/Makefile 2009-06-11 10:55:27.000000000 +0200
14412 @@ -0,0 +1,104 @@
14413 +# for SGlinux builds
14414 +-include $(ROOTDIR)/modules/.config
14417 +# You will need to point this at your Intel ixp425 includes; this portion
14418 +# of the Makefile only really works under SGLinux with the appropriate libs
14419 +# installed. They can be downloaded from http://www.snapgear.org/
14421 +ifeq ($(CONFIG_CPU_IXP46X),y)
14422 +IXPLATFORM = ixp46X
14423 +else
14424 +ifeq ($(CONFIG_CPU_IXP43X),y)
14425 +IXPLATFORM = ixp43X
14426 +else
14427 +IXPLATFORM = ixp42X
14428 +endif
14429 +endif
14431 +ifdef CONFIG_IXP400_LIB_2_4
14432 +IX_XSCALE_SW = $(ROOTDIR)/modules/ixp425/ixp400-2.4/ixp400_xscale_sw
14433 +OSAL_DIR = $(ROOTDIR)/modules/ixp425/ixp400-2.4/ixp_osal
14434 +endif
14435 +ifdef CONFIG_IXP400_LIB_2_1
14436 +IX_XSCALE_SW = $(ROOTDIR)/modules/ixp425/ixp400-2.1/ixp400_xscale_sw
14437 +OSAL_DIR = $(ROOTDIR)/modules/ixp425/ixp400-2.1/ixp_osal
14438 +endif
14439 +ifdef CONFIG_IXP400_LIB_2_0
14440 +IX_XSCALE_SW = $(ROOTDIR)/modules/ixp425/ixp400-2.0/ixp400_xscale_sw
14441 +OSAL_DIR = $(ROOTDIR)/modules/ixp425/ixp400-2.0/ixp_osal
14442 +endif
14443 +ifdef IX_XSCALE_SW
14444 +ifdef CONFIG_IXP400_LIB_2_4
14445 +IXP_CFLAGS = \
14446 + -I$(ROOTDIR)/. \
14447 + -I$(IX_XSCALE_SW)/src/include \
14448 + -I$(OSAL_DIR)/common/include/ \
14449 + -I$(OSAL_DIR)/common/include/modules/ \
14450 + -I$(OSAL_DIR)/common/include/modules/ddk/ \
14451 + -I$(OSAL_DIR)/common/include/modules/bufferMgt/ \
14452 + -I$(OSAL_DIR)/common/include/modules/ioMem/ \
14453 + -I$(OSAL_DIR)/common/os/linux/include/ \
14454 + -I$(OSAL_DIR)/common/os/linux/include/core/ \
14455 + -I$(OSAL_DIR)/common/os/linux/include/modules/ \
14456 + -I$(OSAL_DIR)/common/os/linux/include/modules/ddk/ \
14457 + -I$(OSAL_DIR)/common/os/linux/include/modules/bufferMgt/ \
14458 + -I$(OSAL_DIR)/common/os/linux/include/modules/ioMem/ \
14459 + -I$(OSAL_DIR)/platforms/$(IXPLATFORM)/include/ \
14460 + -I$(OSAL_DIR)/platforms/$(IXPLATFORM)/os/linux/include/ \
14461 + -DENABLE_IOMEM -DENABLE_BUFFERMGT -DENABLE_DDK \
14462 + -DUSE_IXP4XX_CRYPTO
14463 +else
14464 +IXP_CFLAGS = \
14465 + -I$(ROOTDIR)/. \
14466 + -I$(IX_XSCALE_SW)/src/include \
14467 + -I$(OSAL_DIR)/ \
14468 + -I$(OSAL_DIR)/os/linux/include/ \
14469 + -I$(OSAL_DIR)/os/linux/include/modules/ \
14470 + -I$(OSAL_DIR)/os/linux/include/modules/ioMem/ \
14471 + -I$(OSAL_DIR)/os/linux/include/modules/bufferMgt/ \
14472 + -I$(OSAL_DIR)/os/linux/include/core/ \
14473 + -I$(OSAL_DIR)/os/linux/include/platforms/ \
14474 + -I$(OSAL_DIR)/os/linux/include/platforms/ixp400/ \
14475 + -I$(OSAL_DIR)/os/linux/include/platforms/ixp400/ixp425 \
14476 + -I$(OSAL_DIR)/os/linux/include/platforms/ixp400/ixp465 \
14477 + -I$(OSAL_DIR)/os/linux/include/core/ \
14478 + -I$(OSAL_DIR)/include/ \
14479 + -I$(OSAL_DIR)/include/modules/ \
14480 + -I$(OSAL_DIR)/include/modules/bufferMgt/ \
14481 + -I$(OSAL_DIR)/include/modules/ioMem/ \
14482 + -I$(OSAL_DIR)/include/platforms/ \
14483 + -I$(OSAL_DIR)/include/platforms/ixp400/ \
14484 + -DUSE_IXP4XX_CRYPTO
14485 +endif
14486 +endif
14487 +ifdef CONFIG_IXP400_LIB_1_4
14488 +IXP_CFLAGS = \
14489 + -I$(ROOTDIR)/. \
14490 + -I$(ROOTDIR)/modules/ixp425/ixp400-1.4/ixp400_xscale_sw/src/include \
14491 + -I$(ROOTDIR)/modules/ixp425/ixp400-1.4/ixp400_xscale_sw/src/linux \
14492 + -DUSE_IXP4XX_CRYPTO
14493 +endif
14494 +ifndef IXPDIR
14495 +IXPDIR = ixp-version-is-not-supported
14496 +endif
14498 +ifeq ($(CONFIG_CPU_IXP46X),y)
14499 +IXP_CFLAGS += -D__ixp46X
14500 +else
14501 +ifeq ($(CONFIG_CPU_IXP43X),y)
14502 +IXP_CFLAGS += -D__ixp43X
14503 +else
14504 +IXP_CFLAGS += -D__ixp42X
14505 +endif
14506 +endif
14508 +obj-$(CONFIG_OCF_IXP4XX) += ixp4xx.o
14510 +obj ?= .
14511 +EXTRA_CFLAGS += $(IXP_CFLAGS) -I$(obj)/.. -I$(obj)/.
14513 +ifdef TOPDIR
14514 +-include $(TOPDIR)/Rules.make
14515 +endif
14517 diff -Nur linux-2.6.30.orig/crypto/ocf/Kconfig linux-2.6.30/crypto/ocf/Kconfig
14518 --- linux-2.6.30.orig/crypto/ocf/Kconfig 1970-01-01 01:00:00.000000000 +0100
14519 +++ linux-2.6.30/crypto/ocf/Kconfig 2009-06-11 10:55:27.000000000 +0200
14520 @@ -0,0 +1,101 @@
14521 +menu "OCF Configuration"
14523 +config OCF_OCF
14524 +	tristate "OCF (Open Cryptographic Framework)"
14525 + help
14526 +	  A Linux port of the OpenBSD/FreeBSD crypto framework.
14528 +config OCF_RANDOMHARVEST
14529 + bool "crypto random --- harvest entropy for /dev/random"
14530 + depends on OCF_OCF
14531 + help
14532 + Includes code to harvest random numbers from devices that support it.
14534 +config OCF_FIPS
14535 + bool "enable fips RNG checks"
14536 + depends on OCF_OCF && OCF_RANDOMHARVEST
14537 + help
14538 + Run all RNG provided data through a fips check before
14539 +	  adding it to /dev/random's entropy pool.
14541 +config OCF_CRYPTODEV
14542 + tristate "cryptodev (user space support)"
14543 + depends on OCF_OCF
14544 + help
14545 + The user space API to access crypto hardware.
14547 +config OCF_CRYPTOSOFT
14548 + tristate "cryptosoft (software crypto engine)"
14549 + depends on OCF_OCF
14550 + help
14551 + A software driver for the OCF framework that uses
14552 + the kernel CryptoAPI.
14554 +config OCF_SAFE
14555 + tristate "safenet (HW crypto engine)"
14556 + depends on OCF_OCF
14557 + help
14558 + A driver for a number of the safenet Excel crypto accelerators.
14559 + Currently tested and working on the 1141 and 1741.
14561 +config OCF_IXP4XX
14562 + tristate "IXP4xx (HW crypto engine)"
14563 + depends on OCF_OCF
14564 + help
14565 + XScale IXP4xx crypto accelerator driver. Requires the
14566 + Intel Access library.
14568 +config OCF_IXP4XX_SHA1_MD5
14569 + bool "IXP4xx SHA1 and MD5 Hashing"
14570 + depends on OCF_IXP4XX
14571 + help
14572 + Allows the IXP4xx crypto accelerator to perform SHA1 and MD5 hashing.
14573 + Note: this is MUCH slower than using cryptosoft (software crypto engine).
14575 +config OCF_HIFN
14576 + tristate "hifn (HW crypto engine)"
14577 + depends on OCF_OCF
14578 + help
14579 + OCF driver for various HIFN based crypto accelerators.
14580 + (7951, 7955, 7956, 7751, 7811)
14582 +config OCF_HIFNHIPP
14583 + tristate "Hifn HIPP (HW packet crypto engine)"
14584 + depends on OCF_OCF
14585 + help
14586 + OCF driver for various HIFN (HIPP) based crypto accelerators
14587 + (7855)
14589 +config OCF_TALITOS
14590 + tristate "talitos (HW crypto engine)"
14591 + depends on OCF_OCF
14592 + help
14593 + OCF driver for Freescale's security engine (SEC/talitos).
14595 +config OCF_PASEMI
14596 + tristate "pasemi (HW crypto engine)"
14597 + depends on OCF_OCF && PPC_PASEMI
14598 + help
14599 +	  OCF driver for the PA Semi PWRficient DMA Engine.
14601 +config OCF_EP80579
14602 + tristate "ep80579 (HW crypto engine)"
14603 + depends on OCF_OCF
14604 + help
14605 + OCF driver for the Intel EP80579 Integrated Processor Product Line.
14607 +config OCF_OCFNULL
14608 + tristate "ocfnull (fake crypto engine)"
14609 + depends on OCF_OCF
14610 + help
14611 +	  OCF driver for measuring IPsec overheads (does no crypto).
14613 +config OCF_BENCH
14614 + tristate "ocf-bench (HW crypto in-kernel benchmark)"
14615 + depends on OCF_OCF
14616 + help
14617 + A very simple encryption test for the in-kernel interface
14618 + of OCF. Also includes code to benchmark the IXP Access library
14619 + for comparison.
14621 +endmenu
14622 diff -Nur linux-2.6.30.orig/crypto/ocf/Makefile linux-2.6.30/crypto/ocf/Makefile
14623 --- linux-2.6.30.orig/crypto/ocf/Makefile 1970-01-01 01:00:00.000000000 +0100
14624 +++ linux-2.6.30/crypto/ocf/Makefile 2009-06-11 10:55:27.000000000 +0200
14625 @@ -0,0 +1,121 @@
14626 +# for SGlinux builds
14627 +-include $(ROOTDIR)/modules/.config
14629 +OCF_OBJS = crypto.o criov.o
14631 +ifdef CONFIG_OCF_RANDOMHARVEST
14632 + OCF_OBJS += random.o
14633 +endif
14635 +ifdef CONFIG_OCF_FIPS
14636 + OCF_OBJS += rndtest.o
14637 +endif
14639 +# Add in autoconf.h to get #defines for CONFIG_xxx
14640 +AUTOCONF_H=$(ROOTDIR)/modules/autoconf.h
14641 +ifeq ($(AUTOCONF_H), $(wildcard $(AUTOCONF_H)))
14642 + EXTRA_CFLAGS += -include $(AUTOCONF_H)
14643 + export EXTRA_CFLAGS
14644 +endif
14646 +ifndef obj
14647 + obj ?= .
14648 + _obj = subdir
14649 + mod-subdirs := safe hifn ixp4xx talitos ocfnull
14650 + export-objs += crypto.o criov.o random.o
14651 + list-multi += ocf.o
14652 + _slash :=
14653 +else
14654 + _obj = obj
14655 + _slash := /
14656 +endif
14658 +EXTRA_CFLAGS += -I$(obj)/.
14660 +obj-$(CONFIG_OCF_OCF) += ocf.o
14661 +obj-$(CONFIG_OCF_CRYPTODEV) += cryptodev.o
14662 +obj-$(CONFIG_OCF_CRYPTOSOFT) += cryptosoft.o
14663 +obj-$(CONFIG_OCF_BENCH) += ocf-bench.o
14665 +$(_obj)-$(CONFIG_OCF_SAFE) += safe$(_slash)
14666 +$(_obj)-$(CONFIG_OCF_HIFN) += hifn$(_slash)
14667 +$(_obj)-$(CONFIG_OCF_IXP4XX) += ixp4xx$(_slash)
14668 +$(_obj)-$(CONFIG_OCF_TALITOS) += talitos$(_slash)
14669 +$(_obj)-$(CONFIG_OCF_PASEMI) += pasemi$(_slash)
14670 +$(_obj)-$(CONFIG_OCF_EP80579) += ep80579$(_slash)
14671 +$(_obj)-$(CONFIG_OCF_OCFNULL) += ocfnull$(_slash)
14673 +ocf-objs := $(OCF_OBJS)
14675 +$(list-multi) dummy1: $(ocf-objs)
14676 + $(LD) -r -o $@ $(ocf-objs)
14678 +.PHONY:
14679 +clean:
14680 + rm -f *.o *.ko .*.o.flags .*.ko.cmd .*.o.cmd .*.mod.o.cmd *.mod.c
14681 + rm -f */*.o */*.ko */.*.o.cmd */.*.ko.cmd */.*.mod.o.cmd */*.mod.c */.*.o.flags
14683 +ifdef TOPDIR
14684 +-include $(TOPDIR)/Rules.make
14685 +endif
14688 +# release gen targets
14691 +.PHONY: patch
14692 +patch:
14693 + REL=`date +%Y%m%d`; \
14694 + patch=ocf-linux-$$REL.patch; \
14695 + patch24=ocf-linux-24-$$REL.patch; \
14696 + patch26=ocf-linux-26-$$REL.patch; \
14697 + ( \
14698 + find . -name Makefile; \
14699 + find . -name Config.in; \
14700 + find . -name Kconfig; \
14701 + find . -name README; \
14702 + find . -name '*.[ch]' | grep -v '.mod.c'; \
14703 + ) | while read t; do \
14704 + diff -Nau /dev/null $$t | sed 's?^+++ \./?+++ linux/crypto/ocf/?'; \
14705 + done > $$patch; \
14706 + cat patches/linux-2.4.35-ocf.patch $$patch > $$patch24; \
14707 + cat patches/linux-2.6.26-ocf.patch $$patch > $$patch26
14709 +.PHONY: tarball
14710 +tarball:
14711 + REL=`date +%Y%m%d`; RELDIR=/tmp/ocf-linux-$$REL; \
14712 + CURDIR=`pwd`; \
14713 + rm -rf /tmp/ocf-linux-$$REL*; \
14714 + mkdir -p $$RELDIR/tools; \
14715 + cp README* $$RELDIR; \
14716 + cp patches/openss*.patch $$RELDIR; \
14717 + cp patches/crypto-tools.patch $$RELDIR; \
14718 + cp tools/[!C]* $$RELDIR/tools; \
14719 + cd ..; \
14720 + tar cvf $$RELDIR/ocf-linux.tar \
14721 + --exclude=CVS \
14722 + --exclude=.* \
14723 + --exclude=*.o \
14724 + --exclude=*.ko \
14725 + --exclude=*.mod.* \
14726 + --exclude=README* \
14727 + --exclude=ocf-*.patch \
14728 + --exclude=ocf/patches/openss*.patch \
14729 + --exclude=ocf/patches/crypto-tools.patch \
14730 + --exclude=ocf/tools \
14731 + ocf; \
14732 + gzip -9 $$RELDIR/ocf-linux.tar; \
14733 + cd /tmp; \
14734 + tar cvf ocf-linux-$$REL.tar ocf-linux-$$REL; \
14735 + gzip -9 ocf-linux-$$REL.tar; \
14736 + cd $$CURDIR/../../user; \
14737 + rm -rf /tmp/crypto-tools-$$REL*; \
14738 + tar cvf /tmp/crypto-tools-$$REL.tar \
14739 + --exclude=CVS \
14740 + --exclude=.* \
14741 + --exclude=*.o \
14742 + --exclude=cryptotest \
14743 + --exclude=cryptokeytest \
14744 + crypto-tools; \
14745 + gzip -9 /tmp/crypto-tools-$$REL.tar
14747 diff -Nur linux-2.6.30.orig/crypto/ocf/ocf-bench.c linux-2.6.30/crypto/ocf/ocf-bench.c
14748 --- linux-2.6.30.orig/crypto/ocf/ocf-bench.c 1970-01-01 01:00:00.000000000 +0100
14749 +++ linux-2.6.30/crypto/ocf/ocf-bench.c 2009-06-11 10:55:27.000000000 +0200
14750 @@ -0,0 +1,436 @@
14752 + * A loadable module that benchmarks the OCF crypto speed from kernel space.
14754 + * Copyright (C) 2004-2007 David McCullough <david_mccullough@securecomputing.com>
14756 + * LICENSE TERMS
14758 + * The free distribution and use of this software in both source and binary
14759 + * form is allowed (with or without changes) provided that:
14761 + * 1. distributions of this source code include the above copyright
14762 + * notice, this list of conditions and the following disclaimer;
14764 + * 2. distributions in binary form include the above copyright
14765 + * notice, this list of conditions and the following disclaimer
14766 + * in the documentation and/or other associated materials;
14768 + * 3. the copyright holder's name is not used to endorse products
14769 + * built using this software without specific written permission.
14771 + * ALTERNATIVELY, provided that this notice is retained in full, this product
14772 + * may be distributed under the terms of the GNU General Public License (GPL),
14773 + * in which case the provisions of the GPL apply INSTEAD OF those given above.
14775 + * DISCLAIMER
14777 + * This software is provided 'as is' with no explicit or implied warranties
14778 + * in respect of its properties, including, but not limited to, correctness
14779 + * and/or fitness for purpose.
14780 + */
14783 +#ifndef AUTOCONF_INCLUDED
14784 +#include <linux/config.h>
14785 +#endif
14786 +#include <linux/module.h>
14787 +#include <linux/init.h>
14788 +#include <linux/list.h>
14789 +#include <linux/slab.h>
14790 +#include <linux/wait.h>
14791 +#include <linux/sched.h>
14792 +#include <linux/spinlock.h>
14793 +#include <linux/version.h>
14794 +#include <linux/interrupt.h>
14795 +#include <cryptodev.h>
14797 +#ifdef I_HAVE_AN_XSCALE_WITH_INTEL_SDK
14798 +#define BENCH_IXP_ACCESS_LIB 1
14799 +#endif
14800 +#ifdef BENCH_IXP_ACCESS_LIB
14801 +#include <IxTypes.h>
14802 +#include <IxOsBuffMgt.h>
14803 +#include <IxNpeDl.h>
14804 +#include <IxCryptoAcc.h>
14805 +#include <IxQMgr.h>
14806 +#include <IxOsServices.h>
14807 +#include <IxOsCacheMMU.h>
14808 +#endif
14811 + * support for access lib version 1.4
14812 + */
14813 +#ifndef IX_MBUF_PRIV
14814 +#define IX_MBUF_PRIV(x) ((x)->priv)
14815 +#endif
14818 + * the number of simultaneously active requests
14819 + */
14820 +static int request_q_len = 20;
14821 +module_param(request_q_len, int, 0);
14822 +MODULE_PARM_DESC(request_q_len, "Number of outstanding requests");
14824 + * how many requests we want to have processed
14825 + */
14826 +static int request_num = 1024;
14827 +module_param(request_num, int, 0);
14828 +MODULE_PARM_DESC(request_num, "run for at least this many requests");
14830 + * the size of each request
14831 + */
14832 +static int request_size = 1500;
14833 +module_param(request_size, int, 0);
14834 +MODULE_PARM_DESC(request_size, "size of each request");
14837 + * a structure for each request
14838 + */
14839 +typedef struct {
14840 + struct work_struct work;
14841 +#ifdef BENCH_IXP_ACCESS_LIB
14842 + IX_MBUF mbuf;
14843 +#endif
14844 + unsigned char *buffer;
14845 +} request_t;
14847 +static request_t *requests;
14849 +static int outstanding;
14850 +static int total;
14852 +/*************************************************************************/
14854 + * OCF benchmark routines
14855 + */
14857 +static uint64_t ocf_cryptoid;
14858 +static int ocf_init(void);
14859 +static int ocf_cb(struct cryptop *crp);
14860 +static void ocf_request(void *arg);
14861 +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,20)
14862 +static void ocf_request_wq(struct work_struct *work);
14863 +#endif
14865 +static int
14866 +ocf_init(void)
14868 + int error;
14869 + struct cryptoini crie, cria;
14870 + struct cryptodesc crda, crde;
14872 + memset(&crie, 0, sizeof(crie));
14873 + memset(&cria, 0, sizeof(cria));
14874 + memset(&crde, 0, sizeof(crde));
14875 + memset(&crda, 0, sizeof(crda));
14877 + cria.cri_alg = CRYPTO_SHA1_HMAC;
14878 + cria.cri_klen = 20 * 8;
14879 + cria.cri_key = "0123456789abcdefghij";
14881 + crie.cri_alg = CRYPTO_3DES_CBC;
14882 + crie.cri_klen = 24 * 8;
14883 + crie.cri_key = "0123456789abcdefghijklmn";
14885 + crie.cri_next = &cria;
14887 + error = crypto_newsession(&ocf_cryptoid, &crie, 0);
14888 + if (error) {
14889 + printk("crypto_newsession failed %d\n", error);
14890 + return -1;
14892 + return 0;
14895 +static int
14896 +ocf_cb(struct cryptop *crp)
14898 + request_t *r = (request_t *) crp->crp_opaque;
14900 + if (crp->crp_etype)
14901 + printk("Error in OCF processing: %d\n", crp->crp_etype);
14902 + total++;
14903 + crypto_freereq(crp);
14904 + crp = NULL;
14906 + if (total > request_num) {
14907 + outstanding--;
14908 + return 0;
14911 +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,20)
14912 + INIT_WORK(&r->work, ocf_request_wq);
14913 +#else
14914 + INIT_WORK(&r->work, ocf_request, r);
14915 +#endif
14916 + schedule_work(&r->work);
14917 + return 0;
14921 +static void
14922 +ocf_request(void *arg)
14924 + request_t *r = arg;
14925 + struct cryptop *crp = crypto_getreq(2);
14926 + struct cryptodesc *crde, *crda;
14928 + if (!crp) {
14929 + outstanding--;
14930 + return;
14933 + crde = crp->crp_desc;
14934 + crda = crde->crd_next;
14936 + crda->crd_skip = 0;
14937 + crda->crd_flags = 0;
14938 + crda->crd_len = request_size;
14939 + crda->crd_inject = request_size;
14940 + crda->crd_alg = CRYPTO_SHA1_HMAC;
14941 + crda->crd_key = "0123456789abcdefghij";
14942 + crda->crd_klen = 20 * 8;
14944 + crde->crd_skip = 0;
14945 + crde->crd_flags = CRD_F_IV_EXPLICIT | CRD_F_ENCRYPT;
14946 + crde->crd_len = request_size;
14947 + crde->crd_inject = request_size;
14948 + crde->crd_alg = CRYPTO_3DES_CBC;
14949 + crde->crd_key = "0123456789abcdefghijklmn";
14950 + crde->crd_klen = 24 * 8;
14952 + crp->crp_ilen = request_size + 64;
14953 + crp->crp_flags = CRYPTO_F_CBIMM;
14954 + crp->crp_buf = (caddr_t) r->buffer;
14955 + crp->crp_callback = ocf_cb;
14956 + crp->crp_sid = ocf_cryptoid;
14957 + crp->crp_opaque = (caddr_t) r;
14958 + crypto_dispatch(crp);
14961 +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,20)
14962 +static void
14963 +ocf_request_wq(struct work_struct *work)
14965 + request_t *r = container_of(work, request_t, work);
14966 + ocf_request(r);
14968 +#endif
14970 +/*************************************************************************/
14971 +#ifdef BENCH_IXP_ACCESS_LIB
14972 +/*************************************************************************/
14974 + * CryptoAcc benchmark routines
14975 + */
14977 +static IxCryptoAccCtx ixp_ctx;
14978 +static UINT32 ixp_ctx_id;
14979 +static IX_MBUF ixp_pri;
14980 +static IX_MBUF ixp_sec;
14981 +static int ixp_registered = 0;
14983 +static void ixp_register_cb(UINT32 ctx_id, IX_MBUF *bufp,
14984 + IxCryptoAccStatus status);
14985 +static void ixp_perform_cb(UINT32 ctx_id, IX_MBUF *sbufp, IX_MBUF *dbufp,
14986 + IxCryptoAccStatus status);
14987 +static void ixp_request(void *arg);
14988 +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,20)
14989 +static void ixp_request_wq(struct work_struct *work);
14990 +#endif
14992 +static int
14993 +ixp_init(void)
14995 + IxCryptoAccStatus status;
14997 + ixp_ctx.cipherCtx.cipherAlgo = IX_CRYPTO_ACC_CIPHER_3DES;
14998 + ixp_ctx.cipherCtx.cipherMode = IX_CRYPTO_ACC_MODE_CBC;
14999 + ixp_ctx.cipherCtx.cipherKeyLen = 24;
15000 + ixp_ctx.cipherCtx.cipherBlockLen = IX_CRYPTO_ACC_DES_BLOCK_64;
15001 + ixp_ctx.cipherCtx.cipherInitialVectorLen = IX_CRYPTO_ACC_DES_IV_64;
15002 + memcpy(ixp_ctx.cipherCtx.key.cipherKey, "0123456789abcdefghijklmn", 24);
15004 + ixp_ctx.authCtx.authAlgo = IX_CRYPTO_ACC_AUTH_SHA1;
15005 + ixp_ctx.authCtx.authDigestLen = 12;
15006 + ixp_ctx.authCtx.aadLen = 0;
15007 + ixp_ctx.authCtx.authKeyLen = 20;
15008 + memcpy(ixp_ctx.authCtx.key.authKey, "0123456789abcdefghij", 20);
15010 + ixp_ctx.useDifferentSrcAndDestMbufs = 0;
15011 + ixp_ctx.operation = IX_CRYPTO_ACC_OP_ENCRYPT_AUTH ;
15013 + IX_MBUF_MLEN(&ixp_pri) = IX_MBUF_PKT_LEN(&ixp_pri) = 128;
15014 + IX_MBUF_MDATA(&ixp_pri) = (unsigned char *) kmalloc(128, SLAB_ATOMIC);
15015 + IX_MBUF_MLEN(&ixp_sec) = IX_MBUF_PKT_LEN(&ixp_sec) = 128;
15016 + IX_MBUF_MDATA(&ixp_sec) = (unsigned char *) kmalloc(128, SLAB_ATOMIC);
15018 + status = ixCryptoAccCtxRegister(&ixp_ctx, &ixp_pri, &ixp_sec,
15019 + ixp_register_cb, ixp_perform_cb, &ixp_ctx_id);
15021 + if (IX_CRYPTO_ACC_STATUS_SUCCESS == status) {
15022 + while (!ixp_registered)
15023 + schedule();
15024 + return ixp_registered < 0 ? -1 : 0;
15027 + printk("ixp: ixCryptoAccCtxRegister failed %d\n", status);
15028 + return -1;
15031 +static void
15032 +ixp_register_cb(UINT32 ctx_id, IX_MBUF *bufp, IxCryptoAccStatus status)
15034 + if (bufp) {
15035 + IX_MBUF_MLEN(bufp) = IX_MBUF_PKT_LEN(bufp) = 0;
15036 + kfree(IX_MBUF_MDATA(bufp));
15037 + IX_MBUF_MDATA(bufp) = NULL;
15040 + if (IX_CRYPTO_ACC_STATUS_WAIT == status)
15041 + return;
15042 + if (IX_CRYPTO_ACC_STATUS_SUCCESS == status)
15043 + ixp_registered = 1;
15044 + else
15045 + ixp_registered = -1;
15048 +static void
15049 +ixp_perform_cb(
15050 + UINT32 ctx_id,
15051 + IX_MBUF *sbufp,
15052 + IX_MBUF *dbufp,
15053 + IxCryptoAccStatus status)
15055 + request_t *r = NULL;
15057 + total++;
15058 + if (total > request_num) {
15059 + outstanding--;
15060 + return;
15063 + if (!sbufp || !(r = IX_MBUF_PRIV(sbufp))) {
15064 + printk("crappo %p %p\n", sbufp, r);
15065 + outstanding--;
15066 + return;
15069 +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,20)
15070 + INIT_WORK(&r->work, ixp_request_wq);
15071 +#else
15072 + INIT_WORK(&r->work, ixp_request, r);
15073 +#endif
15074 + schedule_work(&r->work);
15077 +static void
15078 +ixp_request(void *arg)
15080 + request_t *r = arg;
15081 + IxCryptoAccStatus status;
15083 + memset(&r->mbuf, 0, sizeof(r->mbuf));
15084 + IX_MBUF_MLEN(&r->mbuf) = IX_MBUF_PKT_LEN(&r->mbuf) = request_size + 64;
15085 + IX_MBUF_MDATA(&r->mbuf) = r->buffer;
15086 + IX_MBUF_PRIV(&r->mbuf) = r;
15087 + status = ixCryptoAccAuthCryptPerform(ixp_ctx_id, &r->mbuf, NULL,
15088 + 0, request_size, 0, request_size, request_size, r->buffer);
15089 + if (IX_CRYPTO_ACC_STATUS_SUCCESS != status) {
15090 + printk("status1 = %d\n", status);
15091 + outstanding--;
15092 + return;
15094 + return;
15097 +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,20)
15098 +static void
15099 +ixp_request_wq(struct work_struct *work)
15101 + request_t *r = container_of(work, request_t, work);
15102 + ixp_request(r);
15104 +#endif
15106 +/*************************************************************************/
15107 +#endif /* BENCH_IXP_ACCESS_LIB */
15108 +/*************************************************************************/
15110 +int
15111 +ocfbench_init(void)
15113 + int i, jstart, jstop;
15115 + printk("Crypto Speed tests\n");
15117 + requests = kmalloc(sizeof(request_t) * request_q_len, GFP_KERNEL);
15118 + if (!requests) {
15119 + printk("malloc failed\n");
15120 + return -EINVAL;
15123 + for (i = 0; i < request_q_len; i++) {
15124 + /* +64 for return data */
15125 + requests[i].buffer = kmalloc(request_size + 128, GFP_DMA);
15126 + if (!requests[i].buffer) {
15127 + printk("malloc failed\n");
15128 + return -EINVAL;
15130 + memset(requests[i].buffer, '0' + i, request_size + 128);
15133 + /*
15134 + * OCF benchmark
15135 + */
15136 + printk("OCF: testing ...\n");
15137 + ocf_init();
15138 + total = outstanding = 0;
15139 + jstart = jiffies;
15140 + for (i = 0; i < request_q_len; i++) {
15141 + outstanding++;
15142 + ocf_request(&requests[i]);
15144 + while (outstanding > 0)
15145 + schedule();
15146 + jstop = jiffies;
15148 + printk("OCF: %d requests of %d bytes in %d jiffies\n", total, request_size,
15149 + jstop - jstart);
15151 +#ifdef BENCH_IXP_ACCESS_LIB
15152 + /*
15153 + * IXP benchmark
15154 + */
15155 + printk("IXP: testing ...\n");
15156 + ixp_init();
15157 + total = outstanding = 0;
15158 + jstart = jiffies;
15159 + for (i = 0; i < request_q_len; i++) {
15160 + outstanding++;
15161 + ixp_request(&requests[i]);
15163 + while (outstanding > 0)
15164 + schedule();
15165 + jstop = jiffies;
15167 + printk("IXP: %d requests of %d bytes in %d jiffies\n", total, request_size,
15168 + jstop - jstart);
15169 +#endif /* BENCH_IXP_ACCESS_LIB */
15171 + for (i = 0; i < request_q_len; i++)
15172 + kfree(requests[i].buffer);
15173 + kfree(requests);
15174 + return -EINVAL; /* always fail to load so it can be re-run quickly ;-) */
15177 +static void __exit ocfbench_exit(void)
15181 +module_init(ocfbench_init);
15182 +module_exit(ocfbench_exit);
15184 +MODULE_LICENSE("BSD");
15185 +MODULE_AUTHOR("David McCullough <david_mccullough@securecomputing.com>");
15186 +MODULE_DESCRIPTION("Benchmark various in-kernel crypto speeds");
15187 diff -Nur linux-2.6.30.orig/crypto/ocf/ocf-compat.h linux-2.6.30/crypto/ocf/ocf-compat.h
15188 --- linux-2.6.30.orig/crypto/ocf/ocf-compat.h 1970-01-01 01:00:00.000000000 +0100
15189 +++ linux-2.6.30/crypto/ocf/ocf-compat.h 2009-06-11 10:55:27.000000000 +0200
15190 @@ -0,0 +1,270 @@
15191 +#ifndef _BSD_COMPAT_H_
15192 +#define _BSD_COMPAT_H_ 1
15193 +/****************************************************************************/
15195 + * Provide compat routines for older linux kernels and BSD kernels
15197 + * Written by David McCullough <david_mccullough@securecomputing.com>
15198 + * Copyright (C) 2007 David McCullough <david_mccullough@securecomputing.com>
15200 + * LICENSE TERMS
15202 + * The free distribution and use of this software in both source and binary
15203 + * form is allowed (with or without changes) provided that:
15205 + * 1. distributions of this source code include the above copyright
15206 + * notice, this list of conditions and the following disclaimer;
15208 + * 2. distributions in binary form include the above copyright
15209 + * notice, this list of conditions and the following disclaimer
15210 + * in the documentation and/or other associated materials;
15212 + * 3. the copyright holder's name is not used to endorse products
15213 + * built using this software without specific written permission.
15215 + * ALTERNATIVELY, provided that this notice is retained in full, this file
15216 + * may be distributed under the terms of the GNU General Public License (GPL),
15217 + * in which case the provisions of the GPL apply INSTEAD OF those given above.
15219 + * DISCLAIMER
15221 + * This software is provided 'as is' with no explicit or implied warranties
15222 + * in respect of its properties, including, but not limited to, correctness
15223 + * and/or fitness for purpose.
15224 + */
15225 +/****************************************************************************/
15226 +#ifdef __KERNEL__
15228 + * fake some BSD driver interface stuff specifically for OCF use
15229 + */
15231 +typedef struct ocf_device *device_t;
15233 +typedef struct {
15234 + int (*cryptodev_newsession)(device_t dev, u_int32_t *sidp, struct cryptoini *cri);
15235 + int (*cryptodev_freesession)(device_t dev, u_int64_t tid);
15236 + int (*cryptodev_process)(device_t dev, struct cryptop *crp, int hint);
15237 + int (*cryptodev_kprocess)(device_t dev, struct cryptkop *krp, int hint);
15238 +} device_method_t;
15239 +#define DEVMETHOD(id, func) id: func
15241 +struct ocf_device {
15242 + char name[32]; /* the driver name */
15243 + char nameunit[32]; /* the driver name + HW instance */
15244 + int unit;
15245 + device_method_t methods;
15246 + void *softc;
15249 +#define CRYPTODEV_NEWSESSION(dev, sid, cri) \
15250 + ((*(dev)->methods.cryptodev_newsession)(dev,sid,cri))
15251 +#define CRYPTODEV_FREESESSION(dev, sid) \
15252 + ((*(dev)->methods.cryptodev_freesession)(dev, sid))
15253 +#define CRYPTODEV_PROCESS(dev, crp, hint) \
15254 + ((*(dev)->methods.cryptodev_process)(dev, crp, hint))
15255 +#define CRYPTODEV_KPROCESS(dev, krp, hint) \
15256 + ((*(dev)->methods.cryptodev_kprocess)(dev, krp, hint))
15258 +#define device_get_name(dev) ((dev)->name)
15259 +#define device_get_nameunit(dev) ((dev)->nameunit)
15260 +#define device_get_unit(dev) ((dev)->unit)
15261 +#define device_get_softc(dev) ((dev)->softc)
15263 +#define softc_device_decl \
15264 + struct ocf_device _device; \
15265 + device_t
15267 +#define softc_device_init(_sc, _name, _unit, _methods) \
15268 + if (1) {\
15269 + strncpy((_sc)->_device.name, _name, sizeof((_sc)->_device.name) - 1); \
15270 +		snprintf((_sc)->_device.nameunit, sizeof((_sc)->_device.nameunit), "%s%d", _name, _unit); \
15271 + (_sc)->_device.unit = _unit; \
15272 + (_sc)->_device.methods = _methods; \
15273 + (_sc)->_device.softc = (void *) _sc; \
15274 + *(device_t *)((softc_get_device(_sc))+1) = &(_sc)->_device; \
15275 + } else
15277 +#define softc_get_device(_sc) (&(_sc)->_device)
15280 + * iomem support for 2.4 and 2.6 kernels
15281 + */
15282 +#include <linux/version.h>
15283 +#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0)
15284 +#define ocf_iomem_t unsigned long
15287 + * implement simple workqueue like support for older kernels
15288 + */
15290 +#include <linux/tqueue.h>
15292 +#define work_struct tq_struct
15294 +#define INIT_WORK(wp, fp, ap) \
15295 + do { \
15296 + (wp)->sync = 0; \
15297 + (wp)->routine = (fp); \
15298 + (wp)->data = (ap); \
15299 + } while (0)
15301 +#define schedule_work(wp) \
15302 + do { \
15303 + queue_task((wp), &tq_immediate); \
15304 + mark_bh(IMMEDIATE_BH); \
15305 + } while (0)
15307 +#define flush_scheduled_work() run_task_queue(&tq_immediate)
15309 +#else
15310 +#define ocf_iomem_t void __iomem *
15312 +#include <linux/workqueue.h>
15314 +#endif
15316 +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,26)
15317 +#include <linux/fdtable.h>
15318 +#elif LINUX_VERSION_CODE < KERNEL_VERSION(2,6,11)
15319 +#define files_fdtable(files) (files)
15320 +#endif
15322 +#ifdef MODULE_PARM
15323 +#undef module_param /* just in case */
15324 +#define module_param(a,b,c) MODULE_PARM(a,"i")
15325 +#endif
15327 +#define bzero(s,l) memset(s,0,l)
15328 +#define bcopy(s,d,l) memcpy(d,s,l)
15329 +#define bcmp(x, y, l) memcmp(x,y,l)
15331 +#define MIN(x,y) ((x) < (y) ? (x) : (y))
15333 +#define device_printf(dev, a...) ({ \
15334 + printk("%s: ", device_get_nameunit(dev)); printk(a); \
15335 + })
15337 +#undef printf
15338 +#define printf(fmt...) printk(fmt)
15340 +#define KASSERT(c,p) if (!(c)) { printk p ; } else
15342 +#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0)
15343 +#define ocf_daemonize(str) \
15344 + daemonize(); \
15345 + spin_lock_irq(&current->sigmask_lock); \
15346 + sigemptyset(&current->blocked); \
15347 + recalc_sigpending(current); \
15348 + spin_unlock_irq(&current->sigmask_lock); \
15349 + sprintf(current->comm, str);
15350 +#else
15351 +#define ocf_daemonize(str) daemonize(str);
15352 +#endif
15354 +#define TAILQ_INSERT_TAIL(q,d,m) list_add_tail(&(d)->m, (q))
15355 +#define TAILQ_EMPTY(q) list_empty(q)
15356 +#define TAILQ_FOREACH(v, q, m) list_for_each_entry(v, q, m)
15358 +#define read_random(p,l) get_random_bytes(p,l)
15360 +#define DELAY(x) ((x) > 2000 ? mdelay((x)/1000) : udelay(x))
15361 +#define strtoul simple_strtoul
15363 +#define pci_get_vendor(dev) ((dev)->vendor)
15364 +#define pci_get_device(dev) ((dev)->device)
15366 +#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0)
15367 +#define pci_set_consistent_dma_mask(dev, mask) (0)
15368 +#endif
15369 +#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,10)
15370 +#define pci_dma_sync_single_for_cpu pci_dma_sync_single
15371 +#endif
15373 +#ifndef DMA_32BIT_MASK
15374 +#define DMA_32BIT_MASK 0x00000000ffffffffULL
15375 +#endif
15377 +#define htole32(x) cpu_to_le32(x)
15378 +#define htobe32(x) cpu_to_be32(x)
15379 +#define htole16(x) cpu_to_le16(x)
15380 +#define htobe16(x) cpu_to_be16(x)
15382 +/* older kernels don't have these */
15384 +#ifndef IRQ_NONE
15385 +#define IRQ_NONE
15386 +#define IRQ_HANDLED
15387 +#define irqreturn_t void
15388 +#endif
15389 +#ifndef IRQF_SHARED
15390 +#define IRQF_SHARED SA_SHIRQ
15391 +#endif
15393 +#if LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0)
15394 +# define strlcpy(dest,src,len) \
15395 + ({strncpy(dest,src,(len)-1); ((char *)dest)[(len)-1] = '\0'; })
15396 +#endif
15398 +#ifndef MAX_ERRNO
15399 +#define MAX_ERRNO 4095
15400 +#endif
15401 +#ifndef IS_ERR_VALUE
15402 +#define IS_ERR_VALUE(x) ((unsigned long)(x) >= (unsigned long)-MAX_ERRNO)
15403 +#endif
15406 + * common debug for all
15407 + */
15408 +#if 1
15409 +#define dprintk(a...) do { if (debug) printk(a); } while(0)
15410 +#else
15411 +#define dprintk(a...)
15412 +#endif
15414 +#ifndef SLAB_ATOMIC
15415 +/* Changed in 2.6.20, must use GFP_ATOMIC now */
15416 +#define SLAB_ATOMIC GFP_ATOMIC
15417 +#endif
15420 + * need some additional support for older kernels */
15421 +#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,2)
15422 +#define pci_register_driver_compat(driver, rc) \
15423 + do { \
15424 + if ((rc) > 0) { \
15425 + (rc) = 0; \
15426 + } else if (rc == 0) { \
15427 + (rc) = -ENODEV; \
15428 + } else { \
15429 + pci_unregister_driver(driver); \
15430 + } \
15431 + } while (0)
15432 +#elif LINUX_VERSION_CODE < KERNEL_VERSION(2,6,10)
15433 +#define pci_register_driver_compat(driver,rc) ((rc) = (rc) < 0 ? (rc) : 0)
15434 +#else
15435 +#define pci_register_driver_compat(driver,rc)
15436 +#endif
15438 +#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,24)
15440 +#include <asm/scatterlist.h>
15442 +static inline void sg_set_page(struct scatterlist *sg, struct page *page,
15443 + unsigned int len, unsigned int offset)
15445 + sg->page = page;
15446 + sg->offset = offset;
15447 + sg->length = len;
15450 +static inline void *sg_virt(struct scatterlist *sg)
15452 + return page_address(sg->page) + sg->offset;
15455 +#endif
15457 +#endif /* __KERNEL__ */
15459 +/****************************************************************************/
15460 +#endif /* _BSD_COMPAT_H_ */
15461 diff -Nur linux-2.6.30.orig/crypto/ocf/ocfnull/Makefile linux-2.6.30/crypto/ocf/ocfnull/Makefile
15462 --- linux-2.6.30.orig/crypto/ocf/ocfnull/Makefile 1970-01-01 01:00:00.000000000 +0100
15463 +++ linux-2.6.30/crypto/ocf/ocfnull/Makefile 2009-06-11 10:55:27.000000000 +0200
15464 @@ -0,0 +1,12 @@
15465 +# for SGlinux builds
15466 +-include $(ROOTDIR)/modules/.config
15468 +obj-$(CONFIG_OCF_OCFNULL) += ocfnull.o
15470 +obj ?= .
15471 +EXTRA_CFLAGS += -I$(obj)/..
15473 +ifdef TOPDIR
15474 +-include $(TOPDIR)/Rules.make
15475 +endif
15477 diff -Nur linux-2.6.30.orig/crypto/ocf/ocfnull/ocfnull.c linux-2.6.30/crypto/ocf/ocfnull/ocfnull.c
15478 --- linux-2.6.30.orig/crypto/ocf/ocfnull/ocfnull.c 1970-01-01 01:00:00.000000000 +0100
15479 +++ linux-2.6.30/crypto/ocf/ocfnull/ocfnull.c 2009-06-11 10:55:27.000000000 +0200
15480 @@ -0,0 +1,203 @@
15482 + * An OCF module for determining the cost of crypto versus the cost of
15483 + * IPSec processing outside of OCF. This module gives us the effect of
15484 + * zero-cost encryption; of course you will need to run it at both ends
15485 + * since it does no crypto at all.
15487 + * Written by David McCullough <david_mccullough@securecomputing.com>
15488 + * Copyright (C) 2006-2007 David McCullough
15490 + * LICENSE TERMS
15492 + * The free distribution and use of this software in both source and binary
15493 + * form is allowed (with or without changes) provided that:
15495 + * 1. distributions of this source code include the above copyright
15496 + * notice, this list of conditions and the following disclaimer;
15498 + * 2. distributions in binary form include the above copyright
15499 + * notice, this list of conditions and the following disclaimer
15500 + * in the documentation and/or other associated materials;
15502 + * 3. the copyright holder's name is not used to endorse products
15503 + * built using this software without specific written permission.
15505 + * ALTERNATIVELY, provided that this notice is retained in full, this product
15506 + * may be distributed under the terms of the GNU General Public License (GPL),
15507 + * in which case the provisions of the GPL apply INSTEAD OF those given above.
15509 + * DISCLAIMER
15511 + * This software is provided 'as is' with no explicit or implied warranties
15512 + * in respect of its properties, including, but not limited to, correctness
15513 + * and/or fitness for purpose.
15514 + */
15516 +#ifndef AUTOCONF_INCLUDED
15517 +#include <linux/config.h>
15518 +#endif
15519 +#include <linux/module.h>
15520 +#include <linux/init.h>
15521 +#include <linux/list.h>
15522 +#include <linux/slab.h>
15523 +#include <linux/sched.h>
15524 +#include <linux/wait.h>
15525 +#include <linux/crypto.h>
15526 +#include <linux/interrupt.h>
15528 +#include <cryptodev.h>
15529 +#include <uio.h>
15531 +static int32_t null_id = -1;
15532 +static u_int32_t null_sesnum = 0;
15534 +static int null_process(device_t, struct cryptop *, int);
15535 +static int null_newsession(device_t, u_int32_t *, struct cryptoini *);
15536 +static int null_freesession(device_t, u_int64_t);
15538 +#define debug ocfnull_debug
15539 +int ocfnull_debug = 0;
15540 +module_param(ocfnull_debug, int, 0644);
15541 +MODULE_PARM_DESC(ocfnull_debug, "Enable debug");
15544 + * dummy device structure
15545 + */
15547 +static struct {
15548 + softc_device_decl sc_dev;
15549 +} nulldev;
15551 +static device_method_t null_methods = {
15552 + /* crypto device methods */
15553 + DEVMETHOD(cryptodev_newsession, null_newsession),
15554 + DEVMETHOD(cryptodev_freesession,null_freesession),
15555 + DEVMETHOD(cryptodev_process, null_process),
15559 + * Generate a new software session.
15560 + */
15561 +static int
15562 +null_newsession(device_t arg, u_int32_t *sid, struct cryptoini *cri)
15564 + dprintk("%s()\n", __FUNCTION__);
15565 + if (sid == NULL || cri == NULL) {
15566 + dprintk("%s,%d - EINVAL\n", __FILE__, __LINE__);
15567 + return EINVAL;
15570 + if (null_sesnum == 0)
15571 + null_sesnum++;
15572 + *sid = null_sesnum++;
15573 + return 0;
15578 + * Free a session.
15579 + */
15580 +static int
15581 +null_freesession(device_t arg, u_int64_t tid)
15583 + u_int32_t sid = CRYPTO_SESID2LID(tid);
15585 + dprintk("%s()\n", __FUNCTION__);
15586 + if (sid > null_sesnum) {
15587 + dprintk("%s,%d: EINVAL\n", __FILE__, __LINE__);
15588 + return EINVAL;
15591 + /* Silently accept and return */
15592 + if (sid == 0)
15593 + return 0;
15594 + return 0;
15599 + * Process a request.
15600 + */
15601 +static int
15602 +null_process(device_t arg, struct cryptop *crp, int hint)
15604 + unsigned int lid;
15606 + dprintk("%s()\n", __FUNCTION__);
15608 + /* Sanity check */
15609 + if (crp == NULL) {
15610 + dprintk("%s,%d: EINVAL\n", __FILE__, __LINE__);
15611 + return EINVAL;
15614 + crp->crp_etype = 0;
15616 + if (crp->crp_desc == NULL || crp->crp_buf == NULL) {
15617 + dprintk("%s,%d: EINVAL\n", __FILE__, __LINE__);
15618 + crp->crp_etype = EINVAL;
15619 + goto done;
15622 + /*
15623 + * find the session we are using
15624 + */
15626 + lid = crp->crp_sid & 0xffffffff;
15627 + if (lid >= null_sesnum || lid == 0) {
15628 + crp->crp_etype = ENOENT;
15629 + dprintk("%s,%d: ENOENT\n", __FILE__, __LINE__);
15630 + goto done;
15633 +done:
15634 + crypto_done(crp);
15635 + return 0;
15640 + * our driver startup and shutdown routines
15641 + */
15643 +static int
15644 +null_init(void)
15646 + dprintk("%s(%p)\n", __FUNCTION__, null_init);
15648 + memset(&nulldev, 0, sizeof(nulldev));
15649 + softc_device_init(&nulldev, "ocfnull", 0, null_methods);
15651 + null_id = crypto_get_driverid(softc_get_device(&nulldev),
15652 + CRYPTOCAP_F_HARDWARE);
15653 + if (null_id < 0)
15654 + panic("ocfnull: crypto device cannot initialize!");
15656 +#define REGISTER(alg) \
15657 + crypto_register(null_id,alg,0,0)
15658 + REGISTER(CRYPTO_DES_CBC);
15659 + REGISTER(CRYPTO_3DES_CBC);
15660 + REGISTER(CRYPTO_RIJNDAEL128_CBC);
15661 + REGISTER(CRYPTO_MD5);
15662 + REGISTER(CRYPTO_SHA1);
15663 + REGISTER(CRYPTO_MD5_HMAC);
15664 + REGISTER(CRYPTO_SHA1_HMAC);
15665 +#undef REGISTER
15667 + return 0;
15670 +static void
15671 +null_exit(void)
15673 + dprintk("%s()\n", __FUNCTION__);
15674 + crypto_unregister_all(null_id);
15675 + null_id = -1;
15678 +module_init(null_init);
15679 +module_exit(null_exit);
15681 +MODULE_LICENSE("Dual BSD/GPL");
15682 +MODULE_AUTHOR("David McCullough <david_mccullough@securecomputing.com>");
15683 +MODULE_DESCRIPTION("ocfnull - claims a lot but does nothing");
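ocfnull also doubles as the smallest possible OCF backend: a device_method_t table routes cryptodev_newsession/cryptodev_freesession/cryptodev_process to the driver's handlers, crypto_get_driverid() obtains a driver id, and crypto_register() announces each algorithm the driver claims to handle. A hardware driver follows the same skeleton but registers only what its engine can accelerate; a hedged sketch (mydev and my_methods are placeholder names, not defined anywhere in this patch):

	memset(&mydev, 0, sizeof(mydev));
	softc_device_init(&mydev, "mydrv", 0, my_methods);
	cid = crypto_get_driverid(softc_get_device(&mydev), CRYPTOCAP_F_HARDWARE);
	if (cid < 0)
		return -ENODEV;
	/* advertise only what the hardware actually supports */
	crypto_register(cid, CRYPTO_AES_CBC, 0, 0);
	crypto_register(cid, CRYPTO_SHA1_HMAC, 0, 0);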
15684 diff -Nur linux-2.6.30.orig/crypto/ocf/pasemi/Makefile linux-2.6.30/crypto/ocf/pasemi/Makefile
15685 --- linux-2.6.30.orig/crypto/ocf/pasemi/Makefile 1970-01-01 01:00:00.000000000 +0100
15686 +++ linux-2.6.30/crypto/ocf/pasemi/Makefile 2009-06-11 10:55:27.000000000 +0200
15687 @@ -0,0 +1,12 @@
15688 +# for SGlinux builds
15689 +-include $(ROOTDIR)/modules/.config
15691 +obj-$(CONFIG_OCF_PASEMI) += pasemi.o
15693 +obj ?= .
15694 +EXTRA_CFLAGS += -I$(obj)/.. -I$(obj)/
15696 +ifdef TOPDIR
15697 +-include $(TOPDIR)/Rules.make
15698 +endif
15700 diff -Nur linux-2.6.30.orig/crypto/ocf/pasemi/pasemi.c linux-2.6.30/crypto/ocf/pasemi/pasemi.c
15701 --- linux-2.6.30.orig/crypto/ocf/pasemi/pasemi.c 1970-01-01 01:00:00.000000000 +0100
15702 +++ linux-2.6.30/crypto/ocf/pasemi/pasemi.c 2009-06-11 10:55:27.000000000 +0200
15703 @@ -0,0 +1,1009 @@
15705 + * Copyright (C) 2007 PA Semi, Inc
15707 + * Driver for the PA Semi PWRficient DMA Crypto Engine
15709 + * This program is free software; you can redistribute it and/or modify
15710 + * it under the terms of the GNU General Public License version 2 as
15711 + * published by the Free Software Foundation.
15713 + * This program is distributed in the hope that it will be useful,
15714 + * but WITHOUT ANY WARRANTY; without even the implied warranty of
15715 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15716 + * GNU General Public License for more details.
15718 + * You should have received a copy of the GNU General Public License
15719 + * along with this program; if not, write to the Free Software
15720 + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
15721 + */
15723 +#ifndef AUTOCONF_INCLUDED
15724 +#include <linux/config.h>
15725 +#endif
15726 +#include <linux/module.h>
15727 +#include <linux/init.h>
15728 +#include <linux/interrupt.h>
15729 +#include <linux/timer.h>
15730 +#include <linux/random.h>
15731 +#include <linux/skbuff.h>
15732 +#include <asm/scatterlist.h>
15733 +#include <linux/moduleparam.h>
15734 +#include <linux/pci.h>
15735 +#include <cryptodev.h>
15736 +#include <uio.h>
15737 +#include "pasemi_fnu.h"
15739 +#define DRV_NAME "pasemi"
15741 +#define TIMER_INTERVAL 1000
15743 +static void __devexit pasemi_dma_remove(struct pci_dev *pdev);
15744 +static struct pasdma_status volatile * dma_status;
15746 +static int debug;
15747 +module_param(debug, int, 0644);
15748 +MODULE_PARM_DESC(debug, "Enable debug");
15750 +static void pasemi_desc_start(struct pasemi_desc *desc, u64 hdr)
15752 + desc->postop = 0;
15753 + desc->quad[0] = hdr;
15754 + desc->quad_cnt = 1;
15755 + desc->size = 1;
15758 +static void pasemi_desc_build(struct pasemi_desc *desc, u64 val)
15760 + desc->quad[desc->quad_cnt++] = val;
15761 + desc->size = (desc->quad_cnt + 1) / 2;
15764 +static void pasemi_desc_hdr(struct pasemi_desc *desc, u64 hdr)
15766 + desc->quad[0] |= hdr;
15769 +static int pasemi_desc_size(struct pasemi_desc *desc)
15771 + return desc->size;
15774 +static void pasemi_ring_add_desc(
15775 + struct pasemi_fnu_txring *ring,
15776 + struct pasemi_desc *desc,
15777 + struct cryptop *crp) {
15778 + int i;
15779 + int ring_index = 2 * (ring->next_to_fill & (TX_RING_SIZE-1));
15781 + TX_DESC_INFO(ring, ring->next_to_fill).desc_size = desc->size;
15782 + TX_DESC_INFO(ring, ring->next_to_fill).desc_postop = desc->postop;
15783 + TX_DESC_INFO(ring, ring->next_to_fill).cf_crp = crp;
15785 + for (i = 0; i < desc->quad_cnt; i += 2) {
15786 + ring_index = 2 * (ring->next_to_fill & (TX_RING_SIZE-1));
15787 + ring->desc[ring_index] = desc->quad[i];
15788 + ring->desc[ring_index + 1] = desc->quad[i + 1];
15789 + ring->next_to_fill++;
15792 + if (desc->quad_cnt & 1)
15793 + ring->desc[ring_index + 1] = 0;
15796 +static void pasemi_ring_incr(struct pasemi_softc *sc, int chan_index, int incr)
15798 + out_le32(sc->dma_regs + PAS_DMA_TXCHAN_INCR(sc->base_chan + chan_index),
15799 + incr);
15803 + * Generate a new software session.
15804 + */
15805 +static int
15806 +pasemi_newsession(device_t dev, u_int32_t *sidp, struct cryptoini *cri)
15808 + struct cryptoini *c, *encini = NULL, *macini = NULL;
15809 + struct pasemi_softc *sc = device_get_softc(dev);
15810 + struct pasemi_session *ses = NULL, **sespp;
15811 + int sesn, blksz = 0;
15812 + u64 ccmd = 0;
15813 + unsigned long flags;
15814 + struct pasemi_desc init_desc;
15815 + struct pasemi_fnu_txring *txring;
15817 + DPRINTF("%s()\n", __FUNCTION__);
15818 + if (sidp == NULL || cri == NULL || sc == NULL) {
15819 + DPRINTF("%s,%d - EINVAL\n", __FILE__, __LINE__);
15820 + return -EINVAL;
15822 + for (c = cri; c != NULL; c = c->cri_next) {
15823 + if (ALG_IS_SIG(c->cri_alg)) {
15824 + if (macini)
15825 + return -EINVAL;
15826 + macini = c;
15827 + } else if (ALG_IS_CIPHER(c->cri_alg)) {
15828 + if (encini)
15829 + return -EINVAL;
15830 + encini = c;
15831 + } else {
15832 + DPRINTF("UNKNOWN c->cri_alg %d\n", c->cri_alg);
15833 + return -EINVAL;
15836 + if (encini == NULL && macini == NULL)
15837 + return -EINVAL;
15838 + if (encini) {
15839 + /* validate key length */
15840 + switch (encini->cri_alg) {
15841 + case CRYPTO_DES_CBC:
15842 + if (encini->cri_klen != 64)
15843 + return -EINVAL;
15844 + ccmd = DMA_CALGO_DES;
15845 + break;
15846 + case CRYPTO_3DES_CBC:
15847 + if (encini->cri_klen != 192)
15848 + return -EINVAL;
15849 + ccmd = DMA_CALGO_3DES;
15850 + break;
15851 + case CRYPTO_AES_CBC:
15852 + if (encini->cri_klen != 128 &&
15853 + encini->cri_klen != 192 &&
15854 + encini->cri_klen != 256)
15855 + return -EINVAL;
15856 + ccmd = DMA_CALGO_AES;
15857 + break;
15858 + case CRYPTO_ARC4:
15859 + if (encini->cri_klen != 128)
15860 + return -EINVAL;
15861 + ccmd = DMA_CALGO_ARC;
15862 + break;
15863 + default:
15864 + DPRINTF("UNKNOWN encini->cri_alg %d\n",
15865 + encini->cri_alg);
15866 + return -EINVAL;
15870 + if (macini) {
15871 + switch (macini->cri_alg) {
15872 + case CRYPTO_MD5:
15873 + case CRYPTO_MD5_HMAC:
15874 + blksz = 16;
15875 + break;
15876 + case CRYPTO_SHA1:
15877 + case CRYPTO_SHA1_HMAC:
15878 + blksz = 20;
15879 + break;
15880 + default:
15881 + DPRINTF("UNKNOWN macini->cri_alg %d\n",
15882 + macini->cri_alg);
15883 + return -EINVAL;
15885 + if (((macini->cri_klen + 7) / 8) > blksz) {
15886 + DPRINTF("key length %d bigger than blksize %d not supported\n",
15887 + ((macini->cri_klen + 7) / 8), blksz);
15888 + return -EINVAL;
15892 + for (sesn = 0; sesn < sc->sc_nsessions; sesn++) {
15893 + if (sc->sc_sessions[sesn] == NULL) {
15894 + sc->sc_sessions[sesn] = (struct pasemi_session *)
15895 + kzalloc(sizeof(struct pasemi_session), GFP_ATOMIC);
15896 + ses = sc->sc_sessions[sesn];
15897 + break;
15898 + } else if (sc->sc_sessions[sesn]->used == 0) {
15899 + ses = sc->sc_sessions[sesn];
15900 + break;
15904 + if (ses == NULL) {
15905 + sespp = (struct pasemi_session **)
15906 + kzalloc(sc->sc_nsessions * 2 *
15907 + sizeof(struct pasemi_session *), GFP_ATOMIC);
15908 + if (sespp == NULL)
15909 + return -ENOMEM;
15910 + memcpy(sespp, sc->sc_sessions,
15911 + sc->sc_nsessions * sizeof(struct pasemi_session *));
15912 + kfree(sc->sc_sessions);
15913 + sc->sc_sessions = sespp;
15914 + sesn = sc->sc_nsessions;
15915 + ses = sc->sc_sessions[sesn] = (struct pasemi_session *)
15916 + kzalloc(sizeof(struct pasemi_session), GFP_ATOMIC);
15917 + if (ses == NULL)
15918 + return -ENOMEM;
15919 + sc->sc_nsessions *= 2;
15922 + ses->used = 1;
15924 + ses->dma_addr = pci_map_single(sc->dma_pdev, (void *) ses->civ,
15925 + sizeof(struct pasemi_session), DMA_TO_DEVICE);
15927 + /* enter the channel scheduler */
15928 + spin_lock_irqsave(&sc->sc_chnlock, flags);
15930 + /* ARC4 has to be processed by the even channel */
15931 + if (encini && (encini->cri_alg == CRYPTO_ARC4))
15932 + ses->chan = sc->sc_lastchn & ~1;
15933 + else
15934 + ses->chan = sc->sc_lastchn;
15935 + sc->sc_lastchn = (sc->sc_lastchn + 1) % sc->sc_num_channels;
15937 + spin_unlock_irqrestore(&sc->sc_chnlock, flags);
15939 + txring = &sc->tx[ses->chan];
15941 + if (encini) {
15942 + ses->ccmd = ccmd;
15944 + /* get an IV */
15945 + /* XXX may read fewer than requested */
15946 + get_random_bytes(ses->civ, sizeof(ses->civ));
15948 + ses->keysz = (encini->cri_klen - 63) / 64;
15949 + memcpy(ses->key, encini->cri_key, (ses->keysz + 1) * 8);
15951 + pasemi_desc_start(&init_desc,
15952 + XCT_CTRL_HDR(ses->chan, (encini && macini) ? 0x68 : 0x40, DMA_FN_CIV0));
15953 + pasemi_desc_build(&init_desc,
15954 + XCT_FUN_SRC_PTR((encini && macini) ? 0x68 : 0x40, ses->dma_addr));
15956 + if (macini) {
15957 + if (macini->cri_alg == CRYPTO_MD5_HMAC ||
15958 + macini->cri_alg == CRYPTO_SHA1_HMAC)
15959 + memcpy(ses->hkey, macini->cri_key, blksz);
15960 + else {
15961 + /* Load initialization constants (RFC 1321, 3174) */
15962 + ses->hiv[0] = 0x67452301efcdab89ULL;
15963 + ses->hiv[1] = 0x98badcfe10325476ULL;
15964 + ses->hiv[2] = 0xc3d2e1f000000000ULL;
15966 + ses->hseq = 0ULL;
15969 + spin_lock_irqsave(&txring->fill_lock, flags);
15971 + if (((txring->next_to_fill + pasemi_desc_size(&init_desc)) -
15972 + txring->next_to_clean) > TX_RING_SIZE) {
15973 + spin_unlock_irqrestore(&txring->fill_lock, flags);
15974 + return ERESTART;
15977 + if (encini) {
15978 + pasemi_ring_add_desc(txring, &init_desc, NULL);
15979 + pasemi_ring_incr(sc, ses->chan,
15980 + pasemi_desc_size(&init_desc));
15983 + txring->sesn = sesn;
15984 + spin_unlock_irqrestore(&txring->fill_lock, flags);
15986 + *sidp = PASEMI_SID(sesn);
15987 + return 0;
15991 + * Deallocate a session.
15992 + */
15993 +static int
15994 +pasemi_freesession(device_t dev, u_int64_t tid)
15996 + struct pasemi_softc *sc = device_get_softc(dev);
15997 + int session;
15998 + u_int32_t sid = ((u_int32_t) tid) & 0xffffffff;
16000 + DPRINTF("%s()\n", __FUNCTION__);
16002 + if (sc == NULL)
16003 + return -EINVAL;
16004 + session = PASEMI_SESSION(sid);
16005 + if (session >= sc->sc_nsessions || !sc->sc_sessions[session])
16006 + return -EINVAL;
16008 + pci_unmap_single(sc->dma_pdev,
16009 + sc->sc_sessions[session]->dma_addr,
16010 + sizeof(struct pasemi_session), DMA_TO_DEVICE);
16011 + memset(sc->sc_sessions[session], 0,
16012 + sizeof(struct pasemi_session));
16014 + return 0;
16017 +static int
16018 +pasemi_process(device_t dev, struct cryptop *crp, int hint)
16021 + int err = 0, ivsize, srclen = 0, reinit = 0, reinit_size = 0, chsel;
16022 + struct pasemi_softc *sc = device_get_softc(dev);
16023 + struct cryptodesc *crd1, *crd2, *maccrd, *enccrd;
16024 + caddr_t ivp;
16025 + struct pasemi_desc init_desc, work_desc;
16026 + struct pasemi_session *ses;
16027 + struct sk_buff *skb;
16028 + struct uio *uiop;
16029 + unsigned long flags;
16030 + struct pasemi_fnu_txring *txring;
16032 + DPRINTF("%s()\n", __FUNCTION__);
16034 + if (crp == NULL || crp->crp_callback == NULL || sc == NULL)
16035 + return -EINVAL;
16037 + crp->crp_etype = 0;
16038 + if (PASEMI_SESSION(crp->crp_sid) >= sc->sc_nsessions)
16039 + return -EINVAL;
16041 + ses = sc->sc_sessions[PASEMI_SESSION(crp->crp_sid)];
16043 + crd1 = crp->crp_desc;
16044 + if (crd1 == NULL) {
16045 + err = -EINVAL;
16046 + goto errout;
16048 + crd2 = crd1->crd_next;
16050 + if (ALG_IS_SIG(crd1->crd_alg)) {
16051 + maccrd = crd1;
16052 + if (crd2 == NULL)
16053 + enccrd = NULL;
16054 + else if (ALG_IS_CIPHER(crd2->crd_alg) &&
16055 + (crd2->crd_flags & CRD_F_ENCRYPT) == 0)
16056 + enccrd = crd2;
16057 + else
16058 + goto erralg;
16059 + } else if (ALG_IS_CIPHER(crd1->crd_alg)) {
16060 + enccrd = crd1;
16061 + if (crd2 == NULL)
16062 + maccrd = NULL;
16063 + else if (ALG_IS_SIG(crd2->crd_alg) &&
16064 + (crd1->crd_flags & CRD_F_ENCRYPT))
16065 + maccrd = crd2;
16066 + else
16067 + goto erralg;
16068 + } else
16069 + goto erralg;
16071 + chsel = ses->chan;
16073 + txring = &sc->tx[chsel];
16075 + if (enccrd && !maccrd) {
16076 + if (enccrd->crd_alg == CRYPTO_ARC4)
16077 + reinit = 1;
16078 + reinit_size = 0x40;
16079 + srclen = crp->crp_ilen;
16081 + pasemi_desc_start(&work_desc, XCT_FUN_O | XCT_FUN_I
16082 + | XCT_FUN_FUN(chsel));
16083 + if (enccrd->crd_flags & CRD_F_ENCRYPT)
16084 + pasemi_desc_hdr(&work_desc, XCT_FUN_CRM_ENC);
16085 + else
16086 + pasemi_desc_hdr(&work_desc, XCT_FUN_CRM_DEC);
16087 + } else if (enccrd && maccrd) {
16088 + if (enccrd->crd_alg == CRYPTO_ARC4)
16089 + reinit = 1;
16090 + reinit_size = 0x68;
16092 + if (enccrd->crd_flags & CRD_F_ENCRYPT) {
16093 + /* Encrypt -> Authenticate */
16094 + pasemi_desc_start(&work_desc, XCT_FUN_O | XCT_FUN_I | XCT_FUN_CRM_ENC_SIG
16095 + | XCT_FUN_A | XCT_FUN_FUN(chsel));
16096 + srclen = maccrd->crd_skip + maccrd->crd_len;
16097 + } else {
16098 + /* Authenticate -> Decrypt */
16099 + pasemi_desc_start(&work_desc, XCT_FUN_O | XCT_FUN_I | XCT_FUN_CRM_SIG_DEC
16100 + | XCT_FUN_24BRES | XCT_FUN_FUN(chsel));
16101 + pasemi_desc_build(&work_desc, 0);
16102 + pasemi_desc_build(&work_desc, 0);
16103 + pasemi_desc_build(&work_desc, 0);
16104 + work_desc.postop = PASEMI_CHECK_SIG;
16105 + srclen = crp->crp_ilen;
16108 + pasemi_desc_hdr(&work_desc, XCT_FUN_SHL(maccrd->crd_skip / 4));
16109 + pasemi_desc_hdr(&work_desc, XCT_FUN_CHL(enccrd->crd_skip - maccrd->crd_skip));
16110 + } else if (!enccrd && maccrd) {
16111 + srclen = maccrd->crd_len;
16113 + pasemi_desc_start(&init_desc,
16114 + XCT_CTRL_HDR(chsel, 0x58, DMA_FN_HKEY0));
16115 + pasemi_desc_build(&init_desc,
16116 + XCT_FUN_SRC_PTR(0x58, ((struct pasemi_session *)ses->dma_addr)->hkey));
16118 + pasemi_desc_start(&work_desc, XCT_FUN_O | XCT_FUN_I | XCT_FUN_CRM_SIG
16119 + | XCT_FUN_A | XCT_FUN_FUN(chsel));
16122 + if (enccrd) {
16123 + switch (enccrd->crd_alg) {
16124 + case CRYPTO_3DES_CBC:
16125 + pasemi_desc_hdr(&work_desc, XCT_FUN_ALG_3DES |
16126 + XCT_FUN_BCM_CBC);
16127 + ivsize = sizeof(u64);
16128 + break;
16129 + case CRYPTO_DES_CBC:
16130 + pasemi_desc_hdr(&work_desc, XCT_FUN_ALG_DES |
16131 + XCT_FUN_BCM_CBC);
16132 + ivsize = sizeof(u64);
16133 + break;
16134 + case CRYPTO_AES_CBC:
16135 + pasemi_desc_hdr(&work_desc, XCT_FUN_ALG_AES |
16136 + XCT_FUN_BCM_CBC);
16137 + ivsize = 2 * sizeof(u64);
16138 + break;
16139 + case CRYPTO_ARC4:
16140 + pasemi_desc_hdr(&work_desc, XCT_FUN_ALG_ARC);
16141 + ivsize = 0;
16142 + break;
16143 + default:
16144 + printk(DRV_NAME ": unimplemented enccrd->crd_alg %d\n",
16145 + enccrd->crd_alg);
16146 + err = -EINVAL;
16147 + goto errout;
16150 + ivp = (ivsize == sizeof(u64)) ? (caddr_t) &ses->civ[1] : (caddr_t) &ses->civ[0];
16151 + if (enccrd->crd_flags & CRD_F_ENCRYPT) {
16152 + if (enccrd->crd_flags & CRD_F_IV_EXPLICIT)
16153 + memcpy(ivp, enccrd->crd_iv, ivsize);
16154 + /* If IV is not present in the buffer already, it has to be copied there */
16155 + if ((enccrd->crd_flags & CRD_F_IV_PRESENT) == 0)
16156 + crypto_copyback(crp->crp_flags, crp->crp_buf,
16157 + enccrd->crd_inject, ivsize, ivp);
16158 + } else {
16159 + if (enccrd->crd_flags & CRD_F_IV_EXPLICIT)
16160 + /* IV is provided explicitly in the descriptor */
16161 + memcpy(ivp, enccrd->crd_iv, ivsize);
16162 + else
16163 + /* IV is provided in the packet */
16164 + crypto_copydata(crp->crp_flags, crp->crp_buf,
16165 + enccrd->crd_inject, ivsize,
16166 + ivp);
16170 + if (maccrd) {
16171 + switch (maccrd->crd_alg) {
16172 + case CRYPTO_MD5:
16173 + pasemi_desc_hdr(&work_desc, XCT_FUN_SIG_MD5 |
16174 + XCT_FUN_HSZ((crp->crp_ilen - maccrd->crd_inject) / 4));
16175 + break;
16176 + case CRYPTO_SHA1:
16177 + pasemi_desc_hdr(&work_desc, XCT_FUN_SIG_SHA1 |
16178 + XCT_FUN_HSZ((crp->crp_ilen - maccrd->crd_inject) / 4));
16179 + break;
16180 + case CRYPTO_MD5_HMAC:
16181 + pasemi_desc_hdr(&work_desc, XCT_FUN_SIG_HMAC_MD5 |
16182 + XCT_FUN_HSZ((crp->crp_ilen - maccrd->crd_inject) / 4));
16183 + break;
16184 + case CRYPTO_SHA1_HMAC:
16185 + pasemi_desc_hdr(&work_desc, XCT_FUN_SIG_HMAC_SHA1 |
16186 + XCT_FUN_HSZ((crp->crp_ilen - maccrd->crd_inject) / 4));
16187 + break;
16188 + default:
16189 + printk(DRV_NAME ": unimplemented maccrd->crd_alg %d\n",
16190 + maccrd->crd_alg);
16191 + err = -EINVAL;
16192 + goto errout;
16196 + if (crp->crp_flags & CRYPTO_F_SKBUF) {
16197 + /* using SKB buffers */
16198 + skb = (struct sk_buff *)crp->crp_buf;
16199 + if (skb_shinfo(skb)->nr_frags) {
16200 + printk(DRV_NAME ": skb frags unimplemented\n");
16201 + err = -EINVAL;
16202 + goto errout;
16204 + pasemi_desc_build(
16205 + &work_desc,
16206 + XCT_FUN_DST_PTR(skb->len, pci_map_single(
16207 + sc->dma_pdev, skb->data,
16208 + skb->len, DMA_TO_DEVICE)));
16209 + pasemi_desc_build(
16210 + &work_desc,
16211 + XCT_FUN_SRC_PTR(
16212 + srclen, pci_map_single(
16213 + sc->dma_pdev, skb->data,
16214 + srclen, DMA_TO_DEVICE)));
16215 + pasemi_desc_hdr(&work_desc, XCT_FUN_LLEN(srclen));
16216 + } else if (crp->crp_flags & CRYPTO_F_IOV) {
16217 + /* using IOV buffers */
16218 + uiop = (struct uio *)crp->crp_buf;
16219 + if (uiop->uio_iovcnt > 1) {
16220 + printk(DRV_NAME ": iov frags unimplemented\n");
16221 + err = -EINVAL;
16222 + goto errout;
16225 + /* crp_olen is never set; always use crp_ilen */
16226 + pasemi_desc_build(
16227 + &work_desc,
16228 + XCT_FUN_DST_PTR(crp->crp_ilen, pci_map_single(
16229 + sc->dma_pdev,
16230 + uiop->uio_iov->iov_base,
16231 + crp->crp_ilen, DMA_TO_DEVICE)));
16232 + pasemi_desc_hdr(&work_desc, XCT_FUN_LLEN(srclen));
16234 + pasemi_desc_build(
16235 + &work_desc,
16236 + XCT_FUN_SRC_PTR(srclen, pci_map_single(
16237 + sc->dma_pdev,
16238 + uiop->uio_iov->iov_base,
16239 + srclen, DMA_TO_DEVICE)));
16240 + } else {
16241 + /* using contig buffers */
16242 + pasemi_desc_build(
16243 + &work_desc,
16244 + XCT_FUN_DST_PTR(crp->crp_ilen, pci_map_single(
16245 + sc->dma_pdev,
16246 + crp->crp_buf,
16247 + crp->crp_ilen, DMA_TO_DEVICE)));
16248 + pasemi_desc_build(
16249 + &work_desc,
16250 + XCT_FUN_SRC_PTR(srclen, pci_map_single(
16251 + sc->dma_pdev,
16252 + crp->crp_buf, srclen,
16253 + DMA_TO_DEVICE)));
16254 + pasemi_desc_hdr(&work_desc, XCT_FUN_LLEN(srclen));
16257 + spin_lock_irqsave(&txring->fill_lock, flags);
16259 + if (txring->sesn != PASEMI_SESSION(crp->crp_sid)) {
16260 + txring->sesn = PASEMI_SESSION(crp->crp_sid);
16261 + reinit = 1;
16264 + if (enccrd) {
16265 + pasemi_desc_start(&init_desc,
16266 + XCT_CTRL_HDR(chsel, reinit ? reinit_size : 0x10, DMA_FN_CIV0));
16267 + pasemi_desc_build(&init_desc,
16268 + XCT_FUN_SRC_PTR(reinit ? reinit_size : 0x10, ses->dma_addr));
16271 + if (((txring->next_to_fill + pasemi_desc_size(&init_desc) +
16272 + pasemi_desc_size(&work_desc)) -
16273 + txring->next_to_clean) > TX_RING_SIZE) {
16274 + spin_unlock_irqrestore(&txring->fill_lock, flags);
16275 + err = ERESTART;
16276 + goto errout;
16279 + pasemi_ring_add_desc(txring, &init_desc, NULL);
16280 + pasemi_ring_add_desc(txring, &work_desc, crp);
16282 + pasemi_ring_incr(sc, chsel,
16283 + pasemi_desc_size(&init_desc) +
16284 + pasemi_desc_size(&work_desc));
16286 + spin_unlock_irqrestore(&txring->fill_lock, flags);
16288 + mod_timer(&txring->crypto_timer, jiffies + TIMER_INTERVAL);
16290 + return 0;
16292 +erralg:
16293 + printk(DRV_NAME ": unsupported algorithm or algorithm order alg1 %d alg2 %d\n",
16294 + crd1->crd_alg, crd2->crd_alg);
16295 + err = -EINVAL;
16297 +errout:
16298 + if (err != ERESTART) {
16299 + crp->crp_etype = err;
16300 + crypto_done(crp);
16302 + return err;
16305 +static int pasemi_clean_tx(struct pasemi_softc *sc, int chan)
16307 + int i, j, ring_idx;
16308 + struct pasemi_fnu_txring *ring = &sc->tx[chan];
16309 + u16 delta_cnt;
16310 + int flags, loops = 10;
16311 + int desc_size;
16312 + struct cryptop *crp;
16314 + spin_lock_irqsave(&ring->clean_lock, flags);
16316 + while ((delta_cnt = (dma_status->tx_sta[sc->base_chan + chan]
16317 + & PAS_STATUS_PCNT_M) - ring->total_pktcnt)
16318 + && loops--) {
16320 + for (i = 0; i < delta_cnt; i++) {
16321 + desc_size = TX_DESC_INFO(ring, ring->next_to_clean).desc_size;
16322 + crp = TX_DESC_INFO(ring, ring->next_to_clean).cf_crp;
16323 + if (crp) {
16324 + ring_idx = 2 * (ring->next_to_clean & (TX_RING_SIZE-1));
16325 + if (TX_DESC_INFO(ring, ring->next_to_clean).desc_postop & PASEMI_CHECK_SIG) {
16326 + /* Need to make sure signature matched,
16327 + * if not - return error */
16328 + if (!(ring->desc[ring_idx + 1] & (1ULL << 63)))
16329 + crp->crp_etype = -EINVAL;
16331 + crypto_done(TX_DESC_INFO(ring,
16332 + ring->next_to_clean).cf_crp);
16333 + TX_DESC_INFO(ring, ring->next_to_clean).cf_crp = NULL;
16334 + pci_unmap_single(
16335 + sc->dma_pdev,
16336 + XCT_PTR_ADDR_LEN(ring->desc[ring_idx + 1]),
16337 + PCI_DMA_TODEVICE);
16339 + ring->desc[ring_idx] = ring->desc[ring_idx + 1] = 0;
16341 + ring->next_to_clean++;
16342 + for (j = 1; j < desc_size; j++) {
16343 + ring_idx = 2 *
16344 + (ring->next_to_clean &
16345 + (TX_RING_SIZE-1));
16346 + pci_unmap_single(
16347 + sc->dma_pdev,
16348 + XCT_PTR_ADDR_LEN(ring->desc[ring_idx]),
16349 + PCI_DMA_TODEVICE);
16350 + if (ring->desc[ring_idx + 1])
16351 + pci_unmap_single(
16352 + sc->dma_pdev,
16353 + XCT_PTR_ADDR_LEN(
16354 + ring->desc[
16355 + ring_idx + 1]),
16356 + PCI_DMA_TODEVICE);
16357 + ring->desc[ring_idx] =
16358 + ring->desc[ring_idx + 1] = 0;
16359 + ring->next_to_clean++;
16361 + } else {
16362 + for (j = 0; j < desc_size; j++) {
16363 + ring_idx = 2 * (ring->next_to_clean & (TX_RING_SIZE-1));
16364 + ring->desc[ring_idx] =
16365 + ring->desc[ring_idx + 1] = 0;
16366 + ring->next_to_clean++;
16371 + ring->total_pktcnt += delta_cnt;
16373 + spin_unlock_irqrestore(&ring->clean_lock, flags);
16375 + return 0;
16378 +static void sweepup_tx(struct pasemi_softc *sc)
16380 + int i;
16382 + for (i = 0; i < sc->sc_num_channels; i++)
16383 + pasemi_clean_tx(sc, i);
16386 +static irqreturn_t pasemi_intr(int irq, void *arg, struct pt_regs *regs)
16388 + struct pasemi_softc *sc = arg;
16389 + unsigned int reg;
16390 + int chan = irq - sc->base_irq;
16391 + int chan_index = sc->base_chan + chan;
16392 + u64 stat = dma_status->tx_sta[chan_index];
16394 + DPRINTF("%s()\n", __FUNCTION__);
16396 + if (!(stat & PAS_STATUS_CAUSE_M))
16397 + return IRQ_NONE;
16399 + pasemi_clean_tx(sc, chan);
16401 + stat = dma_status->tx_sta[chan_index];
16403 + reg = PAS_IOB_DMA_TXCH_RESET_PINTC |
16404 + PAS_IOB_DMA_TXCH_RESET_PCNT(sc->tx[chan].total_pktcnt);
16406 + if (stat & PAS_STATUS_SOFT)
16407 + reg |= PAS_IOB_DMA_RXCH_RESET_SINTC;
16409 + out_le32(sc->iob_regs + PAS_IOB_DMA_TXCH_RESET(chan_index), reg);
16412 + return IRQ_HANDLED;
16415 +static int pasemi_dma_setup_tx_resources(struct pasemi_softc *sc, int chan)
16417 + u32 val;
16418 + int chan_index = chan + sc->base_chan;
16419 + int ret;
16420 + struct pasemi_fnu_txring *ring;
16422 + ring = &sc->tx[chan];
16424 + spin_lock_init(&ring->fill_lock);
16425 + spin_lock_init(&ring->clean_lock);
16427 + ring->desc_info = kzalloc(sizeof(struct pasemi_desc_info) *
16428 + TX_RING_SIZE, GFP_KERNEL);
16429 + if (!ring->desc_info)
16430 + return -ENOMEM;
16432 + /* Allocate descriptors */
16433 + ring->desc = dma_alloc_coherent(&sc->dma_pdev->dev,
16434 + TX_RING_SIZE *
16435 + 2 * sizeof(u64),
16436 + &ring->dma, GFP_KERNEL);
16437 + if (!ring->desc)
16438 + return -ENOMEM;
16440 + memset((void *) ring->desc, 0, TX_RING_SIZE * 2 * sizeof(u64));
16442 + out_le32(sc->iob_regs + PAS_IOB_DMA_TXCH_RESET(chan_index), 0x30);
16444 + ring->total_pktcnt = 0;
16446 + out_le32(sc->dma_regs + PAS_DMA_TXCHAN_BASEL(chan_index),
16447 + PAS_DMA_TXCHAN_BASEL_BRBL(ring->dma));
16449 + val = PAS_DMA_TXCHAN_BASEU_BRBH(ring->dma >> 32);
16450 + val |= PAS_DMA_TXCHAN_BASEU_SIZ(TX_RING_SIZE >> 2);
16452 + out_le32(sc->dma_regs + PAS_DMA_TXCHAN_BASEU(chan_index), val);
16454 + out_le32(sc->dma_regs + PAS_DMA_TXCHAN_CFG(chan_index),
16455 + PAS_DMA_TXCHAN_CFG_TY_FUNC |
16456 + PAS_DMA_TXCHAN_CFG_TATTR(chan) |
16457 + PAS_DMA_TXCHAN_CFG_WT(2));
16459 + /* enable tx channel */
16460 + out_le32(sc->dma_regs +
16461 + PAS_DMA_TXCHAN_TCMDSTA(chan_index),
16462 + PAS_DMA_TXCHAN_TCMDSTA_EN);
16464 + out_le32(sc->iob_regs + PAS_IOB_DMA_TXCH_CFG(chan_index),
16465 + PAS_IOB_DMA_TXCH_CFG_CNTTH(1000));
16467 + ring->next_to_fill = 0;
16468 + ring->next_to_clean = 0;
16470 + snprintf(ring->irq_name, sizeof(ring->irq_name),
16471 + "%s%d", "crypto", chan);
16473 + ring->irq = irq_create_mapping(NULL, sc->base_irq + chan);
16474 + ret = request_irq(ring->irq, (irq_handler_t)
16475 + pasemi_intr, IRQF_DISABLED, ring->irq_name, sc);
16476 + if (ret) {
16477 + printk(KERN_ERR DRV_NAME ": failed to hook irq %d ret %d\n",
16478 + ring->irq, ret);
16479 + ring->irq = -1;
16480 + return ret;
16483 + setup_timer(&ring->crypto_timer, (void *) sweepup_tx, (unsigned long) sc);
16485 + return 0;
16488 +static device_method_t pasemi_methods = {
16489 + /* crypto device methods */
16490 + DEVMETHOD(cryptodev_newsession, pasemi_newsession),
16491 + DEVMETHOD(cryptodev_freesession, pasemi_freesession),
16492 + DEVMETHOD(cryptodev_process, pasemi_process),
16495 +/* Set up the crypto device structure, private data,
16496 + * and anything else we need before we start */
16498 +static int __devinit
16499 +pasemi_dma_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
16501 + struct pasemi_softc *sc;
16502 + int ret, i;
16504 + DPRINTF(KERN_ERR "%s()\n", __FUNCTION__);
16506 + sc = kzalloc(sizeof(*sc), GFP_KERNEL);
16507 + if (!sc)
16508 + return -ENOMEM;
16510 + softc_device_init(sc, DRV_NAME, 1, pasemi_methods);
16512 + pci_set_drvdata(pdev, sc);
16514 + spin_lock_init(&sc->sc_chnlock);
16516 + sc->sc_sessions = (struct pasemi_session **)
16517 + kzalloc(PASEMI_INITIAL_SESSIONS *
16518 + sizeof(struct pasemi_session *), GFP_ATOMIC);
16519 + if (sc->sc_sessions == NULL) {
16520 + ret = -ENOMEM;
16521 + goto out;
16524 + sc->sc_nsessions = PASEMI_INITIAL_SESSIONS;
16525 + sc->sc_lastchn = 0;
16526 + sc->base_irq = pdev->irq + 6;
16527 + sc->base_chan = 6;
16528 + sc->sc_cid = -1;
16529 + sc->dma_pdev = pdev;
16531 + sc->iob_pdev = pci_get_device(PCI_VENDOR_ID_PASEMI, 0xa001, NULL);
16532 + if (!sc->iob_pdev) {
16533 + dev_err(&pdev->dev, "Can't find I/O Bridge\n");
16534 + ret = -ENODEV;
16535 + goto out;
16538 + /* This is hardcoded and ugly, but we have some firmware versions
16539 + * that don't provide the register space in the device tree. Luckily
16540 + * they are at well-known locations so we can just do the math here.
16541 + */
16542 + sc->dma_regs =
16543 + ioremap(0xe0000000 + (sc->dma_pdev->devfn << 12), 0x2000);
16544 + sc->iob_regs =
16545 + ioremap(0xe0000000 + (sc->iob_pdev->devfn << 12), 0x2000);
16546 + if (!sc->dma_regs || !sc->iob_regs) {
16547 + dev_err(&pdev->dev, "Can't map registers\n");
16548 + ret = -ENODEV;
16549 + goto out;
16552 + dma_status = __ioremap(0xfd800000, 0x1000, 0);
16553 + if (!dma_status) {
16554 + ret = -ENODEV;
16555 + dev_err(&pdev->dev, "Can't map dmastatus space\n");
16556 + goto out;
16559 + sc->tx = (struct pasemi_fnu_txring *)
16560 + kzalloc(sizeof(struct pasemi_fnu_txring)
16561 + * 8, GFP_KERNEL);
16562 + if (!sc->tx) {
16563 + ret = -ENOMEM;
16564 + goto out;
16567 + /* Initialize the h/w */
16568 + out_le32(sc->dma_regs + PAS_DMA_COM_CFG,
16569 + (in_le32(sc->dma_regs + PAS_DMA_COM_CFG) |
16570 + PAS_DMA_COM_CFG_FWF));
16571 + out_le32(sc->dma_regs + PAS_DMA_COM_TXCMD, PAS_DMA_COM_TXCMD_EN);
16573 + for (i = 0; i < PASEMI_FNU_CHANNELS; i++) {
16574 + sc->sc_num_channels++;
16575 + ret = pasemi_dma_setup_tx_resources(sc, i);
16576 + if (ret)
16577 + goto out;
16580 + sc->sc_cid = crypto_get_driverid(softc_get_device(sc),
16581 + CRYPTOCAP_F_HARDWARE);
16582 + if (sc->sc_cid < 0) {
16583 + printk(KERN_ERR DRV_NAME ": could not get crypto driver id\n");
16584 + ret = -ENXIO;
16585 + goto out;
16588 + /* register algorithms with the framework */
16589 + printk(DRV_NAME ":");
16591 + crypto_register(sc->sc_cid, CRYPTO_DES_CBC, 0, 0);
16592 + crypto_register(sc->sc_cid, CRYPTO_3DES_CBC, 0, 0);
16593 + crypto_register(sc->sc_cid, CRYPTO_AES_CBC, 0, 0);
16594 + crypto_register(sc->sc_cid, CRYPTO_ARC4, 0, 0);
16595 + crypto_register(sc->sc_cid, CRYPTO_SHA1, 0, 0);
16596 + crypto_register(sc->sc_cid, CRYPTO_MD5, 0, 0);
16597 + crypto_register(sc->sc_cid, CRYPTO_SHA1_HMAC, 0, 0);
16598 + crypto_register(sc->sc_cid, CRYPTO_MD5_HMAC, 0, 0);
16600 + return 0;
16602 +out:
16603 + pasemi_dma_remove(pdev);
16604 + return ret;
16607 +#define MAX_RETRIES 5000
16609 +static void pasemi_free_tx_resources(struct pasemi_softc *sc, int chan)
16611 + struct pasemi_fnu_txring *ring = &sc->tx[chan];
16612 + int chan_index = chan + sc->base_chan;
16613 + int retries;
16614 + u32 stat;
16616 + /* Stop the channel */
16617 + out_le32(sc->dma_regs +
16618 + PAS_DMA_TXCHAN_TCMDSTA(chan_index),
16619 + PAS_DMA_TXCHAN_TCMDSTA_ST);
16621 + for (retries = 0; retries < MAX_RETRIES; retries++) {
16622 + stat = in_le32(sc->dma_regs +
16623 + PAS_DMA_TXCHAN_TCMDSTA(chan_index));
16624 + if (!(stat & PAS_DMA_TXCHAN_TCMDSTA_ACT))
16625 + break;
16626 + cond_resched();
16629 + if (stat & PAS_DMA_TXCHAN_TCMDSTA_ACT)
16630 + dev_err(&sc->dma_pdev->dev, "Failed to stop tx channel %d\n",
16631 + chan_index);
16633 + /* Disable the channel */
16634 + out_le32(sc->dma_regs +
16635 + PAS_DMA_TXCHAN_TCMDSTA(chan_index),
16636 + 0);
16638 + if (ring->desc_info)
16639 + kfree((void *) ring->desc_info);
16640 + if (ring->desc)
16641 + dma_free_coherent(&sc->dma_pdev->dev,
16642 + TX_RING_SIZE *
16643 + 2 * sizeof(u64),
16644 + (void *) ring->desc, ring->dma);
16645 + if (ring->irq != -1)
16646 + free_irq(ring->irq, sc);
16648 + del_timer(&ring->crypto_timer);
16651 +static void __devexit pasemi_dma_remove(struct pci_dev *pdev)
16653 + struct pasemi_softc *sc = pci_get_drvdata(pdev);
16654 + int i;
16656 + DPRINTF("%s()\n", __FUNCTION__);
16658 + if (sc->sc_cid >= 0) {
16659 + crypto_unregister_all(sc->sc_cid);
16662 + if (sc->tx) {
16663 + for (i = 0; i < sc->sc_num_channels; i++)
16664 + pasemi_free_tx_resources(sc, i);
16666 + kfree(sc->tx);
16668 + if (sc->sc_sessions) {
16669 + for (i = 0; i < sc->sc_nsessions; i++)
16670 + kfree(sc->sc_sessions[i]);
16671 + kfree(sc->sc_sessions);
16673 + if (sc->iob_pdev)
16674 + pci_dev_put(sc->iob_pdev);
16675 + if (sc->dma_regs)
16676 + iounmap(sc->dma_regs);
16677 + if (sc->iob_regs)
16678 + iounmap(sc->iob_regs);
16679 + kfree(sc);
16682 +static struct pci_device_id pasemi_dma_pci_tbl[] = {
16683 + { PCI_DEVICE(PCI_VENDOR_ID_PASEMI, 0xa007) },
16686 +MODULE_DEVICE_TABLE(pci, pasemi_dma_pci_tbl);
16688 +static struct pci_driver pasemi_dma_driver = {
16689 + .name = "pasemi_dma",
16690 + .id_table = pasemi_dma_pci_tbl,
16691 + .probe = pasemi_dma_probe,
16692 + .remove = __devexit_p(pasemi_dma_remove),
16695 +static void __exit pasemi_dma_cleanup_module(void)
16697 + pci_unregister_driver(&pasemi_dma_driver);
16698 + __iounmap(dma_status);
16699 + dma_status = NULL;
16702 +int pasemi_dma_init_module(void)
16704 + return pci_register_driver(&pasemi_dma_driver);
16707 +module_init(pasemi_dma_init_module);
16708 +module_exit(pasemi_dma_cleanup_module);
16710 +MODULE_LICENSE("Dual BSD/GPL");
16711 +MODULE_AUTHOR("Egor Martovetsky egor@pasemi.com");
16712 +MODULE_DESCRIPTION("OCF driver for PA Semi PWRficient DMA Crypto Engine");
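The driver assembles DMA work in two stages: pasemi_desc_start() seeds quad[0] with a header, pasemi_desc_build() appends further 64-bit quads (the slot count is rounded up via size = (quad_cnt + 1) / 2), pasemi_desc_hdr() ORs extra flags into the header, and pasemi_ring_add_desc()/pasemi_ring_incr() hand the finished descriptor to the channel. A worked sketch of the plain contiguous-buffer encrypt case, assuming chan, len, src_dma and dst_dma are already set up (illustrative only, not code from the driver):

	struct pasemi_desc d;

	pasemi_desc_start(&d, XCT_FUN_O | XCT_FUN_I | XCT_FUN_FUN(chan));
	pasemi_desc_hdr(&d, XCT_FUN_CRM_ENC | XCT_FUN_ALG_AES | XCT_FUN_BCM_CBC);
	pasemi_desc_build(&d, XCT_FUN_DST_PTR(len, dst_dma));
	pasemi_desc_build(&d, XCT_FUN_SRC_PTR(len, src_dma));
	pasemi_desc_hdr(&d, XCT_FUN_LLEN(len));
	/* quad_cnt is now 3, so pasemi_desc_size(&d) == 2 ring slots */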
16713 diff -Nur linux-2.6.30.orig/crypto/ocf/pasemi/pasemi_fnu.h linux-2.6.30/crypto/ocf/pasemi/pasemi_fnu.h
16714 --- linux-2.6.30.orig/crypto/ocf/pasemi/pasemi_fnu.h 1970-01-01 01:00:00.000000000 +0100
16715 +++ linux-2.6.30/crypto/ocf/pasemi/pasemi_fnu.h 2009-06-11 10:55:27.000000000 +0200
16716 @@ -0,0 +1,410 @@
16718 + * Copyright (C) 2007 PA Semi, Inc
16720 + * Driver for the PA Semi PWRficient DMA Crypto Engine, soft state and
16721 + * hardware register layouts.
16723 + * This program is free software; you can redistribute it and/or modify
16724 + * it under the terms of the GNU General Public License version 2 as
16725 + * published by the Free Software Foundation.
16727 + * This program is distributed in the hope that it will be useful,
16728 + * but WITHOUT ANY WARRANTY; without even the implied warranty of
16729 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16730 + * GNU General Public License for more details.
16732 + * You should have received a copy of the GNU General Public License
16733 + * along with this program; if not, write to the Free Software
16734 + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
16735 + */
16737 +#ifndef PASEMI_FNU_H
16738 +#define PASEMI_FNU_H
16740 +#include <linux/spinlock.h>
16742 +#define PASEMI_SESSION(sid) ((sid) & 0xffffffff)
16743 +#define PASEMI_SID(sesn) ((sesn) & 0xffffffff)
16744 +#define DPRINTF(a...) if (debug) { printk(DRV_NAME ": " a); }
16746 +/* Must be a power of two */
16747 +#define RX_RING_SIZE 512
16748 +#define TX_RING_SIZE 512
16749 +#define TX_DESC(ring, num) ((ring)->desc[2 * (num & (TX_RING_SIZE-1))])
16750 +#define TX_DESC_INFO(ring, num) ((ring)->desc_info[(num) & (TX_RING_SIZE-1)])
16751 +#define MAX_DESC_SIZE 8
16752 +#define PASEMI_INITIAL_SESSIONS 10
16753 +#define PASEMI_FNU_CHANNELS 8
16755 +/* DMA descriptor */
16756 +struct pasemi_desc {
16757 + u64 quad[2*MAX_DESC_SIZE];
16758 + int quad_cnt;
16759 + int size;
16760 + int postop;
16764 + * Holds per descriptor data
16765 + */
16766 +struct pasemi_desc_info {
16767 + int desc_size;
16768 + int desc_postop;
16769 +#define PASEMI_CHECK_SIG 0x1
16771 + struct cryptop *cf_crp;
16775 + * Holds per channel data
16776 + */
16777 +struct pasemi_fnu_txring {
16778 + volatile u64 *desc;
16779 + volatile struct
16780 + pasemi_desc_info *desc_info;
16781 + dma_addr_t dma;
16782 + struct timer_list crypto_timer;
16783 + spinlock_t fill_lock;
16784 + spinlock_t clean_lock;
16785 + unsigned int next_to_fill;
16786 + unsigned int next_to_clean;
16787 + u16 total_pktcnt;
16788 + int irq;
16789 + int sesn;
16790 + char irq_name[10];
16794 + * Holds data specific to a single pasemi device.
16795 + */
16796 +struct pasemi_softc {
16797 + softc_device_decl sc_cdev;
16798 + struct pci_dev *dma_pdev; /* device backpointer */
16799 + struct pci_dev *iob_pdev; /* device backpointer */
16800 + void __iomem *dma_regs;
16801 + void __iomem *iob_regs;
16802 + int base_irq;
16803 + int base_chan;
16804 + int32_t sc_cid; /* crypto tag */
16805 + int sc_nsessions;
16806 + struct pasemi_session **sc_sessions;
16807 + int sc_num_channels;/* number of crypto channels */
16809 + /* pointer to the array of txring data structures, one txring per channel */
16810 + struct pasemi_fnu_txring *tx;
16812 + /*
16813 + * mutual exclusion for the channel scheduler
16814 + */
16815 + spinlock_t sc_chnlock;
16816 + /* last channel used, for now use round-robin to allocate channels */
16817 + int sc_lastchn;
16820 +struct pasemi_session {
16821 + u64 civ[2];
16822 + u64 keysz;
16823 + u64 key[4];
16824 + u64 ccmd;
16825 + u64 hkey[4];
16826 + u64 hseq;
16827 + u64 giv[2];
16828 + u64 hiv[4];
16830 + int used;
16831 + dma_addr_t dma_addr;
16832 + int chan;
16835 +/* status register layout in IOB region, at 0xfd800000 */
16836 +struct pasdma_status {
16837 + u64 rx_sta[64];
16838 + u64 tx_sta[20];
16841 +#define ALG_IS_CIPHER(alg) ((alg == CRYPTO_DES_CBC) || \
16842 + (alg == CRYPTO_3DES_CBC) || \
16843 + (alg == CRYPTO_AES_CBC) || \
16844 + (alg == CRYPTO_ARC4) || \
16845 + (alg == CRYPTO_NULL_CBC))
16847 +#define ALG_IS_SIG(alg) ((alg == CRYPTO_MD5) || \
16848 + (alg == CRYPTO_MD5_HMAC) || \
16849 + (alg == CRYPTO_SHA1) || \
16850 + (alg == CRYPTO_SHA1_HMAC) || \
16851 + (alg == CRYPTO_NULL_HMAC))
16853 +enum {
16854 + PAS_DMA_COM_TXCMD = 0x100, /* Transmit Command Register */
16855 + PAS_DMA_COM_TXSTA = 0x104, /* Transmit Status Register */
16856 + PAS_DMA_COM_RXCMD = 0x108, /* Receive Command Register */
16857 + PAS_DMA_COM_RXSTA = 0x10c, /* Receive Status Register */
16858 + PAS_DMA_COM_CFG = 0x114, /* DMA Configuration Register */
16861 +/* All these registers live in the PCI configuration space for the DMA PCI
16862 + * device. Use the normal PCI config access functions for them.
16863 + */
16865 +#define PAS_DMA_COM_CFG_FWF 0x18000000
16867 +#define PAS_DMA_COM_TXCMD_EN 0x00000001 /* enable */
16868 +#define PAS_DMA_COM_TXSTA_ACT 0x00000001 /* active */
16869 +#define PAS_DMA_COM_RXCMD_EN 0x00000001 /* enable */
16870 +#define PAS_DMA_COM_RXSTA_ACT 0x00000001 /* active */
16872 +#define _PAS_DMA_TXCHAN_STRIDE 0x20 /* Size per channel */
16873 +#define _PAS_DMA_TXCHAN_TCMDSTA 0x300 /* Command / Status */
16874 +#define _PAS_DMA_TXCHAN_CFG 0x304 /* Configuration */
16875 +#define _PAS_DMA_TXCHAN_DSCRBU 0x308 /* Descriptor BU Allocation */
16876 +#define _PAS_DMA_TXCHAN_INCR 0x310 /* Descriptor increment */
16877 +#define _PAS_DMA_TXCHAN_CNT 0x314 /* Descriptor count/offset */
16878 +#define _PAS_DMA_TXCHAN_BASEL 0x318 /* Descriptor ring base (low) */
16879 +#define _PAS_DMA_TXCHAN_BASEU 0x31c /* (high) */
16880 +#define PAS_DMA_TXCHAN_TCMDSTA(c) (0x300+(c)*_PAS_DMA_TXCHAN_STRIDE)
16881 +#define PAS_DMA_TXCHAN_TCMDSTA_EN 0x00000001 /* Enabled */
16882 +#define PAS_DMA_TXCHAN_TCMDSTA_ST 0x00000002 /* Stop interface */
16883 +#define PAS_DMA_TXCHAN_TCMDSTA_ACT 0x00010000 /* Active */
16884 +#define PAS_DMA_TXCHAN_CFG(c) (0x304+(c)*_PAS_DMA_TXCHAN_STRIDE)
16885 +#define PAS_DMA_TXCHAN_CFG_TY_FUNC 0x00000002 /* Type = function */
16886 +#define PAS_DMA_TXCHAN_CFG_TY_IFACE 0x00000000 /* Type = interface */
16887 +#define PAS_DMA_TXCHAN_CFG_TATTR_M 0x0000003c
16888 +#define PAS_DMA_TXCHAN_CFG_TATTR_S 2
16889 +#define PAS_DMA_TXCHAN_CFG_TATTR(x) (((x) << PAS_DMA_TXCHAN_CFG_TATTR_S) & \
16890 + PAS_DMA_TXCHAN_CFG_TATTR_M)
16891 +#define PAS_DMA_TXCHAN_CFG_WT_M 0x000001c0
16892 +#define PAS_DMA_TXCHAN_CFG_WT_S 6
16893 +#define PAS_DMA_TXCHAN_CFG_WT(x) (((x) << PAS_DMA_TXCHAN_CFG_WT_S) & \
16894 + PAS_DMA_TXCHAN_CFG_WT_M)
16895 +#define PAS_DMA_TXCHAN_CFG_LPSQ_FAST 0x00000400
16896 +#define PAS_DMA_TXCHAN_CFG_LPDQ_FAST 0x00000800
16897 +#define PAS_DMA_TXCHAN_CFG_CF 0x00001000 /* Clean first line */
16898 +#define PAS_DMA_TXCHAN_CFG_CL 0x00002000 /* Clean last line */
16899 +#define PAS_DMA_TXCHAN_CFG_UP 0x00004000 /* update tx descr when sent */
16900 +#define PAS_DMA_TXCHAN_INCR(c) (0x310+(c)*_PAS_DMA_TXCHAN_STRIDE)
16901 +#define PAS_DMA_TXCHAN_BASEL(c) (0x318+(c)*_PAS_DMA_TXCHAN_STRIDE)
16902 +#define PAS_DMA_TXCHAN_BASEL_BRBL_M 0xffffffc0
16903 +#define PAS_DMA_TXCHAN_BASEL_BRBL_S 0
16904 +#define PAS_DMA_TXCHAN_BASEL_BRBL(x) (((x) << PAS_DMA_TXCHAN_BASEL_BRBL_S) & \
16905 + PAS_DMA_TXCHAN_BASEL_BRBL_M)
16906 +#define PAS_DMA_TXCHAN_BASEU(c) (0x31c+(c)*_PAS_DMA_TXCHAN_STRIDE)
16907 +#define PAS_DMA_TXCHAN_BASEU_BRBH_M 0x00000fff
16908 +#define PAS_DMA_TXCHAN_BASEU_BRBH_S 0
16909 +#define PAS_DMA_TXCHAN_BASEU_BRBH(x) (((x) << PAS_DMA_TXCHAN_BASEU_BRBH_S) & \
16910 + PAS_DMA_TXCHAN_BASEU_BRBH_M)
16911 +/* # of cache lines worth of buffer ring */
16912 +#define PAS_DMA_TXCHAN_BASEU_SIZ_M 0x3fff0000
16913 +#define PAS_DMA_TXCHAN_BASEU_SIZ_S 16 /* 0 = 16K */
16914 +#define PAS_DMA_TXCHAN_BASEU_SIZ(x) (((x) << PAS_DMA_TXCHAN_BASEU_SIZ_S) & \
16915 + PAS_DMA_TXCHAN_BASEU_SIZ_M)
16917 +#define PAS_STATUS_PCNT_M 0x000000000000ffffull
16918 +#define PAS_STATUS_PCNT_S 0
16919 +#define PAS_STATUS_DCNT_M 0x00000000ffff0000ull
16920 +#define PAS_STATUS_DCNT_S 16
16921 +#define PAS_STATUS_BPCNT_M 0x0000ffff00000000ull
16922 +#define PAS_STATUS_BPCNT_S 32
16923 +#define PAS_STATUS_CAUSE_M 0xf000000000000000ull
16924 +#define PAS_STATUS_TIMER 0x1000000000000000ull
16925 +#define PAS_STATUS_ERROR 0x2000000000000000ull
16926 +#define PAS_STATUS_SOFT 0x4000000000000000ull
16927 +#define PAS_STATUS_INT 0x8000000000000000ull
16929 +#define PAS_IOB_DMA_RXCH_CFG(i) (0x1100 + (i)*4)
16930 +#define PAS_IOB_DMA_RXCH_CFG_CNTTH_M 0x00000fff
16931 +#define PAS_IOB_DMA_RXCH_CFG_CNTTH_S 0
16932 +#define PAS_IOB_DMA_RXCH_CFG_CNTTH(x) (((x) << PAS_IOB_DMA_RXCH_CFG_CNTTH_S) & \
16933 + PAS_IOB_DMA_RXCH_CFG_CNTTH_M)
16934 +#define PAS_IOB_DMA_TXCH_CFG(i) (0x1200 + (i)*4)
16935 +#define PAS_IOB_DMA_TXCH_CFG_CNTTH_M 0x00000fff
16936 +#define PAS_IOB_DMA_TXCH_CFG_CNTTH_S 0
16937 +#define PAS_IOB_DMA_TXCH_CFG_CNTTH(x) (((x) << PAS_IOB_DMA_TXCH_CFG_CNTTH_S) & \
16938 + PAS_IOB_DMA_TXCH_CFG_CNTTH_M)
16939 +#define PAS_IOB_DMA_RXCH_STAT(i) (0x1300 + (i)*4)
16940 +#define PAS_IOB_DMA_RXCH_STAT_INTGEN 0x00001000
16941 +#define PAS_IOB_DMA_RXCH_STAT_CNTDEL_M 0x00000fff
16942 +#define PAS_IOB_DMA_RXCH_STAT_CNTDEL_S 0
16943 +#define PAS_IOB_DMA_RXCH_STAT_CNTDEL(x) (((x) << PAS_IOB_DMA_RXCH_STAT_CNTDEL_S) &\
16944 + PAS_IOB_DMA_RXCH_STAT_CNTDEL_M)
16945 +#define PAS_IOB_DMA_TXCH_STAT(i) (0x1400 + (i)*4)
16946 +#define PAS_IOB_DMA_TXCH_STAT_INTGEN 0x00001000
16947 +#define PAS_IOB_DMA_TXCH_STAT_CNTDEL_M 0x00000fff
16948 +#define PAS_IOB_DMA_TXCH_STAT_CNTDEL_S 0
16949 +#define PAS_IOB_DMA_TXCH_STAT_CNTDEL(x) (((x) << PAS_IOB_DMA_TXCH_STAT_CNTDEL_S) &\
16950 + PAS_IOB_DMA_TXCH_STAT_CNTDEL_M)
16951 +#define PAS_IOB_DMA_RXCH_RESET(i) (0x1500 + (i)*4)
16952 +#define PAS_IOB_DMA_RXCH_RESET_PCNT_M 0xffff0000
16953 +#define PAS_IOB_DMA_RXCH_RESET_PCNT_S 16
16954 +#define PAS_IOB_DMA_RXCH_RESET_PCNT(x) (((x) << PAS_IOB_DMA_RXCH_RESET_PCNT_S) & \
16955 + PAS_IOB_DMA_RXCH_RESET_PCNT_M)
16956 +#define PAS_IOB_DMA_RXCH_RESET_PCNTRST 0x00000020
16957 +#define PAS_IOB_DMA_RXCH_RESET_DCNTRST 0x00000010
16958 +#define PAS_IOB_DMA_RXCH_RESET_TINTC 0x00000008
16959 +#define PAS_IOB_DMA_RXCH_RESET_DINTC 0x00000004
16960 +#define PAS_IOB_DMA_RXCH_RESET_SINTC 0x00000002
16961 +#define PAS_IOB_DMA_RXCH_RESET_PINTC 0x00000001
16962 +#define PAS_IOB_DMA_TXCH_RESET(i) (0x1600 + (i)*4)
16963 +#define PAS_IOB_DMA_TXCH_RESET_PCNT_M 0xffff0000
16964 +#define PAS_IOB_DMA_TXCH_RESET_PCNT_S 16
16965 +#define PAS_IOB_DMA_TXCH_RESET_PCNT(x) (((x) << PAS_IOB_DMA_TXCH_RESET_PCNT_S) & \
16966 + PAS_IOB_DMA_TXCH_RESET_PCNT_M)
16967 +#define PAS_IOB_DMA_TXCH_RESET_PCNTRST 0x00000020
16968 +#define PAS_IOB_DMA_TXCH_RESET_DCNTRST 0x00000010
16969 +#define PAS_IOB_DMA_TXCH_RESET_TINTC 0x00000008
16970 +#define PAS_IOB_DMA_TXCH_RESET_DINTC 0x00000004
16971 +#define PAS_IOB_DMA_TXCH_RESET_SINTC 0x00000002
16972 +#define PAS_IOB_DMA_TXCH_RESET_PINTC 0x00000001
16974 +#define PAS_IOB_DMA_COM_TIMEOUTCFG 0x1700
16975 +#define PAS_IOB_DMA_COM_TIMEOUTCFG_TCNT_M 0x00ffffff
16976 +#define PAS_IOB_DMA_COM_TIMEOUTCFG_TCNT_S 0
16977 +#define PAS_IOB_DMA_COM_TIMEOUTCFG_TCNT(x) (((x) << PAS_IOB_DMA_COM_TIMEOUTCFG_TCNT_S) & \
16978 + PAS_IOB_DMA_COM_TIMEOUTCFG_TCNT_M)
16980 +/* Transmit descriptor fields */
16981 +#define XCT_MACTX_T 0x8000000000000000ull
16982 +#define XCT_MACTX_ST 0x4000000000000000ull
16983 +#define XCT_MACTX_NORES 0x0000000000000000ull
16984 +#define XCT_MACTX_8BRES 0x1000000000000000ull
16985 +#define XCT_MACTX_24BRES 0x2000000000000000ull
16986 +#define XCT_MACTX_40BRES 0x3000000000000000ull
16987 +#define XCT_MACTX_I 0x0800000000000000ull
16988 +#define XCT_MACTX_O 0x0400000000000000ull
16989 +#define XCT_MACTX_E 0x0200000000000000ull
16990 +#define XCT_MACTX_VLAN_M 0x0180000000000000ull
16991 +#define XCT_MACTX_VLAN_NOP 0x0000000000000000ull
16992 +#define XCT_MACTX_VLAN_REMOVE 0x0080000000000000ull
16993 +#define XCT_MACTX_VLAN_INSERT 0x0100000000000000ull
16994 +#define XCT_MACTX_VLAN_REPLACE 0x0180000000000000ull
16995 +#define XCT_MACTX_CRC_M 0x0060000000000000ull
16996 +#define XCT_MACTX_CRC_NOP 0x0000000000000000ull
16997 +#define XCT_MACTX_CRC_INSERT 0x0020000000000000ull
16998 +#define XCT_MACTX_CRC_PAD 0x0040000000000000ull
16999 +#define XCT_MACTX_CRC_REPLACE 0x0060000000000000ull
17000 +#define XCT_MACTX_SS 0x0010000000000000ull
17001 +#define XCT_MACTX_LLEN_M 0x00007fff00000000ull
17002 +#define XCT_MACTX_LLEN_S 32ull
17003 +#define XCT_MACTX_LLEN(x) ((((long)(x)) << XCT_MACTX_LLEN_S) & \
17004 + XCT_MACTX_LLEN_M)
17005 +#define XCT_MACTX_IPH_M 0x00000000f8000000ull
17006 +#define XCT_MACTX_IPH_S 27ull
17007 +#define XCT_MACTX_IPH(x) ((((long)(x)) << XCT_MACTX_IPH_S) & \
17008 + XCT_MACTX_IPH_M)
17009 +#define XCT_MACTX_IPO_M 0x0000000007c00000ull
17010 +#define XCT_MACTX_IPO_S 22ull
17011 +#define XCT_MACTX_IPO(x) ((((long)(x)) << XCT_MACTX_IPO_S) & \
17012 + XCT_MACTX_IPO_M)
17013 +#define XCT_MACTX_CSUM_M 0x0000000000000060ull
17014 +#define XCT_MACTX_CSUM_NOP 0x0000000000000000ull
17015 +#define XCT_MACTX_CSUM_TCP 0x0000000000000040ull
17016 +#define XCT_MACTX_CSUM_UDP 0x0000000000000060ull
17017 +#define XCT_MACTX_V6 0x0000000000000010ull
17018 +#define XCT_MACTX_C 0x0000000000000004ull
17019 +#define XCT_MACTX_AL2 0x0000000000000002ull
17021 +#define XCT_PTR_T 0x8000000000000000ull
17022 +#define XCT_PTR_LEN_M 0x7ffff00000000000ull
17023 +#define XCT_PTR_LEN_S 44
17024 +#define XCT_PTR_LEN(x) ((((long)(x)) << XCT_PTR_LEN_S) & \
17025 + XCT_PTR_LEN_M)
17026 +#define XCT_PTR_ADDR_M 0x00000fffffffffffull
17027 +#define XCT_PTR_ADDR_S 0
17028 +#define XCT_PTR_ADDR(x) ((((long)(x)) << XCT_PTR_ADDR_S) & \
17029 + XCT_PTR_ADDR_M)
17031 +/* Function descriptor fields */
17032 +#define XCT_FUN_T 0x8000000000000000ull
17033 +#define XCT_FUN_ST 0x4000000000000000ull
17034 +#define XCT_FUN_NORES 0x0000000000000000ull
17035 +#define XCT_FUN_8BRES 0x1000000000000000ull
17036 +#define XCT_FUN_24BRES 0x2000000000000000ull
17037 +#define XCT_FUN_40BRES 0x3000000000000000ull
17038 +#define XCT_FUN_I 0x0800000000000000ull
17039 +#define XCT_FUN_O 0x0400000000000000ull
17040 +#define XCT_FUN_E 0x0200000000000000ull
17041 +#define XCT_FUN_FUN_S 54
17042 +#define XCT_FUN_FUN_M 0x01c0000000000000ull
17043 +#define XCT_FUN_FUN(num) ((((long)(num)) << XCT_FUN_FUN_S) & \
17044 + XCT_FUN_FUN_M)
17045 +#define XCT_FUN_CRM_NOP 0x0000000000000000ull
17046 +#define XCT_FUN_CRM_SIG 0x0008000000000000ull
17047 +#define XCT_FUN_CRM_ENC 0x0010000000000000ull
17048 +#define XCT_FUN_CRM_DEC 0x0018000000000000ull
17049 +#define XCT_FUN_CRM_SIG_ENC 0x0020000000000000ull
17050 +#define XCT_FUN_CRM_ENC_SIG 0x0028000000000000ull
17051 +#define XCT_FUN_CRM_SIG_DEC 0x0030000000000000ull
17052 +#define XCT_FUN_CRM_DEC_SIG 0x0038000000000000ull
17053 +#define XCT_FUN_LLEN_M 0x0007ffff00000000ull
17054 +#define XCT_FUN_LLEN_S 32ULL
17055 +#define XCT_FUN_LLEN(x) ((((long)(x)) << XCT_FUN_LLEN_S) & \
17056 + XCT_FUN_LLEN_M)
17057 +#define XCT_FUN_SHL_M 0x00000000f8000000ull
17058 +#define XCT_FUN_SHL_S 27ull
17059 +#define XCT_FUN_SHL(x) ((((long)(x)) << XCT_FUN_SHL_S) & \
17060 + XCT_FUN_SHL_M)
17061 +#define XCT_FUN_CHL_M 0x0000000007c00000ull
17062 +#define XCT_FUN_CHL_S 22ull
17063 +#define XCT_FUN_CHL(x) ((((long)(x)) << XCT_FUN_CHL_S) & \
17064 + XCT_FUN_CHL_M)
17065 +#define XCT_FUN_HSZ_M 0x00000000003c0000ull
17066 +#define XCT_FUN_HSZ_S 18ull
17067 +#define XCT_FUN_HSZ(x) ((((long)(x)) << XCT_FUN_HSZ_S) & \
17068 + XCT_FUN_HSZ_M)
17069 +#define XCT_FUN_ALG_DES 0x0000000000000000ull
17070 +#define XCT_FUN_ALG_3DES 0x0000000000008000ull
17071 +#define XCT_FUN_ALG_AES 0x0000000000010000ull
17072 +#define XCT_FUN_ALG_ARC 0x0000000000018000ull
17073 +#define XCT_FUN_ALG_KASUMI 0x0000000000020000ull
17074 +#define XCT_FUN_BCM_ECB 0x0000000000000000ull
17075 +#define XCT_FUN_BCM_CBC 0x0000000000001000ull
17076 +#define XCT_FUN_BCM_CFB 0x0000000000002000ull
17077 +#define XCT_FUN_BCM_OFB 0x0000000000003000ull
17078 +#define XCT_FUN_BCM_CNT 0x0000000000003800ull
17079 +#define XCT_FUN_BCM_KAS_F8 0x0000000000002800ull
17080 +#define XCT_FUN_BCM_KAS_F9 0x0000000000001800ull
17081 +#define XCT_FUN_BCP_NO_PAD 0x0000000000000000ull
17082 +#define XCT_FUN_BCP_ZRO 0x0000000000000200ull
17083 +#define XCT_FUN_BCP_PL 0x0000000000000400ull
17084 +#define XCT_FUN_BCP_INCR 0x0000000000000600ull
17085 +#define XCT_FUN_SIG_MD5 (0ull << 4)
17086 +#define XCT_FUN_SIG_SHA1 (2ull << 4)
17087 +#define XCT_FUN_SIG_HMAC_MD5 (8ull << 4)
17088 +#define XCT_FUN_SIG_HMAC_SHA1 (10ull << 4)
17089 +#define XCT_FUN_A 0x0000000000000008ull
17090 +#define XCT_FUN_C 0x0000000000000004ull
17091 +#define XCT_FUN_AL2 0x0000000000000002ull
17092 +#define XCT_FUN_SE 0x0000000000000001ull
17094 +#define XCT_FUN_SRC_PTR(len, addr) (XCT_PTR_LEN(len) | XCT_PTR_ADDR(addr))
17095 +#define XCT_FUN_DST_PTR(len, addr) (XCT_FUN_SRC_PTR(len, addr) | \
17096 + 0x8000000000000000ull)
17098 +#define XCT_CTRL_HDR_FUN_NUM_M 0x01c0000000000000ull
17099 +#define XCT_CTRL_HDR_FUN_NUM_S 54
17100 +#define XCT_CTRL_HDR_LEN_M 0x0007ffff00000000ull
17101 +#define XCT_CTRL_HDR_LEN_S 32
17102 +#define XCT_CTRL_HDR_REG_M 0x00000000000000ffull
17103 +#define XCT_CTRL_HDR_REG_S 0
17105 +#define XCT_CTRL_HDR(funcN,len,reg) (0x9400000000000000ull | \
17106 + ((((long)(funcN)) << XCT_CTRL_HDR_FUN_NUM_S) \
17107 + & XCT_CTRL_HDR_FUN_NUM_M) | \
17108 + ((((long)(len)) << \
17109 + XCT_CTRL_HDR_LEN_S) & XCT_CTRL_HDR_LEN_M) | \
17110 + ((((long)(reg)) << \
17111 + XCT_CTRL_HDR_REG_S) & XCT_CTRL_HDR_REG_M))
17113 +/* Function config command options */
17114 +#define DMA_CALGO_DES 0x00
17115 +#define DMA_CALGO_3DES 0x01
17116 +#define DMA_CALGO_AES 0x02
17117 +#define DMA_CALGO_ARC 0x03
17119 +#define DMA_FN_CIV0 0x02
17120 +#define DMA_FN_CIV1 0x03
17121 +#define DMA_FN_HKEY0 0x0a
17123 +#define XCT_PTR_ADDR_LEN(ptr) ((ptr) & XCT_PTR_ADDR_M), \
17124 + (((ptr) & XCT_PTR_LEN_M) >> XCT_PTR_LEN_S)
17126 +#endif /* PASEMI_FNU_H */
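All of the register-field helpers above follow the same shift-and-mask pattern, FIELD(x) = (((x) << FIELD_S) & FIELD_M), so an out-of-range value is truncated by the mask instead of spilling into neighbouring fields. A couple of worked values, derived directly from the definitions above:

	/* PAS_DMA_TXCHAN_CFG_WT(2)    == (2 << 6) & 0x000001c0 == 0x80 */
	/* PAS_DMA_TXCHAN_CFG_TATTR(3) == (3 << 2) & 0x0000003c == 0x0c */
	/* XCT_FUN_LLEN(0x100)         == (0x100ULL << 32) & 0x0007ffff00000000ull
	 *                             == 0x0000010000000000ull */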
17127 diff -Nur linux-2.6.30.orig/crypto/ocf/random.c linux-2.6.30/crypto/ocf/random.c
17128 --- linux-2.6.30.orig/crypto/ocf/random.c 1970-01-01 01:00:00.000000000 +0100
17129 +++ linux-2.6.30/crypto/ocf/random.c 2009-06-11 10:55:27.000000000 +0200
17130 @@ -0,0 +1,317 @@
17132 + * A system-independent way of adding entropy to the kernel's pool;
17133 + * this way the drivers can focus on the real work and we can take
17134 + * care of pushing it to the appropriate place in the kernel.
17136 + * This should be fast and callable from timers/interrupts
17138 + * Written by David McCullough <david_mccullough@securecomputing.com>
17139 + * Copyright (C) 2006-2007 David McCullough
17140 + * Copyright (C) 2004-2005 Intel Corporation.
17142 + * LICENSE TERMS
17144 + * The free distribution and use of this software in both source and binary
17145 + * form is allowed (with or without changes) provided that:
17147 + * 1. distributions of this source code include the above copyright
17148 + * notice, this list of conditions and the following disclaimer;
17150 + * 2. distributions in binary form include the above copyright
17151 + * notice, this list of conditions and the following disclaimer
17152 + * in the documentation and/or other associated materials;
17154 + * 3. the copyright holder's name is not used to endorse products
17155 + * built using this software without specific written permission.
17157 + * ALTERNATIVELY, provided that this notice is retained in full, this product
17158 + * may be distributed under the terms of the GNU General Public License (GPL),
17159 + * in which case the provisions of the GPL apply INSTEAD OF those given above.
17161 + * DISCLAIMER
17163 + * This software is provided 'as is' with no explicit or implied warranties
17164 + * in respect of its properties, including, but not limited to, correctness
17165 + * and/or fitness for purpose.
17166 + */
17168 +#ifndef AUTOCONF_INCLUDED
17169 +#include <linux/config.h>
17170 +#endif
17171 +#include <linux/module.h>
17172 +#include <linux/init.h>
17173 +#include <linux/list.h>
17174 +#include <linux/slab.h>
17175 +#include <linux/wait.h>
17176 +#include <linux/sched.h>
17177 +#include <linux/spinlock.h>
17178 +#include <linux/version.h>
17179 +#include <linux/unistd.h>
17180 +#include <linux/poll.h>
17181 +#include <linux/random.h>
17182 +#include <cryptodev.h>
17184 +#ifdef CONFIG_OCF_FIPS
17185 +#include "rndtest.h"
17186 +#endif
17188 +#ifndef HAS_RANDOM_INPUT_WAIT
17189 +#error "Please do not enable OCF_RANDOMHARVEST unless you have applied patches"
17190 +#endif
17193 + * a hack to access the debug levels from the crypto driver
17194 + */
17195 +extern int crypto_debug;
17196 +#define debug crypto_debug
17199 + * a list of all registered random providers
17200 + */
17201 +static LIST_HEAD(random_ops);
17202 +static int started = 0;
17203 +static int initted = 0;
17205 +struct random_op {
17206 + struct list_head random_list;
17207 + u_int32_t driverid;
17208 + int (*read_random)(void *arg, u_int32_t *buf, int len);
17209 + void *arg;
17212 +static int random_proc(void *arg);
17214 +static pid_t randomproc = (pid_t) -1;
17215 +static spinlock_t random_lock;
17218 + * just init the spin locks
17219 + */
17220 +static int
17221 +crypto_random_init(void)
17223 + spin_lock_init(&random_lock);
17224 + initted = 1;
17225 + return(0);
17229 + * Add the given random reader to our list (if not present)
17230 + * and start the thread (if not already started).
17232 + * We have to assume that the driver id is OK for now.
17233 + */
17234 +int
17235 +crypto_rregister(
17236 + u_int32_t driverid,
17237 + int (*read_random)(void *arg, u_int32_t *buf, int len),
17238 + void *arg)
17240 + unsigned long flags;
17241 + int ret = 0;
17242 + struct random_op *rops, *tmp;
17244 + dprintk("%s,%d: %s(0x%x, %p, %p)\n", __FILE__, __LINE__,
17245 + __FUNCTION__, driverid, read_random, arg);
17247 + if (!initted)
17248 + crypto_random_init();
17250 +#if 0
17251 + struct cryptocap *cap;
17253 + cap = crypto_checkdriver(driverid);
17254 + if (!cap)
17255 + return EINVAL;
17256 +#endif
17258 + list_for_each_entry_safe(rops, tmp, &random_ops, random_list) {
17259 + if (rops->driverid == driverid && rops->read_random == read_random)
17260 + return EEXIST;
17263 + rops = (struct random_op *) kmalloc(sizeof(*rops), GFP_KERNEL);
17264 + if (!rops)
17265 + return ENOMEM;
17267 + rops->driverid = driverid;
17268 + rops->read_random = read_random;
17269 + rops->arg = arg;
17271 + spin_lock_irqsave(&random_lock, flags);
17272 + list_add_tail(&rops->random_list, &random_ops);
17273 + if (!started) {
17274 + randomproc = kernel_thread(random_proc, NULL, CLONE_FS|CLONE_FILES);
17275 + if (randomproc < 0) {
17276 + ret = randomproc;
17277 + printk("crypto: crypto_rregister cannot start random thread; "
17278 + "error %d\n", ret);
17279 + } else
17280 + started = 1;
17282 + spin_unlock_irqrestore(&random_lock, flags);
17284 + return ret;
17286 +EXPORT_SYMBOL(crypto_rregister);
17288 +int
17289 +crypto_runregister_all(u_int32_t driverid)
17291 + struct random_op *rops, *tmp;
17292 + unsigned long flags;
17294 + dprintk("%s,%d: %s(0x%x)\n", __FILE__, __LINE__, __FUNCTION__, driverid);
17296 + list_for_each_entry_safe(rops, tmp, &random_ops, random_list) {
17297 + if (rops->driverid == driverid) {
17298 + list_del(&rops->random_list);
17299 + kfree(rops);
17303 + spin_lock_irqsave(&random_lock, flags);
17304 + if (list_empty(&random_ops) && started)
17305 + kill_pid(randomproc, SIGKILL, 1);
17306 + spin_unlock_irqrestore(&random_lock, flags);
17307 + return(0);
17309 +EXPORT_SYMBOL(crypto_runregister_all);
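The registration interface above is small: a driver hands over its driver id, a callback
that fills a caller-supplied array of 32-bit words and returns how many it produced (or -1
to have the source dropped), and an opaque argument. A minimal driver-side sketch, with
hypothetical my_hw_* helpers and assuming the id comes from crypto_get_driverid() as for
other OCF drivers:

    /* hypothetical sketch; my_hw_* and "softc" are illustrative only */
    static int
    my_read_random(void *arg, u_int32_t *buf, int maxwords)
    {
            int i;

            for (i = 0; i < maxwords; i++) {
                    if (!my_hw_rng_ready(arg))      /* FIFO empty, stop early */
                            break;
                    buf[i] = my_hw_rng_read(arg);
            }
            return i;           /* words produced; -1 disables this source */
    }

    /* attach path: cid was returned by crypto_get_driverid() */
    crypto_rregister(cid, my_read_random, softc);

    /* detach path */
    crypto_runregister_all(cid);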
17312 + * while we can add entropy to random.c continue to read random data from
17313 + * the drivers and push it to random.
17314 + */
17315 +static int
17316 +random_proc(void *arg)
17318 + int n;
17319 + int wantcnt;
17320 + int bufcnt = 0;
17321 + int retval = 0;
17322 + int *buf = NULL;
17324 +#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0)
17325 + daemonize();
17326 + spin_lock_irq(&current->sigmask_lock);
17327 + sigemptyset(&current->blocked);
17328 + recalc_sigpending(current);
17329 + spin_unlock_irq(&current->sigmask_lock);
17330 + sprintf(current->comm, "ocf-random");
17331 +#else
17332 + daemonize("ocf-random");
17333 + allow_signal(SIGKILL);
17334 +#endif
17336 + (void) get_fs();
17337 + set_fs(get_ds());
17339 +#ifdef CONFIG_OCF_FIPS
17340 +#define NUM_INT (RNDTEST_NBYTES/sizeof(int))
17341 +#else
17342 +#define NUM_INT 32
17343 +#endif
17345 + /*
17346 + * some devices can transfer their RNG data directly into memory,
17347 + * so make sure it is device friendly
17348 + */
17349 + buf = kmalloc(NUM_INT * sizeof(int), GFP_DMA);
17350 + if (NULL == buf) {
17351 + printk("crypto: RNG could not allocate memory\n");
17352 + retval = -ENOMEM;
17353 + goto bad_alloc;
17356 + wantcnt = NUM_INT; /* start by adding some entropy */
17358 + /*
17359 + * it's possible due to errors or driver removal that we no longer
17360 + * have anything to do; if so, exit or we will consume all the CPU
17361 + * doing nothing
17362 + */
17363 + while (!list_empty(&random_ops)) {
17364 + struct random_op *rops, *tmp;
17366 +#ifdef CONFIG_OCF_FIPS
17367 + if (wantcnt)
17368 + wantcnt = NUM_INT; /* FIPS mode can do 20000 bits or none */
17369 +#endif
17371 + /* see if we can get enough entropy to make the world
17372 + * a better place.
17373 + */
17374 + while (bufcnt < wantcnt && bufcnt < NUM_INT) {
17375 + list_for_each_entry_safe(rops, tmp, &random_ops, random_list) {
17377 + n = (*rops->read_random)(rops->arg, &buf[bufcnt],
17378 + NUM_INT - bufcnt);
17380 + /* on failure remove the random number generator */
17381 + if (n == -1) {
17382 + list_del(&rops->random_list);
17383 + printk("crypto: RNG (driverid=0x%x) failed, disabling\n",
17384 + rops->driverid);
17385 + kfree(rops);
17386 + } else if (n > 0)
17387 + bufcnt += n;
17389 + /* give up CPU for a bit, just in case as this is a loop */
17390 + schedule();
17394 +#ifdef CONFIG_OCF_FIPS
17395 + if (bufcnt > 0 && rndtest_buf((unsigned char *) &buf[0])) {
17396 + dprintk("crypto: buffer had fips errors, discarding\n");
17397 + bufcnt = 0;
17399 +#endif
17401 + /*
17402 + * if we have a certified buffer, we can send some data
17403 + * to /dev/random and move along
17404 + */
17405 + if (bufcnt > 0) {
17406 + /* add what we have */
17407 + random_input_words(buf, bufcnt, bufcnt*sizeof(int)*8);
17408 + bufcnt = 0;
17411 + /* give up CPU for a bit so we don't hog while filling */
17412 + schedule();
17414 + /* wait until more entropy is needed */
17415 + wantcnt = random_input_wait();
17417 + if (wantcnt <= 0)
17418 + wantcnt = 0; /* try to get some info again */
17419 + else
17420 + /* round up to one word or we can loop forever */
17421 + wantcnt = (wantcnt + (sizeof(int)*8)) / (sizeof(int)*8);
17422 + if (wantcnt > NUM_INT) {
17423 + wantcnt = NUM_INT;
17426 + if (signal_pending(current)) {
17427 +#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0)
17428 + spin_lock_irq(&current->sigmask_lock);
17429 +#endif
17430 + flush_signals(current);
17431 +#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0)
17432 + spin_unlock_irq(&current->sigmask_lock);
17433 +#endif
17437 + kfree(buf);
17439 +bad_alloc:
17440 + spin_lock_irq(&random_lock);
17441 + randomproc = (pid_t) -1;
17442 + started = 0;
17443 + spin_unlock_irq(&random_lock);
17445 + return retval;
17448 diff -Nur linux-2.6.30.orig/crypto/ocf/README linux-2.6.30/crypto/ocf/README
17449 --- linux-2.6.30.orig/crypto/ocf/README 1970-01-01 01:00:00.000000000 +0100
17450 +++ linux-2.6.30/crypto/ocf/README 2009-06-11 10:55:27.000000000 +0200
17451 @@ -0,0 +1,167 @@
17452 +README - ocf-linux-20071215
17453 +---------------------------
17455 +This README provides instructions for getting ocf-linux compiled and
17456 +operating in a generic linux environment. For other information you
17457 +might like to visit the home page for this project:
17459 + http://ocf-linux.sourceforge.net/
17461 +Adding OCF to linux
17462 +-------------------
17464 + Not much in this file for now, just some notes. I usually build
17465 + the ocf support as modules but it can be built into the kernel as
17466 + well. To use it:
17468 + * mknod /dev/crypto c 10 70
17470 + * to add OCF to your kernel source, you have two options. Apply
17471 + the kernel specific patch:
17473 + cd linux-2.4*; gunzip < ocf-linux-24-XXXXXXXX.patch.gz | patch -p1
17474 + cd linux-2.6*; gunzip < ocf-linux-26-XXXXXXXX.patch.gz | patch -p1
17476 + if you do one of the above, then you can proceed to the next step,
17477 + or you can do the above process by hand using the patches against
17478 + linux-2.4.35 and 2.6.23 to include the ocf code under crypto/ocf.
17479 + Here's how to add it:
17481 + for 2.4.35 (and later)
17483 + cd linux-2.4.35/crypto
17484 + tar xvzf ocf-linux.tar.gz
17485 + cd ..
17486 + patch -p1 < crypto/ocf/patches/linux-2.4.35-ocf.patch
17488 + for 2.6.23 (and later), find the kernel patch specific to (or nearest)
17489 + your kernel version and then:
17491 + cd linux-2.6.NN/crypto
17492 + tar xvzf ocf-linux.tar.gz
17493 + cd ..
17494 + patch -p1 < crypto/ocf/patches/linux-2.6.NN-ocf.patch
17496 + It should be easy to take this patch and apply it to other more
17497 + recent versions of the kernels. The same patches should also work
17498 + relatively easily on kernels as old as 2.6.11 and 2.4.18.
17500 + * under 2.4 if you are on a non-x86 platform, you may need to:
17502 + cp linux-2.X.x/include/asm-i386/kmap_types.h linux-2.X.x/include/asm-YYY
17504 + so that you can build the kernel crypto support needed for the cryptosoft
17505 + driver.
17507 + * For simplicity you should enable all the crypto support in your kernel
17508 + except for the test driver. Likewise for the OCF options. Do not
17509 + enable OCF crypto drivers for HW that you do not have (for example
17510 + ixp4xx will not compile on non-Xscale systems).
17512 + * make sure that cryptodev.h (from ocf-linux.tar.gz) is installed as
17513 + crypto/cryptodev.h in an include directory that is used for building
17514 + applications for your platform. For example on a host system that
17515 + might be:
17517 + /usr/include/crypto/cryptodev.h
17519 + * patch your openssl-0.9.8i code with the openssl-0.9.8i.patch.
17520 + (NOTE: there is no longer a need to patch ssh). The patch is against:
17521 + openssl-0_9_8e
17523 + If you need a patch for an older version of openssl, you should look
17524 + to older OCF releases. This patch is unlikely to work on older
17525 + openssl versions.
17527 + openssl-0.9.8i.patch
17528 + - enables --with-cryptodev for non-BSD systems
17529 + - adds -cpu option to openssl speed for calculating CPU load
17530 + under linux
17531 + - fixes null pointer in openssl speed multi thread output.
17532 + - fixes test keys to work with linux crypto's more stringent
17533 + key checking.
17534 + - adds MD5/SHA acceleration (Ronen Shitrit), only enabled
17535 + with the --with-cryptodev-digests option
17536 + - fixes bug in engine code caching.
17538 + * build crypto-tools-XXXXXXXX.tar.gz if you want to try some of the BSD
17539 + tools for testing OCF (e.g., cryptotest).
17541 +How to load the OCF drivers
17542 +---------------------------
17544 + First insert the base modules:
17546 + insmod ocf
17547 + insmod cryptodev
17549 + You can then install the software OCF driver with:
17551 + insmod cryptosoft
17553 + and one or more of the OCF HW drivers with:
17555 + insmod safe
17556 + insmod hifn7751
17557 + insmod ixp4xx
17558 + ...
17560 + All the drivers take a debug option to enable verbose debugging so that
17561 + you can see what is going on. To enable debug at load time, use:
17563 + insmod ocf crypto_debug=1
17564 + insmod cryptodev cryptodev_debug=1
17565 + insmod cryptosoft swcr_debug=1
17567 + You may load more than one OCF crypto driver but then there is no guarantee
17568 + as to which will be used.
17570 + You can also enable debug at run time on 2.6 systems with the following:
17572 + echo 1 > /sys/module/ocf/parameters/crypto_debug
17573 + echo 1 > /sys/module/cryptodev/parameters/cryptodev_debug
17574 + echo 1 > /sys/module/cryptosoft/parameters/swcr_debug
17575 + echo 1 > /sys/module/hifn7751/parameters/hifn_debug
17576 + echo 1 > /sys/module/safe/parameters/safe_debug
17577 + echo 1 > /sys/module/ixp4xx/parameters/ixp_debug
17578 + ...
17580 +Testing the OCF support
17581 +-----------------------
17583 + run "cryptotest", it should do a short test for a couple of
17584 + des packets. If it does everything is working.
17586 + If this works, then ssh will use the driver when invoked as:
17588 + ssh -c 3des username@host
17590 + To see for sure that it is operating, enable debug as described above.
17592 + To get a better idea of performance, run:
17594 + cryptotest 100 4096
17596 + There are more options to cryptotest, see the help.
17598 + It is also possible to use openssl to test the speed of the crypto
17599 + drivers.
17601 + openssl speed -evp des -engine cryptodev -elapsed
17602 + openssl speed -evp des3 -engine cryptodev -elapsed
17603 + openssl speed -evp aes128 -engine cryptodev -elapsed
17605 + and multiple threads (10) with:
17607 + openssl speed -evp des -engine cryptodev -elapsed -multi 10
17608 + openssl speed -evp des3 -engine cryptodev -elapsed -multi 10
17609 + openssl speed -evp aes128 -engine cryptodev -elapsed -multi 10
17611 + For public-key testing you can try:
17613 + cryptokeytest
17614 + openssl speed -engine cryptodev rsa -elapsed
17615 + openssl speed -engine cryptodev dsa -elapsed
17617 +David McCullough
17618 +david_mccullough@securecomputing.com
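For reference, a minimal user-space sketch of driving /dev/crypto through the cryptodev.h
ioctl interface installed above (error handling trimmed; the function name and buffers are
illustrative, and 3DES-CBC requires len to be a multiple of 8):

    #include <sys/types.h>
    #include <sys/ioctl.h>
    #include <fcntl.h>
    #include <string.h>
    #include <unistd.h>
    #include <crypto/cryptodev.h>

    int des3_encrypt_once(unsigned char *key24, unsigned char *iv8,
                          unsigned char *buf, int len)
    {
            struct session_op sess;
            struct crypt_op cop;
            int fd = open("/dev/crypto", O_RDWR);

            if (fd < 0)
                    return -1;

            memset(&sess, 0, sizeof(sess));
            sess.cipher = CRYPTO_3DES_CBC;
            sess.keylen = 24;
            sess.key = (caddr_t) key24;
            if (ioctl(fd, CIOCGSESSION, &sess) < 0)
                    return -1;

            memset(&cop, 0, sizeof(cop));
            cop.ses = sess.ses;
            cop.op = COP_ENCRYPT;
            cop.len = len;
            cop.src = (caddr_t) buf;
            cop.dst = (caddr_t) buf;        /* encrypt in place */
            cop.iv = (caddr_t) iv8;
            if (ioctl(fd, CIOCCRYPT, &cop) < 0)
                    return -1;

            ioctl(fd, CIOCFSESSION, &sess.ses);
            close(fd);
            return 0;
    }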
17619 diff -Nur linux-2.6.30.orig/crypto/ocf/rndtest.c linux-2.6.30/crypto/ocf/rndtest.c
17620 --- linux-2.6.30.orig/crypto/ocf/rndtest.c 1970-01-01 01:00:00.000000000 +0100
17621 +++ linux-2.6.30/crypto/ocf/rndtest.c 2009-06-11 10:55:27.000000000 +0200
17622 @@ -0,0 +1,300 @@
17623 +/* $OpenBSD$ */
17626 + * OCF/Linux port done by David McCullough <david_mccullough@securecomputing.com>
17627 + * Copyright (C) 2006-2007 David McCullough
17628 + * Copyright (C) 2004-2005 Intel Corporation.
17629 + * The license and original author are listed below.
17631 + * Copyright (c) 2002 Jason L. Wright (jason@thought.net)
17632 + * All rights reserved.
17634 + * Redistribution and use in source and binary forms, with or without
17635 + * modification, are permitted provided that the following conditions
17636 + * are met:
17637 + * 1. Redistributions of source code must retain the above copyright
17638 + * notice, this list of conditions and the following disclaimer.
17639 + * 2. Redistributions in binary form must reproduce the above copyright
17640 + * notice, this list of conditions and the following disclaimer in the
17641 + * documentation and/or other materials provided with the distribution.
17642 + * 3. All advertising materials mentioning features or use of this software
17643 + * must display the following acknowledgement:
17644 + * This product includes software developed by Jason L. Wright
17645 + * 4. The name of the author may not be used to endorse or promote products
17646 + * derived from this software without specific prior written permission.
17648 + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
17649 + * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
17650 + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
17651 + * DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT,
17652 + * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
17653 + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
17654 + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
17655 + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
17656 + * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
17657 + * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
17658 + * POSSIBILITY OF SUCH DAMAGE.
17659 + */
17661 +#ifndef AUTOCONF_INCLUDED
17662 +#include <linux/config.h>
17663 +#endif
17664 +#include <linux/module.h>
17665 +#include <linux/list.h>
17666 +#include <linux/wait.h>
17667 +#include <linux/time.h>
17668 +#include <linux/version.h>
17669 +#include <linux/unistd.h>
17670 +#include <linux/kernel.h>
17671 +#include <linux/string.h>
17672 +#include <linux/time.h>
17673 +#include <cryptodev.h>
17674 +#include "rndtest.h"
17676 +static struct rndtest_stats rndstats;
17678 +static void rndtest_test(struct rndtest_state *);
17680 +/* The tests themselves */
17681 +static int rndtest_monobit(struct rndtest_state *);
17682 +static int rndtest_runs(struct rndtest_state *);
17683 +static int rndtest_longruns(struct rndtest_state *);
17684 +static int rndtest_chi_4(struct rndtest_state *);
17686 +static int rndtest_runs_check(struct rndtest_state *, int, int *);
17687 +static void rndtest_runs_record(struct rndtest_state *, int, int *);
17689 +static const struct rndtest_testfunc {
17690 + int (*test)(struct rndtest_state *);
17691 +} rndtest_funcs[] = {
17692 + { rndtest_monobit },
17693 + { rndtest_runs },
17694 + { rndtest_chi_4 },
17695 + { rndtest_longruns },
17698 +#define RNDTEST_NTESTS (sizeof(rndtest_funcs)/sizeof(rndtest_funcs[0]))
17700 +static void
17701 +rndtest_test(struct rndtest_state *rsp)
17703 + int i, rv = 0;
17705 + rndstats.rst_tests++;
17706 + for (i = 0; i < RNDTEST_NTESTS; i++)
17707 + rv |= (*rndtest_funcs[i].test)(rsp);
17708 + rsp->rs_discard = (rv != 0);
17712 +extern int crypto_debug;
17713 +#define rndtest_verbose 2
17714 +#define rndtest_report(rsp, failure, fmt, a...) \
17715 + { if (failure || crypto_debug) { printk("rng_test: " fmt "\n", a); } else; }
17717 +#define RNDTEST_MONOBIT_MINONES 9725
17718 +#define RNDTEST_MONOBIT_MAXONES 10275
17720 +static int
17721 +rndtest_monobit(struct rndtest_state *rsp)
17723 + int i, ones = 0, j;
17724 + u_int8_t r;
17726 + for (i = 0; i < RNDTEST_NBYTES; i++) {
17727 + r = rsp->rs_buf[i];
17728 + for (j = 0; j < 8; j++, r <<= 1)
17729 + if (r & 0x80)
17730 + ones++;
17732 + if (ones > RNDTEST_MONOBIT_MINONES &&
17733 + ones < RNDTEST_MONOBIT_MAXONES) {
17734 + if (rndtest_verbose > 1)
17735 + rndtest_report(rsp, 0, "monobit pass (%d < %d < %d)",
17736 + RNDTEST_MONOBIT_MINONES, ones,
17737 + RNDTEST_MONOBIT_MAXONES);
17738 + return (0);
17739 + } else {
17740 + if (rndtest_verbose)
17741 + rndtest_report(rsp, 1,
17742 + "monobit failed (%d ones)", ones);
17743 + rndstats.rst_monobit++;
17744 + return (-1);
17748 +#define RNDTEST_RUNS_NINTERVAL 6
17750 +static const struct rndtest_runs_tabs {
17751 + u_int16_t min, max;
17752 +} rndtest_runs_tab[] = {
17753 + { 2343, 2657 },
17754 + { 1135, 1365 },
17755 + { 542, 708 },
17756 + { 251, 373 },
17757 + { 111, 201 },
17758 + { 111, 201 },
17761 +static int
17762 +rndtest_runs(struct rndtest_state *rsp)
17764 + int i, j, ones, zeros, rv = 0;
17765 + int onei[RNDTEST_RUNS_NINTERVAL], zeroi[RNDTEST_RUNS_NINTERVAL];
17766 + u_int8_t c;
17768 + bzero(onei, sizeof(onei));
17769 + bzero(zeroi, sizeof(zeroi));
17770 + ones = zeros = 0;
17771 + for (i = 0; i < RNDTEST_NBYTES; i++) {
17772 + c = rsp->rs_buf[i];
17773 + for (j = 0; j < 8; j++, c <<= 1) {
17774 + if (c & 0x80) {
17775 + ones++;
17776 + rndtest_runs_record(rsp, zeros, zeroi);
17777 + zeros = 0;
17778 + } else {
17779 + zeros++;
17780 + rndtest_runs_record(rsp, ones, onei);
17781 + ones = 0;
17785 + rndtest_runs_record(rsp, ones, onei);
17786 + rndtest_runs_record(rsp, zeros, zeroi);
17788 + rv |= rndtest_runs_check(rsp, 0, zeroi);
17789 + rv |= rndtest_runs_check(rsp, 1, onei);
17791 + if (rv)
17792 + rndstats.rst_runs++;
17794 + return (rv);
17797 +static void
17798 +rndtest_runs_record(struct rndtest_state *rsp, int len, int *intrv)
17800 + if (len == 0)
17801 + return;
17802 + if (len > RNDTEST_RUNS_NINTERVAL)
17803 + len = RNDTEST_RUNS_NINTERVAL;
17804 + len -= 1;
17805 + intrv[len]++;
17808 +static int
17809 +rndtest_runs_check(struct rndtest_state *rsp, int val, int *src)
17811 + int i, rv = 0;
17813 + for (i = 0; i < RNDTEST_RUNS_NINTERVAL; i++) {
17814 + if (src[i] < rndtest_runs_tab[i].min ||
17815 + src[i] > rndtest_runs_tab[i].max) {
17816 + rndtest_report(rsp, 1,
17817 + "%s interval %d failed (%d, %d-%d)",
17818 + val ? "ones" : "zeros",
17819 + i + 1, src[i], rndtest_runs_tab[i].min,
17820 + rndtest_runs_tab[i].max);
17821 + rv = -1;
17822 + } else {
17823 + rndtest_report(rsp, 0,
17824 + "runs pass %s interval %d (%d < %d < %d)",
17825 + val ? "ones" : "zeros",
17826 + i + 1, rndtest_runs_tab[i].min, src[i],
17827 + rndtest_runs_tab[i].max);
17830 + return (rv);
17833 +static int
17834 +rndtest_longruns(struct rndtest_state *rsp)
17836 + int i, j, ones = 0, zeros = 0, maxones = 0, maxzeros = 0;
17837 + u_int8_t c;
17839 + for (i = 0; i < RNDTEST_NBYTES; i++) {
17840 + c = rsp->rs_buf[i];
17841 + for (j = 0; j < 8; j++, c <<= 1) {
17842 + if (c & 0x80) {
17843 + zeros = 0;
17844 + ones++;
17845 + if (ones > maxones)
17846 + maxones = ones;
17847 + } else {
17848 + ones = 0;
17849 + zeros++;
17850 + if (zeros > maxzeros)
17851 + maxzeros = zeros;
17856 + if (maxones < 26 && maxzeros < 26) {
17857 + rndtest_report(rsp, 0, "longruns pass (%d ones, %d zeros)",
17858 + maxones, maxzeros);
17859 + return (0);
17860 + } else {
17861 + rndtest_report(rsp, 1, "longruns fail (%d ones, %d zeros)",
17862 + maxones, maxzeros);
17863 + rndstats.rst_longruns++;
17864 + return (-1);
17869 + * chi^2 test over 4 bits: this is called the poker test in FIPS 140-2,
17870 + * but it is really the chi^2 test over 4 bits (the poker test as described
17871 + * by Knuth vol 2 is something different, and I take him as authoritative
17872 + * on nomenclature over NIST).
17873 + */
17874 +#define RNDTEST_CHI4_K 16
17875 +#define RNDTEST_CHI4_K_MASK (RNDTEST_CHI4_K - 1)
17878 + * The unnormalized values are used so that we don't have to worry about
17879 + * fractional precision. The "real" value is found by:
17880 + * (V - 1562500) * (16 / 5000) = Vn (where V is the unnormalized value)
17881 + */
17882 +#define RNDTEST_CHI4_VMIN 1563181 /* 2.1792 */
17883 +#define RNDTEST_CHI4_VMAX 1576929 /* 46.1728 */
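The two bounds match the normalized values quoted in the comments: with f_i the count of
each 4-bit pattern over the 5000 nibbles of an RNDTEST_NBYTES buffer,

    X = \frac{16}{5000}\sum_{i=0}^{15} f_i^2 - 5000
      = \frac{16}{5000}\left(V - \frac{5000^2}{16}\right)
      = (V - 1562500)\cdot\frac{16}{5000}

so V_min = 1563181 gives X = 681 * 0.0032 = 2.1792 and V_max = 1576929 gives
X = 14429 * 0.0032 = 46.1728.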
17885 +static int
17886 +rndtest_chi_4(struct rndtest_state *rsp)
17888 + unsigned int freq[RNDTEST_CHI4_K], i, sum;
17890 + for (i = 0; i < RNDTEST_CHI4_K; i++)
17891 + freq[i] = 0;
17893 + /* Get number of occurrences of each 4-bit pattern */
17894 + for (i = 0; i < RNDTEST_NBYTES; i++) {
17895 + freq[(rsp->rs_buf[i] >> 4) & RNDTEST_CHI4_K_MASK]++;
17896 + freq[(rsp->rs_buf[i] >> 0) & RNDTEST_CHI4_K_MASK]++;
17899 + for (i = 0, sum = 0; i < RNDTEST_CHI4_K; i++)
17900 + sum += freq[i] * freq[i];
17902 + if (sum >= RNDTEST_CHI4_VMIN && sum <= RNDTEST_CHI4_VMAX) {
17903 + rndtest_report(rsp, 0, "chi^2(4): pass (sum %u)", sum);
17904 + return (0);
17905 + } else {
17906 + rndtest_report(rsp, 1, "chi^2(4): failed (sum %u)", sum);
17907 + rndstats.rst_chi++;
17908 + return (-1);
17912 +int
17913 +rndtest_buf(unsigned char *buf)
17915 + struct rndtest_state rsp;
17917 + memset(&rsp, 0, sizeof(rsp));
17918 + rsp.rs_buf = buf;
17919 + rndtest_test(&rsp);
17920 + return(rsp.rs_discard);
17923 diff -Nur linux-2.6.30.orig/crypto/ocf/rndtest.h linux-2.6.30/crypto/ocf/rndtest.h
17924 --- linux-2.6.30.orig/crypto/ocf/rndtest.h 1970-01-01 01:00:00.000000000 +0100
17925 +++ linux-2.6.30/crypto/ocf/rndtest.h 2009-06-11 10:55:27.000000000 +0200
17926 @@ -0,0 +1,54 @@
17927 +/* $FreeBSD: src/sys/dev/rndtest/rndtest.h,v 1.1 2003/03/11 22:54:44 sam Exp $ */
17928 +/* $OpenBSD$ */
17931 + * Copyright (c) 2002 Jason L. Wright (jason@thought.net)
17932 + * All rights reserved.
17934 + * Redistribution and use in source and binary forms, with or without
17935 + * modification, are permitted provided that the following conditions
17936 + * are met:
17937 + * 1. Redistributions of source code must retain the above copyright
17938 + * notice, this list of conditions and the following disclaimer.
17939 + * 2. Redistributions in binary form must reproduce the above copyright
17940 + * notice, this list of conditions and the following disclaimer in the
17941 + * documentation and/or other materials provided with the distribution.
17942 + * 3. All advertising materials mentioning features or use of this software
17943 + * must display the following acknowledgement:
17944 + * This product includes software developed by Jason L. Wright
17945 + * 4. The name of the author may not be used to endorse or promote products
17946 + * derived from this software without specific prior written permission.
17948 + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
17949 + * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
17950 + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
17951 + * DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT,
17952 + * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
17953 + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
17954 + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
17955 + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
17956 + * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
17957 + * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
17958 + * POSSIBILITY OF SUCH DAMAGE.
17959 + */
17962 +/* Some of the tests depend on these values */
17963 +#define RNDTEST_NBYTES 2500
17964 +#define RNDTEST_NBITS (8 * RNDTEST_NBYTES)
17966 +struct rndtest_state {
17967 + int rs_discard; /* discard/accept random data */
17968 + u_int8_t *rs_buf;
17971 +struct rndtest_stats {
17972 + u_int32_t rst_discard; /* number of bytes discarded */
17973 + u_int32_t rst_tests; /* number of test runs */
17974 + u_int32_t rst_monobit; /* monobit test failures */
17975 + u_int32_t rst_runs; /* 0/1 runs failures */
17976 + u_int32_t rst_longruns; /* longruns failures */
17977 + u_int32_t rst_chi; /* chi^2 failures */
17980 +extern int rndtest_buf(unsigned char *buf);
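A hedged caller-side sketch (the fill/feed helpers are hypothetical); the harvester thread
in random.c follows the same pattern, testing a full RNDTEST_NBYTES buffer and discarding
it when rndtest_buf() reports a failure:

    unsigned char rng_buf[RNDTEST_NBYTES];

    fill_from_hw_rng(rng_buf, sizeof(rng_buf));         /* hypothetical source */
    if (rndtest_buf(rng_buf) == 0) {
            /* passed monobit/runs/longruns/chi^2, safe to use */
            feed_entropy(rng_buf, sizeof(rng_buf));     /* hypothetical sink */
    } else {
            /* rs_discard was set, throw the whole buffer away */
    }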
17981 diff -Nur linux-2.6.30.orig/crypto/ocf/safe/Makefile linux-2.6.30/crypto/ocf/safe/Makefile
17982 --- linux-2.6.30.orig/crypto/ocf/safe/Makefile 1970-01-01 01:00:00.000000000 +0100
17983 +++ linux-2.6.30/crypto/ocf/safe/Makefile 2009-06-11 10:55:27.000000000 +0200
17984 @@ -0,0 +1,12 @@
17985 +# for SGlinux builds
17986 +-include $(ROOTDIR)/modules/.config
17988 +obj-$(CONFIG_OCF_SAFE) += safe.o
17990 +obj ?= .
17991 +EXTRA_CFLAGS += -I$(obj)/.. -I$(obj)/
17993 +ifdef TOPDIR
17994 +-include $(TOPDIR)/Rules.make
17995 +endif
17997 diff -Nur linux-2.6.30.orig/crypto/ocf/safe/md5.c linux-2.6.30/crypto/ocf/safe/md5.c
17998 --- linux-2.6.30.orig/crypto/ocf/safe/md5.c 1970-01-01 01:00:00.000000000 +0100
17999 +++ linux-2.6.30/crypto/ocf/safe/md5.c 2009-06-11 10:55:27.000000000 +0200
18000 @@ -0,0 +1,308 @@
18001 +/* $KAME: md5.c,v 1.5 2000/11/08 06:13:08 itojun Exp $ */
18003 + * Copyright (C) 1995, 1996, 1997, and 1998 WIDE Project.
18004 + * All rights reserved.
18006 + * Redistribution and use in source and binary forms, with or without
18007 + * modification, are permitted provided that the following conditions
18008 + * are met:
18009 + * 1. Redistributions of source code must retain the above copyright
18010 + * notice, this list of conditions and the following disclaimer.
18011 + * 2. Redistributions in binary form must reproduce the above copyright
18012 + * notice, this list of conditions and the following disclaimer in the
18013 + * documentation and/or other materials provided with the distribution.
18014 + * 3. Neither the name of the project nor the names of its contributors
18015 + * may be used to endorse or promote products derived from this software
18016 + * without specific prior written permission.
18018 + * THIS SOFTWARE IS PROVIDED BY THE PROJECT AND CONTRIBUTORS ``AS IS'' AND
18019 + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
18020 + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
18021 + * ARE DISCLAIMED. IN NO EVENT SHALL THE PROJECT OR CONTRIBUTORS BE LIABLE
18022 + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
18023 + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
18024 + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
18025 + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
18026 + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
18027 + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
18028 + * SUCH DAMAGE.
18029 + */
18031 +#if 0
18032 +#include <sys/cdefs.h>
18033 +__FBSDID("$FreeBSD: src/sys/crypto/md5.c,v 1.9 2004/01/27 19:49:19 des Exp $");
18035 +#include <sys/types.h>
18036 +#include <sys/cdefs.h>
18037 +#include <sys/time.h>
18038 +#include <sys/systm.h>
18039 +#include <crypto/md5.h>
18040 +#endif
18042 +#define SHIFT(X, s) (((X) << (s)) | ((X) >> (32 - (s))))
18044 +#define F(X, Y, Z) (((X) & (Y)) | ((~X) & (Z)))
18045 +#define G(X, Y, Z) (((X) & (Z)) | ((Y) & (~Z)))
18046 +#define H(X, Y, Z) ((X) ^ (Y) ^ (Z))
18047 +#define I(X, Y, Z) ((Y) ^ ((X) | (~Z)))
18049 +#define ROUND1(a, b, c, d, k, s, i) { \
18050 + (a) = (a) + F((b), (c), (d)) + X[(k)] + T[(i)]; \
18051 + (a) = SHIFT((a), (s)); \
18052 + (a) = (b) + (a); \
18055 +#define ROUND2(a, b, c, d, k, s, i) { \
18056 + (a) = (a) + G((b), (c), (d)) + X[(k)] + T[(i)]; \
18057 + (a) = SHIFT((a), (s)); \
18058 + (a) = (b) + (a); \
18061 +#define ROUND3(a, b, c, d, k, s, i) { \
18062 + (a) = (a) + H((b), (c), (d)) + X[(k)] + T[(i)]; \
18063 + (a) = SHIFT((a), (s)); \
18064 + (a) = (b) + (a); \
18067 +#define ROUND4(a, b, c, d, k, s, i) { \
18068 + (a) = (a) + I((b), (c), (d)) + X[(k)] + T[(i)]; \
18069 + (a) = SHIFT((a), (s)); \
18070 + (a) = (b) + (a); \
18073 +#define Sa 7
18074 +#define Sb 12
18075 +#define Sc 17
18076 +#define Sd 22
18078 +#define Se 5
18079 +#define Sf 9
18080 +#define Sg 14
18081 +#define Sh 20
18083 +#define Si 4
18084 +#define Sj 11
18085 +#define Sk 16
18086 +#define Sl 23
18088 +#define Sm 6
18089 +#define Sn 10
18090 +#define So 15
18091 +#define Sp 21
18093 +#define MD5_A0 0x67452301
18094 +#define MD5_B0 0xefcdab89
18095 +#define MD5_C0 0x98badcfe
18096 +#define MD5_D0 0x10325476
18098 +/* Integer part of 4294967296 times abs(sin(i)), where i is in radians. */
18099 +static const u_int32_t T[65] = {
18100 + 0,
18101 + 0xd76aa478, 0xe8c7b756, 0x242070db, 0xc1bdceee,
18102 + 0xf57c0faf, 0x4787c62a, 0xa8304613, 0xfd469501,
18103 + 0x698098d8, 0x8b44f7af, 0xffff5bb1, 0x895cd7be,
18104 + 0x6b901122, 0xfd987193, 0xa679438e, 0x49b40821,
18106 + 0xf61e2562, 0xc040b340, 0x265e5a51, 0xe9b6c7aa,
18107 + 0xd62f105d, 0x2441453, 0xd8a1e681, 0xe7d3fbc8,
18108 + 0x21e1cde6, 0xc33707d6, 0xf4d50d87, 0x455a14ed,
18109 + 0xa9e3e905, 0xfcefa3f8, 0x676f02d9, 0x8d2a4c8a,
18111 + 0xfffa3942, 0x8771f681, 0x6d9d6122, 0xfde5380c,
18112 + 0xa4beea44, 0x4bdecfa9, 0xf6bb4b60, 0xbebfbc70,
18113 + 0x289b7ec6, 0xeaa127fa, 0xd4ef3085, 0x4881d05,
18114 + 0xd9d4d039, 0xe6db99e5, 0x1fa27cf8, 0xc4ac5665,
18116 + 0xf4292244, 0x432aff97, 0xab9423a7, 0xfc93a039,
18117 + 0x655b59c3, 0x8f0ccc92, 0xffeff47d, 0x85845dd1,
18118 + 0x6fa87e4f, 0xfe2ce6e0, 0xa3014314, 0x4e0811a1,
18119 + 0xf7537e82, 0xbd3af235, 0x2ad7d2bb, 0xeb86d391,
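The constants above follow directly from the definition in the comment; a stand-alone
sketch (not part of the patch, link with -lm) that regenerates them:

    #include <math.h>
    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            int i;

            /* T[0] is unused padding; T[1]..T[64] come from sin(1)..sin(64) */
            for (i = 1; i <= 64; i++)
                    printf("0x%08lx,\n", (unsigned long)
                        (uint32_t)(4294967296.0 * fabs(sin((double)i))));
            return 0;
    }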
18122 +static const u_int8_t md5_paddat[MD5_BUFLEN] = {
18123 + 0x80, 0, 0, 0, 0, 0, 0, 0,
18124 + 0, 0, 0, 0, 0, 0, 0, 0,
18125 + 0, 0, 0, 0, 0, 0, 0, 0,
18126 + 0, 0, 0, 0, 0, 0, 0, 0,
18127 + 0, 0, 0, 0, 0, 0, 0, 0,
18128 + 0, 0, 0, 0, 0, 0, 0, 0,
18129 + 0, 0, 0, 0, 0, 0, 0, 0,
18130 + 0, 0, 0, 0, 0, 0, 0, 0,
18133 +static void md5_calc(u_int8_t *, md5_ctxt *);
18135 +void md5_init(ctxt)
18136 + md5_ctxt *ctxt;
18138 + ctxt->md5_n = 0;
18139 + ctxt->md5_i = 0;
18140 + ctxt->md5_sta = MD5_A0;
18141 + ctxt->md5_stb = MD5_B0;
18142 + ctxt->md5_stc = MD5_C0;
18143 + ctxt->md5_std = MD5_D0;
18144 + bzero(ctxt->md5_buf, sizeof(ctxt->md5_buf));
18147 +void md5_loop(ctxt, input, len)
18148 + md5_ctxt *ctxt;
18149 + u_int8_t *input;
18150 + u_int len; /* number of bytes */
18152 + u_int gap, i;
18154 + ctxt->md5_n += len * 8; /* byte to bit */
18155 + gap = MD5_BUFLEN - ctxt->md5_i;
18157 + if (len >= gap) {
18158 + bcopy((void *)input, (void *)(ctxt->md5_buf + ctxt->md5_i),
18159 + gap);
18160 + md5_calc(ctxt->md5_buf, ctxt);
18162 + for (i = gap; i + MD5_BUFLEN <= len; i += MD5_BUFLEN) {
18163 + md5_calc((u_int8_t *)(input + i), ctxt);
18166 + ctxt->md5_i = len - i;
18167 + bcopy((void *)(input + i), (void *)ctxt->md5_buf, ctxt->md5_i);
18168 + } else {
18169 + bcopy((void *)input, (void *)(ctxt->md5_buf + ctxt->md5_i),
18170 + len);
18171 + ctxt->md5_i += len;
18175 +void md5_pad(ctxt)
18176 + md5_ctxt *ctxt;
18178 + u_int gap;
18180 + /* Don't count up padding. Keep md5_n. */
18181 + gap = MD5_BUFLEN - ctxt->md5_i;
18182 + if (gap > 8) {
18183 + bcopy(md5_paddat,
18184 + (void *)(ctxt->md5_buf + ctxt->md5_i),
18185 + gap - sizeof(ctxt->md5_n));
18186 + } else {
18187 + /* including gap == 8 */
18188 + bcopy(md5_paddat, (void *)(ctxt->md5_buf + ctxt->md5_i),
18189 + gap);
18190 + md5_calc(ctxt->md5_buf, ctxt);
18191 + bcopy((md5_paddat + gap),
18192 + (void *)ctxt->md5_buf,
18193 + MD5_BUFLEN - sizeof(ctxt->md5_n));
18196 + /* 8 byte word */
18197 +#if BYTE_ORDER == LITTLE_ENDIAN
18198 + bcopy(&ctxt->md5_n8[0], &ctxt->md5_buf[56], 8);
18199 +#endif
18200 +#if BYTE_ORDER == BIG_ENDIAN
18201 + ctxt->md5_buf[56] = ctxt->md5_n8[7];
18202 + ctxt->md5_buf[57] = ctxt->md5_n8[6];
18203 + ctxt->md5_buf[58] = ctxt->md5_n8[5];
18204 + ctxt->md5_buf[59] = ctxt->md5_n8[4];
18205 + ctxt->md5_buf[60] = ctxt->md5_n8[3];
18206 + ctxt->md5_buf[61] = ctxt->md5_n8[2];
18207 + ctxt->md5_buf[62] = ctxt->md5_n8[1];
18208 + ctxt->md5_buf[63] = ctxt->md5_n8[0];
18209 +#endif
18211 + md5_calc(ctxt->md5_buf, ctxt);
18214 +void md5_result(digest, ctxt)
18215 + u_int8_t *digest;
18216 + md5_ctxt *ctxt;
18218 + /* 4 byte words */
18219 +#if BYTE_ORDER == LITTLE_ENDIAN
18220 + bcopy(&ctxt->md5_st8[0], digest, 16);
18221 +#endif
18222 +#if BYTE_ORDER == BIG_ENDIAN
18223 + digest[ 0] = ctxt->md5_st8[ 3]; digest[ 1] = ctxt->md5_st8[ 2];
18224 + digest[ 2] = ctxt->md5_st8[ 1]; digest[ 3] = ctxt->md5_st8[ 0];
18225 + digest[ 4] = ctxt->md5_st8[ 7]; digest[ 5] = ctxt->md5_st8[ 6];
18226 + digest[ 6] = ctxt->md5_st8[ 5]; digest[ 7] = ctxt->md5_st8[ 4];
18227 + digest[ 8] = ctxt->md5_st8[11]; digest[ 9] = ctxt->md5_st8[10];
18228 + digest[10] = ctxt->md5_st8[ 9]; digest[11] = ctxt->md5_st8[ 8];
18229 + digest[12] = ctxt->md5_st8[15]; digest[13] = ctxt->md5_st8[14];
18230 + digest[14] = ctxt->md5_st8[13]; digest[15] = ctxt->md5_st8[12];
18231 +#endif
18234 +static void md5_calc(b64, ctxt)
18235 + u_int8_t *b64;
18236 + md5_ctxt *ctxt;
18238 + u_int32_t A = ctxt->md5_sta;
18239 + u_int32_t B = ctxt->md5_stb;
18240 + u_int32_t C = ctxt->md5_stc;
18241 + u_int32_t D = ctxt->md5_std;
18242 +#if BYTE_ORDER == LITTLE_ENDIAN
18243 + u_int32_t *X = (u_int32_t *)b64;
18244 +#endif
18245 +#if BYTE_ORDER == BIG_ENDIAN
18246 + /* 4 byte words */
18247 + /* what a brute force but fast! */
18248 + u_int32_t X[16];
18249 + u_int8_t *y = (u_int8_t *)X;
18250 + y[ 0] = b64[ 3]; y[ 1] = b64[ 2]; y[ 2] = b64[ 1]; y[ 3] = b64[ 0];
18251 + y[ 4] = b64[ 7]; y[ 5] = b64[ 6]; y[ 6] = b64[ 5]; y[ 7] = b64[ 4];
18252 + y[ 8] = b64[11]; y[ 9] = b64[10]; y[10] = b64[ 9]; y[11] = b64[ 8];
18253 + y[12] = b64[15]; y[13] = b64[14]; y[14] = b64[13]; y[15] = b64[12];
18254 + y[16] = b64[19]; y[17] = b64[18]; y[18] = b64[17]; y[19] = b64[16];
18255 + y[20] = b64[23]; y[21] = b64[22]; y[22] = b64[21]; y[23] = b64[20];
18256 + y[24] = b64[27]; y[25] = b64[26]; y[26] = b64[25]; y[27] = b64[24];
18257 + y[28] = b64[31]; y[29] = b64[30]; y[30] = b64[29]; y[31] = b64[28];
18258 + y[32] = b64[35]; y[33] = b64[34]; y[34] = b64[33]; y[35] = b64[32];
18259 + y[36] = b64[39]; y[37] = b64[38]; y[38] = b64[37]; y[39] = b64[36];
18260 + y[40] = b64[43]; y[41] = b64[42]; y[42] = b64[41]; y[43] = b64[40];
18261 + y[44] = b64[47]; y[45] = b64[46]; y[46] = b64[45]; y[47] = b64[44];
18262 + y[48] = b64[51]; y[49] = b64[50]; y[50] = b64[49]; y[51] = b64[48];
18263 + y[52] = b64[55]; y[53] = b64[54]; y[54] = b64[53]; y[55] = b64[52];
18264 + y[56] = b64[59]; y[57] = b64[58]; y[58] = b64[57]; y[59] = b64[56];
18265 + y[60] = b64[63]; y[61] = b64[62]; y[62] = b64[61]; y[63] = b64[60];
18266 +#endif
18268 + ROUND1(A, B, C, D, 0, Sa, 1); ROUND1(D, A, B, C, 1, Sb, 2);
18269 + ROUND1(C, D, A, B, 2, Sc, 3); ROUND1(B, C, D, A, 3, Sd, 4);
18270 + ROUND1(A, B, C, D, 4, Sa, 5); ROUND1(D, A, B, C, 5, Sb, 6);
18271 + ROUND1(C, D, A, B, 6, Sc, 7); ROUND1(B, C, D, A, 7, Sd, 8);
18272 + ROUND1(A, B, C, D, 8, Sa, 9); ROUND1(D, A, B, C, 9, Sb, 10);
18273 + ROUND1(C, D, A, B, 10, Sc, 11); ROUND1(B, C, D, A, 11, Sd, 12);
18274 + ROUND1(A, B, C, D, 12, Sa, 13); ROUND1(D, A, B, C, 13, Sb, 14);
18275 + ROUND1(C, D, A, B, 14, Sc, 15); ROUND1(B, C, D, A, 15, Sd, 16);
18277 + ROUND2(A, B, C, D, 1, Se, 17); ROUND2(D, A, B, C, 6, Sf, 18);
18278 + ROUND2(C, D, A, B, 11, Sg, 19); ROUND2(B, C, D, A, 0, Sh, 20);
18279 + ROUND2(A, B, C, D, 5, Se, 21); ROUND2(D, A, B, C, 10, Sf, 22);
18280 + ROUND2(C, D, A, B, 15, Sg, 23); ROUND2(B, C, D, A, 4, Sh, 24);
18281 + ROUND2(A, B, C, D, 9, Se, 25); ROUND2(D, A, B, C, 14, Sf, 26);
18282 + ROUND2(C, D, A, B, 3, Sg, 27); ROUND2(B, C, D, A, 8, Sh, 28);
18283 + ROUND2(A, B, C, D, 13, Se, 29); ROUND2(D, A, B, C, 2, Sf, 30);
18284 + ROUND2(C, D, A, B, 7, Sg, 31); ROUND2(B, C, D, A, 12, Sh, 32);
18286 + ROUND3(A, B, C, D, 5, Si, 33); ROUND3(D, A, B, C, 8, Sj, 34);
18287 + ROUND3(C, D, A, B, 11, Sk, 35); ROUND3(B, C, D, A, 14, Sl, 36);
18288 + ROUND3(A, B, C, D, 1, Si, 37); ROUND3(D, A, B, C, 4, Sj, 38);
18289 + ROUND3(C, D, A, B, 7, Sk, 39); ROUND3(B, C, D, A, 10, Sl, 40);
18290 + ROUND3(A, B, C, D, 13, Si, 41); ROUND3(D, A, B, C, 0, Sj, 42);
18291 + ROUND3(C, D, A, B, 3, Sk, 43); ROUND3(B, C, D, A, 6, Sl, 44);
18292 + ROUND3(A, B, C, D, 9, Si, 45); ROUND3(D, A, B, C, 12, Sj, 46);
18293 + ROUND3(C, D, A, B, 15, Sk, 47); ROUND3(B, C, D, A, 2, Sl, 48);
18295 + ROUND4(A, B, C, D, 0, Sm, 49); ROUND4(D, A, B, C, 7, Sn, 50);
18296 + ROUND4(C, D, A, B, 14, So, 51); ROUND4(B, C, D, A, 5, Sp, 52);
18297 + ROUND4(A, B, C, D, 12, Sm, 53); ROUND4(D, A, B, C, 3, Sn, 54);
18298 + ROUND4(C, D, A, B, 10, So, 55); ROUND4(B, C, D, A, 1, Sp, 56);
18299 + ROUND4(A, B, C, D, 8, Sm, 57); ROUND4(D, A, B, C, 15, Sn, 58);
18300 + ROUND4(C, D, A, B, 6, So, 59); ROUND4(B, C, D, A, 13, Sp, 60);
18301 + ROUND4(A, B, C, D, 4, Sm, 61); ROUND4(D, A, B, C, 11, Sn, 62);
18302 + ROUND4(C, D, A, B, 2, So, 63); ROUND4(B, C, D, A, 9, Sp, 64);
18304 + ctxt->md5_sta += A;
18305 + ctxt->md5_stb += B;
18306 + ctxt->md5_stc += C;
18307 + ctxt->md5_std += D;
18309 diff -Nur linux-2.6.30.orig/crypto/ocf/safe/md5.h linux-2.6.30/crypto/ocf/safe/md5.h
18310 --- linux-2.6.30.orig/crypto/ocf/safe/md5.h 1970-01-01 01:00:00.000000000 +0100
18311 +++ linux-2.6.30/crypto/ocf/safe/md5.h 2009-06-11 10:55:27.000000000 +0200
18312 @@ -0,0 +1,76 @@
18313 +/* $FreeBSD: src/sys/crypto/md5.h,v 1.4 2002/03/20 05:13:50 alfred Exp $ */
18314 +/* $KAME: md5.h,v 1.4 2000/03/27 04:36:22 sumikawa Exp $ */
18317 + * Copyright (C) 1995, 1996, 1997, and 1998 WIDE Project.
18318 + * All rights reserved.
18320 + * Redistribution and use in source and binary forms, with or without
18321 + * modification, are permitted provided that the following conditions
18322 + * are met:
18323 + * 1. Redistributions of source code must retain the above copyright
18324 + * notice, this list of conditions and the following disclaimer.
18325 + * 2. Redistributions in binary form must reproduce the above copyright
18326 + * notice, this list of conditions and the following disclaimer in the
18327 + * documentation and/or other materials provided with the distribution.
18328 + * 3. Neither the name of the project nor the names of its contributors
18329 + * may be used to endorse or promote products derived from this software
18330 + * without specific prior written permission.
18332 + * THIS SOFTWARE IS PROVIDED BY THE PROJECT AND CONTRIBUTORS ``AS IS'' AND
18333 + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
18334 + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
18335 + * ARE DISCLAIMED. IN NO EVENT SHALL THE PROJECT OR CONTRIBUTORS BE LIABLE
18336 + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
18337 + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
18338 + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
18339 + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
18340 + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
18341 + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
18342 + * SUCH DAMAGE.
18343 + */
18345 +#ifndef _NETINET6_MD5_H_
18346 +#define _NETINET6_MD5_H_
18348 +#define MD5_BUFLEN 64
18350 +typedef struct {
18351 + union {
18352 + u_int32_t md5_state32[4];
18353 + u_int8_t md5_state8[16];
18354 + } md5_st;
18356 +#define md5_sta md5_st.md5_state32[0]
18357 +#define md5_stb md5_st.md5_state32[1]
18358 +#define md5_stc md5_st.md5_state32[2]
18359 +#define md5_std md5_st.md5_state32[3]
18360 +#define md5_st8 md5_st.md5_state8
18362 + union {
18363 + u_int64_t md5_count64;
18364 + u_int8_t md5_count8[8];
18365 + } md5_count;
18366 +#define md5_n md5_count.md5_count64
18367 +#define md5_n8 md5_count.md5_count8
18369 + u_int md5_i;
18370 + u_int8_t md5_buf[MD5_BUFLEN];
18371 +} md5_ctxt;
18373 +extern void md5_init(md5_ctxt *);
18374 +extern void md5_loop(md5_ctxt *, u_int8_t *, u_int);
18375 +extern void md5_pad(md5_ctxt *);
18376 +extern void md5_result(u_int8_t *, md5_ctxt *);
18378 +/* compatibility */
18379 +#define MD5_CTX md5_ctxt
18380 +#define MD5Init(x) md5_init((x))
18381 +#define MD5Update(x, y, z) md5_loop((x), (y), (z))
18382 +#define MD5Final(x, y) \
18383 +do { \
18384 + md5_pad((y)); \
18385 + md5_result((x), (y)); \
18386 +} while (0)
18388 +#endif /* ! _NETINET6_MD5_H_*/
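Typical use of the compatibility macros above (the data and digest buffers are
illustrative):

    MD5_CTX ctx;
    u_int8_t digest[16];

    MD5Init(&ctx);
    MD5Update(&ctx, data, datalen);     /* may be called repeatedly */
    MD5Final(digest, &ctx);             /* pads, then writes the 16-byte digest */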
18389 diff -Nur linux-2.6.30.orig/crypto/ocf/safe/safe.c linux-2.6.30/crypto/ocf/safe/safe.c
18390 --- linux-2.6.30.orig/crypto/ocf/safe/safe.c 1970-01-01 01:00:00.000000000 +0100
18391 +++ linux-2.6.30/crypto/ocf/safe/safe.c 2009-06-11 10:55:27.000000000 +0200
18392 @@ -0,0 +1,2288 @@
18393 +/*-
18394 + * Linux port done by David McCullough <david_mccullough@securecomputing.com>
18395 + * Copyright (C) 2004-2007 David McCullough
18396 + * The license and original author are listed below.
18398 + * Copyright (c) 2003 Sam Leffler, Errno Consulting
18399 + * Copyright (c) 2003 Global Technology Associates, Inc.
18400 + * All rights reserved.
18402 + * Redistribution and use in source and binary forms, with or without
18403 + * modification, are permitted provided that the following conditions
18404 + * are met:
18405 + * 1. Redistributions of source code must retain the above copyright
18406 + * notice, this list of conditions and the following disclaimer.
18407 + * 2. Redistributions in binary form must reproduce the above copyright
18408 + * notice, this list of conditions and the following disclaimer in the
18409 + * documentation and/or other materials provided with the distribution.
18411 + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
18412 + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
18413 + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
18414 + * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
18415 + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
18416 + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
18417 + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
18418 + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
18419 + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
18420 + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
18421 + * SUCH DAMAGE.
18423 +__FBSDID("$FreeBSD: src/sys/dev/safe/safe.c,v 1.18 2007/03/21 03:42:50 sam Exp $");
18424 + */
18426 +#ifndef AUTOCONF_INCLUDED
18427 +#include <linux/config.h>
18428 +#endif
18429 +#include <linux/module.h>
18430 +#include <linux/kernel.h>
18431 +#include <linux/init.h>
18432 +#include <linux/list.h>
18433 +#include <linux/slab.h>
18434 +#include <linux/wait.h>
18435 +#include <linux/sched.h>
18436 +#include <linux/pci.h>
18437 +#include <linux/delay.h>
18438 +#include <linux/interrupt.h>
18439 +#include <linux/spinlock.h>
18440 +#include <linux/random.h>
18441 +#include <linux/version.h>
18442 +#include <linux/skbuff.h>
18443 +#include <asm/io.h>
18446 + * SafeNet SafeXcel-1141 hardware crypto accelerator
18447 + */
18449 +#include <cryptodev.h>
18450 +#include <uio.h>
18451 +#include <safe/safereg.h>
18452 +#include <safe/safevar.h>
18454 +#if 1
18455 +#define DPRINTF(a) do { \
18456 + if (debug) { \
18457 + printk("%s: ", sc ? \
18458 + device_get_nameunit(sc->sc_dev) : "safe"); \
18459 + printk a; \
18460 + } \
18461 + } while (0)
18462 +#else
18463 +#define DPRINTF(a)
18464 +#endif
18467 + * until we find a cleaner way, include the BSD md5/sha1 code
18468 + * here
18469 + */
18470 +#define HMAC_HACK 1
18471 +#ifdef HMAC_HACK
18472 +#define LITTLE_ENDIAN 1234
18473 +#define BIG_ENDIAN 4321
18474 +#ifdef __LITTLE_ENDIAN
18475 +#define BYTE_ORDER LITTLE_ENDIAN
18476 +#endif
18477 +#ifdef __BIG_ENDIAN
18478 +#define BYTE_ORDER BIG_ENDIAN
18479 +#endif
18480 +#include <safe/md5.h>
18481 +#include <safe/md5.c>
18482 +#include <safe/sha1.h>
18483 +#include <safe/sha1.c>
18485 +u_int8_t hmac_ipad_buffer[64] = {
18486 + 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36,
18487 + 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36,
18488 + 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36,
18489 + 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36,
18490 + 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36,
18491 + 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36,
18492 + 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36,
18493 + 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36
18496 +u_int8_t hmac_opad_buffer[64] = {
18497 + 0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C,
18498 + 0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C,
18499 + 0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C,
18500 + 0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C,
18501 + 0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C,
18502 + 0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C,
18503 + 0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C,
18504 + 0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C
18506 +#endif /* HMAC_HACK */
18508 +/* add proc entry for this */
18509 +struct safe_stats safestats;
18511 +#define debug safe_debug
18512 +int safe_debug = 0;
18513 +module_param(safe_debug, int, 0644);
18514 +MODULE_PARM_DESC(safe_debug, "Enable debug");
18516 +static void safe_callback(struct safe_softc *, struct safe_ringentry *);
18517 +static void safe_feed(struct safe_softc *, struct safe_ringentry *);
18518 +#if defined(CONFIG_OCF_RANDOMHARVEST) && !defined(SAFE_NO_RNG)
18519 +static void safe_rng_init(struct safe_softc *);
18520 +int safe_rngbufsize = 8; /* 32 bytes each read */
18521 +module_param(safe_rngbufsize, int, 0644);
18522 +MODULE_PARM_DESC(safe_rngbufsize, "RNG polling buffer size (32-bit words)");
18523 +int safe_rngmaxalarm = 8; /* max alarms before reset */
18524 +module_param(safe_rngmaxalarm, int, 0644);
18525 +MODULE_PARM_DESC(safe_rngmaxalarm, "RNG max alarms before reset");
18526 +#endif /* SAFE_NO_RNG */
18528 +static void safe_totalreset(struct safe_softc *sc);
18529 +static int safe_dmamap_aligned(struct safe_softc *sc, const struct safe_operand *op);
18530 +static int safe_dmamap_uniform(struct safe_softc *sc, const struct safe_operand *op);
18531 +static int safe_free_entry(struct safe_softc *sc, struct safe_ringentry *re);
18532 +static int safe_kprocess(device_t dev, struct cryptkop *krp, int hint);
18533 +static int safe_kstart(struct safe_softc *sc);
18534 +static int safe_ksigbits(struct safe_softc *sc, struct crparam *cr);
18535 +static void safe_kfeed(struct safe_softc *sc);
18536 +static void safe_kpoll(unsigned long arg);
18537 +static void safe_kload_reg(struct safe_softc *sc, u_int32_t off,
18538 + u_int32_t len, struct crparam *n);
18540 +static int safe_newsession(device_t, u_int32_t *, struct cryptoini *);
18541 +static int safe_freesession(device_t, u_int64_t);
18542 +static int safe_process(device_t, struct cryptop *, int);
18544 +static device_method_t safe_methods = {
18545 + /* crypto device methods */
18546 + DEVMETHOD(cryptodev_newsession, safe_newsession),
18547 + DEVMETHOD(cryptodev_freesession,safe_freesession),
18548 + DEVMETHOD(cryptodev_process, safe_process),
18549 + DEVMETHOD(cryptodev_kprocess, safe_kprocess),
18552 +#define READ_REG(sc,r) readl((sc)->sc_base_addr + (r))
18553 +#define WRITE_REG(sc,r,val) writel((val), (sc)->sc_base_addr + (r))
18555 +#define SAFE_MAX_CHIPS 8
18556 +static struct safe_softc *safe_chip_idx[SAFE_MAX_CHIPS];
18559 + * split our buffers up into safely DMAable byte fragments to avoid a lockup
18560 + * bug in the 1141 HW on rev 1.0.
18561 + */
18563 +static int
18564 +pci_map_linear(
18565 + struct safe_softc *sc,
18566 + struct safe_operand *buf,
18567 + void *addr,
18568 + int len)
18570 + dma_addr_t tmp;
18571 + int chunk, tlen = len;
18573 + tmp = pci_map_single(sc->sc_pcidev, addr, len, PCI_DMA_BIDIRECTIONAL);
18575 + buf->mapsize += len;
18576 + while (len > 0) {
18577 + chunk = (len > sc->sc_max_dsize) ? sc->sc_max_dsize : len;
18578 + buf->segs[buf->nsegs].ds_addr = tmp;
18579 + buf->segs[buf->nsegs].ds_len = chunk;
18580 + buf->segs[buf->nsegs].ds_tlen = tlen;
18581 + buf->nsegs++;
18582 + tmp += chunk;
18583 + len -= chunk;
18584 + tlen = 0;
18586 + return 0;
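The splitting is plain arithmetic; an illustrative stand-alone sketch (the 4096-byte limit
stands in for sc->sc_max_dsize and is not taken from the patch) of how one linear buffer
becomes several segments:

    #include <stdio.h>

    int main(void)
    {
            int max_dsize = 4096;       /* hypothetical sc_max_dsize */
            int len = 10000, off = 0, chunk;

            while (len > 0) {
                    chunk = (len > max_dsize) ? max_dsize : len;
                    printf("segment: offset %d, length %d\n", off, chunk);
                    off += chunk;
                    len -= chunk;
            }
            return 0;
    }

Note that only the first segment records the total length in ds_tlen, so
pci_unmap_operand() later unmaps the single underlying PCI mapping exactly once.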
18590 + * map in a given uio buffer (great on some arches :-)
18591 + */
18593 +static int
18594 +pci_map_uio(struct safe_softc *sc, struct safe_operand *buf, struct uio *uio)
18596 + struct iovec *iov = uio->uio_iov;
18597 + int n;
18599 + DPRINTF(("%s()\n", __FUNCTION__));
18601 + buf->mapsize = 0;
18602 + buf->nsegs = 0;
18604 + for (n = 0; n < uio->uio_iovcnt; n++) {
18605 + pci_map_linear(sc, buf, iov->iov_base, iov->iov_len);
18606 + iov++;
18609 + /* identify this buffer by the first segment */
18610 + buf->map = (void *) buf->segs[0].ds_addr;
18611 + return(0);
18615 + * map in a given sk_buff
18616 + */
18618 +static int
18619 +pci_map_skb(struct safe_softc *sc,struct safe_operand *buf,struct sk_buff *skb)
18621 + int i;
18623 + DPRINTF(("%s()\n", __FUNCTION__));
18625 + buf->mapsize = 0;
18626 + buf->nsegs = 0;
18628 + pci_map_linear(sc, buf, skb->data, skb_headlen(skb));
18630 + for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
18631 + pci_map_linear(sc, buf,
18632 + page_address(skb_shinfo(skb)->frags[i].page) +
18633 + skb_shinfo(skb)->frags[i].page_offset,
18634 + skb_shinfo(skb)->frags[i].size);
18637 + /* identify this buffer by the first segment */
18638 + buf->map = (void *) buf->segs[0].ds_addr;
18639 + return(0);
18643 +#if 0 /* not needed at this time */
18644 +static void
18645 +pci_sync_operand(struct safe_softc *sc, struct safe_operand *buf)
18647 + int i;
18649 + DPRINTF(("%s()\n", __FUNCTION__));
18650 + for (i = 0; i < buf->nsegs; i++)
18651 + pci_dma_sync_single_for_cpu(sc->sc_pcidev, buf->segs[i].ds_addr,
18652 + buf->segs[i].ds_len, PCI_DMA_BIDIRECTIONAL);
18654 +#endif
18656 +static void
18657 +pci_unmap_operand(struct safe_softc *sc, struct safe_operand *buf)
18659 + int i;
18660 + DPRINTF(("%s()\n", __FUNCTION__));
18661 + for (i = 0; i < buf->nsegs; i++) {
18662 + if (buf->segs[i].ds_tlen) {
18663 + DPRINTF(("%s - unmap %d 0x%x %d\n", __FUNCTION__, i, buf->segs[i].ds_addr, buf->segs[i].ds_tlen));
18664 + pci_unmap_single(sc->sc_pcidev, buf->segs[i].ds_addr,
18665 + buf->segs[i].ds_tlen, PCI_DMA_BIDIRECTIONAL);
18666 + DPRINTF(("%s - unmap %d 0x%x %d done\n", __FUNCTION__, i, buf->segs[i].ds_addr, buf->segs[i].ds_tlen));
18668 + buf->segs[i].ds_addr = 0;
18669 + buf->segs[i].ds_len = 0;
18670 + buf->segs[i].ds_tlen = 0;
18672 + buf->nsegs = 0;
18673 + buf->mapsize = 0;
18674 + buf->map = 0;
18679 + * SafeXcel Interrupt routine
18680 + */
18681 +static irqreturn_t
18682 +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,19)
18683 +safe_intr(int irq, void *arg)
18684 +#else
18685 +safe_intr(int irq, void *arg, struct pt_regs *regs)
18686 +#endif
18688 + struct safe_softc *sc = arg;
18689 + int stat;
18690 + unsigned long flags;
18692 + stat = READ_REG(sc, SAFE_HM_STAT);
18694 + DPRINTF(("%s(stat=0x%x)\n", __FUNCTION__, stat));
18696 + if (stat == 0) /* shared irq, not for us */
18697 + return IRQ_NONE;
18699 + WRITE_REG(sc, SAFE_HI_CLR, stat); /* IACK */
18701 + if ((stat & SAFE_INT_PE_DDONE)) {
18702 + /*
18703 + * Descriptor(s) done; scan the ring and
18704 + * process completed operations.
18705 + */
18706 + spin_lock_irqsave(&sc->sc_ringmtx, flags);
18707 + while (sc->sc_back != sc->sc_front) {
18708 + struct safe_ringentry *re = sc->sc_back;
18710 +#ifdef SAFE_DEBUG
18711 + if (debug) {
18712 + safe_dump_ringstate(sc, __func__);
18713 + safe_dump_request(sc, __func__, re);
18715 +#endif
18716 + /*
18717 + * safe_process marks ring entries that were allocated
18718 + * but not used with a csr of zero. This ensures the
18719 + * ring front pointer never needs to be set backwards
18720 + * in the event that an entry is allocated but not used
18721 + * because of a setup error.
18722 + */
18723 + DPRINTF(("%s re->re_desc.d_csr=0x%x\n", __FUNCTION__, re->re_desc.d_csr));
18724 + if (re->re_desc.d_csr != 0) {
18725 + if (!SAFE_PE_CSR_IS_DONE(re->re_desc.d_csr)) {
18726 + DPRINTF(("%s !CSR_IS_DONE\n", __FUNCTION__));
18727 + break;
18729 + if (!SAFE_PE_LEN_IS_DONE(re->re_desc.d_len)) {
18730 + DPRINTF(("%s !LEN_IS_DONE\n", __FUNCTION__));
18731 + break;
18733 + sc->sc_nqchip--;
18734 + safe_callback(sc, re);
18736 + if (++(sc->sc_back) == sc->sc_ringtop)
18737 + sc->sc_back = sc->sc_ring;
18739 + spin_unlock_irqrestore(&sc->sc_ringmtx, flags);
18742 + /*
18743 + * Check to see if we got any DMA errors
18744 + */
18745 + if (stat & SAFE_INT_PE_ERROR) {
18746 + printk("%s: dmaerr dmastat %08x\n", device_get_nameunit(sc->sc_dev),
18747 + (int)READ_REG(sc, SAFE_PE_DMASTAT));
18748 + safestats.st_dmaerr++;
18749 + safe_totalreset(sc);
18750 +#if 0
18751 + safe_feed(sc);
18752 +#endif
18755 + if (sc->sc_needwakeup) { /* XXX check high watermark */
18756 + int wakeup = sc->sc_needwakeup & (CRYPTO_SYMQ|CRYPTO_ASYMQ);
18757 + DPRINTF(("%s: wakeup crypto %x\n", __func__,
18758 + sc->sc_needwakeup));
18759 + sc->sc_needwakeup &= ~wakeup;
18760 + crypto_unblock(sc->sc_cid, wakeup);
18763 + return IRQ_HANDLED;
18767 + * safe_feed() - post a request to chip
18768 + */
18769 +static void
18770 +safe_feed(struct safe_softc *sc, struct safe_ringentry *re)
18772 + DPRINTF(("%s()\n", __FUNCTION__));
18773 +#ifdef SAFE_DEBUG
18774 + if (debug) {
18775 + safe_dump_ringstate(sc, __func__);
18776 + safe_dump_request(sc, __func__, re);
18778 +#endif
18779 + sc->sc_nqchip++;
18780 + if (sc->sc_nqchip > safestats.st_maxqchip)
18781 + safestats.st_maxqchip = sc->sc_nqchip;
18782 + /* poke h/w to check descriptor ring, any value can be written */
18783 + WRITE_REG(sc, SAFE_HI_RD_DESCR, 0);
18786 +#define N(a) (sizeof(a) / sizeof (a[0]))
18787 +static void
18788 +safe_setup_enckey(struct safe_session *ses, caddr_t key)
18790 + int i;
18792 + bcopy(key, ses->ses_key, ses->ses_klen / 8);
18794 + /* PE is little-endian, ensure proper byte order */
18795 + for (i = 0; i < N(ses->ses_key); i++)
18796 + ses->ses_key[i] = htole32(ses->ses_key[i]);
18799 +static void
18800 +safe_setup_mackey(struct safe_session *ses, int algo, caddr_t key, int klen)
18802 +#ifdef HMAC_HACK
18803 + MD5_CTX md5ctx;
18804 + SHA1_CTX sha1ctx;
18805 + int i;
18808 + for (i = 0; i < klen; i++)
18809 + key[i] ^= HMAC_IPAD_VAL;
18811 + if (algo == CRYPTO_MD5_HMAC) {
18812 + MD5Init(&md5ctx);
18813 + MD5Update(&md5ctx, key, klen);
18814 + MD5Update(&md5ctx, hmac_ipad_buffer, MD5_HMAC_BLOCK_LEN - klen);
18815 + bcopy(md5ctx.md5_st8, ses->ses_hminner, sizeof(md5ctx.md5_st8));
18816 + } else {
18817 + SHA1Init(&sha1ctx);
18818 + SHA1Update(&sha1ctx, key, klen);
18819 + SHA1Update(&sha1ctx, hmac_ipad_buffer,
18820 + SHA1_HMAC_BLOCK_LEN - klen);
18821 + bcopy(sha1ctx.h.b32, ses->ses_hminner, sizeof(sha1ctx.h.b32));
18824 + for (i = 0; i < klen; i++)
18825 + key[i] ^= (HMAC_IPAD_VAL ^ HMAC_OPAD_VAL);
18827 + if (algo == CRYPTO_MD5_HMAC) {
18828 + MD5Init(&md5ctx);
18829 + MD5Update(&md5ctx, key, klen);
18830 + MD5Update(&md5ctx, hmac_opad_buffer, MD5_HMAC_BLOCK_LEN - klen);
18831 + bcopy(md5ctx.md5_st8, ses->ses_hmouter, sizeof(md5ctx.md5_st8));
18832 + } else {
18833 + SHA1Init(&sha1ctx);
18834 + SHA1Update(&sha1ctx, key, klen);
18835 + SHA1Update(&sha1ctx, hmac_opad_buffer,
18836 + SHA1_HMAC_BLOCK_LEN - klen);
18837 + bcopy(sha1ctx.h.b32, ses->ses_hmouter, sizeof(sha1ctx.h.b32));
18840 + for (i = 0; i < klen; i++)
18841 + key[i] ^= HMAC_OPAD_VAL;
18843 +#if 0
18844 + /*
18845 + * this code prevents SHA from working on a BE host,
18846 + * so it is obviously wrong. I think the byte
18847 + * swap setup we do with the chip fixes this for us
18848 + */
18850 + /* PE is little-endian, ensure proper byte order */
18851 + for (i = 0; i < N(ses->ses_hminner); i++) {
18852 + ses->ses_hminner[i] = htole32(ses->ses_hminner[i]);
18853 + ses->ses_hmouter[i] = htole32(ses->ses_hmouter[i]);
18855 +#endif
18856 +#else /* HMAC_HACK */
18857 + printk("safe: md5/sha not implemented\n");
18858 +#endif /* HMAC_HACK */
18860 +#undef N
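For reference, safe_setup_mackey() above precomputes the two intermediate hash states of
the standard HMAC construction (not restated in the patch itself),

    \mathrm{HMAC}_K(m) = H\bigl((K \oplus \mathrm{opad}) \parallel H((K \oplus \mathrm{ipad}) \parallel m)\bigr)

where ipad is the byte 0x36 and opad the byte 0x5C repeated to the hash block length (the
hmac_ipad_buffer and hmac_opad_buffer arrays near the top of this file); the hardware then
only has to continue each hash over the message and the inner digest.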
18863 + * Allocate a new 'session' and return an encoded session id. 'sidp'
18864 + * contains our registration id, and should contain an encoded session
18865 + * id on successful allocation.
18866 + */
18867 +static int
18868 +safe_newsession(device_t dev, u_int32_t *sidp, struct cryptoini *cri)
18870 + struct safe_softc *sc = device_get_softc(dev);
18871 + struct cryptoini *c, *encini = NULL, *macini = NULL;
18872 + struct safe_session *ses = NULL;
18873 + int sesn;
18875 + DPRINTF(("%s()\n", __FUNCTION__));
18877 + if (sidp == NULL || cri == NULL || sc == NULL)
18878 + return (EINVAL);
18880 + for (c = cri; c != NULL; c = c->cri_next) {
18881 + if (c->cri_alg == CRYPTO_MD5_HMAC ||
18882 + c->cri_alg == CRYPTO_SHA1_HMAC ||
18883 + c->cri_alg == CRYPTO_NULL_HMAC) {
18884 + if (macini)
18885 + return (EINVAL);
18886 + macini = c;
18887 + } else if (c->cri_alg == CRYPTO_DES_CBC ||
18888 + c->cri_alg == CRYPTO_3DES_CBC ||
18889 + c->cri_alg == CRYPTO_AES_CBC ||
18890 + c->cri_alg == CRYPTO_NULL_CBC) {
18891 + if (encini)
18892 + return (EINVAL);
18893 + encini = c;
18894 + } else
18895 + return (EINVAL);
18897 + if (encini == NULL && macini == NULL)
18898 + return (EINVAL);
18899 + if (encini) { /* validate key length */
18900 + switch (encini->cri_alg) {
18901 + case CRYPTO_DES_CBC:
18902 + if (encini->cri_klen != 64)
18903 + return (EINVAL);
18904 + break;
18905 + case CRYPTO_3DES_CBC:
18906 + if (encini->cri_klen != 192)
18907 + return (EINVAL);
18908 + break;
18909 + case CRYPTO_AES_CBC:
18910 + if (encini->cri_klen != 128 &&
18911 + encini->cri_klen != 192 &&
18912 + encini->cri_klen != 256)
18913 + return (EINVAL);
18914 + break;
18918 + if (sc->sc_sessions == NULL) {
18919 + ses = sc->sc_sessions = (struct safe_session *)
18920 + kmalloc(sizeof(struct safe_session), SLAB_ATOMIC);
18921 + if (ses == NULL)
18922 + return (ENOMEM);
18923 + memset(ses, 0, sizeof(struct safe_session));
18924 + sesn = 0;
18925 + sc->sc_nsessions = 1;
18926 + } else {
18927 + for (sesn = 0; sesn < sc->sc_nsessions; sesn++) {
18928 + if (sc->sc_sessions[sesn].ses_used == 0) {
18929 + ses = &sc->sc_sessions[sesn];
18930 + break;
18934 + if (ses == NULL) {
18935 + sesn = sc->sc_nsessions;
18936 + ses = (struct safe_session *)
18937 + kmalloc((sesn + 1) * sizeof(struct safe_session), SLAB_ATOMIC);
18938 + if (ses == NULL)
18939 + return (ENOMEM);
18940 + memset(ses, 0, (sesn + 1) * sizeof(struct safe_session));
18941 + bcopy(sc->sc_sessions, ses, sesn *
18942 + sizeof(struct safe_session));
18943 + bzero(sc->sc_sessions, sesn *
18944 + sizeof(struct safe_session));
18945 + kfree(sc->sc_sessions);
18946 + sc->sc_sessions = ses;
18947 + ses = &sc->sc_sessions[sesn];
18948 + sc->sc_nsessions++;
18952 + bzero(ses, sizeof(struct safe_session));
18953 + ses->ses_used = 1;
18955 + if (encini) {
18956 + /* get an IV */
18957 + /* XXX may read fewer than requested */
18958 + read_random(ses->ses_iv, sizeof(ses->ses_iv));
18960 + ses->ses_klen = encini->cri_klen;
18961 + if (encini->cri_key != NULL)
18962 + safe_setup_enckey(ses, encini->cri_key);
18965 + if (macini) {
18966 + ses->ses_mlen = macini->cri_mlen;
18967 + if (ses->ses_mlen == 0) {
18968 + if (macini->cri_alg == CRYPTO_MD5_HMAC)
18969 + ses->ses_mlen = MD5_HASH_LEN;
18970 + else
18971 + ses->ses_mlen = SHA1_HASH_LEN;
18974 + if (macini->cri_key != NULL) {
18975 + safe_setup_mackey(ses, macini->cri_alg, macini->cri_key,
18976 + macini->cri_klen / 8);
18980 + *sidp = SAFE_SID(device_get_unit(sc->sc_dev), sesn);
18981 + return (0);
18985 + * Deallocate a session.
18986 + */
18987 +static int
18988 +safe_freesession(device_t dev, u_int64_t tid)
18990 + struct safe_softc *sc = device_get_softc(dev);
18991 + int session, ret;
18992 + u_int32_t sid = ((u_int32_t) tid) & 0xffffffff;
18994 + DPRINTF(("%s()\n", __FUNCTION__));
18996 + if (sc == NULL)
18997 + return (EINVAL);
18999 + session = SAFE_SESSION(sid);
19000 + if (session < sc->sc_nsessions) {
19001 + bzero(&sc->sc_sessions[session], sizeof(sc->sc_sessions[session]));
19002 + ret = 0;
19003 + } else
19004 + ret = EINVAL;
19005 + return (ret);
19009 +static int
19010 +safe_process(device_t dev, struct cryptop *crp, int hint)
19012 + struct safe_softc *sc = device_get_softc(dev);
19013 + int err = 0, i, nicealign, uniform;
19014 + struct cryptodesc *crd1, *crd2, *maccrd, *enccrd;
19015 + int bypass, oplen, ivsize;
19016 + caddr_t iv;
19017 + int16_t coffset;
19018 + struct safe_session *ses;
19019 + struct safe_ringentry *re;
19020 + struct safe_sarec *sa;
19021 + struct safe_pdesc *pd;
19022 + u_int32_t cmd0, cmd1, staterec;
19023 + unsigned long flags;
19025 + DPRINTF(("%s()\n", __FUNCTION__));
19027 + if (crp == NULL || crp->crp_callback == NULL || sc == NULL) {
19028 + safestats.st_invalid++;
19029 + return (EINVAL);
19031 + if (SAFE_SESSION(crp->crp_sid) >= sc->sc_nsessions) {
19032 + safestats.st_badsession++;
19033 + return (EINVAL);
19036 + spin_lock_irqsave(&sc->sc_ringmtx, flags);
19037 + if (sc->sc_front == sc->sc_back && sc->sc_nqchip != 0) {
19038 + safestats.st_ringfull++;
19039 + sc->sc_needwakeup |= CRYPTO_SYMQ;
19040 + spin_unlock_irqrestore(&sc->sc_ringmtx, flags);
19041 + return (ERESTART);
19043 + re = sc->sc_front;
19045 + staterec = re->re_sa.sa_staterec; /* save */
19046 + /* NB: zero everything but the PE descriptor */
19047 + bzero(&re->re_sa, sizeof(struct safe_ringentry) - sizeof(re->re_desc));
19048 + re->re_sa.sa_staterec = staterec; /* restore */
19050 + re->re_crp = crp;
19051 + re->re_sesn = SAFE_SESSION(crp->crp_sid);
19053 + re->re_src.nsegs = 0;
19054 + re->re_dst.nsegs = 0;
19056 + if (crp->crp_flags & CRYPTO_F_SKBUF) {
19057 + re->re_src_skb = (struct sk_buff *)crp->crp_buf;
19058 + re->re_dst_skb = (struct sk_buff *)crp->crp_buf;
19059 + } else if (crp->crp_flags & CRYPTO_F_IOV) {
19060 + re->re_src_io = (struct uio *)crp->crp_buf;
19061 + re->re_dst_io = (struct uio *)crp->crp_buf;
19062 + } else {
19063 + safestats.st_badflags++;
19064 + err = EINVAL;
19065 + goto errout; /* XXX we don't handle contiguous blocks! */
19068 + sa = &re->re_sa;
19069 + ses = &sc->sc_sessions[re->re_sesn];
19071 + crd1 = crp->crp_desc;
19072 + if (crd1 == NULL) {
19073 + safestats.st_nodesc++;
19074 + err = EINVAL;
19075 + goto errout;
19077 + crd2 = crd1->crd_next;
19079 + cmd0 = SAFE_SA_CMD0_BASIC; /* basic group operation */
19080 + cmd1 = 0;
19081 + if (crd2 == NULL) {
19082 + if (crd1->crd_alg == CRYPTO_MD5_HMAC ||
19083 + crd1->crd_alg == CRYPTO_SHA1_HMAC ||
19084 + crd1->crd_alg == CRYPTO_NULL_HMAC) {
19085 + maccrd = crd1;
19086 + enccrd = NULL;
19087 + cmd0 |= SAFE_SA_CMD0_OP_HASH;
19088 + } else if (crd1->crd_alg == CRYPTO_DES_CBC ||
19089 + crd1->crd_alg == CRYPTO_3DES_CBC ||
19090 + crd1->crd_alg == CRYPTO_AES_CBC ||
19091 + crd1->crd_alg == CRYPTO_NULL_CBC) {
19092 + maccrd = NULL;
19093 + enccrd = crd1;
19094 + cmd0 |= SAFE_SA_CMD0_OP_CRYPT;
19095 + } else {
19096 + safestats.st_badalg++;
19097 + err = EINVAL;
19098 + goto errout;
19100 + } else {
19101 + if ((crd1->crd_alg == CRYPTO_MD5_HMAC ||
19102 + crd1->crd_alg == CRYPTO_SHA1_HMAC ||
19103 + crd1->crd_alg == CRYPTO_NULL_HMAC) &&
19104 + (crd2->crd_alg == CRYPTO_DES_CBC ||
19105 + crd2->crd_alg == CRYPTO_3DES_CBC ||
19106 + crd2->crd_alg == CRYPTO_AES_CBC ||
19107 + crd2->crd_alg == CRYPTO_NULL_CBC) &&
19108 + ((crd2->crd_flags & CRD_F_ENCRYPT) == 0)) {
19109 + maccrd = crd1;
19110 + enccrd = crd2;
19111 + } else if ((crd1->crd_alg == CRYPTO_DES_CBC ||
19112 + crd1->crd_alg == CRYPTO_3DES_CBC ||
19113 + crd1->crd_alg == CRYPTO_AES_CBC ||
19114 + crd1->crd_alg == CRYPTO_NULL_CBC) &&
19115 + (crd2->crd_alg == CRYPTO_MD5_HMAC ||
19116 + crd2->crd_alg == CRYPTO_SHA1_HMAC ||
19117 + crd2->crd_alg == CRYPTO_NULL_HMAC) &&
19118 + (crd1->crd_flags & CRD_F_ENCRYPT)) {
19119 + enccrd = crd1;
19120 + maccrd = crd2;
19121 + } else {
19122 + safestats.st_badalg++;
19123 + err = EINVAL;
19124 + goto errout;
19126 + cmd0 |= SAFE_SA_CMD0_OP_BOTH;
19129 + if (enccrd) {
19130 + if (enccrd->crd_flags & CRD_F_KEY_EXPLICIT)
19131 + safe_setup_enckey(ses, enccrd->crd_key);
19133 + if (enccrd->crd_alg == CRYPTO_DES_CBC) {
19134 + cmd0 |= SAFE_SA_CMD0_DES;
19135 + cmd1 |= SAFE_SA_CMD1_CBC;
19136 + ivsize = 2*sizeof(u_int32_t);
19137 + } else if (enccrd->crd_alg == CRYPTO_3DES_CBC) {
19138 + cmd0 |= SAFE_SA_CMD0_3DES;
19139 + cmd1 |= SAFE_SA_CMD1_CBC;
19140 + ivsize = 2*sizeof(u_int32_t);
19141 + } else if (enccrd->crd_alg == CRYPTO_AES_CBC) {
19142 + cmd0 |= SAFE_SA_CMD0_AES;
19143 + cmd1 |= SAFE_SA_CMD1_CBC;
19144 + if (ses->ses_klen == 128)
19145 + cmd1 |= SAFE_SA_CMD1_AES128;
19146 + else if (ses->ses_klen == 192)
19147 + cmd1 |= SAFE_SA_CMD1_AES192;
19148 + else
19149 + cmd1 |= SAFE_SA_CMD1_AES256;
19150 + ivsize = 4*sizeof(u_int32_t);
19151 + } else {
19152 + cmd0 |= SAFE_SA_CMD0_CRYPT_NULL;
19153 + ivsize = 0;
19156 + /*
19157 + * Setup encrypt/decrypt state. When using basic ops
19158 + * we can't use an inline IV because hash/crypt offset
19159 + * must be from the end of the IV to the start of the
19160 + * crypt data and this leaves out the preceding header
19161 + * from the hash calculation. Instead we place the IV
19162 + * in the state record and set the hash/crypt offset to
19163 + * copy both the header+IV.
19164 + */
19165 + if (enccrd->crd_flags & CRD_F_ENCRYPT) {
19166 + cmd0 |= SAFE_SA_CMD0_OUTBOUND;
19168 + if (enccrd->crd_flags & CRD_F_IV_EXPLICIT)
19169 + iv = enccrd->crd_iv;
19170 + else
19171 + iv = (caddr_t) ses->ses_iv;
19172 + if ((enccrd->crd_flags & CRD_F_IV_PRESENT) == 0) {
19173 + crypto_copyback(crp->crp_flags, crp->crp_buf,
19174 + enccrd->crd_inject, ivsize, iv);
19176 + bcopy(iv, re->re_sastate.sa_saved_iv, ivsize);
19177 + /* make iv LE */
19178 + for (i = 0; i < ivsize/sizeof(re->re_sastate.sa_saved_iv[0]); i++)
19179 + re->re_sastate.sa_saved_iv[i] =
19180 + cpu_to_le32(re->re_sastate.sa_saved_iv[i]);
19181 + cmd0 |= SAFE_SA_CMD0_IVLD_STATE | SAFE_SA_CMD0_SAVEIV;
19182 + re->re_flags |= SAFE_QFLAGS_COPYOUTIV;
19183 + } else {
19184 + cmd0 |= SAFE_SA_CMD0_INBOUND;
19186 + if (enccrd->crd_flags & CRD_F_IV_EXPLICIT) {
19187 + bcopy(enccrd->crd_iv,
19188 + re->re_sastate.sa_saved_iv, ivsize);
19189 + } else {
19190 + crypto_copydata(crp->crp_flags, crp->crp_buf,
19191 + enccrd->crd_inject, ivsize,
19192 + (caddr_t)re->re_sastate.sa_saved_iv);
19194 + /* make iv LE */
19195 + for (i = 0; i < ivsize/sizeof(re->re_sastate.sa_saved_iv[0]); i++)
19196 + re->re_sastate.sa_saved_iv[i] =
19197 + cpu_to_le32(re->re_sastate.sa_saved_iv[i]);
19198 + cmd0 |= SAFE_SA_CMD0_IVLD_STATE;
19200 + /*
19201 + * For basic encryption use the zero pad algorithm.
19202 + * This pads results to an 8-byte boundary and
19203 + * suppresses padding verification for inbound (i.e.
19204 + * decrypt) operations.
19206 + * NB: Not sure if the 8-byte pad boundary is a problem.
19207 + */
19208 + cmd0 |= SAFE_SA_CMD0_PAD_ZERO;
19210 + /* XXX assert key bufs have the same size */
19211 + bcopy(ses->ses_key, sa->sa_key, sizeof(sa->sa_key));
19214 + if (maccrd) {
19215 + if (maccrd->crd_flags & CRD_F_KEY_EXPLICIT) {
19216 + safe_setup_mackey(ses, maccrd->crd_alg,
19217 + maccrd->crd_key, maccrd->crd_klen / 8);
19220 + if (maccrd->crd_alg == CRYPTO_MD5_HMAC) {
19221 + cmd0 |= SAFE_SA_CMD0_MD5;
19222 + cmd1 |= SAFE_SA_CMD1_HMAC; /* NB: enable HMAC */
19223 + } else if (maccrd->crd_alg == CRYPTO_SHA1_HMAC) {
19224 + cmd0 |= SAFE_SA_CMD0_SHA1;
19225 + cmd1 |= SAFE_SA_CMD1_HMAC; /* NB: enable HMAC */
19226 + } else {
19227 + cmd0 |= SAFE_SA_CMD0_HASH_NULL;
19229 + /*
19230 + * Digest data is loaded from the SA and the hash
19231 + * result is saved to the state block where we
19232 + * retrieve it for return to the caller.
19233 + */
19234 + /* XXX assert digest bufs have the same size */
19235 + bcopy(ses->ses_hminner, sa->sa_indigest,
19236 + sizeof(sa->sa_indigest));
19237 + bcopy(ses->ses_hmouter, sa->sa_outdigest,
19238 + sizeof(sa->sa_outdigest));
19240 + cmd0 |= SAFE_SA_CMD0_HSLD_SA | SAFE_SA_CMD0_SAVEHASH;
19241 + re->re_flags |= SAFE_QFLAGS_COPYOUTICV;
19244 + if (enccrd && maccrd) {
19245 + /*
19246 + * The offset from hash data to the start of
19247 + * crypt data is the difference in the skips.
19248 + */
19249 + bypass = maccrd->crd_skip;
19250 + coffset = enccrd->crd_skip - maccrd->crd_skip;
19251 + if (coffset < 0) {
19252 + DPRINTF(("%s: hash does not precede crypt; "
19253 + "mac skip %u enc skip %u\n",
19254 + __func__, maccrd->crd_skip, enccrd->crd_skip));
19255 + safestats.st_skipmismatch++;
19256 + err = EINVAL;
19257 + goto errout;
19259 + oplen = enccrd->crd_skip + enccrd->crd_len;
19260 + if (maccrd->crd_skip + maccrd->crd_len != oplen) {
19261 + DPRINTF(("%s: hash amount %u != crypt amount %u\n",
19262 + __func__, maccrd->crd_skip + maccrd->crd_len,
19263 + oplen));
19264 + safestats.st_lenmismatch++;
19265 + err = EINVAL;
19266 + goto errout;
19268 +#ifdef SAFE_DEBUG
19269 + if (debug) {
19270 + printf("mac: skip %d, len %d, inject %d\n",
19271 + maccrd->crd_skip, maccrd->crd_len,
19272 + maccrd->crd_inject);
19273 + printf("enc: skip %d, len %d, inject %d\n",
19274 + enccrd->crd_skip, enccrd->crd_len,
19275 + enccrd->crd_inject);
19276 + printf("bypass %d coffset %d oplen %d\n",
19277 + bypass, coffset, oplen);
19279 +#endif
19280 + if (coffset & 3) { /* offset must be 32-bit aligned */
19281 + DPRINTF(("%s: coffset %u misaligned\n",
19282 + __func__, coffset));
19283 + safestats.st_coffmisaligned++;
19284 + err = EINVAL;
19285 + goto errout;
19287 + coffset >>= 2;
19288 + if (coffset > 255) { /* offset must be <256 dwords */
19289 + DPRINTF(("%s: coffset %u too big\n",
19290 + __func__, coffset));
19291 + safestats.st_cofftoobig++;
19292 + err = EINVAL;
19293 + goto errout;
19295 + /*
19296 + * Tell the hardware to copy the header to the output.
19297 + * The header is defined as the data from the end of
19298 + * the bypass to the start of data to be encrypted.
19299 + * Typically this is the inline IV. Note that you need
19300 + * to do this even if src+dst are the same; it appears
19301 + * that w/o this bit the crypted data is written
19302 + * immediately after the bypass data.
19303 + */
19304 + cmd1 |= SAFE_SA_CMD1_HDRCOPY;
19305 + /*
19306 + * Disable IP header mutable bit handling. This is
19307 + * needed to get correct HMAC calculations.
19308 + */
19309 + cmd1 |= SAFE_SA_CMD1_MUTABLE;
19310 + } else {
19311 + if (enccrd) {
19312 + bypass = enccrd->crd_skip;
19313 + oplen = bypass + enccrd->crd_len;
19314 + } else {
19315 + bypass = maccrd->crd_skip;
19316 + oplen = bypass + maccrd->crd_len;
19318 + coffset = 0;
19320 + /* XXX verify multiple of 4 when using s/g */
19321 + if (bypass > 96) { /* bypass offset must be <= 96 bytes */
19322 + DPRINTF(("%s: bypass %u too big\n", __func__, bypass));
19323 + safestats.st_bypasstoobig++;
19324 + err = EINVAL;
19325 + goto errout;
19328 + if (crp->crp_flags & CRYPTO_F_SKBUF) {
19329 + if (pci_map_skb(sc, &re->re_src, re->re_src_skb)) {
19330 + safestats.st_noload++;
19331 + err = ENOMEM;
19332 + goto errout;
19334 + } else if (crp->crp_flags & CRYPTO_F_IOV) {
19335 + if (pci_map_uio(sc, &re->re_src, re->re_src_io)) {
19336 + safestats.st_noload++;
19337 + err = ENOMEM;
19338 + goto errout;
19341 + nicealign = safe_dmamap_aligned(sc, &re->re_src);
19342 + uniform = safe_dmamap_uniform(sc, &re->re_src);
19344 + DPRINTF(("src nicealign %u uniform %u nsegs %u\n",
19345 + nicealign, uniform, re->re_src.nsegs));
19346 + if (re->re_src.nsegs > 1) {
19347 + re->re_desc.d_src = sc->sc_spalloc.dma_paddr +
19348 + ((caddr_t) sc->sc_spfree - (caddr_t) sc->sc_spring);
19349 + for (i = 0; i < re->re_src_nsegs; i++) {
19350 + /* NB: no need to check if there's space */
19351 + pd = sc->sc_spfree;
19352 + if (++(sc->sc_spfree) == sc->sc_springtop)
19353 + sc->sc_spfree = sc->sc_spring;
19355 + KASSERT((pd->pd_flags&3) == 0 ||
19356 + (pd->pd_flags&3) == SAFE_PD_DONE,
19357 + ("bogus source particle descriptor; flags %x",
19358 + pd->pd_flags));
19359 + pd->pd_addr = re->re_src_segs[i].ds_addr;
19360 + pd->pd_size = re->re_src_segs[i].ds_len;
19361 + pd->pd_flags = SAFE_PD_READY;
19363 + cmd0 |= SAFE_SA_CMD0_IGATHER;
19364 + } else {
19365 + /*
19366 + * No need for gather, reference the operand directly.
19367 + */
19368 + re->re_desc.d_src = re->re_src_segs[0].ds_addr;
19371 + if (enccrd == NULL && maccrd != NULL) {
19372 + /*
19373 + * Hash op; no destination needed.
19374 + */
19375 + } else {
19376 + if (crp->crp_flags & (CRYPTO_F_IOV|CRYPTO_F_SKBUF)) {
19377 + if (!nicealign) {
19378 + safestats.st_iovmisaligned++;
19379 + err = EINVAL;
19380 + goto errout;
19382 + if (uniform != 1) {
19383 + device_printf(sc->sc_dev, "!uniform source\n");
19384 + if (!uniform) {
19385 + /*
19386 + * There's no way to handle the DMA
19387 + * requirements with this uio. We
19388 + * could create a separate DMA area for
19389 + * the result and then copy it back,
19390 + * but for now we just bail and return
19391 + * an error. Note that uio requests
19392 + * > SAFE_MAX_DSIZE are handled because
19393 + * the DMA map and segment list for the
19394 + * destination will result in a
19395 + * destination particle list that does
19396 + * the necessary scatter DMA.
19397 + */
19398 + safestats.st_iovnotuniform++;
19399 + err = EINVAL;
19400 + goto errout;
19402 + } else
19403 + re->re_dst = re->re_src;
19404 + } else {
19405 + safestats.st_badflags++;
19406 + err = EINVAL;
19407 + goto errout;
19410 + if (re->re_dst.nsegs > 1) {
19411 + re->re_desc.d_dst = sc->sc_dpalloc.dma_paddr +
19412 + ((caddr_t) sc->sc_dpfree - (caddr_t) sc->sc_dpring);
19413 + for (i = 0; i < re->re_dst_nsegs; i++) {
19414 + pd = sc->sc_dpfree;
19415 + KASSERT((pd->pd_flags&3) == 0 ||
19416 + (pd->pd_flags&3) == SAFE_PD_DONE,
19417 + ("bogus dest particle descriptor; flags %x",
19418 + pd->pd_flags));
19419 + if (++(sc->sc_dpfree) == sc->sc_dpringtop)
19420 + sc->sc_dpfree = sc->sc_dpring;
19421 + pd->pd_addr = re->re_dst_segs[i].ds_addr;
19422 + pd->pd_flags = SAFE_PD_READY;
19424 + cmd0 |= SAFE_SA_CMD0_OSCATTER;
19425 + } else {
19426 + /*
19427 + * No need for scatter, reference the operand directly.
19428 + */
19429 + re->re_desc.d_dst = re->re_dst_segs[0].ds_addr;
19433 + /*
19434 + * All done with setup; fillin the SA command words
19435 + * and the packet engine descriptor. The operation
19436 + * is now ready for submission to the hardware.
19437 + */
19438 + sa->sa_cmd0 = cmd0 | SAFE_SA_CMD0_IPCI | SAFE_SA_CMD0_OPCI;
19439 + sa->sa_cmd1 = cmd1
19440 + | (coffset << SAFE_SA_CMD1_OFFSET_S)
19441 + | SAFE_SA_CMD1_SAREV1 /* Rev 1 SA data structure */
19442 + | SAFE_SA_CMD1_SRPCI
19444 + /*
19445 + * NB: the order of writes is important here. In case the
19446 + * chip is scanning the ring because of an outstanding request
19447 + * it might nab this one too. In that case we need to make
19448 + * sure the setup is complete before we write the length
19449 + * field of the descriptor as it signals the descriptor is
19450 + * ready for processing.
19451 + */
19452 + re->re_desc.d_csr = SAFE_PE_CSR_READY | SAFE_PE_CSR_SAPCI;
19453 + if (maccrd)
19454 + re->re_desc.d_csr |= SAFE_PE_CSR_LOADSA | SAFE_PE_CSR_HASHFINAL;
19455 + wmb();
19456 + re->re_desc.d_len = oplen
19457 + | SAFE_PE_LEN_READY
19458 + | (bypass << SAFE_PE_LEN_BYPASS_S)
19461 + safestats.st_ipackets++;
19462 + safestats.st_ibytes += oplen;
19464 + if (++(sc->sc_front) == sc->sc_ringtop)
19465 + sc->sc_front = sc->sc_ring;
19467 + /* XXX honor batching */
19468 + safe_feed(sc, re);
19469 + spin_unlock_irqrestore(&sc->sc_ringmtx, flags);
19470 + return (0);
19472 +errout:
19473 + if (re->re_src.map != re->re_dst.map)
19474 + pci_unmap_operand(sc, &re->re_dst);
19475 + if (re->re_src.map)
19476 + pci_unmap_operand(sc, &re->re_src);
19477 + spin_unlock_irqrestore(&sc->sc_ringmtx, flags);
19478 + if (err != ERESTART) {
19479 + crp->crp_etype = err;
19480 + crypto_done(crp);
19481 + } else {
19482 + sc->sc_needwakeup |= CRYPTO_SYMQ;
19484 + return (err);
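+/* Completion handling for a finished ring entry: unmap the DMA operands, copy back the IV and ICV where requested and complete the request via crypto_done(). */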
19487 +static void
19488 +safe_callback(struct safe_softc *sc, struct safe_ringentry *re)
19490 + struct cryptop *crp = (struct cryptop *)re->re_crp;
19491 + struct cryptodesc *crd;
19493 + DPRINTF(("%s()\n", __FUNCTION__));
19495 + safestats.st_opackets++;
19496 + safestats.st_obytes += re->re_dst.mapsize;
19498 + if (re->re_desc.d_csr & SAFE_PE_CSR_STATUS) {
19499 + device_printf(sc->sc_dev, "csr 0x%x cmd0 0x%x cmd1 0x%x\n",
19500 + re->re_desc.d_csr,
19501 + re->re_sa.sa_cmd0, re->re_sa.sa_cmd1);
19502 + safestats.st_peoperr++;
19503 + crp->crp_etype = EIO; /* something more meaningful? */
19506 + if (re->re_dst.map != NULL && re->re_dst.map != re->re_src.map)
19507 + pci_unmap_operand(sc, &re->re_dst);
19508 + pci_unmap_operand(sc, &re->re_src);
19510 + /*
19511 + * If the result was written to a different mbuf chain, swap
19512 + * it in as the return value and reclaim the original.
19513 + */
19514 + if ((crp->crp_flags & CRYPTO_F_SKBUF) && re->re_src_skb != re->re_dst_skb) {
19515 + device_printf(sc->sc_dev, "no CRYPTO_F_SKBUF swapping support\n");
19516 + /* kfree_skb(skb) */
19517 + /* crp->crp_buf = (caddr_t)re->re_dst_skb */
19518 + return;
19521 + if (re->re_flags & SAFE_QFLAGS_COPYOUTIV) {
19522 + /* copy out IV for future use */
19523 + for (crd = crp->crp_desc; crd; crd = crd->crd_next) {
19524 + int i;
19525 + int ivsize;
19527 + if (crd->crd_alg == CRYPTO_DES_CBC ||
19528 + crd->crd_alg == CRYPTO_3DES_CBC) {
19529 + ivsize = 2*sizeof(u_int32_t);
19530 + } else if (crd->crd_alg == CRYPTO_AES_CBC) {
19531 + ivsize = 4*sizeof(u_int32_t);
19532 + } else
19533 + continue;
19534 + crypto_copydata(crp->crp_flags, crp->crp_buf,
19535 + crd->crd_skip + crd->crd_len - ivsize, ivsize,
19536 + (caddr_t)sc->sc_sessions[re->re_sesn].ses_iv);
19537 + for (i = 0;
19538 + i < ivsize/sizeof(sc->sc_sessions[re->re_sesn].ses_iv[0]);
19539 + i++)
19540 + sc->sc_sessions[re->re_sesn].ses_iv[i] =
19541 + cpu_to_le32(sc->sc_sessions[re->re_sesn].ses_iv[i]);
19542 + break;
19546 + if (re->re_flags & SAFE_QFLAGS_COPYOUTICV) {
19547 + /* copy out ICV result */
19548 + for (crd = crp->crp_desc; crd; crd = crd->crd_next) {
19549 + if (!(crd->crd_alg == CRYPTO_MD5_HMAC ||
19550 + crd->crd_alg == CRYPTO_SHA1_HMAC ||
19551 + crd->crd_alg == CRYPTO_NULL_HMAC))
19552 + continue;
19553 + if (crd->crd_alg == CRYPTO_SHA1_HMAC) {
19554 + /*
19555 + * SHA-1 ICVs are byte-swapped; fix 'em up
19556 + * before copying them to their destination.
19557 + */
19558 + re->re_sastate.sa_saved_indigest[0] =
19559 + cpu_to_be32(re->re_sastate.sa_saved_indigest[0]);
19560 + re->re_sastate.sa_saved_indigest[1] =
19561 + cpu_to_be32(re->re_sastate.sa_saved_indigest[1]);
19562 + re->re_sastate.sa_saved_indigest[2] =
19563 + cpu_to_be32(re->re_sastate.sa_saved_indigest[2]);
19564 + } else {
19565 + re->re_sastate.sa_saved_indigest[0] =
19566 + cpu_to_le32(re->re_sastate.sa_saved_indigest[0]);
19567 + re->re_sastate.sa_saved_indigest[1] =
19568 + cpu_to_le32(re->re_sastate.sa_saved_indigest[1]);
19569 + re->re_sastate.sa_saved_indigest[2] =
19570 + cpu_to_le32(re->re_sastate.sa_saved_indigest[2]);
19572 + crypto_copyback(crp->crp_flags, crp->crp_buf,
19573 + crd->crd_inject,
19574 + sc->sc_sessions[re->re_sesn].ses_mlen,
19575 + (caddr_t)re->re_sastate.sa_saved_indigest);
19576 + break;
19579 + crypto_done(crp);
19583 +#if defined(CONFIG_OCF_RANDOMHARVEST) && !defined(SAFE_NO_RNG)
19584 +#define SAFE_RNG_MAXWAIT 1000
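+/* Bring the RNG out of reset using the configuration value recommended by SafeNet and wait for its internal initialization to complete (see the rev 1.0 note below). */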
19586 +static void
19587 +safe_rng_init(struct safe_softc *sc)
19589 + u_int32_t w, v;
19590 + int i;
19592 + DPRINTF(("%s()\n", __FUNCTION__));
19594 + WRITE_REG(sc, SAFE_RNG_CTRL, 0);
19595 + /* use default value according to the manual */
19596 + WRITE_REG(sc, SAFE_RNG_CNFG, 0x834); /* magic from SafeNet */
19597 + WRITE_REG(sc, SAFE_RNG_ALM_CNT, 0);
19599 + /*
19600 + * There is a bug in rev 1.0 of the 1140: when the RNG
19601 + * is brought out of reset, the ready status flag does not
19602 + * work until the RNG has finished its internal initialization.
19604 + * So in order to determine that the device has finished its
19605 + * initialization we must read the data register, using the
19606 + * status reg in the read in case it is initialized. Then read
19607 + * the data register until it changes from the first read.
19608 + * Once it changes, read the data register until it changes
19609 + * again. At that point the RNG is considered initialized.
19610 + * This can take between 750 ms and 1000 ms.
19611 + */
19612 + i = 0;
19613 + w = READ_REG(sc, SAFE_RNG_OUT);
19614 + do {
19615 + v = READ_REG(sc, SAFE_RNG_OUT);
19616 + if (v != w) {
19617 + w = v;
19618 + break;
19620 + DELAY(10);
19621 + } while (++i < SAFE_RNG_MAXWAIT);
19623 + /* Wait until the data changes again */
19624 + i = 0;
19625 + do {
19626 + v = READ_REG(sc, SAFE_RNG_OUT);
19627 + if (v != w)
19628 + break;
19629 + DELAY(10);
19630 + } while (++i < SAFE_RNG_MAXWAIT);
19633 +static __inline void
19634 +safe_rng_disable_short_cycle(struct safe_softc *sc)
19636 + DPRINTF(("%s()\n", __FUNCTION__));
19638 + WRITE_REG(sc, SAFE_RNG_CTRL,
19639 + READ_REG(sc, SAFE_RNG_CTRL) &~ SAFE_RNG_CTRL_SHORTEN);
19642 +static __inline void
19643 +safe_rng_enable_short_cycle(struct safe_softc *sc)
19645 + DPRINTF(("%s()\n", __FUNCTION__));
19647 + WRITE_REG(sc, SAFE_RNG_CTRL,
19648 + READ_REG(sc, SAFE_RNG_CTRL) | SAFE_RNG_CTRL_SHORTEN);
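+/* Busy-wait (bounded by SAFE_RNG_MAXWAIT iterations) for the RNG status to clear, then return one 32-bit word of output. */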
19651 +static __inline u_int32_t
19652 +safe_rng_read(struct safe_softc *sc)
19654 + int i;
19656 + i = 0;
19657 + while (READ_REG(sc, SAFE_RNG_STAT) != 0 && ++i < SAFE_RNG_MAXWAIT)
19659 + return READ_REG(sc, SAFE_RNG_OUT);
19662 +static int
19663 +safe_read_random(void *arg, u_int32_t *buf, int maxwords)
19665 + struct safe_softc *sc = (struct safe_softc *) arg;
19666 + int i, rc;
19668 + DPRINTF(("%s()\n", __FUNCTION__));
19670 + safestats.st_rng++;
19671 + /*
19672 + * Fetch the next block of data.
19673 + */
19674 + if (maxwords > safe_rngbufsize)
19675 + maxwords = safe_rngbufsize;
19676 + if (maxwords > SAFE_RNG_MAXBUFSIZ)
19677 + maxwords = SAFE_RNG_MAXBUFSIZ;
19678 +retry:
19679 + /* read as much as we can */
19680 + for (rc = 0; rc < maxwords; rc++) {
19681 + if (READ_REG(sc, SAFE_RNG_STAT) != 0)
19682 + break;
19683 + buf[rc] = READ_REG(sc, SAFE_RNG_OUT);
19685 + if (rc == 0)
19686 + return 0;
19687 + /*
19688 + * Check the comparator alarm count and reset the h/w if
19689 + * it exceeds our threshold. This guards against the
19690 + * hardware oscillators resonating with external signals.
19691 + */
19692 + if (READ_REG(sc, SAFE_RNG_ALM_CNT) > safe_rngmaxalarm) {
19693 + u_int32_t freq_inc, w;
19695 + DPRINTF(("%s: alarm count %u exceeds threshold %u\n", __func__,
19696 + (unsigned)READ_REG(sc, SAFE_RNG_ALM_CNT), safe_rngmaxalarm));
19697 + safestats.st_rngalarm++;
19698 + safe_rng_enable_short_cycle(sc);
19699 + freq_inc = 18;
19700 + for (i = 0; i < 64; i++) {
19701 + w = READ_REG(sc, SAFE_RNG_CNFG);
19702 + freq_inc = ((w + freq_inc) & 0x3fL);
19703 + w = ((w & ~0x3fL) | freq_inc);
19704 + WRITE_REG(sc, SAFE_RNG_CNFG, w);
19706 + WRITE_REG(sc, SAFE_RNG_ALM_CNT, 0);
19708 + (void) safe_rng_read(sc);
19709 + DELAY(25);
19711 + if (READ_REG(sc, SAFE_RNG_ALM_CNT) == 0) {
19712 + safe_rng_disable_short_cycle(sc);
19713 + goto retry;
19715 + freq_inc = 1;
19717 + safe_rng_disable_short_cycle(sc);
19718 + } else
19719 + WRITE_REG(sc, SAFE_RNG_ALM_CNT, 0);
19721 + return(rc);
19723 +#endif /* defined(CONFIG_OCF_RANDOMHARVEST) && !defined(SAFE_NO_RNG) */
19727 + * Resets the board. Values in the registers are left as is
19728 + * from the reset (i.e. initial values are assigned elsewhere).
19729 + */
19730 +static void
19731 +safe_reset_board(struct safe_softc *sc)
19733 + u_int32_t v;
19734 + /*
19735 + * Reset the device. The manual says no delay
19736 + * is needed between marking and clearing reset.
19737 + */
19738 + DPRINTF(("%s()\n", __FUNCTION__));
19740 + v = READ_REG(sc, SAFE_PE_DMACFG) &~
19741 + (SAFE_PE_DMACFG_PERESET | SAFE_PE_DMACFG_PDRRESET |
19742 + SAFE_PE_DMACFG_SGRESET);
19743 + WRITE_REG(sc, SAFE_PE_DMACFG, v
19744 + | SAFE_PE_DMACFG_PERESET
19745 + | SAFE_PE_DMACFG_PDRRESET
19746 + | SAFE_PE_DMACFG_SGRESET);
19747 + WRITE_REG(sc, SAFE_PE_DMACFG, v);
19751 + * Initialize registers we need to touch only once.
19752 + */
19753 +static void
19754 +safe_init_board(struct safe_softc *sc)
19756 + u_int32_t v, dwords;
19758 + DPRINTF(("%s()\n", __FUNCTION__));
19760 + v = READ_REG(sc, SAFE_PE_DMACFG);
19761 + v &=~ ( SAFE_PE_DMACFG_PEMODE
19762 + | SAFE_PE_DMACFG_FSENA /* failsafe enable */
19763 + | SAFE_PE_DMACFG_GPRPCI /* gather ring on PCI */
19764 + | SAFE_PE_DMACFG_SPRPCI /* scatter ring on PCI */
19765 + | SAFE_PE_DMACFG_ESDESC /* endian-swap descriptors */
19766 + | SAFE_PE_DMACFG_ESPDESC /* endian-swap part. desc's */
19767 + | SAFE_PE_DMACFG_ESSA /* endian-swap SA's */
19768 + | SAFE_PE_DMACFG_ESPACKET /* swap the packet data */
19769 + );
19770 + v |= SAFE_PE_DMACFG_FSENA /* failsafe enable */
19771 + | SAFE_PE_DMACFG_GPRPCI /* gather ring on PCI */
19772 + | SAFE_PE_DMACFG_SPRPCI /* scatter ring on PCI */
19773 + | SAFE_PE_DMACFG_ESDESC /* endian-swap descriptors */
19774 + | SAFE_PE_DMACFG_ESPDESC /* endian-swap part. desc's */
19775 + | SAFE_PE_DMACFG_ESSA /* endian-swap SA's */
19776 +#if 0
19777 + | SAFE_PE_DMACFG_ESPACKET /* swap the packet data */
19778 +#endif
19780 + WRITE_REG(sc, SAFE_PE_DMACFG, v);
19782 +#ifdef __BIG_ENDIAN
19783 + /* tell the safenet that we are 4321 and not 1234 */
19784 + WRITE_REG(sc, SAFE_ENDIAN, 0xe4e41b1b);
19785 +#endif
19787 + if (sc->sc_chiprev == SAFE_REV(1,0)) {
19788 + /*
19789 + * Avoid large PCI DMA transfers. Rev 1.0 has a bug where
19790 + * "target mode transfers" done while the chip is DMA'ing
19791 + * >1020 bytes cause the hardware to lock up. To avoid this
19792 + * we reduce the max PCI transfer size and use small source
19793 + * particle descriptors (<= 256 bytes).
19794 + */
19795 + WRITE_REG(sc, SAFE_DMA_CFG, 256);
19796 + device_printf(sc->sc_dev,
19797 + "Reduce max DMA size to %u words for rev %u.%u WAR\n",
19798 + (unsigned) ((READ_REG(sc, SAFE_DMA_CFG)>>2) & 0xff),
19799 + (unsigned) SAFE_REV_MAJ(sc->sc_chiprev),
19800 + (unsigned) SAFE_REV_MIN(sc->sc_chiprev));
19801 + sc->sc_max_dsize = 256;
19802 + } else {
19803 + sc->sc_max_dsize = SAFE_MAX_DSIZE;
19806 + /* NB: operands+results are overlaid */
19807 + WRITE_REG(sc, SAFE_PE_PDRBASE, sc->sc_ringalloc.dma_paddr);
19808 + WRITE_REG(sc, SAFE_PE_RDRBASE, sc->sc_ringalloc.dma_paddr);
19809 + /*
19810 + * Configure ring entry size and number of items in the ring.
19811 + */
19812 + KASSERT((sizeof(struct safe_ringentry) % sizeof(u_int32_t)) == 0,
19813 + ("PE ring entry not 32-bit aligned!"));
19814 + dwords = sizeof(struct safe_ringentry) / sizeof(u_int32_t);
19815 + WRITE_REG(sc, SAFE_PE_RINGCFG,
19816 + (dwords << SAFE_PE_RINGCFG_OFFSET_S) | SAFE_MAX_NQUEUE);
19817 + WRITE_REG(sc, SAFE_PE_RINGPOLL, 0); /* disable polling */
19819 + WRITE_REG(sc, SAFE_PE_GRNGBASE, sc->sc_spalloc.dma_paddr);
19820 + WRITE_REG(sc, SAFE_PE_SRNGBASE, sc->sc_dpalloc.dma_paddr);
19821 + WRITE_REG(sc, SAFE_PE_PARTSIZE,
19822 + (SAFE_TOTAL_DPART<<16) | SAFE_TOTAL_SPART);
19823 + /*
19824 + * NB: destination particles are fixed size. We use
19825 + * an mbuf cluster and require all results go to
19826 + * clusters or smaller.
19827 + */
19828 + WRITE_REG(sc, SAFE_PE_PARTCFG, sc->sc_max_dsize);
19830 + /* it's now safe to enable PE mode, do it */
19831 + WRITE_REG(sc, SAFE_PE_DMACFG, v | SAFE_PE_DMACFG_PEMODE);
19833 + /*
19834 + * Configure hardware to use level-triggered interrupts and
19835 + * to interrupt after each descriptor is processed.
19836 + */
19837 + WRITE_REG(sc, SAFE_HI_CFG, SAFE_HI_CFG_LEVEL);
19838 + WRITE_REG(sc, SAFE_HI_CLR, 0xffffffff);
19839 + WRITE_REG(sc, SAFE_HI_DESC_CNT, 1);
19840 + WRITE_REG(sc, SAFE_HI_MASK, SAFE_INT_PE_DDONE | SAFE_INT_PE_ERROR);
19845 + * Clean up after a chip crash.
19846 + * It is assumed that the caller is in splimp()
19847 + */
19848 +static void
19849 +safe_cleanchip(struct safe_softc *sc)
19851 + DPRINTF(("%s()\n", __FUNCTION__));
19853 + if (sc->sc_nqchip != 0) {
19854 + struct safe_ringentry *re = sc->sc_back;
19856 + while (re != sc->sc_front) {
19857 + if (re->re_desc.d_csr != 0)
19858 + safe_free_entry(sc, re);
19859 + if (++re == sc->sc_ringtop)
19860 + re = sc->sc_ring;
19862 + sc->sc_back = re;
19863 + sc->sc_nqchip = 0;
19868 + * free a safe_q
19869 + * It is assumed that the caller is within splimp().
19870 + */
19871 +static int
19872 +safe_free_entry(struct safe_softc *sc, struct safe_ringentry *re)
19874 + struct cryptop *crp;
19876 + DPRINTF(("%s()\n", __FUNCTION__));
19878 + /*
19879 + * Free header MCR
19880 + */
19881 + if ((re->re_dst_skb != NULL) && (re->re_src_skb != re->re_dst_skb))
19882 +#ifdef NOTYET
19883 + m_freem(re->re_dst_m);
19884 +#else
19885 + printk("%s,%d: SKB not supported\n", __FILE__, __LINE__);
19886 +#endif
19888 + crp = (struct cryptop *)re->re_crp;
19890 + re->re_desc.d_csr = 0;
19892 + crp->crp_etype = EFAULT;
19893 + crypto_done(crp);
19894 + return(0);
19898 + * Routine to reset the chip and clean up.
19899 + * It is assumed that the caller is in splimp()
19900 + */
19901 +static void
19902 +safe_totalreset(struct safe_softc *sc)
19904 + DPRINTF(("%s()\n", __FUNCTION__));
19906 + safe_reset_board(sc);
19907 + safe_init_board(sc);
19908 + safe_cleanchip(sc);
19912 + * Is the operand suitably aligned for direct DMA. Each
19913 + * segment must be aligned on a 32-bit boundary and all
19914 + * but the last segment must be a multiple of 4 bytes.
19915 + */
19916 +static int
19917 +safe_dmamap_aligned(struct safe_softc *sc, const struct safe_operand *op)
19919 + int i;
19921 + DPRINTF(("%s()\n", __FUNCTION__));
19923 + for (i = 0; i < op->nsegs; i++) {
19924 + if (op->segs[i].ds_addr & 3)
19925 + return (0);
19926 + if (i != (op->nsegs - 1) && (op->segs[i].ds_len & 3))
19927 + return (0);
19929 + return (1);
19933 + * Is the operand suitable for direct DMA as the destination
19934 + * of an operation. The hardware requires that each ``particle''
19935 + * but the last in an operation result have the same size. We
19936 + * fix that size at SAFE_MAX_DSIZE bytes. This routine returns
19937 + * 0 if some segment is not a multiple of this size, 1 if all
19938 + * segments are exactly this size, or 2 if segments are at worst
19939 + * a multiple of this size.
19940 + */
19941 +static int
19942 +safe_dmamap_uniform(struct safe_softc *sc, const struct safe_operand *op)
19944 + int result = 1;
19946 + DPRINTF(("%s()\n", __FUNCTION__));
19948 + if (op->nsegs > 0) {
19949 + int i;
19951 + for (i = 0; i < op->nsegs-1; i++) {
19952 + if (op->segs[i].ds_len % sc->sc_max_dsize)
19953 + return (0);
19954 + if (op->segs[i].ds_len != sc->sc_max_dsize)
19955 + result = 2;
19958 + return (result);
19961 +static int
19962 +safe_kprocess(device_t dev, struct cryptkop *krp, int hint)
19964 + struct safe_softc *sc = device_get_softc(dev);
19965 + struct safe_pkq *q;
19966 + unsigned long flags;
19968 + DPRINTF(("%s()\n", __FUNCTION__));
19970 + if (sc == NULL) {
19971 + krp->krp_status = EINVAL;
19972 + goto err;
19975 + if (krp->krp_op != CRK_MOD_EXP) {
19976 + krp->krp_status = EOPNOTSUPP;
19977 + goto err;
19980 + q = (struct safe_pkq *) kmalloc(sizeof(*q), GFP_KERNEL);
19981 + if (q == NULL) {
19982 + krp->krp_status = ENOMEM;
19983 + goto err;
19985 + memset(q, 0, sizeof(*q));
19986 + q->pkq_krp = krp;
19987 + INIT_LIST_HEAD(&q->pkq_list);
19989 + spin_lock_irqsave(&sc->sc_pkmtx, flags);
19990 + list_add_tail(&q->pkq_list, &sc->sc_pkq);
19991 + safe_kfeed(sc);
19992 + spin_unlock_irqrestore(&sc->sc_pkmtx, flags);
19993 + return (0);
19995 +err:
19996 + crypto_kdone(krp);
19997 + return (0);
20000 +#define SAFE_CRK_PARAM_BASE 0
20001 +#define SAFE_CRK_PARAM_EXP 1
20002 +#define SAFE_CRK_PARAM_MOD 2
20004 +static int
20005 +safe_kstart(struct safe_softc *sc)
20007 + struct cryptkop *krp = sc->sc_pkq_cur->pkq_krp;
20008 + int exp_bits, mod_bits, base_bits;
20009 + u_int32_t op, a_off, b_off, c_off, d_off;
20011 + DPRINTF(("%s()\n", __FUNCTION__));
20013 + if (krp->krp_iparams < 3 || krp->krp_oparams != 1) {
20014 + krp->krp_status = EINVAL;
20015 + return (1);
20018 + base_bits = safe_ksigbits(sc, &krp->krp_param[SAFE_CRK_PARAM_BASE]);
20019 + if (base_bits > 2048)
20020 + goto too_big;
20021 + if (base_bits <= 0) /* 5. base not zero */
20022 + goto too_small;
20024 + exp_bits = safe_ksigbits(sc, &krp->krp_param[SAFE_CRK_PARAM_EXP]);
20025 + if (exp_bits > 2048)
20026 + goto too_big;
20027 + if (exp_bits <= 0) /* 1. exponent word length > 0 */
20028 + goto too_small; /* 4. exponent not zero */
20030 + mod_bits = safe_ksigbits(sc, &krp->krp_param[SAFE_CRK_PARAM_MOD]);
20031 + if (mod_bits > 2048)
20032 + goto too_big;
20033 + if (mod_bits <= 32) /* 2. modulus word length > 1 */
20034 + goto too_small; /* 8. MSW of modulus != zero */
20035 + if (mod_bits < exp_bits) /* 3. modulus len >= exponent len */
20036 + goto too_small;
20037 + if ((krp->krp_param[SAFE_CRK_PARAM_MOD].crp_p[0] & 1) == 0)
20038 + goto bad_domain; /* 6. modulus is odd */
20039 + if (mod_bits > krp->krp_param[krp->krp_iparams].crp_nbits)
20040 + goto too_small; /* make sure result will fit */
20042 + /* 7. modulus > base */
20043 + if (mod_bits < base_bits)
20044 + goto too_small;
20045 + if (mod_bits == base_bits) {
20046 + u_int8_t *basep, *modp;
20047 + int i;
20049 + basep = krp->krp_param[SAFE_CRK_PARAM_BASE].crp_p +
20050 + ((base_bits + 7) / 8) - 1;
20051 + modp = krp->krp_param[SAFE_CRK_PARAM_MOD].crp_p +
20052 + ((mod_bits + 7) / 8) - 1;
20054 + for (i = 0; i < (mod_bits + 7) / 8; i++, basep--, modp--) {
20055 + if (*modp < *basep)
20056 + goto too_small;
20057 + if (*modp > *basep)
20058 + break;
20062 + /* And on the 9th step, he rested. */
20064 + WRITE_REG(sc, SAFE_PK_A_LEN, (exp_bits + 31) / 32);
20065 + WRITE_REG(sc, SAFE_PK_B_LEN, (mod_bits + 31) / 32);
20066 + if (mod_bits > 1024) {
20067 + op = SAFE_PK_FUNC_EXP4;
20068 + a_off = 0x000;
20069 + b_off = 0x100;
20070 + c_off = 0x200;
20071 + d_off = 0x300;
20072 + } else {
20073 + op = SAFE_PK_FUNC_EXP16;
20074 + a_off = 0x000;
20075 + b_off = 0x080;
20076 + c_off = 0x100;
20077 + d_off = 0x180;
20079 + sc->sc_pk_reslen = b_off - a_off;
20080 + sc->sc_pk_resoff = d_off;
20082 + /* A is exponent, B is modulus, C is base, D is result */
20083 + safe_kload_reg(sc, a_off, b_off - a_off,
20084 + &krp->krp_param[SAFE_CRK_PARAM_EXP]);
20085 + WRITE_REG(sc, SAFE_PK_A_ADDR, a_off >> 2);
20086 + safe_kload_reg(sc, b_off, b_off - a_off,
20087 + &krp->krp_param[SAFE_CRK_PARAM_MOD]);
20088 + WRITE_REG(sc, SAFE_PK_B_ADDR, b_off >> 2);
20089 + safe_kload_reg(sc, c_off, b_off - a_off,
20090 + &krp->krp_param[SAFE_CRK_PARAM_BASE]);
20091 + WRITE_REG(sc, SAFE_PK_C_ADDR, c_off >> 2);
20092 + WRITE_REG(sc, SAFE_PK_D_ADDR, d_off >> 2);
20094 + WRITE_REG(sc, SAFE_PK_FUNC, op | SAFE_PK_FUNC_RUN);
20096 + return (0);
20098 +too_big:
20099 + krp->krp_status = E2BIG;
20100 + return (1);
20101 +too_small:
20102 + krp->krp_status = ERANGE;
20103 + return (1);
20104 +bad_domain:
20105 + krp->krp_status = EDOM;
20106 + return (1);
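+/* Count the significant bits in a crparam; the bytes are stored least-significant first, so scan from the top byte down for the first non-zero byte. */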
20109 +static int
20110 +safe_ksigbits(struct safe_softc *sc, struct crparam *cr)
20112 + u_int plen = (cr->crp_nbits + 7) / 8;
20113 + int i, sig = plen * 8;
20114 + u_int8_t c, *p = cr->crp_p;
20116 + DPRINTF(("%s()\n", __FUNCTION__));
20118 + for (i = plen - 1; i >= 0; i--) {
20119 + c = p[i];
20120 + if (c != 0) {
20121 + while ((c & 0x80) == 0) {
20122 + sig--;
20123 + c <<= 1;
20125 + break;
20127 + sig -= 8;
20129 + return (sig);
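+/* If the public-key engine is idle, dequeue the next modexp request, start it and arm the polling timer. */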
20132 +static void
20133 +safe_kfeed(struct safe_softc *sc)
20135 + struct safe_pkq *q, *tmp;
20137 + DPRINTF(("%s()\n", __FUNCTION__));
20139 + if (list_empty(&sc->sc_pkq) && sc->sc_pkq_cur == NULL)
20140 + return;
20141 + if (sc->sc_pkq_cur != NULL)
20142 + return;
20143 + list_for_each_entry_safe(q, tmp, &sc->sc_pkq, pkq_list) {
20144 + sc->sc_pkq_cur = q;
20145 + list_del(&q->pkq_list);
20146 + if (safe_kstart(sc) != 0) {
20147 + crypto_kdone(q->pkq_krp);
20148 + kfree(q);
20149 + sc->sc_pkq_cur = NULL;
20150 + } else {
20151 + /* op started, start polling */
20152 + mod_timer(&sc->sc_pkto, jiffies + 1);
20153 + break;
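+/* Timer callback: poll the public-key unit; when the operation completes, copy the result out of PK RAM, clear the RAM and start the next queued request. */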
20158 +static void
20159 +safe_kpoll(unsigned long arg)
20161 + struct safe_softc *sc = NULL;
20162 + struct safe_pkq *q;
20163 + struct crparam *res;
20164 + int i;
20165 + u_int32_t buf[64];
20166 + unsigned long flags;
20168 + DPRINTF(("%s()\n", __FUNCTION__));
20170 + if (arg >= SAFE_MAX_CHIPS)
20171 + return;
20172 + sc = safe_chip_idx[arg];
20173 + if (!sc) {
20174 + DPRINTF(("%s() - bad callback\n", __FUNCTION__));
20175 + return;
20178 + spin_lock_irqsave(&sc->sc_pkmtx, flags);
20179 + if (sc->sc_pkq_cur == NULL)
20180 + goto out;
20181 + if (READ_REG(sc, SAFE_PK_FUNC) & SAFE_PK_FUNC_RUN) {
20182 + /* still running, check back later */
20183 + mod_timer(&sc->sc_pkto, jiffies + 1);
20184 + goto out;
20187 + q = sc->sc_pkq_cur;
20188 + res = &q->pkq_krp->krp_param[q->pkq_krp->krp_iparams];
20189 + bzero(buf, sizeof(buf));
20190 + bzero(res->crp_p, (res->crp_nbits + 7) / 8);
20191 + for (i = 0; i < sc->sc_pk_reslen >> 2; i++)
20192 + buf[i] = le32_to_cpu(READ_REG(sc, SAFE_PK_RAM_START +
20193 + sc->sc_pk_resoff + (i << 2)));
20194 + bcopy(buf, res->crp_p, (res->crp_nbits + 7) / 8);
20195 + /*
20196 + * reduce the bits that need copying if possible
20197 + */
20198 + res->crp_nbits = min(res->crp_nbits,sc->sc_pk_reslen * 8);
20199 + res->crp_nbits = safe_ksigbits(sc, res);
20201 + for (i = SAFE_PK_RAM_START; i < SAFE_PK_RAM_END; i += 4)
20202 + WRITE_REG(sc, i, 0);
20204 + crypto_kdone(q->pkq_krp);
20205 + kfree(q);
20206 + sc->sc_pkq_cur = NULL;
20208 + safe_kfeed(sc);
20209 +out:
20210 + spin_unlock_irqrestore(&sc->sc_pkmtx, flags);
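+/* Load a crparam into the public-key unit's RAM at the given offset, one little-endian 32-bit word at a time. */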
20213 +static void
20214 +safe_kload_reg(struct safe_softc *sc, u_int32_t off, u_int32_t len,
20215 + struct crparam *n)
20217 + u_int32_t buf[64], i;
20219 + DPRINTF(("%s()\n", __FUNCTION__));
20221 + bzero(buf, sizeof(buf));
20222 + bcopy(n->crp_p, buf, (n->crp_nbits + 7) / 8);
20224 + for (i = 0; i < len >> 2; i++)
20225 + WRITE_REG(sc, SAFE_PK_RAM_START + off + (i << 2),
20226 + cpu_to_le32(buf[i]));
20229 +#ifdef SAFE_DEBUG
20230 +static void
20231 +safe_dump_dmastatus(struct safe_softc *sc, const char *tag)
20233 + printf("%s: ENDIAN 0x%x SRC 0x%x DST 0x%x STAT 0x%x\n"
20234 + , tag
20235 + , READ_REG(sc, SAFE_DMA_ENDIAN)
20236 + , READ_REG(sc, SAFE_DMA_SRCADDR)
20237 + , READ_REG(sc, SAFE_DMA_DSTADDR)
20238 + , READ_REG(sc, SAFE_DMA_STAT)
20239 + );
20242 +static void
20243 +safe_dump_intrstate(struct safe_softc *sc, const char *tag)
20245 + printf("%s: HI_CFG 0x%x HI_MASK 0x%x HI_DESC_CNT 0x%x HU_STAT 0x%x HM_STAT 0x%x\n"
20246 + , tag
20247 + , READ_REG(sc, SAFE_HI_CFG)
20248 + , READ_REG(sc, SAFE_HI_MASK)
20249 + , READ_REG(sc, SAFE_HI_DESC_CNT)
20250 + , READ_REG(sc, SAFE_HU_STAT)
20251 + , READ_REG(sc, SAFE_HM_STAT)
20252 + );
20255 +static void
20256 +safe_dump_ringstate(struct safe_softc *sc, const char *tag)
20258 + u_int32_t estat = READ_REG(sc, SAFE_PE_ERNGSTAT);
20260 + /* NB: assume caller has lock on ring */
20261 + printf("%s: ERNGSTAT %x (next %u) back %lu front %lu\n",
20262 + tag,
20263 + estat, (estat >> SAFE_PE_ERNGSTAT_NEXT_S),
20264 + (unsigned long)(sc->sc_back - sc->sc_ring),
20265 + (unsigned long)(sc->sc_front - sc->sc_ring));
20268 +static void
20269 +safe_dump_request(struct safe_softc *sc, const char* tag, struct safe_ringentry *re)
20271 + int ix, nsegs;
20273 + ix = re - sc->sc_ring;
20274 + printf("%s: %p (%u): csr %x src %x dst %x sa %x len %x\n"
20275 + , tag
20276 + , re, ix
20277 + , re->re_desc.d_csr
20278 + , re->re_desc.d_src
20279 + , re->re_desc.d_dst
20280 + , re->re_desc.d_sa
20281 + , re->re_desc.d_len
20282 + );
20283 + if (re->re_src.nsegs > 1) {
20284 + ix = (re->re_desc.d_src - sc->sc_spalloc.dma_paddr) /
20285 + sizeof(struct safe_pdesc);
20286 + for (nsegs = re->re_src.nsegs; nsegs; nsegs--) {
20287 + printf(" spd[%u] %p: %p size %u flags %x"
20288 + , ix, &sc->sc_spring[ix]
20289 + , (caddr_t)(uintptr_t) sc->sc_spring[ix].pd_addr
20290 + , sc->sc_spring[ix].pd_size
20291 + , sc->sc_spring[ix].pd_flags
20292 + );
20293 + if (sc->sc_spring[ix].pd_size == 0)
20294 + printf(" (zero!)");
20295 + printf("\n");
20296 + if (++ix == SAFE_TOTAL_SPART)
20297 + ix = 0;
20300 + if (re->re_dst.nsegs > 1) {
20301 + ix = (re->re_desc.d_dst - sc->sc_dpalloc.dma_paddr) /
20302 + sizeof(struct safe_pdesc);
20303 + for (nsegs = re->re_dst.nsegs; nsegs; nsegs--) {
20304 + printf(" dpd[%u] %p: %p flags %x\n"
20305 + , ix, &sc->sc_dpring[ix]
20306 + , (caddr_t)(uintptr_t) sc->sc_dpring[ix].pd_addr
20307 + , sc->sc_dpring[ix].pd_flags
20308 + );
20309 + if (++ix == SAFE_TOTAL_DPART)
20310 + ix = 0;
20313 + printf("sa: cmd0 %08x cmd1 %08x staterec %x\n",
20314 + re->re_sa.sa_cmd0, re->re_sa.sa_cmd1, re->re_sa.sa_staterec);
20315 + printf("sa: key %x %x %x %x %x %x %x %x\n"
20316 + , re->re_sa.sa_key[0]
20317 + , re->re_sa.sa_key[1]
20318 + , re->re_sa.sa_key[2]
20319 + , re->re_sa.sa_key[3]
20320 + , re->re_sa.sa_key[4]
20321 + , re->re_sa.sa_key[5]
20322 + , re->re_sa.sa_key[6]
20323 + , re->re_sa.sa_key[7]
20324 + );
20325 + printf("sa: indigest %x %x %x %x %x\n"
20326 + , re->re_sa.sa_indigest[0]
20327 + , re->re_sa.sa_indigest[1]
20328 + , re->re_sa.sa_indigest[2]
20329 + , re->re_sa.sa_indigest[3]
20330 + , re->re_sa.sa_indigest[4]
20331 + );
20332 + printf("sa: outdigest %x %x %x %x %x\n"
20333 + , re->re_sa.sa_outdigest[0]
20334 + , re->re_sa.sa_outdigest[1]
20335 + , re->re_sa.sa_outdigest[2]
20336 + , re->re_sa.sa_outdigest[3]
20337 + , re->re_sa.sa_outdigest[4]
20338 + );
20339 + printf("sr: iv %x %x %x %x\n"
20340 + , re->re_sastate.sa_saved_iv[0]
20341 + , re->re_sastate.sa_saved_iv[1]
20342 + , re->re_sastate.sa_saved_iv[2]
20343 + , re->re_sastate.sa_saved_iv[3]
20344 + );
20345 + printf("sr: hashbc %u indigest %x %x %x %x %x\n"
20346 + , re->re_sastate.sa_saved_hashbc
20347 + , re->re_sastate.sa_saved_indigest[0]
20348 + , re->re_sastate.sa_saved_indigest[1]
20349 + , re->re_sastate.sa_saved_indigest[2]
20350 + , re->re_sastate.sa_saved_indigest[3]
20351 + , re->re_sastate.sa_saved_indigest[4]
20352 + );
20355 +static void
20356 +safe_dump_ring(struct safe_softc *sc, const char *tag)
20358 + unsigned long flags;
20360 + spin_lock_irqsave(&sc->sc_ringmtx, flags);
20361 + printf("\nSafeNet Ring State:\n");
20362 + safe_dump_intrstate(sc, tag);
20363 + safe_dump_dmastatus(sc, tag);
20364 + safe_dump_ringstate(sc, tag);
20365 + if (sc->sc_nqchip) {
20366 + struct safe_ringentry *re = sc->sc_back;
20367 + do {
20368 + safe_dump_request(sc, tag, re);
20369 + if (++re == sc->sc_ringtop)
20370 + re = sc->sc_ring;
20371 + } while (re != sc->sc_front);
20373 + spin_unlock_irqrestore(&sc->sc_ringmtx, flags);
20375 +#endif /* SAFE_DEBUG */
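+/* PCI probe: map the device registers, allocate the descriptor and particle rings, detect the chip's capabilities and register the supported algorithms with the crypto framework. */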
20378 +static int safe_probe(struct pci_dev *dev, const struct pci_device_id *ent)
20380 + struct safe_softc *sc = NULL;
20381 + u32 mem_start, mem_len, cmd;
20382 + int i, rc, devinfo;
20383 + dma_addr_t raddr;
20384 + static int num_chips = 0;
20386 + DPRINTF(("%s()\n", __FUNCTION__));
20388 + if (pci_enable_device(dev) < 0)
20389 + return(-ENODEV);
20391 + if (!dev->irq) {
20392 + printk("safe: found device with no IRQ assigned. check BIOS settings!");
20393 + pci_disable_device(dev);
20394 + return(-ENODEV);
20397 + if (pci_set_mwi(dev)) {
20398 + printk("safe: pci_set_mwi failed!");
20399 + return(-ENODEV);
20402 + sc = (struct safe_softc *) kmalloc(sizeof(*sc), GFP_KERNEL);
20403 + if (!sc)
20404 + return(-ENOMEM);
20405 + memset(sc, 0, sizeof(*sc));
20407 + softc_device_init(sc, "safe", num_chips, safe_methods);
20409 + sc->sc_irq = -1;
20410 + sc->sc_cid = -1;
20411 + sc->sc_pcidev = dev;
20412 + if (num_chips < SAFE_MAX_CHIPS) {
20413 + safe_chip_idx[device_get_unit(sc->sc_dev)] = sc;
20414 + num_chips++;
20417 + INIT_LIST_HEAD(&sc->sc_pkq);
20418 + spin_lock_init(&sc->sc_pkmtx);
20420 + pci_set_drvdata(sc->sc_pcidev, sc);
20422 + /* we read its hardware registers as memory */
20423 + mem_start = pci_resource_start(sc->sc_pcidev, 0);
20424 + mem_len = pci_resource_len(sc->sc_pcidev, 0);
20426 + sc->sc_base_addr = (ocf_iomem_t) ioremap(mem_start, mem_len);
20427 + if (!sc->sc_base_addr) {
20428 + device_printf(sc->sc_dev, "failed to ioremap 0x%x-0x%x\n",
20429 + mem_start, mem_start + mem_len - 1);
20430 + goto out;
20433 + /* fix up the bus size */
20434 + if (pci_set_dma_mask(sc->sc_pcidev, DMA_32BIT_MASK)) {
20435 + device_printf(sc->sc_dev, "No usable DMA configuration, aborting.\n");
20436 + goto out;
20438 + if (pci_set_consistent_dma_mask(sc->sc_pcidev, DMA_32BIT_MASK)) {
20439 + device_printf(sc->sc_dev, "No usable consistent DMA configuration, aborting.\n");
20440 + goto out;
20443 + pci_set_master(sc->sc_pcidev);
20445 + pci_read_config_dword(sc->sc_pcidev, PCI_COMMAND, &cmd);
20447 + if (!(cmd & PCI_COMMAND_MEMORY)) {
20448 + device_printf(sc->sc_dev, "failed to enable memory mapping\n");
20449 + goto out;
20452 + if (!(cmd & PCI_COMMAND_MASTER)) {
20453 + device_printf(sc->sc_dev, "failed to enable bus mastering\n");
20454 + goto out;
20457 + rc = request_irq(dev->irq, safe_intr, IRQF_SHARED, "safe", sc);
20458 + if (rc) {
20459 + device_printf(sc->sc_dev, "failed to hook irq %d\n", dev->irq);
20460 + goto out;
20462 + sc->sc_irq = dev->irq;
20464 + sc->sc_chiprev = READ_REG(sc, SAFE_DEVINFO) &
20465 + (SAFE_DEVINFO_REV_MAJ | SAFE_DEVINFO_REV_MIN);
20467 + /*
20468 + * Allocate packet engine descriptors.
20469 + */
20470 + sc->sc_ringalloc.dma_vaddr = pci_alloc_consistent(sc->sc_pcidev,
20471 + SAFE_MAX_NQUEUE * sizeof (struct safe_ringentry),
20472 + &sc->sc_ringalloc.dma_paddr);
20473 + if (!sc->sc_ringalloc.dma_vaddr) {
20474 + device_printf(sc->sc_dev, "cannot allocate PE descriptor ring\n");
20475 + goto out;
20478 + /*
20479 + * Hookup the static portion of all our data structures.
20480 + */
20481 + sc->sc_ring = (struct safe_ringentry *) sc->sc_ringalloc.dma_vaddr;
20482 + sc->sc_ringtop = sc->sc_ring + SAFE_MAX_NQUEUE;
20483 + sc->sc_front = sc->sc_ring;
20484 + sc->sc_back = sc->sc_ring;
20485 + raddr = sc->sc_ringalloc.dma_paddr;
20486 + bzero(sc->sc_ring, SAFE_MAX_NQUEUE * sizeof(struct safe_ringentry));
20487 + for (i = 0; i < SAFE_MAX_NQUEUE; i++) {
20488 + struct safe_ringentry *re = &sc->sc_ring[i];
20490 + re->re_desc.d_sa = raddr +
20491 + offsetof(struct safe_ringentry, re_sa);
20492 + re->re_sa.sa_staterec = raddr +
20493 + offsetof(struct safe_ringentry, re_sastate);
20495 + raddr += sizeof (struct safe_ringentry);
20497 + spin_lock_init(&sc->sc_ringmtx);
20499 + /*
20500 + * Allocate scatter and gather particle descriptors.
20501 + */
20502 + sc->sc_spalloc.dma_vaddr = pci_alloc_consistent(sc->sc_pcidev,
20503 + SAFE_TOTAL_SPART * sizeof (struct safe_pdesc),
20504 + &sc->sc_spalloc.dma_paddr);
20505 + if (!sc->sc_spalloc.dma_vaddr) {
20506 + device_printf(sc->sc_dev, "cannot allocate source particle descriptor ring\n");
20507 + goto out;
20509 + sc->sc_spring = (struct safe_pdesc *) sc->sc_spalloc.dma_vaddr;
20510 + sc->sc_springtop = sc->sc_spring + SAFE_TOTAL_SPART;
20511 + sc->sc_spfree = sc->sc_spring;
20512 + bzero(sc->sc_spring, SAFE_TOTAL_SPART * sizeof(struct safe_pdesc));
20514 + sc->sc_dpalloc.dma_vaddr = pci_alloc_consistent(sc->sc_pcidev,
20515 + SAFE_TOTAL_DPART * sizeof (struct safe_pdesc),
20516 + &sc->sc_dpalloc.dma_paddr);
20517 + if (!sc->sc_dpalloc.dma_vaddr) {
20518 + device_printf(sc->sc_dev, "cannot allocate destination particle descriptor ring\n");
20519 + goto out;
20521 + sc->sc_dpring = (struct safe_pdesc *) sc->sc_dpalloc.dma_vaddr;
20522 + sc->sc_dpringtop = sc->sc_dpring + SAFE_TOTAL_DPART;
20523 + sc->sc_dpfree = sc->sc_dpring;
20524 + bzero(sc->sc_dpring, SAFE_TOTAL_DPART * sizeof(struct safe_pdesc));
20526 + sc->sc_cid = crypto_get_driverid(softc_get_device(sc), CRYPTOCAP_F_HARDWARE);
20527 + if (sc->sc_cid < 0) {
20528 + device_printf(sc->sc_dev, "could not get crypto driver id\n");
20529 + goto out;
20532 + printf("%s:", device_get_nameunit(sc->sc_dev));
20534 + devinfo = READ_REG(sc, SAFE_DEVINFO);
20535 + if (devinfo & SAFE_DEVINFO_RNG) {
20536 + sc->sc_flags |= SAFE_FLAGS_RNG;
20537 + printf(" rng");
20539 + if (devinfo & SAFE_DEVINFO_PKEY) {
20540 + printf(" key");
20541 + sc->sc_flags |= SAFE_FLAGS_KEY;
20542 + crypto_kregister(sc->sc_cid, CRK_MOD_EXP, 0);
20543 +#if 0
20544 + crypto_kregister(sc->sc_cid, CRK_MOD_EXP_CRT, 0);
20545 +#endif
20546 + init_timer(&sc->sc_pkto);
20547 + sc->sc_pkto.function = safe_kpoll;
20548 + sc->sc_pkto.data = (unsigned long) device_get_unit(sc->sc_dev);
20550 + if (devinfo & SAFE_DEVINFO_DES) {
20551 + printf(" des/3des");
20552 + crypto_register(sc->sc_cid, CRYPTO_3DES_CBC, 0, 0);
20553 + crypto_register(sc->sc_cid, CRYPTO_DES_CBC, 0, 0);
20555 + if (devinfo & SAFE_DEVINFO_AES) {
20556 + printf(" aes");
20557 + crypto_register(sc->sc_cid, CRYPTO_AES_CBC, 0, 0);
20559 + if (devinfo & SAFE_DEVINFO_MD5) {
20560 + printf(" md5");
20561 + crypto_register(sc->sc_cid, CRYPTO_MD5_HMAC, 0, 0);
20563 + if (devinfo & SAFE_DEVINFO_SHA1) {
20564 + printf(" sha1");
20565 + crypto_register(sc->sc_cid, CRYPTO_SHA1_HMAC, 0, 0);
20567 + printf(" null");
20568 + crypto_register(sc->sc_cid, CRYPTO_NULL_CBC, 0, 0);
20569 + crypto_register(sc->sc_cid, CRYPTO_NULL_HMAC, 0, 0);
20570 + /* XXX other supported algorithms */
20571 + printf("\n");
20573 + safe_reset_board(sc); /* reset h/w */
20574 + safe_init_board(sc); /* init h/w */
20576 +#if defined(CONFIG_OCF_RANDOMHARVEST) && !defined(SAFE_NO_RNG)
20577 + if (sc->sc_flags & SAFE_FLAGS_RNG) {
20578 + safe_rng_init(sc);
20579 + crypto_rregister(sc->sc_cid, safe_read_random, sc);
20581 +#endif /* SAFE_NO_RNG */
20583 + return (0);
20585 +out:
20586 + if (sc->sc_cid >= 0)
20587 + crypto_unregister_all(sc->sc_cid);
20588 + if (sc->sc_irq != -1)
20589 + free_irq(sc->sc_irq, sc);
20590 + if (sc->sc_ringalloc.dma_vaddr)
20591 + pci_free_consistent(sc->sc_pcidev,
20592 + SAFE_MAX_NQUEUE * sizeof (struct safe_ringentry),
20593 + sc->sc_ringalloc.dma_vaddr, sc->sc_ringalloc.dma_paddr);
20594 + if (sc->sc_spalloc.dma_vaddr)
20595 + pci_free_consistent(sc->sc_pcidev,
20596 + SAFE_TOTAL_SPART * sizeof (struct safe_pdesc),
20597 + sc->sc_spalloc.dma_vaddr, sc->sc_spalloc.dma_paddr);
20598 + if (sc->sc_dpalloc.dma_vaddr)
20599 + pci_free_consistent(sc->sc_pcidev,
20600 + SAFE_TOTAL_DPART * sizeof (struct safe_pdesc),
20601 + sc->sc_dpalloc.dma_vaddr, sc->sc_dpalloc.dma_paddr);
20602 + kfree(sc);
20603 + return(-ENODEV);
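+/* PCI remove: mask interrupts, unregister from the crypto framework and release the IRQ, descriptor rings and DMA memory. */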
20606 +static void safe_remove(struct pci_dev *dev)
20608 + struct safe_softc *sc = pci_get_drvdata(dev);
20610 + DPRINTF(("%s()\n", __FUNCTION__));
20612 + /* XXX wait/abort active ops */
20614 + WRITE_REG(sc, SAFE_HI_MASK, 0); /* disable interrupts */
20616 + del_timer_sync(&sc->sc_pkto);
20618 + crypto_unregister_all(sc->sc_cid);
20620 + safe_cleanchip(sc);
20622 + if (sc->sc_irq != -1)
20623 + free_irq(sc->sc_irq, sc);
20624 + if (sc->sc_ringalloc.dma_vaddr)
20625 + pci_free_consistent(sc->sc_pcidev,
20626 + SAFE_MAX_NQUEUE * sizeof (struct safe_ringentry),
20627 + sc->sc_ringalloc.dma_vaddr, sc->sc_ringalloc.dma_paddr);
20628 + if (sc->sc_spalloc.dma_vaddr)
20629 + pci_free_consistent(sc->sc_pcidev,
20630 + SAFE_TOTAL_SPART * sizeof (struct safe_pdesc),
20631 + sc->sc_spalloc.dma_vaddr, sc->sc_spalloc.dma_paddr);
20632 + if (sc->sc_dpalloc.dma_vaddr)
20633 + pci_free_consistent(sc->sc_pcidev,
20634 + SAFE_TOTAL_DPART * sizeof (struct safe_pdesc),
20635 + sc->sc_dpalloc.dma_vaddr, sc->sc_dpalloc.dma_paddr);
20636 + sc->sc_irq = -1;
20637 + sc->sc_ringalloc.dma_vaddr = NULL;
20638 + sc->sc_spalloc.dma_vaddr = NULL;
20639 + sc->sc_dpalloc.dma_vaddr = NULL;
20642 +static struct pci_device_id safe_pci_tbl[] = {
20643 + { PCI_VENDOR_SAFENET, PCI_PRODUCT_SAFEXCEL,
20644 + PCI_ANY_ID, PCI_ANY_ID, 0, 0, },
20645 + { },
20647 +MODULE_DEVICE_TABLE(pci, safe_pci_tbl);
20649 +static struct pci_driver safe_driver = {
20650 + .name = "safe",
20651 + .id_table = safe_pci_tbl,
20652 + .probe = safe_probe,
20653 + .remove = safe_remove,
20654 + /* add PM stuff here one day */
20657 +static int __init safe_init (void)
20659 + struct safe_softc *sc = NULL;
20660 + int rc;
20662 + DPRINTF(("%s(%p)\n", __FUNCTION__, safe_init));
20664 + rc = pci_register_driver(&safe_driver);
20665 + pci_register_driver_compat(&safe_driver, rc);
20667 + return rc;
20670 +static void __exit safe_exit (void)
20672 + pci_unregister_driver(&safe_driver);
20675 +module_init(safe_init);
20676 +module_exit(safe_exit);
20678 +MODULE_LICENSE("BSD");
20679 +MODULE_AUTHOR("David McCullough <david_mccullough@securecomputing.com>");
20680 +MODULE_DESCRIPTION("OCF driver for safenet PCI crypto devices");
20681 diff -Nur linux-2.6.30.orig/crypto/ocf/safe/safereg.h linux-2.6.30/crypto/ocf/safe/safereg.h
20682 --- linux-2.6.30.orig/crypto/ocf/safe/safereg.h 1970-01-01 01:00:00.000000000 +0100
20683 +++ linux-2.6.30/crypto/ocf/safe/safereg.h 2009-06-11 10:55:27.000000000 +0200
20684 @@ -0,0 +1,421 @@
20685 +/*-
20686 + * Copyright (c) 2003 Sam Leffler, Errno Consulting
20687 + * Copyright (c) 2003 Global Technology Associates, Inc.
20688 + * All rights reserved.
20690 + * Redistribution and use in source and binary forms, with or without
20691 + * modification, are permitted provided that the following conditions
20692 + * are met:
20693 + * 1. Redistributions of source code must retain the above copyright
20694 + * notice, this list of conditions and the following disclaimer.
20695 + * 2. Redistributions in binary form must reproduce the above copyright
20696 + * notice, this list of conditions and the following disclaimer in the
20697 + * documentation and/or other materials provided with the distribution.
20699 + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
20700 + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
20701 + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
20702 + * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
20703 + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
20704 + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
20705 + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
20706 + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
20707 + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
20708 + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
20709 + * SUCH DAMAGE.
20711 + * $FreeBSD: src/sys/dev/safe/safereg.h,v 1.1 2003/07/21 21:46:07 sam Exp $
20712 + */
20713 +#ifndef _SAFE_SAFEREG_H_
20714 +#define _SAFE_SAFEREG_H_
20717 + * Register definitions for SafeNet SafeXcel-1141 crypto device.
20718 + * Definitions from revision 1.3 (Nov 6 2002) of the User's Manual.
20719 + */
20721 +#define BS_BAR 0x10 /* DMA base address register */
20722 +#define BS_TRDY_TIMEOUT 0x40 /* TRDY timeout */
20723 +#define BS_RETRY_TIMEOUT 0x41 /* DMA retry timeout */
20725 +#define PCI_VENDOR_SAFENET 0x16ae /* SafeNet, Inc. */
20727 +/* SafeNet */
20728 +#define PCI_PRODUCT_SAFEXCEL 0x1141 /* 1141 */
20730 +#define SAFE_PE_CSR 0x0000 /* Packet Engine Ctrl/Status */
20731 +#define SAFE_PE_SRC 0x0004 /* Packet Engine Source */
20732 +#define SAFE_PE_DST 0x0008 /* Packet Engine Destination */
20733 +#define SAFE_PE_SA 0x000c /* Packet Engine SA */
20734 +#define SAFE_PE_LEN 0x0010 /* Packet Engine Length */
20735 +#define SAFE_PE_DMACFG 0x0040 /* Packet Engine DMA Configuration */
20736 +#define SAFE_PE_DMASTAT 0x0044 /* Packet Engine DMA Status */
20737 +#define SAFE_PE_PDRBASE 0x0048 /* Packet Engine Descriptor Ring Base */
20738 +#define SAFE_PE_RDRBASE 0x004c /* Packet Engine Result Ring Base */
20739 +#define SAFE_PE_RINGCFG 0x0050 /* Packet Engine Ring Configuration */
20740 +#define SAFE_PE_RINGPOLL 0x0054 /* Packet Engine Ring Poll */
20741 +#define SAFE_PE_IRNGSTAT 0x0058 /* Packet Engine Internal Ring Status */
20742 +#define SAFE_PE_ERNGSTAT 0x005c /* Packet Engine External Ring Status */
20743 +#define SAFE_PE_IOTHRESH 0x0060 /* Packet Engine I/O Threshold */
20744 +#define SAFE_PE_GRNGBASE 0x0064 /* Packet Engine Gather Ring Base */
20745 +#define SAFE_PE_SRNGBASE 0x0068 /* Packet Engine Scatter Ring Base */
20746 +#define SAFE_PE_PARTSIZE 0x006c /* Packet Engine Particle Ring Size */
20747 +#define SAFE_PE_PARTCFG 0x0070 /* Packet Engine Particle Ring Config */
20748 +#define SAFE_CRYPTO_CTRL 0x0080 /* Crypto Control */
20749 +#define SAFE_DEVID 0x0084 /* Device ID */
20750 +#define SAFE_DEVINFO 0x0088 /* Device Info */
20751 +#define SAFE_HU_STAT 0x00a0 /* Host Unmasked Status */
20752 +#define SAFE_HM_STAT 0x00a4 /* Host Masked Status (read-only) */
20753 +#define SAFE_HI_CLR 0x00a4 /* Host Clear Interrupt (write-only) */
20754 +#define SAFE_HI_MASK 0x00a8 /* Host Mask Control */
20755 +#define SAFE_HI_CFG 0x00ac /* Interrupt Configuration */
20756 +#define SAFE_HI_RD_DESCR 0x00b4 /* Force Descriptor Read */
20757 +#define SAFE_HI_DESC_CNT 0x00b8 /* Host Descriptor Done Count */
20758 +#define SAFE_DMA_ENDIAN 0x00c0 /* Master Endian Status */
20759 +#define SAFE_DMA_SRCADDR 0x00c4 /* DMA Source Address Status */
20760 +#define SAFE_DMA_DSTADDR 0x00c8 /* DMA Destination Address Status */
20761 +#define SAFE_DMA_STAT 0x00cc /* DMA Current Status */
20762 +#define SAFE_DMA_CFG 0x00d4 /* DMA Configuration/Status */
20763 +#define SAFE_ENDIAN 0x00e0 /* Endian Configuration */
20764 +#define SAFE_PK_A_ADDR 0x0800 /* Public Key A Address */
20765 +#define SAFE_PK_B_ADDR 0x0804 /* Public Key B Address */
20766 +#define SAFE_PK_C_ADDR 0x0808 /* Public Key C Address */
20767 +#define SAFE_PK_D_ADDR 0x080c /* Public Key D Address */
20768 +#define SAFE_PK_A_LEN 0x0810 /* Public Key A Length */
20769 +#define SAFE_PK_B_LEN 0x0814 /* Public Key B Length */
20770 +#define SAFE_PK_SHIFT 0x0818 /* Public Key Shift */
20771 +#define SAFE_PK_FUNC 0x081c /* Public Key Function */
20772 +#define SAFE_PK_RAM_START 0x1000 /* Public Key RAM start address */
20773 +#define SAFE_PK_RAM_END 0x1fff /* Public Key RAM end address */
20775 +#define SAFE_RNG_OUT 0x0100 /* RNG Output */
20776 +#define SAFE_RNG_STAT 0x0104 /* RNG Status */
20777 +#define SAFE_RNG_CTRL 0x0108 /* RNG Control */
20778 +#define SAFE_RNG_A 0x010c /* RNG A */
20779 +#define SAFE_RNG_B 0x0110 /* RNG B */
20780 +#define SAFE_RNG_X_LO 0x0114 /* RNG X [31:0] */
20781 +#define SAFE_RNG_X_MID 0x0118 /* RNG X [63:32] */
20782 +#define SAFE_RNG_X_HI 0x011c /* RNG X [80:64] */
20783 +#define SAFE_RNG_X_CNTR 0x0120 /* RNG Counter */
20784 +#define SAFE_RNG_ALM_CNT 0x0124 /* RNG Alarm Count */
20785 +#define SAFE_RNG_CNFG 0x0128 /* RNG Configuration */
20786 +#define SAFE_RNG_LFSR1_LO 0x012c /* RNG LFSR1 [31:0] */
20787 +#define SAFE_RNG_LFSR1_HI 0x0130 /* RNG LFSR1 [47:32] */
20788 +#define SAFE_RNG_LFSR2_LO 0x0134 /* RNG LFSR2 [31:0] */
20789 +#define SAFE_RNG_LFSR2_HI 0x0138 /* RNG LFSR2 [47:32] */
20791 +#define SAFE_PE_CSR_READY 0x00000001 /* ready for processing */
20792 +#define SAFE_PE_CSR_DONE 0x00000002 /* h/w completed processing */
20793 +#define SAFE_PE_CSR_LOADSA 0x00000004 /* load SA digests */
20794 +#define SAFE_PE_CSR_HASHFINAL 0x00000010 /* do hash pad & write result */
20795 +#define SAFE_PE_CSR_SABUSID 0x000000c0 /* bus id for SA */
20796 +#define SAFE_PE_CSR_SAPCI 0x00000040 /* PCI bus id for SA */
20797 +#define SAFE_PE_CSR_NXTHDR 0x0000ff00 /* next hdr value for IPsec */
20798 +#define SAFE_PE_CSR_FPAD 0x0000ff00 /* fixed pad for basic ops */
20799 +#define SAFE_PE_CSR_STATUS 0x00ff0000 /* operation result status */
20800 +#define SAFE_PE_CSR_AUTH_FAIL 0x00010000 /* ICV mismatch (inbound) */
20801 +#define SAFE_PE_CSR_PAD_FAIL 0x00020000 /* pad verify fail (inbound) */
20802 +#define SAFE_PE_CSR_SEQ_FAIL 0x00040000 /* sequence number (inbound) */
20803 +#define SAFE_PE_CSR_XERROR 0x00080000 /* extended error follows */
20804 +#define SAFE_PE_CSR_XECODE 0x00f00000 /* extended error code */
20805 +#define SAFE_PE_CSR_XECODE_S 20
20806 +#define SAFE_PE_CSR_XECODE_BADCMD 0 /* invalid command */
20807 +#define SAFE_PE_CSR_XECODE_BADALG 1 /* invalid algorithm */
20808 +#define SAFE_PE_CSR_XECODE_ALGDIS 2 /* algorithm disabled */
20809 +#define SAFE_PE_CSR_XECODE_ZEROLEN 3 /* zero packet length */
20810 +#define SAFE_PE_CSR_XECODE_DMAERR 4 /* bus DMA error */
20811 +#define SAFE_PE_CSR_XECODE_PIPEABORT 5 /* secondary bus DMA error */
20812 +#define SAFE_PE_CSR_XECODE_BADSPI 6 /* IPsec SPI mismatch */
20813 +#define SAFE_PE_CSR_XECODE_TIMEOUT 10 /* failsafe timeout */
20814 +#define SAFE_PE_CSR_PAD 0xff000000 /* ESP padding control/status */
20815 +#define SAFE_PE_CSR_PAD_MIN 0x00000000 /* minimum IPsec padding */
20816 +#define SAFE_PE_CSR_PAD_16 0x08000000 /* pad to 16-byte boundary */
20817 +#define SAFE_PE_CSR_PAD_32 0x10000000 /* pad to 32-byte boundary */
20818 +#define SAFE_PE_CSR_PAD_64 0x20000000 /* pad to 64-byte boundary */
20819 +#define SAFE_PE_CSR_PAD_128 0x40000000 /* pad to 128-byte boundary */
20820 +#define SAFE_PE_CSR_PAD_256 0x80000000 /* pad to 256-byte boundary */
20823 + * Check the CSR to see if the PE has returned ownership to
20824 + * the host. Note that before processing a descriptor this
20825 + * must be done followed by a check of the SAFE_PE_LEN register
20826 + * status bits to avoid premature processing of a descriptor
20827 + * on its way back to the host.
20828 + */
20829 +#define SAFE_PE_CSR_IS_DONE(_csr) \
20830 + (((_csr) & (SAFE_PE_CSR_READY | SAFE_PE_CSR_DONE)) == SAFE_PE_CSR_DONE)
20832 +#define SAFE_PE_LEN_LENGTH 0x000fffff /* total length (bytes) */
20833 +#define SAFE_PE_LEN_READY 0x00400000 /* ready for processing */
20834 +#define SAFE_PE_LEN_DONE 0x00800000 /* h/w completed processing */
20835 +#define SAFE_PE_LEN_BYPASS 0xff000000 /* bypass offset (bytes) */
20836 +#define SAFE_PE_LEN_BYPASS_S 24
20838 +#define SAFE_PE_LEN_IS_DONE(_len) \
20839 + (((_len) & (SAFE_PE_LEN_READY | SAFE_PE_LEN_DONE)) == SAFE_PE_LEN_DONE)
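Illustrative sketch (not part of the patch): the two-step ownership test the comment above prescribes, applied to the csr and len words read back from a ring entry.

static inline int example_pe_entry_done(u_int32_t csr, u_int32_t len)
{
	/* both words must show DONE without READY before the host may
	 * process the descriptor, per the note above */
	return SAFE_PE_CSR_IS_DONE(csr) && SAFE_PE_LEN_IS_DONE(len);
}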
20841 +/* NB: these apply to HU_STAT, HM_STAT, HI_CLR, and HI_MASK */
20842 +#define SAFE_INT_PE_CDONE 0x00000002 /* PE context done */
20843 +#define SAFE_INT_PE_DDONE 0x00000008 /* PE descriptor done */
20844 +#define SAFE_INT_PE_ERROR 0x00000010 /* PE error */
20845 +#define SAFE_INT_PE_ODONE 0x00000020 /* PE operation done */
20847 +#define SAFE_HI_CFG_PULSE 0x00000001 /* use pulse interrupt */
20848 +#define SAFE_HI_CFG_LEVEL 0x00000000 /* use level interrupt */
20849 +#define SAFE_HI_CFG_AUTOCLR 0x00000002 /* auto-clear pulse interrupt */
20851 +#define SAFE_ENDIAN_PASS 0x000000e4 /* straight pass-thru */
20852 +#define SAFE_ENDIAN_SWAB 0x0000001b /* swap bytes in 32-bit word */
20854 +#define SAFE_PE_DMACFG_PERESET 0x00000001 /* reset packet engine */
20855 +#define SAFE_PE_DMACFG_PDRRESET 0x00000002 /* reset PDR counters/ptrs */
20856 +#define SAFE_PE_DMACFG_SGRESET 0x00000004 /* reset scatter/gather cache */
20857 +#define SAFE_PE_DMACFG_FSENA 0x00000008 /* enable failsafe reset */
20858 +#define SAFE_PE_DMACFG_PEMODE 0x00000100 /* packet engine mode */
20859 +#define SAFE_PE_DMACFG_SAPREC 0x00000200 /* SA precedes packet */
20860 +#define SAFE_PE_DMACFG_PKFOLL 0x00000400 /* packet follows descriptor */
20861 +#define SAFE_PE_DMACFG_GPRBID 0x00003000 /* gather particle ring busid */
20862 +#define SAFE_PE_DMACFG_GPRPCI 0x00001000 /* PCI gather particle ring */
20863 +#define SAFE_PE_DMACFG_SPRBID 0x0000c000 /* scatter part. ring busid */
20864 +#define SAFE_PE_DMACFG_SPRPCI 0x00004000 /* PCI scatter part. ring */
20865 +#define SAFE_PE_DMACFG_ESDESC 0x00010000 /* endian swap descriptors */
20866 +#define SAFE_PE_DMACFG_ESSA 0x00020000 /* endian swap SA data */
20867 +#define SAFE_PE_DMACFG_ESPACKET 0x00040000 /* endian swap packet data */
20868 +#define SAFE_PE_DMACFG_ESPDESC 0x00080000 /* endian swap particle desc. */
20869 +#define SAFE_PE_DMACFG_NOPDRUP 0x00100000 /* supp. PDR ownership update */
20870 +#define SAFE_PD_EDMACFG_PCIMODE 0x01000000 /* PCI target mode */
20872 +#define SAFE_PE_DMASTAT_PEIDONE 0x00000001 /* PE core input done */
20873 +#define SAFE_PE_DMASTAT_PEODONE 0x00000002 /* PE core output done */
20874 +#define SAFE_PE_DMASTAT_ENCDONE 0x00000004 /* encryption done */
20875 +#define SAFE_PE_DMASTAT_IHDONE 0x00000008 /* inner hash done */
20876 +#define SAFE_PE_DMASTAT_OHDONE 0x00000010 /* outer hash (HMAC) done */
20877 +#define SAFE_PE_DMASTAT_PADFLT 0x00000020 /* crypto pad fault */
20878 +#define SAFE_PE_DMASTAT_ICVFLT 0x00000040 /* ICV fault */
20879 +#define SAFE_PE_DMASTAT_SPIMIS 0x00000080 /* SPI mismatch */
20880 +#define SAFE_PE_DMASTAT_CRYPTO 0x00000100 /* crypto engine timeout */
20881 +#define SAFE_PE_DMASTAT_CQACT 0x00000200 /* command queue active */
20882 +#define SAFE_PE_DMASTAT_IRACT 0x00000400 /* input request active */
20883 +#define SAFE_PE_DMASTAT_ORACT 0x00000800 /* output request active */
20884 +#define SAFE_PE_DMASTAT_PEISIZE 0x003ff000 /* PE input size:32-bit words */
20885 +#define SAFE_PE_DMASTAT_PEOSIZE 0xffc00000 /* PE out. size:32-bit words */
20887 +#define SAFE_PE_RINGCFG_SIZE 0x000003ff /* ring size (descriptors) */
20888 +#define SAFE_PE_RINGCFG_OFFSET 0xffff0000 /* offset btw desc's (dwords) */
20889 +#define SAFE_PE_RINGCFG_OFFSET_S 16
20891 +#define SAFE_PE_RINGPOLL_POLL 0x00000fff /* polling frequency/divisor */
20892 +#define SAFE_PE_RINGPOLL_RETRY 0x03ff0000 /* polling frequency/divisor */
20893 +#define SAFE_PE_RINGPOLL_CONT 0x80000000 /* continuously poll */
20895 +#define SAFE_PE_IRNGSTAT_CQAVAIL 0x00000001 /* command queue available */
20897 +#define SAFE_PE_ERNGSTAT_NEXT 0x03ff0000 /* index of next packet desc. */
20898 +#define SAFE_PE_ERNGSTAT_NEXT_S 16
20900 +#define SAFE_PE_IOTHRESH_INPUT 0x000003ff /* input threshold (dwords) */
20901 +#define SAFE_PE_IOTHRESH_OUTPUT 0x03ff0000 /* output threshold (dwords) */
20903 +#define SAFE_PE_PARTCFG_SIZE 0x0000ffff /* scatter particle size */
20904 +#define SAFE_PE_PARTCFG_GBURST 0x00030000 /* gather particle burst */
20905 +#define SAFE_PE_PARTCFG_GBURST_2 0x00000000
20906 +#define SAFE_PE_PARTCFG_GBURST_4 0x00010000
20907 +#define SAFE_PE_PARTCFG_GBURST_8 0x00020000
20908 +#define SAFE_PE_PARTCFG_GBURST_16 0x00030000
20909 +#define SAFE_PE_PARTCFG_SBURST 0x000c0000 /* scatter particle burst */
20910 +#define SAFE_PE_PARTCFG_SBURST_2 0x00000000
20911 +#define SAFE_PE_PARTCFG_SBURST_4 0x00040000
20912 +#define SAFE_PE_PARTCFG_SBURST_8 0x00080000
20913 +#define SAFE_PE_PARTCFG_SBURST_16 0x000c0000
20915 +#define SAFE_PE_PARTSIZE_SCAT 0xffff0000 /* scatter particle ring size */
20916 +#define SAFE_PE_PARTSIZE_GATH 0x0000ffff /* gather particle ring size */
20918 +#define SAFE_CRYPTO_CTRL_3DES 0x00000001 /* enable 3DES support */
20919 +#define SAFE_CRYPTO_CTRL_PKEY 0x00010000 /* enable public key support */
20920 +#define SAFE_CRYPTO_CTRL_RNG 0x00020000 /* enable RNG support */
20922 +#define SAFE_DEVINFO_REV_MIN 0x0000000f /* minor rev for chip */
20923 +#define SAFE_DEVINFO_REV_MAJ 0x000000f0 /* major rev for chip */
20924 +#define SAFE_DEVINFO_REV_MAJ_S 4
20925 +#define SAFE_DEVINFO_DES 0x00000100 /* DES/3DES support present */
20926 +#define SAFE_DEVINFO_ARC4 0x00000200 /* ARC4 support present */
20927 +#define SAFE_DEVINFO_AES 0x00000400 /* AES support present */
20928 +#define SAFE_DEVINFO_MD5 0x00001000 /* MD5 support present */
20929 +#define SAFE_DEVINFO_SHA1 0x00002000 /* SHA-1 support present */
20930 +#define SAFE_DEVINFO_RIPEMD 0x00004000 /* RIPEMD support present */
20931 +#define SAFE_DEVINFO_DEFLATE 0x00010000 /* Deflate support present */
20932 +#define SAFE_DEVINFO_SARAM 0x00100000 /* on-chip SA RAM present */
20933 +#define SAFE_DEVINFO_EMIBUS 0x00200000 /* EMI bus present */
20934 +#define SAFE_DEVINFO_PKEY 0x00400000 /* public key support present */
20935 +#define SAFE_DEVINFO_RNG 0x00800000 /* RNG present */
20937 +#define SAFE_REV(_maj, _min) (((_maj) << SAFE_DEVINFO_REV_MAJ_S) | (_min))
20938 +#define SAFE_REV_MAJ(_chiprev) \
20939 + (((_chiprev) & SAFE_DEVINFO_REV_MAJ) >> SAFE_DEVINFO_REV_MAJ_S)
20940 +#define SAFE_REV_MIN(_chiprev) ((_chiprev) & SAFE_DEVINFO_REV_MIN)
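Illustrative only: decoding a chip revision from a DEVINFO value with the macros above (a value of 0x21, for example, decodes to revision 2.1). The helper name is hypothetical.

static inline int example_rev_at_least(u_int32_t devinfo, u_int32_t maj, u_int32_t min)
{
	return SAFE_REV(SAFE_REV_MAJ(devinfo), SAFE_REV_MIN(devinfo)) >=
	       SAFE_REV(maj, min);
}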
20942 +#define SAFE_PK_FUNC_MULT 0x00000001 /* Multiply function */
20943 +#define SAFE_PK_FUNC_SQUARE 0x00000004 /* Square function */
20944 +#define SAFE_PK_FUNC_ADD 0x00000010 /* Add function */
20945 +#define SAFE_PK_FUNC_SUB 0x00000020 /* Subtract function */
20946 +#define SAFE_PK_FUNC_LSHIFT 0x00000040 /* Left-shift function */
20947 +#define SAFE_PK_FUNC_RSHIFT 0x00000080 /* Right-shift function */
20948 +#define SAFE_PK_FUNC_DIV 0x00000100 /* Divide function */
20949 +#define SAFE_PK_FUNC_CMP 0x00000400 /* Compare function */
20950 +#define SAFE_PK_FUNC_COPY 0x00000800 /* Copy function */
20951 +#define SAFE_PK_FUNC_EXP16 0x00002000 /* Exponentiate (4-bit ACT) */
20952 +#define SAFE_PK_FUNC_EXP4 0x00004000 /* Exponentiate (2-bit ACT) */
20953 +#define SAFE_PK_FUNC_RUN 0x00008000 /* start/status */
20955 +#define SAFE_RNG_STAT_BUSY 0x00000001 /* busy, data not valid */
20957 +#define SAFE_RNG_CTRL_PRE_LFSR 0x00000001 /* enable output pre-LFSR */
20958 +#define SAFE_RNG_CTRL_TST_MODE 0x00000002 /* enable test mode */
20959 +#define SAFE_RNG_CTRL_TST_RUN 0x00000004 /* start test state machine */
20960 +#define SAFE_RNG_CTRL_ENA_RING1 0x00000008 /* test entropy oscillator #1 */
20961 +#define SAFE_RNG_CTRL_ENA_RING2 0x00000010 /* test entropy oscillator #2 */
20962 +#define SAFE_RNG_CTRL_DIS_ALARM 0x00000020 /* disable RNG alarm reports */
20963 +#define SAFE_RNG_CTRL_TST_CLOCK 0x00000040 /* enable test clock */
20964 +#define SAFE_RNG_CTRL_SHORTEN 0x00000080 /* shorten state timers */
20965 +#define SAFE_RNG_CTRL_TST_ALARM 0x00000100 /* simulate alarm state */
20966 +#define SAFE_RNG_CTRL_RST_LFSR 0x00000200 /* reset LFSR */
20969 + * Packet engine descriptor. Note that d_csr is a copy of the
20970 + * SAFE_PE_CSR register and all definitions apply, and d_len
20971 + * is a copy of the SAFE_PE_LEN register and all definitions apply.
20972 + * d_src and d_len may point directly to contiguous data or to a
20973 + * list of ``particle descriptors'' when using scatter/gather i/o.
20974 + */
20975 +struct safe_desc {
20976 + u_int32_t d_csr; /* per-packet control/status */
20977 + u_int32_t d_src; /* source address */
20978 + u_int32_t d_dst; /* destination address */
20979 + u_int32_t d_sa; /* SA address */
20980 + u_int32_t d_len; /* length, bypass, status */
20984 + * Scatter/Gather particle descriptor.
20986 + * NB: scatter descriptors do not specify a size; this is fixed
20987 + * by the setting of the SAFE_PE_PARTCFG register.
20988 + */
20989 +struct safe_pdesc {
20990 + u_int32_t pd_addr; /* particle address */
20991 +#ifdef __BIG_ENDIAN
20992 + u_int16_t pd_flags; /* control word */
20993 + u_int16_t pd_size; /* particle size (bytes) */
20994 +#else
20995 + u_int16_t pd_flags; /* control word */
20996 + u_int16_t pd_size; /* particle size (bytes) */
20997 +#endif
21000 +#define SAFE_PD_READY 0x0001 /* ready for processing */
21001 +#define SAFE_PD_DONE 0x0002 /* h/w completed processing */
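A sketch of queueing one gather particle with the structure and flags above; the helper and its `pd', `addr' and `len' parameters are assumptions for illustration, and scatter entries would omit the size as the NB above explains.

static inline void example_queue_gather(struct safe_pdesc *pd,
					u_int32_t addr, u_int16_t len)
{
	pd->pd_addr  = addr;		/* DMA address of the segment */
	pd->pd_size  = len;		/* gather entries carry a size */
	pd->pd_flags = SAFE_PD_READY;	/* hand the particle to the PE */
}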
21004 + * Security Association (SA) Record (Rev 1). One of these is
21005 + * required for each operation processed by the packet engine.
21006 + */
21007 +struct safe_sarec {
21008 + u_int32_t sa_cmd0;
21009 + u_int32_t sa_cmd1;
21010 + u_int32_t sa_resv0;
21011 + u_int32_t sa_resv1;
21012 + u_int32_t sa_key[8]; /* DES/3DES/AES key */
21013 + u_int32_t sa_indigest[5]; /* inner digest */
21014 + u_int32_t sa_outdigest[5]; /* outer digest */
21015 + u_int32_t sa_spi; /* SPI */
21016 + u_int32_t sa_seqnum; /* sequence number */
21017 + u_int32_t sa_seqmask[2]; /* sequence number mask */
21018 + u_int32_t sa_resv2;
21019 + u_int32_t sa_staterec; /* address of state record */
21020 + u_int32_t sa_resv3[2];
21021 + u_int32_t sa_samgmt0; /* SA management field 0 */
21022 + u_int32_t sa_samgmt1; /* SA management field 1 */
21025 +#define SAFE_SA_CMD0_OP 0x00000007 /* operation code */
21026 +#define SAFE_SA_CMD0_OP_CRYPT 0x00000000 /* encrypt/decrypt (basic) */
21027 +#define SAFE_SA_CMD0_OP_BOTH 0x00000001 /* encrypt-hash/hash-decrypt */
21028 +#define SAFE_SA_CMD0_OP_HASH 0x00000003 /* hash (outbound-only) */
21029 +#define SAFE_SA_CMD0_OP_ESP 0x00000000 /* ESP in/out (proto) */
21030 +#define SAFE_SA_CMD0_OP_AH 0x00000001 /* AH in/out (proto) */
21031 +#define SAFE_SA_CMD0_INBOUND 0x00000008 /* inbound operation */
21032 +#define SAFE_SA_CMD0_OUTBOUND 0x00000000 /* outbound operation */
21033 +#define SAFE_SA_CMD0_GROUP 0x00000030 /* operation group */
21034 +#define SAFE_SA_CMD0_BASIC 0x00000000 /* basic operation */
21035 +#define SAFE_SA_CMD0_PROTO 0x00000010 /* protocol/packet operation */
21036 +#define SAFE_SA_CMD0_BUNDLE 0x00000020 /* bundled operation (resvd) */
21037 +#define SAFE_SA_CMD0_PAD 0x000000c0 /* crypto pad method */
21038 +#define SAFE_SA_CMD0_PAD_IPSEC 0x00000000 /* IPsec padding */
21039 +#define SAFE_SA_CMD0_PAD_PKCS7 0x00000040 /* PKCS#7 padding */
21040 +#define SAFE_SA_CMD0_PAD_CONS 0x00000080 /* constant padding */
21041 +#define SAFE_SA_CMD0_PAD_ZERO 0x000000c0 /* zero padding */
21042 +#define SAFE_SA_CMD0_CRYPT_ALG 0x00000f00 /* symmetric crypto algorithm */
21043 +#define SAFE_SA_CMD0_DES 0x00000000 /* DES crypto algorithm */
21044 +#define SAFE_SA_CMD0_3DES 0x00000100 /* 3DES crypto algorithm */
21045 +#define SAFE_SA_CMD0_AES 0x00000300 /* AES crypto algorithm */
21046 +#define SAFE_SA_CMD0_CRYPT_NULL 0x00000f00 /* null crypto algorithm */
21047 +#define SAFE_SA_CMD0_HASH_ALG 0x0000f000 /* hash algorithm */
21048 +#define SAFE_SA_CMD0_MD5 0x00000000 /* MD5 hash algorithm */
21049 +#define SAFE_SA_CMD0_SHA1 0x00001000 /* SHA-1 hash algorithm */
21050 +#define SAFE_SA_CMD0_HASH_NULL 0x0000f000 /* null hash algorithm */
21051 +#define SAFE_SA_CMD0_HDR_PROC 0x00080000 /* header processing */
21052 +#define SAFE_SA_CMD0_IBUSID 0x00300000 /* input bus id */
21053 +#define SAFE_SA_CMD0_IPCI 0x00100000 /* PCI input bus id */
21054 +#define SAFE_SA_CMD0_OBUSID 0x00c00000 /* output bus id */
21055 +#define SAFE_SA_CMD0_OPCI 0x00400000 /* PCI output bus id */
21056 +#define SAFE_SA_CMD0_IVLD 0x03000000 /* IV loading */
21057 +#define SAFE_SA_CMD0_IVLD_NONE 0x00000000 /* IV no load (reuse) */
21058 +#define SAFE_SA_CMD0_IVLD_IBUF 0x01000000 /* IV load from input buffer */
21059 +#define SAFE_SA_CMD0_IVLD_STATE 0x02000000 /* IV load from state */
21060 +#define SAFE_SA_CMD0_HSLD 0x0c000000 /* hash state loading */
21061 +#define SAFE_SA_CMD0_HSLD_SA 0x00000000 /* hash state load from SA */
21062 +#define SAFE_SA_CMD0_HSLD_STATE 0x08000000 /* hash state load from state */
21063 +#define SAFE_SA_CMD0_HSLD_NONE 0x0c000000 /* hash state no load */
21064 +#define SAFE_SA_CMD0_SAVEIV 0x10000000 /* save IV */
21065 +#define SAFE_SA_CMD0_SAVEHASH 0x20000000 /* save hash state */
21066 +#define SAFE_SA_CMD0_IGATHER 0x40000000 /* input gather */
21067 +#define SAFE_SA_CMD0_OSCATTER 0x80000000 /* output scatter */
21069 +#define SAFE_SA_CMD1_HDRCOPY 0x00000002 /* copy header to output */
21070 +#define SAFE_SA_CMD1_PAYCOPY 0x00000004 /* copy payload to output */
21071 +#define SAFE_SA_CMD1_PADCOPY 0x00000008 /* copy pad to output */
21072 +#define SAFE_SA_CMD1_IPV4 0x00000000 /* IPv4 protocol */
21073 +#define SAFE_SA_CMD1_IPV6 0x00000010 /* IPv6 protocol */
21074 +#define SAFE_SA_CMD1_MUTABLE 0x00000020 /* mutable bit processing */
21075 +#define SAFE_SA_CMD1_SRBUSID 0x000000c0 /* state record bus id */
21076 +#define SAFE_SA_CMD1_SRPCI 0x00000040 /* state record from PCI */
21077 +#define SAFE_SA_CMD1_CRMODE 0x00000300 /* crypto mode */
21078 +#define SAFE_SA_CMD1_ECB 0x00000000 /* ECB crypto mode */
21079 +#define SAFE_SA_CMD1_CBC 0x00000100 /* CBC crypto mode */
21080 +#define SAFE_SA_CMD1_OFB 0x00000200 /* OFB crypto mode */
21081 +#define SAFE_SA_CMD1_CFB 0x00000300 /* CFB crypto mode */
21082 +#define SAFE_SA_CMD1_CRFEEDBACK 0x00000c00 /* crypto feedback mode */
21083 +#define SAFE_SA_CMD1_64BIT 0x00000000 /* 64-bit crypto feedback */
21084 +#define SAFE_SA_CMD1_8BIT 0x00000400 /* 8-bit crypto feedback */
21085 +#define SAFE_SA_CMD1_1BIT 0x00000800 /* 1-bit crypto feedback */
21086 +#define SAFE_SA_CMD1_128BIT 0x00000c00 /* 128-bit crypto feedback */
21087 +#define SAFE_SA_CMD1_OPTIONS 0x00001000 /* HMAC/options mutable bit */
21088 +#define SAFE_SA_CMD1_HMAC SAFE_SA_CMD1_OPTIONS
21089 +#define SAFE_SA_CMD1_SAREV1 0x00008000 /* SA Revision 1 */
21090 +#define SAFE_SA_CMD1_OFFSET 0x00ff0000 /* hash/crypto offset(dwords) */
21091 +#define SAFE_SA_CMD1_OFFSET_S 16
21092 +#define SAFE_SA_CMD1_AESKEYLEN 0x0f000000 /* AES key length */
21093 +#define SAFE_SA_CMD1_AES128 0x02000000 /* 128-bit AES key */
21094 +#define SAFE_SA_CMD1_AES192 0x03000000 /* 192-bit AES key */
21095 +#define SAFE_SA_CMD1_AES256 0x04000000 /* 256-bit AES key */
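Sketch only, not the driver's code: one plausible encoding of an outbound AES-128-CBC plus HMAC-SHA1 "basic" operation using the sa_cmd0/sa_cmd1 bits above; the particular field choices are assumptions for illustration.

static void example_fill_sa_cmds(struct safe_sarec *sa)
{
	sa->sa_cmd0 = SAFE_SA_CMD0_OP_BOTH | SAFE_SA_CMD0_OUTBOUND |
		      SAFE_SA_CMD0_BASIC | SAFE_SA_CMD0_AES |
		      SAFE_SA_CMD0_SHA1 | SAFE_SA_CMD0_IVLD_STATE |
		      SAFE_SA_CMD0_SAVEIV;
	sa->sa_cmd1 = SAFE_SA_CMD1_CBC | SAFE_SA_CMD1_AES128 |
		      SAFE_SA_CMD1_HMAC | SAFE_SA_CMD1_SAREV1;
}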
21097 +/*
21098 + * Security Association State Record (Rev 1).
21099 + */
21100 +struct safe_sastate {
21101 + u_int32_t sa_saved_iv[4]; /* saved IV (DES/3DES/AES) */
21102 + u_int32_t sa_saved_hashbc; /* saved hash byte count */
21103 + u_int32_t sa_saved_indigest[5]; /* saved inner digest */
21105 +#endif /* _SAFE_SAFEREG_H_ */
21106 diff -Nur linux-2.6.30.orig/crypto/ocf/safe/safevar.h linux-2.6.30/crypto/ocf/safe/safevar.h
21107 --- linux-2.6.30.orig/crypto/ocf/safe/safevar.h 1970-01-01 01:00:00.000000000 +0100
21108 +++ linux-2.6.30/crypto/ocf/safe/safevar.h 2009-06-11 10:55:27.000000000 +0200
21109 @@ -0,0 +1,230 @@
21110 +/*-
21111 + * The linux port of this code done by David McCullough
21112 + * Copyright (C) 2004-2007 David McCullough <david_mccullough@securecomputing.com>
21113 + * The license and original author are listed below.
21115 + * Copyright (c) 2003 Sam Leffler, Errno Consulting
21116 + * Copyright (c) 2003 Global Technology Associates, Inc.
21117 + * All rights reserved.
21119 + * Redistribution and use in source and binary forms, with or without
21120 + * modification, are permitted provided that the following conditions
21121 + * are met:
21122 + * 1. Redistributions of source code must retain the above copyright
21123 + * notice, this list of conditions and the following disclaimer.
21124 + * 2. Redistributions in binary form must reproduce the above copyright
21125 + * notice, this list of conditions and the following disclaimer in the
21126 + * documentation and/or other materials provided with the distribution.
21128 + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
21129 + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
21130 + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
21131 + * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
21132 + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
21133 + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
21134 + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
21135 + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
21136 + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
21137 + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
21138 + * SUCH DAMAGE.
21140 + * $FreeBSD: src/sys/dev/safe/safevar.h,v 1.2 2006/05/17 18:34:26 pjd Exp $
21141 + */
21142 +#ifndef _SAFE_SAFEVAR_H_
21143 +#define _SAFE_SAFEVAR_H_
21145 +/* Maximum queue length */
21146 +#ifndef SAFE_MAX_NQUEUE
21147 +#define SAFE_MAX_NQUEUE 60
21148 +#endif
21150 +#define SAFE_MAX_PART 64 /* Maximum scatter/gather depth */
21151 +#define SAFE_DMA_BOUNDARY 0 /* No boundary for source DMA ops */
21152 +#define SAFE_MAX_DSIZE 2048 /* MCLBYTES Fixed scatter particle size */
21153 +#define SAFE_MAX_SSIZE 0x0ffff /* Maximum gather particle size */
21154 +#define SAFE_MAX_DMA 0xfffff /* Maximum PE operand size (20 bits) */
21155 +/* total src+dst particle descriptors */
21156 +#define SAFE_TOTAL_DPART (SAFE_MAX_NQUEUE * SAFE_MAX_PART)
21157 +#define SAFE_TOTAL_SPART (SAFE_MAX_NQUEUE * SAFE_MAX_PART)
21159 +#define SAFE_RNG_MAXBUFSIZ 128 /* 32-bit words */
21161 +#define SAFE_CARD(sid) (((sid) & 0xf0000000) >> 28)
21162 +#define SAFE_SESSION(sid) ( (sid) & 0x0fffffff)
21163 +#define SAFE_SID(crd, sesn) (((crd) << 28) | ((sesn) & 0x0fffffff))
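Illustrative round trip through the SID packing macros above (hypothetical helper, not part of the patch): a card index in the top nibble, a session number in the low 28 bits.

static inline int example_sid_roundtrip_ok(u_int32_t card, u_int32_t sesn)
{
	u_int32_t sid = SAFE_SID(card, sesn);

	return SAFE_CARD(sid) == (card & 0xf) &&
	       SAFE_SESSION(sid) == (sesn & 0x0fffffff);
}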
21165 +#define SAFE_DEF_RTY 0xff /* PCI Retry Timeout */
21166 +#define SAFE_DEF_TOUT 0xff /* PCI TRDY Timeout */
21167 +#define SAFE_DEF_CACHELINE 0x01 /* Cache Line setting */
21169 +#ifdef __KERNEL__
21171 + * State associated with the allocation of each chunk
21172 + * of memory setup for DMA.
21173 + */
21174 +struct safe_dma_alloc {
21175 + dma_addr_t dma_paddr;
21176 + void *dma_vaddr;
21180 + * Cryptographic operand state. One of these exists for each
21181 + * source and destination operand passed in from the crypto
21182 + * subsystem. When possible source and destination operands
21183 + * refer to the same memory. More often they are distinct.
21184 + * We track the virtual address of each operand as well as
21185 + * where each is mapped for DMA.
21186 + */
21187 +struct safe_operand {
21188 + union {
21189 + struct sk_buff *skb;
21190 + struct uio *io;
21191 + } u;
21192 + void *map;
21193 + int mapsize; /* total number of bytes in segs */
21194 + struct {
21195 + dma_addr_t ds_addr;
21196 + int ds_len;
21197 + int ds_tlen;
21198 + } segs[SAFE_MAX_PART];
21199 + int nsegs;
21203 + * Packet engine ring entry and cryptographic operation state.
21204 + * The packet engine requires a ring of descriptors that contain
21205 + * pointers to various cryptographic state. However the ring
21206 + * configuration register allows you to specify an arbitrary size
21207 + * for ring entries. We use this feature to collect most of the
21208 + * state for each cryptographic request into one spot. Other than
21209 + * ring entries only the ``particle descriptors'' (scatter/gather
21210 + * lists) and the actual operand data are kept separate. The
21211 + * particle descriptors must also be organized in rings. The
21212 + * operand data can be located arbitrarily (modulo alignment constraints).
21214 + * Note that the descriptor ring is mapped onto the PCI bus so
21215 + * the hardware can DMA data. This means the entire ring must be
21216 + * contiguous.
21217 + */
21218 +struct safe_ringentry {
21219 + struct safe_desc re_desc; /* command descriptor */
21220 + struct safe_sarec re_sa; /* SA record */
21221 + struct safe_sastate re_sastate; /* SA state record */
21223 + struct cryptop *re_crp; /* crypto operation */
21225 + struct safe_operand re_src; /* source operand */
21226 + struct safe_operand re_dst; /* destination operand */
21228 + int re_sesn; /* crypto session ID */
21229 + int re_flags;
21230 +#define SAFE_QFLAGS_COPYOUTIV 0x1 /* copy back on completion */
21231 +#define SAFE_QFLAGS_COPYOUTICV 0x2 /* copy back on completion */
21234 +#define re_src_skb re_src.u.skb
21235 +#define re_src_io re_src.u.io
21236 +#define re_src_map re_src.map
21237 +#define re_src_nsegs re_src.nsegs
21238 +#define re_src_segs re_src.segs
21239 +#define re_src_mapsize re_src.mapsize
21241 +#define re_dst_skb re_dst.u.skb
21242 +#define re_dst_io re_dst.u.io
21243 +#define re_dst_map re_dst.map
21244 +#define re_dst_nsegs re_dst.nsegs
21245 +#define re_dst_segs re_dst.segs
21246 +#define re_dst_mapsize re_dst.mapsize
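A worked sizing note (illustrative; assumes safereg.h is also included for struct safe_pdesc): the coherent DMA blocks freed in safe_remove() earlier in this patch follow from the limits above, e.g. each particle ring holds SAFE_MAX_NQUEUE * SAFE_MAX_PART = 60 * 64 = 3840 descriptors per direction.

static inline size_t example_particle_ring_bytes(void)
{
	/* one direction's worth of particle descriptors (src or dst) */
	return SAFE_TOTAL_DPART * sizeof(struct safe_pdesc);
}

static inline size_t example_pe_ring_bytes(void)
{
	/* the packet-engine descriptor ring proper */
	return SAFE_MAX_NQUEUE * sizeof(struct safe_ringentry);
}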
21248 +struct rndstate_test;
21250 +struct safe_session {
21251 + u_int32_t ses_used;
21252 + u_int32_t ses_klen; /* key length in bits */
21253 + u_int32_t ses_key[8]; /* DES/3DES/AES key */
21254 + u_int32_t ses_mlen; /* hmac length in bytes */
21255 + u_int32_t ses_hminner[5]; /* hmac inner state */
21256 + u_int32_t ses_hmouter[5]; /* hmac outer state */
21257 + u_int32_t ses_iv[4]; /* DES/3DES/AES iv */
21260 +struct safe_pkq {
21261 + struct list_head pkq_list;
21262 + struct cryptkop *pkq_krp;
21265 +struct safe_softc {
21266 + softc_device_decl sc_dev;
21267 + u32 sc_irq;
21269 + struct pci_dev *sc_pcidev;
21270 + ocf_iomem_t sc_base_addr;
21272 + u_int sc_chiprev; /* major/minor chip revision */
21273 + int sc_flags; /* device specific flags */
21274 +#define SAFE_FLAGS_KEY 0x01 /* has key accelerator */
21275 +#define SAFE_FLAGS_RNG 0x02 /* hardware rng */
21276 + int sc_suspended;
21277 + int sc_needwakeup; /* notify crypto layer */
21278 + int32_t sc_cid; /* crypto tag */
21280 + struct safe_dma_alloc sc_ringalloc; /* PE ring allocation state */
21281 + struct safe_ringentry *sc_ring; /* PE ring */
21282 + struct safe_ringentry *sc_ringtop; /* PE ring top */
21283 + struct safe_ringentry *sc_front; /* next free entry */
21284 + struct safe_ringentry *sc_back; /* next pending entry */
21285 + int sc_nqchip; /* # passed to chip */
21286 + spinlock_t sc_ringmtx; /* PE ring lock */
21287 + struct safe_pdesc *sc_spring; /* src particle ring */
21288 + struct safe_pdesc *sc_springtop; /* src particle ring top */
21289 + struct safe_pdesc *sc_spfree; /* next free src particle */
21290 + struct safe_dma_alloc sc_spalloc; /* src particle ring state */
21291 + struct safe_pdesc *sc_dpring; /* dest particle ring */
21292 + struct safe_pdesc *sc_dpringtop; /* dest particle ring top */
21293 + struct safe_pdesc *sc_dpfree; /* next free dest particle */
21294 + struct safe_dma_alloc sc_dpalloc; /* dst particle ring state */
21295 + int sc_nsessions; /* # of sessions */
21296 + struct safe_session *sc_sessions; /* sessions */
21298 + struct timer_list sc_pkto; /* PK polling */
21299 + spinlock_t sc_pkmtx; /* PK lock */
21300 + struct list_head sc_pkq; /* queue of PK requests */
21301 + struct safe_pkq *sc_pkq_cur; /* current processing request */
21302 + u_int32_t sc_pk_reslen, sc_pk_resoff;
21304 + int sc_max_dsize; /* maximum safe DMA size */
21306 +#endif /* __KERNEL__ */
21308 +struct safe_stats {
21309 + u_int64_t st_ibytes;
21310 + u_int64_t st_obytes;
21311 + u_int32_t st_ipackets;
21312 + u_int32_t st_opackets;
21313 + u_int32_t st_invalid; /* invalid argument */
21314 + u_int32_t st_badsession; /* invalid session id */
21315 + u_int32_t st_badflags; /* flags indicate !(mbuf | uio) */
21316 + u_int32_t st_nodesc; /* op submitted w/o descriptors */
21317 + u_int32_t st_badalg; /* unsupported algorithm */
21318 + u_int32_t st_ringfull; /* PE descriptor ring full */
21319 + u_int32_t st_peoperr; /* PE marked error */
21320 + u_int32_t st_dmaerr; /* PE DMA error */
21321 + u_int32_t st_bypasstoobig; /* bypass > 96 bytes */
21322 + u_int32_t st_skipmismatch; /* enc part begins before auth part */
21323 + u_int32_t st_lenmismatch; /* enc length differs from auth length */
21324 + u_int32_t st_coffmisaligned; /* crypto offset not 32-bit aligned */
21325 + u_int32_t st_cofftoobig; /* crypto offset > 255 words */
21326 + u_int32_t st_iovmisaligned; /* iov op not aligned */
21327 + u_int32_t st_iovnotuniform; /* iov op not suitable */
21328 + u_int32_t st_unaligned; /* unaligned src caused copy */
21329 + u_int32_t st_notuniform; /* non-uniform src caused copy */
21330 + u_int32_t st_nomap; /* bus_dmamap_create failed */
21331 + u_int32_t st_noload; /* bus_dmamap_load_* failed */
21332 + u_int32_t st_nombuf; /* MGET* failed */
21333 + u_int32_t st_nomcl; /* MCLGET* failed */
21334 + u_int32_t st_maxqchip; /* max mcr1 ops out for processing */
21335 + u_int32_t st_rng; /* RNG requests */
21336 + u_int32_t st_rngalarm; /* RNG alarm requests */
21337 + u_int32_t st_noicvcopy; /* ICV data copies suppressed */
21339 +#endif /* _SAFE_SAFEVAR_H_ */
21340 diff -Nur linux-2.6.30.orig/crypto/ocf/safe/sha1.c linux-2.6.30/crypto/ocf/safe/sha1.c
21341 --- linux-2.6.30.orig/crypto/ocf/safe/sha1.c 1970-01-01 01:00:00.000000000 +0100
21342 +++ linux-2.6.30/crypto/ocf/safe/sha1.c 2009-06-11 10:55:27.000000000 +0200
21343 @@ -0,0 +1,279 @@
21344 +/* $KAME: sha1.c,v 1.5 2000/11/08 06:13:08 itojun Exp $ */
21346 + * Copyright (C) 1995, 1996, 1997, and 1998 WIDE Project.
21347 + * All rights reserved.
21349 + * Redistribution and use in source and binary forms, with or without
21350 + * modification, are permitted provided that the following conditions
21351 + * are met:
21352 + * 1. Redistributions of source code must retain the above copyright
21353 + * notice, this list of conditions and the following disclaimer.
21354 + * 2. Redistributions in binary form must reproduce the above copyright
21355 + * notice, this list of conditions and the following disclaimer in the
21356 + * documentation and/or other materials provided with the distribution.
21357 + * 3. Neither the name of the project nor the names of its contributors
21358 + * may be used to endorse or promote products derived from this software
21359 + * without specific prior written permission.
21361 + * THIS SOFTWARE IS PROVIDED BY THE PROJECT AND CONTRIBUTORS ``AS IS'' AND
21362 + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
21363 + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
21364 + * ARE DISCLAIMED. IN NO EVENT SHALL THE PROJECT OR CONTRIBUTORS BE LIABLE
21365 + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
21366 + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
21367 + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
21368 + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
21369 + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
21370 + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
21371 + * SUCH DAMAGE.
21372 + */
21375 + * FIPS pub 180-1: Secure Hash Algorithm (SHA-1)
21376 + * based on: http://csrc.nist.gov/fips/fip180-1.txt
21377 + * implemented by Jun-ichiro itojun Itoh <itojun@itojun.org>
21378 + */
21380 +#if 0
21381 +#include <sys/cdefs.h>
21382 +__FBSDID("$FreeBSD: src/sys/crypto/sha1.c,v 1.9 2003/06/10 21:36:57 obrien Exp $");
21384 +#include <sys/types.h>
21385 +#include <sys/cdefs.h>
21386 +#include <sys/time.h>
21387 +#include <sys/systm.h>
21389 +#include <crypto/sha1.h>
21390 +#endif
21392 +/* sanity check */
21393 +#if BYTE_ORDER != BIG_ENDIAN
21394 +# if BYTE_ORDER != LITTLE_ENDIAN
21395 +# define unsupported 1
21396 +# endif
21397 +#endif
21399 +#ifndef unsupported
21401 +/* constant table */
21402 +static u_int32_t _K[] = { 0x5a827999, 0x6ed9eba1, 0x8f1bbcdc, 0xca62c1d6 };
21403 +#define K(t) _K[(t) / 20]
21405 +#define F0(b, c, d) (((b) & (c)) | ((~(b)) & (d)))
21406 +#define F1(b, c, d) (((b) ^ (c)) ^ (d))
21407 +#define F2(b, c, d) (((b) & (c)) | ((b) & (d)) | ((c) & (d)))
21408 +#define F3(b, c, d) (((b) ^ (c)) ^ (d))
21410 +#define S(n, x) (((x) << (n)) | ((x) >> (32 - n)))
21412 +#undef H
21413 +#define H(n) (ctxt->h.b32[(n)])
21414 +#define COUNT (ctxt->count)
21415 +#define BCOUNT (ctxt->c.b64[0] / 8)
21416 +#define W(n) (ctxt->m.b32[(n)])
21418 +#define PUTBYTE(x) { \
21419 + ctxt->m.b8[(COUNT % 64)] = (x); \
21420 + COUNT++; \
21421 + COUNT %= 64; \
21422 + ctxt->c.b64[0] += 8; \
21423 + if (COUNT % 64 == 0) \
21424 + sha1_step(ctxt); \
21427 +#define PUTPAD(x) { \
21428 + ctxt->m.b8[(COUNT % 64)] = (x); \
21429 + COUNT++; \
21430 + COUNT %= 64; \
21431 + if (COUNT % 64 == 0) \
21432 + sha1_step(ctxt); \
21435 +static void sha1_step(struct sha1_ctxt *);
21437 +static void
21438 +sha1_step(ctxt)
21439 + struct sha1_ctxt *ctxt;
21441 + u_int32_t a, b, c, d, e;
21442 + size_t t, s;
21443 + u_int32_t tmp;
21445 +#if BYTE_ORDER == LITTLE_ENDIAN
21446 + struct sha1_ctxt tctxt;
21447 + bcopy(&ctxt->m.b8[0], &tctxt.m.b8[0], 64);
21448 + ctxt->m.b8[0] = tctxt.m.b8[3]; ctxt->m.b8[1] = tctxt.m.b8[2];
21449 + ctxt->m.b8[2] = tctxt.m.b8[1]; ctxt->m.b8[3] = tctxt.m.b8[0];
21450 + ctxt->m.b8[4] = tctxt.m.b8[7]; ctxt->m.b8[5] = tctxt.m.b8[6];
21451 + ctxt->m.b8[6] = tctxt.m.b8[5]; ctxt->m.b8[7] = tctxt.m.b8[4];
21452 + ctxt->m.b8[8] = tctxt.m.b8[11]; ctxt->m.b8[9] = tctxt.m.b8[10];
21453 + ctxt->m.b8[10] = tctxt.m.b8[9]; ctxt->m.b8[11] = tctxt.m.b8[8];
21454 + ctxt->m.b8[12] = tctxt.m.b8[15]; ctxt->m.b8[13] = tctxt.m.b8[14];
21455 + ctxt->m.b8[14] = tctxt.m.b8[13]; ctxt->m.b8[15] = tctxt.m.b8[12];
21456 + ctxt->m.b8[16] = tctxt.m.b8[19]; ctxt->m.b8[17] = tctxt.m.b8[18];
21457 + ctxt->m.b8[18] = tctxt.m.b8[17]; ctxt->m.b8[19] = tctxt.m.b8[16];
21458 + ctxt->m.b8[20] = tctxt.m.b8[23]; ctxt->m.b8[21] = tctxt.m.b8[22];
21459 + ctxt->m.b8[22] = tctxt.m.b8[21]; ctxt->m.b8[23] = tctxt.m.b8[20];
21460 + ctxt->m.b8[24] = tctxt.m.b8[27]; ctxt->m.b8[25] = tctxt.m.b8[26];
21461 + ctxt->m.b8[26] = tctxt.m.b8[25]; ctxt->m.b8[27] = tctxt.m.b8[24];
21462 + ctxt->m.b8[28] = tctxt.m.b8[31]; ctxt->m.b8[29] = tctxt.m.b8[30];
21463 + ctxt->m.b8[30] = tctxt.m.b8[29]; ctxt->m.b8[31] = tctxt.m.b8[28];
21464 + ctxt->m.b8[32] = tctxt.m.b8[35]; ctxt->m.b8[33] = tctxt.m.b8[34];
21465 + ctxt->m.b8[34] = tctxt.m.b8[33]; ctxt->m.b8[35] = tctxt.m.b8[32];
21466 + ctxt->m.b8[36] = tctxt.m.b8[39]; ctxt->m.b8[37] = tctxt.m.b8[38];
21467 + ctxt->m.b8[38] = tctxt.m.b8[37]; ctxt->m.b8[39] = tctxt.m.b8[36];
21468 + ctxt->m.b8[40] = tctxt.m.b8[43]; ctxt->m.b8[41] = tctxt.m.b8[42];
21469 + ctxt->m.b8[42] = tctxt.m.b8[41]; ctxt->m.b8[43] = tctxt.m.b8[40];
21470 + ctxt->m.b8[44] = tctxt.m.b8[47]; ctxt->m.b8[45] = tctxt.m.b8[46];
21471 + ctxt->m.b8[46] = tctxt.m.b8[45]; ctxt->m.b8[47] = tctxt.m.b8[44];
21472 + ctxt->m.b8[48] = tctxt.m.b8[51]; ctxt->m.b8[49] = tctxt.m.b8[50];
21473 + ctxt->m.b8[50] = tctxt.m.b8[49]; ctxt->m.b8[51] = tctxt.m.b8[48];
21474 + ctxt->m.b8[52] = tctxt.m.b8[55]; ctxt->m.b8[53] = tctxt.m.b8[54];
21475 + ctxt->m.b8[54] = tctxt.m.b8[53]; ctxt->m.b8[55] = tctxt.m.b8[52];
21476 + ctxt->m.b8[56] = tctxt.m.b8[59]; ctxt->m.b8[57] = tctxt.m.b8[58];
21477 + ctxt->m.b8[58] = tctxt.m.b8[57]; ctxt->m.b8[59] = tctxt.m.b8[56];
21478 + ctxt->m.b8[60] = tctxt.m.b8[63]; ctxt->m.b8[61] = tctxt.m.b8[62];
21479 + ctxt->m.b8[62] = tctxt.m.b8[61]; ctxt->m.b8[63] = tctxt.m.b8[60];
21480 +#endif
21482 + a = H(0); b = H(1); c = H(2); d = H(3); e = H(4);
21484 + for (t = 0; t < 20; t++) {
21485 + s = t & 0x0f;
21486 + if (t >= 16) {
21487 + W(s) = S(1, W((s+13) & 0x0f) ^ W((s+8) & 0x0f) ^ W((s+2) & 0x0f) ^ W(s));
21489 + tmp = S(5, a) + F0(b, c, d) + e + W(s) + K(t);
21490 + e = d; d = c; c = S(30, b); b = a; a = tmp;
21492 + for (t = 20; t < 40; t++) {
21493 + s = t & 0x0f;
21494 + W(s) = S(1, W((s+13) & 0x0f) ^ W((s+8) & 0x0f) ^ W((s+2) & 0x0f) ^ W(s));
21495 + tmp = S(5, a) + F1(b, c, d) + e + W(s) + K(t);
21496 + e = d; d = c; c = S(30, b); b = a; a = tmp;
21498 + for (t = 40; t < 60; t++) {
21499 + s = t & 0x0f;
21500 + W(s) = S(1, W((s+13) & 0x0f) ^ W((s+8) & 0x0f) ^ W((s+2) & 0x0f) ^ W(s));
21501 + tmp = S(5, a) + F2(b, c, d) + e + W(s) + K(t);
21502 + e = d; d = c; c = S(30, b); b = a; a = tmp;
21504 + for (t = 60; t < 80; t++) {
21505 + s = t & 0x0f;
21506 + W(s) = S(1, W((s+13) & 0x0f) ^ W((s+8) & 0x0f) ^ W((s+2) & 0x0f) ^ W(s));
21507 + tmp = S(5, a) + F3(b, c, d) + e + W(s) + K(t);
21508 + e = d; d = c; c = S(30, b); b = a; a = tmp;
21511 + H(0) = H(0) + a;
21512 + H(1) = H(1) + b;
21513 + H(2) = H(2) + c;
21514 + H(3) = H(3) + d;
21515 + H(4) = H(4) + e;
21517 + bzero(&ctxt->m.b8[0], 64);
21520 +/*------------------------------------------------------------*/
21522 +void
21523 +sha1_init(ctxt)
21524 + struct sha1_ctxt *ctxt;
21526 + bzero(ctxt, sizeof(struct sha1_ctxt));
21527 + H(0) = 0x67452301;
21528 + H(1) = 0xefcdab89;
21529 + H(2) = 0x98badcfe;
21530 + H(3) = 0x10325476;
21531 + H(4) = 0xc3d2e1f0;
21534 +void
21535 +sha1_pad(ctxt)
21536 + struct sha1_ctxt *ctxt;
21538 + size_t padlen; /*pad length in bytes*/
21539 + size_t padstart;
21541 + PUTPAD(0x80);
21543 + padstart = COUNT % 64;
21544 + padlen = 64 - padstart;
21545 + if (padlen < 8) {
21546 + bzero(&ctxt->m.b8[padstart], padlen);
21547 + COUNT += padlen;
21548 + COUNT %= 64;
21549 + sha1_step(ctxt);
21550 + padstart = COUNT % 64; /* should be 0 */
21551 + padlen = 64 - padstart; /* should be 64 */
21553 + bzero(&ctxt->m.b8[padstart], padlen - 8);
21554 + COUNT += (padlen - 8);
21555 + COUNT %= 64;
21556 +#if BYTE_ORDER == BIG_ENDIAN
21557 + PUTPAD(ctxt->c.b8[0]); PUTPAD(ctxt->c.b8[1]);
21558 + PUTPAD(ctxt->c.b8[2]); PUTPAD(ctxt->c.b8[3]);
21559 + PUTPAD(ctxt->c.b8[4]); PUTPAD(ctxt->c.b8[5]);
21560 + PUTPAD(ctxt->c.b8[6]); PUTPAD(ctxt->c.b8[7]);
21561 +#else
21562 + PUTPAD(ctxt->c.b8[7]); PUTPAD(ctxt->c.b8[6]);
21563 + PUTPAD(ctxt->c.b8[5]); PUTPAD(ctxt->c.b8[4]);
21564 + PUTPAD(ctxt->c.b8[3]); PUTPAD(ctxt->c.b8[2]);
21565 + PUTPAD(ctxt->c.b8[1]); PUTPAD(ctxt->c.b8[0]);
21566 +#endif
21569 +void
21570 +sha1_loop(ctxt, input, len)
21571 + struct sha1_ctxt *ctxt;
21572 + const u_int8_t *input;
21573 + size_t len;
21575 + size_t gaplen;
21576 + size_t gapstart;
21577 + size_t off;
21578 + size_t copysiz;
21580 + off = 0;
21582 + while (off < len) {
21583 + gapstart = COUNT % 64;
21584 + gaplen = 64 - gapstart;
21586 + copysiz = (gaplen < len - off) ? gaplen : len - off;
21587 + bcopy(&input[off], &ctxt->m.b8[gapstart], copysiz);
21588 + COUNT += copysiz;
21589 + COUNT %= 64;
21590 + ctxt->c.b64[0] += copysiz * 8;
21591 + if (COUNT % 64 == 0)
21592 + sha1_step(ctxt);
21593 + off += copysiz;
21597 +void
21598 +sha1_result(ctxt, digest0)
21599 + struct sha1_ctxt *ctxt;
21600 + caddr_t digest0;
21602 + u_int8_t *digest;
21604 + digest = (u_int8_t *)digest0;
21605 + sha1_pad(ctxt);
21606 +#if BYTE_ORDER == BIG_ENDIAN
21607 + bcopy(&ctxt->h.b8[0], digest, 20);
21608 +#else
21609 + digest[0] = ctxt->h.b8[3]; digest[1] = ctxt->h.b8[2];
21610 + digest[2] = ctxt->h.b8[1]; digest[3] = ctxt->h.b8[0];
21611 + digest[4] = ctxt->h.b8[7]; digest[5] = ctxt->h.b8[6];
21612 + digest[6] = ctxt->h.b8[5]; digest[7] = ctxt->h.b8[4];
21613 + digest[8] = ctxt->h.b8[11]; digest[9] = ctxt->h.b8[10];
21614 + digest[10] = ctxt->h.b8[9]; digest[11] = ctxt->h.b8[8];
21615 + digest[12] = ctxt->h.b8[15]; digest[13] = ctxt->h.b8[14];
21616 + digest[14] = ctxt->h.b8[13]; digest[15] = ctxt->h.b8[12];
21617 + digest[16] = ctxt->h.b8[19]; digest[17] = ctxt->h.b8[18];
21618 + digest[18] = ctxt->h.b8[17]; digest[19] = ctxt->h.b8[16];
21619 +#endif
21622 +#endif /*unsupported*/
21623 diff -Nur linux-2.6.30.orig/crypto/ocf/safe/sha1.h linux-2.6.30/crypto/ocf/safe/sha1.h
21624 --- linux-2.6.30.orig/crypto/ocf/safe/sha1.h 1970-01-01 01:00:00.000000000 +0100
21625 +++ linux-2.6.30/crypto/ocf/safe/sha1.h 2009-06-11 10:55:27.000000000 +0200
21626 @@ -0,0 +1,72 @@
21627 +/* $FreeBSD: src/sys/crypto/sha1.h,v 1.8 2002/03/20 05:13:50 alfred Exp $ */
21628 +/* $KAME: sha1.h,v 1.5 2000/03/27 04:36:23 sumikawa Exp $ */
21631 + * Copyright (C) 1995, 1996, 1997, and 1998 WIDE Project.
21632 + * All rights reserved.
21634 + * Redistribution and use in source and binary forms, with or without
21635 + * modification, are permitted provided that the following conditions
21636 + * are met:
21637 + * 1. Redistributions of source code must retain the above copyright
21638 + * notice, this list of conditions and the following disclaimer.
21639 + * 2. Redistributions in binary form must reproduce the above copyright
21640 + * notice, this list of conditions and the following disclaimer in the
21641 + * documentation and/or other materials provided with the distribution.
21642 + * 3. Neither the name of the project nor the names of its contributors
21643 + * may be used to endorse or promote products derived from this software
21644 + * without specific prior written permission.
21646 + * THIS SOFTWARE IS PROVIDED BY THE PROJECT AND CONTRIBUTORS ``AS IS'' AND
21647 + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
21648 + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
21649 + * ARE DISCLAIMED. IN NO EVENT SHALL THE PROJECT OR CONTRIBUTORS BE LIABLE
21650 + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
21651 + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
21652 + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
21653 + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
21654 + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
21655 + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
21656 + * SUCH DAMAGE.
21657 + */
21659 + * FIPS pub 180-1: Secure Hash Algorithm (SHA-1)
21660 + * based on: http://csrc.nist.gov/fips/fip180-1.txt
21661 + * implemented by Jun-ichiro itojun Itoh <itojun@itojun.org>
21662 + */
21664 +#ifndef _NETINET6_SHA1_H_
21665 +#define _NETINET6_SHA1_H_
21667 +struct sha1_ctxt {
21668 + union {
21669 + u_int8_t b8[20];
21670 + u_int32_t b32[5];
21671 + } h;
21672 + union {
21673 + u_int8_t b8[8];
21674 + u_int64_t b64[1];
21675 + } c;
21676 + union {
21677 + u_int8_t b8[64];
21678 + u_int32_t b32[16];
21679 + } m;
21680 + u_int8_t count;
21683 +#ifdef __KERNEL__
21684 +extern void sha1_init(struct sha1_ctxt *);
21685 +extern void sha1_pad(struct sha1_ctxt *);
21686 +extern void sha1_loop(struct sha1_ctxt *, const u_int8_t *, size_t);
21687 +extern void sha1_result(struct sha1_ctxt *, caddr_t);
21689 +/* compatibility with other SHA1 implementations */
21690 +typedef struct sha1_ctxt SHA1_CTX;
21691 +#define SHA1Init(x) sha1_init((x))
21692 +#define SHA1Update(x, y, z) sha1_loop((x), (y), (z))
21693 +#define SHA1Final(x, y) sha1_result((y), (x))
21694 +#endif /* __KERNEL__ */
21696 +#define SHA1_RESULTLEN (160/8)
21698 +#endif /*_NETINET6_SHA1_H_*/
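A hypothetical in-kernel caller (not part of the patch) illustrating the compatibility macros defined above.

static void example_sha1_digest(const u_int8_t *data, size_t len,
				u_int8_t digest[SHA1_RESULTLEN])
{
	SHA1_CTX ctx;

	SHA1Init(&ctx);
	SHA1Update(&ctx, data, len);
	SHA1Final((caddr_t)digest, &ctx);
}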
21699 diff -Nur linux-2.6.30.orig/crypto/ocf/talitos/Makefile linux-2.6.30/crypto/ocf/talitos/Makefile
21700 --- linux-2.6.30.orig/crypto/ocf/talitos/Makefile 1970-01-01 01:00:00.000000000 +0100
21701 +++ linux-2.6.30/crypto/ocf/talitos/Makefile 2009-06-11 10:55:27.000000000 +0200
21702 @@ -0,0 +1,12 @@
21703 +# for SGlinux builds
21704 +-include $(ROOTDIR)/modules/.config
21706 +obj-$(CONFIG_OCF_TALITOS) += talitos.o
21708 +obj ?= .
21709 +EXTRA_CFLAGS += -I$(obj)/.. -I$(obj)/
21711 +ifdef TOPDIR
21712 +-include $(TOPDIR)/Rules.make
21713 +endif
21715 diff -Nur linux-2.6.30.orig/crypto/ocf/talitos/talitos.c linux-2.6.30/crypto/ocf/talitos/talitos.c
21716 --- linux-2.6.30.orig/crypto/ocf/talitos/talitos.c 1970-01-01 01:00:00.000000000 +0100
21717 +++ linux-2.6.30/crypto/ocf/talitos/talitos.c 2009-06-11 10:55:27.000000000 +0200
21718 @@ -0,0 +1,1359 @@
21720 + * crypto/ocf/talitos/talitos.c
21722 + * An OCF-Linux module that uses Freescale's SEC to do the crypto.
21723 + * Based on crypto/ocf/hifn and crypto/ocf/safe OCF drivers
21725 + * Copyright (c) 2006 Freescale Semiconductor, Inc.
21727 + * This code written by Kim A. B. Phillips <kim.phillips@freescale.com>
21728 + * some code copied from files with the following:
21729 + * Copyright (C) 2004-2007 David McCullough <david_mccullough@securecomputing.com>
21731 + * Redistribution and use in source and binary forms, with or without
21732 + * modification, are permitted provided that the following conditions
21733 + * are met:
21735 + * 1. Redistributions of source code must retain the above copyright
21736 + * notice, this list of conditions and the following disclaimer.
21737 + * 2. Redistributions in binary form must reproduce the above copyright
21738 + * notice, this list of conditions and the following disclaimer in the
21739 + * documentation and/or other materials provided with the distribution.
21740 + * 3. The name of the author may not be used to endorse or promote products
21741 + * derived from this software without specific prior written permission.
21743 + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
21744 + * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
21745 + * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
21746 + * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
21747 + * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
21748 + * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
21749 + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
21750 + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
21751 + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
21752 + * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
21754 + * ---------------------------------------------------------------------------
21756 + * NOTES:
21758 + * The Freescale SEC (also known as 'talitos') resides on the
21759 + * internal bus, and runs asynchronous to the processor core. It has
21760 + * a wide gamut of cryptographic acceleration features, including single-
21761 + * pass IPsec (also known as algorithm chaining). To properly utilize
21762 + * all of the SEC's performance enhancing features, further reworking
21763 + * of higher level code (framework, applications) will be necessary.
21765 + * The following table shows which SEC version is present in which devices:
21766 + *
21767 + * Devices SEC version
21769 + * 8272, 8248 SEC 1.0
21770 + * 885, 875 SEC 1.2
21771 + * 8555E, 8541E SEC 2.0
21772 + * 8349E SEC 2.01
21773 + * 8548E SEC 2.1
21775 + * The following table shows the features offered by each SEC version:
21777 + * Max. chan-
21778 + * version Bus I/F Clock nels DEU AESU AFEU MDEU PKEU RNG KEU
21780 + * SEC 1.0 internal 64b 100MHz 4 1 1 1 1 1 1 0
21781 + * SEC 1.2 internal 32b 66MHz 1 1 1 0 1 0 0 0
21782 + * SEC 2.0 internal 64b 166MHz 4 1 1 1 1 1 1 0
21783 + * SEC 2.01 internal 64b 166MHz 4 1 1 1 1 1 1 0
21784 + * SEC 2.1 internal 64b 333MHz 4 1 1 1 1 1 1 1
21786 + * Each execution unit in the SEC has two modes of execution; channel and
21787 + * slave/debug. This driver employs the channel infrastructure in the
21788 + * device for convenience. Only the RNG is accessed directly, to take
21789 + * advantage of its random FIFO pool. The relationship between the
21790 + * channels and execution units is depicted in the following diagram:
21792 + * ------- ------------
21793 + * ---| ch0 |---| |
21794 + * ------- | |
21795 + * | |------+-------+-------+-------+------------
21796 + * ------- | | | | | | |
21797 + * ---| ch1 |---| | | | | | |
21798 + * ------- | | ------ ------ ------ ------ ------
21799 + * |controller| |DEU | |AESU| |MDEU| |PKEU| ... |RNG |
21800 + * ------- | | ------ ------ ------ ------ ------
21801 + * ---| ch2 |---| | | | | | |
21802 + * ------- | | | | | | |
21803 + * | |------+-------+-------+-------+------------
21804 + * ------- | |
21805 + * ---| ch3 |---| |
21806 + * ------- ------------
21808 + * Channel ch0 may drive an aes operation to the aes unit (AESU),
21809 + * and, at the same time, ch1 may drive a message digest operation
21810 + * to the mdeu. Each channel has an input descriptor FIFO, and the
21811 + * FIFO can contain, e.g. on the 8541E, up to 24 entries, before
21812 + * a buffer overrun error is triggered. The controller is responsible
21813 + * for fetching the data from descriptor pointers, and passing the
21814 + * data to the appropriate EUs. The controller also writes the
21815 + * cryptographic operation's result to memory. The SEC notifies
21816 + * completion by triggering an interrupt and/or setting the 1st byte
21817 + * of the hdr field to 0xff.
21819 + * TODO:
21820 + * o support more algorithms
21821 + * o support more versions of the SEC
21822 + * o add support for linux 2.4
21823 + * o scatter-gather (sg) support
21824 + * o add support for public key ops (PKEU)
21825 + * o add statistics
21826 + */
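Sketch only, with an assumed field name: if struct talitos_desc exposes its header word as `hdr', the done notification described above (first byte set to 0xff) could be tested as follows; both the helper and the field name are assumptions, not the driver's code.

static inline int example_talitos_hdr_done(u_int32_t hdr)
{
	/* the SEC writes 0xff into the first (most significant, big-endian)
	 * byte of the descriptor header on completion, per the note above */
	return ((hdr >> 24) & 0xff) == 0xff;
}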
21828 +#ifndef AUTOCONF_INCLUDED
21829 +#include <linux/config.h>
21830 +#endif
21831 +#include <linux/module.h>
21832 +#include <linux/init.h>
21833 +#include <linux/interrupt.h>
21834 +#include <linux/spinlock.h>
21835 +#include <linux/random.h>
21836 +#include <linux/skbuff.h>
21837 +#include <asm/scatterlist.h>
21838 +#include <linux/dma-mapping.h> /* dma_map_single() */
21839 +#include <linux/moduleparam.h>
21841 +#include <linux/version.h>
21842 +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,15)
21843 +#include <linux/platform_device.h>
21844 +#endif
21846 +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,19)
21847 +#include <linux/of_platform.h>
21848 +#endif
21850 +#include <cryptodev.h>
21851 +#include <uio.h>
21853 +#define DRV_NAME "talitos"
21855 +#include "talitos_dev.h"
21856 +#include "talitos_soft.h"
21858 +#define read_random(p,l) get_random_bytes(p,l)
21860 +const char talitos_driver_name[] = "Talitos OCF";
21861 +const char talitos_driver_version[] = "0.2";
21863 +static int talitos_newsession(device_t dev, u_int32_t *sidp,
21864 + struct cryptoini *cri);
21865 +static int talitos_freesession(device_t dev, u_int64_t tid);
21866 +static int talitos_process(device_t dev, struct cryptop *crp, int hint);
21867 +static void dump_talitos_status(struct talitos_softc *sc);
21868 +static int talitos_submit(struct talitos_softc *sc, struct talitos_desc *td,
21869 + int chsel);
21870 +static void talitos_doneprocessing(struct talitos_softc *sc);
21871 +static void talitos_init_device(struct talitos_softc *sc);
21872 +static void talitos_reset_device_master(struct talitos_softc *sc);
21873 +static void talitos_reset_device(struct talitos_softc *sc);
21874 +static void talitos_errorprocessing(struct talitos_softc *sc);
21875 +#ifdef CONFIG_PPC_MERGE
21876 +static int talitos_probe(struct of_device *ofdev, const struct of_device_id *match);
21877 +static int talitos_remove(struct of_device *ofdev);
21878 +#else
21879 +static int talitos_probe(struct platform_device *pdev);
21880 +static int talitos_remove(struct platform_device *pdev);
21881 +#endif
21882 +#ifdef CONFIG_OCF_RANDOMHARVEST
21883 +static int talitos_read_random(void *arg, u_int32_t *buf, int maxwords);
21884 +static void talitos_rng_init(struct talitos_softc *sc);
21885 +#endif
21887 +static device_method_t talitos_methods = {
21888 + /* crypto device methods */
21889 + DEVMETHOD(cryptodev_newsession, talitos_newsession),
21890 + DEVMETHOD(cryptodev_freesession,talitos_freesession),
21891 + DEVMETHOD(cryptodev_process, talitos_process),
21894 +#define debug talitos_debug
21895 +int talitos_debug = 0;
21896 +module_param(talitos_debug, int, 0644);
21897 +MODULE_PARM_DESC(talitos_debug, "Enable debug");
21899 +static inline void talitos_write(volatile unsigned *addr, u32 val)
21901 + out_be32(addr, val);
21904 +static inline u32 talitos_read(volatile unsigned *addr)
21906 + u32 val;
21907 + val = in_be32(addr);
21908 + return val;
21911 +static void dump_talitos_status(struct talitos_softc *sc)
21913 + unsigned int v, v_hi, i, *ptr;
21914 + v = talitos_read(sc->sc_base_addr + TALITOS_MCR);
21915 + v_hi = talitos_read(sc->sc_base_addr + TALITOS_MCR_HI);
21916 + printk(KERN_INFO "%s: MCR 0x%08x_%08x\n",
21917 + device_get_nameunit(sc->sc_cdev), v, v_hi);
21918 + v = talitos_read(sc->sc_base_addr + TALITOS_IMR);
21919 + v_hi = talitos_read(sc->sc_base_addr + TALITOS_IMR_HI);
21920 + printk(KERN_INFO "%s: IMR 0x%08x_%08x\n",
21921 + device_get_nameunit(sc->sc_cdev), v, v_hi);
21922 + v = talitos_read(sc->sc_base_addr + TALITOS_ISR);
21923 + v_hi = talitos_read(sc->sc_base_addr + TALITOS_ISR_HI);
21924 + printk(KERN_INFO "%s: ISR 0x%08x_%08x\n",
21925 + device_get_nameunit(sc->sc_cdev), v, v_hi);
21926 + for (i = 0; i < sc->sc_num_channels; i++) {
21927 + v = talitos_read(sc->sc_base_addr + i*TALITOS_CH_OFFSET +
21928 + TALITOS_CH_CDPR);
21929 + v_hi = talitos_read(sc->sc_base_addr + i*TALITOS_CH_OFFSET +
21930 + TALITOS_CH_CDPR_HI);
21931 + printk(KERN_INFO "%s: CDPR ch%d 0x%08x_%08x\n",
21932 + device_get_nameunit(sc->sc_cdev), i, v, v_hi);
21934 + for (i = 0; i < sc->sc_num_channels; i++) {
21935 + v = talitos_read(sc->sc_base_addr + i*TALITOS_CH_OFFSET +
21936 + TALITOS_CH_CCPSR);
21937 + v_hi = talitos_read(sc->sc_base_addr + i*TALITOS_CH_OFFSET +
21938 + TALITOS_CH_CCPSR_HI);
21939 + printk(KERN_INFO "%s: CCPSR ch%d 0x%08x_%08x\n",
21940 + device_get_nameunit(sc->sc_cdev), i, v, v_hi);
21942 + ptr = sc->sc_base_addr + TALITOS_CH_DESCBUF;
21943 + for (i = 0; i < 16; i++) {
21944 + v = talitos_read(ptr++); v_hi = talitos_read(ptr++);
21945 + printk(KERN_INFO "%s: DESCBUF ch0 0x%08x_%08x (tdp%02d)\n",
21946 + device_get_nameunit(sc->sc_cdev), v, v_hi, i);
21948 + return;
21952 +#ifdef CONFIG_OCF_RANDOMHARVEST
21953 +/*
21954 + * pull random numbers off the RNG FIFO, not exceeding amount available
21955 + */
21956 +static int
21957 +talitos_read_random(void *arg, u_int32_t *buf, int maxwords)
21959 + struct talitos_softc *sc = (struct talitos_softc *) arg;
21960 + int rc;
21961 + u_int32_t v;
21963 + DPRINTF("%s()\n", __FUNCTION__);
21965 + /* check for things like FIFO underflow */
21966 + v = talitos_read(sc->sc_base_addr + TALITOS_RNGISR_HI);
21967 + if (unlikely(v)) {
21968 + printk(KERN_ERR "%s: RNGISR_HI error %08x\n",
21969 + device_get_nameunit(sc->sc_cdev), v);
21970 + return 0;
21972 + /*
21973 + * OFL is number of available 64-bit words,
21974 + * shift and convert to a 32-bit word count
21975 + */
21976 + v = talitos_read(sc->sc_base_addr + TALITOS_RNGSR_HI);
21977 + v = (v & TALITOS_RNGSR_HI_OFL) >> (16 - 1);
21978 + if (maxwords > v)
21979 + maxwords = v;
21980 + for (rc = 0; rc < maxwords; rc++) {
21981 + buf[rc] = talitos_read(sc->sc_base_addr +
21982 + TALITOS_RNG_FIFO + rc*sizeof(u_int32_t));
21984 + if (maxwords & 1) {
21985 + /*
21986 + * RNG will complain with an AE in the RNGISR
21987 + * if we don't complete the pairs of 32-bit reads
21988 + * to its 64-bit register based FIFO
21989 + */
21990 + v = talitos_read(sc->sc_base_addr +
21991 + TALITOS_RNG_FIFO + rc*sizeof(u_int32_t));
21994 + return rc;
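The OFL arithmetic above is compact enough to deserve a worked example: TALITOS_RNGSR_HI_OFL (0xff0000 in talitos_dev.h) holds the number of 64-bit words sitting in the RNG output FIFO, so shifting right by (16 - 1) both extracts the field (>> 16) and doubles it (<< 1), giving the number of 32-bit words that may be read; the trailing dummy read then keeps the total number of 32-bit reads even so the 64-bit FIFO register is never left half-read. A standalone check of the word-count identity:

#include <assert.h>
#include <stdint.h>

#define TALITOS_RNGSR_HI_OFL 0xff0000  /* value from talitos_dev.h in this patch */

/* 32-bit words available, exactly as computed in talitos_read_random() */
static unsigned int rng_words32(uint32_t rngsr_hi)
{
        return (rngsr_hi & TALITOS_RNGSR_HI_OFL) >> (16 - 1);
}

int main(void)
{
        uint32_t sr = 5u << 16;                 /* 5 x 64-bit words in the FIFO */
        assert(rng_words32(sr) == 10);          /* -> 10 x 32-bit words */
        assert(rng_words32(sr) == ((sr >> 16) & 0xff) * 2);
        return 0;
}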
21997 +static void
21998 +talitos_rng_init(struct talitos_softc *sc)
22000 + u_int32_t v;
22002 + DPRINTF("%s()\n", __FUNCTION__);
22003 + /* reset RNG EU */
22004 + v = talitos_read(sc->sc_base_addr + TALITOS_RNGRCR_HI);
22005 + v |= TALITOS_RNGRCR_HI_SR;
22006 + talitos_write(sc->sc_base_addr + TALITOS_RNGRCR_HI, v);
22007 + while ((talitos_read(sc->sc_base_addr + TALITOS_RNGSR_HI)
22008 + & TALITOS_RNGSR_HI_RD) == 0)
22009 + cpu_relax();
22010 + /*
22011 + * we tell the RNG to start filling the RNG FIFO
22012 + * by writing the RNGDSR
22013 + */
22014 + v = talitos_read(sc->sc_base_addr + TALITOS_RNGDSR_HI);
22015 + talitos_write(sc->sc_base_addr + TALITOS_RNGDSR_HI, v);
22016 + /*
22017 + * 64 bits of data will be pushed onto the FIFO every
22018 + * 256 SEC cycles until the FIFO is full. The RNG then
22019 + * attempts to keep the FIFO full.
22020 + */
22021 + v = talitos_read(sc->sc_base_addr + TALITOS_RNGISR_HI);
22022 + if (v) {
22023 + printk(KERN_ERR "%s: RNGISR_HI error %08x\n",
22024 + device_get_nameunit(sc->sc_cdev), v);
22025 + return;
22027 + /*
22028 + * n.b. we need to add a FIPS test here - if the RNG is going
22029 + * to fail, it's going to fail at reset time
22030 + */
22031 + return;
22033 +#endif /* CONFIG_OCF_RANDOMHARVEST */
22036 + * Generate a new software session.
22037 + */
22038 +static int
22039 +talitos_newsession(device_t dev, u_int32_t *sidp, struct cryptoini *cri)
22041 + struct cryptoini *c, *encini = NULL, *macini = NULL;
22042 + struct talitos_softc *sc = device_get_softc(dev);
22043 + struct talitos_session *ses = NULL;
22044 + int sesn;
22046 + DPRINTF("%s()\n", __FUNCTION__);
22047 + if (sidp == NULL || cri == NULL || sc == NULL) {
22048 + DPRINTF("%s,%d - EINVAL\n", __FILE__, __LINE__);
22049 + return EINVAL;
22051 + for (c = cri; c != NULL; c = c->cri_next) {
22052 + if (c->cri_alg == CRYPTO_MD5 ||
22053 + c->cri_alg == CRYPTO_MD5_HMAC ||
22054 + c->cri_alg == CRYPTO_SHA1 ||
22055 + c->cri_alg == CRYPTO_SHA1_HMAC ||
22056 + c->cri_alg == CRYPTO_NULL_HMAC) {
22057 + if (macini)
22058 + return EINVAL;
22059 + macini = c;
22060 + } else if (c->cri_alg == CRYPTO_DES_CBC ||
22061 + c->cri_alg == CRYPTO_3DES_CBC ||
22062 + c->cri_alg == CRYPTO_AES_CBC ||
22063 + c->cri_alg == CRYPTO_NULL_CBC) {
22064 + if (encini)
22065 + return EINVAL;
22066 + encini = c;
22067 + } else {
22068 + DPRINTF("UNKNOWN c->cri_alg %d\n", c->cri_alg);
22069 + return EINVAL;
22072 + if (encini == NULL && macini == NULL)
22073 + return EINVAL;
22074 + if (encini) {
22075 + /* validate key length */
22076 + switch (encini->cri_alg) {
22077 + case CRYPTO_DES_CBC:
22078 + if (encini->cri_klen != 64)
22079 + return EINVAL;
22080 + break;
22081 + case CRYPTO_3DES_CBC:
22082 + if (encini->cri_klen != 192) {
22083 + return EINVAL;
22085 + break;
22086 + case CRYPTO_AES_CBC:
22087 + if (encini->cri_klen != 128 &&
22088 + encini->cri_klen != 192 &&
22089 + encini->cri_klen != 256)
22090 + return EINVAL;
22091 + break;
22092 + default:
22093 + DPRINTF("UNKNOWN encini->cri_alg %d\n",
22094 + encini->cri_alg);
22095 + return EINVAL;
22099 + if (sc->sc_sessions == NULL) {
22100 + ses = sc->sc_sessions = (struct talitos_session *)
22101 + kmalloc(sizeof(struct talitos_session), SLAB_ATOMIC);
22102 + if (ses == NULL)
22103 + return ENOMEM;
22104 + memset(ses, 0, sizeof(struct talitos_session));
22105 + sesn = 0;
22106 + sc->sc_nsessions = 1;
22107 + } else {
22108 + for (sesn = 0; sesn < sc->sc_nsessions; sesn++) {
22109 + if (sc->sc_sessions[sesn].ses_used == 0) {
22110 + ses = &sc->sc_sessions[sesn];
22111 + break;
22115 + if (ses == NULL) {
22116 + /* allocating session */
22117 + sesn = sc->sc_nsessions;
22118 + ses = (struct talitos_session *) kmalloc(
22119 + (sesn + 1) * sizeof(struct talitos_session),
22120 + SLAB_ATOMIC);
22121 + if (ses == NULL)
22122 + return ENOMEM;
22123 + memset(ses, 0,
22124 + (sesn + 1) * sizeof(struct talitos_session));
22125 + memcpy(ses, sc->sc_sessions,
22126 + sesn * sizeof(struct talitos_session));
22127 + memset(sc->sc_sessions, 0,
22128 + sesn * sizeof(struct talitos_session));
22129 + kfree(sc->sc_sessions);
22130 + sc->sc_sessions = ses;
22131 + ses = &sc->sc_sessions[sesn];
22132 + sc->sc_nsessions++;
22136 + ses->ses_used = 1;
22138 + if (encini) {
22139 + /* get an IV */
22140 + /* XXX may read fewer than requested */
22141 + read_random(ses->ses_iv, sizeof(ses->ses_iv));
22143 + ses->ses_klen = (encini->cri_klen + 7) / 8;
22144 + memcpy(ses->ses_key, encini->cri_key, ses->ses_klen);
22145 + if (macini) {
22146 + /* doing hash on top of cipher */
22147 + ses->ses_hmac_len = (macini->cri_klen + 7) / 8;
22148 + memcpy(ses->ses_hmac, macini->cri_key,
22149 + ses->ses_hmac_len);
22151 + } else if (macini) {
22152 + /* doing hash */
22153 + ses->ses_klen = (macini->cri_klen + 7) / 8;
22154 + memcpy(ses->ses_key, macini->cri_key, ses->ses_klen);
22157 + /* back compat way of determining MAC result len */
22158 + if (macini) {
22159 + ses->ses_mlen = macini->cri_mlen;
22160 + if (ses->ses_mlen == 0) {
22161 + if (macini->cri_alg == CRYPTO_MD5_HMAC)
22162 + ses->ses_mlen = MD5_HASH_LEN;
22163 + else
22164 + ses->ses_mlen = SHA1_HASH_LEN;
22168 + /* really should make up a template td here,
22169 + * and only fill things like i/o and direction in process() */
22171 + /* assign session ID */
22172 + *sidp = TALITOS_SID(sc->sc_num, sesn);
22173 + return 0;
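A note on the unit conversions in the session setup above: cri_klen arrives in bits (64 for DES, 192 for 3DES, 128/192/256 for AES), while ses_key, ses_hmac and the lengths stored in the session are kept in bytes, hence the (cri_klen + 7) / 8 rounding. A standalone illustration of the values involved (plain C, independent of the driver):

#include <assert.h>

/* bits -> bytes, as done in talitos_newsession() */
static unsigned int klen_bytes(unsigned int cri_klen)
{
        return (cri_klen + 7) / 8;
}

int main(void)
{
        assert(klen_bytes(64)  == 8);    /* DES */
        assert(klen_bytes(192) == 24);   /* 3DES */
        assert(klen_bytes(128) == 16);   /* AES-128 */
        assert(klen_bytes(256) == 32);   /* AES-256: fills all 8 words of ses_key */
        return 0;
}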
22177 + * Deallocate a session.
22178 + */
22179 +static int
22180 +talitos_freesession(device_t dev, u_int64_t tid)
22182 + struct talitos_softc *sc = device_get_softc(dev);
22183 + int session, ret;
22184 + u_int32_t sid = ((u_int32_t) tid) & 0xffffffff;
22186 + if (sc == NULL)
22187 + return EINVAL;
22188 + session = TALITOS_SESSION(sid);
22189 + if (session < sc->sc_nsessions) {
22190 + memset(&sc->sc_sessions[session], 0,
22191 + sizeof(sc->sc_sessions[session]));
22192 + ret = 0;
22193 + } else
22194 + ret = EINVAL;
22195 + return ret;
22199 + * launch device processing - it will come back with done notification
22200 + * in the form of an interrupt and/or HDR_DONE_BITS in header
22201 + */
22202 +static int
22203 +talitos_submit(
22204 + struct talitos_softc *sc,
22205 + struct talitos_desc *td,
22206 + int chsel)
22208 + u_int32_t v;
22210 + v = dma_map_single(NULL, td, sizeof(*td), DMA_TO_DEVICE);
22211 + talitos_write(sc->sc_base_addr +
22212 + chsel*TALITOS_CH_OFFSET + TALITOS_CH_FF, 0);
22213 + talitos_write(sc->sc_base_addr +
22214 + chsel*TALITOS_CH_OFFSET + TALITOS_CH_FF_HI, v);
22215 + return 0;
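talitos_submit() above is the entire hand-off to hardware: the descriptor is DMA-mapped and its bus address is pushed into the selected channel's fetch FIFO (a zero write to TALITOS_CH_FF followed by the address write to TALITOS_CH_FF_HI), after which the SEC fetches and executes it asynchronously. The per-channel register addressing relies only on a fixed stride; a standalone check of that arithmetic, using constants defined later in talitos_dev.h:

#include <assert.h>

#define TALITOS_CH_OFFSET 0x100   /* per-channel register stride (talitos_dev.h) */
#define TALITOS_CH_FF_HI  0x114c  /* fetch FIFO FETCH_ADRS register */

/* byte offset of channel chsel's FETCH_ADRS register, as used in talitos_submit() */
static unsigned int ch_ff_hi_off(int chsel)
{
        return chsel * TALITOS_CH_OFFSET + TALITOS_CH_FF_HI;
}

int main(void)
{
        assert(ch_ff_hi_off(0) == 0x114c);
        assert(ch_ff_hi_off(3) == 0x144c);  /* last channel of a 4-channel SEC 2.x */
        return 0;
}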
22218 +static int
22219 +talitos_process(device_t dev, struct cryptop *crp, int hint)
22221 + int i, err = 0, ivsize;
22222 + struct talitos_softc *sc = device_get_softc(dev);
22223 + struct cryptodesc *crd1, *crd2, *maccrd, *enccrd;
22224 + caddr_t iv;
22225 + struct talitos_session *ses;
22226 + struct talitos_desc *td;
22227 + unsigned long flags;
22228 + /* descriptor mappings */
22229 + int hmac_key, hmac_data, cipher_iv, cipher_key,
22230 + in_fifo, out_fifo, cipher_iv_out;
22231 + static int chsel = -1;
22233 + DPRINTF("%s()\n", __FUNCTION__);
22235 + if (crp == NULL || crp->crp_callback == NULL || sc == NULL) {
22236 + return EINVAL;
22238 + crp->crp_etype = 0;
22239 + if (TALITOS_SESSION(crp->crp_sid) >= sc->sc_nsessions) {
22240 + return EINVAL;
22243 + ses = &sc->sc_sessions[TALITOS_SESSION(crp->crp_sid)];
22245 + /* enter the channel scheduler */
22246 + spin_lock_irqsave(&sc->sc_chnfifolock[sc->sc_num_channels], flags);
22248 + /* reuse channel that already had/has requests for the required EU */
22249 + for (i = 0; i < sc->sc_num_channels; i++) {
22250 + if (sc->sc_chnlastalg[i] == crp->crp_desc->crd_alg)
22251 + break;
22253 + if (i == sc->sc_num_channels) {
22254 + /*
22255 + * this algorithm hasn't been seen on any channel recently;
22256 + * fall back to round robin in this case
22257 + * nb: sc->sc_num_channels must be a power of 2
22258 + */
22259 + chsel = (chsel + 1) & (sc->sc_num_channels - 1);
22260 + } else {
22261 + /*
22262 + * matches channel with same target execution unit;
22263 + * use same channel in this case
22264 + */
22265 + chsel = i;
22267 + sc->sc_chnlastalg[chsel] = crp->crp_desc->crd_alg;
22269 + /* release the channel scheduler lock */
22270 + spin_unlock_irqrestore(&sc->sc_chnfifolock[sc->sc_num_channels], flags);
22272 + /* acquire the selected channel fifo lock */
22273 + spin_lock_irqsave(&sc->sc_chnfifolock[chsel], flags);
22275 + /* find and reserve next available descriptor-cryptop pair */
22276 + for (i = 0; i < sc->sc_chfifo_len; i++) {
22277 + if (sc->sc_chnfifo[chsel][i].cf_desc.hdr == 0) {
22278 + /*
22279 + * ensure correct descriptor formation by
22280 + * avoiding inadvertently setting "optional" entries
22281 + * e.g. not using "optional" dptr2 for MD/HMAC descs
22282 + */
22283 + memset(&sc->sc_chnfifo[chsel][i].cf_desc,
22284 + 0, sizeof(*td));
22285 + /* reserve it with done notification request bit */
22286 + sc->sc_chnfifo[chsel][i].cf_desc.hdr |=
22287 + TALITOS_DONE_NOTIFY;
22288 + break;
22291 + spin_unlock_irqrestore(&sc->sc_chnfifolock[chsel], flags);
22293 + if (i == sc->sc_chfifo_len) {
22294 + /* fifo full */
22295 + err = ERESTART;
22296 + goto errout;
22299 + td = &sc->sc_chnfifo[chsel][i].cf_desc;
22300 + sc->sc_chnfifo[chsel][i].cf_crp = crp;
22302 + crd1 = crp->crp_desc;
22303 + if (crd1 == NULL) {
22304 + err = EINVAL;
22305 + goto errout;
22307 + crd2 = crd1->crd_next;
22308 + /* prevent compiler warning */
22309 + hmac_key = 0;
22310 + hmac_data = 0;
22311 + if (crd2 == NULL) {
22312 + td->hdr |= TD_TYPE_COMMON_NONSNOOP_NO_AFEU;
22313 + /* assign descriptor dword ptr mappings for this desc. type */
22314 + cipher_iv = 1;
22315 + cipher_key = 2;
22316 + in_fifo = 3;
22317 + cipher_iv_out = 5;
22318 + if (crd1->crd_alg == CRYPTO_MD5_HMAC ||
22319 + crd1->crd_alg == CRYPTO_SHA1_HMAC ||
22320 + crd1->crd_alg == CRYPTO_SHA1 ||
22321 + crd1->crd_alg == CRYPTO_MD5) {
22322 + out_fifo = 5;
22323 + maccrd = crd1;
22324 + enccrd = NULL;
22325 + } else if (crd1->crd_alg == CRYPTO_DES_CBC ||
22326 + crd1->crd_alg == CRYPTO_3DES_CBC ||
22327 + crd1->crd_alg == CRYPTO_AES_CBC ||
22328 + crd1->crd_alg == CRYPTO_ARC4) {
22329 + out_fifo = 4;
22330 + maccrd = NULL;
22331 + enccrd = crd1;
22332 + } else {
22333 + DPRINTF("UNKNOWN crd1->crd_alg %d\n", crd1->crd_alg);
22334 + err = EINVAL;
22335 + goto errout;
22337 + } else {
22338 + if (sc->sc_desc_types & TALITOS_HAS_DT_IPSEC_ESP) {
22339 + td->hdr |= TD_TYPE_IPSEC_ESP;
22340 + } else {
22341 + DPRINTF("unimplemented: multiple descriptor ipsec\n");
22342 + err = EINVAL;
22343 + goto errout;
22345 + /* assign descriptor dword ptr mappings for this desc. type */
22346 + hmac_key = 0;
22347 + hmac_data = 1;
22348 + cipher_iv = 2;
22349 + cipher_key = 3;
22350 + in_fifo = 4;
22351 + out_fifo = 5;
22352 + cipher_iv_out = 6;
22353 + if ((crd1->crd_alg == CRYPTO_MD5_HMAC ||
22354 + crd1->crd_alg == CRYPTO_SHA1_HMAC ||
22355 + crd1->crd_alg == CRYPTO_MD5 ||
22356 + crd1->crd_alg == CRYPTO_SHA1) &&
22357 + (crd2->crd_alg == CRYPTO_DES_CBC ||
22358 + crd2->crd_alg == CRYPTO_3DES_CBC ||
22359 + crd2->crd_alg == CRYPTO_AES_CBC ||
22360 + crd2->crd_alg == CRYPTO_ARC4) &&
22361 + ((crd2->crd_flags & CRD_F_ENCRYPT) == 0)) {
22362 + maccrd = crd1;
22363 + enccrd = crd2;
22364 + } else if ((crd1->crd_alg == CRYPTO_DES_CBC ||
22365 + crd1->crd_alg == CRYPTO_ARC4 ||
22366 + crd1->crd_alg == CRYPTO_3DES_CBC ||
22367 + crd1->crd_alg == CRYPTO_AES_CBC) &&
22368 + (crd2->crd_alg == CRYPTO_MD5_HMAC ||
22369 + crd2->crd_alg == CRYPTO_SHA1_HMAC ||
22370 + crd2->crd_alg == CRYPTO_MD5 ||
22371 + crd2->crd_alg == CRYPTO_SHA1) &&
22372 + (crd1->crd_flags & CRD_F_ENCRYPT)) {
22373 + enccrd = crd1;
22374 + maccrd = crd2;
22375 + } else {
22376 + /* We cannot order the SEC as requested */
22377 + printk("%s: cannot do the order\n",
22378 + device_get_nameunit(sc->sc_cdev));
22379 + err = EINVAL;
22380 + goto errout;
22383 + /* assign in_fifo and out_fifo based on input/output struct type */
22384 + if (crp->crp_flags & CRYPTO_F_SKBUF) {
22385 + /* using SKB buffers */
22386 + struct sk_buff *skb = (struct sk_buff *)crp->crp_buf;
22387 + if (skb_shinfo(skb)->nr_frags) {
22388 + printk("%s: skb frags unimplemented\n",
22389 + device_get_nameunit(sc->sc_cdev));
22390 + err = EINVAL;
22391 + goto errout;
22393 + td->ptr[in_fifo].ptr = dma_map_single(NULL, skb->data,
22394 + skb->len, DMA_TO_DEVICE);
22395 + td->ptr[in_fifo].len = skb->len;
22396 + td->ptr[out_fifo].ptr = dma_map_single(NULL, skb->data,
22397 + skb->len, DMA_TO_DEVICE);
22398 + td->ptr[out_fifo].len = skb->len;
22399 + td->ptr[hmac_data].ptr = dma_map_single(NULL, skb->data,
22400 + skb->len, DMA_TO_DEVICE);
22401 + } else if (crp->crp_flags & CRYPTO_F_IOV) {
22402 + /* using IOV buffers */
22403 + struct uio *uiop = (struct uio *)crp->crp_buf;
22404 + if (uiop->uio_iovcnt > 1) {
22405 + printk("%s: iov frags unimplemented\n",
22406 + device_get_nameunit(sc->sc_cdev));
22407 + err = EINVAL;
22408 + goto errout;
22410 + td->ptr[in_fifo].ptr = dma_map_single(NULL,
22411 + uiop->uio_iov->iov_base, crp->crp_ilen, DMA_TO_DEVICE);
22412 + td->ptr[in_fifo].len = crp->crp_ilen;
22413 + /* crp_olen is never set; always use crp_ilen */
22414 + td->ptr[out_fifo].ptr = dma_map_single(NULL,
22415 + uiop->uio_iov->iov_base,
22416 + crp->crp_ilen, DMA_TO_DEVICE);
22417 + td->ptr[out_fifo].len = crp->crp_ilen;
22418 + } else {
22419 + /* using contig buffers */
22420 + td->ptr[in_fifo].ptr = dma_map_single(NULL,
22421 + crp->crp_buf, crp->crp_ilen, DMA_TO_DEVICE);
22422 + td->ptr[in_fifo].len = crp->crp_ilen;
22423 + td->ptr[out_fifo].ptr = dma_map_single(NULL,
22424 + crp->crp_buf, crp->crp_ilen, DMA_TO_DEVICE);
22425 + td->ptr[out_fifo].len = crp->crp_ilen;
22427 + if (enccrd) {
22428 + switch (enccrd->crd_alg) {
22429 + case CRYPTO_3DES_CBC:
22430 + td->hdr |= TALITOS_MODE0_DEU_3DES;
22431 + /* FALLTHROUGH */
22432 + case CRYPTO_DES_CBC:
22433 + td->hdr |= TALITOS_SEL0_DEU
22434 + | TALITOS_MODE0_DEU_CBC;
22435 + if (enccrd->crd_flags & CRD_F_ENCRYPT)
22436 + td->hdr |= TALITOS_MODE0_DEU_ENC;
22437 + ivsize = 2*sizeof(u_int32_t);
22438 + DPRINTF("%cDES ses %d ch %d len %d\n",
22439 + (td->hdr & TALITOS_MODE0_DEU_3DES)?'3':'1',
22440 + (u32)TALITOS_SESSION(crp->crp_sid),
22441 + chsel, td->ptr[in_fifo].len);
22442 + break;
22443 + case CRYPTO_AES_CBC:
22444 + td->hdr |= TALITOS_SEL0_AESU
22445 + | TALITOS_MODE0_AESU_CBC;
22446 + if (enccrd->crd_flags & CRD_F_ENCRYPT)
22447 + td->hdr |= TALITOS_MODE0_AESU_ENC;
22448 + ivsize = 4*sizeof(u_int32_t);
22449 + DPRINTF("AES ses %d ch %d len %d\n",
22450 + (u32)TALITOS_SESSION(crp->crp_sid),
22451 + chsel, td->ptr[in_fifo].len);
22452 + break;
22453 + default:
22454 + printk("%s: unimplemented enccrd->crd_alg %d\n",
22455 + device_get_nameunit(sc->sc_cdev), enccrd->crd_alg);
22456 + err = EINVAL;
22457 + goto errout;
22459 + /*
22460 + * Setup encrypt/decrypt state. When using basic ops
22461 + * we can't use an inline IV because hash/crypt offset
22462 + * must be from the end of the IV to the start of the
22463 + * crypt data and this leaves out the preceding header
22464 + * from the hash calculation. Instead we place the IV
22465 + * in the state record and set the hash/crypt offset to
22466 + * copy both the header+IV.
22467 + */
22468 + if (enccrd->crd_flags & CRD_F_ENCRYPT) {
22469 + td->hdr |= TALITOS_DIR_OUTBOUND;
22470 + if (enccrd->crd_flags & CRD_F_IV_EXPLICIT)
22471 + iv = enccrd->crd_iv;
22472 + else
22473 + iv = (caddr_t) ses->ses_iv;
22474 + if ((enccrd->crd_flags & CRD_F_IV_PRESENT) == 0) {
22475 + crypto_copyback(crp->crp_flags, crp->crp_buf,
22476 + enccrd->crd_inject, ivsize, iv);
22478 + } else {
22479 + td->hdr |= TALITOS_DIR_INBOUND;
22480 + if (enccrd->crd_flags & CRD_F_IV_EXPLICIT) {
22481 + iv = enccrd->crd_iv;
22482 + bcopy(enccrd->crd_iv, iv, ivsize);
22483 + } else {
22484 + iv = (caddr_t) ses->ses_iv;
22485 + crypto_copydata(crp->crp_flags, crp->crp_buf,
22486 + enccrd->crd_inject, ivsize, iv);
22489 + td->ptr[cipher_iv].ptr = dma_map_single(NULL, iv, ivsize,
22490 + DMA_TO_DEVICE);
22491 + td->ptr[cipher_iv].len = ivsize;
22492 + /*
22493 + * we don't need the cipher iv out length/pointer
22494 + * field to do ESP IPsec. Therefore we set the len field as 0,
22495 + * which tells the SEC not to do anything with this len/ptr
22496 + * field. Previously, when this length/pointer was pointing to the iv,
22497 + * it caused packet corruption.
22498 + */
22499 + td->ptr[cipher_iv_out].len = 0;
22501 + if (enccrd && maccrd) {
22502 + /* this is ipsec only for now */
22503 + td->hdr |= TALITOS_SEL1_MDEU
22504 + | TALITOS_MODE1_MDEU_INIT
22505 + | TALITOS_MODE1_MDEU_PAD;
22506 + switch (maccrd->crd_alg) {
22507 + case CRYPTO_MD5:
22508 + td->hdr |= TALITOS_MODE1_MDEU_MD5;
22509 + break;
22510 + case CRYPTO_MD5_HMAC:
22511 + td->hdr |= TALITOS_MODE1_MDEU_MD5_HMAC;
22512 + break;
22513 + case CRYPTO_SHA1:
22514 + td->hdr |= TALITOS_MODE1_MDEU_SHA1;
22515 + break;
22516 + case CRYPTO_SHA1_HMAC:
22517 + td->hdr |= TALITOS_MODE1_MDEU_SHA1_HMAC;
22518 + break;
22519 + default:
22520 + /* We cannot order the SEC as requested */
22521 + printk("%s: cannot do the order\n",
22522 + device_get_nameunit(sc->sc_cdev));
22523 + err = EINVAL;
22524 + goto errout;
22526 + if ((maccrd->crd_alg == CRYPTO_MD5_HMAC) ||
22527 + (maccrd->crd_alg == CRYPTO_SHA1_HMAC)) {
22528 + /*
22529 + * The offset from hash data to the start of
22530 + * crypt data is the difference in the skips.
22531 + */
22532 + /* ipsec only for now */
22533 + td->ptr[hmac_key].ptr = dma_map_single(NULL,
22534 + ses->ses_hmac, ses->ses_hmac_len, DMA_TO_DEVICE);
22535 + td->ptr[hmac_key].len = ses->ses_hmac_len;
22536 + td->ptr[in_fifo].ptr += enccrd->crd_skip;
22537 + td->ptr[in_fifo].len = enccrd->crd_len;
22538 + td->ptr[out_fifo].ptr += enccrd->crd_skip;
22539 + td->ptr[out_fifo].len = enccrd->crd_len;
22540 + /* bytes of HMAC to append after the ciphertext */
22541 + td->ptr[out_fifo].extent = ses->ses_mlen;
22542 + td->ptr[hmac_data].ptr += maccrd->crd_skip;
22543 + td->ptr[hmac_data].len = enccrd->crd_skip - maccrd->crd_skip;
22545 + if (enccrd->crd_flags & CRD_F_KEY_EXPLICIT) {
22546 + printk("%s: CRD_F_KEY_EXPLICIT unimplemented\n",
22547 + device_get_nameunit(sc->sc_cdev));
22550 + if (!enccrd && maccrd) {
22551 + /* single MD5 or SHA */
22552 + td->hdr |= TALITOS_SEL0_MDEU
22553 + | TALITOS_MODE0_MDEU_INIT
22554 + | TALITOS_MODE0_MDEU_PAD;
22555 + switch (maccrd->crd_alg) {
22556 + case CRYPTO_MD5:
22557 + td->hdr |= TALITOS_MODE0_MDEU_MD5;
22558 + DPRINTF("MD5 ses %d ch %d len %d\n",
22559 + (u32)TALITOS_SESSION(crp->crp_sid),
22560 + chsel, td->ptr[in_fifo].len);
22561 + break;
22562 + case CRYPTO_MD5_HMAC:
22563 + td->hdr |= TALITOS_MODE0_MDEU_MD5_HMAC;
22564 + break;
22565 + case CRYPTO_SHA1:
22566 + td->hdr |= TALITOS_MODE0_MDEU_SHA1;
22567 + DPRINTF("SHA1 ses %d ch %d len %d\n",
22568 + (u32)TALITOS_SESSION(crp->crp_sid),
22569 + chsel, td->ptr[in_fifo].len);
22570 + break;
22571 + case CRYPTO_SHA1_HMAC:
22572 + td->hdr |= TALITOS_MODE0_MDEU_SHA1_HMAC;
22573 + break;
22574 + default:
22575 + /* We cannot order the SEC as requested */
22576 + DPRINTF("cannot do the order\n");
22577 + err = EINVAL;
22578 + goto errout;
22581 + if (crp->crp_flags & CRYPTO_F_IOV)
22582 + td->ptr[out_fifo].ptr += maccrd->crd_inject;
22584 + if ((maccrd->crd_alg == CRYPTO_MD5_HMAC) ||
22585 + (maccrd->crd_alg == CRYPTO_SHA1_HMAC)) {
22586 + td->ptr[hmac_key].ptr = dma_map_single(NULL,
22587 + ses->ses_hmac, ses->ses_hmac_len,
22588 + DMA_TO_DEVICE);
22589 + td->ptr[hmac_key].len = ses->ses_hmac_len;
22591 + }
22592 + else {
22593 + /* use the key supplied with this request (session data holds a duplicate) */
22594 + td->ptr[cipher_key].ptr = dma_map_single(NULL,
22595 + enccrd->crd_key, (enccrd->crd_klen + 7) / 8,
22596 + DMA_TO_DEVICE);
22597 + td->ptr[cipher_key].len = (enccrd->crd_klen + 7) / 8;
22599 + /* descriptor complete - GO! */
22600 + return talitos_submit(sc, td, chsel);
22602 +errout:
22603 + if (err != ERESTART) {
22604 + crp->crp_etype = err;
22605 + crypto_done(crp);
22607 + return err;
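One detail of the channel scheduler at the top of talitos_process() is worth a worked example: when no channel has recently run the requested algorithm, the driver falls back to round robin with chsel = (chsel + 1) & (sc->sc_num_channels - 1), which only cycles correctly because sc_num_channels is a power of two (as both the in-line comment and talitos_dev.h require). A standalone sketch of that wrap-around, starting from the same -1 initial value the driver uses:

#include <assert.h>

/* round-robin next-channel pick, as in talitos_process();
 * valid only when num_channels is a power of two */
static int next_channel(int chsel, int num_channels)
{
        return (chsel + 1) & (num_channels - 1);
}

int main(void)
{
        int expected[] = { 0, 1, 2, 3, 0, 1 };
        int ch = -1;                      /* matches the driver's static initializer */
        int i;

        for (i = 0; i < 6; i++) {
                ch = next_channel(ch, 4); /* 4 channels on SEC 2.x */
                assert(ch == expected[i]);
        }
        return 0;
}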
22610 +/* go through all channels' descriptors, notifying OCF what has
22611 + * _and_hasn't_ successfully completed, and reset the device
22612 + * (otherwise recovery would be left to decoding descriptor hdrs!)
22613 + */
22614 +static void talitos_errorprocessing(struct talitos_softc *sc)
22616 + unsigned long flags;
22617 + int i, j;
22619 + /* disable further scheduling until under control */
22620 + spin_lock_irqsave(&sc->sc_chnfifolock[sc->sc_num_channels], flags);
22622 + if (debug) dump_talitos_status(sc);
22623 + /* go through descriptors, try and salvage those successfully done,
22624 + * and EIO those that weren't
22625 + */
22626 + for (i = 0; i < sc->sc_num_channels; i++) {
22627 + spin_lock_irqsave(&sc->sc_chnfifolock[i], flags);
22628 + for (j = 0; j < sc->sc_chfifo_len; j++) {
22629 + if (sc->sc_chnfifo[i][j].cf_desc.hdr) {
22630 + if ((sc->sc_chnfifo[i][j].cf_desc.hdr
22631 + & TALITOS_HDR_DONE_BITS)
22632 + != TALITOS_HDR_DONE_BITS) {
22633 + /* this one didn't finish */
22634 + /* signify in crp->etype */
22635 + sc->sc_chnfifo[i][j].cf_crp->crp_etype
22636 + = EIO;
22638 + } else
22639 + continue; /* free entry */
22640 + /* either way, notify ocf */
22641 + crypto_done(sc->sc_chnfifo[i][j].cf_crp);
22642 + /* and tag it available again
22644 + * memset to ensure correct descriptor formation by
22645 + * avoiding inadvertently setting "optional" entries
22646 + * e.g. not using "optional" dptr2 MD/HMAC processing
22647 + */
22648 + memset(&sc->sc_chnfifo[i][j].cf_desc,
22649 + 0, sizeof(struct talitos_desc));
22651 + spin_unlock_irqrestore(&sc->sc_chnfifolock[i], flags);
22653 + /* reset and initialize the SEC h/w device */
22654 + talitos_reset_device(sc);
22655 + talitos_init_device(sc);
22656 +#ifdef CONFIG_OCF_RANDOMHARVEST
22657 + if (sc->sc_exec_units & TALITOS_HAS_EU_RNG)
22658 + talitos_rng_init(sc);
22659 +#endif
22661 + /* Okay. Stand by. */
22662 + spin_unlock_irqrestore(&sc->sc_chnfifolock[sc->sc_num_channels], flags);
22664 + return;
22667 +/* go through all channels' descriptors, notifying OCF what's been done */
22668 +static void talitos_doneprocessing(struct talitos_softc *sc)
22670 + unsigned long flags;
22671 + int i, j;
22673 + /* go through descriptors looking for done bits */
22674 + for (i = 0; i < sc->sc_num_channels; i++) {
22675 + spin_lock_irqsave(&sc->sc_chnfifolock[i], flags);
22676 + for (j = 0; j < sc->sc_chfifo_len; j++) {
22677 + /* descriptor has done bits set? */
22678 + if ((sc->sc_chnfifo[i][j].cf_desc.hdr
22679 + & TALITOS_HDR_DONE_BITS)
22680 + == TALITOS_HDR_DONE_BITS) {
22681 + /* notify ocf */
22682 + crypto_done(sc->sc_chnfifo[i][j].cf_crp);
22683 + /* and tag it available again
22685 + * memset to ensure correct descriptor formation by
22686 + * avoiding inadvertently setting "optional" entries
22687 + * e.g. not using "optional" dptr2 MD/HMAC processing
22688 + */
22689 + memset(&sc->sc_chnfifo[i][j].cf_desc,
22690 + 0, sizeof(struct talitos_desc));
22693 + spin_unlock_irqrestore(&sc->sc_chnfifolock[i], flags);
22695 + return;
22698 +static irqreturn_t
22699 +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,19)
22700 +talitos_intr(int irq, void *arg)
22701 +#else
22702 +talitos_intr(int irq, void *arg, struct pt_regs *regs)
22703 +#endif
22705 + struct talitos_softc *sc = arg;
22706 + u_int32_t v, v_hi;
22708 + /* ack */
22709 + v = talitos_read(sc->sc_base_addr + TALITOS_ISR);
22710 + v_hi = talitos_read(sc->sc_base_addr + TALITOS_ISR_HI);
22711 + talitos_write(sc->sc_base_addr + TALITOS_ICR, v);
22712 + talitos_write(sc->sc_base_addr + TALITOS_ICR_HI, v_hi);
22714 + if (unlikely(v & TALITOS_ISR_ERROR)) {
22715 + /* Okay, Houston, we've had a problem here. */
22716 + printk(KERN_DEBUG "%s: got error interrupt - ISR 0x%08x_%08x\n",
22717 + device_get_nameunit(sc->sc_cdev), v, v_hi);
22718 + talitos_errorprocessing(sc);
22719 + } else
22720 + if (likely(v & TALITOS_ISR_DONE)) {
22721 + talitos_doneprocessing(sc);
22723 + return IRQ_HANDLED;
22727 + * Initialize registers we need to touch only once.
22728 + */
22729 +static void
22730 +talitos_init_device(struct talitos_softc *sc)
22732 + u_int32_t v;
22733 + int i;
22735 + DPRINTF("%s()\n", __FUNCTION__);
22737 + /* init all channels */
22738 + for (i = 0; i < sc->sc_num_channels; i++) {
22739 + v = talitos_read(sc->sc_base_addr +
22740 + i*TALITOS_CH_OFFSET + TALITOS_CH_CCCR_HI);
22741 + v |= TALITOS_CH_CCCR_HI_CDWE
22742 + | TALITOS_CH_CCCR_HI_CDIE; /* invoke interrupt if done */
22743 + talitos_write(sc->sc_base_addr +
22744 + i*TALITOS_CH_OFFSET + TALITOS_CH_CCCR_HI, v);
22746 + /* enable all interrupts */
22747 + v = talitos_read(sc->sc_base_addr + TALITOS_IMR);
22748 + v |= TALITOS_IMR_ALL;
22749 + talitos_write(sc->sc_base_addr + TALITOS_IMR, v);
22750 + v = talitos_read(sc->sc_base_addr + TALITOS_IMR_HI);
22751 + v |= TALITOS_IMR_HI_ERRONLY;
22752 + talitos_write(sc->sc_base_addr + TALITOS_IMR_HI, v);
22753 + return;
22757 + * set the master reset bit on the device.
22758 + */
22759 +static void
22760 +talitos_reset_device_master(struct talitos_softc *sc)
22762 + u_int32_t v;
22764 + /* Reset the device by writing 1 to MCR:SWR and waiting 'til cleared */
22765 + v = talitos_read(sc->sc_base_addr + TALITOS_MCR);
22766 + talitos_write(sc->sc_base_addr + TALITOS_MCR, v | TALITOS_MCR_SWR);
22768 + while (talitos_read(sc->sc_base_addr + TALITOS_MCR) & TALITOS_MCR_SWR)
22769 + cpu_relax();
22771 + return;
22775 + * Resets the device. Values in the registers are left as is
22776 + * from the reset (i.e. initial values are assigned elsewhere).
22777 + */
22778 +static void
22779 +talitos_reset_device(struct talitos_softc *sc)
22781 + u_int32_t v;
22782 + int i;
22784 + DPRINTF("%s()\n", __FUNCTION__);
22786 + /*
22787 + * Master reset
22788 + * errata documentation: warning: certain SEC interrupts
22789 + * are not fully cleared by writing the MCR:SWR bit,
22790 + * set bit twice to completely reset
22791 + */
22792 + talitos_reset_device_master(sc); /* once */
22793 + talitos_reset_device_master(sc); /* and once again */
22795 + /* reset all channels */
22796 + for (i = 0; i < sc->sc_num_channels; i++) {
22797 + v = talitos_read(sc->sc_base_addr + i*TALITOS_CH_OFFSET +
22798 + TALITOS_CH_CCCR);
22799 + talitos_write(sc->sc_base_addr + i*TALITOS_CH_OFFSET +
22800 + TALITOS_CH_CCCR, v | TALITOS_CH_CCCR_RESET);
22804 +/* Set up the crypto device structure, private data,
22805 + * and anything else we need before we start */
22806 +#ifdef CONFIG_PPC_MERGE
22807 +static int talitos_probe(struct of_device *ofdev, const struct of_device_id *match)
22808 +#else
22809 +static int talitos_probe(struct platform_device *pdev)
22810 +#endif
22812 + struct talitos_softc *sc = NULL;
22813 + struct resource *r;
22814 +#ifdef CONFIG_PPC_MERGE
22815 + struct device *device = &ofdev->dev;
22816 + struct device_node *np = ofdev->node;
22817 + const unsigned int *prop;
22818 + int err;
22819 + struct resource res;
22820 +#endif
22821 + static int num_chips = 0;
22822 + int rc;
22823 + int i;
22825 + DPRINTF("%s()\n", __FUNCTION__);
22827 + sc = (struct talitos_softc *) kmalloc(sizeof(*sc), GFP_KERNEL);
22828 + if (!sc)
22829 + return -ENOMEM;
22830 + memset(sc, 0, sizeof(*sc));
22832 + softc_device_init(sc, DRV_NAME, num_chips, talitos_methods);
22834 + sc->sc_irq = -1;
22835 + sc->sc_cid = -1;
22836 +#ifndef CONFIG_PPC_MERGE
22837 + sc->sc_dev = pdev;
22838 +#endif
22839 + sc->sc_num = num_chips++;
22841 +#ifdef CONFIG_PPC_MERGE
22842 + dev_set_drvdata(device, sc);
22843 +#else
22844 + platform_set_drvdata(sc->sc_dev, sc);
22845 +#endif
22847 + /* get the irq line */
22848 +#ifdef CONFIG_PPC_MERGE
22849 + err = of_address_to_resource(np, 0, &res);
22850 + if (err)
22851 + return -EINVAL;
22852 + r = &res;
22854 + sc->sc_irq = irq_of_parse_and_map(np, 0);
22855 +#else
22856 + /* get a pointer to the register memory */
22857 + r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
22859 + sc->sc_irq = platform_get_irq(pdev, 0);
22860 +#endif
22861 + rc = request_irq(sc->sc_irq, talitos_intr, 0,
22862 + device_get_nameunit(sc->sc_cdev), sc);
22863 + if (rc) {
22864 + printk(KERN_ERR "%s: failed to hook irq %d\n",
22865 + device_get_nameunit(sc->sc_cdev), sc->sc_irq);
22866 + sc->sc_irq = -1;
22867 + goto out;
22870 + sc->sc_base_addr = (ocf_iomem_t) ioremap(r->start, (r->end - r->start + 1));
22871 + if (!sc->sc_base_addr) {
22872 + printk(KERN_ERR "%s: failed to ioremap\n",
22873 + device_get_nameunit(sc->sc_cdev));
22874 + goto out;
22877 + /* figure out our SEC's properties and capabilities */
22878 + sc->sc_chiprev = (u64)talitos_read(sc->sc_base_addr + TALITOS_ID) << 32
22879 + | talitos_read(sc->sc_base_addr + TALITOS_ID_HI);
22880 + DPRINTF("sec id 0x%llx\n", sc->sc_chiprev);
22882 +#ifdef CONFIG_PPC_MERGE
22883 + /* get SEC properties from device tree, defaulting to SEC 2.0 */
22885 + prop = of_get_property(np, "num-channels", NULL);
22886 + sc->sc_num_channels = prop ? *prop : TALITOS_NCHANNELS_SEC_2_0;
22888 + prop = of_get_property(np, "channel-fifo-len", NULL);
22889 + sc->sc_chfifo_len = prop ? *prop : TALITOS_CHFIFOLEN_SEC_2_0;
22891 + prop = of_get_property(np, "exec-units-mask", NULL);
22892 + sc->sc_exec_units = prop ? *prop : TALITOS_HAS_EUS_SEC_2_0;
22894 + prop = of_get_property(np, "descriptor-types-mask", NULL);
22895 + sc->sc_desc_types = prop ? *prop : TALITOS_HAS_DESCTYPES_SEC_2_0;
22896 +#else
22897 + /* this hardcoded block should go away once openfirmware flat device tree support is available */
22898 + if (sc->sc_chiprev & TALITOS_ID_SEC_2_0) {
22899 + sc->sc_num_channels = TALITOS_NCHANNELS_SEC_2_0;
22900 + sc->sc_chfifo_len = TALITOS_CHFIFOLEN_SEC_2_0;
22901 + sc->sc_exec_units = TALITOS_HAS_EUS_SEC_2_0;
22902 + sc->sc_desc_types = TALITOS_HAS_DESCTYPES_SEC_2_0;
22903 + } else {
22904 + printk(KERN_ERR "%s: failed to id device\n",
22905 + device_get_nameunit(sc->sc_cdev));
22906 + goto out;
22908 +#endif
22910 + /* + 1 is for the meta-channel lock used by the channel scheduler */
22911 + sc->sc_chnfifolock = (spinlock_t *) kmalloc(
22912 + (sc->sc_num_channels + 1) * sizeof(spinlock_t), GFP_KERNEL);
22913 + if (!sc->sc_chnfifolock)
22914 + goto out;
22915 + for (i = 0; i < sc->sc_num_channels + 1; i++) {
22916 + spin_lock_init(&sc->sc_chnfifolock[i]);
22919 + sc->sc_chnlastalg = (int *) kmalloc(
22920 + sc->sc_num_channels * sizeof(int), GFP_KERNEL);
22921 + if (!sc->sc_chnlastalg)
22922 + goto out;
22923 + memset(sc->sc_chnlastalg, 0, sc->sc_num_channels * sizeof(int));
22925 + sc->sc_chnfifo = (struct desc_cryptop_pair **) kmalloc(
22926 + sc->sc_num_channels * sizeof(struct desc_cryptop_pair *),
22927 + GFP_KERNEL);
22928 + if (!sc->sc_chnfifo)
22929 + goto out;
22930 + for (i = 0; i < sc->sc_num_channels; i++) {
22931 + sc->sc_chnfifo[i] = (struct desc_cryptop_pair *) kmalloc(
22932 + sc->sc_chfifo_len * sizeof(struct desc_cryptop_pair),
22933 + GFP_KERNEL);
22934 + if (!sc->sc_chnfifo[i])
22935 + goto out;
22936 + memset(sc->sc_chnfifo[i], 0,
22937 + sc->sc_chfifo_len * sizeof(struct desc_cryptop_pair));
22940 + /* reset and initialize the SEC h/w device */
22941 + talitos_reset_device(sc);
22942 + talitos_init_device(sc);
22944 + sc->sc_cid = crypto_get_driverid(softc_get_device(sc),CRYPTOCAP_F_HARDWARE);
22945 + if (sc->sc_cid < 0) {
22946 + printk(KERN_ERR "%s: could not get crypto driver id\n",
22947 + device_get_nameunit(sc->sc_cdev));
22948 + goto out;
22951 + /* register algorithms with the framework */
22952 + printk("%s:", device_get_nameunit(sc->sc_cdev));
22954 + if (sc->sc_exec_units & TALITOS_HAS_EU_RNG) {
22955 + printk(" rng");
22956 +#ifdef CONFIG_OCF_RANDOMHARVEST
22957 + talitos_rng_init(sc);
22958 + crypto_rregister(sc->sc_cid, talitos_read_random, sc);
22959 +#endif
22961 + if (sc->sc_exec_units & TALITOS_HAS_EU_DEU) {
22962 + printk(" des/3des");
22963 + crypto_register(sc->sc_cid, CRYPTO_3DES_CBC, 0, 0);
22964 + crypto_register(sc->sc_cid, CRYPTO_DES_CBC, 0, 0);
22966 + if (sc->sc_exec_units & TALITOS_HAS_EU_AESU) {
22967 + printk(" aes");
22968 + crypto_register(sc->sc_cid, CRYPTO_AES_CBC, 0, 0);
22970 + if (sc->sc_exec_units & TALITOS_HAS_EU_MDEU) {
22971 + printk(" md5");
22972 + crypto_register(sc->sc_cid, CRYPTO_MD5, 0, 0);
22973 + /* HMAC support only with IPsec for now */
22974 + crypto_register(sc->sc_cid, CRYPTO_MD5_HMAC, 0, 0);
22975 + printk(" sha1");
22976 + crypto_register(sc->sc_cid, CRYPTO_SHA1, 0, 0);
22977 + /* HMAC support only with IPsec for now */
22978 + crypto_register(sc->sc_cid, CRYPTO_SHA1_HMAC, 0, 0);
22980 + printk("\n");
22981 + return 0;
22983 +out:
22984 +#ifndef CONFIG_PPC_MERGE
22985 + talitos_remove(pdev);
22986 +#endif
22987 + return -ENOMEM;
22990 +#ifdef CONFIG_PPC_MERGE
22991 +static int talitos_remove(struct of_device *ofdev)
22992 +#else
22993 +static int talitos_remove(struct platform_device *pdev)
22994 +#endif
22996 +#ifdef CONFIG_PPC_MERGE
22997 + struct talitos_softc *sc = dev_get_drvdata(&ofdev->dev);
22998 +#else
22999 + struct talitos_softc *sc = platform_get_drvdata(pdev);
23000 +#endif
23001 + int i;
23003 + DPRINTF("%s()\n", __FUNCTION__);
23004 + if (sc->sc_cid >= 0)
23005 + crypto_unregister_all(sc->sc_cid);
23006 + if (sc->sc_chnfifo) {
23007 + for (i = 0; i < sc->sc_num_channels; i++)
23008 + if (sc->sc_chnfifo[i])
23009 + kfree(sc->sc_chnfifo[i]);
23010 + kfree(sc->sc_chnfifo);
23012 + if (sc->sc_chnlastalg)
23013 + kfree(sc->sc_chnlastalg);
23014 + if (sc->sc_chnfifolock)
23015 + kfree(sc->sc_chnfifolock);
23016 + if (sc->sc_irq != -1)
23017 + free_irq(sc->sc_irq, sc);
23018 + if (sc->sc_base_addr)
23019 + iounmap((void *) sc->sc_base_addr);
23020 + kfree(sc);
23021 + return 0;
23024 +#ifdef CONFIG_PPC_MERGE
23025 +static struct of_device_id talitos_match[] = {
23027 + .type = "crypto",
23028 + .compatible = "talitos",
23029 + },
23030 + {},
23033 +MODULE_DEVICE_TABLE(of, talitos_match);
23035 +static struct of_platform_driver talitos_driver = {
23036 + .name = DRV_NAME,
23037 + .match_table = talitos_match,
23038 + .probe = talitos_probe,
23039 + .remove = talitos_remove,
23042 +static int __init talitos_init(void)
23044 + return of_register_platform_driver(&talitos_driver);
23047 +static void __exit talitos_exit(void)
23049 + of_unregister_platform_driver(&talitos_driver);
23051 +#else
23052 +/* Structure for a platform device driver */
23053 +static struct platform_driver talitos_driver = {
23054 + .probe = talitos_probe,
23055 + .remove = talitos_remove,
23056 + .driver = {
23057 + .name = "fsl-sec2",
23061 +static int __init talitos_init(void)
23063 + return platform_driver_register(&talitos_driver);
23066 +static void __exit talitos_exit(void)
23068 + platform_driver_unregister(&talitos_driver);
23070 +#endif
23072 +module_init(talitos_init);
23073 +module_exit(talitos_exit);
23075 +MODULE_LICENSE("Dual BSD/GPL");
23076 +MODULE_AUTHOR("kim.phillips@freescale.com");
23077 +MODULE_DESCRIPTION("OCF driver for Freescale SEC (talitos)");
23078 diff -Nur linux-2.6.30.orig/crypto/ocf/talitos/talitos_dev.h linux-2.6.30/crypto/ocf/talitos/talitos_dev.h
23079 --- linux-2.6.30.orig/crypto/ocf/talitos/talitos_dev.h 1970-01-01 01:00:00.000000000 +0100
23080 +++ linux-2.6.30/crypto/ocf/talitos/talitos_dev.h 2009-06-11 10:55:27.000000000 +0200
23081 @@ -0,0 +1,277 @@
23083 + * Freescale SEC (talitos) device dependent data structures
23085 + * Copyright (c) 2006 Freescale Semiconductor, Inc.
23087 + * Redistribution and use in source and binary forms, with or without
23088 + * modification, are permitted provided that the following conditions
23089 + * are met:
23091 + * 1. Redistributions of source code must retain the above copyright
23092 + * notice, this list of conditions and the following disclaimer.
23093 + * 2. Redistributions in binary form must reproduce the above copyright
23094 + * notice, this list of conditions and the following disclaimer in the
23095 + * documentation and/or other materials provided with the distribution.
23096 + * 3. The name of the author may not be used to endorse or promote products
23097 + * derived from this software without specific prior written permission.
23099 + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
23100 + * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
23101 + * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
23102 + * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
23103 + * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
23104 + * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23105 + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
23106 + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
23107 + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
23108 + * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
23110 + */
23112 +/* device ID register values */
23113 +#define TALITOS_ID_SEC_2_0 0x40
23114 +#define TALITOS_ID_SEC_2_1 0x40 /* cross ref with IP block revision reg */
23117 + * following num_channels, channel-fifo-depth, exec-unit-mask, and
23118 + * descriptor-types-mask are for forward-compatibility with openfirmware
23119 + * flat device trees
23120 + */
23123 + * num_channels : the number of channels available in each SEC version.
23124 + */
23126 +/* n.b. this driver requires these values be a power of 2 */
23127 +#define TALITOS_NCHANNELS_SEC_1_0 4
23128 +#define TALITOS_NCHANNELS_SEC_1_2 1
23129 +#define TALITOS_NCHANNELS_SEC_2_0 4
23130 +#define TALITOS_NCHANNELS_SEC_2_01 4
23131 +#define TALITOS_NCHANNELS_SEC_2_1 4
23132 +#define TALITOS_NCHANNELS_SEC_2_4 4
23135 + * channel-fifo-depth : The number of descriptor
23136 + * pointers a channel fetch fifo can hold.
23137 + */
23138 +#define TALITOS_CHFIFOLEN_SEC_1_0 1
23139 +#define TALITOS_CHFIFOLEN_SEC_1_2 1
23140 +#define TALITOS_CHFIFOLEN_SEC_2_0 24
23141 +#define TALITOS_CHFIFOLEN_SEC_2_01 24
23142 +#define TALITOS_CHFIFOLEN_SEC_2_1 24
23143 +#define TALITOS_CHFIFOLEN_SEC_2_4 24
23145 +/*
23146 + * exec-unit-mask : The bitmask representing what Execution Units (EUs)
23147 + * are available. EU information should be encoded following the SEC's
23148 + * EU_SEL0 bitfield documentation, i.e. as follows:
23149 + *
23150 + * bit 31 = set if SEC permits no-EU selection (should be always set)
23151 + * bit 30 = set if SEC has the ARC4 EU (AFEU)
23152 + * bit 29 = set if SEC has the des/3des EU (DEU)
23153 + * bit 28 = set if SEC has the message digest EU (MDEU)
23154 + * bit 27 = set if SEC has the random number generator EU (RNG)
23155 + * bit 26 = set if SEC has the public key EU (PKEU)
23156 + * bit 25 = set if SEC has the aes EU (AESU)
23157 + * bit 24 = set if SEC has the Kasumi EU (KEU)
23158 + *
23159 + */
23160 +#define TALITOS_HAS_EU_NONE (1<<0)
23161 +#define TALITOS_HAS_EU_AFEU (1<<1)
23162 +#define TALITOS_HAS_EU_DEU (1<<2)
23163 +#define TALITOS_HAS_EU_MDEU (1<<3)
23164 +#define TALITOS_HAS_EU_RNG (1<<4)
23165 +#define TALITOS_HAS_EU_PKEU (1<<5)
23166 +#define TALITOS_HAS_EU_AESU (1<<6)
23167 +#define TALITOS_HAS_EU_KEU (1<<7)
23169 +/* the corresponding masks for each SEC version */
23170 +#define TALITOS_HAS_EUS_SEC_1_0 0x7f
23171 +#define TALITOS_HAS_EUS_SEC_1_2 0x4d
23172 +#define TALITOS_HAS_EUS_SEC_2_0 0x7f
23173 +#define TALITOS_HAS_EUS_SEC_2_01 0x7f
23174 +#define TALITOS_HAS_EUS_SEC_2_1 0xff
23175 +#define TALITOS_HAS_EUS_SEC_2_4 0x7f
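Note that the bit numbers quoted in the exec-unit-mask comment follow the SEC manual's big-endian (MSB-is-bit-0) numbering, so its "bit 31" presumably corresponds to TALITOS_HAS_EU_NONE (1<<0) in the defines above; read that way, the comment and the masks agree. Decoding the SEC 2.0 value 0x7f with the defines gives every unit except the Kasumi EU, which is what drives the algorithm and RNG registration in talitos_probe() earlier. A standalone decode (constants copied from the defines above):

#include <stdio.h>

#define TALITOS_HAS_EU_AFEU (1<<1)
#define TALITOS_HAS_EU_DEU  (1<<2)
#define TALITOS_HAS_EU_MDEU (1<<3)
#define TALITOS_HAS_EU_RNG  (1<<4)
#define TALITOS_HAS_EU_PKEU (1<<5)
#define TALITOS_HAS_EU_AESU (1<<6)
#define TALITOS_HAS_EU_KEU  (1<<7)

int main(void)
{
        unsigned int mask = 0x7f;  /* TALITOS_HAS_EUS_SEC_2_0 */

        printf("afeu:%d deu:%d mdeu:%d rng:%d pkeu:%d aesu:%d keu:%d\n",
               !!(mask & TALITOS_HAS_EU_AFEU), !!(mask & TALITOS_HAS_EU_DEU),
               !!(mask & TALITOS_HAS_EU_MDEU), !!(mask & TALITOS_HAS_EU_RNG),
               !!(mask & TALITOS_HAS_EU_PKEU), !!(mask & TALITOS_HAS_EU_AESU),
               !!(mask & TALITOS_HAS_EU_KEU));
        /* prints: afeu:1 deu:1 mdeu:1 rng:1 pkeu:1 aesu:1 keu:0 */
        return 0;
}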
23178 + * descriptor-types-mask : The bitmask representing what descriptors
23179 + * are available. Descriptor type information should be encoded
23180 + * following the SEC's Descriptor Header Dword DESC_TYPE field
23181 + * documentation, i.e. as follows:
23183 + * bit 0 = set if SEC supports the aesu_ctr_nonsnoop desc. type
23184 + * bit 1 = set if SEC supports the ipsec_esp descriptor type
23185 + * bit 2 = set if SEC supports the common_nonsnoop desc. type
23186 + * bit 3 = set if SEC supports the 802.11i AES ccmp desc. type
23187 + * bit 4 = set if SEC supports the hmac_snoop_no_afeu desc. type
23188 + * bit 5 = set if SEC supports the srtp descriptor type
23189 + * bit 6 = set if SEC supports the non_hmac_snoop_no_afeu desc.type
23190 + * bit 7 = set if SEC supports the pkeu_assemble descriptor type
23191 + * bit 8 = set if SEC supports the aesu_key_expand_output desc.type
23192 + * bit 9 = set if SEC supports the pkeu_ptmul descriptor type
23193 + * bit 10 = set if SEC supports the common_nonsnoop_afeu desc. type
23194 + * bit 11 = set if SEC supports the pkeu_ptadd_dbl descriptor type
23196 + * ..and so on and so forth.
23197 + */
23198 +#define TALITOS_HAS_DT_AESU_CTR_NONSNOOP (1<<0)
23199 +#define TALITOS_HAS_DT_IPSEC_ESP (1<<1)
23200 +#define TALITOS_HAS_DT_COMMON_NONSNOOP (1<<2)
23202 +/* the corresponding masks for each SEC version */
23203 +#define TALITOS_HAS_DESCTYPES_SEC_2_0 0x01010ebf
23204 +#define TALITOS_HAS_DESCTYPES_SEC_2_1 0x012b0ebf
23206 +/*
23207 + * a TALITOS_xxx_HI address points to the low data bits (32-63) of the register
23208 + */
23210 +/* global register offset addresses */
23211 +#define TALITOS_ID 0x1020
23212 +#define TALITOS_ID_HI 0x1024
23213 +#define TALITOS_MCR 0x1030 /* master control register */
23214 +#define TALITOS_MCR_HI 0x1038 /* master control register */
23215 +#define TALITOS_MCR_SWR 0x1
23216 +#define TALITOS_IMR 0x1008 /* interrupt mask register */
23217 +#define TALITOS_IMR_ALL 0x00010fff /* enable all interrupts mask */
23218 +#define TALITOS_IMR_ERRONLY 0x00010aaa /* enable error interrupts */
23219 +#define TALITOS_IMR_HI 0x100C /* interrupt mask register */
23220 +#define TALITOS_IMR_HI_ALL 0x00323333 /* enable all interrupts mask */
23221 +#define TALITOS_IMR_HI_ERRONLY 0x00222222 /* enable error interrupts */
23222 +#define TALITOS_ISR 0x1010 /* interrupt status register */
23223 +#define TALITOS_ISR_ERROR 0x00010faa /* errors mask */
23224 +#define TALITOS_ISR_DONE 0x00000055 /* channel(s) done mask */
23225 +#define TALITOS_ISR_HI 0x1014 /* interrupt status register */
23226 +#define TALITOS_ICR 0x1018 /* interrupt clear register */
23227 +#define TALITOS_ICR_HI 0x101C /* interrupt clear register */
23229 +/* channel register address stride */
23230 +#define TALITOS_CH_OFFSET 0x100
23232 +/* channel register offset addresses and bits */
23233 +#define TALITOS_CH_CCCR 0x1108 /* Crypto-Channel Config Register */
23234 +#define TALITOS_CH_CCCR_RESET 0x1 /* Channel Reset bit */
23235 +#define TALITOS_CH_CCCR_HI 0x110c /* Crypto-Channel Config Register */
23236 +#define TALITOS_CH_CCCR_HI_CDWE 0x10 /* Channel done writeback enable bit */
23237 +#define TALITOS_CH_CCCR_HI_NT 0x4 /* Notification type bit */
23238 +#define TALITOS_CH_CCCR_HI_CDIE 0x2 /* Channel Done Interrupt Enable bit */
23239 +#define TALITOS_CH_CCPSR 0x1110 /* Crypto-Channel Pointer Status Reg */
23240 +#define TALITOS_CH_CCPSR_HI 0x1114 /* Crypto-Channel Pointer Status Reg */
23241 +#define TALITOS_CH_FF 0x1148 /* Fetch FIFO */
23242 +#define TALITOS_CH_FF_HI 0x114c /* Fetch FIFO's FETCH_ADRS */
23243 +#define TALITOS_CH_CDPR 0x1140 /* Crypto-Channel Current Desc Ptr Reg */
23244 +#define TALITOS_CH_CDPR_HI 0x1144 /* Crypto-Channel Current Desc Ptr Reg */
23245 +#define TALITOS_CH_DESCBUF 0x1180 /* (thru 11bf) Crypto-Channel
23246 + * Descriptor Buffer (debug) */
23248 +/* execution unit register offset addresses and bits */
23249 +#define TALITOS_DEUSR 0x2028 /* DEU status register */
23250 +#define TALITOS_DEUSR_HI 0x202c /* DEU status register */
23251 +#define TALITOS_DEUISR 0x2030 /* DEU interrupt status register */
23252 +#define TALITOS_DEUISR_HI 0x2034 /* DEU interrupt status register */
23253 +#define TALITOS_DEUICR 0x2038 /* DEU interrupt control register */
23254 +#define TALITOS_DEUICR_HI 0x203c /* DEU interrupt control register */
23255 +#define TALITOS_AESUISR 0x4030 /* AESU interrupt status register */
23256 +#define TALITOS_AESUISR_HI 0x4034 /* AESU interrupt status register */
23257 +#define TALITOS_AESUICR 0x4038 /* AESU interrupt control register */
23258 +#define TALITOS_AESUICR_HI 0x403c /* AESU interrupt control register */
23259 +#define TALITOS_MDEUISR 0x6030 /* MDEU interrupt status register */
23260 +#define TALITOS_MDEUISR_HI 0x6034 /* MDEU interrupt status register */
23261 +#define TALITOS_RNGSR 0xa028 /* RNG status register */
23262 +#define TALITOS_RNGSR_HI 0xa02c /* RNG status register */
23263 +#define TALITOS_RNGSR_HI_RD 0x1 /* RNG Reset done */
23264 +#define TALITOS_RNGSR_HI_OFL 0xff0000 /* number of 64-bit words in RNG output FIFO */
23265 +#define TALITOS_RNGDSR 0xa010 /* RNG data size register */
23266 +#define TALITOS_RNGDSR_HI 0xa014 /* RNG data size register */
23267 +#define TALITOS_RNG_FIFO 0xa800 /* RNG FIFO - pool of random numbers */
23268 +#define TALITOS_RNGISR 0xa030 /* RNG Interrupt status register */
23269 +#define TALITOS_RNGISR_HI 0xa034 /* RNG Interrupt status register */
23270 +#define TALITOS_RNGRCR 0xa018 /* RNG Reset control register */
23271 +#define TALITOS_RNGRCR_HI 0xa01c /* RNG Reset control register */
23272 +#define TALITOS_RNGRCR_HI_SR 0x1 /* RNG RNGRCR:Software Reset */
23274 +/* descriptor pointer entry */
23275 +struct talitos_desc_ptr {
23276 + u16 len; /* length */
23277 + u8 extent; /* jump (to s/g link table) and extent */
23278 + u8 res; /* reserved */
23279 + u32 ptr; /* pointer */
23282 +/* descriptor */
23283 +struct talitos_desc {
23284 + u32 hdr; /* header */
23285 + u32 res; /* reserved */
23286 + struct talitos_desc_ptr ptr[7]; /* ptr/len pair array */
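For orientation: each pointer entry is 8 bytes (16-bit length, jump/extent byte, reserved byte, 32-bit pointer) and a full descriptor is 8 bytes of header/reserved plus 7 x 8-byte entries, i.e. 64 bytes, which is the unit talitos_submit() DMA-maps and hands to the fetch FIFO. A compile-time check of that layout, assuming the obvious natural alignment (standalone C11 mirror of the structs above):

#include <stdint.h>

struct talitos_desc_ptr {
        uint16_t len;
        uint8_t  extent;
        uint8_t  res;
        uint32_t ptr;
};

struct talitos_desc {
        uint32_t hdr;
        uint32_t res;
        struct talitos_desc_ptr ptr[7];
};

_Static_assert(sizeof(struct talitos_desc_ptr) == 8, "8-byte ptr/len entry");
_Static_assert(sizeof(struct talitos_desc) == 64, "64-byte descriptor");

int main(void) { return 0; }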
23289 +/* talitos descriptor header (hdr) bits */
23291 +/* primary execution unit select */
23292 +#define TALITOS_SEL0_AFEU 0x10000000
23293 +#define TALITOS_SEL0_DEU 0x20000000
23294 +#define TALITOS_SEL0_MDEU 0x30000000
23295 +#define TALITOS_SEL0_RNG 0x40000000
23296 +#define TALITOS_SEL0_PKEU 0x50000000
23297 +#define TALITOS_SEL0_AESU 0x60000000
23299 +/* primary execution unit mode (MODE0) and derivatives */
23300 +#define TALITOS_MODE0_AESU_CBC 0x00200000
23301 +#define TALITOS_MODE0_AESU_ENC 0x00100000
23302 +#define TALITOS_MODE0_DEU_CBC 0x00400000
23303 +#define TALITOS_MODE0_DEU_3DES 0x00200000
23304 +#define TALITOS_MODE0_DEU_ENC 0x00100000
23305 +#define TALITOS_MODE0_MDEU_INIT 0x01000000 /* init starting regs */
23306 +#define TALITOS_MODE0_MDEU_HMAC 0x00800000
23307 +#define TALITOS_MODE0_MDEU_PAD 0x00400000 /* PD */
23308 +#define TALITOS_MODE0_MDEU_MD5 0x00200000
23309 +#define TALITOS_MODE0_MDEU_SHA256 0x00100000
23310 +#define TALITOS_MODE0_MDEU_SHA1 0x00000000 /* SHA-160 */
23311 +#define TALITOS_MODE0_MDEU_MD5_HMAC \
23312 + (TALITOS_MODE0_MDEU_MD5 | TALITOS_MODE0_MDEU_HMAC)
23313 +#define TALITOS_MODE0_MDEU_SHA256_HMAC \
23314 + (TALITOS_MODE0_MDEU_SHA256 | TALITOS_MODE0_MDEU_HMAC)
23315 +#define TALITOS_MODE0_MDEU_SHA1_HMAC \
23316 + (TALITOS_MODE0_MDEU_SHA1 | TALITOS_MODE0_MDEU_HMAC)
23318 +/* secondary execution unit select (SEL1) */
23319 +/* it's MDEU or nothing */
23320 +#define TALITOS_SEL1_MDEU 0x00030000
23322 +/* secondary execution unit mode (MODE1) and derivatives */
23323 +#define TALITOS_MODE1_MDEU_INIT 0x00001000 /* init starting regs */
23324 +#define TALITOS_MODE1_MDEU_HMAC 0x00000800
23325 +#define TALITOS_MODE1_MDEU_PAD 0x00000400 /* PD */
23326 +#define TALITOS_MODE1_MDEU_MD5 0x00000200
23327 +#define TALITOS_MODE1_MDEU_SHA256 0x00000100
23328 +#define TALITOS_MODE1_MDEU_SHA1 0x00000000 /* SHA-160 */
23329 +#define TALITOS_MODE1_MDEU_MD5_HMAC \
23330 + (TALITOS_MODE1_MDEU_MD5 | TALITOS_MODE1_MDEU_HMAC)
23331 +#define TALITOS_MODE1_MDEU_SHA256_HMAC \
23332 + (TALITOS_MODE1_MDEU_SHA256 | TALITOS_MODE1_MDEU_HMAC)
23333 +#define TALITOS_MODE1_MDEU_SHA1_HMAC \
23334 + (TALITOS_MODE1_MDEU_SHA1 | TALITOS_MODE1_MDEU_HMAC)
23336 +/* direction of overall data flow (DIR) */
23337 +#define TALITOS_DIR_OUTBOUND 0x00000000
23338 +#define TALITOS_DIR_INBOUND 0x00000002
23340 +/* done notification (DN) */
23341 +#define TALITOS_DONE_NOTIFY 0x00000001
23343 +/* descriptor types */
23344 +/* odd numbers here are valid on SEC2 and greater only (e.g. ipsec_esp) */
23345 +#define TD_TYPE_AESU_CTR_NONSNOOP (0 << 3)
23346 +#define TD_TYPE_IPSEC_ESP (1 << 3)
23347 +#define TD_TYPE_COMMON_NONSNOOP_NO_AFEU (2 << 3)
23348 +#define TD_TYPE_HMAC_SNOOP_NO_AFEU (4 << 3)
23350 +#define TALITOS_HDR_DONE_BITS 0xff000000
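The header bits above are what talitos_process() ORs together when it builds a descriptor. As a worked example of the composition (values taken straight from the defines, the combination mirroring the outbound AES-CBC-encrypt plus SHA1-HMAC IPsec ESP path in talitos_process(); the resulting constant is simply the OR of the listed bits):

#include <assert.h>
#include <stdint.h>

#define TALITOS_SEL0_AESU            0x60000000
#define TALITOS_MODE0_AESU_CBC       0x00200000
#define TALITOS_MODE0_AESU_ENC       0x00100000
#define TALITOS_SEL1_MDEU            0x00030000
#define TALITOS_MODE1_MDEU_INIT      0x00001000
#define TALITOS_MODE1_MDEU_PAD       0x00000400
#define TALITOS_MODE1_MDEU_SHA1_HMAC 0x00000800
#define TALITOS_DIR_OUTBOUND         0x00000000
#define TALITOS_DONE_NOTIFY          0x00000001
#define TD_TYPE_IPSEC_ESP            (1 << 3)

int main(void)
{
        uint32_t hdr = TALITOS_DONE_NOTIFY | TD_TYPE_IPSEC_ESP
                     | TALITOS_SEL0_AESU | TALITOS_MODE0_AESU_CBC
                     | TALITOS_MODE0_AESU_ENC
                     | TALITOS_SEL1_MDEU | TALITOS_MODE1_MDEU_INIT
                     | TALITOS_MODE1_MDEU_PAD | TALITOS_MODE1_MDEU_SHA1_HMAC
                     | TALITOS_DIR_OUTBOUND;

        assert(hdr == 0x60331c09);
        return 0;
}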
23352 +#define DPRINTF(a...) do { \
23353 + if (debug) { \
23354 + printk("%s: ", sc ? \
23355 + device_get_nameunit(sc->sc_cdev) : "talitos"); \
23356 + printk(a); \
23357 + } \
23358 + } while (0)
23359 diff -Nur linux-2.6.30.orig/crypto/ocf/talitos/talitos_soft.h linux-2.6.30/crypto/ocf/talitos/talitos_soft.h
23360 --- linux-2.6.30.orig/crypto/ocf/talitos/talitos_soft.h 1970-01-01 01:00:00.000000000 +0100
23361 +++ linux-2.6.30/crypto/ocf/talitos/talitos_soft.h 2009-06-11 10:55:27.000000000 +0200
23362 @@ -0,0 +1,77 @@
23364 + * Freescale SEC data structures for integration with ocf-linux
23366 + * Copyright (c) 2006 Freescale Semiconductor, Inc.
23368 + * Redistribution and use in source and binary forms, with or without
23369 + * modification, are permitted provided that the following conditions
23370 + * are met:
23372 + * 1. Redistributions of source code must retain the above copyright
23373 + * notice, this list of conditions and the following disclaimer.
23374 + * 2. Redistributions in binary form must reproduce the above copyright
23375 + * notice, this list of conditions and the following disclaimer in the
23376 + * documentation and/or other materials provided with the distribution.
23377 + * 3. The name of the author may not be used to endorse or promote products
23378 + * derived from this software without specific prior written permission.
23380 + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
23381 + * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
23382 + * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
23383 + * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
23384 + * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
23385 + * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23386 + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
23387 + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
23388 + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
23389 + * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
23390 + */
23393 + * paired descriptor and associated crypto operation
23394 + */
23395 +struct desc_cryptop_pair {
23396 + struct talitos_desc cf_desc; /* descriptor ptr */
23397 + struct cryptop *cf_crp; /* cryptop ptr */
23401 + * Holds data specific to a single talitos device.
23402 + */
23403 +struct talitos_softc {
23404 + softc_device_decl sc_cdev;
23405 + struct platform_device *sc_dev; /* device backpointer */
23406 + ocf_iomem_t sc_base_addr;
23407 + int sc_irq;
23408 + int sc_num; /* if we have multiple chips */
23409 + int32_t sc_cid; /* crypto tag */
23410 + u64 sc_chiprev; /* major/minor chip revision */
23411 + int sc_nsessions;
23412 + struct talitos_session *sc_sessions;
23413 + int sc_num_channels;/* number of crypto channels */
23414 + int sc_chfifo_len; /* channel fetch fifo len */
23415 + int sc_exec_units; /* execution units mask */
23416 + int sc_desc_types; /* descriptor types mask */
23417 + /*
23418 + * mutual exclusion for intra-channel resources, e.g. fetch fifos
23419 + * the last entry is a meta-channel lock used by the channel scheduler
23420 + */
23421 + spinlock_t *sc_chnfifolock;
23422 + /* sc_chnlastalg contains the last algorithm used on that channel */
23423 + int *sc_chnlastalg;
23424 + /* sc_chnfifo holds pending descriptor--crypto operation pairs */
23425 + struct desc_cryptop_pair **sc_chnfifo;
23428 +struct talitos_session {
23429 + u_int32_t ses_used;
23430 + u_int32_t ses_klen; /* key length in bytes */
23431 + u_int32_t ses_key[8]; /* DES/3DES/AES key */
23432 + u_int32_t ses_hmac[5]; /* hmac inner state */
23433 + u_int32_t ses_hmac_len; /* hmac length */
23434 + u_int32_t ses_iv[4]; /* DES/3DES/AES iv */
23435 + u_int32_t ses_mlen; /* desired hash result len (12=ipsec or 16) */
23438 +#define TALITOS_SESSION(sid) ((sid) & 0x0fffffff)
23439 +#define TALITOS_SID(crd, sesn) (((crd) << 28) | ((sesn) & 0x0fffffff))
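These two macros pack the chip number (sc_num) into the top four bits of the 32-bit session id and the session index into the lower 28, matching the *sidp = TALITOS_SID(sc->sc_num, sesn) assignment in talitos_newsession() and the TALITOS_SESSION() lookups in talitos_process() and talitos_freesession(). A standalone round-trip check:

#include <assert.h>
#include <stdint.h>

#define TALITOS_SESSION(sid)    ((sid) & 0x0fffffff)
#define TALITOS_SID(crd, sesn)  (((crd) << 28) | ((sesn) & 0x0fffffff))

int main(void)
{
        uint32_t sid = TALITOS_SID(1, 42);   /* second chip, session index 42 */

        assert(sid == 0x1000002a);
        assert(TALITOS_SESSION(sid) == 42);
        assert((sid >> 28) == 1);            /* chip number recovered */
        return 0;
}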
23440 diff -Nur linux-2.6.30.orig/crypto/ocf/uio.h linux-2.6.30/crypto/ocf/uio.h
23441 --- linux-2.6.30.orig/crypto/ocf/uio.h 1970-01-01 01:00:00.000000000 +0100
23442 +++ linux-2.6.30/crypto/ocf/uio.h 2009-06-11 10:55:27.000000000 +0200
23443 @@ -0,0 +1,54 @@
23444 +#ifndef _OCF_UIO_H_
23445 +#define _OCF_UIO_H_
23447 +#include <linux/uio.h>
23450 + * The linux uio.h doesn't have all we need. To be fully api compatible
23451 + * with the BSD cryptodev, we need to keep this around. Perhaps this can
23452 + * be moved back into the linux/uio.h
23454 + * Linux port done by David McCullough <david_mccullough@securecomputing.com>
23455 + * Copyright (C) 2006-2007 David McCullough
23456 + * Copyright (C) 2004-2005 Intel Corporation.
23458 + * LICENSE TERMS
23460 + * The free distribution and use of this software in both source and binary
23461 + * form is allowed (with or without changes) provided that:
23463 + * 1. distributions of this source code include the above copyright
23464 + * notice, this list of conditions and the following disclaimer;
23466 + * 2. distributions in binary form include the above copyright
23467 + * notice, this list of conditions and the following disclaimer
23468 + * in the documentation and/or other associated materials;
23470 + * 3. the copyright holder's name is not used to endorse products
23471 + * built using this software without specific written permission.
23473 + * ALTERNATIVELY, provided that this notice is retained in full, this product
23474 + * may be distributed under the terms of the GNU General Public License (GPL),
23475 + * in which case the provisions of the GPL apply INSTEAD OF those given above.
23477 + * DISCLAIMER
23479 + * This software is provided 'as is' with no explicit or implied warranties
23480 + * in respect of its properties, including, but not limited to, correctness
23481 + * and/or fitness for purpose.
23482 + * ---------------------------------------------------------------------------
23483 + */
23485 +struct uio {
23486 + struct iovec *uio_iov;
23487 + int uio_iovcnt;
23488 + off_t uio_offset;
23489 + int uio_resid;
23490 +#if 0
23491 + enum uio_seg uio_segflg;
23492 + enum uio_rw uio_rw;
23493 + struct thread *uio_td;
23494 +#endif
23495 +};
23497 +#endif
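For readers new to the BSD-style API, here is a hedged sketch (not part of the patch) of how a caller fills this structure to describe a scattered buffer; OCF code walks uio_iov/uio_iovcnt and counts remaining work off uio_resid. The helper name and buffers are invented for illustration.

	#include <sys/types.h>		/* off_t */
	#include <sys/uio.h>		/* struct iovec */
	/* struct uio as declared in crypto/ocf/uio.h above */

	static void describe_two_segments(struct uio *u, struct iovec iov[2],
					  void *hdr, size_t hdrlen,
					  void *payload, size_t paylen)
	{
		iov[0].iov_base = hdr;		/* first segment: header */
		iov[0].iov_len  = hdrlen;
		iov[1].iov_base = payload;	/* second segment: payload */
		iov[1].iov_len  = paylen;

		u->uio_iov    = iov;
		u->uio_iovcnt = 2;
		u->uio_offset = 0;		/* start of the logical buffer */
		u->uio_resid  = hdrlen + paylen; /* bytes not yet processed */
	}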
23498 diff -Nur linux-2.6.30.orig/drivers/char/random.c linux-2.6.30/drivers/char/random.c
23499 --- linux-2.6.30.orig/drivers/char/random.c 2009-06-10 05:05:27.000000000 +0200
23500 +++ linux-2.6.30/drivers/char/random.c 2009-06-11 10:55:27.000000000 +0200
23501 @@ -129,6 +129,9 @@
23502 * unsigned int value);
23503 * void add_interrupt_randomness(int irq);
23505 + * void random_input_words(__u32 *buf, size_t wordcount, int ent_count)
23506 + * int random_input_wait(void);
23508 * add_input_randomness() uses the input layer interrupt timing, as well as
23509 * the event type information from the hardware.
23511 @@ -140,6 +143,13 @@
23512 * a better measure, since the timing of the disk interrupts are more
23513 * unpredictable.
23515 + * random_input_words() just provides a raw block of entropy to the input
23516 + * pool, such as from a hardware entropy generator.
23518 + * random_input_wait() suspends the caller until such time as the
23519 + * entropy pool falls below the write threshold, and returns a count of how
23520 + * much entropy (in bits) is needed to sustain the pool.
23522 * All of these routines try to estimate how many bits of randomness a
23523 * particular randomness source. They do this by keeping track of the
23524 * first and second order deltas of the event timings.
23525 @@ -712,6 +722,61 @@
23527 #endif
23529 +/**
23530 + * random_input_words - add bulk entropy to pool
23532 + * @buf: buffer to add
23533 + * @wordcount: number of __u32 words to add
23534 + * @ent_count: total amount of entropy (in bits) to credit
23536 + * this provides bulk input of entropy to the input pool
23538 + */
23539 +void random_input_words(__u32 *buf, size_t wordcount, int ent_count)
23540 +{
23541 + mix_pool_bytes(&input_pool, buf, wordcount*4);
23543 + credit_entropy_bits(&input_pool, ent_count);
23545 + DEBUG_ENT("crediting %d bits => %d\n",
23546 + ent_count, input_pool.entropy_count);
23547 + /*
23548 + * Wake up waiting processes if we have enough
23549 + * entropy.
23550 + */
23551 + if (input_pool.entropy_count >= random_read_wakeup_thresh)
23552 + wake_up_interruptible(&random_read_wait);
23553 +}
23554 +EXPORT_SYMBOL(random_input_words);
23556 +/**
23557 + * random_input_wait - wait until random needs entropy
23559 + * this function sleeps until the /dev/random subsystem actually
23560 + * needs more entropy, and then returns the amount of entropy
23561 + * that it would be nice to have added to the system.
23562 + */
23563 +int random_input_wait(void)
23564 +{
23565 + int count;
23567 + wait_event_interruptible(random_write_wait,
23568 + input_pool.entropy_count < random_write_wakeup_thresh);
23570 + count = random_write_wakeup_thresh - input_pool.entropy_count;
23572 + /* likely we got woken up due to a signal */
23573 + if (count <= 0) count = random_read_wakeup_thresh;
23575 + DEBUG_ENT("requesting %d bits from input_wait()er %d<%d\n",
23576 + count,
23577 + input_pool.entropy_count, random_write_wakeup_thresh);
23579 + return count;
23580 +}
23581 +EXPORT_SYMBOL(random_input_wait);
23584 #define EXTRACT_SIZE 10
23586 /*********************************************************************
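Taken together, the two exports are meant to let a hardware RNG driver block until /dev/random actually wants data and then feed it in bulk. A hedged kernel-side sketch, not part of the patch: hw_rng_read() is a hypothetical device read, and the entropy credit of one bit per data bit is an assumption, not something the patch mandates.

	#include <linux/kernel.h>	/* min() */
	#include <linux/kthread.h>
	#include <linux/random.h>	/* random_input_wait(), random_input_words() */

	static int rng_feed_thread(void *unused)
	{
		__u32 buf[32];

		while (!kthread_should_stop()) {
			int wanted, words;

	#ifdef HAS_RANDOM_INPUT_WAIT
			wanted = random_input_wait();	/* sleep until the pool runs low */
	#else
			wanted = 256;			/* fallback: assume 256 bits wanted */
	#endif
			words = min(wanted / 32 + 1, 32);
			hw_rng_read(buf, words);	/* hypothetical hardware read */

			/* assumption: credit one bit of entropy per bit read */
			random_input_words(buf, words, words * 32);
		}
		return 0;
	}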
23587 diff -Nur linux-2.6.30.orig/fs/fcntl.c linux-2.6.30/fs/fcntl.c
23588 --- linux-2.6.30.orig/fs/fcntl.c 2009-06-10 05:05:27.000000000 +0200
23589 +++ linux-2.6.30/fs/fcntl.c 2009-06-11 10:55:27.000000000 +0200
23590 @@ -142,6 +142,7 @@
23592 return ret;
23594 +EXPORT_SYMBOL(sys_dup);
23596 #define SETFL_MASK (O_APPEND | O_NONBLOCK | O_NDELAY | O_DIRECT | O_NOATIME)
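The new export above makes the dup(2) implementation callable from modules, presumably so the OCF cryptodev module can hand a caller a second descriptor referring to an already-open /dev/crypto file. A trivial hedged illustration; the helper name is invented:

	#include <linux/syscalls.h>	/* sys_dup() prototype */

	/* Return a second fd referencing the same open file, or -errno. */
	static long clone_open_fd(unsigned int fd)
	{
		return sys_dup(fd);
	}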
23598 diff -Nur linux-2.6.30.orig/include/linux/miscdevice.h linux-2.6.30/include/linux/miscdevice.h
23599 --- linux-2.6.30.orig/include/linux/miscdevice.h 2009-06-10 05:05:27.000000000 +0200
23600 +++ linux-2.6.30/include/linux/miscdevice.h 2009-06-11 10:55:27.000000000 +0200
23601 @@ -12,6 +12,7 @@
23602 #define APOLLO_MOUSE_MINOR 7
23603 #define PC110PAD_MINOR 9
23604 /*#define ADB_MOUSE_MINOR 10 FIXME OBSOLETE */
23605 +#define CRYPTODEV_MINOR 70 /* /dev/crypto */
23606 #define WATCHDOG_MINOR 130 /* Watchdog timer */
23607 #define TEMP_MINOR 131 /* Temperature Sensor */
23608 #define RTC_MINOR 135
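Reserving minor 70 lets the cryptodev module register /dev/crypto as an ordinary misc device. A hedged sketch of that registration, not the patch's actual cryptodev code; the fops table here is an empty placeholder:

	#include <linux/fs.h>
	#include <linux/miscdevice.h>
	#include <linux/module.h>

	static const struct file_operations crypto_fops = {
		.owner = THIS_MODULE,
		/* a real driver would wire up .unlocked_ioctl, .open, ... */
	};

	static struct miscdevice crypto_miscdev = {
		.minor = CRYPTODEV_MINOR,	/* 70, reserved above */
		.name  = "crypto",		/* udev creates /dev/crypto */
		.fops  = &crypto_fops,
	};

	static int __init crypto_dev_init(void)
	{
		return misc_register(&crypto_miscdev);
	}

	static void __exit crypto_dev_exit(void)
	{
		misc_deregister(&crypto_miscdev);
	}

	module_init(crypto_dev_init);
	module_exit(crypto_dev_exit);
	MODULE_LICENSE("GPL");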
23609 diff -Nur linux-2.6.30.orig/include/linux/random.h linux-2.6.30/include/linux/random.h
23610 --- linux-2.6.30.orig/include/linux/random.h 2009-06-10 05:05:27.000000000 +0200
23611 +++ linux-2.6.30/include/linux/random.h 2009-06-11 10:55:27.000000000 +0200
23612 @@ -34,6 +34,30 @@
23613 /* Clear the entropy pool and associated counters. (Superuser only.) */
23614 #define RNDCLEARPOOL _IO( 'R', 0x06 )
23616 +#ifdef CONFIG_FIPS_RNG
23618 +/* Size of seed value - equal to AES blocksize */
23619 +#define AES_BLOCK_SIZE_BYTES 16
23620 +#define SEED_SIZE_BYTES AES_BLOCK_SIZE_BYTES
23621 +/* Size of AES key */
23622 +#define KEY_SIZE_BYTES 16
23624 +/* ioctl() structure used by FIPS 140-2 Tests */
23625 +struct rand_fips_test {
23626 + unsigned char key[KEY_SIZE_BYTES]; /* Input */
23627 + unsigned char datetime[SEED_SIZE_BYTES]; /* Input */
23628 + unsigned char seed[SEED_SIZE_BYTES]; /* Input */
23629 + unsigned char result[SEED_SIZE_BYTES]; /* Output */
23630 +};
23632 +/* FIPS 140-2 RNG Variable Seed Test. (Superuser only.) */
23633 +#define RNDFIPSVST _IOWR('R', 0x10, struct rand_fips_test)
23635 +/* FIPS 140-2 RNG Monte Carlo Test. (Superuser only.) */
23636 +#define RNDFIPSMCT _IOWR('R', 0x11, struct rand_fips_test)
23638 +#endif /* #ifdef CONFIG_FIPS_RNG */
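These two ioctls expose the FIPS 140-2 RNG known-answer tests to privileged user space when CONFIG_FIPS_RNG is enabled. A hedged user-space sketch of driving the Variable Seed Test, presumably issued against /dev/random like the other RND* ioctls; the all-zero inputs are placeholders, not real test vectors, and the program assumes a kernel and linux/random.h carrying this patch:

	#include <stdio.h>
	#include <string.h>
	#include <fcntl.h>
	#include <unistd.h>
	#include <sys/ioctl.h>
	#include <linux/random.h>	/* struct rand_fips_test, RNDFIPSVST */

	int main(void)
	{
		struct rand_fips_test t;
		int fd = open("/dev/random", O_RDWR);	/* requires root */

		if (fd < 0)
			return 1;

		memset(&t, 0, sizeof(t));	/* key, datetime, seed: placeholder zeros */
		if (ioctl(fd, RNDFIPSVST, &t) == 0)
			printf("result starts %02x %02x %02x %02x\n",
			       t.result[0], t.result[1], t.result[2], t.result[3]);
		else
			perror("RNDFIPSVST");

		close(fd);
		return 0;
	}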
23640 struct rand_pool_info {
23641 int entropy_count;
23642 int buf_size;
23643 @@ -50,6 +74,10 @@
23644 unsigned int value);
23645 extern void add_interrupt_randomness(int irq);
23647 +extern void random_input_words(__u32 *buf, size_t wordcount, int ent_count);
23648 +extern int random_input_wait(void);
23649 +#define HAS_RANDOM_INPUT_WAIT 1
23651 extern void get_random_bytes(void *buf, int nbytes);
23652 void generate_random_uuid(unsigned char uuid_out[16]);