/* $FreeBSD: src/sys/dev/ubsec/ubsec.c,v 1.6.2.12 2003/06/04 17:56:59 sam Exp $ */
/* $DragonFly: src/sys/dev/crypto/ubsec/ubsec.c,v 1.13 2006/12/22 23:26:15 swildner Exp $ */
/* $OpenBSD: ubsec.c,v 1.115 2002/09/24 18:33:26 jason Exp $ */

/*
 * Copyright (c) 2000 Jason L. Wright (jason@thought.net)
 * Copyright (c) 2000 Theo de Raadt (deraadt@openbsd.org)
 * Copyright (c) 2001 Patrik Lindergren (patrik@ipunplugged.com)
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *      This product includes software developed by Jason L. Wright
 * 4. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT,
 * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
 * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 *
 * Effort sponsored in part by the Defense Advanced Research Projects
 * Agency (DARPA) and Air Force Research Laboratory, Air Force
 * Materiel Command, USAF, under agreement number F30602-01-2-0537.
 */

/*
 * uBsec 5[56]01, 58xx hardware crypto accelerator
 */
#include "opt_ubsec.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/errno.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/sysctl.h>
#include <sys/endian.h>
#include <sys/random.h>
#include <sys/thread2.h>

#include <machine/clock.h>

#include <crypto/sha1.h>
#include <opencrypto/cryptodev.h>
#include <opencrypto/cryptosoft.h>

#include <bus/pci/pcivar.h>
#include <bus/pci/pcireg.h>
/* grr, #defines for gratuitous incompatibility in queue.h */
#define	SIMPLEQ_HEAD		STAILQ_HEAD
#define	SIMPLEQ_ENTRY		STAILQ_ENTRY
#define	SIMPLEQ_INIT		STAILQ_INIT
#define	SIMPLEQ_INSERT_TAIL	STAILQ_INSERT_TAIL
#define	SIMPLEQ_EMPTY		STAILQ_EMPTY
#define	SIMPLEQ_FIRST		STAILQ_FIRST
#define	SIMPLEQ_REMOVE_HEAD	STAILQ_REMOVE_HEAD
#define	SIMPLEQ_FOREACH		STAILQ_FOREACH
/* ditto for endian.h */
#define	letoh16(x)		le16toh(x)
#define	letoh32(x)		le32toh(x)

#include "../rndtest/rndtest.h"
/*
 * Prototypes and count for the pci_device structure
 */
static	int ubsec_probe(device_t);
static	int ubsec_attach(device_t);
static	int ubsec_detach(device_t);
static	int ubsec_suspend(device_t);
static	int ubsec_resume(device_t);
static	void ubsec_shutdown(device_t);

static device_method_t ubsec_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,		ubsec_probe),
	DEVMETHOD(device_attach,	ubsec_attach),
	DEVMETHOD(device_detach,	ubsec_detach),
	DEVMETHOD(device_suspend,	ubsec_suspend),
	DEVMETHOD(device_resume,	ubsec_resume),
	DEVMETHOD(device_shutdown,	ubsec_shutdown),

	/* bus interface */
	DEVMETHOD(bus_print_child,	bus_generic_print_child),
	DEVMETHOD(bus_driver_added,	bus_generic_driver_added),
};
static driver_t ubsec_driver = {
	"ubsec",
	ubsec_methods,
	sizeof (struct ubsec_softc)
};
static devclass_t ubsec_devclass;

DECLARE_DUMMY_MODULE(ubsec);
DRIVER_MODULE(ubsec, pci, ubsec_driver, ubsec_devclass, 0, 0);
MODULE_DEPEND(ubsec, crypto, 1, 1, 1);
MODULE_DEPEND(ubsec, rndtest, 1, 1, 1);
static	void ubsec_intr(void *);
static	int ubsec_newsession(void *, u_int32_t *, struct cryptoini *);
static	int ubsec_freesession(void *, u_int64_t);
static	int ubsec_process(void *, struct cryptop *, int);
static	void ubsec_callback(struct ubsec_softc *, struct ubsec_q *);
static	void ubsec_feed(struct ubsec_softc *);
static	void ubsec_mcopy(struct mbuf *, struct mbuf *, int, int);
static	void ubsec_callback2(struct ubsec_softc *, struct ubsec_q2 *);
static	int ubsec_feed2(struct ubsec_softc *);
static	void ubsec_rng(void *);
static	int ubsec_dma_malloc(struct ubsec_softc *, bus_size_t,
	    struct ubsec_dma_alloc *, int);
#define	ubsec_dma_sync(_dma, _flags) \
	bus_dmamap_sync((_dma)->dma_tag, (_dma)->dma_map, (_flags))
static	void ubsec_dma_free(struct ubsec_softc *, struct ubsec_dma_alloc *);
static	int ubsec_dmamap_aligned(struct ubsec_operand *op);

static	void ubsec_reset_board(struct ubsec_softc *sc);
static	void ubsec_init_board(struct ubsec_softc *sc);
static	void ubsec_init_pciregs(device_t dev);
static	void ubsec_totalreset(struct ubsec_softc *sc);

static	int ubsec_free_q(struct ubsec_softc *sc, struct ubsec_q *q);

static	int ubsec_kprocess(void*, struct cryptkop *, int);
static	int ubsec_kprocess_modexp_hw(struct ubsec_softc *, struct cryptkop *, int);
static	int ubsec_kprocess_modexp_sw(struct ubsec_softc *, struct cryptkop *, int);
static	int ubsec_kprocess_rsapriv(struct ubsec_softc *, struct cryptkop *, int);
static	void ubsec_kfree(struct ubsec_softc *, struct ubsec_q2 *);
static	int ubsec_ksigbits(struct crparam *);
static	void ubsec_kshift_r(u_int, u_int8_t *, u_int, u_int8_t *, u_int);
static	void ubsec_kshift_l(u_int, u_int8_t *, u_int, u_int8_t *, u_int);
SYSCTL_NODE(_hw, OID_AUTO, ubsec, CTLFLAG_RD, 0, "Broadcom driver parameters");

static	void ubsec_dump_pb(volatile struct ubsec_pktbuf *);
static	void ubsec_dump_mcr(struct ubsec_mcr *);
static	void ubsec_dump_ctx2(struct ubsec_ctx_keyop *);

static	int ubsec_debug = 0;
SYSCTL_INT(_hw_ubsec, OID_AUTO, debug, CTLFLAG_RW, &ubsec_debug,
	    0, "control debugging msgs");
#define	READ_REG(sc,r) \
	bus_space_read_4((sc)->sc_st, (sc)->sc_sh, (r))
#define	WRITE_REG(sc,reg,val) \
	bus_space_write_4((sc)->sc_st, (sc)->sc_sh, reg, val)

#define	SWAP32(x) (x) = htole32(ntohl((x)))
#define	HTOLE32(x) (x) = htole32(x)
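/*
 * SWAP32 rewrites a 32-bit word from network (big-endian) order into the
 * little-endian order the chip expects; HTOLE32 does the same for a word
 * that is already in host order.  Both modify their argument in place.
 */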
struct ubsec_stats ubsecstats;
SYSCTL_STRUCT(_hw_ubsec, OID_AUTO, stats, CTLFLAG_RD, &ubsecstats,
	    ubsec_stats, "driver statistics");
static int
ubsec_probe(device_t dev)
{
	if (pci_get_vendor(dev) == PCI_VENDOR_SUN &&
	    (pci_get_device(dev) == PCI_PRODUCT_SUN_5821 ||
	     pci_get_device(dev) == PCI_PRODUCT_SUN_SCA1K))
		return (0);
	if (pci_get_vendor(dev) == PCI_VENDOR_BLUESTEEL &&
	    (pci_get_device(dev) == PCI_PRODUCT_BLUESTEEL_5501 ||
	     pci_get_device(dev) == PCI_PRODUCT_BLUESTEEL_5601))
		return (0);
	if (pci_get_vendor(dev) == PCI_VENDOR_BROADCOM &&
	    (pci_get_device(dev) == PCI_PRODUCT_BROADCOM_5801 ||
	     pci_get_device(dev) == PCI_PRODUCT_BROADCOM_5802 ||
	     pci_get_device(dev) == PCI_PRODUCT_BROADCOM_5805 ||
	     pci_get_device(dev) == PCI_PRODUCT_BROADCOM_5820 ||
	     pci_get_device(dev) == PCI_PRODUCT_BROADCOM_5821 ||
	     pci_get_device(dev) == PCI_PRODUCT_BROADCOM_5822 ||
	     pci_get_device(dev) == PCI_PRODUCT_BROADCOM_5823))
		return (0);
	return (ENXIO);
}
static const char*
ubsec_partname(struct ubsec_softc *sc)
{
	/* XXX sprintf numbers when not decoded */
	switch (pci_get_vendor(sc->sc_dev)) {
	case PCI_VENDOR_BROADCOM:
		switch (pci_get_device(sc->sc_dev)) {
		case PCI_PRODUCT_BROADCOM_5801:	return "Broadcom 5801";
		case PCI_PRODUCT_BROADCOM_5802:	return "Broadcom 5802";
		case PCI_PRODUCT_BROADCOM_5805:	return "Broadcom 5805";
		case PCI_PRODUCT_BROADCOM_5820:	return "Broadcom 5820";
		case PCI_PRODUCT_BROADCOM_5821:	return "Broadcom 5821";
		case PCI_PRODUCT_BROADCOM_5822:	return "Broadcom 5822";
		case PCI_PRODUCT_BROADCOM_5823:	return "Broadcom 5823";
		}
		return "Broadcom unknown-part";
	case PCI_VENDOR_BLUESTEEL:
		switch (pci_get_device(sc->sc_dev)) {
		case PCI_PRODUCT_BLUESTEEL_5601: return "Bluesteel 5601";
		}
		return "Bluesteel unknown-part";
	case PCI_VENDOR_SUN:
		switch (pci_get_device(sc->sc_dev)) {
		case PCI_PRODUCT_SUN_5821:	return "Sun Crypto 5821";
		case PCI_PRODUCT_SUN_SCA1K:	return "Sun Crypto 1K";
		}
		return "Sun unknown-part";
	}
	return "Unknown-vendor unknown-part";
}
static void
default_harvest(struct rndtest_state *rsp, void *buf, u_int count)
{
	u_int32_t *p = (u_int32_t *)buf;

	for (count /= sizeof (u_int32_t); count; count--)
		add_true_randomness(*p++);
}
static int
ubsec_attach(device_t dev)
{
	struct ubsec_softc *sc = device_get_softc(dev);
	struct ubsec_dma *dmap;
	u_int32_t cmd, i;
	int rid;

	KASSERT(sc != NULL, ("ubsec_attach: null software carrier!"));
	bzero(sc, sizeof (*sc));
	sc->sc_dev = dev;

	SIMPLEQ_INIT(&sc->sc_queue);
	SIMPLEQ_INIT(&sc->sc_qchip);
	SIMPLEQ_INIT(&sc->sc_queue2);
	SIMPLEQ_INIT(&sc->sc_qchip2);
	SIMPLEQ_INIT(&sc->sc_q2free);

	/* XXX handle power management */

	sc->sc_statmask = BS_STAT_MCR1_DONE | BS_STAT_DMAERR;

	if (pci_get_vendor(dev) == PCI_VENDOR_BLUESTEEL &&
	    pci_get_device(dev) == PCI_PRODUCT_BLUESTEEL_5601)
		sc->sc_flags |= UBS_FLAGS_KEY | UBS_FLAGS_RNG;

	if (pci_get_vendor(dev) == PCI_VENDOR_BROADCOM &&
	    (pci_get_device(dev) == PCI_PRODUCT_BROADCOM_5802 ||
	     pci_get_device(dev) == PCI_PRODUCT_BROADCOM_5805))
		sc->sc_flags |= UBS_FLAGS_KEY | UBS_FLAGS_RNG;

	if (pci_get_vendor(dev) == PCI_VENDOR_BROADCOM &&
	    pci_get_device(dev) == PCI_PRODUCT_BROADCOM_5820)
		sc->sc_flags |= UBS_FLAGS_KEY | UBS_FLAGS_RNG |
		    UBS_FLAGS_LONGCTX | UBS_FLAGS_HWNORM | UBS_FLAGS_BIGKEY;

	if ((pci_get_vendor(dev) == PCI_VENDOR_BROADCOM &&
	     (pci_get_device(dev) == PCI_PRODUCT_BROADCOM_5821 ||
	      pci_get_device(dev) == PCI_PRODUCT_BROADCOM_5822 ||
	      pci_get_device(dev) == PCI_PRODUCT_BROADCOM_5823)) ||
	    (pci_get_vendor(dev) == PCI_VENDOR_SUN &&
	     (pci_get_device(dev) == PCI_PRODUCT_SUN_SCA1K ||
	      pci_get_device(dev) == PCI_PRODUCT_SUN_5821))) {
		/* NB: the 5821/5822 defines some additional status bits */
		sc->sc_statmask |= BS_STAT_MCR1_ALLEMPTY |
		    BS_STAT_MCR2_ALLEMPTY;
		sc->sc_flags |= UBS_FLAGS_KEY | UBS_FLAGS_RNG |
		    UBS_FLAGS_LONGCTX | UBS_FLAGS_HWNORM | UBS_FLAGS_BIGKEY;
	}

	cmd = pci_read_config(dev, PCIR_COMMAND, 4);
	cmd |= PCIM_CMD_MEMEN | PCIM_CMD_BUSMASTEREN;
	pci_write_config(dev, PCIR_COMMAND, cmd, 4);
	cmd = pci_read_config(dev, PCIR_COMMAND, 4);

	if (!(cmd & PCIM_CMD_MEMEN)) {
		device_printf(dev, "failed to enable memory mapping\n");
		goto bad;
	}

	if (!(cmd & PCIM_CMD_BUSMASTEREN)) {
		device_printf(dev, "failed to enable bus mastering\n");
		goto bad;
	}

	/*
	 * Setup memory-mapping of PCI registers.
	 */
	rid = BS_BAR;
	sc->sc_sr = bus_alloc_resource(dev, SYS_RES_MEMORY, &rid,
	    0, ~0, 1, RF_ACTIVE);
	if (sc->sc_sr == NULL) {
		device_printf(dev, "cannot map register space\n");
		goto bad;
	}
	sc->sc_st = rman_get_bustag(sc->sc_sr);
	sc->sc_sh = rman_get_bushandle(sc->sc_sr);

	/*
	 * Arrange interrupt line.
	 */
	rid = 0;
	sc->sc_irq = bus_alloc_resource(dev, SYS_RES_IRQ, &rid,
	    0, ~0, 1, RF_SHAREABLE|RF_ACTIVE);
	if (sc->sc_irq == NULL) {
		device_printf(dev, "could not map interrupt\n");
		goto bad1;
	}
	/*
	 * NB: Network code assumes we are blocked with splimp()
	 *     so make sure the IRQ is mapped appropriately.
	 */
	if (bus_setup_intr(dev, sc->sc_irq, 0,
	    ubsec_intr, sc, &sc->sc_ih, NULL)) {
		device_printf(dev, "could not establish interrupt\n");
		goto bad2;
	}

	sc->sc_cid = crypto_get_driverid(0);
	if (sc->sc_cid < 0) {
		device_printf(dev, "could not get crypto driver id\n");
		goto bad3;
	}

	/*
	 * Setup DMA descriptor area.
	 */
	if (bus_dma_tag_create(NULL,			/* parent */
			       1, 0,			/* alignment, bounds */
			       BUS_SPACE_MAXADDR_32BIT,	/* lowaddr */
			       BUS_SPACE_MAXADDR,	/* highaddr */
			       NULL, NULL,		/* filter, filterarg */
			       0x3ffff,			/* maxsize */
			       UBS_MAX_SCATTER,		/* nsegments */
			       0xffff,			/* maxsegsize */
			       BUS_DMA_ALLOCNOW,	/* flags */
			       &sc->sc_dmat)) {
		device_printf(dev, "cannot allocate DMA tag\n");
		goto bad4;
	}
	SIMPLEQ_INIT(&sc->sc_freequeue);
	dmap = sc->sc_dmaa;
	for (i = 0; i < UBS_MAX_NQUEUE; i++, dmap++) {
		struct ubsec_q *q;

		q = kmalloc(sizeof(struct ubsec_q), M_DEVBUF, M_WAITOK);
		if (ubsec_dma_malloc(sc, sizeof(struct ubsec_dmachunk),
		    &dmap->d_alloc, 0)) {
			device_printf(dev, "cannot allocate dma buffers\n");
			kfree(q, M_DEVBUF);
			break;
		}
		dmap->d_dma = (struct ubsec_dmachunk *)dmap->d_alloc.dma_vaddr;

		q->q_dma = dmap;
		sc->sc_queuea[i] = q;

		SIMPLEQ_INSERT_TAIL(&sc->sc_freequeue, q, q_next);
	}

	device_printf(sc->sc_dev, "%s\n", ubsec_partname(sc));

	crypto_register(sc->sc_cid, CRYPTO_3DES_CBC, 0, 0,
	    ubsec_newsession, ubsec_freesession, ubsec_process, sc);
	crypto_register(sc->sc_cid, CRYPTO_DES_CBC, 0, 0,
	    ubsec_newsession, ubsec_freesession, ubsec_process, sc);
	crypto_register(sc->sc_cid, CRYPTO_MD5_HMAC, 0, 0,
	    ubsec_newsession, ubsec_freesession, ubsec_process, sc);
	crypto_register(sc->sc_cid, CRYPTO_SHA1_HMAC, 0, 0,
	    ubsec_newsession, ubsec_freesession, ubsec_process, sc);

	/*
	 * Reset Broadcom chip
	 */
	ubsec_reset_board(sc);

	/*
	 * Init Broadcom specific PCI settings
	 */
	ubsec_init_pciregs(dev);

	/*
	 * Init Broadcom chip
	 */
	ubsec_init_board(sc);

#ifndef UBSEC_NO_RNG
	if (sc->sc_flags & UBS_FLAGS_RNG) {
		sc->sc_statmask |= BS_STAT_MCR2_DONE;
#ifdef UBSEC_RNDTEST
		sc->sc_rndtest = rndtest_attach(dev);
		if (sc->sc_rndtest != NULL)
			sc->sc_harvest = rndtest_harvest;
		else
			sc->sc_harvest = default_harvest;
#else
		sc->sc_harvest = default_harvest;
#endif

		if (ubsec_dma_malloc(sc, sizeof(struct ubsec_mcr),
		    &sc->sc_rng.rng_q.q_mcr, 0))
			goto skip_rng;

		if (ubsec_dma_malloc(sc, sizeof(struct ubsec_ctx_rngbypass),
		    &sc->sc_rng.rng_q.q_ctx, 0)) {
			ubsec_dma_free(sc, &sc->sc_rng.rng_q.q_mcr);
			goto skip_rng;
		}

		if (ubsec_dma_malloc(sc, sizeof(u_int32_t) *
		    UBSEC_RNG_BUFSIZ, &sc->sc_rng.rng_buf, 0)) {
			ubsec_dma_free(sc, &sc->sc_rng.rng_q.q_ctx);
			ubsec_dma_free(sc, &sc->sc_rng.rng_q.q_mcr);
			goto skip_rng;
		}

		if (hz >= 100)
			sc->sc_rnghz = hz / 100;
		else
			sc->sc_rnghz = 1;
		callout_init(&sc->sc_rngto);
		callout_reset(&sc->sc_rngto, sc->sc_rnghz, ubsec_rng, sc);
skip_rng:
		;
	}
#endif /* UBSEC_NO_RNG */

	if (sc->sc_flags & UBS_FLAGS_KEY) {
		sc->sc_statmask |= BS_STAT_MCR2_DONE;

		crypto_kregister(sc->sc_cid, CRK_MOD_EXP, 0,
		    ubsec_kprocess, sc);
		crypto_kregister(sc->sc_cid, CRK_MOD_EXP_CRT, 0,
		    ubsec_kprocess, sc);
	}
	return (0);
bad4:
	crypto_unregister_all(sc->sc_cid);
bad3:
	bus_teardown_intr(dev, sc->sc_irq, sc->sc_ih);
bad2:
	bus_release_resource(dev, SYS_RES_IRQ, 0, sc->sc_irq);
bad1:
	bus_release_resource(dev, SYS_RES_MEMORY, BS_BAR, sc->sc_sr);
bad:
	return (ENXIO);
}
/*
 * Detach a device that successfully probed.
 */
static int
ubsec_detach(device_t dev)
{
	struct ubsec_softc *sc = device_get_softc(dev);

	KASSERT(sc != NULL, ("ubsec_detach: null software carrier"));

	/* XXX wait/abort active ops */

	callout_stop(&sc->sc_rngto);

	crypto_unregister_all(sc->sc_cid);

	rndtest_detach(sc->sc_rndtest);

	while (!SIMPLEQ_EMPTY(&sc->sc_freequeue)) {
		struct ubsec_q *q;

		q = SIMPLEQ_FIRST(&sc->sc_freequeue);
		SIMPLEQ_REMOVE_HEAD(&sc->sc_freequeue, q_next);
		ubsec_dma_free(sc, &q->q_dma->d_alloc);
		kfree(q, M_DEVBUF);
	}
#ifndef UBSEC_NO_RNG
	if (sc->sc_flags & UBS_FLAGS_RNG) {
		ubsec_dma_free(sc, &sc->sc_rng.rng_q.q_mcr);
		ubsec_dma_free(sc, &sc->sc_rng.rng_q.q_ctx);
		ubsec_dma_free(sc, &sc->sc_rng.rng_buf);
	}
#endif /* UBSEC_NO_RNG */

	bus_generic_detach(dev);
	bus_teardown_intr(dev, sc->sc_irq, sc->sc_ih);
	bus_release_resource(dev, SYS_RES_IRQ, 0, sc->sc_irq);

	bus_dma_tag_destroy(sc->sc_dmat);
	bus_release_resource(dev, SYS_RES_MEMORY, BS_BAR, sc->sc_sr);

	return (0);
}
/*
 * Stop all chip i/o so that the kernel's probe routines don't
 * get confused by errant DMAs when rebooting.
 */
static void
ubsec_shutdown(device_t dev)
{
	ubsec_stop(device_get_softc(dev));
}

/*
 * Device suspend routine.
 */
static int
ubsec_suspend(device_t dev)
{
	struct ubsec_softc *sc = device_get_softc(dev);

	KASSERT(sc != NULL, ("ubsec_suspend: null software carrier"));
	/* XXX stop the device and save PCI settings */
	sc->sc_suspended = 1;

	return (0);
}

static int
ubsec_resume(device_t dev)
{
	struct ubsec_softc *sc = device_get_softc(dev);

	KASSERT(sc != NULL, ("ubsec_resume: null software carrier"));
	/* XXX restore PCI settings and start the device */
	sc->sc_suspended = 0;

	return (0);
}
582 * UBSEC Interrupt routine
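/*
 * Overview: the ISR acknowledges BS_STAT, retires completed requests from
 * sc_qchip (MCR1) and sc_qchip2 (MCR2: key ops and RNG), refills the chip
 * from the software queues, resets the board after a DMA error, and
 * unblocks the opencrypto layer if we had previously signalled queue-full.
 */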
585 ubsec_intr(void *arg
)
587 struct ubsec_softc
*sc
= arg
;
588 volatile u_int32_t stat
;
590 struct ubsec_dma
*dmap
;
593 stat
= READ_REG(sc
, BS_STAT
);
594 stat
&= sc
->sc_statmask
;
599 WRITE_REG(sc
, BS_STAT
, stat
); /* IACK */
602 * Check to see if we have any packets waiting for us
604 if ((stat
& BS_STAT_MCR1_DONE
)) {
605 while (!SIMPLEQ_EMPTY(&sc
->sc_qchip
)) {
606 q
= SIMPLEQ_FIRST(&sc
->sc_qchip
);
609 if ((dmap
->d_dma
->d_mcr
.mcr_flags
& htole16(UBS_MCR_DONE
)) == 0)
612 SIMPLEQ_REMOVE_HEAD(&sc
->sc_qchip
, q_next
);
614 npkts
= q
->q_nstacked_mcrs
;
615 sc
->sc_nqchip
-= 1+npkts
;
617 * search for further sc_qchip ubsec_q's that share
618 * the same MCR, and complete them too, they must be
621 for (i
= 0; i
< npkts
; i
++) {
622 if(q
->q_stacked_mcr
[i
]) {
623 ubsec_callback(sc
, q
->q_stacked_mcr
[i
]);
628 ubsec_callback(sc
, q
);
632 * Don't send any more packet to chip if there has been
635 if (!(stat
& BS_STAT_DMAERR
))
640 * Check to see if we have any key setups/rng's waiting for us
642 if ((sc
->sc_flags
& (UBS_FLAGS_KEY
|UBS_FLAGS_RNG
)) &&
643 (stat
& BS_STAT_MCR2_DONE
)) {
645 struct ubsec_mcr
*mcr
;
647 while (!SIMPLEQ_EMPTY(&sc
->sc_qchip2
)) {
648 q2
= SIMPLEQ_FIRST(&sc
->sc_qchip2
);
650 ubsec_dma_sync(&q2
->q_mcr
,
651 BUS_DMASYNC_POSTREAD
|BUS_DMASYNC_POSTWRITE
);
653 mcr
= (struct ubsec_mcr
*)q2
->q_mcr
.dma_vaddr
;
654 if ((mcr
->mcr_flags
& htole16(UBS_MCR_DONE
)) == 0) {
655 ubsec_dma_sync(&q2
->q_mcr
,
656 BUS_DMASYNC_PREREAD
|BUS_DMASYNC_PREWRITE
);
659 SIMPLEQ_REMOVE_HEAD(&sc
->sc_qchip2
, q_next
);
660 ubsec_callback2(sc
, q2
);
662 * Don't send any more packet to chip if there has been
665 if (!(stat
& BS_STAT_DMAERR
))
671 * Check to see if we got any DMA Error
673 if (stat
& BS_STAT_DMAERR
) {
676 volatile u_int32_t a
= READ_REG(sc
, BS_ERR
);
678 kprintf("dmaerr %s@%08x\n",
679 (a
& BS_ERR_READ
) ? "read" : "write",
682 #endif /* UBSEC_DEBUG */
683 ubsecstats
.hst_dmaerr
++;
684 ubsec_totalreset(sc
);
688 if (sc
->sc_needwakeup
) { /* XXX check high watermark */
689 int wakeup
= sc
->sc_needwakeup
& (CRYPTO_SYMQ
|CRYPTO_ASYMQ
);
692 device_printf(sc
->sc_dev
, "wakeup crypto (%x)\n",
694 #endif /* UBSEC_DEBUG */
695 sc
->sc_needwakeup
&= ~wakeup
;
696 crypto_unblock(sc
->sc_cid
, wakeup
);
701 * ubsec_feed() - aggregate and post requests to chip
704 ubsec_feed(struct ubsec_softc
*sc
)
706 struct ubsec_q
*q
, *q2
;
712 * Decide how many ops to combine in a single MCR. We cannot
713 * aggregate more than UBS_MAX_AGGR because this is the number
714 * of slots defined in the data structure. Note that
715 * aggregation only happens if ops are marked batch'able.
716 * Aggregating ops reduces the number of interrupts to the host
717 * but also (potentially) increases the latency for processing
718 * completed ops as we only get an interrupt when all aggregated
719 * ops have completed.
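/*
 * Example: when several batchable ops are queued, up to UBS_MAX_AGGR of
 * them are stacked behind a single MCR, so the host takes one MCR1-done
 * interrupt for the whole batch instead of one interrupt per op.
 */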
721 if (sc
->sc_nqueue
== 0)
723 if (sc
->sc_nqueue
> 1) {
725 SIMPLEQ_FOREACH(q
, &sc
->sc_queue
, q_next
) {
727 if ((q
->q_crp
->crp_flags
& CRYPTO_F_BATCH
) == 0)
733 * Check device status before going any further.
735 if ((stat
= READ_REG(sc
, BS_STAT
)) & (BS_STAT_MCR1_FULL
| BS_STAT_DMAERR
)) {
736 if (stat
& BS_STAT_DMAERR
) {
737 ubsec_totalreset(sc
);
738 ubsecstats
.hst_dmaerr
++;
740 ubsecstats
.hst_mcr1full
++;
743 if (sc
->sc_nqueue
> ubsecstats
.hst_maxqueue
)
744 ubsecstats
.hst_maxqueue
= sc
->sc_nqueue
;
745 if (npkts
> UBS_MAX_AGGR
)
746 npkts
= UBS_MAX_AGGR
;
747 if (npkts
< 2) /* special case 1 op */
750 ubsecstats
.hst_totbatch
+= npkts
-1;
753 kprintf("merging %d records\n", npkts
);
754 #endif /* UBSEC_DEBUG */
756 q
= SIMPLEQ_FIRST(&sc
->sc_queue
);
757 SIMPLEQ_REMOVE_HEAD(&sc
->sc_queue
, q_next
);
760 bus_dmamap_sync(sc
->sc_dmat
, q
->q_src_map
, BUS_DMASYNC_PREWRITE
);
761 if (q
->q_dst_map
!= NULL
)
762 bus_dmamap_sync(sc
->sc_dmat
, q
->q_dst_map
, BUS_DMASYNC_PREREAD
);
764 q
->q_nstacked_mcrs
= npkts
- 1; /* Number of packets stacked */
766 for (i
= 0; i
< q
->q_nstacked_mcrs
; i
++) {
767 q2
= SIMPLEQ_FIRST(&sc
->sc_queue
);
768 bus_dmamap_sync(sc
->sc_dmat
, q2
->q_src_map
,
769 BUS_DMASYNC_PREWRITE
);
770 if (q2
->q_dst_map
!= NULL
)
771 bus_dmamap_sync(sc
->sc_dmat
, q2
->q_dst_map
,
772 BUS_DMASYNC_PREREAD
);
773 SIMPLEQ_REMOVE_HEAD(&sc
->sc_queue
, q_next
);
776 v
= (void*)(((char *)&q2
->q_dma
->d_dma
->d_mcr
) + sizeof(struct ubsec_mcr
) -
777 sizeof(struct ubsec_mcr_add
));
778 bcopy(v
, &q
->q_dma
->d_dma
->d_mcradd
[i
], sizeof(struct ubsec_mcr_add
));
779 q
->q_stacked_mcr
[i
] = q2
;
781 q
->q_dma
->d_dma
->d_mcr
.mcr_pkts
= htole16(npkts
);
782 SIMPLEQ_INSERT_TAIL(&sc
->sc_qchip
, q
, q_next
);
783 sc
->sc_nqchip
+= npkts
;
784 if (sc
->sc_nqchip
> ubsecstats
.hst_maxqchip
)
785 ubsecstats
.hst_maxqchip
= sc
->sc_nqchip
;
786 ubsec_dma_sync(&q
->q_dma
->d_alloc
,
787 BUS_DMASYNC_PREREAD
| BUS_DMASYNC_PREWRITE
);
788 WRITE_REG(sc
, BS_MCR1
, q
->q_dma
->d_alloc
.dma_paddr
+
789 offsetof(struct ubsec_dmachunk
, d_mcr
));
793 q
= SIMPLEQ_FIRST(&sc
->sc_queue
);
795 bus_dmamap_sync(sc
->sc_dmat
, q
->q_src_map
, BUS_DMASYNC_PREWRITE
);
796 if (q
->q_dst_map
!= NULL
)
797 bus_dmamap_sync(sc
->sc_dmat
, q
->q_dst_map
, BUS_DMASYNC_PREREAD
);
798 ubsec_dma_sync(&q
->q_dma
->d_alloc
,
799 BUS_DMASYNC_PREREAD
| BUS_DMASYNC_PREWRITE
);
801 WRITE_REG(sc
, BS_MCR1
, q
->q_dma
->d_alloc
.dma_paddr
+
802 offsetof(struct ubsec_dmachunk
, d_mcr
));
805 kprintf("feed1: q->chip %p %08x stat %08x\n",
806 q
, (u_int32_t
)vtophys(&q
->q_dma
->d_dma
->d_mcr
),
808 #endif /* UBSEC_DEBUG */
809 SIMPLEQ_REMOVE_HEAD(&sc
->sc_queue
, q_next
);
811 SIMPLEQ_INSERT_TAIL(&sc
->sc_qchip
, q
, q_next
);
813 if (sc
->sc_nqchip
> ubsecstats
.hst_maxqchip
)
814 ubsecstats
.hst_maxqchip
= sc
->sc_nqchip
;
819 * Allocate a new 'session' and return an encoded session id. 'sidp'
820 * contains our registration id, and should contain an encoded session
821 * id on successful allocation.
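/*
 * Note: the returned sid packs the device unit and the session index
 * together via UBSEC_SID() (see the *sidp assignment below), and
 * UBSEC_SESSION() recovers the index on the way back in; the exact bit
 * layout is defined in the driver's header.
 */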
824 ubsec_newsession(void *arg
, u_int32_t
*sidp
, struct cryptoini
*cri
)
826 struct cryptoini
*c
, *encini
= NULL
, *macini
= NULL
;
827 struct ubsec_softc
*sc
= arg
;
828 struct ubsec_session
*ses
= NULL
;
833 KASSERT(sc
!= NULL
, ("ubsec_newsession: null softc"));
834 if (sidp
== NULL
|| cri
== NULL
|| sc
== NULL
)
837 for (c
= cri
; c
!= NULL
; c
= c
->cri_next
) {
838 if (c
->cri_alg
== CRYPTO_MD5_HMAC
||
839 c
->cri_alg
== CRYPTO_SHA1_HMAC
) {
843 } else if (c
->cri_alg
== CRYPTO_DES_CBC
||
844 c
->cri_alg
== CRYPTO_3DES_CBC
) {
851 if (encini
== NULL
&& macini
== NULL
)
854 if (sc
->sc_sessions
== NULL
) {
855 ses
= sc
->sc_sessions
= kmalloc(sizeof(struct ubsec_session
),
856 M_DEVBUF
, M_INTWAIT
);
858 sc
->sc_nsessions
= 1;
860 for (sesn
= 0; sesn
< sc
->sc_nsessions
; sesn
++) {
861 if (sc
->sc_sessions
[sesn
].ses_used
== 0) {
862 ses
= &sc
->sc_sessions
[sesn
];
868 sesn
= sc
->sc_nsessions
;
869 ses
= kmalloc((sesn
+ 1) * sizeof(struct ubsec_session
),
870 M_DEVBUF
, M_INTWAIT
);
871 bcopy(sc
->sc_sessions
, ses
, sesn
*
872 sizeof(struct ubsec_session
));
873 bzero(sc
->sc_sessions
, sesn
*
874 sizeof(struct ubsec_session
));
875 kfree(sc
->sc_sessions
, M_DEVBUF
);
876 sc
->sc_sessions
= ses
;
877 ses
= &sc
->sc_sessions
[sesn
];
882 bzero(ses
, sizeof(struct ubsec_session
));
885 /* get an IV, network byte order */
886 /* XXX may read fewer than requested */
887 read_random(ses
->ses_iv
, sizeof(ses
->ses_iv
));
889 /* Go ahead and compute key in ubsec's byte order */
890 if (encini
->cri_alg
== CRYPTO_DES_CBC
) {
891 bcopy(encini
->cri_key
, &ses
->ses_deskey
[0], 8);
892 bcopy(encini
->cri_key
, &ses
->ses_deskey
[2], 8);
893 bcopy(encini
->cri_key
, &ses
->ses_deskey
[4], 8);
895 bcopy(encini
->cri_key
, ses
->ses_deskey
, 24);
897 SWAP32(ses
->ses_deskey
[0]);
898 SWAP32(ses
->ses_deskey
[1]);
899 SWAP32(ses
->ses_deskey
[2]);
900 SWAP32(ses
->ses_deskey
[3]);
901 SWAP32(ses
->ses_deskey
[4]);
902 SWAP32(ses
->ses_deskey
[5]);
906 for (i
= 0; i
< macini
->cri_klen
/ 8; i
++)
907 macini
->cri_key
[i
] ^= HMAC_IPAD_VAL
;
909 if (macini
->cri_alg
== CRYPTO_MD5_HMAC
) {
911 MD5Update(&md5ctx
, macini
->cri_key
,
912 macini
->cri_klen
/ 8);
913 MD5Update(&md5ctx
, hmac_ipad_buffer
,
914 HMAC_BLOCK_LEN
- (macini
->cri_klen
/ 8));
915 bcopy(md5ctx
.state
, ses
->ses_hminner
,
916 sizeof(md5ctx
.state
));
919 SHA1Update(&sha1ctx
, macini
->cri_key
,
920 macini
->cri_klen
/ 8);
921 SHA1Update(&sha1ctx
, hmac_ipad_buffer
,
922 HMAC_BLOCK_LEN
- (macini
->cri_klen
/ 8));
923 bcopy(sha1ctx
.h
.b32
, ses
->ses_hminner
,
924 sizeof(sha1ctx
.h
.b32
));
927 for (i
= 0; i
< macini
->cri_klen
/ 8; i
++)
928 macini
->cri_key
[i
] ^= (HMAC_IPAD_VAL
^ HMAC_OPAD_VAL
);
930 if (macini
->cri_alg
== CRYPTO_MD5_HMAC
) {
932 MD5Update(&md5ctx
, macini
->cri_key
,
933 macini
->cri_klen
/ 8);
934 MD5Update(&md5ctx
, hmac_opad_buffer
,
935 HMAC_BLOCK_LEN
- (macini
->cri_klen
/ 8));
936 bcopy(md5ctx
.state
, ses
->ses_hmouter
,
937 sizeof(md5ctx
.state
));
940 SHA1Update(&sha1ctx
, macini
->cri_key
,
941 macini
->cri_klen
/ 8);
942 SHA1Update(&sha1ctx
, hmac_opad_buffer
,
943 HMAC_BLOCK_LEN
- (macini
->cri_klen
/ 8));
944 bcopy(sha1ctx
.h
.b32
, ses
->ses_hmouter
,
945 sizeof(sha1ctx
.h
.b32
));
948 for (i
= 0; i
< macini
->cri_klen
/ 8; i
++)
949 macini
->cri_key
[i
] ^= HMAC_OPAD_VAL
;
952 *sidp
= UBSEC_SID(device_get_unit(sc
->sc_dev
), sesn
);
957 * Deallocate a session.
960 ubsec_freesession(void *arg
, u_int64_t tid
)
962 struct ubsec_softc
*sc
= arg
;
964 u_int32_t sid
= ((u_int32_t
) tid
) & 0xffffffff;
966 KASSERT(sc
!= NULL
, ("ubsec_freesession: null softc"));
970 session
= UBSEC_SESSION(sid
);
971 if (session
>= sc
->sc_nsessions
)
974 bzero(&sc
->sc_sessions
[session
], sizeof(sc
->sc_sessions
[session
]));
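/*
 * bus_dmamap_load callback for a crypto operand: record the total mapped
 * size and copy the scatter/gather segment list into the ubsec_operand so
 * the packet buffer chain can be built later.
 */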
979 ubsec_op_cb(void *arg
, bus_dma_segment_t
*seg
, int nsegs
, bus_size_t mapsize
, int error
)
981 struct ubsec_operand
*op
= arg
;
983 KASSERT(nsegs
<= UBS_MAX_SCATTER
,
984 ("Too many DMA segments returned when mapping operand"));
987 kprintf("ubsec_op_cb: mapsize %u nsegs %d\n",
988 (u_int
) mapsize
, nsegs
);
990 op
->mapsize
= mapsize
;
992 bcopy(seg
, op
->segs
, nsegs
* sizeof (seg
[0]));
996 ubsec_process(void *arg
, struct cryptop
*crp
, int hint
)
998 struct ubsec_q
*q
= NULL
;
999 int err
= 0, i
, j
, nicealign
;
1000 struct ubsec_softc
*sc
= arg
;
1001 struct cryptodesc
*crd1
, *crd2
, *maccrd
, *enccrd
;
1002 int encoffset
= 0, macoffset
= 0, cpskip
, cpoffset
;
1003 int sskip
, dskip
, stheend
, dtheend
;
1005 struct ubsec_session
*ses
;
1006 struct ubsec_pktctx ctx
;
1007 struct ubsec_dma
*dmap
= NULL
;
1009 if (crp
== NULL
|| crp
->crp_callback
== NULL
|| sc
== NULL
) {
1010 ubsecstats
.hst_invalid
++;
1013 if (UBSEC_SESSION(crp
->crp_sid
) >= sc
->sc_nsessions
) {
1014 ubsecstats
.hst_badsession
++;
1020 if (SIMPLEQ_EMPTY(&sc
->sc_freequeue
)) {
1021 ubsecstats
.hst_queuefull
++;
1022 sc
->sc_needwakeup
|= CRYPTO_SYMQ
;
1026 q
= SIMPLEQ_FIRST(&sc
->sc_freequeue
);
1027 SIMPLEQ_REMOVE_HEAD(&sc
->sc_freequeue
, q_next
);
1030 dmap
= q
->q_dma
; /* Save dma pointer */
1031 bzero(q
, sizeof(struct ubsec_q
));
1032 bzero(&ctx
, sizeof(ctx
));
1034 q
->q_sesn
= UBSEC_SESSION(crp
->crp_sid
);
1036 ses
= &sc
->sc_sessions
[q
->q_sesn
];
1038 if (crp
->crp_flags
& CRYPTO_F_IMBUF
) {
1039 q
->q_src_m
= (struct mbuf
*)crp
->crp_buf
;
1040 q
->q_dst_m
= (struct mbuf
*)crp
->crp_buf
;
1041 } else if (crp
->crp_flags
& CRYPTO_F_IOV
) {
1042 q
->q_src_io
= (struct uio
*)crp
->crp_buf
;
1043 q
->q_dst_io
= (struct uio
*)crp
->crp_buf
;
1045 ubsecstats
.hst_badflags
++;
1047 goto errout
; /* XXX we don't handle contiguous blocks! */
1050 bzero(&dmap
->d_dma
->d_mcr
, sizeof(struct ubsec_mcr
));
1052 dmap
->d_dma
->d_mcr
.mcr_pkts
= htole16(1);
1053 dmap
->d_dma
->d_mcr
.mcr_flags
= 0;
1056 crd1
= crp
->crp_desc
;
1058 ubsecstats
.hst_nodesc
++;
1062 crd2
= crd1
->crd_next
;
1065 if (crd1
->crd_alg
== CRYPTO_MD5_HMAC
||
1066 crd1
->crd_alg
== CRYPTO_SHA1_HMAC
) {
1069 } else if (crd1
->crd_alg
== CRYPTO_DES_CBC
||
1070 crd1
->crd_alg
== CRYPTO_3DES_CBC
) {
1074 ubsecstats
.hst_badalg
++;
1079 if ((crd1
->crd_alg
== CRYPTO_MD5_HMAC
||
1080 crd1
->crd_alg
== CRYPTO_SHA1_HMAC
) &&
1081 (crd2
->crd_alg
== CRYPTO_DES_CBC
||
1082 crd2
->crd_alg
== CRYPTO_3DES_CBC
) &&
1083 ((crd2
->crd_flags
& CRD_F_ENCRYPT
) == 0)) {
1086 } else if ((crd1
->crd_alg
== CRYPTO_DES_CBC
||
1087 crd1
->crd_alg
== CRYPTO_3DES_CBC
) &&
1088 (crd2
->crd_alg
== CRYPTO_MD5_HMAC
||
1089 crd2
->crd_alg
== CRYPTO_SHA1_HMAC
) &&
1090 (crd1
->crd_flags
& CRD_F_ENCRYPT
)) {
1095 * We cannot order the ubsec as requested
1097 ubsecstats
.hst_badalg
++;
1104 encoffset
= enccrd
->crd_skip
;
1105 ctx
.pc_flags
|= htole16(UBS_PKTCTX_ENC_3DES
);
1107 if (enccrd
->crd_flags
& CRD_F_ENCRYPT
) {
1108 q
->q_flags
|= UBSEC_QFLAGS_COPYOUTIV
;
1110 if (enccrd
->crd_flags
& CRD_F_IV_EXPLICIT
)
1111 bcopy(enccrd
->crd_iv
, ctx
.pc_iv
, 8);
1113 ctx
.pc_iv
[0] = ses
->ses_iv
[0];
1114 ctx
.pc_iv
[1] = ses
->ses_iv
[1];
1117 if ((enccrd
->crd_flags
& CRD_F_IV_PRESENT
) == 0) {
1118 if (crp
->crp_flags
& CRYPTO_F_IMBUF
)
1119 m_copyback(q
->q_src_m
,
1121 8, (caddr_t
)ctx
.pc_iv
);
1122 else if (crp
->crp_flags
& CRYPTO_F_IOV
)
1123 cuio_copyback(q
->q_src_io
,
1125 8, (caddr_t
)ctx
.pc_iv
);
1128 ctx
.pc_flags
|= htole16(UBS_PKTCTX_INBOUND
);
1130 if (enccrd
->crd_flags
& CRD_F_IV_EXPLICIT
)
1131 bcopy(enccrd
->crd_iv
, ctx
.pc_iv
, 8);
1132 else if (crp
->crp_flags
& CRYPTO_F_IMBUF
)
1133 m_copydata(q
->q_src_m
, enccrd
->crd_inject
,
1134 8, (caddr_t
)ctx
.pc_iv
);
1135 else if (crp
->crp_flags
& CRYPTO_F_IOV
)
1136 cuio_copydata(q
->q_src_io
,
1137 enccrd
->crd_inject
, 8,
1138 (caddr_t
)ctx
.pc_iv
);
1141 ctx
.pc_deskey
[0] = ses
->ses_deskey
[0];
1142 ctx
.pc_deskey
[1] = ses
->ses_deskey
[1];
1143 ctx
.pc_deskey
[2] = ses
->ses_deskey
[2];
1144 ctx
.pc_deskey
[3] = ses
->ses_deskey
[3];
1145 ctx
.pc_deskey
[4] = ses
->ses_deskey
[4];
1146 ctx
.pc_deskey
[5] = ses
->ses_deskey
[5];
1147 SWAP32(ctx
.pc_iv
[0]);
1148 SWAP32(ctx
.pc_iv
[1]);
1152 macoffset
= maccrd
->crd_skip
;
1154 if (maccrd
->crd_alg
== CRYPTO_MD5_HMAC
)
1155 ctx
.pc_flags
|= htole16(UBS_PKTCTX_AUTH_MD5
);
1157 ctx
.pc_flags
|= htole16(UBS_PKTCTX_AUTH_SHA1
);
1159 for (i
= 0; i
< 5; i
++) {
1160 ctx
.pc_hminner
[i
] = ses
->ses_hminner
[i
];
1161 ctx
.pc_hmouter
[i
] = ses
->ses_hmouter
[i
];
1163 HTOLE32(ctx
.pc_hminner
[i
]);
1164 HTOLE32(ctx
.pc_hmouter
[i
]);
1168 if (enccrd
&& maccrd
) {
1170 * ubsec cannot handle packets where the end of encryption
1171 * and authentication are not the same, or where the
1172 * encrypted part begins before the authenticated part.
1174 if ((encoffset
+ enccrd
->crd_len
) !=
1175 (macoffset
+ maccrd
->crd_len
)) {
1176 ubsecstats
.hst_lenmismatch
++;
1180 if (enccrd
->crd_skip
< maccrd
->crd_skip
) {
1181 ubsecstats
.hst_skipmismatch
++;
1185 sskip
= maccrd
->crd_skip
;
1186 cpskip
= dskip
= enccrd
->crd_skip
;
1187 stheend
= maccrd
->crd_len
;
1188 dtheend
= enccrd
->crd_len
;
1189 coffset
= enccrd
->crd_skip
- maccrd
->crd_skip
;
1190 cpoffset
= cpskip
+ dtheend
;
1193 kprintf("mac: skip %d, len %d, inject %d\n",
1194 maccrd
->crd_skip
, maccrd
->crd_len
, maccrd
->crd_inject
);
1195 kprintf("enc: skip %d, len %d, inject %d\n",
1196 enccrd
->crd_skip
, enccrd
->crd_len
, enccrd
->crd_inject
);
1197 kprintf("src: skip %d, len %d\n", sskip
, stheend
);
1198 kprintf("dst: skip %d, len %d\n", dskip
, dtheend
);
1199 kprintf("ubs: coffset %d, pktlen %d, cpskip %d, cpoffset %d\n",
1200 coffset
, stheend
, cpskip
, cpoffset
);
1204 cpskip
= dskip
= sskip
= macoffset
+ encoffset
;
1205 dtheend
= stheend
= (enccrd
)?enccrd
->crd_len
:maccrd
->crd_len
;
1206 cpoffset
= cpskip
+ dtheend
;
1209 ctx
.pc_offset
= htole16(coffset
>> 2);
1211 if (bus_dmamap_create(sc
->sc_dmat
, BUS_DMA_NOWAIT
, &q
->q_src_map
)) {
1212 ubsecstats
.hst_nomap
++;
1216 if (crp
->crp_flags
& CRYPTO_F_IMBUF
) {
1217 if (bus_dmamap_load_mbuf(sc
->sc_dmat
, q
->q_src_map
,
1218 q
->q_src_m
, ubsec_op_cb
, &q
->q_src
, BUS_DMA_NOWAIT
) != 0) {
1219 bus_dmamap_destroy(sc
->sc_dmat
, q
->q_src_map
);
1220 q
->q_src_map
= NULL
;
1221 ubsecstats
.hst_noload
++;
1225 } else if (crp
->crp_flags
& CRYPTO_F_IOV
) {
1226 if (bus_dmamap_load_uio(sc
->sc_dmat
, q
->q_src_map
,
1227 q
->q_src_io
, ubsec_op_cb
, &q
->q_src
, BUS_DMA_NOWAIT
) != 0) {
1228 bus_dmamap_destroy(sc
->sc_dmat
, q
->q_src_map
);
1229 q
->q_src_map
= NULL
;
1230 ubsecstats
.hst_noload
++;
1235 nicealign
= ubsec_dmamap_aligned(&q
->q_src
);
1237 dmap
->d_dma
->d_mcr
.mcr_pktlen
= htole16(stheend
);
1241 kprintf("src skip: %d nicealign: %u\n", sskip
, nicealign
);
1243 for (i
= j
= 0; i
< q
->q_src_nsegs
; i
++) {
1244 struct ubsec_pktbuf
*pb
;
1245 bus_size_t packl
= q
->q_src_segs
[i
].ds_len
;
1246 bus_addr_t packp
= q
->q_src_segs
[i
].ds_addr
;
1248 if (sskip
>= packl
) {
1257 if (packl
> 0xfffc) {
1263 pb
= &dmap
->d_dma
->d_mcr
.mcr_ipktbuf
;
1265 pb
= &dmap
->d_dma
->d_sbuf
[j
- 1];
1267 pb
->pb_addr
= htole32(packp
);
1270 if (packl
> stheend
) {
1271 pb
->pb_len
= htole32(stheend
);
1274 pb
->pb_len
= htole32(packl
);
1278 pb
->pb_len
= htole32(packl
);
1280 if ((i
+ 1) == q
->q_src_nsegs
)
1283 pb
->pb_next
= htole32(dmap
->d_alloc
.dma_paddr
+
1284 offsetof(struct ubsec_dmachunk
, d_sbuf
[j
]));
1288 if (enccrd
== NULL
&& maccrd
!= NULL
) {
1289 dmap
->d_dma
->d_mcr
.mcr_opktbuf
.pb_addr
= 0;
1290 dmap
->d_dma
->d_mcr
.mcr_opktbuf
.pb_len
= 0;
1291 dmap
->d_dma
->d_mcr
.mcr_opktbuf
.pb_next
= htole32(dmap
->d_alloc
.dma_paddr
+
1292 offsetof(struct ubsec_dmachunk
, d_macbuf
[0]));
1295 kprintf("opkt: %x %x %x\n",
1296 dmap
->d_dma
->d_mcr
.mcr_opktbuf
.pb_addr
,
1297 dmap
->d_dma
->d_mcr
.mcr_opktbuf
.pb_len
,
1298 dmap
->d_dma
->d_mcr
.mcr_opktbuf
.pb_next
);
1301 if (crp
->crp_flags
& CRYPTO_F_IOV
) {
1303 ubsecstats
.hst_iovmisaligned
++;
1307 if (bus_dmamap_create(sc
->sc_dmat
, BUS_DMA_NOWAIT
,
1309 ubsecstats
.hst_nomap
++;
1313 if (bus_dmamap_load_uio(sc
->sc_dmat
, q
->q_dst_map
,
1314 q
->q_dst_io
, ubsec_op_cb
, &q
->q_dst
, BUS_DMA_NOWAIT
) != 0) {
1315 bus_dmamap_destroy(sc
->sc_dmat
, q
->q_dst_map
);
1316 q
->q_dst_map
= NULL
;
1317 ubsecstats
.hst_noload
++;
1321 } else if (crp
->crp_flags
& CRYPTO_F_IMBUF
) {
1323 q
->q_dst
= q
->q_src
;
1326 struct mbuf
*m
, *top
, **mp
;
1328 ubsecstats
.hst_unaligned
++;
1329 totlen
= q
->q_src_mapsize
;
1330 if (q
->q_src_m
->m_flags
& M_PKTHDR
) {
1332 MGETHDR(m
, MB_DONTWAIT
, MT_DATA
);
1333 if (m
&& !m_dup_pkthdr(m
, q
->q_src_m
, MB_DONTWAIT
)) {
1339 MGET(m
, MB_DONTWAIT
, MT_DATA
);
1342 ubsecstats
.hst_nombuf
++;
1343 err
= sc
->sc_nqueue
? ERESTART
: ENOMEM
;
1346 if (totlen
>= MINCLSIZE
) {
1347 MCLGET(m
, MB_DONTWAIT
);
1348 if ((m
->m_flags
& M_EXT
) == 0) {
1350 ubsecstats
.hst_nomcl
++;
1351 err
= sc
->sc_nqueue
? ERESTART
: ENOMEM
;
1360 while (totlen
> 0) {
1362 MGET(m
, MB_DONTWAIT
, MT_DATA
);
1365 ubsecstats
.hst_nombuf
++;
1366 err
= sc
->sc_nqueue
? ERESTART
: ENOMEM
;
1371 if (top
&& totlen
>= MINCLSIZE
) {
1372 MCLGET(m
, MB_DONTWAIT
);
1373 if ((m
->m_flags
& M_EXT
) == 0) {
1376 ubsecstats
.hst_nomcl
++;
1377 err
= sc
->sc_nqueue
? ERESTART
: ENOMEM
;
1382 m
->m_len
= len
= min(totlen
, len
);
1388 ubsec_mcopy(q
->q_src_m
, q
->q_dst_m
,
1390 if (bus_dmamap_create(sc
->sc_dmat
,
1391 BUS_DMA_NOWAIT
, &q
->q_dst_map
) != 0) {
1392 ubsecstats
.hst_nomap
++;
1396 if (bus_dmamap_load_mbuf(sc
->sc_dmat
,
1397 q
->q_dst_map
, q
->q_dst_m
,
1398 ubsec_op_cb
, &q
->q_dst
,
1399 BUS_DMA_NOWAIT
) != 0) {
1400 bus_dmamap_destroy(sc
->sc_dmat
,
1402 q
->q_dst_map
= NULL
;
1403 ubsecstats
.hst_noload
++;
1409 ubsecstats
.hst_badflags
++;
1416 kprintf("dst skip: %d\n", dskip
);
1418 for (i
= j
= 0; i
< q
->q_dst_nsegs
; i
++) {
1419 struct ubsec_pktbuf
*pb
;
1420 bus_size_t packl
= q
->q_dst_segs
[i
].ds_len
;
1421 bus_addr_t packp
= q
->q_dst_segs
[i
].ds_addr
;
1423 if (dskip
>= packl
) {
1432 if (packl
> 0xfffc) {
1438 pb
= &dmap
->d_dma
->d_mcr
.mcr_opktbuf
;
1440 pb
= &dmap
->d_dma
->d_dbuf
[j
- 1];
1442 pb
->pb_addr
= htole32(packp
);
1445 if (packl
> dtheend
) {
1446 pb
->pb_len
= htole32(dtheend
);
1449 pb
->pb_len
= htole32(packl
);
1453 pb
->pb_len
= htole32(packl
);
1455 if ((i
+ 1) == q
->q_dst_nsegs
) {
1457 pb
->pb_next
= htole32(dmap
->d_alloc
.dma_paddr
+
1458 offsetof(struct ubsec_dmachunk
, d_macbuf
[0]));
1462 pb
->pb_next
= htole32(dmap
->d_alloc
.dma_paddr
+
1463 offsetof(struct ubsec_dmachunk
, d_dbuf
[j
]));
1468 dmap
->d_dma
->d_mcr
.mcr_cmdctxp
= htole32(dmap
->d_alloc
.dma_paddr
+
1469 offsetof(struct ubsec_dmachunk
, d_ctx
));
1471 if (sc
->sc_flags
& UBS_FLAGS_LONGCTX
) {
1472 struct ubsec_pktctx_long
*ctxl
;
1474 ctxl
= (struct ubsec_pktctx_long
*)(dmap
->d_alloc
.dma_vaddr
+
1475 offsetof(struct ubsec_dmachunk
, d_ctx
));
1477 /* transform small context into long context */
1478 ctxl
->pc_len
= htole16(sizeof(struct ubsec_pktctx_long
));
1479 ctxl
->pc_type
= htole16(UBS_PKTCTX_TYPE_IPSEC
);
1480 ctxl
->pc_flags
= ctx
.pc_flags
;
1481 ctxl
->pc_offset
= ctx
.pc_offset
;
1482 for (i
= 0; i
< 6; i
++)
1483 ctxl
->pc_deskey
[i
] = ctx
.pc_deskey
[i
];
1484 for (i
= 0; i
< 5; i
++)
1485 ctxl
->pc_hminner
[i
] = ctx
.pc_hminner
[i
];
1486 for (i
= 0; i
< 5; i
++)
1487 ctxl
->pc_hmouter
[i
] = ctx
.pc_hmouter
[i
];
1488 ctxl
->pc_iv
[0] = ctx
.pc_iv
[0];
1489 ctxl
->pc_iv
[1] = ctx
.pc_iv
[1];
1491 bcopy(&ctx
, dmap
->d_alloc
.dma_vaddr
+
1492 offsetof(struct ubsec_dmachunk
, d_ctx
),
1493 sizeof(struct ubsec_pktctx
));
1496 SIMPLEQ_INSERT_TAIL(&sc
->sc_queue
, q
, q_next
);
1498 ubsecstats
.hst_ipackets
++;
1499 ubsecstats
.hst_ibytes
+= dmap
->d_alloc
.dma_size
;
1500 if ((hint
& CRYPTO_HINT_MORE
) == 0 || sc
->sc_nqueue
>= UBS_MAX_AGGR
)
1507 if ((q
->q_dst_m
!= NULL
) && (q
->q_src_m
!= q
->q_dst_m
))
1508 m_freem(q
->q_dst_m
);
1510 if (q
->q_dst_map
!= NULL
&& q
->q_dst_map
!= q
->q_src_map
) {
1511 bus_dmamap_unload(sc
->sc_dmat
, q
->q_dst_map
);
1512 bus_dmamap_destroy(sc
->sc_dmat
, q
->q_dst_map
);
1514 if (q
->q_src_map
!= NULL
) {
1515 bus_dmamap_unload(sc
->sc_dmat
, q
->q_src_map
);
1516 bus_dmamap_destroy(sc
->sc_dmat
, q
->q_src_map
);
1520 SIMPLEQ_INSERT_TAIL(&sc
->sc_freequeue
, q
, q_next
);
1523 if (err
!= ERESTART
) {
1524 crp
->crp_etype
= err
;
1527 sc
->sc_needwakeup
|= CRYPTO_SYMQ
;
1533 ubsec_callback(struct ubsec_softc
*sc
, struct ubsec_q
*q
)
1535 struct cryptop
*crp
= (struct cryptop
*)q
->q_crp
;
1536 struct cryptodesc
*crd
;
1537 struct ubsec_dma
*dmap
= q
->q_dma
;
1539 ubsecstats
.hst_opackets
++;
1540 ubsecstats
.hst_obytes
+= dmap
->d_alloc
.dma_size
;
1542 ubsec_dma_sync(&dmap
->d_alloc
,
1543 BUS_DMASYNC_POSTREAD
|BUS_DMASYNC_POSTWRITE
);
1544 if (q
->q_dst_map
!= NULL
&& q
->q_dst_map
!= q
->q_src_map
) {
1545 bus_dmamap_sync(sc
->sc_dmat
, q
->q_dst_map
,
1546 BUS_DMASYNC_POSTREAD
);
1547 bus_dmamap_unload(sc
->sc_dmat
, q
->q_dst_map
);
1548 bus_dmamap_destroy(sc
->sc_dmat
, q
->q_dst_map
);
1550 bus_dmamap_sync(sc
->sc_dmat
, q
->q_src_map
, BUS_DMASYNC_POSTWRITE
);
1551 bus_dmamap_unload(sc
->sc_dmat
, q
->q_src_map
);
1552 bus_dmamap_destroy(sc
->sc_dmat
, q
->q_src_map
);
1554 if ((crp
->crp_flags
& CRYPTO_F_IMBUF
) && (q
->q_src_m
!= q
->q_dst_m
)) {
1555 m_freem(q
->q_src_m
);
1556 crp
->crp_buf
= (caddr_t
)q
->q_dst_m
;
1558 ubsecstats
.hst_obytes
+= ((struct mbuf
*)crp
->crp_buf
)->m_len
;
1560 /* copy out IV for future use */
1561 if (q
->q_flags
& UBSEC_QFLAGS_COPYOUTIV
) {
1562 for (crd
= crp
->crp_desc
; crd
; crd
= crd
->crd_next
) {
1563 if (crd
->crd_alg
!= CRYPTO_DES_CBC
&&
1564 crd
->crd_alg
!= CRYPTO_3DES_CBC
)
1566 if (crp
->crp_flags
& CRYPTO_F_IMBUF
)
1567 m_copydata((struct mbuf
*)crp
->crp_buf
,
1568 crd
->crd_skip
+ crd
->crd_len
- 8, 8,
1569 (caddr_t
)sc
->sc_sessions
[q
->q_sesn
].ses_iv
);
1570 else if (crp
->crp_flags
& CRYPTO_F_IOV
) {
1571 cuio_copydata((struct uio
*)crp
->crp_buf
,
1572 crd
->crd_skip
+ crd
->crd_len
- 8, 8,
1573 (caddr_t
)sc
->sc_sessions
[q
->q_sesn
].ses_iv
);
1579 for (crd
= crp
->crp_desc
; crd
; crd
= crd
->crd_next
) {
1580 if (crd
->crd_alg
!= CRYPTO_MD5_HMAC
&&
1581 crd
->crd_alg
!= CRYPTO_SHA1_HMAC
)
1583 if (crp
->crp_flags
& CRYPTO_F_IMBUF
)
1584 m_copyback((struct mbuf
*)crp
->crp_buf
,
1585 crd
->crd_inject
, 12,
1586 (caddr_t
)dmap
->d_dma
->d_macbuf
);
1587 else if (crp
->crp_flags
& CRYPTO_F_IOV
&& crp
->crp_mac
)
1588 bcopy((caddr_t
)dmap
->d_dma
->d_macbuf
,
1592 SIMPLEQ_INSERT_TAIL(&sc
->sc_freequeue
, q
, q_next
);
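/*
 * Copy the bytes that lie outside the [hoffset, toffset) payload window
 * from the source mbuf chain into the freshly allocated destination chain;
 * the chip writes the payload itself, but headers and trailers have to be
 * carried over by hand.
 */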
1597 ubsec_mcopy(struct mbuf
*srcm
, struct mbuf
*dstm
, int hoffset
, int toffset
)
1599 int i
, j
, dlen
, slen
;
1603 sptr
= srcm
->m_data
;
1605 dptr
= dstm
->m_data
;
1609 for (i
= 0; i
< min(slen
, dlen
); i
++) {
1610 if (j
< hoffset
|| j
>= toffset
)
1617 srcm
= srcm
->m_next
;
1620 sptr
= srcm
->m_data
;
1624 dstm
= dstm
->m_next
;
1627 dptr
= dstm
->m_data
;
1634 * feed the key generator, must be called at splimp() or higher.
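/*
 * Drains sc_queue2 onto the chip: each queued MCR is synced and its
 * physical address written to BS_MCR2, until the status register reports
 * MCR2 full.
 */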
1637 ubsec_feed2(struct ubsec_softc
*sc
)
1641 while (!SIMPLEQ_EMPTY(&sc
->sc_queue2
)) {
1642 if (READ_REG(sc
, BS_STAT
) & BS_STAT_MCR2_FULL
)
1644 q
= SIMPLEQ_FIRST(&sc
->sc_queue2
);
1646 ubsec_dma_sync(&q
->q_mcr
,
1647 BUS_DMASYNC_PREREAD
| BUS_DMASYNC_PREWRITE
);
1648 ubsec_dma_sync(&q
->q_ctx
, BUS_DMASYNC_PREWRITE
);
1650 WRITE_REG(sc
, BS_MCR2
, q
->q_mcr
.dma_paddr
);
1651 SIMPLEQ_REMOVE_HEAD(&sc
->sc_queue2
, q_next
);
1653 SIMPLEQ_INSERT_TAIL(&sc
->sc_qchip2
, q
, q_next
);
1659 * Callback for handling random numbers
1662 ubsec_callback2(struct ubsec_softc
*sc
, struct ubsec_q2
*q
)
1664 struct cryptkop
*krp
;
1665 struct ubsec_ctx_keyop
*ctx
;
1667 ctx
= (struct ubsec_ctx_keyop
*)q
->q_ctx
.dma_vaddr
;
1668 ubsec_dma_sync(&q
->q_ctx
, BUS_DMASYNC_POSTWRITE
);
1670 switch (q
->q_type
) {
1671 #ifndef UBSEC_NO_RNG
1672 case UBS_CTXOP_RNGBYPASS
: {
1673 struct ubsec_q2_rng
*rng
= (struct ubsec_q2_rng
*)q
;
1675 ubsec_dma_sync(&rng
->rng_buf
, BUS_DMASYNC_POSTREAD
);
1676 (*sc
->sc_harvest
)(sc
->sc_rndtest
,
1677 rng
->rng_buf
.dma_vaddr
,
1678 UBSEC_RNG_BUFSIZ
*sizeof (u_int32_t
));
1680 callout_reset(&sc
->sc_rngto
, sc
->sc_rnghz
, ubsec_rng
, sc
);
1684 case UBS_CTXOP_MODEXP
: {
1685 struct ubsec_q2_modexp
*me
= (struct ubsec_q2_modexp
*)q
;
1689 rlen
= (me
->me_modbits
+ 7) / 8;
1690 clen
= (krp
->krp_param
[krp
->krp_iparams
].crp_nbits
+ 7) / 8;
1692 ubsec_dma_sync(&me
->me_M
, BUS_DMASYNC_POSTWRITE
);
1693 ubsec_dma_sync(&me
->me_E
, BUS_DMASYNC_POSTWRITE
);
1694 ubsec_dma_sync(&me
->me_C
, BUS_DMASYNC_POSTREAD
);
1695 ubsec_dma_sync(&me
->me_epb
, BUS_DMASYNC_POSTWRITE
);
1698 krp
->krp_status
= E2BIG
;
1700 if (sc
->sc_flags
& UBS_FLAGS_HWNORM
) {
1701 bzero(krp
->krp_param
[krp
->krp_iparams
].crp_p
,
1702 (krp
->krp_param
[krp
->krp_iparams
].crp_nbits
1704 bcopy(me
->me_C
.dma_vaddr
,
1705 krp
->krp_param
[krp
->krp_iparams
].crp_p
,
1706 (me
->me_modbits
+ 7) / 8);
1708 ubsec_kshift_l(me
->me_shiftbits
,
1709 me
->me_C
.dma_vaddr
, me
->me_normbits
,
1710 krp
->krp_param
[krp
->krp_iparams
].crp_p
,
1711 krp
->krp_param
[krp
->krp_iparams
].crp_nbits
);
1716 /* bzero all potentially sensitive data */
1717 bzero(me
->me_E
.dma_vaddr
, me
->me_E
.dma_size
);
1718 bzero(me
->me_M
.dma_vaddr
, me
->me_M
.dma_size
);
1719 bzero(me
->me_C
.dma_vaddr
, me
->me_C
.dma_size
);
1720 bzero(me
->me_q
.q_ctx
.dma_vaddr
, me
->me_q
.q_ctx
.dma_size
);
1722 /* Can't free here, so put us on the free list. */
1723 SIMPLEQ_INSERT_TAIL(&sc
->sc_q2free
, &me
->me_q
, q_next
);
1726 case UBS_CTXOP_RSAPRIV
: {
1727 struct ubsec_q2_rsapriv
*rp
= (struct ubsec_q2_rsapriv
*)q
;
1731 ubsec_dma_sync(&rp
->rpr_msgin
, BUS_DMASYNC_POSTWRITE
);
1732 ubsec_dma_sync(&rp
->rpr_msgout
, BUS_DMASYNC_POSTREAD
);
1734 len
= (krp
->krp_param
[UBS_RSAPRIV_PAR_MSGOUT
].crp_nbits
+ 7) / 8;
1735 bcopy(rp
->rpr_msgout
.dma_vaddr
,
1736 krp
->krp_param
[UBS_RSAPRIV_PAR_MSGOUT
].crp_p
, len
);
1740 bzero(rp
->rpr_msgin
.dma_vaddr
, rp
->rpr_msgin
.dma_size
);
1741 bzero(rp
->rpr_msgout
.dma_vaddr
, rp
->rpr_msgout
.dma_size
);
1742 bzero(rp
->rpr_q
.q_ctx
.dma_vaddr
, rp
->rpr_q
.q_ctx
.dma_size
);
1744 /* Can't free here, so put us on the free list. */
1745 SIMPLEQ_INSERT_TAIL(&sc
->sc_q2free
, &rp
->rpr_q
, q_next
);
1749 device_printf(sc
->sc_dev
, "unknown ctx op: %x\n",
1750 letoh16(ctx
->ctx_op
));
1755 #ifndef UBSEC_NO_RNG
1757 ubsec_rng(void *vsc
)
1759 struct ubsec_softc
*sc
= vsc
;
1760 struct ubsec_q2_rng
*rng
= &sc
->sc_rng
;
1761 struct ubsec_mcr
*mcr
;
1762 struct ubsec_ctx_rngbypass
*ctx
;
1765 if (rng
->rng_used
) {
1770 if (sc
->sc_nqueue2
>= UBS_MAX_NQUEUE
)
1773 mcr
= (struct ubsec_mcr
*)rng
->rng_q
.q_mcr
.dma_vaddr
;
1774 ctx
= (struct ubsec_ctx_rngbypass
*)rng
->rng_q
.q_ctx
.dma_vaddr
;
1776 mcr
->mcr_pkts
= htole16(1);
1778 mcr
->mcr_cmdctxp
= htole32(rng
->rng_q
.q_ctx
.dma_paddr
);
1779 mcr
->mcr_ipktbuf
.pb_addr
= mcr
->mcr_ipktbuf
.pb_next
= 0;
1780 mcr
->mcr_ipktbuf
.pb_len
= 0;
1781 mcr
->mcr_reserved
= mcr
->mcr_pktlen
= 0;
1782 mcr
->mcr_opktbuf
.pb_addr
= htole32(rng
->rng_buf
.dma_paddr
);
1783 mcr
->mcr_opktbuf
.pb_len
= htole32(((sizeof(u_int32_t
) * UBSEC_RNG_BUFSIZ
)) &
1785 mcr
->mcr_opktbuf
.pb_next
= 0;
1787 ctx
->rbp_len
= htole16(sizeof(struct ubsec_ctx_rngbypass
));
1788 ctx
->rbp_op
= htole16(UBS_CTXOP_RNGBYPASS
);
1789 rng
->rng_q
.q_type
= UBS_CTXOP_RNGBYPASS
;
1791 ubsec_dma_sync(&rng
->rng_buf
, BUS_DMASYNC_PREREAD
);
1793 SIMPLEQ_INSERT_TAIL(&sc
->sc_queue2
, &rng
->rng_q
, q_next
);
1796 ubsecstats
.hst_rng
++;
1803 * Something weird happened, generate our own call back.
1807 callout_reset(&sc
->sc_rngto
, sc
->sc_rnghz
, ubsec_rng
, sc
);
1809 #endif /* UBSEC_NO_RNG */
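/*
 * bus_dmamap_load callback used by ubsec_dma_malloc(): the allocation is a
 * single contiguous segment, so just hand back its bus address.
 */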
static void
ubsec_dmamap_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error)
{
	bus_addr_t *paddr = (bus_addr_t*) arg;

	*paddr = segs->ds_addr;
}
1820 struct ubsec_softc
*sc
,
1822 struct ubsec_dma_alloc
*dma
,
1828 /* XXX could specify sc_dmat as parent but that just adds overhead */
1829 r
= bus_dma_tag_create(NULL
, /* parent */
1830 1, 0, /* alignment, bounds */
1831 BUS_SPACE_MAXADDR_32BIT
, /* lowaddr */
1832 BUS_SPACE_MAXADDR
, /* highaddr */
1833 NULL
, NULL
, /* filter, filterarg */
1836 size
, /* maxsegsize */
1837 BUS_DMA_ALLOCNOW
, /* flags */
1840 device_printf(sc
->sc_dev
, "ubsec_dma_malloc: "
1841 "bus_dma_tag_create failed; error %u\n", r
);
1845 r
= bus_dmamap_create(dma
->dma_tag
, BUS_DMA_NOWAIT
, &dma
->dma_map
);
1847 device_printf(sc
->sc_dev
, "ubsec_dma_malloc: "
1848 "bus_dmamap_create failed; error %u\n", r
);
1852 r
= bus_dmamem_alloc(dma
->dma_tag
, (void**) &dma
->dma_vaddr
,
1853 BUS_DMA_NOWAIT
, &dma
->dma_map
);
1855 device_printf(sc
->sc_dev
, "ubsec_dma_malloc: "
1856 "bus_dmammem_alloc failed; size %ju, error %u\n",
1861 r
= bus_dmamap_load(dma
->dma_tag
, dma
->dma_map
, dma
->dma_vaddr
,
1865 mapflags
| BUS_DMA_NOWAIT
);
1867 device_printf(sc
->sc_dev
, "ubsec_dma_malloc: "
1868 "bus_dmamap_load failed; error %u\n", r
);
1872 dma
->dma_size
= size
;
1876 bus_dmamap_unload(dma
->dma_tag
, dma
->dma_map
);
1878 bus_dmamem_free(dma
->dma_tag
, dma
->dma_vaddr
, dma
->dma_map
);
1880 bus_dmamap_destroy(dma
->dma_tag
, dma
->dma_map
);
1881 bus_dma_tag_destroy(dma
->dma_tag
);
1883 dma
->dma_map
= NULL
;
1884 dma
->dma_tag
= NULL
;
static void
ubsec_dma_free(struct ubsec_softc *sc, struct ubsec_dma_alloc *dma)
{
	bus_dmamap_unload(dma->dma_tag, dma->dma_map);
	bus_dmamem_free(dma->dma_tag, dma->dma_vaddr, dma->dma_map);
	bus_dmamap_destroy(dma->dma_tag, dma->dma_map);
	bus_dma_tag_destroy(dma->dma_tag);
}
/*
 * Resets the board.  Values in the registers are left as is
 * from the reset (i.e. initial values are assigned elsewhere).
 */
static void
ubsec_reset_board(struct ubsec_softc *sc)
{
	volatile u_int32_t ctrl;

	ctrl = READ_REG(sc, BS_CTRL);
	ctrl |= BS_CTRL_RESET;
	WRITE_REG(sc, BS_CTRL, ctrl);

	/*
	 * Wait approx. 30 PCI clocks = 900 ns = 0.9 us
	 */
	DELAY(10);
}
/*
 * Init Broadcom registers
 */
static void
ubsec_init_board(struct ubsec_softc *sc)
{
	u_int32_t ctrl;

	ctrl = READ_REG(sc, BS_CTRL);
	ctrl &= ~(BS_CTRL_BE32 | BS_CTRL_BE64);
	ctrl |= BS_CTRL_LITTLE_ENDIAN | BS_CTRL_MCR1INT;

	if (sc->sc_flags & (UBS_FLAGS_KEY|UBS_FLAGS_RNG))
		ctrl |= BS_CTRL_MCR2INT;
	else
		ctrl &= ~BS_CTRL_MCR2INT;

	if (sc->sc_flags & UBS_FLAGS_HWNORM)
		ctrl &= ~BS_CTRL_SWNORM;

	WRITE_REG(sc, BS_CTRL, ctrl);
}
/*
 * Init Broadcom PCI registers
 */
static void
ubsec_init_pciregs(device_t dev)
{
	misc = pci_conf_read(pc, pa->pa_tag, BS_RTY_TOUT);
	misc = (misc & ~(UBS_PCI_RTY_MASK << UBS_PCI_RTY_SHIFT))
	    | ((UBS_DEF_RTY & 0xff) << UBS_PCI_RTY_SHIFT);
	misc = (misc & ~(UBS_PCI_TOUT_MASK << UBS_PCI_TOUT_SHIFT))
	    | ((UBS_DEF_TOUT & 0xff) << UBS_PCI_TOUT_SHIFT);
	pci_conf_write(pc, pa->pa_tag, BS_RTY_TOUT, misc);

	/*
	 * This will set the cache line size to 1, which will
	 * force the BCM58xx chip just to do burst read/writes.
	 * Cache line read/writes are too slow.
	 */
	pci_write_config(dev, PCIR_CACHELNSZ, UBS_DEF_CACHELINE, 1);
}
/*
 * Clean up after a chip crash.
 * It is assumed that the caller is in splimp().
 */
static void
ubsec_cleanchip(struct ubsec_softc *sc)
{
	struct ubsec_q *q;

	while (!SIMPLEQ_EMPTY(&sc->sc_qchip)) {
		q = SIMPLEQ_FIRST(&sc->sc_qchip);
		SIMPLEQ_REMOVE_HEAD(&sc->sc_qchip, q_next);
		ubsec_free_q(sc, q);
	}
	sc->sc_nqchip = 0;
}
/*
 * It is assumed that the caller is within splimp().
 */
static int
ubsec_free_q(struct ubsec_softc *sc, struct ubsec_q *q)
{
	struct ubsec_q *q2;
	struct cryptop *crp;
	int npkts;
	int i;

	npkts = q->q_nstacked_mcrs;

	for (i = 0; i < npkts; i++) {
		if(q->q_stacked_mcr[i]) {
			q2 = q->q_stacked_mcr[i];

			if ((q2->q_dst_m != NULL) && (q2->q_src_m != q2->q_dst_m))
				m_freem(q2->q_dst_m);

			crp = (struct cryptop *)q2->q_crp;

			SIMPLEQ_INSERT_TAIL(&sc->sc_freequeue, q2, q_next);

			crp->crp_etype = EFAULT;
			crypto_done(crp);
		} else {
			break;
		}
	}

	if ((q->q_dst_m != NULL) && (q->q_src_m != q->q_dst_m))
		m_freem(q->q_dst_m);

	crp = (struct cryptop *)q->q_crp;

	SIMPLEQ_INSERT_TAIL(&sc->sc_freequeue, q, q_next);

	crp->crp_etype = EFAULT;
	crypto_done(crp);
	return (0);
}
/*
 * Routine to reset the chip and clean up.
 * It is assumed that the caller is in splimp().
 */
static void
ubsec_totalreset(struct ubsec_softc *sc)
{
	ubsec_reset_board(sc);
	ubsec_init_board(sc);
	ubsec_cleanchip(sc);
}
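/*
 * The chip requires 32-bit aligned packet buffers: every segment must
 * start on a 4-byte boundary and all but the last must also be a multiple
 * of 4 bytes long, otherwise the request is bounced through a freshly
 * allocated mbuf chain (see ubsec_process()).
 */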
static int
ubsec_dmamap_aligned(struct ubsec_operand *op)
{
	int i;

	for (i = 0; i < op->nsegs; i++) {
		if (op->segs[i].ds_addr & 3)
			return (0);
		if ((i != (op->nsegs - 1)) &&
		    (op->segs[i].ds_len & 3))
			return (0);
	}
	return (1);
}
static void
ubsec_kfree(struct ubsec_softc *sc, struct ubsec_q2 *q)
{
	switch (q->q_type) {
	case UBS_CTXOP_MODEXP: {
		struct ubsec_q2_modexp *me = (struct ubsec_q2_modexp *)q;

		ubsec_dma_free(sc, &me->me_q.q_mcr);
		ubsec_dma_free(sc, &me->me_q.q_ctx);
		ubsec_dma_free(sc, &me->me_M);
		ubsec_dma_free(sc, &me->me_E);
		ubsec_dma_free(sc, &me->me_C);
		ubsec_dma_free(sc, &me->me_epb);
		kfree(me, M_DEVBUF);
		break;
	}
	case UBS_CTXOP_RSAPRIV: {
		struct ubsec_q2_rsapriv *rp = (struct ubsec_q2_rsapriv *)q;

		ubsec_dma_free(sc, &rp->rpr_q.q_mcr);
		ubsec_dma_free(sc, &rp->rpr_q.q_ctx);
		ubsec_dma_free(sc, &rp->rpr_msgin);
		ubsec_dma_free(sc, &rp->rpr_msgout);
		kfree(rp, M_DEVBUF);
		break;
	}
	default:
		device_printf(sc->sc_dev, "invalid kfree 0x%x\n", q->q_type);
		break;
	}
}
static int
ubsec_kprocess(void *arg, struct cryptkop *krp, int hint)
{
	struct ubsec_softc *sc = arg;
	int r;

	if (krp == NULL || krp->krp_callback == NULL)
		return (EINVAL);

	while (!SIMPLEQ_EMPTY(&sc->sc_q2free)) {
		struct ubsec_q2 *q;

		q = SIMPLEQ_FIRST(&sc->sc_q2free);
		SIMPLEQ_REMOVE_HEAD(&sc->sc_q2free, q_next);
		ubsec_kfree(sc, q);
	}

	switch (krp->krp_op) {
	case CRK_MOD_EXP:
		if (sc->sc_flags & UBS_FLAGS_HWNORM)
			r = ubsec_kprocess_modexp_hw(sc, krp, hint);
		else
			r = ubsec_kprocess_modexp_sw(sc, krp, hint);
		break;
	case CRK_MOD_EXP_CRT:
		return (ubsec_kprocess_rsapriv(sc, krp, hint));
	default:
		device_printf(sc->sc_dev, "kprocess: invalid op 0x%x\n",
		    krp->krp_op);
		krp->krp_status = EOPNOTSUPP;
		crypto_kdone(krp);
		return (0);
	}
	return (0);			/* silence compiler */
}
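/*
 * The two modexp paths below differ only in who pads the operands out to
 * the chip's supported operand sizes (up to 1024 bits, or 1536/2048 with
 * UBS_FLAGS_BIGKEY): the _sw variant bit-shifts ("normalizes") M, E and N
 * on the host via ubsec_kshift_r()/ubsec_kshift_l(), while the _hw variant
 * hands unshifted operands to a chip that can normalize for itself.
 */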
2124 * Start computation of cr[C] = (cr[M] ^ cr[E]) mod cr[N] (sw normalization)
2127 ubsec_kprocess_modexp_sw(struct ubsec_softc
*sc
, struct cryptkop
*krp
, int hint
)
2129 struct ubsec_q2_modexp
*me
;
2130 struct ubsec_mcr
*mcr
;
2131 struct ubsec_ctx_modexp
*ctx
;
2132 struct ubsec_pktbuf
*epb
;
2134 u_int nbits
, normbits
, mbits
, shiftbits
, ebits
;
2136 me
= kmalloc(sizeof *me
, M_DEVBUF
, M_INTWAIT
| M_ZERO
);
2138 me
->me_q
.q_type
= UBS_CTXOP_MODEXP
;
2140 nbits
= ubsec_ksigbits(&krp
->krp_param
[UBS_MODEXP_PAR_N
]);
2143 else if (nbits
<= 768)
2145 else if (nbits
	    <= 1024)
		normbits = 1024;
	else if (sc->sc_flags & UBS_FLAGS_BIGKEY && nbits <= 1536)
		normbits = 1536;
	else if (sc->sc_flags & UBS_FLAGS_BIGKEY && nbits <= 2048)
		normbits = 2048;
	else {
		err = E2BIG;
		goto errout;
	}

	shiftbits = normbits - nbits;

	me->me_modbits = nbits;
	me->me_shiftbits = shiftbits;
	me->me_normbits = normbits;

	/* Sanity check: result bits must be >= true modulus bits. */
	if (krp->krp_param[krp->krp_iparams].crp_nbits < nbits) {
		err = ERANGE;
		goto errout;
	}

	if (ubsec_dma_malloc(sc, sizeof(struct ubsec_mcr),
	    &me->me_q.q_mcr, 0)) {
		err = ENOMEM;
		goto errout;
	}
	mcr = (struct ubsec_mcr *)me->me_q.q_mcr.dma_vaddr;

	if (ubsec_dma_malloc(sc, sizeof(struct ubsec_ctx_modexp),
	    &me->me_q.q_ctx, 0)) {
		err = ENOMEM;
		goto errout;
	}

	mbits = ubsec_ksigbits(&krp->krp_param[UBS_MODEXP_PAR_M]);
	if (mbits > nbits) {
		err = E2BIG;
		goto errout;
	}
	if (ubsec_dma_malloc(sc, normbits / 8, &me->me_M, 0)) {
		err = ENOMEM;
		goto errout;
	}
	ubsec_kshift_r(shiftbits,
	    krp->krp_param[UBS_MODEXP_PAR_M].crp_p, mbits,
	    me->me_M.dma_vaddr, normbits);

	if (ubsec_dma_malloc(sc, normbits / 8, &me->me_C, 0)) {
		err = ENOMEM;
		goto errout;
	}
	bzero(me->me_C.dma_vaddr, me->me_C.dma_size);

	ebits = ubsec_ksigbits(&krp->krp_param[UBS_MODEXP_PAR_E]);
	if (ebits > nbits) {
		err = E2BIG;
		goto errout;
	}
	if (ubsec_dma_malloc(sc, normbits / 8, &me->me_E, 0)) {
		err = ENOMEM;
		goto errout;
	}
	ubsec_kshift_r(shiftbits,
	    krp->krp_param[UBS_MODEXP_PAR_E].crp_p, ebits,
	    me->me_E.dma_vaddr, normbits);

	if (ubsec_dma_malloc(sc, sizeof(struct ubsec_pktbuf),
	    &me->me_epb, 0)) {
		err = ENOMEM;
		goto errout;
	}
	epb = (struct ubsec_pktbuf *)me->me_epb.dma_vaddr;
	epb->pb_addr = htole32(me->me_E.dma_paddr);
	epb->pb_next = 0;
	epb->pb_len = htole32(normbits / 8);

	mcr->mcr_pkts = htole16(1);
	mcr->mcr_flags = 0;
	mcr->mcr_cmdctxp = htole32(me->me_q.q_ctx.dma_paddr);
	mcr->mcr_reserved = 0;
	mcr->mcr_pktlen = 0;

	mcr->mcr_ipktbuf.pb_addr = htole32(me->me_M.dma_paddr);
	mcr->mcr_ipktbuf.pb_len = htole32(normbits / 8);
	mcr->mcr_ipktbuf.pb_next = htole32(me->me_epb.dma_paddr);

	mcr->mcr_opktbuf.pb_addr = htole32(me->me_C.dma_paddr);
	mcr->mcr_opktbuf.pb_next = 0;
	mcr->mcr_opktbuf.pb_len = htole32(normbits / 8);

#ifdef DIAGNOSTIC
	/* Misaligned output buffer will hang the chip. */
	if ((letoh32(mcr->mcr_opktbuf.pb_addr) & 3) != 0)
		panic("%s: modexp invalid addr 0x%x\n",
		    device_get_nameunit(sc->sc_dev),
		    letoh32(mcr->mcr_opktbuf.pb_addr));
	if ((letoh32(mcr->mcr_opktbuf.pb_len) & 3) != 0)
		panic("%s: modexp invalid len 0x%x\n",
		    device_get_nameunit(sc->sc_dev),
		    letoh32(mcr->mcr_opktbuf.pb_len));
#endif

	ctx = (struct ubsec_ctx_modexp *)me->me_q.q_ctx.dma_vaddr;
	bzero(ctx, sizeof(*ctx));
	ubsec_kshift_r(shiftbits,
	    krp->krp_param[UBS_MODEXP_PAR_N].crp_p, nbits,
	    ctx->me_N, normbits);
	ctx->me_len = htole16((normbits / 8) + (4 * sizeof(u_int16_t)));
	ctx->me_op = htole16(UBS_CTXOP_MODEXP);
	ctx->me_E_len = htole16(nbits);
	ctx->me_N_len = htole16(nbits);

#ifdef UBSEC_DEBUG
	ubsec_dump_mcr(mcr);
	ubsec_dump_ctx2((struct ubsec_ctx_keyop *)ctx);
#endif

	/*
	 * ubsec_feed2 will sync mcr and ctx, we just need to sync
	 * everything else.
	 */
	ubsec_dma_sync(&me->me_M, BUS_DMASYNC_PREWRITE);
	ubsec_dma_sync(&me->me_E, BUS_DMASYNC_PREWRITE);
	ubsec_dma_sync(&me->me_C, BUS_DMASYNC_PREREAD);
	ubsec_dma_sync(&me->me_epb, BUS_DMASYNC_PREWRITE);

	/* Enqueue and we're done... */
	crit_enter();
	SIMPLEQ_INSERT_TAIL(&sc->sc_queue2, &me->me_q, q_next);
	ubsec_feed2(sc);
	ubsecstats.hst_modexp++;
	crit_exit();

	return (0);

errout:
	if (me->me_q.q_mcr.dma_map != NULL)
		ubsec_dma_free(sc, &me->me_q.q_mcr);
	if (me->me_q.q_ctx.dma_map != NULL) {
		bzero(me->me_q.q_ctx.dma_vaddr, me->me_q.q_ctx.dma_size);
		ubsec_dma_free(sc, &me->me_q.q_ctx);
	}
	if (me->me_M.dma_map != NULL) {
		bzero(me->me_M.dma_vaddr, me->me_M.dma_size);
		ubsec_dma_free(sc, &me->me_M);
	}
	if (me->me_E.dma_map != NULL) {
		bzero(me->me_E.dma_vaddr, me->me_E.dma_size);
		ubsec_dma_free(sc, &me->me_E);
	}
	if (me->me_C.dma_map != NULL) {
		bzero(me->me_C.dma_vaddr, me->me_C.dma_size);
		ubsec_dma_free(sc, &me->me_C);
	}
	if (me->me_epb.dma_map != NULL)
		ubsec_dma_free(sc, &me->me_epb);
	kfree(me, M_DEVBUF);
	krp->krp_status = err;
	crypto_kdone(krp);
	return (0);
}
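/*
 * Worked example (illustrative, not part of the original comments; exact
 * sizes depend on the request): for a 721-bit modulus N the ladder above
 * selects normbits = 768, so shiftbits = 768 - 721 = 47.  M, E and N are
 * each shifted up by 47 bits via ubsec_kshift_r() into 768/8 = 96-byte
 * operand buffers before the MCR is queued, and the completion path is
 * expected to shift the 96-byte result C back down by the same shiftbits
 * before it is returned to the caller.
 */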
/*
 * Start computation of cr[C] = (cr[M] ^ cr[E]) mod cr[N] (hw normalization)
 */
static int
ubsec_kprocess_modexp_hw(struct ubsec_softc *sc, struct cryptkop *krp, int hint)
{
	struct ubsec_q2_modexp *me;
	struct ubsec_mcr *mcr;
	struct ubsec_ctx_modexp *ctx;
	struct ubsec_pktbuf *epb;
	int err = 0;
	u_int nbits, normbits, mbits, shiftbits, ebits;

	me = kmalloc(sizeof *me, M_DEVBUF, M_INTWAIT | M_ZERO);
	me->me_krp = krp;
	me->me_q.q_type = UBS_CTXOP_MODEXP;

	nbits = ubsec_ksigbits(&krp->krp_param[UBS_MODEXP_PAR_N]);
	if (nbits <= 512)
		normbits = 512;
	else if (nbits <= 768)
		normbits = 768;
	else if (nbits <= 1024)
		normbits = 1024;
	else if (sc->sc_flags & UBS_FLAGS_BIGKEY && nbits <= 1536)
		normbits = 1536;
	else if (sc->sc_flags & UBS_FLAGS_BIGKEY && nbits <= 2048)
		normbits = 2048;
	else {
		err = E2BIG;
		goto errout;
	}

	shiftbits = normbits - nbits;

	me->me_modbits = nbits;
	me->me_shiftbits = shiftbits;
	me->me_normbits = normbits;

	/* Sanity check: result bits must be >= true modulus bits. */
	if (krp->krp_param[krp->krp_iparams].crp_nbits < nbits) {
		err = ERANGE;
		goto errout;
	}

	if (ubsec_dma_malloc(sc, sizeof(struct ubsec_mcr),
	    &me->me_q.q_mcr, 0)) {
		err = ENOMEM;
		goto errout;
	}
	mcr = (struct ubsec_mcr *)me->me_q.q_mcr.dma_vaddr;

	if (ubsec_dma_malloc(sc, sizeof(struct ubsec_ctx_modexp),
	    &me->me_q.q_ctx, 0)) {
		err = ENOMEM;
		goto errout;
	}

	mbits = ubsec_ksigbits(&krp->krp_param[UBS_MODEXP_PAR_M]);
	if (mbits > nbits) {
		err = E2BIG;
		goto errout;
	}
	if (ubsec_dma_malloc(sc, normbits / 8, &me->me_M, 0)) {
		err = ENOMEM;
		goto errout;
	}
	bzero(me->me_M.dma_vaddr, normbits / 8);
	bcopy(krp->krp_param[UBS_MODEXP_PAR_M].crp_p,
	    me->me_M.dma_vaddr, (mbits + 7) / 8);

	if (ubsec_dma_malloc(sc, normbits / 8, &me->me_C, 0)) {
		err = ENOMEM;
		goto errout;
	}
	bzero(me->me_C.dma_vaddr, me->me_C.dma_size);

	ebits = ubsec_ksigbits(&krp->krp_param[UBS_MODEXP_PAR_E]);
	if (ebits > nbits) {
		err = E2BIG;
		goto errout;
	}
	if (ubsec_dma_malloc(sc, normbits / 8, &me->me_E, 0)) {
		err = ENOMEM;
		goto errout;
	}
	bzero(me->me_E.dma_vaddr, normbits / 8);
	bcopy(krp->krp_param[UBS_MODEXP_PAR_E].crp_p,
	    me->me_E.dma_vaddr, (ebits + 7) / 8);

	if (ubsec_dma_malloc(sc, sizeof(struct ubsec_pktbuf),
	    &me->me_epb, 0)) {
		err = ENOMEM;
		goto errout;
	}
	epb = (struct ubsec_pktbuf *)me->me_epb.dma_vaddr;
	epb->pb_addr = htole32(me->me_E.dma_paddr);
	epb->pb_next = 0;
	epb->pb_len = htole32((ebits + 7) / 8);

	mcr->mcr_pkts = htole16(1);
	mcr->mcr_flags = 0;
	mcr->mcr_cmdctxp = htole32(me->me_q.q_ctx.dma_paddr);
	mcr->mcr_reserved = 0;
	mcr->mcr_pktlen = 0;

	mcr->mcr_ipktbuf.pb_addr = htole32(me->me_M.dma_paddr);
	mcr->mcr_ipktbuf.pb_len = htole32(normbits / 8);
	mcr->mcr_ipktbuf.pb_next = htole32(me->me_epb.dma_paddr);

	mcr->mcr_opktbuf.pb_addr = htole32(me->me_C.dma_paddr);
	mcr->mcr_opktbuf.pb_next = 0;
	mcr->mcr_opktbuf.pb_len = htole32(normbits / 8);

#ifdef DIAGNOSTIC
	/* Misaligned output buffer will hang the chip. */
	if ((letoh32(mcr->mcr_opktbuf.pb_addr) & 3) != 0)
		panic("%s: modexp invalid addr 0x%x\n",
		    device_get_nameunit(sc->sc_dev),
		    letoh32(mcr->mcr_opktbuf.pb_addr));
	if ((letoh32(mcr->mcr_opktbuf.pb_len) & 3) != 0)
		panic("%s: modexp invalid len 0x%x\n",
		    device_get_nameunit(sc->sc_dev),
		    letoh32(mcr->mcr_opktbuf.pb_len));
#endif

	ctx = (struct ubsec_ctx_modexp *)me->me_q.q_ctx.dma_vaddr;
	bzero(ctx, sizeof(*ctx));
	bcopy(krp->krp_param[UBS_MODEXP_PAR_N].crp_p, ctx->me_N,
	    (nbits + 7) / 8);
	ctx->me_len = htole16((normbits / 8) + (4 * sizeof(u_int16_t)));
	ctx->me_op = htole16(UBS_CTXOP_MODEXP);
	ctx->me_E_len = htole16(ebits);
	ctx->me_N_len = htole16(nbits);

#ifdef UBSEC_DEBUG
	ubsec_dump_mcr(mcr);
	ubsec_dump_ctx2((struct ubsec_ctx_keyop *)ctx);
#endif

	/*
	 * ubsec_feed2 will sync mcr and ctx, we just need to sync
	 * everything else.
	 */
	ubsec_dma_sync(&me->me_M, BUS_DMASYNC_PREWRITE);
	ubsec_dma_sync(&me->me_E, BUS_DMASYNC_PREWRITE);
	ubsec_dma_sync(&me->me_C, BUS_DMASYNC_PREREAD);
	ubsec_dma_sync(&me->me_epb, BUS_DMASYNC_PREWRITE);

	/* Enqueue and we're done... */
	crit_enter();
	SIMPLEQ_INSERT_TAIL(&sc->sc_queue2, &me->me_q, q_next);
	ubsec_feed2(sc);
	crit_exit();

	return (0);

errout:
	if (me->me_q.q_mcr.dma_map != NULL)
		ubsec_dma_free(sc, &me->me_q.q_mcr);
	if (me->me_q.q_ctx.dma_map != NULL) {
		bzero(me->me_q.q_ctx.dma_vaddr, me->me_q.q_ctx.dma_size);
		ubsec_dma_free(sc, &me->me_q.q_ctx);
	}
	if (me->me_M.dma_map != NULL) {
		bzero(me->me_M.dma_vaddr, me->me_M.dma_size);
		ubsec_dma_free(sc, &me->me_M);
	}
	if (me->me_E.dma_map != NULL) {
		bzero(me->me_E.dma_vaddr, me->me_E.dma_size);
		ubsec_dma_free(sc, &me->me_E);
	}
	if (me->me_C.dma_map != NULL) {
		bzero(me->me_C.dma_vaddr, me->me_C.dma_size);
		ubsec_dma_free(sc, &me->me_C);
	}
	if (me->me_epb.dma_map != NULL)
		ubsec_dma_free(sc, &me->me_epb);
	kfree(me, M_DEVBUF);
	krp->krp_status = err;
	crypto_kdone(krp);
	return (0);
}
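/*
 * Illustrative sketch (not from the original comments) of the single-packet
 * MCR built by the two modexp routines above:
 *
 *	mcr_cmdctxp ---> ubsec_ctx_modexp { me_len, me_op,
 *	                                    me_E_len, me_N_len, me_N[] }
 *	mcr_ipktbuf ---> M buffer (normbits/8 bytes)
 *	    pb_next ---> epb ---> E buffer
 *	mcr_opktbuf ---> C buffer (normbits/8 bytes)
 *
 * The DIAGNOSTIC panics enforce 4-byte alignment of the output buffer
 * because, as noted above, a misaligned output buffer will hang the chip.
 */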
static int
ubsec_kprocess_rsapriv(struct ubsec_softc *sc, struct cryptkop *krp, int hint)
{
	struct ubsec_q2_rsapriv *rp = NULL;
	struct ubsec_mcr *mcr;
	struct ubsec_ctx_rsapriv *ctx;
	int err = 0;
	u_int padlen, msglen;

	msglen = ubsec_ksigbits(&krp->krp_param[UBS_RSAPRIV_PAR_P]);
	padlen = ubsec_ksigbits(&krp->krp_param[UBS_RSAPRIV_PAR_Q]);
	if (msglen > padlen)
		padlen = msglen;

	if (padlen <= 256)
		padlen = 256;
	else if (padlen <= 384)
		padlen = 384;
	else if (padlen <= 512)
		padlen = 512;
	else if (sc->sc_flags & UBS_FLAGS_BIGKEY && padlen <= 768)
		padlen = 768;
	else if (sc->sc_flags & UBS_FLAGS_BIGKEY && padlen <= 1024)
		padlen = 1024;
	else {
		err = E2BIG;
		goto errout;
	}

	if (ubsec_ksigbits(&krp->krp_param[UBS_RSAPRIV_PAR_DP]) > padlen) {
		err = E2BIG;
		goto errout;
	}

	if (ubsec_ksigbits(&krp->krp_param[UBS_RSAPRIV_PAR_DQ]) > padlen) {
		err = E2BIG;
		goto errout;
	}

	if (ubsec_ksigbits(&krp->krp_param[UBS_RSAPRIV_PAR_PINV]) > padlen) {
		err = E2BIG;
		goto errout;
	}

	rp = kmalloc(sizeof *rp, M_DEVBUF, M_INTWAIT | M_ZERO);
	rp->rpr_krp = krp;
	rp->rpr_q.q_type = UBS_CTXOP_RSAPRIV;

	if (ubsec_dma_malloc(sc, sizeof(struct ubsec_mcr),
	    &rp->rpr_q.q_mcr, 0)) {
		err = ENOMEM;
		goto errout;
	}
	mcr = (struct ubsec_mcr *)rp->rpr_q.q_mcr.dma_vaddr;

	if (ubsec_dma_malloc(sc, sizeof(struct ubsec_ctx_rsapriv),
	    &rp->rpr_q.q_ctx, 0)) {
		err = ENOMEM;
		goto errout;
	}
	ctx = (struct ubsec_ctx_rsapriv *)rp->rpr_q.q_ctx.dma_vaddr;
	bzero(ctx, sizeof *ctx);

	/* Copy in p */
	bcopy(krp->krp_param[UBS_RSAPRIV_PAR_P].crp_p,
	    &ctx->rpr_buf[0 * (padlen / 8)],
	    (krp->krp_param[UBS_RSAPRIV_PAR_P].crp_nbits + 7) / 8);

	/* Copy in q */
	bcopy(krp->krp_param[UBS_RSAPRIV_PAR_Q].crp_p,
	    &ctx->rpr_buf[1 * (padlen / 8)],
	    (krp->krp_param[UBS_RSAPRIV_PAR_Q].crp_nbits + 7) / 8);

	/* Copy in dp */
	bcopy(krp->krp_param[UBS_RSAPRIV_PAR_DP].crp_p,
	    &ctx->rpr_buf[2 * (padlen / 8)],
	    (krp->krp_param[UBS_RSAPRIV_PAR_DP].crp_nbits + 7) / 8);

	/* Copy in dq */
	bcopy(krp->krp_param[UBS_RSAPRIV_PAR_DQ].crp_p,
	    &ctx->rpr_buf[3 * (padlen / 8)],
	    (krp->krp_param[UBS_RSAPRIV_PAR_DQ].crp_nbits + 7) / 8);

	/* Copy in pinv */
	bcopy(krp->krp_param[UBS_RSAPRIV_PAR_PINV].crp_p,
	    &ctx->rpr_buf[4 * (padlen / 8)],
	    (krp->krp_param[UBS_RSAPRIV_PAR_PINV].crp_nbits + 7) / 8);

	msglen = padlen * 2;

	/* Copy in input message (aligned buffer/length). */
	if (ubsec_ksigbits(&krp->krp_param[UBS_RSAPRIV_PAR_MSGIN]) > msglen) {
		/* Is this likely? */
		err = E2BIG;
		goto errout;
	}
	if (ubsec_dma_malloc(sc, (msglen + 7) / 8, &rp->rpr_msgin, 0)) {
		err = ENOMEM;
		goto errout;
	}
	bzero(rp->rpr_msgin.dma_vaddr, (msglen + 7) / 8);
	bcopy(krp->krp_param[UBS_RSAPRIV_PAR_MSGIN].crp_p,
	    rp->rpr_msgin.dma_vaddr,
	    (krp->krp_param[UBS_RSAPRIV_PAR_MSGIN].crp_nbits + 7) / 8);

	/* Prepare space for output message (aligned buffer/length). */
	if (ubsec_ksigbits(&krp->krp_param[UBS_RSAPRIV_PAR_MSGOUT]) < msglen) {
		/* Is this likely? */
		err = E2BIG;
		goto errout;
	}
	if (ubsec_dma_malloc(sc, (msglen + 7) / 8, &rp->rpr_msgout, 0)) {
		err = ENOMEM;
		goto errout;
	}
	bzero(rp->rpr_msgout.dma_vaddr, (msglen + 7) / 8);

	mcr->mcr_pkts = htole16(1);
	mcr->mcr_flags = 0;
	mcr->mcr_cmdctxp = htole32(rp->rpr_q.q_ctx.dma_paddr);
	mcr->mcr_ipktbuf.pb_addr = htole32(rp->rpr_msgin.dma_paddr);
	mcr->mcr_ipktbuf.pb_next = 0;
	mcr->mcr_ipktbuf.pb_len = htole32(rp->rpr_msgin.dma_size);
	mcr->mcr_reserved = 0;
	mcr->mcr_pktlen = htole16(msglen);
	mcr->mcr_opktbuf.pb_addr = htole32(rp->rpr_msgout.dma_paddr);
	mcr->mcr_opktbuf.pb_next = 0;
	mcr->mcr_opktbuf.pb_len = htole32(rp->rpr_msgout.dma_size);

#ifdef DIAGNOSTIC
	if (rp->rpr_msgin.dma_paddr & 3 || rp->rpr_msgin.dma_size & 3) {
		panic("%s: rsapriv: invalid msgin %x(0x%x)",
		    device_get_nameunit(sc->sc_dev),
		    rp->rpr_msgin.dma_paddr, rp->rpr_msgin.dma_size);
	}
	if (rp->rpr_msgout.dma_paddr & 3 || rp->rpr_msgout.dma_size & 3) {
		panic("%s: rsapriv: invalid msgout %x(0x%x)",
		    device_get_nameunit(sc->sc_dev),
		    rp->rpr_msgout.dma_paddr, rp->rpr_msgout.dma_size);
	}
#endif

	ctx->rpr_len = (sizeof(u_int16_t) * 4) + (5 * (padlen / 8));
	ctx->rpr_op = htole16(UBS_CTXOP_RSAPRIV);
	ctx->rpr_q_len = htole16(padlen);
	ctx->rpr_p_len = htole16(padlen);

	/*
	 * ubsec_feed2 will sync mcr and ctx, we just need to sync
	 * everything else.
	 */
	ubsec_dma_sync(&rp->rpr_msgin, BUS_DMASYNC_PREWRITE);
	ubsec_dma_sync(&rp->rpr_msgout, BUS_DMASYNC_PREREAD);

	/* Enqueue and we're done... */
	crit_enter();
	SIMPLEQ_INSERT_TAIL(&sc->sc_queue2, &rp->rpr_q, q_next);
	ubsec_feed2(sc);
	ubsecstats.hst_modexpcrt++;
	crit_exit();

	return (0);

errout:
	if (rp != NULL) {
		if (rp->rpr_q.q_mcr.dma_map != NULL)
			ubsec_dma_free(sc, &rp->rpr_q.q_mcr);
		if (rp->rpr_msgin.dma_map != NULL) {
			bzero(rp->rpr_msgin.dma_vaddr, rp->rpr_msgin.dma_size);
			ubsec_dma_free(sc, &rp->rpr_msgin);
		}
		if (rp->rpr_msgout.dma_map != NULL) {
			bzero(rp->rpr_msgout.dma_vaddr, rp->rpr_msgout.dma_size);
			ubsec_dma_free(sc, &rp->rpr_msgout);
		}
		kfree(rp, M_DEVBUF);
	}
	krp->krp_status = err;
	crypto_kdone(krp);
	return (0);
}
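/*
 * Illustrative layout (not from the original comments) of the RSA CRT
 * context built above: rpr_buf[] is packed as five fields of padlen/8
 * bytes each, in the order p, q, dp, dq, pinv, and the message width is
 * rounded up to msglen = 2 * padlen bits so it can hold a value the size
 * of the full modulus p * q.  For example, with 512-bit p and q the
 * ladder selects padlen = 512, each field occupies 64 bytes, rpr_buf[]
 * spans 5 * 64 = 320 bytes, and msgin/msgout are 1024-bit (128-byte)
 * DMA buffers.
 */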
#ifdef UBSEC_DEBUG
static void
ubsec_dump_pb(volatile struct ubsec_pktbuf *pb)
{
	kprintf("addr 0x%x (0x%x) next 0x%x\n",
	    pb->pb_addr, pb->pb_len, pb->pb_next);
}
static void
ubsec_dump_ctx2(struct ubsec_ctx_keyop *c)
{
	kprintf("CTX (0x%x):\n", c->ctx_len);
	switch (letoh16(c->ctx_op)) {
	case UBS_CTXOP_RNGBYPASS:
	case UBS_CTXOP_RNGSHA1:
		break;
	case UBS_CTXOP_MODEXP:
	{
		struct ubsec_ctx_modexp *cx = (void *)c;
		int i, len;

		kprintf(" Elen %u, Nlen %u\n",
		    letoh16(cx->me_E_len), letoh16(cx->me_N_len));
		len = (cx->me_N_len + 7)/8;
		for (i = 0; i < len; i++)
			kprintf("%s%02x", (i == 0) ? " N: " : ":", cx->me_N[i]);
		kprintf("\n");
		break;
	}
	default:
		kprintf("unknown context: %x\n", c->ctx_op);
	}
	kprintf("END CTX\n");
}
static void
ubsec_dump_mcr(struct ubsec_mcr *mcr)
{
	volatile struct ubsec_mcr_add *ma;
	int i;

	kprintf("MCR:\n");
	kprintf(" pkts: %u, flags 0x%x\n",
	    letoh16(mcr->mcr_pkts), letoh16(mcr->mcr_flags));
	ma = (volatile struct ubsec_mcr_add *)&mcr->mcr_cmdctxp;
	for (i = 0; i < letoh16(mcr->mcr_pkts); i++) {
		kprintf(" %d: ctx 0x%x len 0x%x rsvd 0x%x\n", i,
		    letoh32(ma->mcr_cmdctxp), letoh16(ma->mcr_pktlen),
		    letoh16(ma->mcr_reserved));
		kprintf(" %d: ipkt ", i);
		ubsec_dump_pb(&ma->mcr_ipktbuf);
		kprintf(" %d: opkt ", i);
		ubsec_dump_pb(&ma->mcr_opktbuf);
		ma++;
	}
	kprintf("END MCR\n");
}
#endif /* UBSEC_DEBUG */
/*
 * Return the number of significant bits of a big number.
 */
static int
ubsec_ksigbits(struct crparam *cr)
{
	u_int plen = (cr->crp_nbits + 7) / 8;
	int i, sig = plen * 8;
	u_int8_t c, *p = cr->crp_p;

	for (i = plen - 1; i >= 0; i--) {
		c = p[i];
		if (c != 0) {
			while ((c & 0x80) == 0) {
				sig--;
				c <<= 1;
			}
			break;
		}
		sig -= 8;
	}
	return (sig);
}
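/*
 * Worked example (illustrative): for a 16-bit parameter whose bytes are
 * p[0] = 0x34 and p[1] = 0x02, the loop above starts at p[1] = 0x02
 * (treated as the most significant byte), strips its six leading zero
 * bits and returns 16 - 6 = 10 significant bits.
 */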
static void
ubsec_kshift_r(
	u_int shiftbits,
	u_int8_t *src, u_int srcbits,
	u_int8_t *dst, u_int dstbits)
{
	u_int slen, dlen;
	int i, si, di, n;

	slen = (srcbits + 7) / 8;
	dlen = (dstbits + 7) / 8;

	for (i = 0; i < slen; i++)
		dst[i] = src[i];
	for (i = 0; i < dlen - slen; i++)
		dst[slen + i] = 0;

	n = shiftbits / 8;
	if (n != 0) {
		si = dlen - n - 1;
		di = dlen - 1;
		while (si >= 0)
			dst[di--] = dst[si--];
		while (di >= 0)
			dst[di--] = 0;
	}

	n = shiftbits % 8;
	if (n != 0) {
		for (i = dlen - 1; i > 0; i--)
			dst[i] = (dst[i] << n) |
			    (dst[i - 1] >> (8 - n));
		dst[0] = dst[0] << n;
	}
}
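/*
 * Worked example (illustrative): with shiftbits = 4, src = { 0xab, 0x01 }
 * (srcbits = 9) and a 4-byte destination, the copy/zero-fill step leaves
 * dst = { 0xab, 0x01, 0x00, 0x00 }, the whole-byte move is skipped
 * (shiftbits / 8 == 0), and the bit pass produces
 * dst = { 0xb0, 0x1a, 0x00, 0x00 }, i.e. the operand shifted up by four
 * bit positions with the least significant byte kept in dst[0].
 */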
static void
ubsec_kshift_l(
	u_int shiftbits,
	u_int8_t *src, u_int srcbits,
	u_int8_t *dst, u_int dstbits)
{
	int slen, dlen, i, n;

	slen = (srcbits + 7) / 8;
	dlen = (dstbits + 7) / 8;

	n = shiftbits / 8;
	for (i = 0; i < slen; i++)
		dst[i] = src[i + n];
	for (i = 0; i < dlen - slen; i++)
		dst[slen + i] = 0;

	n = shiftbits % 8;
	if (n != 0) {
		for (i = 0; i < (dlen - 1); i++)
			dst[i] = (dst[i] >> n) | (dst[i + 1] << (8 - n));
		dst[dlen - 1] = dst[dlen - 1] >> n;
	}
}
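/*
 * Worked example (illustrative): ubsec_kshift_l() undoes the shift applied
 * above.  With shiftbits = 4, src = { 0xb0, 0x1a, 0x00, 0x00 } and a
 * 4-byte destination, the whole-byte pass copies the bytes unchanged
 * (shiftbits / 8 == 0) and the bit pass yields
 * dst = { 0xab, 0x01, 0x00, 0x00 }, recovering the original operand.
 */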