/*
 * talitos - Freescale Integrated Security Engine (SEC) device driver
 *
 * Copyright (c) 2008-2011 Freescale Semiconductor, Inc.
 *
 * Scatterlist Crypto API glue code copied from files with the following:
 * Copyright (c) 2006-2007 Herbert Xu <herbert@gondor.apana.org.au>
 *
 * Crypto algorithm registration code copied from hifn driver:
 * 2007+ Copyright (c) Evgeniy Polyakov <johnpol@2ka.mipt.ru>
 * All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/mod_devicetable.h>
#include <linux/device.h>
#include <linux/interrupt.h>
#include <linux/crypto.h>
#include <linux/hw_random.h>
#include <linux/of_platform.h>
#include <linux/dma-mapping.h>
#include <linux/io.h>
#include <linux/spinlock.h>
#include <linux/rtnetlink.h>
#include <linux/slab.h>
#include <linux/string.h>

#include <crypto/algapi.h>
#include <crypto/aes.h>
#include <crypto/des.h>
#include <crypto/sha.h>
#include <crypto/md5.h>
#include <crypto/aead.h>
#include <crypto/authenc.h>
#include <crypto/skcipher.h>
#include <crypto/hash.h>
#include <crypto/internal/hash.h>
#include <crypto/scatterwalk.h>

#include "talitos.h"
static void to_talitos_ptr(struct talitos_ptr *talitos_ptr, dma_addr_t dma_addr)
{
	talitos_ptr->ptr = cpu_to_be32(lower_32_bits(dma_addr));
	talitos_ptr->eptr = upper_32_bits(dma_addr);
}
/*
 * map virtual single (contiguous) pointer to h/w descriptor pointer
 */
static void map_single_talitos_ptr(struct device *dev,
				   struct talitos_ptr *talitos_ptr,
				   unsigned short len, void *data,
				   unsigned char extent,
				   enum dma_data_direction dir)
{
	dma_addr_t dma_addr = dma_map_single(dev, data, len, dir);

	talitos_ptr->len = cpu_to_be16(len);
	to_talitos_ptr(talitos_ptr, dma_addr);
	talitos_ptr->j_extent = extent;
}
/*
 * unmap bus single (contiguous) h/w descriptor pointer
 */
static void unmap_single_talitos_ptr(struct device *dev,
				     struct talitos_ptr *talitos_ptr,
				     enum dma_data_direction dir)
{
	dma_unmap_single(dev, be32_to_cpu(talitos_ptr->ptr),
			 be16_to_cpu(talitos_ptr->len), dir);
}
static int reset_channel(struct device *dev, int ch)
{
	struct talitos_private *priv = dev_get_drvdata(dev);
	unsigned int timeout = TALITOS_TIMEOUT;

	setbits32(priv->chan[ch].reg + TALITOS_CCCR, TALITOS_CCCR_RESET);

	while ((in_be32(priv->chan[ch].reg + TALITOS_CCCR) & TALITOS_CCCR_RESET)
	       && --timeout)
		cpu_relax();

	if (timeout == 0) {
		dev_err(dev, "failed to reset channel %d\n", ch);
		return -EIO;
	}

	/* set 36-bit addressing, done writeback enable and done IRQ enable */
	setbits32(priv->chan[ch].reg + TALITOS_CCCR_LO, TALITOS_CCCR_LO_EAE |
		  TALITOS_CCCR_LO_CDWE | TALITOS_CCCR_LO_CDIE);

	/* and ICCR writeback, if available */
	if (priv->features & TALITOS_FTR_HW_AUTH_CHECK)
		setbits32(priv->chan[ch].reg + TALITOS_CCCR_LO,
			  TALITOS_CCCR_LO_IWSE);

	return 0;
}
static int reset_device(struct device *dev)
{
	struct talitos_private *priv = dev_get_drvdata(dev);
	unsigned int timeout = TALITOS_TIMEOUT;
	u32 mcr = TALITOS_MCR_SWR;

	setbits32(priv->reg + TALITOS_MCR, mcr);

	while ((in_be32(priv->reg + TALITOS_MCR) & TALITOS_MCR_SWR)
	       && --timeout)
		cpu_relax();

	if (priv->irq[1]) {
		mcr = TALITOS_MCR_RCA1 | TALITOS_MCR_RCA3;
		setbits32(priv->reg + TALITOS_MCR, mcr);
	}

	if (timeout == 0) {
		dev_err(dev, "failed to reset device\n");
		return -EIO;
	}

	return 0;
}
/*
 * Reset and initialize the device
 */
static int init_device(struct device *dev)
{
	struct talitos_private *priv = dev_get_drvdata(dev);
	int ch, err;

	/*
	 * Master reset
	 * errata documentation: warning: certain SEC interrupts
	 * are not fully cleared by writing the MCR:SWR bit,
	 * set bit twice to completely reset
	 */
	err = reset_device(dev);
	if (err)
		return err;

	err = reset_device(dev);
	if (err)
		return err;

	/* reset channels */
	for (ch = 0; ch < priv->num_channels; ch++) {
		err = reset_channel(dev, ch);
		if (err)
			return err;
	}

	/* enable channel done and error interrupts */
	setbits32(priv->reg + TALITOS_IMR, TALITOS_IMR_INIT);
	setbits32(priv->reg + TALITOS_IMR_LO, TALITOS_IMR_LO_INIT);

	/* disable integrity check error interrupts (use writeback instead) */
	if (priv->features & TALITOS_FTR_HW_AUTH_CHECK)
		setbits32(priv->reg + TALITOS_MDEUICR_LO,
			  TALITOS_MDEUICR_LO_ICE);

	return 0;
}
/**
 * talitos_submit - submits a descriptor to the device for processing
 * @dev:	the SEC device to be used
 * @ch:		the SEC device channel to be used
 * @desc:	the descriptor to be processed by the device
 * @callback:	whom to call when processing is complete
 * @context:	a handle for use by caller (optional)
 *
 * desc must contain valid dma-mapped (bus physical) address pointers.
 * callback must check err and feedback in descriptor header
 * for device processing status.
 */
int talitos_submit(struct device *dev, int ch, struct talitos_desc *desc,
		   void (*callback)(struct device *dev,
				    struct talitos_desc *desc,
				    void *context, int error),
		   void *context)
{
	struct talitos_private *priv = dev_get_drvdata(dev);
	struct talitos_request *request;
	unsigned long flags;
	int head;

	spin_lock_irqsave(&priv->chan[ch].head_lock, flags);

	if (!atomic_inc_not_zero(&priv->chan[ch].submit_count)) {
		/* h/w fifo is full */
		spin_unlock_irqrestore(&priv->chan[ch].head_lock, flags);
		return -EAGAIN;
	}

	head = priv->chan[ch].head;
	request = &priv->chan[ch].fifo[head];

	/* map descriptor and save caller data */
	request->dma_desc = dma_map_single(dev, desc, sizeof(*desc),
					   DMA_BIDIRECTIONAL);
	request->callback = callback;
	request->context = context;

	/* increment fifo head */
	priv->chan[ch].head = (priv->chan[ch].head + 1) & (priv->fifo_len - 1);

	smp_wmb();
	request->desc = desc;

	/* GO! */
	wmb();
	out_be32(priv->chan[ch].reg + TALITOS_FF,
		 upper_32_bits(request->dma_desc));
	out_be32(priv->chan[ch].reg + TALITOS_FF_LO,
		 lower_32_bits(request->dma_desc));

	spin_unlock_irqrestore(&priv->chan[ch].head_lock, flags);

	return -EINPROGRESS;
}
EXPORT_SYMBOL(talitos_submit);
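
/*
 * Usage sketch (illustrative only, not part of the driver, not built):
 * how a client of talitos_submit() is expected to queue a descriptor and
 * handle the return code. The callback and channel below are hypothetical;
 * real callers in this file (ipsec_esp(), common_nonsnoop()) DMA-map every
 * pointer in the descriptor before submitting.
 */
#if 0
static void example_done(struct device *dev, struct talitos_desc *desc,
			 void *context, int error)
{
	/* inspect error and the done/feedback bits in desc->hdr, then
	 * unmap buffers and complete the request that owns 'context' */
}

static int example_queue(struct device *dev, int ch, struct talitos_desc *desc)
{
	int ret = talitos_submit(dev, ch, desc, example_done, NULL);

	/* -EINPROGRESS: queued, example_done() will run later.
	 * anything else (e.g. -EAGAIN, channel fifo full): not queued,
	 * the caller must clean up its own mappings. */
	return ret == -EINPROGRESS ? 0 : ret;
}
#endif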
/*
 * process what was done, notify callback of error if not
 */
static void flush_channel(struct device *dev, int ch, int error, int reset_ch)
{
	struct talitos_private *priv = dev_get_drvdata(dev);
	struct talitos_request *request, saved_req;
	unsigned long flags;
	int tail, status;

	spin_lock_irqsave(&priv->chan[ch].tail_lock, flags);

	tail = priv->chan[ch].tail;
	while (priv->chan[ch].fifo[tail].desc) {
		request = &priv->chan[ch].fifo[tail];

		/* descriptors with their done bits set don't get the error */
		rmb();
		if ((request->desc->hdr & DESC_HDR_DONE) == DESC_HDR_DONE)
			status = 0;
		else
			if (!error)
				break;
			else
				status = error;

		dma_unmap_single(dev, request->dma_desc,
				 sizeof(struct talitos_desc),
				 DMA_BIDIRECTIONAL);

		/* copy entries so we can call callback outside lock */
		saved_req.desc = request->desc;
		saved_req.callback = request->callback;
		saved_req.context = request->context;

		/* release request entry in fifo */
		smp_wmb();
		request->desc = NULL;

		/* increment fifo tail */
		priv->chan[ch].tail = (tail + 1) & (priv->fifo_len - 1);

		spin_unlock_irqrestore(&priv->chan[ch].tail_lock, flags);

		atomic_dec(&priv->chan[ch].submit_count);

		saved_req.callback(dev, saved_req.desc, saved_req.context,
				   status);
		/* channel may resume processing in single desc error case */
		if (error && !reset_ch && status == error)
			return;
		spin_lock_irqsave(&priv->chan[ch].tail_lock, flags);
		tail = priv->chan[ch].tail;
	}

	spin_unlock_irqrestore(&priv->chan[ch].tail_lock, flags);
}
/*
 * process completed requests for channels that have done status
 */
#define DEF_TALITOS_DONE(name, ch_done_mask)				\
static void talitos_done_##name(unsigned long data)			\
{									\
	struct device *dev = (struct device *)data;			\
	struct talitos_private *priv = dev_get_drvdata(dev);		\
	unsigned long flags;						\
									\
	if (ch_done_mask & 1)						\
		flush_channel(dev, 0, 0, 0);				\
	if (priv->num_channels == 1)					\
		goto out;						\
	if (ch_done_mask & (1 << 2))					\
		flush_channel(dev, 1, 0, 0);				\
	if (ch_done_mask & (1 << 4))					\
		flush_channel(dev, 2, 0, 0);				\
	if (ch_done_mask & (1 << 6))					\
		flush_channel(dev, 3, 0, 0);				\
									\
out:									\
	/* At this point, all completed channels have been processed */ \
	/* Unmask done interrupts for channels completed later on. */	\
	spin_lock_irqsave(&priv->reg_lock, flags);			\
	setbits32(priv->reg + TALITOS_IMR, ch_done_mask);		\
	setbits32(priv->reg + TALITOS_IMR_LO, TALITOS_IMR_LO_INIT);	\
	spin_unlock_irqrestore(&priv->reg_lock, flags);			\
}
DEF_TALITOS_DONE(4ch, TALITOS_ISR_4CHDONE)
DEF_TALITOS_DONE(ch0_2, TALITOS_ISR_CH_0_2_DONE)
DEF_TALITOS_DONE(ch1_3, TALITOS_ISR_CH_1_3_DONE)
/*
 * locate current (offending) descriptor
 */
static u32 current_desc_hdr(struct device *dev, int ch)
{
	struct talitos_private *priv = dev_get_drvdata(dev);
	int tail = priv->chan[ch].tail;
	dma_addr_t cur_desc;

	cur_desc = in_be32(priv->chan[ch].reg + TALITOS_CDPR_LO);

	while (priv->chan[ch].fifo[tail].dma_desc != cur_desc) {
		tail = (tail + 1) & (priv->fifo_len - 1);
		if (tail == priv->chan[ch].tail) {
			dev_err(dev, "couldn't locate current descriptor\n");
			return 0;
		}
	}

	return priv->chan[ch].fifo[tail].desc->hdr;
}
/*
 * user diagnostics; report root cause of error based on execution unit status
 */
static void report_eu_error(struct device *dev, int ch, u32 desc_hdr)
{
	struct talitos_private *priv = dev_get_drvdata(dev);
	int i;

	if (!desc_hdr)
		desc_hdr = in_be32(priv->chan[ch].reg + TALITOS_DESCBUF);

	switch (desc_hdr & DESC_HDR_SEL0_MASK) {
	case DESC_HDR_SEL0_AFEU:
		dev_err(dev, "AFEUISR 0x%08x_%08x\n",
			in_be32(priv->reg + TALITOS_AFEUISR),
			in_be32(priv->reg + TALITOS_AFEUISR_LO));
		break;
	case DESC_HDR_SEL0_DEU:
		dev_err(dev, "DEUISR 0x%08x_%08x\n",
			in_be32(priv->reg + TALITOS_DEUISR),
			in_be32(priv->reg + TALITOS_DEUISR_LO));
		break;
	case DESC_HDR_SEL0_MDEUA:
	case DESC_HDR_SEL0_MDEUB:
		dev_err(dev, "MDEUISR 0x%08x_%08x\n",
			in_be32(priv->reg + TALITOS_MDEUISR),
			in_be32(priv->reg + TALITOS_MDEUISR_LO));
		break;
	case DESC_HDR_SEL0_RNG:
		dev_err(dev, "RNGUISR 0x%08x_%08x\n",
			in_be32(priv->reg + TALITOS_RNGUISR),
			in_be32(priv->reg + TALITOS_RNGUISR_LO));
		break;
	case DESC_HDR_SEL0_PKEU:
		dev_err(dev, "PKEUISR 0x%08x_%08x\n",
			in_be32(priv->reg + TALITOS_PKEUISR),
			in_be32(priv->reg + TALITOS_PKEUISR_LO));
		break;
	case DESC_HDR_SEL0_AESU:
		dev_err(dev, "AESUISR 0x%08x_%08x\n",
			in_be32(priv->reg + TALITOS_AESUISR),
			in_be32(priv->reg + TALITOS_AESUISR_LO));
		break;
	case DESC_HDR_SEL0_CRCU:
		dev_err(dev, "CRCUISR 0x%08x_%08x\n",
			in_be32(priv->reg + TALITOS_CRCUISR),
			in_be32(priv->reg + TALITOS_CRCUISR_LO));
		break;
	case DESC_HDR_SEL0_KEU:
		dev_err(dev, "KEUISR 0x%08x_%08x\n",
			in_be32(priv->reg + TALITOS_KEUISR),
			in_be32(priv->reg + TALITOS_KEUISR_LO));
		break;
	}

	switch (desc_hdr & DESC_HDR_SEL1_MASK) {
	case DESC_HDR_SEL1_MDEUA:
	case DESC_HDR_SEL1_MDEUB:
		dev_err(dev, "MDEUISR 0x%08x_%08x\n",
			in_be32(priv->reg + TALITOS_MDEUISR),
			in_be32(priv->reg + TALITOS_MDEUISR_LO));
		break;
	case DESC_HDR_SEL1_CRCU:
		dev_err(dev, "CRCUISR 0x%08x_%08x\n",
			in_be32(priv->reg + TALITOS_CRCUISR),
			in_be32(priv->reg + TALITOS_CRCUISR_LO));
		break;
	}

	for (i = 0; i < 8; i++)
		dev_err(dev, "DESCBUF 0x%08x_%08x\n",
			in_be32(priv->chan[ch].reg + TALITOS_DESCBUF + 8*i),
			in_be32(priv->chan[ch].reg + TALITOS_DESCBUF_LO + 8*i));
}
/*
 * recover from error interrupts
 */
static void talitos_error(struct device *dev, u32 isr, u32 isr_lo)
{
	struct talitos_private *priv = dev_get_drvdata(dev);
	unsigned int timeout = TALITOS_TIMEOUT;
	int ch, error, reset_dev = 0, reset_ch = 0;
	u32 v, v_lo;

	for (ch = 0; ch < priv->num_channels; ch++) {
		/* skip channels without errors */
		if (!(isr & (1 << (ch * 2 + 1))))
			continue;

		error = -EINVAL;

		v = in_be32(priv->chan[ch].reg + TALITOS_CCPSR);
		v_lo = in_be32(priv->chan[ch].reg + TALITOS_CCPSR_LO);

		if (v_lo & TALITOS_CCPSR_LO_DOF) {
			dev_err(dev, "double fetch fifo overflow error\n");
			error = -EAGAIN;
			reset_ch = 1;
		}
		if (v_lo & TALITOS_CCPSR_LO_SOF) {
			/* h/w dropped descriptor */
			dev_err(dev, "single fetch fifo overflow error\n");
			error = -EAGAIN;
		}
		if (v_lo & TALITOS_CCPSR_LO_MDTE)
			dev_err(dev, "master data transfer error\n");
		if (v_lo & TALITOS_CCPSR_LO_SGDLZ)
			dev_err(dev, "s/g data length zero error\n");
		if (v_lo & TALITOS_CCPSR_LO_FPZ)
			dev_err(dev, "fetch pointer zero error\n");
		if (v_lo & TALITOS_CCPSR_LO_IDH)
			dev_err(dev, "illegal descriptor header error\n");
		if (v_lo & TALITOS_CCPSR_LO_IEU)
			dev_err(dev, "invalid execution unit error\n");
		if (v_lo & TALITOS_CCPSR_LO_EU)
			report_eu_error(dev, ch, current_desc_hdr(dev, ch));
		if (v_lo & TALITOS_CCPSR_LO_GB)
			dev_err(dev, "gather boundary error\n");
		if (v_lo & TALITOS_CCPSR_LO_GRL)
			dev_err(dev, "gather return/length error\n");
		if (v_lo & TALITOS_CCPSR_LO_SB)
			dev_err(dev, "scatter boundary error\n");
		if (v_lo & TALITOS_CCPSR_LO_SRL)
			dev_err(dev, "scatter return/length error\n");

		flush_channel(dev, ch, error, reset_ch);

		if (reset_ch) {
			reset_channel(dev, ch);
		} else {
			setbits32(priv->chan[ch].reg + TALITOS_CCCR,
				  TALITOS_CCCR_CONT);
			setbits32(priv->chan[ch].reg + TALITOS_CCCR_LO, 0);
			while ((in_be32(priv->chan[ch].reg + TALITOS_CCCR) &
			       TALITOS_CCCR_CONT) && --timeout)
				cpu_relax();
			if (timeout == 0) {
				dev_err(dev, "failed to restart channel %d\n",
					ch);
				reset_dev = 1;
			}
		}
	}
	if (reset_dev || isr & ~TALITOS_ISR_4CHERR || isr_lo) {
		dev_err(dev, "done overflow, internal time out, or rngu error: "
			"ISR 0x%08x_%08x\n", isr, isr_lo);

		/* purge request queues */
		for (ch = 0; ch < priv->num_channels; ch++)
			flush_channel(dev, ch, -EIO, 1);

		/* reset and reinitialize the device */
		init_device(dev);
	}
}
#define DEF_TALITOS_INTERRUPT(name, ch_done_mask, ch_err_mask, tlet)	       \
static irqreturn_t talitos_interrupt_##name(int irq, void *data)	       \
{									       \
	struct device *dev = data;					       \
	struct talitos_private *priv = dev_get_drvdata(dev);		       \
	u32 isr, isr_lo;						       \
	unsigned long flags;						       \
									       \
	spin_lock_irqsave(&priv->reg_lock, flags);			       \
	isr = in_be32(priv->reg + TALITOS_ISR);				       \
	isr_lo = in_be32(priv->reg + TALITOS_ISR_LO);			       \
	/* Acknowledge interrupt */					       \
	out_be32(priv->reg + TALITOS_ICR, isr & (ch_done_mask | ch_err_mask)); \
	out_be32(priv->reg + TALITOS_ICR_LO, isr_lo);			       \
									       \
	if (unlikely(isr & ch_err_mask || isr_lo)) {			       \
		spin_unlock_irqrestore(&priv->reg_lock, flags);		       \
		talitos_error(dev, isr & ch_err_mask, isr_lo);		       \
	} else {							       \
		if (likely(isr & ch_done_mask)) {			       \
			/* mask further done interrupts. */		       \
			clrbits32(priv->reg + TALITOS_IMR, ch_done_mask);      \
			/* done_task will unmask done interrupts at exit */    \
			tasklet_schedule(&priv->done_task[tlet]);	       \
		}							       \
		spin_unlock_irqrestore(&priv->reg_lock, flags);		       \
	}								       \
									       \
	return (isr & (ch_done_mask | ch_err_mask) || isr_lo) ? IRQ_HANDLED : \
								IRQ_NONE;      \
}
DEF_TALITOS_INTERRUPT(4ch, TALITOS_ISR_4CHDONE, TALITOS_ISR_4CHERR, 0)
DEF_TALITOS_INTERRUPT(ch0_2, TALITOS_ISR_CH_0_2_DONE, TALITOS_ISR_CH_0_2_ERR, 0)
DEF_TALITOS_INTERRUPT(ch1_3, TALITOS_ISR_CH_1_3_DONE, TALITOS_ISR_CH_1_3_ERR, 1)
/*
 * hwrng
 */
static int talitos_rng_data_present(struct hwrng *rng, int wait)
{
	struct device *dev = (struct device *)rng->priv;
	struct talitos_private *priv = dev_get_drvdata(dev);
	u32 ofl;
	int i;

	for (i = 0; i < 20; i++) {
		ofl = in_be32(priv->reg + TALITOS_RNGUSR_LO) &
		      TALITOS_RNGUSR_LO_OFL;
		if (ofl || !wait)
			break;
		udelay(10);
	}

	return !!ofl;
}

static int talitos_rng_data_read(struct hwrng *rng, u32 *data)
{
	struct device *dev = (struct device *)rng->priv;
	struct talitos_private *priv = dev_get_drvdata(dev);

	/* rng fifo requires 64-bit accesses */
	*data = in_be32(priv->reg + TALITOS_RNGU_FIFO);
	*data = in_be32(priv->reg + TALITOS_RNGU_FIFO_LO);

	return sizeof(u32);
}

static int talitos_rng_init(struct hwrng *rng)
{
	struct device *dev = (struct device *)rng->priv;
	struct talitos_private *priv = dev_get_drvdata(dev);
	unsigned int timeout = TALITOS_TIMEOUT;

	setbits32(priv->reg + TALITOS_RNGURCR_LO, TALITOS_RNGURCR_LO_SR);
	while (!(in_be32(priv->reg + TALITOS_RNGUSR_LO) & TALITOS_RNGUSR_LO_RD)
	       && --timeout)
		cpu_relax();
	if (timeout == 0) {
		dev_err(dev, "failed to reset rng hw\n");
		return -ENODEV;
	}

	/* start generating */
	setbits32(priv->reg + TALITOS_RNGUDSR_LO, 0);

	return 0;
}

static int talitos_register_rng(struct device *dev)
{
	struct talitos_private *priv = dev_get_drvdata(dev);

	priv->rng.name		= dev_driver_string(dev),
	priv->rng.init		= talitos_rng_init,
	priv->rng.data_present	= talitos_rng_data_present,
	priv->rng.data_read	= talitos_rng_data_read,
	priv->rng.priv		= (unsigned long)dev;

	return hwrng_register(&priv->rng);
}

static void talitos_unregister_rng(struct device *dev)
{
	struct talitos_private *priv = dev_get_drvdata(dev);

	hwrng_unregister(&priv->rng);
}
#define TALITOS_CRA_PRIORITY		3000
#define TALITOS_MAX_KEY_SIZE		96
#define TALITOS_MAX_IV_LENGTH		16 /* max of AES_BLOCK_SIZE, DES3_EDE_BLOCK_SIZE */

#define MD5_BLOCK_SIZE    64

struct talitos_ctx {
	struct device *dev;
	int ch;
	__be32 desc_hdr_template;
	u8 key[TALITOS_MAX_KEY_SIZE];
	u8 iv[TALITOS_MAX_IV_LENGTH];
	unsigned int keylen;
	unsigned int enckeylen;
	unsigned int authkeylen;
	unsigned int authsize;
};

#define HASH_MAX_BLOCK_SIZE		SHA512_BLOCK_SIZE
#define TALITOS_MDEU_MAX_CONTEXT_SIZE	TALITOS_MDEU_CONTEXT_SIZE_SHA384_SHA512

struct talitos_ahash_req_ctx {
	u32 hw_context[TALITOS_MDEU_MAX_CONTEXT_SIZE / sizeof(u32)];
	unsigned int hw_context_size;
	u8 buf[HASH_MAX_BLOCK_SIZE];
	u8 bufnext[HASH_MAX_BLOCK_SIZE];
	unsigned int swinit;
	unsigned int first;
	unsigned int last;
	unsigned int to_hash_later;
	u64 nbuf;
	struct scatterlist bufsl[2];
	struct scatterlist *psrc;
};
static int aead_setauthsize(struct crypto_aead *authenc,
			    unsigned int authsize)
{
	struct talitos_ctx *ctx = crypto_aead_ctx(authenc);

	ctx->authsize = authsize;

	return 0;
}

static int aead_setkey(struct crypto_aead *authenc,
		       const u8 *key, unsigned int keylen)
{
	struct talitos_ctx *ctx = crypto_aead_ctx(authenc);
	struct rtattr *rta = (void *)key;
	struct crypto_authenc_key_param *param;
	unsigned int authkeylen;
	unsigned int enckeylen;

	if (!RTA_OK(rta, keylen))
		goto badkey;

	if (rta->rta_type != CRYPTO_AUTHENC_KEYA_PARAM)
		goto badkey;

	if (RTA_PAYLOAD(rta) < sizeof(*param))
		goto badkey;

	param = RTA_DATA(rta);
	enckeylen = be32_to_cpu(param->enckeylen);

	key += RTA_ALIGN(rta->rta_len);
	keylen -= RTA_ALIGN(rta->rta_len);

	if (keylen < enckeylen)
		goto badkey;

	authkeylen = keylen - enckeylen;

	if (keylen > TALITOS_MAX_KEY_SIZE)
		goto badkey;

	memcpy(&ctx->key, key, keylen);

	ctx->keylen = keylen;
	ctx->enckeylen = enckeylen;
	ctx->authkeylen = authkeylen;

	return 0;

badkey:
	crypto_aead_set_flags(authenc, CRYPTO_TFM_RES_BAD_KEY_LEN);
	return -EINVAL;
}
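
/*
 * For reference, a sketch (assumption, not driver code, not built) of the
 * authenc() key blob that aead_setkey() above parses: an rtattr carrying a
 * crypto_authenc_key_param with the big-endian enckeylen, followed by the
 * authentication key and then the encryption key. The helper name is
 * hypothetical.
 */
#if 0
static int example_build_authenc_key(u8 *blob, const u8 *authkey,
				     unsigned int authkeylen,
				     const u8 *enckey, unsigned int enckeylen)
{
	struct rtattr *rta = (struct rtattr *)blob;
	struct crypto_authenc_key_param *param;

	rta->rta_type = CRYPTO_AUTHENC_KEYA_PARAM;
	rta->rta_len = RTA_LENGTH(sizeof(*param));
	param = RTA_DATA(rta);
	param->enckeylen = cpu_to_be32(enckeylen);

	/* keys start at RTA_ALIGN(rta->rta_len), which is where
	 * aead_setkey() skips to before splitting auth/enc keys */
	memcpy(blob + RTA_SPACE(sizeof(*param)), authkey, authkeylen);
	memcpy(blob + RTA_SPACE(sizeof(*param)) + authkeylen,
	       enckey, enckeylen);

	return RTA_SPACE(sizeof(*param)) + authkeylen + enckeylen;
}
#endif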
/*
 * talitos_edesc - s/w-extended descriptor
 * @assoc_nents: number of segments in associated data scatterlist
 * @src_nents: number of segments in input scatterlist
 * @dst_nents: number of segments in output scatterlist
 * @assoc_chained: whether assoc is chained or not
 * @src_chained: whether src is chained or not
 * @dst_chained: whether dst is chained or not
 * @iv_dma: dma address of iv for checking continuity and link table
 * @dma_len: length of dma mapped link_tbl space
 * @dma_link_tbl: bus physical address of link_tbl
 * @desc: h/w descriptor
 * @link_tbl: input and output h/w link tables (if {src,dst}_nents > 1)
 *
 * if decrypting (with authcheck), or either one of src_nents or dst_nents
 * is greater than 1, an integrity check value is concatenated to the end
 * of link_tbl data
 */
struct talitos_edesc {
	int assoc_nents;
	int src_nents;
	int dst_nents;
	bool assoc_chained;
	bool src_chained;
	bool dst_chained;
	dma_addr_t iv_dma;
	int dma_len;
	dma_addr_t dma_link_tbl;
	struct talitos_desc desc;
	struct talitos_ptr link_tbl[0];
};
static int talitos_map_sg(struct device *dev, struct scatterlist *sg,
			  unsigned int nents, enum dma_data_direction dir,
			  bool chained)
{
	if (unlikely(chained))
		while (sg) {
			dma_map_sg(dev, sg, 1, dir);
			sg = scatterwalk_sg_next(sg);
		}
	else
		dma_map_sg(dev, sg, nents, dir);
	return nents;
}

static void talitos_unmap_sg_chain(struct device *dev, struct scatterlist *sg,
				   enum dma_data_direction dir)
{
	while (sg) {
		dma_unmap_sg(dev, sg, 1, dir);
		sg = scatterwalk_sg_next(sg);
	}
}

static void talitos_sg_unmap(struct device *dev,
			     struct talitos_edesc *edesc,
			     struct scatterlist *src,
			     struct scatterlist *dst)
{
	unsigned int src_nents = edesc->src_nents ? : 1;
	unsigned int dst_nents = edesc->dst_nents ? : 1;

	if (src != dst) {
		if (edesc->src_chained)
			talitos_unmap_sg_chain(dev, src, DMA_TO_DEVICE);
		else
			dma_unmap_sg(dev, src, src_nents, DMA_TO_DEVICE);

		if (dst) {
			if (edesc->dst_chained)
				talitos_unmap_sg_chain(dev, dst,
						       DMA_FROM_DEVICE);
			else
				dma_unmap_sg(dev, dst, dst_nents,
					     DMA_FROM_DEVICE);
		}
	} else
		if (edesc->src_chained)
			talitos_unmap_sg_chain(dev, src, DMA_BIDIRECTIONAL);
		else
			dma_unmap_sg(dev, src, src_nents, DMA_BIDIRECTIONAL);
}
static void ipsec_esp_unmap(struct device *dev,
			    struct talitos_edesc *edesc,
			    struct aead_request *areq)
{
	unmap_single_talitos_ptr(dev, &edesc->desc.ptr[6], DMA_FROM_DEVICE);
	unmap_single_talitos_ptr(dev, &edesc->desc.ptr[3], DMA_TO_DEVICE);
	unmap_single_talitos_ptr(dev, &edesc->desc.ptr[2], DMA_TO_DEVICE);
	unmap_single_talitos_ptr(dev, &edesc->desc.ptr[0], DMA_TO_DEVICE);

	if (edesc->assoc_chained)
		talitos_unmap_sg_chain(dev, areq->assoc, DMA_TO_DEVICE);
	else
		/* assoc_nents counts also for IV in non-contiguous cases */
		dma_unmap_sg(dev, areq->assoc,
			     edesc->assoc_nents ? edesc->assoc_nents - 1 : 1,
			     DMA_TO_DEVICE);

	talitos_sg_unmap(dev, edesc, areq->src, areq->dst);

	if (edesc->dma_len)
		dma_unmap_single(dev, edesc->dma_link_tbl, edesc->dma_len,
				 DMA_BIDIRECTIONAL);
}
/*
 * ipsec_esp descriptor callbacks
 */
static void ipsec_esp_encrypt_done(struct device *dev,
				   struct talitos_desc *desc, void *context,
				   int err)
{
	struct aead_request *areq = context;
	struct crypto_aead *authenc = crypto_aead_reqtfm(areq);
	struct talitos_ctx *ctx = crypto_aead_ctx(authenc);
	struct talitos_edesc *edesc;
	struct scatterlist *sg;
	void *icvdata;

	edesc = container_of(desc, struct talitos_edesc, desc);

	ipsec_esp_unmap(dev, edesc, areq);

	/* copy the generated ICV to dst */
	if (edesc->dst_nents) {
		icvdata = &edesc->link_tbl[edesc->src_nents +
					   edesc->dst_nents + 2 +
					   edesc->assoc_nents];
		sg = sg_last(areq->dst, edesc->dst_nents);
		memcpy((char *)sg_virt(sg) + sg->length - ctx->authsize,
		       icvdata, ctx->authsize);
	}

	kfree(edesc);

	aead_request_complete(areq, err);
}

static void ipsec_esp_decrypt_swauth_done(struct device *dev,
					  struct talitos_desc *desc,
					  void *context, int err)
{
	struct aead_request *req = context;
	struct crypto_aead *authenc = crypto_aead_reqtfm(req);
	struct talitos_ctx *ctx = crypto_aead_ctx(authenc);
	struct talitos_edesc *edesc;
	struct scatterlist *sg;
	void *icvdata;

	edesc = container_of(desc, struct talitos_edesc, desc);

	ipsec_esp_unmap(dev, edesc, req);

	if (!err) {
		/* auth check */
		if (edesc->dma_len)
			icvdata = &edesc->link_tbl[edesc->src_nents +
						   edesc->dst_nents + 2 +
						   edesc->assoc_nents];
		else
			icvdata = &edesc->link_tbl[0];

		sg = sg_last(req->dst, edesc->dst_nents ? : 1);
		err = memcmp(icvdata, (char *)sg_virt(sg) + sg->length -
			     ctx->authsize, ctx->authsize) ? -EBADMSG : 0;
	}

	kfree(edesc);

	aead_request_complete(req, err);
}

static void ipsec_esp_decrypt_hwauth_done(struct device *dev,
					  struct talitos_desc *desc,
					  void *context, int err)
{
	struct aead_request *req = context;
	struct talitos_edesc *edesc;

	edesc = container_of(desc, struct talitos_edesc, desc);

	ipsec_esp_unmap(dev, edesc, req);

	/* check ICV auth status */
	if (!err && ((desc->hdr_lo & DESC_HDR_LO_ICCR1_MASK) !=
		     DESC_HDR_LO_ICCR1_PASS))
		err = -EBADMSG;

	kfree(edesc);

	aead_request_complete(req, err);
}
/*
 * convert scatterlist to SEC h/w link table format
 * stop at cryptlen bytes
 */
static int sg_to_link_tbl(struct scatterlist *sg, int sg_count,
			  int cryptlen, struct talitos_ptr *link_tbl_ptr)
{
	int n_sg = sg_count;

	while (sg && n_sg--) {
		to_talitos_ptr(link_tbl_ptr, sg_dma_address(sg));
		link_tbl_ptr->len = cpu_to_be16(sg_dma_len(sg));
		link_tbl_ptr->j_extent = 0;
		link_tbl_ptr++;
		cryptlen -= sg_dma_len(sg);
		sg = scatterwalk_sg_next(sg);
	}

	/* adjust (decrease) last one (or two) entry's len to cryptlen */
	link_tbl_ptr--;
	while (be16_to_cpu(link_tbl_ptr->len) <= (-cryptlen)) {
		/* Empty this entry, and move to previous one */
		cryptlen += be16_to_cpu(link_tbl_ptr->len);
		link_tbl_ptr->len = 0;
		sg_count--;
		link_tbl_ptr--;
	}
	link_tbl_ptr->len = cpu_to_be16(be16_to_cpu(link_tbl_ptr->len)
					+ cryptlen);

	/* tag end of link table */
	link_tbl_ptr->j_extent = DESC_PTR_LNKTBL_RETURN;

	return sg_count;
}
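
/*
 * Worked example for sg_to_link_tbl() (illustrative assumption): with
 * cryptlen = 100 and two mapped segments of 64 bytes each, the first pass
 * records entries of length 64 and 64 and leaves cryptlen at -28; the
 * adjustment pass then shrinks the last entry to 36 so the table covers
 * exactly 100 bytes, and tags it with DESC_PTR_LNKTBL_RETURN.
 */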
/*
 * fill in and submit ipsec_esp descriptor
 */
static int ipsec_esp(struct talitos_edesc *edesc, struct aead_request *areq,
		     u64 seq, void (*callback) (struct device *dev,
						struct talitos_desc *desc,
						void *context, int error))
{
	struct crypto_aead *aead = crypto_aead_reqtfm(areq);
	struct talitos_ctx *ctx = crypto_aead_ctx(aead);
	struct device *dev = ctx->dev;
	struct talitos_desc *desc = &edesc->desc;
	unsigned int cryptlen = areq->cryptlen;
	unsigned int authsize = ctx->authsize;
	unsigned int ivsize = crypto_aead_ivsize(aead);
	int sg_count, ret;
	int sg_link_tbl_len;

	/* hmac key */
	map_single_talitos_ptr(dev, &desc->ptr[0], ctx->authkeylen, &ctx->key,
			       0, DMA_TO_DEVICE);

	/* hmac data */
	desc->ptr[1].len = cpu_to_be16(areq->assoclen + ivsize);
	if (edesc->assoc_nents) {
		int tbl_off = edesc->src_nents + edesc->dst_nents + 2;
		struct talitos_ptr *tbl_ptr = &edesc->link_tbl[tbl_off];

		to_talitos_ptr(&desc->ptr[1], edesc->dma_link_tbl + tbl_off *
			       sizeof(struct talitos_ptr));
		desc->ptr[1].j_extent = DESC_PTR_LNKTBL_JUMP;

		/* assoc_nents - 1 entries for assoc, 1 for IV */
		sg_count = sg_to_link_tbl(areq->assoc, edesc->assoc_nents - 1,
					  areq->assoclen, tbl_ptr);

		/* add IV to link table */
		tbl_ptr += sg_count - 1;
		tbl_ptr->j_extent = 0;
		tbl_ptr++;
		to_talitos_ptr(tbl_ptr, edesc->iv_dma);
		tbl_ptr->len = cpu_to_be16(ivsize);
		tbl_ptr->j_extent = DESC_PTR_LNKTBL_RETURN;

		dma_sync_single_for_device(dev, edesc->dma_link_tbl,
					   edesc->dma_len, DMA_BIDIRECTIONAL);
	} else {
		to_talitos_ptr(&desc->ptr[1], sg_dma_address(areq->assoc));
		desc->ptr[1].j_extent = 0;
	}

	/* cipher iv */
	to_talitos_ptr(&desc->ptr[2], edesc->iv_dma);
	desc->ptr[2].len = cpu_to_be16(ivsize);
	desc->ptr[2].j_extent = 0;
	/* Sync needed for the aead_givencrypt case */
	dma_sync_single_for_device(dev, edesc->iv_dma, ivsize, DMA_TO_DEVICE);

	/* cipher key */
	map_single_talitos_ptr(dev, &desc->ptr[3], ctx->enckeylen,
			       (char *)&ctx->key + ctx->authkeylen, 0,
			       DMA_TO_DEVICE);

	/*
	 * cipher in
	 * map and adjust cipher len to aead request cryptlen.
	 * extent is bytes of HMAC postpended to ciphertext,
	 * typically 12 for ipsec
	 */
	desc->ptr[4].len = cpu_to_be16(cryptlen);
	desc->ptr[4].j_extent = authsize;

	sg_count = talitos_map_sg(dev, areq->src, edesc->src_nents ? : 1,
				  (areq->src == areq->dst) ? DMA_BIDIRECTIONAL
							   : DMA_TO_DEVICE,
				  edesc->src_chained);

	if (sg_count == 1) {
		to_talitos_ptr(&desc->ptr[4], sg_dma_address(areq->src));
	} else {
		sg_link_tbl_len = cryptlen;

		if (edesc->desc.hdr & DESC_HDR_MODE1_MDEU_CICV)
			sg_link_tbl_len = cryptlen + authsize;

		sg_count = sg_to_link_tbl(areq->src, sg_count, sg_link_tbl_len,
					  &edesc->link_tbl[0]);
		if (sg_count > 1) {
			desc->ptr[4].j_extent |= DESC_PTR_LNKTBL_JUMP;
			to_talitos_ptr(&desc->ptr[4], edesc->dma_link_tbl);
			dma_sync_single_for_device(dev, edesc->dma_link_tbl,
						   edesc->dma_len,
						   DMA_BIDIRECTIONAL);
		} else {
			/* Only one segment now, so no link tbl needed */
			to_talitos_ptr(&desc->ptr[4],
				       sg_dma_address(areq->src));
		}
	}

	/* cipher out */
	desc->ptr[5].len = cpu_to_be16(cryptlen);
	desc->ptr[5].j_extent = authsize;

	if (areq->src != areq->dst)
		sg_count = talitos_map_sg(dev, areq->dst,
					  edesc->dst_nents ? : 1,
					  DMA_FROM_DEVICE, edesc->dst_chained);

	if (sg_count == 1) {
		to_talitos_ptr(&desc->ptr[5], sg_dma_address(areq->dst));
	} else {
		int tbl_off = edesc->src_nents + 1;
		struct talitos_ptr *tbl_ptr = &edesc->link_tbl[tbl_off];

		to_talitos_ptr(&desc->ptr[5], edesc->dma_link_tbl +
			       tbl_off * sizeof(struct talitos_ptr));
		sg_count = sg_to_link_tbl(areq->dst, sg_count, cryptlen,
					  tbl_ptr);

		/* Add an entry to the link table for ICV data */
		tbl_ptr += sg_count - 1;
		tbl_ptr->j_extent = 0;
		tbl_ptr++;
		tbl_ptr->j_extent = DESC_PTR_LNKTBL_RETURN;
		tbl_ptr->len = cpu_to_be16(authsize);

		/* icv data follows link tables */
		to_talitos_ptr(tbl_ptr, edesc->dma_link_tbl +
			       (tbl_off + edesc->dst_nents + 1 +
				edesc->assoc_nents) *
			       sizeof(struct talitos_ptr));
		desc->ptr[5].j_extent |= DESC_PTR_LNKTBL_JUMP;
		dma_sync_single_for_device(ctx->dev, edesc->dma_link_tbl,
					   edesc->dma_len, DMA_BIDIRECTIONAL);
	}

	/* iv out */
	map_single_talitos_ptr(dev, &desc->ptr[6], ivsize, ctx->iv, 0,
			       DMA_FROM_DEVICE);

	ret = talitos_submit(dev, ctx->ch, desc, callback, areq);
	if (ret != -EINPROGRESS) {
		ipsec_esp_unmap(dev, edesc, areq);
		kfree(edesc);
	}
	return ret;
}
/*
 * derive number of elements in scatterlist
 */
static int sg_count(struct scatterlist *sg_list, int nbytes, bool *chained)
{
	struct scatterlist *sg = sg_list;
	int sg_nents = 0;

	*chained = false;
	while (nbytes > 0) {
		sg_nents++;
		nbytes -= sg->length;
		if (!sg_is_last(sg) && (sg + 1)->length == 0)
			*chained = true;
		sg = scatterwalk_sg_next(sg);
	}

	return sg_nents;
}
/**
 * sg_copy_end_to_buffer - Copy end data from SG list to a linear buffer
 * @sgl:	The SG list
 * @nents:	Number of SG entries
 * @buf:	Where to copy to
 * @buflen:	The number of bytes to copy
 * @skip:	The number of bytes to skip before copying.
 *		Note: skip + buflen should equal SG total size.
 *
 * Returns the number of copied bytes.
 *
 **/
static size_t sg_copy_end_to_buffer(struct scatterlist *sgl, unsigned int nents,
				    void *buf, size_t buflen, unsigned int skip)
{
	unsigned int offset = 0;
	unsigned int boffset = 0;
	struct sg_mapping_iter miter;
	unsigned long flags;
	unsigned int sg_flags = SG_MITER_ATOMIC;
	size_t total_buffer = buflen + skip;

	sg_flags |= SG_MITER_FROM_SG;

	sg_miter_start(&miter, sgl, nents, sg_flags);

	local_irq_save(flags);

	while (sg_miter_next(&miter) && offset < total_buffer) {
		unsigned int len;
		unsigned int ignore;

		if ((offset + miter.length) > skip) {
			if (offset < skip) {
				/* Copy part of this segment */
				ignore = skip - offset;
				len = miter.length - ignore;
				if (boffset + len > buflen)
					len = buflen - boffset;
				memcpy(buf + boffset, miter.addr + ignore, len);
			} else {
				/* Copy all of this segment (up to buflen) */
				len = miter.length;
				if (boffset + len > buflen)
					len = buflen - boffset;
				memcpy(buf + boffset, miter.addr, len);
			}
			boffset += len;
		}
		offset += miter.length;
	}

	sg_miter_stop(&miter);

	local_irq_restore(flags);

	return boffset;
}
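
/*
 * Usage sketch (assumption, not driver code, not built): stashing the
 * trailing bytes of a request's scatterlist, as ahash_process_req() does
 * further below when a partial block must be kept for the next update.
 * The helper name and parameters are hypothetical.
 */
#if 0
static void example_stash_tail(struct scatterlist *src, unsigned int nbytes,
			       unsigned int to_hash_later, u8 *bufnext)
{
	bool chained;
	int nents = sg_count(src, nbytes, &chained);

	/* skip the first (nbytes - to_hash_later) bytes, copy the rest */
	sg_copy_end_to_buffer(src, nents, bufnext, to_hash_later,
			      nbytes - to_hash_later);
}
#endif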
/*
 * allocate and map the extended descriptor
 */
static struct talitos_edesc *talitos_edesc_alloc(struct device *dev,
						 struct scatterlist *assoc,
						 struct scatterlist *src,
						 struct scatterlist *dst,
						 u8 *iv,
						 unsigned int assoclen,
						 unsigned int cryptlen,
						 unsigned int authsize,
						 unsigned int ivsize,
						 int icv_stashing,
						 u32 cryptoflags)
{
	struct talitos_edesc *edesc;
	int assoc_nents = 0, src_nents, dst_nents, alloc_len, dma_len;
	bool assoc_chained = false, src_chained = false, dst_chained = false;
	dma_addr_t iv_dma = 0;
	gfp_t flags = cryptoflags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL :
		      GFP_ATOMIC;

	if (cryptlen + authsize > TALITOS_MAX_DATA_LEN) {
		dev_err(dev, "length exceeds h/w max limit\n");
		return ERR_PTR(-EINVAL);
	}

	if (iv)
		iv_dma = dma_map_single(dev, iv, ivsize, DMA_TO_DEVICE);

	if (assoc) {
		/*
		 * Currently it is assumed that iv is provided whenever assoc
		 * is.
		 */
		assoc_nents = sg_count(assoc, assoclen, &assoc_chained);
		talitos_map_sg(dev, assoc, assoc_nents, DMA_TO_DEVICE,
			       assoc_chained);
		assoc_nents = (assoc_nents == 1) ? 0 : assoc_nents;

		if (assoc_nents || sg_dma_address(assoc) + assoclen != iv_dma)
			assoc_nents = assoc_nents ? assoc_nents + 1 : 2;
	}

	src_nents = sg_count(src, cryptlen + authsize, &src_chained);
	src_nents = (src_nents == 1) ? 0 : src_nents;

	if (!dst) {
		dst_nents = 0;
	} else {
		if (dst == src) {
			dst_nents = src_nents;
		} else {
			dst_nents = sg_count(dst, cryptlen + authsize,
					     &dst_chained);
			dst_nents = (dst_nents == 1) ? 0 : dst_nents;
		}
	}

	/*
	 * allocate space for base edesc plus the link tables,
	 * allowing for two separate entries for ICV and generated ICV (+ 2),
	 * and the ICV data itself
	 */
	alloc_len = sizeof(struct talitos_edesc);
	if (assoc_nents || src_nents || dst_nents) {
		dma_len = (src_nents + dst_nents + 2 + assoc_nents) *
			  sizeof(struct talitos_ptr) + authsize;
		alloc_len += dma_len;
	} else {
		dma_len = 0;
		alloc_len += icv_stashing ? authsize : 0;
	}

	edesc = kmalloc(alloc_len, GFP_DMA | flags);
	if (!edesc) {
		if (assoc_chained)
			talitos_unmap_sg_chain(dev, assoc, DMA_TO_DEVICE);
		else if (assoc)
			dma_unmap_sg(dev, assoc,
				     assoc_nents ? assoc_nents - 1 : 1,
				     DMA_TO_DEVICE);

		if (iv_dma)
			dma_unmap_single(dev, iv_dma, ivsize, DMA_TO_DEVICE);

		dev_err(dev, "could not allocate edescriptor\n");
		return ERR_PTR(-ENOMEM);
	}

	edesc->assoc_nents = assoc_nents;
	edesc->src_nents = src_nents;
	edesc->dst_nents = dst_nents;
	edesc->assoc_chained = assoc_chained;
	edesc->src_chained = src_chained;
	edesc->dst_chained = dst_chained;
	edesc->iv_dma = iv_dma;
	edesc->dma_len = dma_len;
	if (dma_len)
		edesc->dma_link_tbl = dma_map_single(dev, &edesc->link_tbl[0],
						     edesc->dma_len,
						     DMA_BIDIRECTIONAL);

	return edesc;
}
static struct talitos_edesc *aead_edesc_alloc(struct aead_request *areq, u8 *iv,
					      int icv_stashing)
{
	struct crypto_aead *authenc = crypto_aead_reqtfm(areq);
	struct talitos_ctx *ctx = crypto_aead_ctx(authenc);
	unsigned int ivsize = crypto_aead_ivsize(authenc);

	return talitos_edesc_alloc(ctx->dev, areq->assoc, areq->src, areq->dst,
				   iv, areq->assoclen, areq->cryptlen,
				   ctx->authsize, ivsize, icv_stashing,
				   areq->base.flags);
}
static int aead_encrypt(struct aead_request *req)
{
	struct crypto_aead *authenc = crypto_aead_reqtfm(req);
	struct talitos_ctx *ctx = crypto_aead_ctx(authenc);
	struct talitos_edesc *edesc;

	/* allocate extended descriptor */
	edesc = aead_edesc_alloc(req, req->iv, 0);
	if (IS_ERR(edesc))
		return PTR_ERR(edesc);

	/* set encrypt */
	edesc->desc.hdr = ctx->desc_hdr_template | DESC_HDR_MODE0_ENCRYPT;

	return ipsec_esp(edesc, req, 0, ipsec_esp_encrypt_done);
}
static int aead_decrypt(struct aead_request *req)
{
	struct crypto_aead *authenc = crypto_aead_reqtfm(req);
	struct talitos_ctx *ctx = crypto_aead_ctx(authenc);
	unsigned int authsize = ctx->authsize;
	struct talitos_private *priv = dev_get_drvdata(ctx->dev);
	struct talitos_edesc *edesc;
	struct scatterlist *sg;
	void *icvdata;

	req->cryptlen -= authsize;

	/* allocate extended descriptor */
	edesc = aead_edesc_alloc(req, req->iv, 1);
	if (IS_ERR(edesc))
		return PTR_ERR(edesc);

	if ((priv->features & TALITOS_FTR_HW_AUTH_CHECK) &&
	    ((!edesc->src_nents && !edesc->dst_nents) ||
	     priv->features & TALITOS_FTR_SRC_LINK_TBL_LEN_INCLUDES_EXTENT)) {

		/* decrypt and check the ICV */
		edesc->desc.hdr = ctx->desc_hdr_template |
				  DESC_HDR_DIR_INBOUND |
				  DESC_HDR_MODE1_MDEU_CICV;

		/* reset integrity check result bits */
		edesc->desc.hdr_lo = 0;

		return ipsec_esp(edesc, req, 0, ipsec_esp_decrypt_hwauth_done);
	}

	/* Have to check the ICV with software */
	edesc->desc.hdr = ctx->desc_hdr_template | DESC_HDR_DIR_INBOUND;

	/* stash incoming ICV for later cmp with ICV generated by the h/w */
	if (edesc->dma_len)
		icvdata = &edesc->link_tbl[edesc->src_nents +
					   edesc->dst_nents + 2 +
					   edesc->assoc_nents];
	else
		icvdata = &edesc->link_tbl[0];

	sg = sg_last(req->src, edesc->src_nents ? : 1);

	memcpy(icvdata, (char *)sg_virt(sg) + sg->length - ctx->authsize,
	       ctx->authsize);

	return ipsec_esp(edesc, req, 0, ipsec_esp_decrypt_swauth_done);
}
static int aead_givencrypt(struct aead_givcrypt_request *req)
{
	struct aead_request *areq = &req->areq;
	struct crypto_aead *authenc = crypto_aead_reqtfm(areq);
	struct talitos_ctx *ctx = crypto_aead_ctx(authenc);
	struct talitos_edesc *edesc;

	/* allocate extended descriptor */
	edesc = aead_edesc_alloc(areq, req->giv, 0);
	if (IS_ERR(edesc))
		return PTR_ERR(edesc);

	/* set encrypt */
	edesc->desc.hdr = ctx->desc_hdr_template | DESC_HDR_MODE0_ENCRYPT;

	memcpy(req->giv, ctx->iv, crypto_aead_ivsize(authenc));
	/* avoid consecutive packets going out with same IV */
	*(__be64 *)req->giv ^= cpu_to_be64(req->seq);

	return ipsec_esp(edesc, areq, req->seq, ipsec_esp_encrypt_done);
}
static int ablkcipher_setkey(struct crypto_ablkcipher *cipher,
			     const u8 *key, unsigned int keylen)
{
	struct talitos_ctx *ctx = crypto_ablkcipher_ctx(cipher);

	memcpy(&ctx->key, key, keylen);
	ctx->keylen = keylen;

	return 0;
}
static void common_nonsnoop_unmap(struct device *dev,
				  struct talitos_edesc *edesc,
				  struct ablkcipher_request *areq)
{
	unmap_single_talitos_ptr(dev, &edesc->desc.ptr[5], DMA_FROM_DEVICE);
	unmap_single_talitos_ptr(dev, &edesc->desc.ptr[2], DMA_TO_DEVICE);
	unmap_single_talitos_ptr(dev, &edesc->desc.ptr[1], DMA_TO_DEVICE);

	talitos_sg_unmap(dev, edesc, areq->src, areq->dst);

	if (edesc->dma_len)
		dma_unmap_single(dev, edesc->dma_link_tbl, edesc->dma_len,
				 DMA_BIDIRECTIONAL);
}
static void ablkcipher_done(struct device *dev,
			    struct talitos_desc *desc, void *context,
			    int err)
{
	struct ablkcipher_request *areq = context;
	struct talitos_edesc *edesc;

	edesc = container_of(desc, struct talitos_edesc, desc);

	common_nonsnoop_unmap(dev, edesc, areq);

	kfree(edesc);

	areq->base.complete(&areq->base, err);
}
static int common_nonsnoop(struct talitos_edesc *edesc,
			   struct ablkcipher_request *areq,
			   void (*callback) (struct device *dev,
					     struct talitos_desc *desc,
					     void *context, int error))
{
	struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(areq);
	struct talitos_ctx *ctx = crypto_ablkcipher_ctx(cipher);
	struct device *dev = ctx->dev;
	struct talitos_desc *desc = &edesc->desc;
	unsigned int cryptlen = areq->nbytes;
	unsigned int ivsize = crypto_ablkcipher_ivsize(cipher);
	int sg_count, ret;

	/* first DWORD empty */
	desc->ptr[0].len = 0;
	to_talitos_ptr(&desc->ptr[0], 0);
	desc->ptr[0].j_extent = 0;

	/* cipher iv */
	to_talitos_ptr(&desc->ptr[1], edesc->iv_dma);
	desc->ptr[1].len = cpu_to_be16(ivsize);
	desc->ptr[1].j_extent = 0;

	/* cipher key */
	map_single_talitos_ptr(dev, &desc->ptr[2], ctx->keylen,
			       (char *)&ctx->key, 0, DMA_TO_DEVICE);

	/*
	 * cipher in
	 */
	desc->ptr[3].len = cpu_to_be16(cryptlen);
	desc->ptr[3].j_extent = 0;

	sg_count = talitos_map_sg(dev, areq->src, edesc->src_nents ? : 1,
				  (areq->src == areq->dst) ? DMA_BIDIRECTIONAL
							   : DMA_TO_DEVICE,
				  edesc->src_chained);

	if (sg_count == 1) {
		to_talitos_ptr(&desc->ptr[3], sg_dma_address(areq->src));
	} else {
		sg_count = sg_to_link_tbl(areq->src, sg_count, cryptlen,
					  &edesc->link_tbl[0]);
		if (sg_count > 1) {
			to_talitos_ptr(&desc->ptr[3], edesc->dma_link_tbl);
			desc->ptr[3].j_extent |= DESC_PTR_LNKTBL_JUMP;
			dma_sync_single_for_device(dev, edesc->dma_link_tbl,
						   edesc->dma_len,
						   DMA_BIDIRECTIONAL);
		} else {
			/* Only one segment now, so no link tbl needed */
			to_talitos_ptr(&desc->ptr[3],
				       sg_dma_address(areq->src));
		}
	}

	/* cipher out */
	desc->ptr[4].len = cpu_to_be16(cryptlen);
	desc->ptr[4].j_extent = 0;

	if (areq->src != areq->dst)
		sg_count = talitos_map_sg(dev, areq->dst,
					  edesc->dst_nents ? : 1,
					  DMA_FROM_DEVICE, edesc->dst_chained);

	if (sg_count == 1) {
		to_talitos_ptr(&desc->ptr[4], sg_dma_address(areq->dst));
	} else {
		struct talitos_ptr *link_tbl_ptr =
			&edesc->link_tbl[edesc->src_nents + 1];

		to_talitos_ptr(&desc->ptr[4], edesc->dma_link_tbl +
			       (edesc->src_nents + 1) *
			       sizeof(struct talitos_ptr));
		desc->ptr[4].j_extent |= DESC_PTR_LNKTBL_JUMP;
		sg_count = sg_to_link_tbl(areq->dst, sg_count, cryptlen,
					  link_tbl_ptr);
		dma_sync_single_for_device(ctx->dev, edesc->dma_link_tbl,
					   edesc->dma_len, DMA_BIDIRECTIONAL);
	}

	/* iv out */
	map_single_talitos_ptr(dev, &desc->ptr[5], ivsize, ctx->iv, 0,
			       DMA_FROM_DEVICE);

	/* last DWORD empty */
	desc->ptr[6].len = 0;
	to_talitos_ptr(&desc->ptr[6], 0);
	desc->ptr[6].j_extent = 0;

	ret = talitos_submit(dev, ctx->ch, desc, callback, areq);
	if (ret != -EINPROGRESS) {
		common_nonsnoop_unmap(dev, edesc, areq);
		kfree(edesc);
	}
	return ret;
}
static struct talitos_edesc *ablkcipher_edesc_alloc(struct ablkcipher_request *
						    areq)
{
	struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(areq);
	struct talitos_ctx *ctx = crypto_ablkcipher_ctx(cipher);
	unsigned int ivsize = crypto_ablkcipher_ivsize(cipher);

	return talitos_edesc_alloc(ctx->dev, NULL, areq->src, areq->dst,
				   areq->info, 0, areq->nbytes, 0, ivsize, 0,
				   areq->base.flags);
}
static int ablkcipher_encrypt(struct ablkcipher_request *areq)
{
	struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(areq);
	struct talitos_ctx *ctx = crypto_ablkcipher_ctx(cipher);
	struct talitos_edesc *edesc;

	/* allocate extended descriptor */
	edesc = ablkcipher_edesc_alloc(areq);
	if (IS_ERR(edesc))
		return PTR_ERR(edesc);

	/* set encrypt */
	edesc->desc.hdr = ctx->desc_hdr_template | DESC_HDR_MODE0_ENCRYPT;

	return common_nonsnoop(edesc, areq, ablkcipher_done);
}

static int ablkcipher_decrypt(struct ablkcipher_request *areq)
{
	struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(areq);
	struct talitos_ctx *ctx = crypto_ablkcipher_ctx(cipher);
	struct talitos_edesc *edesc;

	/* allocate extended descriptor */
	edesc = ablkcipher_edesc_alloc(areq);
	if (IS_ERR(edesc))
		return PTR_ERR(edesc);

	edesc->desc.hdr = ctx->desc_hdr_template | DESC_HDR_DIR_INBOUND;

	return common_nonsnoop(edesc, areq, ablkcipher_done);
}
static void common_nonsnoop_hash_unmap(struct device *dev,
				       struct talitos_edesc *edesc,
				       struct ahash_request *areq)
{
	struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);

	unmap_single_talitos_ptr(dev, &edesc->desc.ptr[5], DMA_FROM_DEVICE);

	/* When using hashctx-in, must unmap it. */
	if (edesc->desc.ptr[1].len)
		unmap_single_talitos_ptr(dev, &edesc->desc.ptr[1],
					 DMA_TO_DEVICE);

	if (edesc->desc.ptr[2].len)
		unmap_single_talitos_ptr(dev, &edesc->desc.ptr[2],
					 DMA_TO_DEVICE);

	talitos_sg_unmap(dev, edesc, req_ctx->psrc, NULL);

	if (edesc->dma_len)
		dma_unmap_single(dev, edesc->dma_link_tbl, edesc->dma_len,
				 DMA_BIDIRECTIONAL);
}
static void ahash_done(struct device *dev,
		       struct talitos_desc *desc, void *context,
		       int err)
{
	struct ahash_request *areq = context;
	struct talitos_edesc *edesc =
		container_of(desc, struct talitos_edesc, desc);
	struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);

	if (!req_ctx->last && req_ctx->to_hash_later) {
		/* Position any partial block for next update/final/finup */
		memcpy(req_ctx->buf, req_ctx->bufnext, req_ctx->to_hash_later);
		req_ctx->nbuf = req_ctx->to_hash_later;
	}
	common_nonsnoop_hash_unmap(dev, edesc, areq);

	kfree(edesc);

	areq->base.complete(&areq->base, err);
}
static int common_nonsnoop_hash(struct talitos_edesc *edesc,
				struct ahash_request *areq, unsigned int length,
				void (*callback) (struct device *dev,
						  struct talitos_desc *desc,
						  void *context, int error))
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
	struct talitos_ctx *ctx = crypto_ahash_ctx(tfm);
	struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
	struct device *dev = ctx->dev;
	struct talitos_desc *desc = &edesc->desc;
	int sg_count, ret;

	/* first DWORD empty */
	desc->ptr[0] = zero_entry;

	/* hash context in */
	if (!req_ctx->first || req_ctx->swinit) {
		map_single_talitos_ptr(dev, &desc->ptr[1],
				       req_ctx->hw_context_size,
				       (char *)req_ctx->hw_context, 0,
				       DMA_TO_DEVICE);
		req_ctx->swinit = 0;
	} else {
		desc->ptr[1] = zero_entry;
		/* Indicate next op is not the first. */
		req_ctx->first = 0;
	}

	/* HMAC key */
	if (ctx->keylen)
		map_single_talitos_ptr(dev, &desc->ptr[2], ctx->keylen,
				       (char *)&ctx->key, 0, DMA_TO_DEVICE);
	else
		desc->ptr[2] = zero_entry;

	/*
	 * data in
	 */
	desc->ptr[3].len = cpu_to_be16(length);
	desc->ptr[3].j_extent = 0;

	sg_count = talitos_map_sg(dev, req_ctx->psrc,
				  edesc->src_nents ? : 1,
				  DMA_TO_DEVICE, edesc->src_chained);

	if (sg_count == 1) {
		to_talitos_ptr(&desc->ptr[3], sg_dma_address(req_ctx->psrc));
	} else {
		sg_count = sg_to_link_tbl(req_ctx->psrc, sg_count, length,
					  &edesc->link_tbl[0]);
		if (sg_count > 1) {
			desc->ptr[3].j_extent |= DESC_PTR_LNKTBL_JUMP;
			to_talitos_ptr(&desc->ptr[3], edesc->dma_link_tbl);
			dma_sync_single_for_device(ctx->dev,
						   edesc->dma_link_tbl,
						   edesc->dma_len,
						   DMA_BIDIRECTIONAL);
		} else {
			/* Only one segment now, so no link tbl needed */
			to_talitos_ptr(&desc->ptr[3],
				       sg_dma_address(req_ctx->psrc));
		}
	}

	/* fifth DWORD empty */
	desc->ptr[4] = zero_entry;

	/* hash/HMAC out -or- hash context out */
	if (req_ctx->last)
		map_single_talitos_ptr(dev, &desc->ptr[5],
				       crypto_ahash_digestsize(tfm),
				       areq->result, 0, DMA_FROM_DEVICE);
	else
		map_single_talitos_ptr(dev, &desc->ptr[5],
				       req_ctx->hw_context_size,
				       req_ctx->hw_context, 0, DMA_FROM_DEVICE);

	/* last DWORD empty */
	desc->ptr[6] = zero_entry;

	ret = talitos_submit(dev, ctx->ch, desc, callback, areq);
	if (ret != -EINPROGRESS) {
		common_nonsnoop_hash_unmap(dev, edesc, areq);
		kfree(edesc);
	}
	return ret;
}
static struct talitos_edesc *ahash_edesc_alloc(struct ahash_request *areq,
					       unsigned int nbytes)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
	struct talitos_ctx *ctx = crypto_ahash_ctx(tfm);
	struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);

	return talitos_edesc_alloc(ctx->dev, NULL, req_ctx->psrc, NULL, NULL, 0,
				   nbytes, 0, 0, 0, areq->base.flags);
}
static int ahash_init(struct ahash_request *areq)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
	struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);

	/* Initialize the context */
	req_ctx->nbuf = 0;
	req_ctx->first = 1; /* first indicates h/w must init its context */
	req_ctx->swinit = 0; /* assume h/w init of context */
	req_ctx->hw_context_size =
		(crypto_ahash_digestsize(tfm) <= SHA256_DIGEST_SIZE)
			? TALITOS_MDEU_CONTEXT_SIZE_MD5_SHA1_SHA256
			: TALITOS_MDEU_CONTEXT_SIZE_SHA384_SHA512;

	return 0;
}
/*
 * on h/w without explicit sha224 support, we initialize h/w context
 * manually with sha224 constants, and tell it to run sha256.
 */
static int ahash_init_sha224_swinit(struct ahash_request *areq)
{
	struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);

	ahash_init(areq);
	req_ctx->swinit = 1;/* prevent h/w initting context with sha256 values*/

	req_ctx->hw_context[0] = SHA224_H0;
	req_ctx->hw_context[1] = SHA224_H1;
	req_ctx->hw_context[2] = SHA224_H2;
	req_ctx->hw_context[3] = SHA224_H3;
	req_ctx->hw_context[4] = SHA224_H4;
	req_ctx->hw_context[5] = SHA224_H5;
	req_ctx->hw_context[6] = SHA224_H6;
	req_ctx->hw_context[7] = SHA224_H7;

	/* init 64-bit count */
	req_ctx->hw_context[8] = 0;
	req_ctx->hw_context[9] = 0;

	return 0;
}
static int ahash_process_req(struct ahash_request *areq, unsigned int nbytes)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
	struct talitos_ctx *ctx = crypto_ahash_ctx(tfm);
	struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
	struct talitos_edesc *edesc;
	unsigned int blocksize =
			crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm));
	unsigned int nbytes_to_hash;
	unsigned int to_hash_later;
	unsigned int nsg;
	bool chained;

	if (!req_ctx->last && (nbytes + req_ctx->nbuf <= blocksize)) {
		/* Buffer up to one whole block */
		sg_copy_to_buffer(areq->src,
				  sg_count(areq->src, nbytes, &chained),
				  req_ctx->buf + req_ctx->nbuf, nbytes);
		req_ctx->nbuf += nbytes;
		return 0;
	}

	/* At least (blocksize + 1) bytes are available to hash */
	nbytes_to_hash = nbytes + req_ctx->nbuf;
	to_hash_later = nbytes_to_hash & (blocksize - 1);

	if (req_ctx->last)
		to_hash_later = 0;
	else if (to_hash_later)
		/* There is a partial block. Hash the full block(s) now */
		nbytes_to_hash -= to_hash_later;
	else {
		/* Keep one block buffered */
		nbytes_to_hash -= blocksize;
		to_hash_later = blocksize;
	}

	/* Chain in any previously buffered data */
	if (req_ctx->nbuf) {
		nsg = (req_ctx->nbuf < nbytes_to_hash) ? 2 : 1;
		sg_init_table(req_ctx->bufsl, nsg);
		sg_set_buf(req_ctx->bufsl, req_ctx->buf, req_ctx->nbuf);
		if (nsg > 1)
			scatterwalk_sg_chain(req_ctx->bufsl, 2, areq->src);
		req_ctx->psrc = req_ctx->bufsl;
	} else
		req_ctx->psrc = areq->src;

	if (to_hash_later) {
		int nents = sg_count(areq->src, nbytes, &chained);
		sg_copy_end_to_buffer(areq->src, nents,
				      req_ctx->bufnext,
				      to_hash_later,
				      nbytes - to_hash_later);
	}
	req_ctx->to_hash_later = to_hash_later;

	/* Allocate extended descriptor */
	edesc = ahash_edesc_alloc(areq, nbytes_to_hash);
	if (IS_ERR(edesc))
		return PTR_ERR(edesc);

	edesc->desc.hdr = ctx->desc_hdr_template;

	/* On last one, request SEC to pad; otherwise continue */
	if (req_ctx->last)
		edesc->desc.hdr |= DESC_HDR_MODE0_MDEU_PAD;
	else
		edesc->desc.hdr |= DESC_HDR_MODE0_MDEU_CONT;

	/* request SEC to INIT hash. */
	if (req_ctx->first && !req_ctx->swinit)
		edesc->desc.hdr |= DESC_HDR_MODE0_MDEU_INIT;

	/* When the tfm context has a keylen, it's an HMAC.
	 * A first or last (ie. not middle) descriptor must request HMAC.
	 */
	if (ctx->keylen && (req_ctx->first || req_ctx->last))
		edesc->desc.hdr |= DESC_HDR_MODE0_MDEU_HMAC;

	return common_nonsnoop_hash(edesc, areq, nbytes_to_hash,
				    ahash_done);
}

static int ahash_update(struct ahash_request *areq)
{
	struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);

	req_ctx->last = 0;

	return ahash_process_req(areq, areq->nbytes);
}

static int ahash_final(struct ahash_request *areq)
{
	struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);

	req_ctx->last = 1;

	return ahash_process_req(areq, 0);
}

static int ahash_finup(struct ahash_request *areq)
{
	struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);

	req_ctx->last = 1;

	return ahash_process_req(areq, areq->nbytes);
}

static int ahash_digest(struct ahash_request *areq)
{
	struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(areq);

	ahash->init(areq);
	req_ctx->last = 1;

	return ahash_process_req(areq, areq->nbytes);
}
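
/*
 * Worked example for the buffering in ahash_process_req() above
 * (illustrative assumption): an update of nbytes = 100 into an empty
 * buffer with blocksize = 64 gives nbytes_to_hash = 100 and
 * to_hash_later = 100 & 63 = 36, so 64 bytes are hashed now and 36 bytes
 * are copied to bufnext for the next update/final/finup.
 */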
struct keyhash_result {
	struct completion completion;
	int err;
};

static void keyhash_complete(struct crypto_async_request *req, int err)
{
	struct keyhash_result *res = req->data;

	if (err == -EINPROGRESS)
		return;

	res->err = err;
	complete(&res->completion);
}

static int keyhash(struct crypto_ahash *tfm, const u8 *key, unsigned int keylen,
		   u8 *hash)
{
	struct talitos_ctx *ctx = crypto_tfm_ctx(crypto_ahash_tfm(tfm));

	struct scatterlist sg[1];
	struct ahash_request *req;
	struct keyhash_result hresult;
	int ret;

	init_completion(&hresult.completion);

	req = ahash_request_alloc(tfm, GFP_KERNEL);
	if (!req)
		return -ENOMEM;

	/* Keep tfm keylen == 0 during hash of the long key */
	ctx->keylen = 0;
	ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
				   keyhash_complete, &hresult);

	sg_init_one(&sg[0], key, keylen);

	ahash_request_set_crypt(req, sg, hash, keylen);
	ret = crypto_ahash_digest(req);
	switch (ret) {
	case 0:
		break;
	case -EINPROGRESS:
	case -EBUSY:
		ret = wait_for_completion_interruptible(
			&hresult.completion);
		if (!ret)
			ret = hresult.err;
		break;
	default:
		break;
	}
	ahash_request_free(req);

	return ret;
}
static int ahash_setkey(struct crypto_ahash *tfm, const u8 *key,
			unsigned int keylen)
{
	struct talitos_ctx *ctx = crypto_tfm_ctx(crypto_ahash_tfm(tfm));
	unsigned int blocksize =
			crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm));
	unsigned int digestsize = crypto_ahash_digestsize(tfm);
	unsigned int keysize = keylen;
	u8 hash[SHA512_DIGEST_SIZE];
	int ret;

	if (keylen <= blocksize)
		memcpy(ctx->key, key, keysize);
	else {
		/* Must get the hash of the long key */
		ret = keyhash(tfm, key, keylen, hash);

		if (ret) {
			crypto_ahash_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
			return -EINVAL;
		}

		keysize = digestsize;
		memcpy(ctx->key, hash, digestsize);
	}

	ctx->keylen = keysize;

	return 0;
}
struct talitos_alg_template {
	u32 type;
	union {
		struct crypto_alg crypto;
		struct ahash_alg hash;
	} alg;
	__be32 desc_hdr_template;
};
1977 static struct talitos_alg_template driver_algs
[] = {
1979 * AEAD algorithms. These use a single-pass ipsec_esp descriptor.
1980 * authencesn(*,*) is also registered, although not present
1983 { .type
= CRYPTO_ALG_TYPE_AEAD
,
1985 .cra_name
= "authenc(hmac(sha1),cbc(aes))",
1986 .cra_driver_name
= "authenc-hmac-sha1-cbc-aes-talitos",
1987 .cra_blocksize
= AES_BLOCK_SIZE
,
1988 .cra_flags
= CRYPTO_ALG_TYPE_AEAD
| CRYPTO_ALG_ASYNC
,
1990 .ivsize
= AES_BLOCK_SIZE
,
				.maxauthsize = SHA1_DIGEST_SIZE,
			}
		},
		.desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
				     DESC_HDR_SEL0_AESU |
				     DESC_HDR_MODE0_AESU_CBC |
				     DESC_HDR_SEL1_MDEUA |
				     DESC_HDR_MODE1_MDEU_INIT |
				     DESC_HDR_MODE1_MDEU_PAD |
				     DESC_HDR_MODE1_MDEU_SHA1_HMAC,
	},
	{	.type = CRYPTO_ALG_TYPE_AEAD,
		.alg.crypto = {
			.cra_name = "authenc(hmac(sha1),cbc(des3_ede))",
			.cra_driver_name = "authenc-hmac-sha1-cbc-3des-talitos",
			.cra_blocksize = DES3_EDE_BLOCK_SIZE,
			.cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
			.cra_aead = {
				.ivsize = DES3_EDE_BLOCK_SIZE,
				.maxauthsize = SHA1_DIGEST_SIZE,
			}
		},
		.desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
				     DESC_HDR_SEL0_DEU |
				     DESC_HDR_MODE0_DEU_CBC |
				     DESC_HDR_MODE0_DEU_3DES |
				     DESC_HDR_SEL1_MDEUA |
				     DESC_HDR_MODE1_MDEU_INIT |
				     DESC_HDR_MODE1_MDEU_PAD |
				     DESC_HDR_MODE1_MDEU_SHA1_HMAC,
	},
	{	.type = CRYPTO_ALG_TYPE_AEAD,
		.alg.crypto = {
			.cra_name = "authenc(hmac(sha224),cbc(aes))",
			.cra_driver_name = "authenc-hmac-sha224-cbc-aes-talitos",
			.cra_blocksize = AES_BLOCK_SIZE,
			.cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
			.cra_aead = {
				.ivsize = AES_BLOCK_SIZE,
				.maxauthsize = SHA224_DIGEST_SIZE,
			}
		},
		.desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
				     DESC_HDR_SEL0_AESU |
				     DESC_HDR_MODE0_AESU_CBC |
				     DESC_HDR_SEL1_MDEUA |
				     DESC_HDR_MODE1_MDEU_INIT |
				     DESC_HDR_MODE1_MDEU_PAD |
				     DESC_HDR_MODE1_MDEU_SHA224_HMAC,
	},
	{	.type = CRYPTO_ALG_TYPE_AEAD,
		.alg.crypto = {
			.cra_name = "authenc(hmac(sha224),cbc(des3_ede))",
			.cra_driver_name = "authenc-hmac-sha224-cbc-3des-talitos",
			.cra_blocksize = DES3_EDE_BLOCK_SIZE,
			.cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
			.cra_aead = {
				.ivsize = DES3_EDE_BLOCK_SIZE,
				.maxauthsize = SHA224_DIGEST_SIZE,
			}
		},
		.desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
				     DESC_HDR_SEL0_DEU |
				     DESC_HDR_MODE0_DEU_CBC |
				     DESC_HDR_MODE0_DEU_3DES |
				     DESC_HDR_SEL1_MDEUA |
				     DESC_HDR_MODE1_MDEU_INIT |
				     DESC_HDR_MODE1_MDEU_PAD |
				     DESC_HDR_MODE1_MDEU_SHA224_HMAC,
	},
	{	.type = CRYPTO_ALG_TYPE_AEAD,
		.alg.crypto = {
			.cra_name = "authenc(hmac(sha256),cbc(aes))",
			.cra_driver_name = "authenc-hmac-sha256-cbc-aes-talitos",
			.cra_blocksize = AES_BLOCK_SIZE,
			.cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
			.cra_aead = {
				.ivsize = AES_BLOCK_SIZE,
				.maxauthsize = SHA256_DIGEST_SIZE,
			}
		},
		.desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
				     DESC_HDR_SEL0_AESU |
				     DESC_HDR_MODE0_AESU_CBC |
				     DESC_HDR_SEL1_MDEUA |
				     DESC_HDR_MODE1_MDEU_INIT |
				     DESC_HDR_MODE1_MDEU_PAD |
				     DESC_HDR_MODE1_MDEU_SHA256_HMAC,
	},
	{	.type = CRYPTO_ALG_TYPE_AEAD,
		.alg.crypto = {
			.cra_name = "authenc(hmac(sha256),cbc(des3_ede))",
			.cra_driver_name = "authenc-hmac-sha256-cbc-3des-talitos",
			.cra_blocksize = DES3_EDE_BLOCK_SIZE,
			.cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
			.cra_aead = {
				.ivsize = DES3_EDE_BLOCK_SIZE,
				.maxauthsize = SHA256_DIGEST_SIZE,
			}
		},
		.desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
				     DESC_HDR_SEL0_DEU |
				     DESC_HDR_MODE0_DEU_CBC |
				     DESC_HDR_MODE0_DEU_3DES |
				     DESC_HDR_SEL1_MDEUA |
				     DESC_HDR_MODE1_MDEU_INIT |
				     DESC_HDR_MODE1_MDEU_PAD |
				     DESC_HDR_MODE1_MDEU_SHA256_HMAC,
	},
	{	.type = CRYPTO_ALG_TYPE_AEAD,
		.alg.crypto = {
			.cra_name = "authenc(hmac(sha384),cbc(aes))",
			.cra_driver_name = "authenc-hmac-sha384-cbc-aes-talitos",
			.cra_blocksize = AES_BLOCK_SIZE,
			.cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
			.cra_aead = {
				.ivsize = AES_BLOCK_SIZE,
				.maxauthsize = SHA384_DIGEST_SIZE,
			}
		},
		.desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
				     DESC_HDR_SEL0_AESU |
				     DESC_HDR_MODE0_AESU_CBC |
				     DESC_HDR_SEL1_MDEUB |
				     DESC_HDR_MODE1_MDEU_INIT |
				     DESC_HDR_MODE1_MDEU_PAD |
				     DESC_HDR_MODE1_MDEUB_SHA384_HMAC,
	},
	{	.type = CRYPTO_ALG_TYPE_AEAD,
		.alg.crypto = {
			.cra_name = "authenc(hmac(sha384),cbc(des3_ede))",
			.cra_driver_name = "authenc-hmac-sha384-cbc-3des-talitos",
			.cra_blocksize = DES3_EDE_BLOCK_SIZE,
			.cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
			.cra_aead = {
				.ivsize = DES3_EDE_BLOCK_SIZE,
				.maxauthsize = SHA384_DIGEST_SIZE,
			}
		},
		.desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
				     DESC_HDR_SEL0_DEU |
				     DESC_HDR_MODE0_DEU_CBC |
				     DESC_HDR_MODE0_DEU_3DES |
				     DESC_HDR_SEL1_MDEUB |
				     DESC_HDR_MODE1_MDEU_INIT |
				     DESC_HDR_MODE1_MDEU_PAD |
				     DESC_HDR_MODE1_MDEUB_SHA384_HMAC,
	},
	{	.type = CRYPTO_ALG_TYPE_AEAD,
		.alg.crypto = {
			.cra_name = "authenc(hmac(sha512),cbc(aes))",
			.cra_driver_name = "authenc-hmac-sha512-cbc-aes-talitos",
			.cra_blocksize = AES_BLOCK_SIZE,
			.cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
			.cra_aead = {
				.ivsize = AES_BLOCK_SIZE,
				.maxauthsize = SHA512_DIGEST_SIZE,
			}
		},
		.desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
				     DESC_HDR_SEL0_AESU |
				     DESC_HDR_MODE0_AESU_CBC |
				     DESC_HDR_SEL1_MDEUB |
				     DESC_HDR_MODE1_MDEU_INIT |
				     DESC_HDR_MODE1_MDEU_PAD |
				     DESC_HDR_MODE1_MDEUB_SHA512_HMAC,
	},
	{	.type = CRYPTO_ALG_TYPE_AEAD,
		.alg.crypto = {
			.cra_name = "authenc(hmac(sha512),cbc(des3_ede))",
			.cra_driver_name = "authenc-hmac-sha512-cbc-3des-talitos",
			.cra_blocksize = DES3_EDE_BLOCK_SIZE,
			.cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
			.cra_aead = {
				.ivsize = DES3_EDE_BLOCK_SIZE,
				.maxauthsize = SHA512_DIGEST_SIZE,
			}
		},
		.desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
				     DESC_HDR_SEL0_DEU |
				     DESC_HDR_MODE0_DEU_CBC |
				     DESC_HDR_MODE0_DEU_3DES |
				     DESC_HDR_SEL1_MDEUB |
				     DESC_HDR_MODE1_MDEU_INIT |
				     DESC_HDR_MODE1_MDEU_PAD |
				     DESC_HDR_MODE1_MDEUB_SHA512_HMAC,
	},
	{	.type = CRYPTO_ALG_TYPE_AEAD,
		.alg.crypto = {
			.cra_name = "authenc(hmac(md5),cbc(aes))",
			.cra_driver_name = "authenc-hmac-md5-cbc-aes-talitos",
			.cra_blocksize = AES_BLOCK_SIZE,
			.cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
			.cra_aead = {
				.ivsize = AES_BLOCK_SIZE,
				.maxauthsize = MD5_DIGEST_SIZE,
			}
		},
		.desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
				     DESC_HDR_SEL0_AESU |
				     DESC_HDR_MODE0_AESU_CBC |
				     DESC_HDR_SEL1_MDEUA |
				     DESC_HDR_MODE1_MDEU_INIT |
				     DESC_HDR_MODE1_MDEU_PAD |
				     DESC_HDR_MODE1_MDEU_MD5_HMAC,
	},
	{	.type = CRYPTO_ALG_TYPE_AEAD,
		.alg.crypto = {
			.cra_name = "authenc(hmac(md5),cbc(des3_ede))",
			.cra_driver_name = "authenc-hmac-md5-cbc-3des-talitos",
			.cra_blocksize = DES3_EDE_BLOCK_SIZE,
			.cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
			.cra_aead = {
				.ivsize = DES3_EDE_BLOCK_SIZE,
				.maxauthsize = MD5_DIGEST_SIZE,
			}
		},
		.desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
				     DESC_HDR_SEL0_DEU |
				     DESC_HDR_MODE0_DEU_CBC |
				     DESC_HDR_MODE0_DEU_3DES |
				     DESC_HDR_SEL1_MDEUA |
				     DESC_HDR_MODE1_MDEU_INIT |
				     DESC_HDR_MODE1_MDEU_PAD |
				     DESC_HDR_MODE1_MDEU_MD5_HMAC,
	},
	/* ABLKCIPHER algorithms. */
	{	.type = CRYPTO_ALG_TYPE_ABLKCIPHER,
		.alg.crypto = {
			.cra_name = "cbc(aes)",
			.cra_driver_name = "cbc-aes-talitos",
			.cra_blocksize = AES_BLOCK_SIZE,
			.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
				     CRYPTO_ALG_ASYNC,
			.cra_ablkcipher = {
				.min_keysize = AES_MIN_KEY_SIZE,
				.max_keysize = AES_MAX_KEY_SIZE,
				.ivsize = AES_BLOCK_SIZE,
			}
		},
		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
				     DESC_HDR_SEL0_AESU |
				     DESC_HDR_MODE0_AESU_CBC,
	},
	{	.type = CRYPTO_ALG_TYPE_ABLKCIPHER,
		.alg.crypto = {
			.cra_name = "cbc(des3_ede)",
			.cra_driver_name = "cbc-3des-talitos",
			.cra_blocksize = DES3_EDE_BLOCK_SIZE,
			.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
				     CRYPTO_ALG_ASYNC,
			.cra_ablkcipher = {
				.min_keysize = DES3_EDE_KEY_SIZE,
				.max_keysize = DES3_EDE_KEY_SIZE,
				.ivsize = DES3_EDE_BLOCK_SIZE,
			}
		},
		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
				     DESC_HDR_SEL0_DEU |
				     DESC_HDR_MODE0_DEU_CBC |
				     DESC_HDR_MODE0_DEU_3DES,
	},
	/* AHASH algorithms. */
	{	.type = CRYPTO_ALG_TYPE_AHASH,
		.alg.hash = {
			.halg.digestsize = MD5_DIGEST_SIZE,
			.halg.base = {
				.cra_name = "md5",
				.cra_driver_name = "md5-talitos",
				.cra_blocksize = MD5_BLOCK_SIZE,
				.cra_flags = CRYPTO_ALG_TYPE_AHASH |
					     CRYPTO_ALG_ASYNC,
			}
		},
		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
				     DESC_HDR_SEL0_MDEUA |
				     DESC_HDR_MODE0_MDEU_MD5,
	},
	{	.type = CRYPTO_ALG_TYPE_AHASH,
		.alg.hash = {
			.halg.digestsize = SHA1_DIGEST_SIZE,
			.halg.base = {
				.cra_name = "sha1",
				.cra_driver_name = "sha1-talitos",
				.cra_blocksize = SHA1_BLOCK_SIZE,
				.cra_flags = CRYPTO_ALG_TYPE_AHASH |
					     CRYPTO_ALG_ASYNC,
			}
		},
		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
				     DESC_HDR_SEL0_MDEUA |
				     DESC_HDR_MODE0_MDEU_SHA1,
	},
	{	.type = CRYPTO_ALG_TYPE_AHASH,
		.alg.hash = {
			.halg.digestsize = SHA224_DIGEST_SIZE,
			.halg.base = {
				.cra_name = "sha224",
				.cra_driver_name = "sha224-talitos",
				.cra_blocksize = SHA224_BLOCK_SIZE,
				.cra_flags = CRYPTO_ALG_TYPE_AHASH |
					     CRYPTO_ALG_ASYNC,
			}
		},
		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
				     DESC_HDR_SEL0_MDEUA |
				     DESC_HDR_MODE0_MDEU_SHA224,
	},
	{	.type = CRYPTO_ALG_TYPE_AHASH,
		.alg.hash = {
			.halg.digestsize = SHA256_DIGEST_SIZE,
			.halg.base = {
				.cra_name = "sha256",
				.cra_driver_name = "sha256-talitos",
				.cra_blocksize = SHA256_BLOCK_SIZE,
				.cra_flags = CRYPTO_ALG_TYPE_AHASH |
					     CRYPTO_ALG_ASYNC,
			}
		},
		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
				     DESC_HDR_SEL0_MDEUA |
				     DESC_HDR_MODE0_MDEU_SHA256,
	},
	{	.type = CRYPTO_ALG_TYPE_AHASH,
		.alg.hash = {
			.halg.digestsize = SHA384_DIGEST_SIZE,
			.halg.base = {
				.cra_name = "sha384",
				.cra_driver_name = "sha384-talitos",
				.cra_blocksize = SHA384_BLOCK_SIZE,
				.cra_flags = CRYPTO_ALG_TYPE_AHASH |
					     CRYPTO_ALG_ASYNC,
			}
		},
		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
				     DESC_HDR_SEL0_MDEUB |
				     DESC_HDR_MODE0_MDEUB_SHA384,
	},
	{	.type = CRYPTO_ALG_TYPE_AHASH,
		.alg.hash = {
			.halg.digestsize = SHA512_DIGEST_SIZE,
			.halg.base = {
				.cra_name = "sha512",
				.cra_driver_name = "sha512-talitos",
				.cra_blocksize = SHA512_BLOCK_SIZE,
				.cra_flags = CRYPTO_ALG_TYPE_AHASH |
					     CRYPTO_ALG_ASYNC,
			}
		},
		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
				     DESC_HDR_SEL0_MDEUB |
				     DESC_HDR_MODE0_MDEUB_SHA512,
	},
	{	.type = CRYPTO_ALG_TYPE_AHASH,
		.alg.hash = {
			.halg.digestsize = MD5_DIGEST_SIZE,
			.halg.base = {
				.cra_name = "hmac(md5)",
				.cra_driver_name = "hmac-md5-talitos",
				.cra_blocksize = MD5_BLOCK_SIZE,
				.cra_flags = CRYPTO_ALG_TYPE_AHASH |
					     CRYPTO_ALG_ASYNC,
			}
		},
		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
				     DESC_HDR_SEL0_MDEUA |
				     DESC_HDR_MODE0_MDEU_MD5,
	},
	{	.type = CRYPTO_ALG_TYPE_AHASH,
		.alg.hash = {
			.halg.digestsize = SHA1_DIGEST_SIZE,
			.halg.base = {
				.cra_name = "hmac(sha1)",
				.cra_driver_name = "hmac-sha1-talitos",
				.cra_blocksize = SHA1_BLOCK_SIZE,
				.cra_flags = CRYPTO_ALG_TYPE_AHASH |
					     CRYPTO_ALG_ASYNC,
			}
		},
		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
				     DESC_HDR_SEL0_MDEUA |
				     DESC_HDR_MODE0_MDEU_SHA1,
	},
	{	.type = CRYPTO_ALG_TYPE_AHASH,
		.alg.hash = {
			.halg.digestsize = SHA224_DIGEST_SIZE,
			.halg.base = {
				.cra_name = "hmac(sha224)",
				.cra_driver_name = "hmac-sha224-talitos",
				.cra_blocksize = SHA224_BLOCK_SIZE,
				.cra_flags = CRYPTO_ALG_TYPE_AHASH |
					     CRYPTO_ALG_ASYNC,
			}
		},
		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
				     DESC_HDR_SEL0_MDEUA |
				     DESC_HDR_MODE0_MDEU_SHA224,
	},
	{	.type = CRYPTO_ALG_TYPE_AHASH,
		.alg.hash = {
			.halg.digestsize = SHA256_DIGEST_SIZE,
			.halg.base = {
				.cra_name = "hmac(sha256)",
				.cra_driver_name = "hmac-sha256-talitos",
				.cra_blocksize = SHA256_BLOCK_SIZE,
				.cra_flags = CRYPTO_ALG_TYPE_AHASH |
					     CRYPTO_ALG_ASYNC,
			}
		},
		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
				     DESC_HDR_SEL0_MDEUA |
				     DESC_HDR_MODE0_MDEU_SHA256,
	},
	{	.type = CRYPTO_ALG_TYPE_AHASH,
		.alg.hash = {
			.halg.digestsize = SHA384_DIGEST_SIZE,
			.halg.base = {
				.cra_name = "hmac(sha384)",
				.cra_driver_name = "hmac-sha384-talitos",
				.cra_blocksize = SHA384_BLOCK_SIZE,
				.cra_flags = CRYPTO_ALG_TYPE_AHASH |
					     CRYPTO_ALG_ASYNC,
			}
		},
		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
				     DESC_HDR_SEL0_MDEUB |
				     DESC_HDR_MODE0_MDEUB_SHA384,
	},
	{	.type = CRYPTO_ALG_TYPE_AHASH,
		.alg.hash = {
			.halg.digestsize = SHA512_DIGEST_SIZE,
			.halg.base = {
				.cra_name = "hmac(sha512)",
				.cra_driver_name = "hmac-sha512-talitos",
				.cra_blocksize = SHA512_BLOCK_SIZE,
				.cra_flags = CRYPTO_ALG_TYPE_AHASH |
					     CRYPTO_ALG_ASYNC,
			}
		},
		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
				     DESC_HDR_SEL0_MDEUB |
				     DESC_HDR_MODE0_MDEUB_SHA512,
	},
};
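/*
 * Illustrative only (not part of this driver): a minimal sketch of how a
 * kernel caller might exercise one of the ahash algorithms registered from
 * the template table above, e.g. "sha256" (served by "sha256-talitos" when
 * this driver wins priority selection).  The helper names and the small
 * completion wrapper below are hypothetical, and the input buffer is
 * assumed to be linearly mapped (e.g. kmalloc'd) since the request is
 * scatterlist based.
 */
#if 0
struct example_wait {
	struct completion done;
	int err;
};

/* completion callback for the asynchronous (CRYPTO_ALG_ASYNC) request */
static void example_ahash_done(struct crypto_async_request *req, int err)
{
	struct example_wait *wait = req->data;

	if (err == -EINPROGRESS)
		return;
	wait->err = err;
	complete(&wait->done);
}

/* digest len bytes at buf into out (out must hold SHA256_DIGEST_SIZE) */
static int example_sha256_digest(const u8 *buf, unsigned int len, u8 *out)
{
	struct crypto_ahash *tfm;
	struct ahash_request *req;
	struct scatterlist sg;
	struct example_wait wait;
	int err;

	tfm = crypto_alloc_ahash("sha256", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	req = ahash_request_alloc(tfm, GFP_KERNEL);
	if (!req) {
		crypto_free_ahash(tfm);
		return -ENOMEM;
	}

	init_completion(&wait.done);
	wait.err = 0;
	ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
				   example_ahash_done, &wait);

	sg_init_one(&sg, buf, len);
	ahash_request_set_crypt(req, &sg, out, len);

	err = crypto_ahash_digest(req);
	if (err == -EINPROGRESS || err == -EBUSY) {
		wait_for_completion(&wait.done);
		err = wait.err;
	}

	ahash_request_free(req);
	crypto_free_ahash(tfm);
	return err;
}
#endif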
struct talitos_crypto_alg {
	struct list_head entry;
	struct device *dev;
	struct talitos_alg_template algt;
};
static int talitos_cra_init(struct crypto_tfm *tfm)
{
	struct crypto_alg *alg = tfm->__crt_alg;
	struct talitos_crypto_alg *talitos_alg;
	struct talitos_ctx *ctx = crypto_tfm_ctx(tfm);
	struct talitos_private *priv;

	if ((alg->cra_flags & CRYPTO_ALG_TYPE_MASK) == CRYPTO_ALG_TYPE_AHASH)
		talitos_alg = container_of(__crypto_ahash_alg(alg),
					   struct talitos_crypto_alg,
					   algt.alg.hash);
	else
		talitos_alg = container_of(alg, struct talitos_crypto_alg,
					   algt.alg.crypto);

	/* update context with ptr to dev */
	ctx->dev = talitos_alg->dev;

	/*
	 * assign SEC channel to tfm in round-robin fashion; the mask works
	 * because talitos_probe() only accepts a power-of-2 channel count
	 */
	priv = dev_get_drvdata(ctx->dev);
	ctx->ch = atomic_inc_return(&priv->last_chan) &
		  (priv->num_channels - 1);

	/* copy descriptor header template value */
	ctx->desc_hdr_template = talitos_alg->algt.desc_hdr_template;

	/* select done notification */
	ctx->desc_hdr_template |= DESC_HDR_DONE_NOTIFY;

	return 0;
}
static int talitos_cra_init_aead(struct crypto_tfm *tfm)
{
	struct talitos_ctx *ctx = crypto_tfm_ctx(tfm);

	talitos_cra_init(tfm);

	/* random first IV */
	get_random_bytes(ctx->iv, TALITOS_MAX_IV_LENGTH);

	return 0;
}
static int talitos_cra_init_ahash(struct crypto_tfm *tfm)
{
	struct talitos_ctx *ctx = crypto_tfm_ctx(tfm);

	talitos_cra_init(tfm);

	ctx->keylen = 0;
	crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
				 sizeof(struct talitos_ahash_req_ctx));

	return 0;
}
/*
 * given the alg's descriptor header template, determine whether descriptor
 * type and primary/secondary execution units required match the hw
 * capabilities description provided in the device tree node.
 */
static int hw_supports(struct device *dev, __be32 desc_hdr_template)
{
	struct talitos_private *priv = dev_get_drvdata(dev);
	int ret;

	ret = (1 << DESC_TYPE(desc_hdr_template) & priv->desc_types) &&
	      (1 << PRIMARY_EU(desc_hdr_template) & priv->exec_units);

	if (SECONDARY_EU(desc_hdr_template))
		ret = ret && (1 << SECONDARY_EU(desc_hdr_template)
			      & priv->exec_units);

	return ret;
}
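/*
 * For reference, a sketch of the kind of device tree node those capability
 * masks come from.  The property values shown are illustrative examples
 * following the fsl,sec2.0 binding, not values taken from this file; real
 * values come from the board's dts:
 *
 *	crypto@30000 {
 *		compatible = "fsl,sec2.0";
 *		reg = <0x30000 0x10000>;
 *		interrupts = <11 0x8>;
 *		fsl,num-channels = <4>;
 *		fsl,channel-fifo-len = <24>;
 *		fsl,exec-units-mask = <0x7e>;
 *		fsl,descriptor-types-mask = <0x01010ebf>;
 *	};
 *
 * talitos_probe() below reads the "fsl,*" properties into
 * priv->num_channels, priv->chfifo_len, priv->exec_units and
 * priv->desc_types, which hw_supports() then checks against each
 * algorithm's descriptor header template.
 */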
static int talitos_remove(struct platform_device *ofdev)
{
	struct device *dev = &ofdev->dev;
	struct talitos_private *priv = dev_get_drvdata(dev);
	struct talitos_crypto_alg *t_alg, *n;
	int i;

	list_for_each_entry_safe(t_alg, n, &priv->alg_list, entry) {
		switch (t_alg->algt.type) {
		case CRYPTO_ALG_TYPE_ABLKCIPHER:
		case CRYPTO_ALG_TYPE_AEAD:
			crypto_unregister_alg(&t_alg->algt.alg.crypto);
			break;
		case CRYPTO_ALG_TYPE_AHASH:
			crypto_unregister_ahash(&t_alg->algt.alg.hash);
			break;
		}
		list_del(&t_alg->entry);
		kfree(t_alg);
	}

	if (hw_supports(dev, DESC_HDR_SEL0_RNG))
		talitos_unregister_rng(dev);

	for (i = 0; i < priv->num_channels; i++)
		kfree(priv->chan[i].fifo);

	kfree(priv->chan);

	for (i = 0; i < 2; i++)
		if (priv->irq[i]) {
			free_irq(priv->irq[i], dev);
			irq_dispose_mapping(priv->irq[i]);
		}

	tasklet_kill(&priv->done_task[0]);
	if (priv->irq[1])
		tasklet_kill(&priv->done_task[1]);

	iounmap(priv->reg);

	dev_set_drvdata(dev, NULL);

	kfree(priv);

	return 0;
}
static struct talitos_crypto_alg *talitos_alg_alloc(struct device *dev,
						    struct talitos_alg_template
							   *template)
{
	struct talitos_private *priv = dev_get_drvdata(dev);
	struct talitos_crypto_alg *t_alg;
	struct crypto_alg *alg;

	t_alg = kzalloc(sizeof(struct talitos_crypto_alg), GFP_KERNEL);
	if (!t_alg)
		return ERR_PTR(-ENOMEM);

	t_alg->algt = *template;

	switch (t_alg->algt.type) {
	case CRYPTO_ALG_TYPE_ABLKCIPHER:
		alg = &t_alg->algt.alg.crypto;
		alg->cra_init = talitos_cra_init;
		alg->cra_type = &crypto_ablkcipher_type;
		alg->cra_ablkcipher.setkey = ablkcipher_setkey;
		alg->cra_ablkcipher.encrypt = ablkcipher_encrypt;
		alg->cra_ablkcipher.decrypt = ablkcipher_decrypt;
		alg->cra_ablkcipher.geniv = "eseqiv";
		break;
	case CRYPTO_ALG_TYPE_AEAD:
		alg = &t_alg->algt.alg.crypto;
		alg->cra_init = talitos_cra_init_aead;
		alg->cra_type = &crypto_aead_type;
		alg->cra_aead.setkey = aead_setkey;
		alg->cra_aead.setauthsize = aead_setauthsize;
		alg->cra_aead.encrypt = aead_encrypt;
		alg->cra_aead.decrypt = aead_decrypt;
		alg->cra_aead.givencrypt = aead_givencrypt;
		alg->cra_aead.geniv = "<built-in>";
		break;
	case CRYPTO_ALG_TYPE_AHASH:
		alg = &t_alg->algt.alg.hash.halg.base;
		alg->cra_init = talitos_cra_init_ahash;
		alg->cra_type = &crypto_ahash_type;
		t_alg->algt.alg.hash.init = ahash_init;
		t_alg->algt.alg.hash.update = ahash_update;
		t_alg->algt.alg.hash.final = ahash_final;
		t_alg->algt.alg.hash.finup = ahash_finup;
		t_alg->algt.alg.hash.digest = ahash_digest;
		t_alg->algt.alg.hash.setkey = ahash_setkey;

		if (!(priv->features & TALITOS_FTR_HMAC_OK) &&
		    !strncmp(alg->cra_name, "hmac", 4)) {
			kfree(t_alg);
			return ERR_PTR(-ENOTSUPP);
		}
		if (!(priv->features & TALITOS_FTR_SHA224_HWINIT) &&
		    (!strcmp(alg->cra_name, "sha224") ||
		     !strcmp(alg->cra_name, "hmac(sha224)"))) {
			t_alg->algt.alg.hash.init = ahash_init_sha224_swinit;
			t_alg->algt.desc_hdr_template =
					DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
					DESC_HDR_SEL0_MDEUA |
					DESC_HDR_MODE0_MDEU_SHA256;
		}
		break;
	default:
		dev_err(dev, "unknown algorithm type %d\n", t_alg->algt.type);
		return ERR_PTR(-EINVAL);
	}

	alg->cra_module = THIS_MODULE;
	alg->cra_priority = TALITOS_CRA_PRIORITY;
	alg->cra_alignmask = 0;
	alg->cra_ctxsize = sizeof(struct talitos_ctx);
	alg->cra_flags |= CRYPTO_ALG_KERN_DRIVER_ONLY;

	t_alg->dev = dev;

	return t_alg;
}
static int talitos_probe_irq(struct platform_device *ofdev)
{
	struct device *dev = &ofdev->dev;
	struct device_node *np = ofdev->dev.of_node;
	struct talitos_private *priv = dev_get_drvdata(dev);
	int err;

	priv->irq[0] = irq_of_parse_and_map(np, 0);
	if (!priv->irq[0]) {
		dev_err(dev, "failed to map irq\n");
		return -EINVAL;
	}

	priv->irq[1] = irq_of_parse_and_map(np, 1);

	/* get the primary irq line */
	if (!priv->irq[1]) {
		err = request_irq(priv->irq[0], talitos_interrupt_4ch, 0,
				  dev_driver_string(dev), dev);
		goto primary_out;
	}

	err = request_irq(priv->irq[0], talitos_interrupt_ch0_2, 0,
			  dev_driver_string(dev), dev);
	if (err)
		goto primary_out;

	/* get the secondary irq line */
	err = request_irq(priv->irq[1], talitos_interrupt_ch1_3, 0,
			  dev_driver_string(dev), dev);
	if (err) {
		dev_err(dev, "failed to request secondary irq\n");
		irq_dispose_mapping(priv->irq[1]);
		priv->irq[1] = 0;
	}

	return err;

primary_out:
	if (err) {
		dev_err(dev, "failed to request primary irq\n");
		irq_dispose_mapping(priv->irq[0]);
		priv->irq[0] = 0;
	}

	return err;
}
static int talitos_probe(struct platform_device *ofdev)
{
	struct device *dev = &ofdev->dev;
	struct device_node *np = ofdev->dev.of_node;
	struct talitos_private *priv;
	const unsigned int *prop;
	int i, err;

	priv = kzalloc(sizeof(struct talitos_private), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	dev_set_drvdata(dev, priv);

	priv->ofdev = ofdev;

	spin_lock_init(&priv->reg_lock);

	err = talitos_probe_irq(ofdev);
	if (err)
		goto err_out;

	if (!priv->irq[1]) {
		tasklet_init(&priv->done_task[0], talitos_done_4ch,
			     (unsigned long)dev);
	} else {
		tasklet_init(&priv->done_task[0], talitos_done_ch0_2,
			     (unsigned long)dev);
		tasklet_init(&priv->done_task[1], talitos_done_ch1_3,
			     (unsigned long)dev);
	}

	INIT_LIST_HEAD(&priv->alg_list);

	priv->reg = of_iomap(np, 0);
	if (!priv->reg) {
		dev_err(dev, "failed to of_iomap\n");
		err = -ENOMEM;
		goto err_out;
	}

	/* get SEC version capabilities from device tree */
	prop = of_get_property(np, "fsl,num-channels", NULL);
	if (prop)
		priv->num_channels = *prop;

	prop = of_get_property(np, "fsl,channel-fifo-len", NULL);
	if (prop)
		priv->chfifo_len = *prop;

	prop = of_get_property(np, "fsl,exec-units-mask", NULL);
	if (prop)
		priv->exec_units = *prop;

	prop = of_get_property(np, "fsl,descriptor-types-mask", NULL);
	if (prop)
		priv->desc_types = *prop;

	if (!is_power_of_2(priv->num_channels) || !priv->chfifo_len ||
	    !priv->exec_units || !priv->desc_types) {
		dev_err(dev, "invalid property data in device tree node\n");
		err = -EINVAL;
		goto err_out;
	}

	if (of_device_is_compatible(np, "fsl,sec3.0"))
		priv->features |= TALITOS_FTR_SRC_LINK_TBL_LEN_INCLUDES_EXTENT;

	if (of_device_is_compatible(np, "fsl,sec2.1"))
		priv->features |= TALITOS_FTR_HW_AUTH_CHECK |
				  TALITOS_FTR_SHA224_HWINIT |
				  TALITOS_FTR_HMAC_OK;
	priv->chan = kzalloc(sizeof(struct talitos_channel) *
			     priv->num_channels, GFP_KERNEL);
	if (!priv->chan) {
		dev_err(dev, "failed to allocate channel management space\n");
		err = -ENOMEM;
		goto err_out;
	}

	for (i = 0; i < priv->num_channels; i++) {
		priv->chan[i].reg = priv->reg + TALITOS_CH_STRIDE * (i + 1);
		if (!priv->irq[1] || !(i & 1))
			priv->chan[i].reg += TALITOS_CH_BASE_OFFSET;
	}

	for (i = 0; i < priv->num_channels; i++) {
		spin_lock_init(&priv->chan[i].head_lock);
		spin_lock_init(&priv->chan[i].tail_lock);
	}

	priv->fifo_len = roundup_pow_of_two(priv->chfifo_len);

	for (i = 0; i < priv->num_channels; i++) {
		priv->chan[i].fifo = kzalloc(sizeof(struct talitos_request) *
					     priv->fifo_len, GFP_KERNEL);
		if (!priv->chan[i].fifo) {
			dev_err(dev, "failed to allocate request fifo %d\n", i);
			err = -ENOMEM;
			goto err_out;
		}
	}

	for (i = 0; i < priv->num_channels; i++)
		atomic_set(&priv->chan[i].submit_count,
			   -(priv->chfifo_len - 1));

	dma_set_mask(dev, DMA_BIT_MASK(36));
	/* reset and initialize the h/w */
	err = init_device(dev);
	if (err) {
		dev_err(dev, "failed to initialize device\n");
		goto err_out;
	}

	/* register the RNG, if available */
	if (hw_supports(dev, DESC_HDR_SEL0_RNG)) {
		err = talitos_register_rng(dev);
		if (err) {
			dev_err(dev, "failed to register hwrng: %d\n", err);
			goto err_out;
		} else
			dev_info(dev, "hwrng\n");
	}

	/* register crypto algorithms the device supports */
	for (i = 0; i < ARRAY_SIZE(driver_algs); i++) {
		if (hw_supports(dev, driver_algs[i].desc_hdr_template)) {
			struct talitos_crypto_alg *t_alg;
			char *name = NULL;
			bool authenc = false;

authencesn:
			t_alg = talitos_alg_alloc(dev, &driver_algs[i]);
			if (IS_ERR(t_alg)) {
				err = PTR_ERR(t_alg);
				if (err == -ENOTSUPP)
					continue;
				goto err_out;
			}

			switch (t_alg->algt.type) {
			case CRYPTO_ALG_TYPE_ABLKCIPHER:
			case CRYPTO_ALG_TYPE_AEAD:
				err = crypto_register_alg(
						&t_alg->algt.alg.crypto);
				name = t_alg->algt.alg.crypto.cra_driver_name;
				authenc = authenc ? !authenc :
					  !(bool)memcmp(name, "authenc", 7);
				break;
			case CRYPTO_ALG_TYPE_AHASH:
				err = crypto_register_ahash(
						&t_alg->algt.alg.hash);
				name =
				 t_alg->algt.alg.hash.halg.base.cra_driver_name;
				break;
			}
			if (err) {
				dev_err(dev, "%s alg registration failed\n",
					name);
				kfree(t_alg);
			} else
				list_add_tail(&t_alg->entry, &priv->alg_list);
			if (authenc) {
				/*
				 * rewrite "authenc(...)" to "authencesn(...)"
				 * and loop back to register the ESN variant
				 */
				struct crypto_alg *alg =
					&driver_algs[i].alg.crypto;

				name = alg->cra_name;
				memmove(name + 10, name + 7,
					strlen(name) - 7);
				memcpy(name + 7, "esn", 3);

				name = alg->cra_driver_name;
				memmove(name + 10, name + 7,
					strlen(name) - 7);
				memcpy(name + 7, "esn", 3);

				goto authencesn;
			}
		}
	}
	if (!list_empty(&priv->alg_list))
		dev_info(dev, "%s algorithms registered in /proc/crypto\n",
			 (char *)of_get_property(np, "compatible", NULL));

	return 0;

err_out:
	talitos_remove(ofdev);

	return err;
}
static const struct of_device_id talitos_match[] = {
	{
		.compatible = "fsl,sec2.0",
	},
	{},
};
MODULE_DEVICE_TABLE(of, talitos_match);
static struct platform_driver talitos_driver = {
	.driver = {
		.name = "talitos",
		.owner = THIS_MODULE,
		.of_match_table = talitos_match,
	},
	.probe = talitos_probe,
	.remove = talitos_remove,
};

module_platform_driver(talitos_driver);
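/*
 * module_platform_driver() above is shorthand; it expands to roughly the
 * following boilerplate (sketch for illustration only -- the real macro
 * lives in include/linux/platform_device.h):
 *
 *	static int __init talitos_driver_init(void)
 *	{
 *		return platform_driver_register(&talitos_driver);
 *	}
 *	module_init(talitos_driver_init);
 *
 *	static void __exit talitos_driver_exit(void)
 *	{
 *		platform_driver_unregister(&talitos_driver);
 *	}
 *	module_exit(talitos_driver_exit);
 */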
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Kim Phillips <kim.phillips@freescale.com>");
MODULE_DESCRIPTION("Freescale integrated security engine (SEC) driver");