/*
 * Texas Instruments CPDMA Driver
 *
 * Copyright (C) 2010 Texas Instruments
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation version 2.
 *
 * This program is distributed "as is" WITHOUT ANY WARRANTY of any
 * kind, whether express or implied; without even the implied warranty
 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */
#include <linux/kernel.h>
#include <linux/spinlock.h>
#include <linux/device.h>
#include <linux/slab.h>
#include <linux/err.h>
#include <linux/dma-mapping.h>
#include <linux/io.h>

#include "davinci_cpdma.h"
#define CPDMA_TXIDVER		0x00
#define CPDMA_TXCONTROL		0x04
#define CPDMA_TXTEARDOWN	0x08
#define CPDMA_RXIDVER		0x10
#define CPDMA_RXCONTROL		0x14
#define CPDMA_SOFTRESET		0x1c
#define CPDMA_RXTEARDOWN	0x18
#define CPDMA_TXINTSTATRAW	0x80
#define CPDMA_TXINTSTATMASKED	0x84
#define CPDMA_TXINTMASKSET	0x88
#define CPDMA_TXINTMASKCLEAR	0x8c
#define CPDMA_MACINVECTOR	0x90
#define CPDMA_MACEOIVECTOR	0x94
#define CPDMA_RXINTSTATRAW	0xa0
#define CPDMA_RXINTSTATMASKED	0xa4
#define CPDMA_RXINTMASKSET	0xa8
#define CPDMA_RXINTMASKCLEAR	0xac
#define CPDMA_DMAINTSTATRAW	0xb0
#define CPDMA_DMAINTSTATMASKED	0xb4
#define CPDMA_DMAINTMASKSET	0xb8
#define CPDMA_DMAINTMASKCLEAR	0xbc
#define CPDMA_DMAINT_HOSTERR	BIT(1)

/* the following exist only if has_ext_regs is set */
#define CPDMA_DMACONTROL	0x20
#define CPDMA_DMASTATUS		0x24
#define CPDMA_RXBUFFOFS		0x28
#define CPDMA_EM_CONTROL	0x2c

/* Descriptor mode bits */
#define CPDMA_DESC_SOP		BIT(31)
#define CPDMA_DESC_EOP		BIT(30)
#define CPDMA_DESC_OWNER	BIT(29)
#define CPDMA_DESC_EOQ		BIT(28)
#define CPDMA_DESC_TD_COMPLETE	BIT(27)
#define CPDMA_DESC_PASS_CRC	BIT(26)

#define CPDMA_TEARDOWN_VALUE	0xfffffffc
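
/*
 * Worked example (derived from the submit/completion paths below, included
 * here only as an illustration of the mode bits): for a single-fragment
 * packet, cpdma_chan_submit() programs
 * hw_mode = CPDMA_DESC_OWNER | CPDMA_DESC_SOP | CPDMA_DESC_EOP | len,
 * e.g. len = 60 gives 0xe000003c.  On completion the hardware clears
 * CPDMA_DESC_OWNER and __cpdma_chan_process() recovers the packet length
 * from the low 11 bits (status & 0x7ff).
 */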
struct cpdma_desc {
	/* hardware fields */
	u32			hw_next;
	u32			hw_buffer;
	u32			hw_len;
	u32			hw_mode;
	/* software fields */
	void			*sw_token;
	u32			sw_buffer;
	u32			sw_len;
};

struct cpdma_desc_pool {
	u32			phys;
	void __iomem		*iomap;		/* ioremap map */
	void			*cpumap;	/* dma_alloc map */
	int			desc_size, mem_size;
	int			num_desc, used_desc;
	unsigned long		*bitmap;
	struct device		*dev;
	spinlock_t		lock;
};
enum cpdma_state {
	CPDMA_STATE_IDLE,
	CPDMA_STATE_ACTIVE,
	CPDMA_STATE_TEARDOWN,
};

static const char *cpdma_state_str[] = { "idle", "active", "teardown" };
struct cpdma_ctlr {
	enum cpdma_state	state;
	struct cpdma_params	params;
	struct device		*dev;
	struct cpdma_desc_pool	*pool;
	spinlock_t		lock;
	struct cpdma_chan	*channels[2 * CPDMA_MAX_CHANNELS];
};
struct cpdma_chan {
	enum cpdma_state		state;
	struct cpdma_ctlr		*ctlr;
	spinlock_t			lock;
	int				chan_num;
	struct cpdma_desc __iomem	*head, *tail;
	int				count;
	void __iomem			*hdp, *cp, *rxfree;
	u32				mask;
	cpdma_handler_fn		handler;
	enum dma_data_direction		dir;
	struct cpdma_chan_stats		stats;
	/* offsets into dmaregs */
	int				int_set, int_clear, td;
};
/* The following make access to common cpdma_ctlr params more readable */
#define dmaregs		params.dmaregs
#define num_chan	params.num_chan

/* various accessors */
#define dma_reg_read(ctlr, ofs)		__raw_readl((ctlr)->dmaregs + (ofs))
#define chan_read(chan, fld)		__raw_readl((chan)->fld)
#define desc_read(desc, fld)		__raw_readl(&(desc)->fld)
#define dma_reg_write(ctlr, ofs, v)	__raw_writel(v, (ctlr)->dmaregs + (ofs))
#define chan_write(chan, fld, v)	__raw_writel(v, (chan)->fld)
#define desc_write(desc, fld, v)	__raw_writel((u32)(v), &(desc)->fld)
/*
 * Utility constructs for a cpdma descriptor pool.  Some devices (e.g. davinci
 * emac) have dedicated on-chip memory for these descriptors.  Some other
 * devices (e.g. cpsw switches) use plain old memory.  Descriptor pools
 * abstract out these details.
 */
static struct cpdma_desc_pool *
cpdma_desc_pool_create(struct device *dev, u32 phys, int size, int align)
{
	int bitmap_size;
	struct cpdma_desc_pool *pool;

	pool = kzalloc(sizeof(*pool), GFP_KERNEL);
	if (!pool)
		return NULL;

	spin_lock_init(&pool->lock);

	pool->dev	= dev;
	pool->mem_size	= size;
	pool->desc_size	= ALIGN(sizeof(struct cpdma_desc), align);
	pool->num_desc	= size / pool->desc_size;

	bitmap_size  = (pool->num_desc / BITS_PER_LONG) * sizeof(long);
	pool->bitmap = kzalloc(bitmap_size, GFP_KERNEL);
	if (!pool->bitmap)
		goto fail;

	if (phys) {
		pool->phys  = phys;
		pool->iomap = ioremap(phys, size);
	} else {
		pool->cpumap = dma_alloc_coherent(dev, size, &pool->phys,
						  GFP_KERNEL);
		pool->iomap  = (void __force __iomem *)pool->cpumap;
	}

	if (pool->iomap)
		return pool;

fail:
	kfree(pool->bitmap);
	kfree(pool);
	return NULL;
}
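
/*
 * Sizing sketch (illustrative numbers, not taken from any particular
 * platform): with align = 16, each slot is ALIGN(sizeof(struct cpdma_desc),
 * 16), which rounds the 28-byte descriptor above up to 32 bytes.  An 8 KiB
 * descriptor region (desc_mem_size = 8192) therefore yields num_desc = 256
 * and, on a 32-bit system, a 32-byte allocation bitmap.
 */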
static void cpdma_desc_pool_destroy(struct cpdma_desc_pool *pool)
{
	unsigned long flags;

	if (!pool)
		return;

	spin_lock_irqsave(&pool->lock, flags);
	WARN_ON(pool->used_desc);
	kfree(pool->bitmap);
	if (pool->cpumap)
		dma_free_coherent(pool->dev, pool->mem_size, pool->cpumap,
				  pool->phys);
	else
		iounmap(pool->iomap);
	spin_unlock_irqrestore(&pool->lock, flags);
	kfree(pool);
}
static inline dma_addr_t desc_phys(struct cpdma_desc_pool *pool,
				   struct cpdma_desc __iomem *desc)
{
	return pool->phys + (__force dma_addr_t)desc -
			    (__force dma_addr_t)pool->iomap;
}
static inline struct cpdma_desc __iomem *
desc_from_phys(struct cpdma_desc_pool *pool, dma_addr_t dma)
{
	return dma ? pool->iomap + dma - pool->phys : NULL;
}
static struct cpdma_desc __iomem *
cpdma_desc_alloc(struct cpdma_desc_pool *pool, int num_desc)
{
	unsigned long flags;
	int index;
	struct cpdma_desc __iomem *desc = NULL;

	spin_lock_irqsave(&pool->lock, flags);

	index = bitmap_find_next_zero_area(pool->bitmap, pool->num_desc, 0,
					   num_desc, 0);
	if (index < pool->num_desc) {
		bitmap_set(pool->bitmap, index, num_desc);
		desc = pool->iomap + pool->desc_size * index;
		pool->used_desc++;
	}

	spin_unlock_irqrestore(&pool->lock, flags);
	return desc;
}
static void cpdma_desc_free(struct cpdma_desc_pool *pool,
			    struct cpdma_desc __iomem *desc, int num_desc)
{
	unsigned long flags, index;

	index = ((unsigned long)desc - (unsigned long)pool->iomap) /
		pool->desc_size;
	spin_lock_irqsave(&pool->lock, flags);
	bitmap_clear(pool->bitmap, index, num_desc);
	pool->used_desc--;
	spin_unlock_irqrestore(&pool->lock, flags);
}
struct cpdma_ctlr *cpdma_ctlr_create(struct cpdma_params *params)
{
	struct cpdma_ctlr *ctlr;

	ctlr = kzalloc(sizeof(*ctlr), GFP_KERNEL);
	if (!ctlr)
		return NULL;

	ctlr->state = CPDMA_STATE_IDLE;
	ctlr->params = *params;
	ctlr->dev = params->dev;
	spin_lock_init(&ctlr->lock);

	ctlr->pool = cpdma_desc_pool_create(ctlr->dev,
					    ctlr->params.desc_mem_phys,
					    ctlr->params.desc_mem_size,
					    ctlr->params.desc_align);
	if (!ctlr->pool) {
		kfree(ctlr);
		return NULL;
	}

	if (WARN_ON(ctlr->num_chan > CPDMA_MAX_CHANNELS))
		ctlr->num_chan = CPDMA_MAX_CHANNELS;
	return ctlr;
}
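
/*
 * Usage sketch (illustrative only; how a client driver fills in the
 * parameter block is outside this file, and the register offsets and
 * variable names below are hypothetical):
 *
 *	struct cpdma_params params = {
 *		.dev		= &pdev->dev,
 *		.dmaregs	= base + 0x800,
 *		.txhdp		= base + 0xa00,
 *		.rxhdp		= base + 0xa20,
 *		.txcp		= base + 0xa40,
 *		.rxcp		= base + 0xa60,
 *		.num_chan	= 8,
 *		.min_packet_size = 64,
 *		.desc_mem_phys	= ram_phys,
 *		.desc_mem_size	= SZ_8K,
 *		.desc_align	= 16,
 *		.has_soft_reset	= true,
 *	};
 *	struct cpdma_ctlr *dma = cpdma_ctlr_create(&params);
 *
 *	if (!dma)
 *		return -ENOMEM;
 *	cpdma_ctlr_start(dma);
 */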
int cpdma_ctlr_start(struct cpdma_ctlr *ctlr)
{
	unsigned long flags;
	int i;

	spin_lock_irqsave(&ctlr->lock, flags);
	if (ctlr->state != CPDMA_STATE_IDLE) {
		spin_unlock_irqrestore(&ctlr->lock, flags);
		return -EBUSY;
	}

	if (ctlr->params.has_soft_reset) {
		unsigned long timeout = jiffies + HZ/10;

		dma_reg_write(ctlr, CPDMA_SOFTRESET, 1);
		while (time_before(jiffies, timeout)) {
			if (dma_reg_read(ctlr, CPDMA_SOFTRESET) == 0)
				break;
		}
		WARN_ON(!time_before(jiffies, timeout));
	}

	for (i = 0; i < ctlr->num_chan; i++) {
		__raw_writel(0, ctlr->params.txhdp + 4 * i);
		__raw_writel(0, ctlr->params.rxhdp + 4 * i);
		__raw_writel(0, ctlr->params.txcp + 4 * i);
		__raw_writel(0, ctlr->params.rxcp + 4 * i);
	}

	dma_reg_write(ctlr, CPDMA_RXINTMASKCLEAR, 0xffffffff);
	dma_reg_write(ctlr, CPDMA_TXINTMASKCLEAR, 0xffffffff);

	dma_reg_write(ctlr, CPDMA_TXCONTROL, 1);
	dma_reg_write(ctlr, CPDMA_RXCONTROL, 1);

	ctlr->state = CPDMA_STATE_ACTIVE;

	for (i = 0; i < ARRAY_SIZE(ctlr->channels); i++) {
		if (ctlr->channels[i])
			cpdma_chan_start(ctlr->channels[i]);
	}
	spin_unlock_irqrestore(&ctlr->lock, flags);
	return 0;
}
int cpdma_ctlr_stop(struct cpdma_ctlr *ctlr)
{
	unsigned long flags;
	int i;

	spin_lock_irqsave(&ctlr->lock, flags);
	if (ctlr->state != CPDMA_STATE_ACTIVE) {
		spin_unlock_irqrestore(&ctlr->lock, flags);
		return -EINVAL;
	}

	ctlr->state = CPDMA_STATE_TEARDOWN;

	for (i = 0; i < ARRAY_SIZE(ctlr->channels); i++) {
		if (ctlr->channels[i])
			cpdma_chan_stop(ctlr->channels[i]);
	}

	dma_reg_write(ctlr, CPDMA_RXINTMASKCLEAR, 0xffffffff);
	dma_reg_write(ctlr, CPDMA_TXINTMASKCLEAR, 0xffffffff);

	dma_reg_write(ctlr, CPDMA_TXCONTROL, 0);
	dma_reg_write(ctlr, CPDMA_RXCONTROL, 0);

	ctlr->state = CPDMA_STATE_IDLE;

	spin_unlock_irqrestore(&ctlr->lock, flags);
	return 0;
}
int cpdma_ctlr_dump(struct cpdma_ctlr *ctlr)
{
	struct device *dev = ctlr->dev;
	unsigned long flags;
	int i;

	spin_lock_irqsave(&ctlr->lock, flags);
	dev_info(dev, "CPDMA: state: %s", cpdma_state_str[ctlr->state]);

	dev_info(dev, "CPDMA: txidver: %x",
		 dma_reg_read(ctlr, CPDMA_TXIDVER));
	dev_info(dev, "CPDMA: txcontrol: %x",
		 dma_reg_read(ctlr, CPDMA_TXCONTROL));
	dev_info(dev, "CPDMA: txteardown: %x",
		 dma_reg_read(ctlr, CPDMA_TXTEARDOWN));
	dev_info(dev, "CPDMA: rxidver: %x",
		 dma_reg_read(ctlr, CPDMA_RXIDVER));
	dev_info(dev, "CPDMA: rxcontrol: %x",
		 dma_reg_read(ctlr, CPDMA_RXCONTROL));
	dev_info(dev, "CPDMA: softreset: %x",
		 dma_reg_read(ctlr, CPDMA_SOFTRESET));
	dev_info(dev, "CPDMA: rxteardown: %x",
		 dma_reg_read(ctlr, CPDMA_RXTEARDOWN));
	dev_info(dev, "CPDMA: txintstatraw: %x",
		 dma_reg_read(ctlr, CPDMA_TXINTSTATRAW));
	dev_info(dev, "CPDMA: txintstatmasked: %x",
		 dma_reg_read(ctlr, CPDMA_TXINTSTATMASKED));
	dev_info(dev, "CPDMA: txintmaskset: %x",
		 dma_reg_read(ctlr, CPDMA_TXINTMASKSET));
	dev_info(dev, "CPDMA: txintmaskclear: %x",
		 dma_reg_read(ctlr, CPDMA_TXINTMASKCLEAR));
	dev_info(dev, "CPDMA: macinvector: %x",
		 dma_reg_read(ctlr, CPDMA_MACINVECTOR));
	dev_info(dev, "CPDMA: maceoivector: %x",
		 dma_reg_read(ctlr, CPDMA_MACEOIVECTOR));
	dev_info(dev, "CPDMA: rxintstatraw: %x",
		 dma_reg_read(ctlr, CPDMA_RXINTSTATRAW));
	dev_info(dev, "CPDMA: rxintstatmasked: %x",
		 dma_reg_read(ctlr, CPDMA_RXINTSTATMASKED));
	dev_info(dev, "CPDMA: rxintmaskset: %x",
		 dma_reg_read(ctlr, CPDMA_RXINTMASKSET));
	dev_info(dev, "CPDMA: rxintmaskclear: %x",
		 dma_reg_read(ctlr, CPDMA_RXINTMASKCLEAR));
	dev_info(dev, "CPDMA: dmaintstatraw: %x",
		 dma_reg_read(ctlr, CPDMA_DMAINTSTATRAW));
	dev_info(dev, "CPDMA: dmaintstatmasked: %x",
		 dma_reg_read(ctlr, CPDMA_DMAINTSTATMASKED));
	dev_info(dev, "CPDMA: dmaintmaskset: %x",
		 dma_reg_read(ctlr, CPDMA_DMAINTMASKSET));
	dev_info(dev, "CPDMA: dmaintmaskclear: %x",
		 dma_reg_read(ctlr, CPDMA_DMAINTMASKCLEAR));
	if (ctlr->params.has_ext_regs) {
		dev_info(dev, "CPDMA: dmacontrol: %x",
			 dma_reg_read(ctlr, CPDMA_DMACONTROL));
		dev_info(dev, "CPDMA: dmastatus: %x",
			 dma_reg_read(ctlr, CPDMA_DMASTATUS));
		dev_info(dev, "CPDMA: rxbuffofs: %x",
			 dma_reg_read(ctlr, CPDMA_RXBUFFOFS));
	}

	for (i = 0; i < ARRAY_SIZE(ctlr->channels); i++)
		if (ctlr->channels[i])
			cpdma_chan_dump(ctlr->channels[i]);

	spin_unlock_irqrestore(&ctlr->lock, flags);
	return 0;
}
int cpdma_ctlr_destroy(struct cpdma_ctlr *ctlr)
{
	unsigned long flags;
	int i;

	if (!ctlr)
		return -EINVAL;

	spin_lock_irqsave(&ctlr->lock, flags);
	if (ctlr->state != CPDMA_STATE_IDLE)
		cpdma_ctlr_stop(ctlr);

	for (i = 0; i < ARRAY_SIZE(ctlr->channels); i++) {
		if (ctlr->channels[i])
			cpdma_chan_destroy(ctlr->channels[i]);
	}

	cpdma_desc_pool_destroy(ctlr->pool);
	spin_unlock_irqrestore(&ctlr->lock, flags);
	kfree(ctlr);
	return 0;
}
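
/*
 * Teardown sketch (illustrative; error handling omitted and the variable
 * names dma, rx_chan, tx_chan are hypothetical): a client shuts the engine
 * down in roughly the reverse order of setup.  cpdma_ctlr_stop() tears down
 * any channels that are still active, and cpdma_ctlr_destroy() releases the
 * channels and the descriptor pool.
 *
 *	cpdma_ctlr_int_ctrl(dma, false);
 *	cpdma_ctlr_stop(dma);
 *	cpdma_chan_destroy(rx_chan);
 *	cpdma_chan_destroy(tx_chan);
 *	cpdma_ctlr_destroy(dma);
 */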
int cpdma_ctlr_int_ctrl(struct cpdma_ctlr *ctlr, bool enable)
{
	unsigned long flags;
	int i, reg;

	spin_lock_irqsave(&ctlr->lock, flags);
	if (ctlr->state != CPDMA_STATE_ACTIVE) {
		spin_unlock_irqrestore(&ctlr->lock, flags);
		return -EINVAL;
	}

	reg = enable ? CPDMA_DMAINTMASKSET : CPDMA_DMAINTMASKCLEAR;
	dma_reg_write(ctlr, reg, CPDMA_DMAINT_HOSTERR);

	for (i = 0; i < ARRAY_SIZE(ctlr->channels); i++) {
		if (ctlr->channels[i])
			cpdma_chan_int_ctrl(ctlr->channels[i], enable);
	}

	spin_unlock_irqrestore(&ctlr->lock, flags);
	return 0;
}
void cpdma_ctlr_eoi(struct cpdma_ctlr *ctlr)
{
	dma_reg_write(ctlr, CPDMA_MACEOIVECTOR, 0);
}
struct cpdma_chan *cpdma_chan_create(struct cpdma_ctlr *ctlr, int chan_num,
				     cpdma_handler_fn handler)
{
	struct cpdma_chan *chan;
	int ret, offset = (chan_num % CPDMA_MAX_CHANNELS) * 4;
	unsigned long flags;

	if (__chan_linear(chan_num) >= ctlr->num_chan)
		return NULL;

	ret = -ENOMEM;
	chan = kzalloc(sizeof(*chan), GFP_KERNEL);
	if (!chan)
		goto err_chan_alloc;

	spin_lock_irqsave(&ctlr->lock, flags);

	ret = -EBUSY;
	if (ctlr->channels[chan_num])
		goto err_chan_busy;

	chan->ctlr	= ctlr;
	chan->state	= CPDMA_STATE_IDLE;
	chan->chan_num	= chan_num;
	chan->handler	= handler;

	if (is_rx_chan(chan)) {
		chan->hdp	= ctlr->params.rxhdp + offset;
		chan->cp	= ctlr->params.rxcp + offset;
		chan->rxfree	= ctlr->params.rxfree + offset;
		chan->int_set	= CPDMA_RXINTMASKSET;
		chan->int_clear	= CPDMA_RXINTMASKCLEAR;
		chan->td	= CPDMA_RXTEARDOWN;
		chan->dir	= DMA_FROM_DEVICE;
	} else {
		chan->hdp	= ctlr->params.txhdp + offset;
		chan->cp	= ctlr->params.txcp + offset;
		chan->int_set	= CPDMA_TXINTMASKSET;
		chan->int_clear	= CPDMA_TXINTMASKCLEAR;
		chan->td	= CPDMA_TXTEARDOWN;
		chan->dir	= DMA_TO_DEVICE;
	}
	chan->mask = BIT(chan_linear(chan));

	spin_lock_init(&chan->lock);

	ctlr->channels[chan_num] = chan;
	spin_unlock_irqrestore(&ctlr->lock, flags);
	return chan;

err_chan_busy:
	spin_unlock_irqrestore(&ctlr->lock, flags);
	kfree(chan);
err_chan_alloc:
	return ERR_PTR(ret);
}
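
/*
 * Usage sketch (illustrative): a client typically creates one tx and one rx
 * channel and provides a completion handler that releases the token passed
 * to cpdma_chan_submit().  The split of tx/rx channel numbers across the two
 * halves of ctlr->channels[] and the tx_chan_num()/rx_chan_num() helpers are
 * assumed to come from davinci_cpdma.h; the names below are hypothetical.
 *
 *	static void my_tx_handler(void *token, int len, int status)
 *	{
 *		... release the token, e.g. free an skb ...
 *	}
 *
 *	tx_chan = cpdma_chan_create(dma, tx_chan_num(0), my_tx_handler);
 *	rx_chan = cpdma_chan_create(dma, rx_chan_num(0), my_rx_handler);
 */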
int cpdma_chan_destroy(struct cpdma_chan *chan)
{
	struct cpdma_ctlr *ctlr = chan->ctlr;
	unsigned long flags;

	spin_lock_irqsave(&ctlr->lock, flags);
	if (chan->state != CPDMA_STATE_IDLE)
		cpdma_chan_stop(chan);
	ctlr->channels[chan->chan_num] = NULL;
	spin_unlock_irqrestore(&ctlr->lock, flags);
	kfree(chan);
	return 0;
}
int cpdma_chan_get_stats(struct cpdma_chan *chan,
			 struct cpdma_chan_stats *stats)
{
	unsigned long flags;

	spin_lock_irqsave(&chan->lock, flags);
	memcpy(stats, &chan->stats, sizeof(*stats));
	spin_unlock_irqrestore(&chan->lock, flags);
	return 0;
}
int cpdma_chan_dump(struct cpdma_chan *chan)
{
	unsigned long flags;
	struct device *dev = chan->ctlr->dev;

	spin_lock_irqsave(&chan->lock, flags);

	dev_info(dev, "channel %d (%s %d) state %s",
		 chan->chan_num, is_rx_chan(chan) ? "rx" : "tx",
		 chan_linear(chan), cpdma_state_str[chan->state]);
	dev_info(dev, "\thdp: %x\n", chan_read(chan, hdp));
	dev_info(dev, "\tcp: %x\n", chan_read(chan, cp));
	if (chan->rxfree) {
		dev_info(dev, "\trxfree: %x\n",
			 chan_read(chan, rxfree));
	}
	dev_info(dev, "\tstats head_enqueue: %d\n",
		 chan->stats.head_enqueue);
	dev_info(dev, "\tstats tail_enqueue: %d\n",
		 chan->stats.tail_enqueue);
	dev_info(dev, "\tstats pad_enqueue: %d\n",
		 chan->stats.pad_enqueue);
	dev_info(dev, "\tstats misqueued: %d\n",
		 chan->stats.misqueued);
	dev_info(dev, "\tstats desc_alloc_fail: %d\n",
		 chan->stats.desc_alloc_fail);
	dev_info(dev, "\tstats pad_alloc_fail: %d\n",
		 chan->stats.pad_alloc_fail);
	dev_info(dev, "\tstats runt_receive_buff: %d\n",
		 chan->stats.runt_receive_buff);
	dev_info(dev, "\tstats runt_transmit_buff: %d\n",
		 chan->stats.runt_transmit_buff);
	dev_info(dev, "\tstats empty_dequeue: %d\n",
		 chan->stats.empty_dequeue);
	dev_info(dev, "\tstats busy_dequeue: %d\n",
		 chan->stats.busy_dequeue);
	dev_info(dev, "\tstats good_dequeue: %d\n",
		 chan->stats.good_dequeue);
	dev_info(dev, "\tstats requeue: %d\n",
		 chan->stats.requeue);
	dev_info(dev, "\tstats teardown_dequeue: %d\n",
		 chan->stats.teardown_dequeue);
	spin_unlock_irqrestore(&chan->lock, flags);
	return 0;
}
static void __cpdma_chan_submit(struct cpdma_chan *chan,
				struct cpdma_desc __iomem *desc)
{
	struct cpdma_ctlr		*ctlr = chan->ctlr;
	struct cpdma_desc __iomem	*prev = chan->tail;
	struct cpdma_desc_pool		*pool = ctlr->pool;
	dma_addr_t			desc_dma;
	u32				mode;

	desc_dma = desc_phys(pool, desc);

	/* simple case - idle channel */
	if (!chan->head) {
		chan->stats.head_enqueue++;
		chan->head = desc;
		chan->tail = desc;
		if (chan->state == CPDMA_STATE_ACTIVE)
			chan_write(chan, hdp, desc_dma);
		return;
	}

	/* first chain the descriptor at the tail of the list */
	desc_write(prev, hw_next, desc_dma);
	chan->tail = desc;
	chan->stats.tail_enqueue++;

	/* next check if EOQ has been triggered already */
	mode = desc_read(prev, hw_mode);
	if (((mode & (CPDMA_DESC_EOQ | CPDMA_DESC_OWNER)) == CPDMA_DESC_EOQ) &&
	    (chan->state == CPDMA_STATE_ACTIVE)) {
		desc_write(prev, hw_mode, mode & ~CPDMA_DESC_EOQ);
		chan_write(chan, hdp, desc_dma);
		chan->stats.misqueued++;
	}
}
int cpdma_chan_submit(struct cpdma_chan *chan, void *token, void *data,
		      int len, gfp_t gfp_mask)
{
	struct cpdma_ctlr		*ctlr = chan->ctlr;
	struct cpdma_desc __iomem	*desc;
	dma_addr_t			buffer;
	unsigned long			flags;
	u32				mode;
	int				ret = 0;

	spin_lock_irqsave(&chan->lock, flags);

	if (chan->state == CPDMA_STATE_TEARDOWN) {
		ret = -EINVAL;
		goto unlock_ret;
	}

	desc = cpdma_desc_alloc(ctlr->pool, 1);
	if (!desc) {
		chan->stats.desc_alloc_fail++;
		ret = -ENOMEM;
		goto unlock_ret;
	}

	if (len < ctlr->params.min_packet_size) {
		len = ctlr->params.min_packet_size;
		chan->stats.runt_transmit_buff++;
	}

	buffer = dma_map_single(ctlr->dev, data, len, chan->dir);
	mode = CPDMA_DESC_OWNER | CPDMA_DESC_SOP | CPDMA_DESC_EOP;

	desc_write(desc, hw_next,   0);
	desc_write(desc, hw_buffer, buffer);
	desc_write(desc, hw_len,    len);
	desc_write(desc, hw_mode,   mode | len);
	desc_write(desc, sw_token,  token);
	desc_write(desc, sw_buffer, buffer);
	desc_write(desc, sw_len,    len);

	__cpdma_chan_submit(chan, desc);

	if (chan->state == CPDMA_STATE_ACTIVE && chan->rxfree)
		chan_write(chan, rxfree, 1);

	chan->count++;

unlock_ret:
	spin_unlock_irqrestore(&chan->lock, flags);
	return ret;
}
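
/*
 * Usage sketch (illustrative): on transmit, a client hands over a packet
 * together with an arbitrary token that comes back in the completion
 * handler; on receive, it pre-submits empty buffers the same way.  The
 * skb-based names below are hypothetical.
 *
 *	ret = cpdma_chan_submit(tx_chan, skb, skb->data, skb->len,
 *				GFP_KERNEL);
 *	if (ret)
 *		dev_kfree_skb_any(skb);
 */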
static void __cpdma_chan_free(struct cpdma_chan *chan,
			      struct cpdma_desc __iomem *desc,
			      int outlen, int status)
{
	struct cpdma_ctlr		*ctlr = chan->ctlr;
	struct cpdma_desc_pool		*pool = ctlr->pool;
	dma_addr_t			buff_dma;
	int				origlen;
	void				*token;

	token      = (void *)desc_read(desc, sw_token);
	buff_dma   = desc_read(desc, sw_buffer);
	origlen    = desc_read(desc, sw_len);

	dma_unmap_single(ctlr->dev, buff_dma, origlen, chan->dir);
	cpdma_desc_free(pool, desc, 1);
	(*chan->handler)(token, outlen, status);
}
static int __cpdma_chan_process(struct cpdma_chan *chan)
{
	struct cpdma_ctlr		*ctlr = chan->ctlr;
	struct cpdma_desc __iomem	*desc;
	int				status, outlen;
	struct cpdma_desc_pool		*pool = ctlr->pool;
	dma_addr_t			desc_dma;
	unsigned long			flags;

	spin_lock_irqsave(&chan->lock, flags);

	desc = chan->head;
	if (!desc) {
		chan->stats.empty_dequeue++;
		status = -ENOENT;
		goto unlock_ret;
	}
	desc_dma = desc_phys(pool, desc);

	status	= __raw_readl(&desc->hw_mode);
	outlen	= status & 0x7ff;
	if (status & CPDMA_DESC_OWNER) {
		chan->stats.busy_dequeue++;
		status = -EBUSY;
		goto unlock_ret;
	}
	status	= status & (CPDMA_DESC_EOQ | CPDMA_DESC_TD_COMPLETE);

	chan->head = desc_from_phys(pool, desc_read(desc, hw_next));
	chan_write(chan, cp, desc_dma);
	chan->count--;
	chan->stats.good_dequeue++;

	if (status & CPDMA_DESC_EOQ) {
		chan->stats.requeue++;
		chan_write(chan, hdp, desc_phys(pool, chan->head));
	}

	spin_unlock_irqrestore(&chan->lock, flags);

	__cpdma_chan_free(chan, desc, outlen, status);
	return status;

unlock_ret:
	spin_unlock_irqrestore(&chan->lock, flags);
	return status;
}
int cpdma_chan_process(struct cpdma_chan *chan, int quota)
{
	int used = 0, ret = 0;

	if (chan->state != CPDMA_STATE_ACTIVE)
		return -EINVAL;

	while (used < quota) {
		ret = __cpdma_chan_process(chan);
		if (ret < 0)
			break;
		used++;
	}
	return used;
}
int cpdma_chan_start(struct cpdma_chan *chan)
{
	struct cpdma_ctlr	*ctlr = chan->ctlr;
	struct cpdma_desc_pool	*pool = ctlr->pool;
	unsigned long		flags;

	spin_lock_irqsave(&chan->lock, flags);
	if (chan->state != CPDMA_STATE_IDLE) {
		spin_unlock_irqrestore(&chan->lock, flags);
		return -EBUSY;
	}
	if (ctlr->state != CPDMA_STATE_ACTIVE) {
		spin_unlock_irqrestore(&chan->lock, flags);
		return -EINVAL;
	}
	dma_reg_write(ctlr, chan->int_set, chan->mask);
	chan->state = CPDMA_STATE_ACTIVE;
	if (chan->head) {
		chan_write(chan, hdp, desc_phys(pool, chan->head));
		if (chan->rxfree)
			chan_write(chan, rxfree, chan->count);
	}

	spin_unlock_irqrestore(&chan->lock, flags);
	return 0;
}
int cpdma_chan_stop(struct cpdma_chan *chan)
{
	struct cpdma_ctlr	*ctlr = chan->ctlr;
	struct cpdma_desc_pool	*pool = ctlr->pool;
	unsigned long		flags;
	int			ret;
	unsigned long		timeout;

	spin_lock_irqsave(&chan->lock, flags);
	if (chan->state != CPDMA_STATE_ACTIVE) {
		spin_unlock_irqrestore(&chan->lock, flags);
		return -EINVAL;
	}

	chan->state = CPDMA_STATE_TEARDOWN;
	dma_reg_write(ctlr, chan->int_clear, chan->mask);

	/* trigger teardown */
	dma_reg_write(ctlr, chan->td, chan->chan_num);

	/* wait for teardown complete */
	timeout = jiffies + HZ/10;	/* 100 msec */
	while (time_before(jiffies, timeout)) {
		u32 cp = chan_read(chan, cp);
		if ((cp & CPDMA_TEARDOWN_VALUE) == CPDMA_TEARDOWN_VALUE)
			break;
	}
	WARN_ON(!time_before(jiffies, timeout));
	chan_write(chan, cp, CPDMA_TEARDOWN_VALUE);

	/* handle completed packets */
	do {
		ret = __cpdma_chan_process(chan);
		if (ret < 0)
			break;
	} while ((ret & CPDMA_DESC_TD_COMPLETE) == 0);

	/* remaining packets haven't been tx/rx'ed, clean them up */
	while (chan->head) {
		struct cpdma_desc __iomem *desc = chan->head;
		dma_addr_t next_dma;

		next_dma = desc_read(desc, hw_next);
		chan->head = desc_from_phys(pool, next_dma);
		chan->count--;
		chan->stats.teardown_dequeue++;

		/* issue callback without locks held */
		spin_unlock_irqrestore(&chan->lock, flags);
		__cpdma_chan_free(chan, desc, 0, -ENOSYS);
		spin_lock_irqsave(&chan->lock, flags);
	}

	chan->state = CPDMA_STATE_IDLE;
	spin_unlock_irqrestore(&chan->lock, flags);
	return 0;
}
int cpdma_chan_int_ctrl(struct cpdma_chan *chan, bool enable)
{
	unsigned long flags;

	spin_lock_irqsave(&chan->lock, flags);
	if (chan->state != CPDMA_STATE_ACTIVE) {
		spin_unlock_irqrestore(&chan->lock, flags);
		return -EINVAL;
	}

	dma_reg_write(chan->ctlr, enable ? chan->int_set : chan->int_clear,
		      chan->mask);
	spin_unlock_irqrestore(&chan->lock, flags);
	return 0;
}
struct cpdma_control_info {
	u32		reg;
	u32		shift, mask;
	int		access;
#define ACCESS_RO	BIT(0)
#define ACCESS_WO	BIT(1)
#define ACCESS_RW	(ACCESS_RO | ACCESS_WO)
};
static struct cpdma_control_info controls[] = {
	[CPDMA_CMD_IDLE]	  = {CPDMA_DMACONTROL,	3,  1,      ACCESS_WO},
	[CPDMA_COPY_ERROR_FRAMES] = {CPDMA_DMACONTROL,	4,  1,      ACCESS_RW},
	[CPDMA_RX_OFF_LEN_UPDATE] = {CPDMA_DMACONTROL,	2,  1,      ACCESS_RW},
	[CPDMA_RX_OWNERSHIP_FLIP] = {CPDMA_DMACONTROL,	1,  1,      ACCESS_RW},
	[CPDMA_TX_PRIO_FIXED]	  = {CPDMA_DMACONTROL,	0,  1,      ACCESS_RW},
	[CPDMA_STAT_IDLE]	  = {CPDMA_DMASTATUS,	31, 1,      ACCESS_RO},
	[CPDMA_STAT_TX_ERR_CODE]  = {CPDMA_DMASTATUS,	20, 0xf,    ACCESS_RW},
	[CPDMA_STAT_TX_ERR_CHAN]  = {CPDMA_DMASTATUS,	16, 0x7,    ACCESS_RW},
	[CPDMA_STAT_RX_ERR_CODE]  = {CPDMA_DMASTATUS,	12, 0xf,    ACCESS_RW},
	[CPDMA_STAT_RX_ERR_CHAN]  = {CPDMA_DMASTATUS,	8,  0x7,    ACCESS_RW},
	[CPDMA_RX_BUFFER_OFFSET]  = {CPDMA_RXBUFFOFS,	0,  0xffff, ACCESS_RW},
};
int cpdma_control_get(struct cpdma_ctlr *ctlr, int control)
{
	unsigned long flags;
	struct cpdma_control_info *info = &controls[control];
	int ret;

	spin_lock_irqsave(&ctlr->lock, flags);

	ret = -ENOTSUPP;
	if (!ctlr->params.has_ext_regs)
		goto unlock_ret;

	ret = -EINVAL;
	if (ctlr->state != CPDMA_STATE_ACTIVE)
		goto unlock_ret;

	ret = -ENOENT;
	if (control < 0 || control >= ARRAY_SIZE(controls))
		goto unlock_ret;

	ret = -EPERM;
	if ((info->access & ACCESS_RO) != ACCESS_RO)
		goto unlock_ret;

	ret = (dma_reg_read(ctlr, info->reg) >> info->shift) & info->mask;

unlock_ret:
	spin_unlock_irqrestore(&ctlr->lock, flags);
	return ret;
}
int cpdma_control_set(struct cpdma_ctlr *ctlr, int control, int value)
{
	unsigned long flags;
	struct cpdma_control_info *info = &controls[control];
	int ret;
	u32 val;

	spin_lock_irqsave(&ctlr->lock, flags);

	ret = -ENOTSUPP;
	if (!ctlr->params.has_ext_regs)
		goto unlock_ret;

	ret = -EINVAL;
	if (ctlr->state != CPDMA_STATE_ACTIVE)
		goto unlock_ret;

	ret = -ENOENT;
	if (control < 0 || control >= ARRAY_SIZE(controls))
		goto unlock_ret;

	ret = -EPERM;
	if ((info->access & ACCESS_WO) != ACCESS_WO)
		goto unlock_ret;

	val  = dma_reg_read(ctlr, info->reg);
	val &= ~(info->mask << info->shift);
	val |= (value & info->mask) << info->shift;
	dma_reg_write(ctlr, info->reg, val);

	ret = 0;

unlock_ret:
	spin_unlock_irqrestore(&ctlr->lock, flags);
	return ret;
}