/*
 * Copyright (C) 2005-2006 by Texas Instruments
 *
 * This file implements a DMA interface using TI's CPPI DMA.
 * For now it's DaVinci-only, but CPPI isn't specific to DaVinci or USB.
 * The TUSB6020, using VLYNQ, has CPPI that looks much like DaVinci.
 */
#include <linux/usb.h>

#include "musb_core.h"
#include "cppi_dma.h"
/* CPPI DMA status 7-mar-2006:
 *
 * - See musb_{host,gadget}.c for more info
 *
 * - Correct RX DMA generally forces the engine into irq-per-packet mode,
 *   which can easily saturate the CPU under non-mass-storage loads.
 *
 * NOTES 24-aug-2006 (2.6.18-rc4):
 *
 * - peripheral RXDMA wedged in a test with packets of length 512/512/1.
 *   evidently after the 1 byte packet was received and acked, the queue
 *   of BDs got garbaged so it wouldn't empty the fifo.  (rxcsr 0x2003,
 *   and RX DMA0: 4 left, 80000000 8feff880, 8feff860 8feff860; 8f321401
 *   004001ff 00000001 ... 8feff860)  Host was just getting NAKed on tx
 *   of its next (512 byte) packet.  IRQ issues?
 *
 * REVISIT: the "transfer DMA" glue between CPPI and USB fifos will
 * evidently also directly update the RX and TX CSRs ... so audit all
 * host and peripheral side DMA code to avoid CSR access after DMA has
 * been started.
 */
/* REVISIT now we can avoid preallocating these descriptors; or
 * more simply, switch to a global freelist not per-channel ones.
 * Note: at full speed, 64 descriptors == 4K bulk data.
 */
#define NUM_TXCHAN_BD		64
#define NUM_RXCHAN_BD		64
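
/* Editor's note (not from the original source): the "4K" figure above is
 * just full speed bulk arithmetic -- a 64 byte bulk maxpacket times 64
 * transparent-mode BDs covers 64 * 64 = 4096 bytes per queued segment.
 */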
static inline void cpu_drain_writebuffer(void)
{
	wmb();
#ifdef CONFIG_CPU_ARM926T
	/* REVISIT this "should not be needed",
	 * but lack of it sure seemed to hurt ...
	 */
	asm("mcr p15, 0, r0, c7, c10, 4 @ drain write buffer\n");
#endif
}
static inline struct cppi_descriptor *cppi_bd_alloc(struct cppi_channel *c)
{
	struct cppi_descriptor	*bd = c->freelist;

	if (bd)
		c->freelist = bd->next;
	return bd;
}
static inline void
cppi_bd_free(struct cppi_channel *c, struct cppi_descriptor *bd)
{
	if (!bd)
		return;
	bd->next = c->freelist;
	c->freelist = bd;
}
/*
 *  Start DMA controller
 *
 *  Initialize the DMA controller as necessary.
 */
/* zero out entire rx state RAM entry for the channel */
static void cppi_reset_rx(struct cppi_rx_stateram __iomem *rx)
{
	musb_writel(&rx->rx_skipbytes, 0, 0);
	musb_writel(&rx->rx_head, 0, 0);
	musb_writel(&rx->rx_sop, 0, 0);
	musb_writel(&rx->rx_current, 0, 0);
	musb_writel(&rx->rx_buf_current, 0, 0);
	musb_writel(&rx->rx_len_len, 0, 0);
	musb_writel(&rx->rx_cnt_cnt, 0, 0);
}
/* zero out entire tx state RAM entry for the channel */
static void cppi_reset_tx(struct cppi_tx_stateram __iomem *tx, u32 ptr)
{
	musb_writel(&tx->tx_head, 0, 0);
	musb_writel(&tx->tx_buf, 0, 0);
	musb_writel(&tx->tx_current, 0, 0);
	musb_writel(&tx->tx_buf_current, 0, 0);
	musb_writel(&tx->tx_info, 0, 0);
	musb_writel(&tx->tx_rem_len, 0, 0);
	/* musb_writel(&tx->tx_dummy, 0, 0); */
	musb_writel(&tx->tx_complete, 0, ptr);
}
static void __init cppi_pool_init(struct cppi *cppi, struct cppi_channel *c)
{
	int	j;

	/* initialize channel fields */
	c->head = NULL;
	c->tail = NULL;
	c->last_processed = NULL;
	c->channel.status = MUSB_DMA_STATUS_UNKNOWN;
	c->controller = cppi;
	c->is_rndis = 0;
	c->freelist = NULL;

	/* build the BD Free list for the channel */
	for (j = 0; j < NUM_TXCHAN_BD + 1; j++) {
		struct cppi_descriptor	*bd;
		dma_addr_t		dma;

		bd = dma_pool_alloc(cppi->pool, GFP_KERNEL, &dma);
		bd->dma = dma;
		cppi_bd_free(c, bd);
	}
}
static int cppi_channel_abort(struct dma_channel *);
static void cppi_pool_free(struct cppi_channel *c)
{
	struct cppi		*cppi = c->controller;
	struct cppi_descriptor	*bd;

	(void) cppi_channel_abort(&c->channel);
	c->channel.status = MUSB_DMA_STATUS_UNKNOWN;
	c->controller = NULL;

	/* free all its bds */
	bd = c->last_processed;
	do {
		if (bd)
			dma_pool_free(cppi->pool, bd, bd->dma);
		bd = cppi_bd_alloc(c);
	} while (bd);
	c->last_processed = NULL;
}
static int __init cppi_controller_start(struct dma_controller *c)
{
	struct cppi	*controller;
	void __iomem	*tibase;
	int		i;

	controller = container_of(c, struct cppi, controller);

	/* do whatever is necessary to start controller */
	for (i = 0; i < ARRAY_SIZE(controller->tx); i++) {
		controller->tx[i].transmit = true;
		controller->tx[i].index = i;
	}
	for (i = 0; i < ARRAY_SIZE(controller->rx); i++) {
		controller->rx[i].transmit = false;
		controller->rx[i].index = i;
	}

	/* setup BD list on a per channel basis */
	for (i = 0; i < ARRAY_SIZE(controller->tx); i++)
		cppi_pool_init(controller, controller->tx + i);
	for (i = 0; i < ARRAY_SIZE(controller->rx); i++)
		cppi_pool_init(controller, controller->rx + i);

	tibase = controller->tibase;
	INIT_LIST_HEAD(&controller->tx_complete);

	/* initialise tx/rx channel head pointers to zero */
	for (i = 0; i < ARRAY_SIZE(controller->tx); i++) {
		struct cppi_channel	*tx_ch = controller->tx + i;
		struct cppi_tx_stateram __iomem *tx;

		INIT_LIST_HEAD(&tx_ch->tx_complete);

		tx = tibase + DAVINCI_TXCPPI_STATERAM_OFFSET(i);
		tx_ch->state_ram = tx;
		cppi_reset_tx(tx, 0);
	}
	for (i = 0; i < ARRAY_SIZE(controller->rx); i++) {
		struct cppi_channel	*rx_ch = controller->rx + i;
		struct cppi_rx_stateram __iomem *rx;

		INIT_LIST_HEAD(&rx_ch->tx_complete);

		rx = tibase + DAVINCI_RXCPPI_STATERAM_OFFSET(i);
		rx_ch->state_ram = rx;
		cppi_reset_rx(rx);
	}

	/* enable individual cppi channels */
	musb_writel(tibase, DAVINCI_TXCPPI_INTENAB_REG,
			DAVINCI_DMA_ALL_CHANNELS_ENABLE);
	musb_writel(tibase, DAVINCI_RXCPPI_INTENAB_REG,
			DAVINCI_DMA_ALL_CHANNELS_ENABLE);

	/* enable tx/rx CPPI control */
	musb_writel(tibase, DAVINCI_TXCPPI_CTRL_REG, DAVINCI_DMA_CTRL_ENABLE);
	musb_writel(tibase, DAVINCI_RXCPPI_CTRL_REG, DAVINCI_DMA_CTRL_ENABLE);

	/* disable RNDIS mode, also host rx RNDIS autorequest */
	musb_writel(tibase, DAVINCI_RNDIS_REG, 0);
	musb_writel(tibase, DAVINCI_AUTOREQ_REG, 0);

	return 0;
}
/*
 *  Stop DMA controller
 *
 *  De-Init the DMA controller as necessary.
 */
static int cppi_controller_stop(struct dma_controller *c)
{
	struct cppi		*controller;
	void __iomem		*tibase;
	int			i;

	controller = container_of(c, struct cppi, controller);

	tibase = controller->tibase;
	/* DISABLE INDIVIDUAL CHANNEL Interrupts */
	musb_writel(tibase, DAVINCI_TXCPPI_INTCLR_REG,
			DAVINCI_DMA_ALL_CHANNELS_ENABLE);
	musb_writel(tibase, DAVINCI_RXCPPI_INTCLR_REG,
			DAVINCI_DMA_ALL_CHANNELS_ENABLE);

	DBG(1, "Tearing down RX and TX Channels\n");
	for (i = 0; i < ARRAY_SIZE(controller->tx); i++) {
		/* FIXME restructure of txdma to use bds like rxdma */
		controller->tx[i].last_processed = NULL;
		cppi_pool_free(controller->tx + i);
	}
	for (i = 0; i < ARRAY_SIZE(controller->rx); i++)
		cppi_pool_free(controller->rx + i);

	/* In Tx case, proper teardown is supported.  We resort to disabling
	 * Tx/Rx CPPI after cleanup of Tx channels.  Before TX teardown is
	 * complete TX CPPI cannot be disabled.
	 */
	/* disable tx/rx cppi */
	musb_writel(tibase, DAVINCI_TXCPPI_CTRL_REG, DAVINCI_DMA_CTRL_DISABLE);
	musb_writel(tibase, DAVINCI_RXCPPI_CTRL_REG, DAVINCI_DMA_CTRL_DISABLE);

	return 0;
}
/* While dma channel is allocated, we only want the core irqs active
 * for fault reports, otherwise we'd get irqs that we don't care about.
 * Except for TX irqs, where dma done != fifo empty and reusable ...
 *
 * NOTE: docs don't say either way, but irq masking **enables** irqs.
 *
 * REVISIT same issue applies to pure PIO usage too, and non-cppi dma...
 */
static inline void core_rxirq_disable(void __iomem *tibase, unsigned epnum)
{
	musb_writel(tibase, DAVINCI_USB_INT_MASK_CLR_REG, 1 << (epnum + 8));
}
static inline void core_rxirq_enable(void __iomem *tibase, unsigned epnum)
{
	musb_writel(tibase, DAVINCI_USB_INT_MASK_SET_REG, 1 << (epnum + 8));
}
/*
 * Allocate a CPPI Channel for DMA.  With CPPI, channels are bound to
 * each transfer direction of a non-control endpoint, so allocating
 * (and deallocating) is mostly a way to notice bad housekeeping on
 * the software side.  We assume the irqs are always active.
 */
static struct dma_channel *
cppi_channel_allocate(struct dma_controller *c,
		struct musb_hw_ep *ep, u8 transmit)
{
	struct cppi		*controller;
	u8			index;
	struct cppi_channel	*cppi_ch;
	void __iomem		*tibase;

	controller = container_of(c, struct cppi, controller);
	tibase = controller->tibase;

	/* ep0 doesn't use DMA; remember cppi indices are 0..N-1 */
	index = ep->epnum - 1;

	/* return the corresponding CPPI Channel Handle, and
	 * probably disable the non-CPPI irq until we need it.
	 */
	if (transmit) {
		if (index >= ARRAY_SIZE(controller->tx)) {
			DBG(1, "no %cX%d CPPI channel\n", 'T', index);
			return NULL;
		}
		cppi_ch = controller->tx + index;
	} else {
		if (index >= ARRAY_SIZE(controller->rx)) {
			DBG(1, "no %cX%d CPPI channel\n", 'R', index);
			return NULL;
		}
		cppi_ch = controller->rx + index;
		core_rxirq_disable(tibase, ep->epnum);
	}

	/* REVISIT make this an error later once the same driver code works
	 * with the other DMA engine too
	 */
	if (cppi_ch->hw_ep)
		DBG(1, "re-allocating DMA%d %cX channel %p\n",
				index, transmit ? 'T' : 'R', cppi_ch);
	cppi_ch->hw_ep = ep;
	cppi_ch->channel.status = MUSB_DMA_STATUS_FREE;

	DBG(4, "Allocate CPPI%d %cX\n", index, transmit ? 'T' : 'R');
	return &cppi_ch->channel;
}
/* Release a CPPI Channel.  */
static void cppi_channel_release(struct dma_channel *channel)
{
	struct cppi_channel	*c;
	void __iomem		*tibase;

	/* REVISIT: for paranoia, check state and abort if needed... */

	c = container_of(channel, struct cppi_channel, channel);
	tibase = c->controller->tibase;
	if (!c->hw_ep)
		DBG(1, "releasing idle DMA channel %p\n", c);
	else if (!c->transmit)
		core_rxirq_enable(tibase, c->index + 1);

	/* for now, leave its cppi IRQ enabled (we won't trigger it) */
	c->hw_ep = NULL;
	channel->status = MUSB_DMA_STATUS_UNKNOWN;
}
/* Context: controller irqlocked */
static void
cppi_dump_rx(int level, struct cppi_channel *c, const char *tag)
{
	void __iomem			*base = c->controller->mregs;
	struct cppi_rx_stateram __iomem	*rx = c->state_ram;

	musb_ep_select(base, c->index + 1);

	DBG(level, "RX DMA%d%s: %d left, csr %04x, "
			"%08x H%08x S%08x C%08x, "
			"B%08x L%08x %08x .. %08x"
			"\n",
		c->index, tag,
		musb_readl(c->controller->tibase,
			DAVINCI_RXCPPI_BUFCNT0_REG + 4 * c->index),
		musb_readw(c->hw_ep->regs, MUSB_RXCSR),

		musb_readl(&rx->rx_skipbytes, 0),
		musb_readl(&rx->rx_head, 0),
		musb_readl(&rx->rx_sop, 0),
		musb_readl(&rx->rx_current, 0),

		musb_readl(&rx->rx_buf_current, 0),
		musb_readl(&rx->rx_len_len, 0),
		musb_readl(&rx->rx_cnt_cnt, 0),
		musb_readl(&rx->rx_complete, 0)
		);
}
/* Context: controller irqlocked */
static void
cppi_dump_tx(int level, struct cppi_channel *c, const char *tag)
{
	void __iomem			*base = c->controller->mregs;
	struct cppi_tx_stateram __iomem	*tx = c->state_ram;

	musb_ep_select(base, c->index + 1);

	DBG(level, "TX DMA%d%s: csr %04x, "
			"H%08x S%08x C%08x %08x, "
			"F%08x L%08x .. %08x"
			"\n",
		c->index, tag,
		musb_readw(c->hw_ep->regs, MUSB_TXCSR),

		musb_readl(&tx->tx_head, 0),
		musb_readl(&tx->tx_buf, 0),
		musb_readl(&tx->tx_current, 0),
		musb_readl(&tx->tx_buf_current, 0),

		musb_readl(&tx->tx_info, 0),
		musb_readl(&tx->tx_rem_len, 0),
		/* dummy/unused word 6 */
		musb_readl(&tx->tx_complete, 0)
		);
}
/* Context: controller irqlocked */
static inline void
cppi_rndis_update(struct cppi_channel *c, int is_rx,
		void __iomem *tibase, int is_rndis)
{
	/* we may need to change the rndis flag for this cppi channel */
	if (c->is_rndis != is_rndis) {
		u32	value = musb_readl(tibase, DAVINCI_RNDIS_REG);
		u32	temp = 1 << (c->index);

		if (is_rx)
			temp <<= 16;
		if (is_rndis)
			value |= temp;
		else
			value &= ~temp;
		musb_writel(tibase, DAVINCI_RNDIS_REG, value);
		c->is_rndis = is_rndis;
	}
}
static void cppi_dump_rxbd(const char *tag, struct cppi_descriptor *bd)
{
	pr_debug("RXBD/%s %08x: "
			"nxt %08x buf %08x off.blen %08x opt.plen %08x\n",
			tag, bd->dma,
			bd->hw_next, bd->hw_bufp, bd->hw_off_len,
			bd->hw_options);
}
static void cppi_dump_rxq(int level, const char *tag, struct cppi_channel *rx)
{
	struct cppi_descriptor	*bd;

	if (!_dbg_level(level))
		return;
	cppi_dump_rx(level, rx, tag);
	if (rx->last_processed)
		cppi_dump_rxbd("last", rx->last_processed);
	for (bd = rx->head; bd; bd = bd->next)
		cppi_dump_rxbd("active", bd);
}
/* NOTE: DaVinci autoreq is ignored except for host side "RNDIS" mode RX;
 * so we won't ever use it (see "CPPI RX Woes" below).
 */
static inline int cppi_autoreq_update(struct cppi_channel *rx,
		void __iomem *tibase, int onepacket, unsigned n_bds)
{
	u32	val;

#ifdef RNDIS_RX_IS_USABLE
	u32	tmp;

	/* assert(is_host_active(musb)) */

	/* start from "AutoReq never" */
	tmp = musb_readl(tibase, DAVINCI_AUTOREQ_REG);
	val = tmp & ~((0x3) << (rx->index * 2));

	/* HCD arranged reqpkt for packet #1.  we arrange int
	 * for all but the last one, maybe in two segments.
	 */
	if (!onepacket) {
#if 0
		/* use two segments, autoreq "all" then the last "never" */
		val |= ((0x3) << (rx->index * 2));
		n_bds--;
#else
		/* one segment, autoreq "all-but-last" */
		val |= ((0x1) << (rx->index * 2));
#endif
	}

	if (val != tmp) {
		/* make sure that autoreq is updated before continuing */
		musb_writel(tibase, DAVINCI_AUTOREQ_REG, val);
		do {
			tmp = musb_readl(tibase, DAVINCI_AUTOREQ_REG);
		} while (tmp != val);
	}
#endif

	/* REQPKT is turned off after each segment */
	if (n_bds && rx->channel.actual_len) {
		void __iomem	*regs = rx->hw_ep->regs;

		val = musb_readw(regs, MUSB_RXCSR);
		if (!(val & MUSB_RXCSR_H_REQPKT)) {
			val |= MUSB_RXCSR_H_REQPKT | MUSB_RXCSR_H_WZC_BITS;
			musb_writew(regs, MUSB_RXCSR, val);
			/* flush writebuffer */
			val = musb_readw(regs, MUSB_RXCSR);
		}
	}
	return n_bds;
}
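
/* Editor's sketch (hypothetical helper, not part of the original driver):
 * given the shifts used above, each RX channel owns two AUTOREQ bits at
 * [2N+1:2N].  Decoded per the comments in cppi_autoreq_update(): 0x0 is
 * "never", 0x1 is "all but the last packet", 0x3 is "always".
 */
static inline unsigned example_autoreq_mode(u32 autoreq_reg, unsigned index)
{
	return (autoreq_reg >> (index * 2)) & 0x3;	/* 0, 1, or 3 */
}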
/* Buffer enqueuing Logic:
 *
 *  - RX builds new queues each time, to help handle routine "early
 *    termination" cases (faults, including errors and short reads)
 *    more correctly.
 *
 *  - for now, TX reuses the same queue of BDs every time
 *
 *  REVISIT long term, we want a normal dynamic model.
 *  ... the goal will be to append to the
 *  existing queue, processing completed "dma buffers" (segments) on the fly.
 *
 *  Otherwise we force an IRQ latency between requests, which slows us a lot
 *  (especially in "transparent" dma).  Unfortunately that model seems to be
 *  inherent in the DMA model from the Mentor code, except in the rare case
 *  of transfers big enough (~128+ KB) that we could append "middle" segments
 *  in the TX paths.  (RX can't do this, see below.)
 *
 *  That's true even in the CPPI-friendly iso case, where most urbs have
 *  several small segments provided in a group and where the "packet at a time"
 *  "transparent" DMA model is always correct, even on the RX side.
 */

/*
 * CPPI TX:
 * ========
 * TX is a lot more reasonable than RX; it doesn't need to run in
 * irq-per-packet mode very often.  RNDIS mode seems to behave too
 * (except how it handles the exactly-N-packets case).  Building a
 * txdma queue with multiple requests (urb or usb_request) looks
 * like it would work ... but fault handling would need much testing.
 *
 * The main issue with TX mode RNDIS relates to transfer lengths that
 * are an exact multiple of the packet length.  It appears that there's
 * a hiccup in that case (maybe the DMA completes before the ZLP gets
 * written?) boiling down to not being able to rely on CPPI writing any
 * terminating zero length packet before the next transfer is written.
 * So that's punted to PIO; better yet, gadget drivers can avoid it.
 *
 * Plus, there's allegedly an undocumented constraint that rndis transfer
 * length be a multiple of 64 bytes ... but the chip doesn't act that
 * way, and we really don't _want_ that behavior anyway.
 *
 * On TX, "transparent" mode works ... although experiments have shown
 * problems trying to use the SOP/EOP bits in different USB packets.
 *
 * REVISIT try to handle terminating zero length packets using CPPI
 * instead of doing it by PIO after an IRQ.  (Meanwhile, make Ethernet
 * links avoid that issue by forcing them to avoid zlps.)
 */
static void
cppi_next_tx_segment(struct musb *musb, struct cppi_channel *tx)
{
	unsigned		maxpacket = tx->maxpacket;
	dma_addr_t		addr = tx->buf_dma + tx->offset;
	size_t			length = tx->buf_len - tx->offset;
	struct cppi_descriptor	*bd;
	unsigned		n_bds;
	unsigned		i;
	struct cppi_tx_stateram	__iomem *tx_ram = tx->state_ram;
	int			rndis;

	/* TX can use the CPPI "rndis" mode, where we can probably fit this
	 * transfer in one BD and one IRQ.  The only time we would NOT want
	 * to use it is when hardware constraints prevent it, or if we'd
	 * trigger the "send a ZLP?" confusion.
	 */
	rndis = (maxpacket & 0x3f) == 0
		&& length > maxpacket
		&& length < 0xffff
		&& (length % maxpacket) != 0;

	if (rndis) {
		maxpacket = length;
		n_bds = 1;
	} else {
		n_bds = length / maxpacket;
		if (!length || (length % maxpacket))
			n_bds++;
		n_bds = min(n_bds, (unsigned) NUM_TXCHAN_BD);
		length = min(n_bds * maxpacket, length);
	}

	DBG(4, "TX DMA%d, pktSz %d %s bds %d dma 0x%x len %u\n",
			tx->index,
			maxpacket,
			rndis ? "rndis" : "transparent",
			n_bds,
			addr, length);

	cppi_rndis_update(tx, 0, musb->ctrl_base, rndis);

	/* assuming here that channel_program is called during
	 * transfer initiation ... current code maintains state
	 * for one outstanding request only (no queues, not even
	 * the implicit ones of an iso urb).
	 */

	bd = tx->freelist;
	tx->head = bd;
	tx->last_processed = NULL;

	/* FIXME use BD pool like RX side does, and just queue
	 * the minimum number for this request.
	 */

	/* Prepare queue of BDs first, then hand it to hardware.
	 * All BDs except maybe the last should be of full packet
	 * size; for RNDIS there _is_ only that last packet.
	 */
	for (i = 0; i < n_bds; ) {
		if (++i < n_bds && bd->next)
			bd->hw_next = bd->next->dma;
		else
			bd->hw_next = 0;

		bd->hw_bufp = tx->buf_dma + tx->offset;

		/* FIXME set EOP only on the last packet,
		 * SOP only on the first ... avoid IRQs
		 */
		if ((tx->offset + maxpacket) <= tx->buf_len) {
			tx->offset += maxpacket;
			bd->hw_off_len = maxpacket;
			bd->hw_options = CPPI_SOP_SET | CPPI_EOP_SET
				| CPPI_OWN_SET | maxpacket;
		} else {
			/* only this one may be a partial USB Packet */
			u32	partial_len;

			partial_len = tx->buf_len - tx->offset;
			tx->offset = tx->buf_len;
			bd->hw_off_len = partial_len;

			bd->hw_options = CPPI_SOP_SET | CPPI_EOP_SET
				| CPPI_OWN_SET | partial_len;
			if (partial_len == 0)
				bd->hw_options |= CPPI_ZERO_SET;
		}

		DBG(5, "TXBD %p: nxt %08x buf %08x len %04x opt %08x\n",
				bd, bd->hw_next, bd->hw_bufp,
				bd->hw_off_len, bd->hw_options);

		/* update the last BD enqueued to the list */
		tx->tail = bd;
		bd = bd->next;
	}

	/* BDs live in DMA-coherent memory, but writes might be pending */
	cpu_drain_writebuffer();

	/* Write to the HeadPtr in state RAM to trigger */
	musb_writel(&tx_ram->tx_head, 0, (u32)tx->freelist->dma);

	cppi_dump_tx(5, tx, "/S");
}
/*
 * CPPI RX Woes:
 * =============
 * Consider a 1KB bulk RX buffer in two scenarios: (a) it's fed two 300 byte
 * packets back-to-back, and (b) it's fed two 512 byte packets back-to-back.
 * (Full speed transfers have similar scenarios.)
 *
 * The correct behavior for Linux is that (a) fills the buffer with 300 bytes,
 * and the next packet goes into a buffer that's queued later; while (b) fills
 * the buffer with 1024 bytes.  How to do that with CPPI?
 *
 * - RX queues in "rndis" mode -- one single BD -- handle (a) correctly, but
 *   (b) loses **BADLY** because nothing (!) happens when that second packet
 *   fills the buffer, much less when a third one arrives.  (Which makes this
 *   not a "true" RNDIS mode.  In the RNDIS protocol short-packet termination
 *   is optional, and it's fine if peripherals -- not hosts! -- pad messages
 *   out to end-of-buffer.  Standard PCI host controller DMA descriptors
 *   implement that mode by default ... which is no accident.)
 *
 * - RX queues in "transparent" mode -- two BDs with 512 bytes each -- have
 *   converse problems: (b) is handled right, but (a) loses badly.  CPPI RX
 *   ignores SOP/EOP markings and processes both of those BDs; so both packets
 *   are loaded into the buffer (with a 212 byte gap between them), and the
 *   next buffer queued will NOT get its 300 bytes of data.  (It seems like
 *   SOP/EOP are intended as outputs for RX queues, not inputs...)
 *
 * - A variant of "transparent" mode -- one BD at a time -- is the only way to
 *   reliably make both cases work, with software handling both cases
 *   correctly and at the significant penalty of needing an IRQ per packet.
 *   (The lack of I/O overlap can be slightly ameliorated by enabling double
 *   buffering.)
 *
 * So how to get rid of IRQ-per-packet?  The transparent multi-BD case could
 * be used in special cases like mass storage, which sets URB_SHORT_NOT_OK
 * (or maybe its peripheral side counterpart) to flag (a) scenarios as errors
 * with guaranteed driver level fault recovery and scrubbing out what's left
 * of that garbaged datastream.
 *
 * But there seems to be no way to identify the cases where CPPI RNDIS mode
 * is appropriate -- which do NOT include RNDIS host drivers, but do include
 * the CDC Ethernet driver! -- and the documentation is incomplete/wrong.
 * So we can't _ever_ use RX RNDIS mode ... except by using a heuristic
 * that applies best on the peripheral side (and which could fail rudely).
 *
 * Leaving only "transparent" mode; we avoid multi-bd modes in almost all
 * cases other than mass storage class.  Otherwise we're correct but slow,
 * since CPPI penalizes our need for a "true RNDIS" default mode.
 */
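
/* Editor's illustration (hypothetical helper, not in the original driver):
 * the transparent-mode BD count arithmetic used by the RX path below.  For
 * the 1KB/512 example above this yields two 512-byte BDs; scenario (b)
 * fills both cleanly, while in scenario (a) the hardware ignores SOP/EOP
 * and packs both 300 byte packets into the same buffer with a 212 byte gap.
 */
static inline unsigned example_transparent_n_bds(size_t length,
		unsigned maxpacket)
{
	unsigned n_bds = length / maxpacket;

	if (!length || (length % maxpacket))
		n_bds++;			/* partial final packet */
	return min(n_bds, (unsigned) NUM_RXCHAN_BD);
}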
/* Heuristic, intended to kick in for ethernet/rndis peripheral ONLY
 *
 * IFF
 *  (a) peripheral mode ... since rndis peripherals could pad their
 *	writes to hosts, causing i/o failure; or we'd have to cope with
 *	a largely unknowable variety of host side protocol variants
 *  (b) and short reads are NOT errors ... since full reads would
 *	cause those same i/o failures
 *  (c) and read length is
 *	- less than 64KB (max per cppi descriptor)
 *	- not a multiple of 4096 (g_zero default, full reads typical)
 *	- N (>1) packets long, ditto (full reads not EXPECTED)
 * ... then use RNDIS mode.
 *
 * Cost of heuristic failing: RXDMA wedges at the end of transfers that
 * fill out the whole buffer.  Buggy host side usb network drivers could
 * trigger that, but "in the field" such bugs seem to be all but unknown.
 *
 * So this module parameter lets the heuristic be disabled.  When using
 * gadgetfs, the heuristic will probably need to be disabled.
 */
static int cppi_rx_rndis = 1;

module_param(cppi_rx_rndis, bool, 0);
MODULE_PARM_DESC(cppi_rx_rndis, "enable/disable RX RNDIS heuristic");
/**
 * cppi_next_rx_segment - dma read for the next chunk of a buffer
 * @musb: the controller
 * @rx: dma channel
 * @onepacket: true unless caller treats short reads as errors, and
 *	performs fault recovery above usbcore.
 * Context: controller irqlocked
 *
 * See above notes about why we can't use multi-BD RX queues except in
 * rare cases (mass storage class), and can never use the hardware "rndis"
 * mode (since it's not a "true" RNDIS mode) with complete safety.
 *
 * It's ESSENTIAL that callers specify "onepacket" mode unless they kick in
 * code to recover from corrupted datastreams after each short transfer.
 */
static void
cppi_next_rx_segment(struct musb *musb, struct cppi_channel *rx, int onepacket)
{
	unsigned		maxpacket = rx->maxpacket;
	dma_addr_t		addr = rx->buf_dma + rx->offset;
	size_t			length = rx->buf_len - rx->offset;
	struct cppi_descriptor	*bd, *tail;
	unsigned		n_bds;
	unsigned		i;
	void __iomem		*tibase = musb->ctrl_base;
	int			is_rndis = 0;
	struct cppi_rx_stateram	__iomem *rx_ram = rx->state_ram;

	if (onepacket) {
		/* almost every USB driver, host or peripheral side */
		n_bds = 1;

		/* maybe apply the heuristic above */
		if (cppi_rx_rndis
				&& is_peripheral_active(musb)
				&& length > maxpacket
				&& (length & ~0xffff) == 0
				&& (length & 0x0fff) != 0
				&& (length & (maxpacket - 1)) == 0) {
			maxpacket = length;
			is_rndis = 1;
		}
	} else {
		/* virtually nothing except mass storage class */
		if (length > 0xffff) {
			n_bds = 0xffff / maxpacket;
			length = n_bds * maxpacket;
		} else {
			n_bds = length / maxpacket;
			if (length % maxpacket)
				n_bds++;
		}
		if (n_bds == 1)
			onepacket = 1;
		else
			n_bds = min(n_bds, (unsigned) NUM_RXCHAN_BD);
	}

	/* In host mode, autorequest logic can generate some IN tokens; it's
	 * tricky since we can't leave REQPKT set in RXCSR after the transfer
	 * finishes.  So: multipacket transfers involve two or more segments.
	 * And always at least two IRQs ... RNDIS mode is not an option.
	 */
	if (is_host_active(musb))
		n_bds = cppi_autoreq_update(rx, tibase, onepacket, n_bds);

	cppi_rndis_update(rx, 1, musb->ctrl_base, is_rndis);

	length = min(n_bds * maxpacket, length);

	DBG(4, "RX DMA%d seg, maxp %d %s bds %d (cnt %d) "
			"dma 0x%x len %u %u/%u\n",
			rx->index, maxpacket,
			onepacket
				? (is_rndis ? "rndis" : "onepacket")
				: "multipacket",
			n_bds,
			musb_readl(tibase,
				DAVINCI_RXCPPI_BUFCNT0_REG + (rx->index * 4))
					& 0xffff,
			addr, length, rx->channel.actual_len, rx->buf_len);

	/* only queue one segment at a time, since the hardware prevents
	 * correct queue shutdown after unexpected short packets
	 */
	bd = cppi_bd_alloc(rx);
	rx->head = bd;

	/* Build BDs for all packets in this segment */
	for (i = 0, tail = NULL; bd && i < n_bds; i++, tail = bd) {
		u32	bd_len;

		if (i) {
			bd = cppi_bd_alloc(rx);
			if (!bd)
				break;
			tail->next = bd;
			tail->hw_next = bd->dma;
		}
		bd->hw_next = 0;

		/* all but the last packet will be maxpacket size */
		if (maxpacket < length)
			bd_len = maxpacket;
		else
			bd_len = length;

		bd->hw_bufp = addr;
		addr += bd_len;
		rx->offset += bd_len;

		bd->hw_off_len = (0 /*offset*/ << 16) + bd_len;
		bd->buflen = bd_len;

		bd->hw_options = CPPI_OWN_SET | (i == 0 ? length : 0);
		length -= bd_len;
	}

	/* we always expect at least one reusable BD! */
	if (!tail) {
		WARNING("rx dma%d -- no BDs? need %d\n", rx->index, n_bds);
		return;
	} else if (i < n_bds)
		WARNING("rx dma%d -- only %d of %d BDs\n", rx->index, i, n_bds);

	tail->next = NULL;
	tail->hw_next = 0;

	bd = rx->head;
	rx->tail = tail;

	/* short reads and other faults should terminate this entire
	 * dma segment.  we want one "dma packet" per dma segment, not
	 * one per USB packet, terminating the whole queue at once...
	 * NOTE that current hardware seems to ignore SOP and EOP.
	 */
	bd->hw_options |= CPPI_SOP_SET;
	tail->hw_options |= CPPI_EOP_SET;

	if (_dbg_level(5)) {
		struct cppi_descriptor	*d;

		for (d = rx->head; d; d = d->next)
			cppi_dump_rxbd("S", d);
	}

	/* in case the preceding transfer left some state... */
	tail = rx->last_processed;
	if (tail) {
		tail->next = bd;
		tail->hw_next = bd->dma;
	}

	core_rxirq_enable(tibase, rx->index + 1);

	/* BDs live in DMA-coherent memory, but writes might be pending */
	cpu_drain_writebuffer();

	/* REVISIT specs say to write this AFTER the BUFCNT register
	 * below ... but that loses badly.
	 */
	musb_writel(&rx_ram->rx_head, 0, bd->dma);

	/* bufferCount must be at least 3, and zeroes on completion
	 * unless it underflows below zero, or stops at two, or keeps
	 * growing ... grr.
	 */
	i = musb_readl(tibase,
			DAVINCI_RXCPPI_BUFCNT0_REG + (rx->index * 4))
			& 0xffff;

	if (!i)
		musb_writel(tibase,
			DAVINCI_RXCPPI_BUFCNT0_REG + (rx->index * 4),
			n_bds + 2);
	else if (n_bds > (i - 3))
		musb_writel(tibase,
			DAVINCI_RXCPPI_BUFCNT0_REG + (rx->index * 4),
			n_bds - (i - 3));

	i = musb_readl(tibase,
			DAVINCI_RXCPPI_BUFCNT0_REG + (rx->index * 4))
			& 0xffff;
	if (i < (2 + n_bds)) {
		DBG(2, "bufcnt%d underrun - %d (for %d)\n",
					rx->index, i, n_bds);
		musb_writel(tibase,
			DAVINCI_RXCPPI_BUFCNT0_REG + (rx->index * 4),
			n_bds + 2);
	}

	cppi_dump_rx(4, rx, "/S");
}
/**
 * cppi_channel_program - program channel for data transfer
 * @ch: the channel
 * @maxpacket: max packet size
 * @mode: For RX, 1 unless the usb protocol driver promised to treat
 *	all short reads as errors and kick in high level fault recovery.
 *	For TX, ignored because of RNDIS mode races/glitches.
 * @dma_addr: dma address of buffer
 * @len: length of buffer
 * Context: controller irqlocked
 */
static int cppi_channel_program(struct dma_channel *ch,
		u16 maxpacket, u8 mode,
		dma_addr_t dma_addr, u32 len)
{
	struct cppi_channel	*cppi_ch;
	struct cppi		*controller;
	struct musb		*musb;

	cppi_ch = container_of(ch, struct cppi_channel, channel);
	controller = cppi_ch->controller;
	musb = controller->musb;

	switch (ch->status) {
	case MUSB_DMA_STATUS_BUS_ABORT:
	case MUSB_DMA_STATUS_CORE_ABORT:
		/* fault irq handler should have handled cleanup */
		WARNING("%cX DMA%d not cleaned up after abort!\n",
				cppi_ch->transmit ? 'T' : 'R',
				cppi_ch->index);
		break;
	case MUSB_DMA_STATUS_BUSY:
		WARNING("program active channel? %cX DMA%d\n",
				cppi_ch->transmit ? 'T' : 'R',
				cppi_ch->index);
		break;
	case MUSB_DMA_STATUS_UNKNOWN:
		DBG(1, "%cX DMA%d not allocated!\n",
				cppi_ch->transmit ? 'T' : 'R',
				cppi_ch->index);
		/* FALLTHROUGH */
	case MUSB_DMA_STATUS_FREE:
		break;
	}

	ch->status = MUSB_DMA_STATUS_BUSY;

	/* set transfer parameters, then queue up its first segment */
	cppi_ch->buf_dma = dma_addr;
	cppi_ch->offset = 0;
	cppi_ch->maxpacket = maxpacket;
	cppi_ch->buf_len = len;
	cppi_ch->channel.actual_len = 0;

	/* TX channel? or RX? */
	if (cppi_ch->transmit)
		cppi_next_tx_segment(musb, cppi_ch);
	else
		cppi_next_rx_segment(musb, cppi_ch, mode);

	return true;
}
static bool cppi_rx_scan(struct cppi *cppi, unsigned ch)
{
	struct cppi_channel		*rx = &cppi->rx[ch];
	struct cppi_rx_stateram __iomem	*state = rx->state_ram;
	struct cppi_descriptor		*bd;
	struct cppi_descriptor		*last = rx->last_processed;
	bool				completed = false;
	bool				acked = false;
	int				i;
	dma_addr_t			safe2ack;
	void __iomem			*regs = rx->hw_ep->regs;

	cppi_dump_rx(6, rx, "/K");

	bd = last ? last->next : rx->head;
	if (!bd)
		return false;

	/* run through all completed BDs */
	for (i = 0, safe2ack = musb_readl(&state->rx_complete, 0);
			(safe2ack || completed) && bd && i < NUM_RXCHAN_BD;
			i++, bd = bd->next) {
		u16	len;

		/* catch latest BD writes from CPPI */
		rmb();
		if (!completed && (bd->hw_options & CPPI_OWN_SET))
			break;

		DBG(5, "C/RXBD %08x: nxt %08x buf %08x "
			"off.len %08x opt.len %08x (%d)\n",
			bd->dma, bd->hw_next, bd->hw_bufp,
			bd->hw_off_len, bd->hw_options,
			rx->channel.actual_len);

		/* actual packet received length */
		if ((bd->hw_options & CPPI_SOP_SET) && !completed)
			len = bd->hw_off_len & CPPI_RECV_PKTLEN_MASK;
		else
			len = 0;

		if (bd->hw_options & CPPI_EOQ_MASK)
			completed = true;

		if (!completed && len < bd->buflen) {
			/* NOTE: when we get a short packet, RXCSR_H_REQPKT
			 * must have been cleared, and no more DMA packets may
			 * be active in the queue... TI docs didn't say, but
			 * CPPI ignores those BDs even though OWN is still set.
			 */
			completed = true;
			DBG(3, "rx short %d/%d (%d)\n",
					len, bd->buflen,
					rx->channel.actual_len);
		}

		/* If we got here, we expect to ack at least one BD; meanwhile
		 * CPPI may be completing other BDs while we scan this list...
		 *
		 * RACE: we can notice OWN cleared before CPPI raises the
		 * matching irq by writing that BD as the completion pointer.
		 * In such cases, stop scanning and wait for the irq, avoiding
		 * lost acks and states where BD ownership is unclear.
		 */
		if (bd->dma == safe2ack) {
			musb_writel(&state->rx_complete, 0, safe2ack);
			safe2ack = musb_readl(&state->rx_complete, 0);
			acked = true;
			if (bd->dma == safe2ack)
				safe2ack = 0;
		}

		rx->channel.actual_len += len;

		cppi_bd_free(rx, last);
		last = bd;

		/* stop scanning on end-of-segment */
		if (bd->hw_next == 0)
			completed = true;
	}
	rx->last_processed = last;

	/* dma abort, lost ack, or ... */
	if (!acked && last) {
		int	csr;

		if (safe2ack == 0 || safe2ack == rx->last_processed->dma)
			musb_writel(&state->rx_complete, 0, safe2ack);
		if (safe2ack == 0) {
			cppi_bd_free(rx, last);
			rx->last_processed = NULL;

			/* if we land here on the host side, H_REQPKT will
			 * be clear and we need to restart the queue...
			 */
			WARN_ON(rx->head);
		}
		musb_ep_select(cppi->mregs, rx->index + 1);
		csr = musb_readw(regs, MUSB_RXCSR);
		if (csr & MUSB_RXCSR_DMAENAB) {
			DBG(4, "list%d %p/%p, last %08x%s, csr %04x\n",
				rx->index,
				rx->head, rx->tail,
				rx->last_processed
					? rx->last_processed->dma
					: 0,
				completed ? ", completed" : "",
				csr);
			cppi_dump_rxq(4, "/what?", rx);
		}
	}
	if (!completed) {
		int	csr;

		/* REVISIT seems like "autoreq all but EOP" doesn't...
		 * setting it here "should" be racey, but seems to work
		 */
		csr = musb_readw(rx->hw_ep->regs, MUSB_RXCSR);
		if (is_host_active(cppi->musb)
				&& rx->head
				&& !(csr & MUSB_RXCSR_H_REQPKT)) {
			csr |= MUSB_RXCSR_H_REQPKT;
			musb_writew(regs, MUSB_RXCSR,
					MUSB_RXCSR_H_WZC_BITS | csr);
			csr = musb_readw(rx->hw_ep->regs, MUSB_RXCSR);
		}
	} else {
		rx->head = NULL;
		rx->tail = NULL;
	}

	cppi_dump_rx(6, rx, completed ? "/completed" : "/cleaned");
	return completed;
}
void cppi_completion(struct musb *musb, u32 rx, u32 tx)
{
	void __iomem		*tibase;
	int			i, index;
	struct cppi		*cppi;
	struct musb_hw_ep	*hw_ep = NULL;

	cppi = container_of(musb->dma_controller, struct cppi, controller);

	tibase = musb->ctrl_base;

	/* process TX channels */
	for (index = 0; tx; tx = tx >> 1, index++) {
		struct cppi_channel		*tx_ch;
		struct cppi_tx_stateram __iomem	*tx_ram;
		bool				completed = false;
		struct cppi_descriptor		*bd;

		if (!(tx & 1))
			continue;

		tx_ch = cppi->tx + index;
		tx_ram = tx_ch->state_ram;

		/* FIXME need a cppi_tx_scan() routine, which
		 * can also be called from abort code
		 */

		cppi_dump_tx(5, tx_ch, "/E");

		bd = tx_ch->head;

		if (NULL == bd) {
			DBG(1, "null BD\n");
			continue;
		}

		/* run through all completed BDs */
		for (i = 0; !completed && bd && i < NUM_TXCHAN_BD;
				i++, bd = bd->next) {
			u16	len;

			/* catch latest BD writes from CPPI */
			rmb();
			if (bd->hw_options & CPPI_OWN_SET)
				break;

			DBG(5, "C/TXBD %p n %x b %x off %x opt %x\n",
					bd, bd->hw_next, bd->hw_bufp,
					bd->hw_off_len, bd->hw_options);

			len = bd->hw_off_len & CPPI_BUFFER_LEN_MASK;
			tx_ch->channel.actual_len += len;

			tx_ch->last_processed = bd;

			/* write completion register to acknowledge
			 * processing of completed BDs, and possibly
			 * release the IRQ; EOQ might not be set ...
			 *
			 * REVISIT use the same ack strategy as rx
			 *
			 * REVISIT have observed bit 18 set; huh??
			 */
			/* if ((bd->hw_options & CPPI_EOQ_MASK)) */
			musb_writel(&tx_ram->tx_complete, 0, bd->dma);

			/* stop scanning on end-of-segment */
			if (bd->hw_next == 0)
				completed = true;
		}

		/* on end of segment, maybe go to next one */
		if (completed) {
			/* cppi_dump_tx(4, tx_ch, "/complete"); */

			/* transfer more, or report completion */
			if (tx_ch->offset >= tx_ch->buf_len) {
				tx_ch->head = NULL;
				tx_ch->tail = NULL;
				tx_ch->channel.status = MUSB_DMA_STATUS_FREE;

				hw_ep = tx_ch->hw_ep;

				/* Peripheral role never repurposes the
				 * endpoint, so immediate completion is
				 * safe.  Host role waits for the fifo
				 * to empty (TXPKTRDY irq) before going
				 * to the next queued bulk transfer.
				 */
				if (is_host_active(cppi->musb)) {
#if 0
					/* WORKAROUND because we may
					 * not always get TXKPTRDY ...
					 */
					int	csr;

					csr = musb_readw(hw_ep->regs,
							MUSB_TXCSR);
					if (csr & MUSB_TXCSR_TXPKTRDY)
#endif
						completed = false;
				}
				if (completed)
					musb_dma_completion(musb, index + 1, 1);

			} else {
				/* Bigger transfer than we could fit in
				 * that first batch of descriptors...
				 */
				cppi_next_tx_segment(musb, tx_ch);
			}
		} else
			tx_ch->head = bd;
	}

	/* Start processing the RX block */
	for (index = 0; rx; rx = rx >> 1, index++) {

		if (rx & 1) {
			struct cppi_channel	*rx_ch;

			rx_ch = cppi->rx + index;

			/* let incomplete dma segments finish */
			if (!cppi_rx_scan(cppi, index))
				continue;

			/* start another dma segment if needed */
			if (rx_ch->channel.actual_len != rx_ch->buf_len
					&& rx_ch->channel.actual_len
						== rx_ch->offset) {
				cppi_next_rx_segment(musb, rx_ch, 1);
				continue;
			}

			/* all segments completed! */
			rx_ch->channel.status = MUSB_DMA_STATUS_FREE;

			hw_ep = rx_ch->hw_ep;

			core_rxirq_disable(tibase, index + 1);
			musb_dma_completion(musb, index + 1, 0);
		}
	}

	/* write to CPPI EOI register to re-enable interrupts */
	musb_writel(tibase, DAVINCI_CPPI_EOI_REG, 0);
}
/* Instantiate a software object representing a DMA controller. */
struct dma_controller *__init
dma_controller_create(struct musb *musb, void __iomem *mregs)
{
	struct cppi		*controller;

	controller = kzalloc(sizeof *controller, GFP_KERNEL);
	if (!controller)
		return NULL;

	controller->mregs = mregs;
	controller->tibase = mregs - DAVINCI_BASE_OFFSET;

	controller->musb = musb;
	controller->controller.start = cppi_controller_start;
	controller->controller.stop = cppi_controller_stop;
	controller->controller.channel_alloc = cppi_channel_allocate;
	controller->controller.channel_release = cppi_channel_release;
	controller->controller.channel_program = cppi_channel_program;
	controller->controller.channel_abort = cppi_channel_abort;

	/* NOTE: allocating from on-chip SRAM would give the least
	 * contention for memory access, if that ever matters here.
	 */

	/* setup BufferPool */
	controller->pool = dma_pool_create("cppi",
			controller->musb->controller,
			sizeof(struct cppi_descriptor),
			CPPI_DESCRIPTOR_ALIGN, 0);
	if (!controller->pool) {
		kfree(controller);
		return NULL;
	}

	return &controller->controller;
}
/*
 *  Destroy a previously-instantiated DMA controller.
 */
void dma_controller_destroy(struct dma_controller *c)
{
	struct cppi	*cppi;

	cppi = container_of(c, struct cppi, controller);

	/* assert: caller stopped the controller first */
	dma_pool_destroy(cppi->pool);

	kfree(cppi);
}
/* Context: controller irqlocked, endpoint selected */
static int cppi_channel_abort(struct dma_channel *channel)
{
	struct cppi_channel	*cppi_ch;
	struct cppi		*controller;
	void __iomem		*mbase;
	void __iomem		*tibase;
	void __iomem		*regs = NULL;
	u32			value;
	struct cppi_descriptor	*queue;

	cppi_ch = container_of(channel, struct cppi_channel, channel);

	controller = cppi_ch->controller;

	switch (channel->status) {
	case MUSB_DMA_STATUS_BUS_ABORT:
	case MUSB_DMA_STATUS_CORE_ABORT:
		/* from RX or TX fault irq handler */
	case MUSB_DMA_STATUS_BUSY:
		/* the hardware needs shutting down */
		regs = cppi_ch->hw_ep->regs;
		break;
	case MUSB_DMA_STATUS_UNKNOWN:
	case MUSB_DMA_STATUS_FREE:
		return 0;
	default:
		return -EINVAL;
	}

	if (!cppi_ch->transmit && cppi_ch->head)
		cppi_dump_rxq(3, "/abort", cppi_ch);

	mbase = controller->mregs;
	tibase = controller->tibase;

	queue = cppi_ch->head;
	cppi_ch->head = NULL;
	cppi_ch->tail = NULL;

	/* REVISIT should rely on caller having done this,
	 * and caller should rely on us not changing it.
	 * peripheral code is safe ... check host too.
	 */
	musb_ep_select(mbase, cppi_ch->index + 1);

	if (cppi_ch->transmit) {
		struct cppi_tx_stateram __iomem	*tx_ram;
		int				enabled;

		/* mask interrupts raised to signal teardown complete. */
		enabled = musb_readl(tibase, DAVINCI_TXCPPI_INTENAB_REG)
				& (1 << cppi_ch->index);
		if (enabled)
			musb_writel(tibase, DAVINCI_TXCPPI_INTCLR_REG,
					(1 << cppi_ch->index));

		/* REVISIT put timeouts on these controller handshakes */

		cppi_dump_tx(6, cppi_ch, " (teardown)");

		/* teardown DMA engine then usb core */
		do {
			value = musb_readl(tibase, DAVINCI_TXCPPI_TEAR_REG);
		} while (!(value & CPPI_TEAR_READY));
		musb_writel(tibase, DAVINCI_TXCPPI_TEAR_REG, cppi_ch->index);

		tx_ram = cppi_ch->state_ram;
		do {
			value = musb_readl(&tx_ram->tx_complete, 0);
		} while (0xFFFFFFFC != value);
		musb_writel(&tx_ram->tx_complete, 0, 0xFFFFFFFC);

		/* FIXME clean up the transfer state ... here?
		 * the completion routine should get called with
		 * an appropriate status code.
		 */

		value = musb_readw(regs, MUSB_TXCSR);
		value &= ~MUSB_TXCSR_DMAENAB;
		value |= MUSB_TXCSR_FLUSHFIFO;
		musb_writew(regs, MUSB_TXCSR, value);
		musb_writew(regs, MUSB_TXCSR, value);

		/* re-enable interrupt */
		if (enabled)
			musb_writel(tibase, DAVINCI_TXCPPI_INTENAB_REG,
					(1 << cppi_ch->index));

		/* While we scrub the TX state RAM, ensure that we clean
		 * up any interrupt that's currently asserted:
		 * 1. Write to completion Ptr value 0x1 (bit 0 set)
		 *    (write back mode)
		 * 2. Write to completion Ptr value 0x0 (bit 0 cleared)
		 *    (compare mode)
		 * Value written is compared (for bits 31:2) and when
		 * equal, interrupt is deasserted.
		 */
		cppi_reset_tx(tx_ram, 1);
		musb_writel(&tx_ram->tx_complete, 0, 0);

		cppi_dump_tx(5, cppi_ch, " (done teardown)");

		/* REVISIT tx side _should_ clean up the same way
		 * as the RX side ... this does no cleanup at all!
		 */

	} else /* RX */ {
		u16	csr;

		/* NOTE: docs don't guarantee any of this works ... we
		 * expect that if the usb core stops telling the cppi core
		 * to pull more data from it, then it'll be safe to flush
		 * current RX DMA state iff any pending fifo transfer is done.
		 */

		core_rxirq_disable(tibase, cppi_ch->index + 1);

		/* for host, ensure ReqPkt is never set again */
		if (is_host_active(cppi_ch->controller->musb)) {
			value = musb_readl(tibase, DAVINCI_AUTOREQ_REG);
			value &= ~((0x3) << (cppi_ch->index * 2));
			musb_writel(tibase, DAVINCI_AUTOREQ_REG, value);
		}

		csr = musb_readw(regs, MUSB_RXCSR);

		/* for host, clear (just) ReqPkt at end of current packet(s) */
		if (is_host_active(cppi_ch->controller->musb)) {
			csr |= MUSB_RXCSR_H_WZC_BITS;
			csr &= ~MUSB_RXCSR_H_REQPKT;
		} else
			csr |= MUSB_RXCSR_P_WZC_BITS;

		/* clear dma enable */
		csr &= ~(MUSB_RXCSR_DMAENAB);
		musb_writew(regs, MUSB_RXCSR, csr);
		csr = musb_readw(regs, MUSB_RXCSR);

		/* Quiesce: wait for current dma to finish (if not cleanup).
		 * We can't use bit zero of stateram->rx_sop, since that
		 * refers to an entire "DMA packet" not just emptying the
		 * current fifo.  Most segments need multiple usb packets.
		 */
		if (channel->status == MUSB_DMA_STATUS_BUSY)
			udelay(50);

		/* scan the current list, reporting any data that was
		 * transferred and acking any IRQ
		 */
		cppi_rx_scan(controller, cppi_ch->index);

		/* clobber the existing state once it's idle
		 *
		 * NOTE: arguably, we should also wait for all the other
		 * RX channels to quiesce (how??) and then temporarily
		 * disable RXCPPI_CTRL_REG ... but it seems that we can
		 * rely on the controller restarting from state ram, with
		 * only RXCPPI_BUFCNT state being bogus.  BUFCNT will
		 * correct itself after the next DMA transfer though.
		 *
		 * REVISIT does using rndis mode change that?
		 */
		cppi_reset_rx(cppi_ch->state_ram);

		/* next DMA request _should_ load cppi head ptr */

		/* ... we don't "free" that list, only mutate it in place. */
		cppi_dump_rx(5, cppi_ch, " (done abort)");

		/* clean up previously pending bds */
		cppi_bd_free(cppi_ch, cppi_ch->last_processed);
		cppi_ch->last_processed = NULL;

		while (queue) {
			struct cppi_descriptor	*tmp = queue->next;

			cppi_bd_free(cppi_ch, queue);
			queue = tmp;
		}
	}

	channel->status = MUSB_DMA_STATUS_FREE;
	cppi_ch->buf_dma = 0;
	cppi_ch->offset = 0;
	cppi_ch->buf_len = 0;
	cppi_ch->maxpacket = 0;
	return 0;
}
/* TBD Queries:
 *
 * Power Management ... probably turn off cppi during suspend, restart;
 * check state ram?  Clocking is presumably shared with usb core.
 */