drivers/dma/xilinx_dma.c (linux-2.6-xlnx.git)
1 /*
2 * Xilinx DMA Engine support
4 * Copyright (C) 2010 Xilinx, Inc. All rights reserved.
6 * Based on the Freescale DMA driver.
8 * Description:
9 * This driver supports three Xilinx DMA engines:
10 * . Axi CDMA engine, it does transfers between memory and memory, it
11 * only has one channel.
12 * . Axi DMA engine, it does transfers between memory and device. It can be
13 * configured to have one channel or two channels. If configured as two
14 * channels, one is to transmit to a device and another is to receive from
15 * a device.
16 * . Axi VDMA engine, it does transfers between memory and video devices.
17 * It can be configured to have one channel or two channels. If configured
18 * as two channels, one is to transmit to the video device and another is
19 * to receive from the video device.
21 * This is free software; you can redistribute it and/or modify
22 * it under the terms of the GNU General Public License as published by
23 * the Free Software Foundation; either version 2 of the License, or
24 * (at your option) any later version.
25 */
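/*
 * Illustrative sketch only, not part of the driver: a slave client would
 * typically obtain one of these channels through the generic dmaengine
 * API and submit work roughly as below. The filter function, match value
 * and callback names are hypothetical.
 *
 *	struct dma_chan *chan;
 *	struct dma_async_tx_descriptor *txd;
 *	dma_cap_mask_t mask;
 *
 *	dma_cap_zero(mask);
 *	dma_cap_set(DMA_SLAVE, mask);
 *	chan = dma_request_channel(mask, my_filter_fn, &my_match);
 *	txd = chan->device->device_prep_slave_sg(chan, sgl, sg_len,
 *			DMA_MEM_TO_DEV, DMA_PREP_INTERRUPT, NULL);
 *	txd->callback = my_done_cb;
 *	txd->callback_param = my_ctx;
 *	dmaengine_submit(txd);
 *	dma_async_issue_pending(chan);
 */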
28 #include <linux/init.h>
29 #include <linux/module.h>
30 #include <linux/slab.h>
31 #include <linux/interrupt.h>
32 #include <linux/dmapool.h>
33 #include <asm/io.h>
34 #include <linux/of.h>
35 #include <linux/of_platform.h>
36 #include <linux/platform_device.h>
37 #include <linux/of_address.h>
38 #include <linux/amba/xilinx_dma.h>
39 #include <linux/of_irq.h>
41 /* Hw specific definitions
42  */
43 #define XILINX_DMA_MAX_CHANS_PER_DEVICE 0x2
44 #define XILINX_DMA_MAX_TRANS_LEN 0x7FFFFF
46 /* General register bits definitions
47  */
48 #define XILINX_DMA_CR_RESET_MASK 0x00000004 /* Reset DMA engine */
49 #define XILINX_DMA_CR_RUNSTOP_MASK 0x00000001 /* Start/stop DMA engine */
51 #define XILINX_DMA_SR_HALTED_MASK 0x00000001 /* DMA channel halted */
52 #define XILINX_DMA_SR_IDLE_MASK 0x00000002 /* DMA channel idle */
54 #define XILINX_DMA_SR_ERR_INTERNAL_MASK 0x00000010 /* Datamover internal err */
55 #define XILINX_DMA_SR_ERR_SLAVE_MASK 0x00000020 /* Datamover slave err */
56 #define XILINX_DMA_SR_ERR_DECODE_MASK 0x00000040 /* Datamover decode err */
57 #define XILINX_DMA_SR_ERR_SG_INT_MASK 0x00000100 /* SG internal err */
58 #define XILINX_DMA_SR_ERR_SG_SLV_MASK 0x00000200 /* SG slave err */
59 #define XILINX_DMA_SR_ERR_SG_DEC_MASK 0x00000400 /* SG decode err */
60 #define XILINX_DMA_SR_ERR_ALL_MASK 0x00000770 /* All errors */
62 #define XILINX_DMA_XR_IRQ_IOC_MASK 0x00001000 /* Completion interrupt */
63 #define XILINX_DMA_XR_IRQ_DELAY_MASK 0x00002000 /* Delay interrupt */
64 #define XILINX_DMA_XR_IRQ_ERROR_MASK 0x00004000 /* Error interrupt */
65 #define XILINX_DMA_XR_IRQ_ALL_MASK 0x00007000 /* All interrupts */
67 #define XILINX_DMA_XR_DELAY_MASK 0xFF000000 /* Delay timeout counter */
68 #define XILINX_DMA_XR_COALESCE_MASK 0x00FF0000 /* Coalesce counter */
70 #define XILINX_DMA_IRQ_SHIFT 12
71 #define XILINX_DMA_DELAY_SHIFT 24
72 #define XILINX_DMA_COALESCE_SHIFT 16
74 #define XILINX_DMA_DELAY_MAX 0xFF /**< Maximum delay counter value */
75 #define XILINX_DMA_COALESCE_MAX 0xFF /**< Maximum coalescing counter value */
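/*
 * A minimal sketch of how the coalescing and delay fields above are
 * packed into the channel control register; this mirrors the
 * DMA_SLAVE_CONFIG handling further below, and 'coalesc'/'delay' are
 * caller-supplied values (DMA_IN()/DMA_OUT() are the accessors defined
 * later in this file):
 *
 *	reg = DMA_IN(&chan->regs->cr);
 *	reg &= ~(XILINX_DMA_XR_COALESCE_MASK | XILINX_DMA_XR_DELAY_MASK);
 *	reg |= (coalesc << XILINX_DMA_COALESCE_SHIFT) |
 *	       (delay << XILINX_DMA_DELAY_SHIFT);
 *	DMA_OUT(&chan->regs->cr, reg);
 */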
77 #define XILINX_DMA_RX_CHANNEL_OFFSET 0x30
79 /* Axi CDMA special register bits
80  */
81 #define XILINX_CDMA_CR_SGMODE_MASK 0x00000008 /**< Scatter gather mode */
83 #define XILINX_CDMA_SR_SGINCLD_MASK 0x00000008 /**< Hybrid build */
84 #define XILINX_CDMA_XR_IRQ_SIMPLE_ALL_MASK 0x00005000 /**< All interrupts for
85 simple only mode */
86 /* Axi VDMA special register bits
87  */
88 #define XILINX_VDMA_CIRC_EN 0x00000002 /* Circular mode */
89 #define XILINX_VDMA_SYNC_EN 0x00000008 /* Sync enable mode */
90 #define XILINX_VDMA_FRMCNT_EN 0x00000010 /* Frm Cnt enable mode */
91 #define XILINX_VDMA_MSTR_MASK 0x00000F00 /* Master in control */
93 #define XILINX_VDMA_EXTFSYNC_SHIFT 6
94 #define XILINX_VDMA_MSTR_SHIFT 8
95 #define XILINX_VDMA_WR_REF_SHIFT 8
97 #define XILINX_VDMA_FRMDLY_SHIFT 24
99 #define XILINX_VDMA_DIRECT_REG_OFFSET 0x50
100 #define XILINX_VDMA_CHAN_DIRECT_REG_SIZE 0x50
102 #define XILINX_VDMA_PARK_REG_OFFSET 0x28
104 /* Axi VDMA Specific Error bits
105  */
106 #define XILINX_VDMA_SR_ERR_FSIZE_LESS_MASK 0x00000080 /* FSize Less
107 Mismatch err */
108 #define XILINX_VDMA_SR_ERR_LSIZE_LESS_MASK 0x00000100 /* LSize Less
109 Mismatch err */
110 #define XILINX_VDMA_SR_ERR_FSIZE_MORE_MASK 0x00000800 /* FSize
111 more err */
112 /* Recoverable errors are DMA Internal error, FSize Less, LSize Less
113 * and FSize More mismatch errors. These are recoverable only
114 * when C_FLUSH_ON_FSYNC is enabled in the hardware system.
115  */
116 #define XILINX_VDMA_SR_ERR_RECOVER_MASK 0x00000990 /* Recoverable
117 errs */
119 /* Axi VDMA Flush on Fsync bits
120  */
121 #define XILINX_VDMA_FLUSH_S2MM 3
122 #define XILINX_VDMA_FLUSH_MM2S 2
123 #define XILINX_VDMA_FLUSH_BOTH 1
125 /* BD definitions for Axi Dma and Axi Cdma
126  */
127 #define XILINX_DMA_BD_STS_COMPL_MASK 0x80000000
128 #define XILINX_DMA_BD_STS_ERR_MASK 0x70000000
129 #define XILINX_DMA_BD_STS_ALL_MASK 0xF0000000
131 /* Axi DMA BD special bits definitions
132  */
133 #define XILINX_DMA_BD_SOP 0x08000000 /* Start of packet bit */
134 #define XILINX_DMA_BD_EOP 0x04000000 /* End of packet bit */
136 /* Feature encodings
137  */
138 #define XILINX_DMA_FTR_DATA_WIDTH_MASK 0x000000FF /* Data width mask, 1024 */
139 #define XILINX_DMA_FTR_HAS_SG 0x00000100 /* Has SG */
140 #define XILINX_DMA_FTR_HAS_SG_SHIFT 8 /* Has SG shift */
141 #define XILINX_DMA_FTR_STSCNTRL_STRM 0x00010000 /* Optional feature for dma */
143 /* Feature encodings for VDMA
144  */
145 #define XILINX_VDMA_FTR_FLUSH_MASK 0x00000600 /* Flush-on-FSync Mask */
146 #define XILINX_VDMA_FTR_FLUSH_SHIFT 9 /* Flush-on-FSync shift */
148 /* Delay loop counter to prevent hardware failure
149  */
150 #define XILINX_DMA_RESET_LOOP 1000000
151 #define XILINX_DMA_HALT_LOOP 1000000
153 /* Device Id in the private structure
154  */
155 #define XILINX_DMA_DEVICE_ID_SHIFT 28
157 /* IO accessors
158  */
159 #define DMA_OUT(addr, val) (iowrite32(val, addr))
160 #define DMA_IN(addr) (ioread32(addr))
162 /* Hardware descriptor
164 * shared by all Xilinx DMA engines
165  */
166 struct xilinx_dma_desc_hw {
167 u32 next_desc; /* 0x00 */
168 u32 pad1; /* 0x04 */
169 u32 buf_addr; /* 0x08 */
170 u32 pad2; /* 0x0C */
171 u32 addr_vsize; /* 0x10 */
172 u32 hsize; /* 0x14 */
173 u32 control; /* 0x18 */
174 u32 status; /* 0x1C */
175 u32 app_0; /* 0x20 */
176 u32 app_1; /* 0x24 */
177 u32 app_2; /* 0x28 */
178 u32 app_3; /* 0x2C */
179 u32 app_4; /* 0x30 */
180 } __attribute__((aligned(64)));
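/*
 * Sketch of how two of these hardware descriptors are chained through
 * next_desc; 'd0'/'d1' are hypothetical software descriptors whose DMA
 * addresses come from the descriptor pool, as in
 * xilinx_dma_alloc_descriptor() below:
 *
 *	d0->hw.buf_addr  = src0;
 *	d0->hw.control   = len0 & XILINX_DMA_MAX_TRANS_LEN;
 *	d0->hw.next_desc = (u32)d1->async_tx.phys;
 *
 *	d1->hw.buf_addr  = src1;
 *	d1->hw.control   = (len1 & XILINX_DMA_MAX_TRANS_LEN) |
 *			   XILINX_DMA_BD_EOP;	(Axi DMA slave path only)
 */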
182 struct xilinx_dma_desc_sw {
183 struct xilinx_dma_desc_hw hw;
184 struct list_head node;
185 struct list_head tx_list;
186 struct dma_async_tx_descriptor async_tx;
187 } __attribute__((aligned(64)));
189 struct xdma_regs {
190 u32 cr; /* 0x00 Control Register */
191 u32 sr; /* 0x04 Status Register */
192 u32 cdr; /* 0x08 Current Descriptor Register */
193 u32 pad1;
194 u32 tdr; /* 0x10 Tail Descriptor Register */
195 u32 pad2;
196 u32 src; /* 0x18 Source Address Register (cdma) */
197 u32 pad3;
198 u32 dst; /* 0x20 Destination Address Register (cdma) */
199 u32 pad4;
200 u32 btt_ref;/* 0x28 Bytes To Transfer (cdma) or park_ref (vdma) */
201 u32 version; /* 0x2c version (vdma) */
204 struct vdma_addr_regs {
205 u32 vsize; /* 0x0 Vertical size */
206 u32 hsize; /* 0x4 Horizontal size */
207 u32 frmdly_stride; /* 0x8 Frame delay and stride */
208 u32 buf_addr[16]; /* 0xC - 0x48 Src addresses */
211 /* Per DMA specific operations should be embedded in the channel structure
212  */
213 struct xilinx_dma_chan {
214 struct xdma_regs __iomem *regs; /* Control status registers */
215 struct vdma_addr_regs *addr_regs; /* Direct address registers */
216 dma_cookie_t completed_cookie; /* The maximum cookie completed */
217 dma_cookie_t cookie; /* The current cookie */
218 spinlock_t lock; /* Descriptor operation lock */
219 bool sg_waiting; /* Scatter gather transfer waiting */
220 struct list_head active_list; /* Active descriptors */
221 struct list_head pending_list; /* Descriptors waiting */
222 struct dma_chan common; /* DMA common channel */
223 struct dma_pool *desc_pool; /* Descriptors pool */
224 struct device *dev; /* The dma device */
225 int irq; /* Channel IRQ */
226 int id; /* Channel ID */
227 enum dma_transfer_direction direction;/* Transfer direction */
228 int max_len; /* Maximum data len per transfer */
229 int is_lite; /* Whether is light build */
230 int num_frms; /* Number of frames */
231 int has_SG; /* Support scatter transfers */
232 int has_DRE; /* Support unaligned transfers */
233 int genlock; /* Support genlock mode */
234 int err; /* Channel has errors */
235 struct tasklet_struct tasklet; /* Cleanup work after irq */
236 u32 feature; /* IP feature */
237 u32 private; /* Match info for channel request */
238 void (*start_transfer)(struct xilinx_dma_chan *chan);
239 struct xilinx_dma_config config; /* Device configuration info */
240 u32 flush_fsync; /* Flush on Fsync */
243 struct xilinx_dma_device {
244 void __iomem *regs;
245 struct device *dev;
246 struct dma_device common;
247 struct xilinx_dma_chan *chan[XILINX_DMA_MAX_CHANS_PER_DEVICE];
248 u32 feature;
249 int irq;
252 #define to_xilinx_chan(chan) container_of(chan, struct xilinx_dma_chan, common)
254 /* Required functions
255  */
256 static int xilinx_dma_alloc_chan_resources(struct dma_chan *dchan)
258 struct xilinx_dma_chan *chan = to_xilinx_chan(dchan);
260 /* Has this channel already been allocated? */
261 if (chan->desc_pool)
262 return 1;
265 * We need the descriptor to be aligned to 64 bytes
266 * to meet the Xilinx DMA specification requirement.
268 chan->desc_pool = dma_pool_create("xilinx_dma_desc_pool",
269 chan->dev,
270 sizeof(struct xilinx_dma_desc_sw),
271 __alignof__(struct xilinx_dma_desc_sw), 0);
272 if (!chan->desc_pool) {
273 dev_err(chan->dev, "unable to allocate channel %d "
274 "descriptor pool\n", chan->id);
275 return -ENOMEM;
278 chan->completed_cookie = 1;
279 chan->cookie = 1;
281 /* there is at least one descriptor free to be allocated */
282 return 1;
285 static void xilinx_dma_free_desc_list(struct xilinx_dma_chan *chan,
286 struct list_head *list)
288 struct xilinx_dma_desc_sw *desc, *_desc;
290 list_for_each_entry_safe(desc, _desc, list, node) {
291 list_del(&desc->node);
292 dma_pool_free(chan->desc_pool, desc, desc->async_tx.phys);
296 static void xilinx_dma_free_desc_list_reverse(struct xilinx_dma_chan *chan,
297 struct list_head *list)
299 struct xilinx_dma_desc_sw *desc, *_desc;
301 list_for_each_entry_safe_reverse(desc, _desc, list, node) {
302 list_del(&desc->node);
303 dma_pool_free(chan->desc_pool, desc, desc->async_tx.phys);
307 static void xilinx_dma_free_chan_resources(struct dma_chan *dchan)
309 struct xilinx_dma_chan *chan = to_xilinx_chan(dchan);
310 unsigned long flags;
312 dev_dbg(chan->dev, "Free all channel resources.\n");
313 spin_lock_irqsave(&chan->lock, flags);
314 xilinx_dma_free_desc_list(chan, &chan->active_list);
315 xilinx_dma_free_desc_list(chan, &chan->pending_list);
316 spin_unlock_irqrestore(&chan->lock, flags);
318 dma_pool_destroy(chan->desc_pool);
319 chan->desc_pool = NULL;
322 static enum dma_status xilinx_dma_desc_status(struct xilinx_dma_chan *chan,
323 struct xilinx_dma_desc_sw *desc)
325 return dma_async_is_complete(desc->async_tx.cookie,
326 chan->completed_cookie,
327 chan->cookie);
330 static void xilinx_chan_desc_cleanup(struct xilinx_dma_chan *chan)
332 struct xilinx_dma_desc_sw *desc, *_desc;
333 unsigned long flags;
335 spin_lock_irqsave(&chan->lock, flags);
337 list_for_each_entry_safe(desc, _desc, &chan->active_list, node) {
338 dma_async_tx_callback callback;
339 void *callback_param;
341 if (xilinx_dma_desc_status(chan, desc) == DMA_IN_PROGRESS)
342 break;
344 /* Remove from the list of running transactions */
345 list_del(&desc->node);
347 /* Run the link descriptor callback function */
348 callback = desc->async_tx.callback;
349 callback_param = desc->async_tx.callback_param;
350 if (callback) {
351 spin_unlock_irqrestore(&chan->lock, flags);
352 callback(callback_param);
353 spin_lock_irqsave(&chan->lock, flags);
356 /* Run any dependencies, then free the descriptor */
357 dma_run_dependencies(&desc->async_tx);
358 dma_pool_free(chan->desc_pool, desc, desc->async_tx.phys);
361 spin_unlock_irqrestore(&chan->lock, flags);
364 static enum dma_status xilinx_tx_status(struct dma_chan *dchan,
365 dma_cookie_t cookie,
366 struct dma_tx_state *txstate)
368 struct xilinx_dma_chan *chan = to_xilinx_chan(dchan);
369 dma_cookie_t last_used;
370 dma_cookie_t last_complete;
372 xilinx_chan_desc_cleanup(chan);
374 last_used = dchan->cookie;
375 last_complete = chan->completed_cookie;
377 dma_set_tx_state(txstate, last_complete, last_used, 0);
379 return dma_async_is_complete(cookie, last_complete, last_used);
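/*
 * Sketch of a client polling the status reported by this routine; in
 * practice a completion callback is preferred over busy-waiting:
 *
 *	enum dma_status st;
 *
 *	do {
 *		st = dma_async_is_tx_complete(dchan, cookie, NULL, NULL);
 *		cpu_relax();
 *	} while (st == DMA_IN_PROGRESS);
 */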
382 static int dma_is_running(struct xilinx_dma_chan *chan)
384 return !(DMA_IN(&chan->regs->sr) & XILINX_DMA_SR_HALTED_MASK) &&
385 (DMA_IN(&chan->regs->cr) & XILINX_DMA_CR_RUNSTOP_MASK);
388 static int dma_is_idle(struct xilinx_dma_chan *chan)
390 return DMA_IN(&chan->regs->sr) & XILINX_DMA_SR_IDLE_MASK;
393 /* Only needed for Axi CDMA v2_00_a or earlier core
394  */
395 static void dma_sg_toggle(struct xilinx_dma_chan *chan)
398 DMA_OUT(&chan->regs->cr,
399 DMA_IN(&chan->regs->cr) & ~XILINX_CDMA_CR_SGMODE_MASK);
401 DMA_OUT(&chan->regs->cr,
402 DMA_IN(&chan->regs->cr) | XILINX_CDMA_CR_SGMODE_MASK);
405 #define XILINX_DMA_DRIVER_DEBUG 0
407 #if (XILINX_DMA_DRIVER_DEBUG == 1)
408 static void desc_dump(struct xilinx_dma_desc_hw *hw)
410 printk(KERN_INFO "hw desc %x:\n", (unsigned int)hw);
411 printk(KERN_INFO "\tnext_desc %x\n", hw->next_desc);
412 printk(KERN_INFO "\tbuf_addr %x\n", hw->buf_addr);
413 printk(KERN_INFO "\taddr_vsize %x\n", hw->addr_vsize);
414 printk(KERN_INFO "\thsize %x\n", hw->hsize);
415 printk(KERN_INFO "\tcontrol %x\n", hw->control);
416 printk(KERN_INFO "\tstatus %x\n", hw->status);
419 #endif
421 static void xilinx_cdma_start_transfer(struct xilinx_dma_chan *chan)
423 unsigned long flags;
424 struct xilinx_dma_desc_sw *desch, *desct;
425 struct xilinx_dma_desc_hw *hw;
427 if (chan->err)
428 return;
430 spin_lock_irqsave(&chan->lock, flags);
432 if (list_empty(&chan->pending_list))
433 goto out_unlock;
435 /* If hardware is busy, cannot submit
437 if (!dma_is_idle(chan)) {
438 dev_dbg(chan->dev, "DMA controller still busy %x\n",
439 DMA_IN(&chan->regs->sr));
440 goto out_unlock;
443 /* Enable interrupts
445 DMA_OUT(&chan->regs->cr,
446 DMA_IN(&chan->regs->cr) | XILINX_DMA_XR_IRQ_ALL_MASK);
448 desch = list_first_entry(&chan->pending_list, struct xilinx_dma_desc_sw,
449 node);
451 if (chan->has_SG) {
453 /* If hybrid mode, append pending list to active list
455 desct = container_of(chan->pending_list.prev,
456 struct xilinx_dma_desc_sw, node);
458 list_splice_tail_init(&chan->pending_list, &chan->active_list);
460 /* If hardware is idle, then all descriptors on the active list
461 * are done, start new transfers
463 dma_sg_toggle(chan);
465 DMA_OUT(&chan->regs->cdr, desch->async_tx.phys);
467 /* Update tail ptr register and start the transfer
469 DMA_OUT(&chan->regs->tdr, desct->async_tx.phys);
470 goto out_unlock;
473 /* In simple mode
475 list_del(&desch->node);
476 list_add_tail(&desch->node, &chan->active_list);
478 hw = &desch->hw;
480 DMA_OUT(&chan->regs->src, hw->buf_addr);
481 DMA_OUT(&chan->regs->dst, hw->addr_vsize);
483 /* Start the transfer
485 DMA_OUT(&chan->regs->btt_ref,
486 hw->control & XILINX_DMA_MAX_TRANS_LEN);
488 out_unlock:
489 spin_unlock_irqrestore(&chan->lock, flags);
492 /* If sg mode, link the pending list to running list; if simple mode, get the
493 * head of the pending list and submit it to hw
495 static void xilinx_cdma_issue_pending(struct dma_chan *dchan)
497 struct xilinx_dma_chan *chan = to_xilinx_chan(dchan);
498 xilinx_cdma_start_transfer(chan);
501 /* Stop the hardware, the ongoing transfer will be finished */
502 static void dma_halt(struct xilinx_dma_chan *chan)
504 int loop = XILINX_DMA_HALT_LOOP;
506 DMA_OUT(&chan->regs->cr,
507 DMA_IN(&chan->regs->cr) & ~XILINX_DMA_CR_RUNSTOP_MASK);
509 /* Wait for the hardware to halt
511 while (loop) {
512 if (!(DMA_IN(&chan->regs->cr) & XILINX_DMA_CR_RUNSTOP_MASK))
513 break;
515 loop -= 1;
518 if (!loop) {
519 pr_debug("Cannot stop channel %x: %x\n",
520 (unsigned int)chan,
521 (unsigned int)DMA_IN(&chan->regs->cr));
522 chan->err = 1;
525 return;
528 /* Start the hardware. Transfers are not started yet */
529 static void dma_start(struct xilinx_dma_chan *chan)
531 int loop = XILINX_DMA_HALT_LOOP;
533 DMA_OUT(&chan->regs->cr,
534 DMA_IN(&chan->regs->cr) | XILINX_DMA_CR_RUNSTOP_MASK);
536 /* Wait for the hardware to start
538 while (loop) {
539 if (DMA_IN(&chan->regs->cr) & XILINX_DMA_CR_RUNSTOP_MASK)
540 break;
542 loop -= 1;
545 if (!loop) {
546 pr_debug("Cannot start channel %x: %x\n",
547 (unsigned int)chan,
548 (unsigned int)DMA_IN(&chan->regs->cr));
550 chan->err = 1;
553 return;
557 static void xilinx_dma_start_transfer(struct xilinx_dma_chan *chan)
559 unsigned long flags;
560 struct xilinx_dma_desc_sw *desch, *desct;
561 struct xilinx_dma_desc_hw *hw;
563 if (chan->err)
564 return;
566 spin_lock_irqsave(&chan->lock, flags);
568 if (list_empty(&chan->pending_list))
569 goto out_unlock;
571 /* If hardware is busy, cannot submit
573 if (dma_is_running(chan) && !dma_is_idle(chan)) {
574 dev_dbg(chan->dev, "DMA controller still busy\n");
575 goto out_unlock;
578 /* If hardware is idle, then all descriptors on active list are
579 * done, start new transfers
581 dma_halt(chan);
583 if (chan->err)
584 goto out_unlock;
586 if (chan->has_SG) {
587 desch = list_first_entry(&chan->pending_list,
588 struct xilinx_dma_desc_sw, node);
590 desct = container_of(chan->pending_list.prev,
591 struct xilinx_dma_desc_sw, node);
593 DMA_OUT(&chan->regs->cdr, desch->async_tx.phys);
595 dma_start(chan);
597 if (chan->err)
598 goto out_unlock;
599 list_splice_tail_init(&chan->pending_list, &chan->active_list);
601 /* Enable interrupts
603 DMA_OUT(&chan->regs->cr,
604 DMA_IN(&chan->regs->cr) | XILINX_DMA_XR_IRQ_ALL_MASK);
606 /* Update tail ptr register and start the transfer
608 DMA_OUT(&chan->regs->tdr, desct->async_tx.phys);
609 goto out_unlock;
612 /* In simple mode
615 dma_halt(chan);
617 if (chan->err)
618 goto out_unlock;
620 dev_dbg(chan->dev, "xilinx_dma_start_transfer::simple DMA mode\n");
622 desch = list_first_entry(&chan->pending_list,
623 struct xilinx_dma_desc_sw, node);
625 list_del(&desch->node);
626 list_add_tail(&desch->node, &chan->active_list);
628 dma_start(chan);
630 if (chan->err)
631 goto out_unlock;
633 hw = &desch->hw;
635 /* Enable interrupts
637 DMA_OUT(&chan->regs->cr,
638 DMA_IN(&chan->regs->cr) | XILINX_DMA_XR_IRQ_ALL_MASK);
640 DMA_OUT(&chan->regs->src, hw->buf_addr);
642 /* Start the transfer
644 DMA_OUT(&chan->regs->btt_ref,
645 hw->control & XILINX_DMA_MAX_TRANS_LEN);
647 out_unlock:
648 spin_unlock_irqrestore(&chan->lock, flags);
651 static void xilinx_dma_issue_pending(struct dma_chan *dchan)
653 struct xilinx_dma_chan *chan = to_xilinx_chan(dchan);
654 xilinx_dma_start_transfer(chan);
657 static void xilinx_vdma_start_transfer(struct xilinx_dma_chan *chan)
659 unsigned long flags;
660 struct xilinx_dma_desc_sw *desch, *desct = NULL;
661 struct xilinx_dma_config *config;
662 u32 reg;
663 u8 *chan_base;
665 if (chan->err)
666 return;
668 spin_lock_irqsave(&chan->lock, flags);
670 if (list_empty(&chan->pending_list))
671 goto out_unlock;
673 /* If it is SG mode and hardware is busy, cannot submit
675 if (chan->has_SG && dma_is_running(chan) && !dma_is_idle(chan)) {
676 dev_dbg(chan->dev, "DMA controller still busy\n");
677 goto out_unlock;
680 /* If hardware is idle, then all descriptors on the running lists are
681 * done, start new transfers
683 if (chan->err)
684 goto out_unlock;
686 if (chan->has_SG) {
687 desch = list_first_entry(&chan->pending_list,
688 struct xilinx_dma_desc_sw, node);
690 desct = container_of(chan->pending_list.prev,
691 struct xilinx_dma_desc_sw, node);
693 DMA_OUT(&chan->regs->cdr, desch->async_tx.phys);
696 /* Configure the hardware using info in the config structure */
697 config = &(chan->config);
698 reg = DMA_IN(&chan->regs->cr);
700 if (config->frm_cnt_en)
701 reg |= XILINX_VDMA_FRMCNT_EN;
702 else
703 reg &= ~XILINX_VDMA_FRMCNT_EN;
705 /* With SG, start with circular mode, so that BDs can be fetched.
706 * In direct register mode, if not parking, enable circular mode */
707 if ((chan->has_SG) || (!config->park))
708 reg |= XILINX_VDMA_CIRC_EN;
710 if (config->park)
711 reg &= ~XILINX_VDMA_CIRC_EN;
713 DMA_OUT(&chan->regs->cr, reg);
715 if ((config->park_frm >= 0) && (config->park_frm < chan->num_frms)) {
716 if (config->direction == DMA_MEM_TO_DEV) {
717 chan_base = (char *)chan->regs;
718 DMA_OUT((chan_base + XILINX_VDMA_PARK_REG_OFFSET),
719 config->park_frm);
720 } else {
721 chan_base = ((char *)chan->regs -
722 XILINX_DMA_RX_CHANNEL_OFFSET);
723 DMA_OUT((chan_base + XILINX_VDMA_PARK_REG_OFFSET),
724 config->park_frm << XILINX_VDMA_WR_REF_SHIFT);
728 /* Start the hardware
730 dma_start(chan);
732 if (chan->err)
733 goto out_unlock;
734 list_splice_tail_init(&chan->pending_list, &chan->active_list);
736 /* Enable interrupts
738 * park/genlock testing does not use interrupts */
739 if (!chan->config.disable_intr) {
740 DMA_OUT(&chan->regs->cr,
741 DMA_IN(&chan->regs->cr) | XILINX_DMA_XR_IRQ_ALL_MASK);
742 } else {
743 DMA_OUT(&chan->regs->cr,
744 (DMA_IN(&chan->regs->cr) | XILINX_DMA_XR_IRQ_ALL_MASK) &
745 ~((chan->config.disable_intr << XILINX_DMA_IRQ_SHIFT)));
748 /* Start the transfer
750 if (chan->has_SG)
751 DMA_OUT(&chan->regs->tdr, desct->async_tx.phys);
752 else
753 DMA_OUT(&chan->addr_regs->vsize, config->vsize);
755 out_unlock:
756 spin_unlock_irqrestore(&chan->lock, flags);
759 static void xilinx_vdma_issue_pending(struct dma_chan *dchan)
761 struct xilinx_dma_chan *chan = to_xilinx_chan(dchan);
762 xilinx_vdma_start_transfer(chan);
766 * xilinx_dma_update_completed_cookie - Update the completed cookie.
767 * @chan : xilinx DMA channel
769 * CONTEXT: hardirq
771 static void xilinx_dma_update_completed_cookie(struct xilinx_dma_chan *chan)
773 struct xilinx_dma_desc_sw *desc = NULL;
774 struct xilinx_dma_desc_hw *hw = NULL;
775 unsigned long flags;
776 dma_cookie_t cookie = -EBUSY;
777 int done = 0;
779 spin_lock_irqsave(&chan->lock, flags);
781 if (list_empty(&chan->active_list)) {
782 dev_dbg(chan->dev, "no running descriptors\n");
783 goto out_unlock;
786 /* Get the last completed descriptor, update the cookie to that */
787 list_for_each_entry(desc, &chan->active_list, node) {
788 if ((!(chan->feature & XILINX_DMA_IP_VDMA)) && chan->has_SG) {
789 hw = &desc->hw;
791 /* If a BD has no status bits set, hw has it */
792 if (!(hw->status & XILINX_DMA_BD_STS_ALL_MASK)) {
793 break;
794 } else {
795 done = 1;
796 cookie = desc->async_tx.cookie;
798 } else {
799 /* In non-SG mode, all active entries are done */
800 done = 1;
801 cookie = desc->async_tx.cookie;
805 if (done)
806 chan->completed_cookie = cookie;
808 out_unlock:
809 spin_unlock_irqrestore(&chan->lock, flags);
812 /* Reset hardware
813  */
814 static int dma_init(struct xilinx_dma_chan *chan)
816 int loop = XILINX_DMA_RESET_LOOP;
817 u32 tmp;
819 DMA_OUT(&chan->regs->cr,
820 DMA_IN(&chan->regs->cr) | XILINX_DMA_CR_RESET_MASK);
822 tmp = DMA_IN(&chan->regs->cr) & XILINX_DMA_CR_RESET_MASK;
824 /* Wait for the hardware to finish reset
826 while (loop && tmp) {
827 tmp = DMA_IN(&chan->regs->cr) & XILINX_DMA_CR_RESET_MASK;
828 loop -= 1;
831 if (!loop) {
832 dev_err(chan->dev, "reset timeout, cr %x, sr %x\n",
833 DMA_IN(&chan->regs->cr), DMA_IN(&chan->regs->sr));
834 return 1;
837 /* For Axi CDMA, always do sg transfers if sg mode is built in
839 if ((chan->feature & XILINX_DMA_IP_CDMA) && chan->has_SG)
840 DMA_OUT(&chan->regs->cr, tmp | XILINX_CDMA_CR_SGMODE_MASK);
842 return 0;
846 static irqreturn_t dma_intr_handler(int irq, void *data)
848 struct xilinx_dma_chan *chan = data;
849 int update_cookie = 0;
850 int to_transfer = 0;
851 u32 stat, reg;
853 reg = DMA_IN(&chan->regs->cr);
855 /* Disable intr
857 DMA_OUT(&chan->regs->cr,
858 reg & ~XILINX_DMA_XR_IRQ_ALL_MASK);
860 stat = DMA_IN(&chan->regs->sr);
861 if (!(stat & XILINX_DMA_XR_IRQ_ALL_MASK))
862 return IRQ_NONE;
864 /* Ack the interrupts
866 DMA_OUT(&chan->regs->sr, XILINX_DMA_XR_IRQ_ALL_MASK);
868 /* Check for only the interrupts which are enabled
870 stat &= (reg & XILINX_DMA_XR_IRQ_ALL_MASK);
872 if (stat & XILINX_DMA_XR_IRQ_ERROR_MASK) {
873 if ((chan->feature & XILINX_DMA_IP_VDMA)
874 && chan->flush_fsync) {
875 /* VDMA Recoverable Errors, only when
876 C_FLUSH_ON_FSYNC is enabled */
877 u32 error = DMA_IN(&chan->regs->sr) &
878 XILINX_VDMA_SR_ERR_RECOVER_MASK;
879 if (error)
880 DMA_OUT(&chan->regs->sr, error);
881 else
882 chan->err = 1;
883 } else {
884 dev_err(chan->dev,
885 "Channel %x has errors %x, cdr %x tdr %x\n",
886 (unsigned int)chan,
887 (unsigned int)DMA_IN(&chan->regs->sr),
888 (unsigned int)DMA_IN(&chan->regs->cdr),
889 (unsigned int)DMA_IN(&chan->regs->tdr));
890 chan->err = 1;
894 /* The device takes too long to do the transfer when the user requires
895 * responsiveness.
896  */
897 if (stat & XILINX_DMA_XR_IRQ_DELAY_MASK)
898 dev_dbg(chan->dev, "Inter-packet latency too long\n");
900 if (stat & XILINX_DMA_XR_IRQ_IOC_MASK) {
901 update_cookie = 1;
902 to_transfer = 1;
905 if (update_cookie)
906 xilinx_dma_update_completed_cookie(chan);
908 if (to_transfer)
909 chan->start_transfer(chan);
911 tasklet_schedule(&chan->tasklet);
912 return IRQ_HANDLED;
915 static void dma_do_tasklet(unsigned long data)
917 struct xilinx_dma_chan *chan = (struct xilinx_dma_chan *)data;
919 xilinx_chan_desc_cleanup(chan);
922 /* Append the descriptor list to the pending list */
923 static void append_desc_queue(struct xilinx_dma_chan *chan,
924 struct xilinx_dma_desc_sw *desc)
926 struct xilinx_dma_desc_sw *tail = container_of(chan->pending_list.prev,
927 struct xilinx_dma_desc_sw, node);
928 struct xilinx_dma_desc_hw *hw;
930 if (list_empty(&chan->pending_list))
931 goto out_splice;
933 /* Add the hardware descriptor to the chain of hardware descriptors
934 * that already exists in memory.
936 hw = &(tail->hw);
937 hw->next_desc = (u32)desc->async_tx.phys;
939 /* Add the software descriptor and all children to the list
940 * of pending transactions
942 out_splice:
943 list_splice_tail_init(&desc->tx_list, &chan->pending_list);
946 /* Assign cookie to each descriptor, and append the descriptors to the pending
947 * list
949 static dma_cookie_t xilinx_dma_tx_submit(struct dma_async_tx_descriptor *tx)
951 struct xilinx_dma_chan *chan = to_xilinx_chan(tx->chan);
952 struct xilinx_dma_desc_sw *desc = container_of(tx,
953 struct xilinx_dma_desc_sw, async_tx);
954 struct xilinx_dma_desc_sw *child;
955 unsigned long flags;
956 dma_cookie_t cookie = -EBUSY;
958 if (chan->err) {
959 /* If reset fails, need to hard reset the system.
960 * Channel is no longer functional
962 if (!dma_init(chan))
963 chan->err = 0;
964 else
965 return cookie;
968 spin_lock_irqsave(&chan->lock, flags);
971 * assign cookies to all of the software descriptors
972 * that make up this transaction
974 cookie = chan->cookie;
975 list_for_each_entry(child, &desc->tx_list, node) {
976 cookie++;
977 if (cookie < 0)
978 cookie = DMA_MIN_COOKIE;
980 child->async_tx.cookie = cookie;
983 chan->cookie = cookie;
986 /* put this transaction onto the tail of the pending queue */
987 append_desc_queue(chan, desc);
989 spin_unlock_irqrestore(&chan->lock, flags);
991 return cookie;
994 static struct xilinx_dma_desc_sw *xilinx_dma_alloc_descriptor(
995 struct xilinx_dma_chan *chan)
997 struct xilinx_dma_desc_sw *desc;
998 dma_addr_t pdesc;
1000 desc = dma_pool_alloc(chan->desc_pool, GFP_ATOMIC, &pdesc);
1001 if (!desc) {
1002 dev_dbg(chan->dev, "out of memory for desc\n");
1003 return NULL;
1006 memset(desc, 0, sizeof(*desc));
1007 INIT_LIST_HEAD(&desc->tx_list);
1008 dma_async_tx_descriptor_init(&desc->async_tx, &chan->common);
1009 desc->async_tx.tx_submit = xilinx_dma_tx_submit;
1010 desc->async_tx.phys = pdesc;
1012 return desc;
1016 * xilinx_dma_prep_memcpy - prepare descriptors for a memcpy transaction
1017 * @dchan: DMA channel
1018 * @dma_dst: destination address
1019 * @dma_src: source address
1020 * @len: transfer length
1021 * @flags: transfer ack flags
1022  */
1023 static struct dma_async_tx_descriptor *xilinx_dma_prep_memcpy(
1024 struct dma_chan *dchan, dma_addr_t dma_dst, dma_addr_t dma_src,
1025 size_t len, unsigned long flags)
1027 struct xilinx_dma_chan *chan;
1028 struct xilinx_dma_desc_sw *first = NULL, *prev = NULL, *new;
1029 struct xilinx_dma_desc_hw *hw, *prev_hw;
1030 size_t copy;
1031 dma_addr_t src = dma_src;
1032 dma_addr_t dst = dma_dst;
1034 if (!dchan)
1035 return NULL;
1037 if (!len)
1038 return NULL;
1040 chan = to_xilinx_chan(dchan);
1042 if (chan->err) {
1044 /* If reset fails, need to hard reset the system.
1045 * Channel is no longer functional
1047 if (!dma_init(chan))
1048 chan->err = 0;
1049 else
1050 return NULL;
1053 /* If build does not have Data Realignment Engine (DRE),
1054 * src has to be aligned
1056 if (!chan->has_DRE) {
1057 if ((dma_src &
1058 (chan->feature & XILINX_DMA_FTR_DATA_WIDTH_MASK)) ||
1059 (dma_dst &
1060 (chan->feature & XILINX_DMA_FTR_DATA_WIDTH_MASK))) {
1062 dev_err(chan->dev,
1063 "Source or destination address not aligned when no DRE\n");
1065 return NULL;
1069 do {
1071 /* Allocate descriptor from DMA pool */
1072 new = xilinx_dma_alloc_descriptor(chan);
1073 if (!new) {
1074 dev_err(chan->dev,
1075 "No free memory for link descriptor\n");
1076 goto fail;
1079 copy = min(len, (size_t)chan->max_len);
1081 /* if lite build, transfer cannot cross page boundary
1083 if (chan->is_lite)
1084 copy = min(copy, (size_t)(PAGE_SIZE -
1085 (src & ~PAGE_MASK)));
1087 if (!copy) {
1088 dev_err(chan->dev,
1089 "Got zero transfer length for %x\n",
1090 (unsigned int)src);
1091 goto fail;
1094 hw = &(new->hw);
1095 hw->control =
1096 (hw->control & ~XILINX_DMA_MAX_TRANS_LEN) | copy;
1097 hw->buf_addr = src;
1098 hw->addr_vsize = dst;
1100 if (!first)
1101 first = new;
1102 else {
1103 prev_hw = &(prev->hw);
1104 prev_hw->next_desc = new->async_tx.phys;
1107 new->async_tx.cookie = 0;
1108 async_tx_ack(&new->async_tx);
1110 prev = new;
1111 len -= copy;
1112 src += copy;
1113 dst += copy;
1115 /* Insert the descriptor to the list */
1116 list_add_tail(&new->node, &first->tx_list);
1117 } while (len);
1119 /* Link the last BD with the first BD */
1120 hw->next_desc = first->async_tx.phys;
1122 new->async_tx.flags = flags; /* client is in control of this ack */
1123 new->async_tx.cookie = -EBUSY;
1125 return &first->async_tx;
1127 fail:
1128 if (!first)
1129 return NULL;
1131 xilinx_dma_free_desc_list_reverse(chan, &first->tx_list);
1132 return NULL;
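/*
 * Usage sketch for the CDMA memcpy path above; 'dst'/'src' are
 * dma_addr_t values obtained from the usual mapping helpers:
 *
 *	txd = chan->device->device_prep_dma_memcpy(chan, dst, src, len,
 *						    DMA_PREP_INTERRUPT);
 *	if (txd) {
 *		dmaengine_submit(txd);
 *		dma_async_issue_pending(chan);
 *	}
 */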
1136 * xilinx_dma_prep_slave_sg - prepare descriptors for a DMA_SLAVE transaction
1137 * @chan: DMA channel
1138 * @sgl: scatterlist to transfer to/from
1139 * @sg_len: number of entries in @scatterlist
1140 * @direction: DMA direction
1141 * @flags: transfer ack flags
1142  */
1143 static struct dma_async_tx_descriptor *xilinx_dma_prep_slave_sg(
1144 struct dma_chan *dchan, struct scatterlist *sgl, unsigned int sg_len,
1145 enum dma_transfer_direction direction, unsigned long flags,
1146 void *context)
1148 struct xilinx_dma_chan *chan;
1149 struct xilinx_dma_desc_sw *first = NULL, *prev = NULL, *new = NULL;
1150 struct xilinx_dma_desc_hw *hw = NULL, *prev_hw = NULL;
1152 size_t copy;
1154 int i;
1155 struct scatterlist *sg;
1156 size_t sg_used;
1157 dma_addr_t dma_src;
1159 #ifdef TEST_DMA_WITH_LOOPBACK
1160 int total_len;
1161 #endif
1162 if (!dchan)
1163 return NULL;
1165 chan = to_xilinx_chan(dchan);
1167 if (chan->direction != direction)
1168 return NULL;
1170 #ifdef TEST_DMA_WITH_LOOPBACK
1171 total_len = 0;
1173 for_each_sg(sgl, sg, sg_len, i) {
1174 total_len += sg_dma_len(sg);
1176 #endif
1178 * Build transactions using information in the scatter gather list
1180 for_each_sg(sgl, sg, sg_len, i) {
1181 sg_used = 0;
1183 /* Loop until the entire scatterlist entry is used */
1184 while (sg_used < sg_dma_len(sg)) {
1186 /* Allocate the link descriptor from DMA pool */
1187 new = xilinx_dma_alloc_descriptor(chan);
1188 if (!new) {
1189 dev_err(chan->dev, "No free memory for "
1190 "link descriptor\n");
1191 goto fail;
1195 * Calculate the maximum number of bytes to transfer,
1196 * making sure it is less than the hw limit
1198 copy = min((size_t)(sg_dma_len(sg) - sg_used),
1199 (size_t)chan->max_len);
1200 hw = &(new->hw);
1202 dma_src = sg_dma_address(sg) + sg_used;
1204 hw->buf_addr = dma_src;
1206 /* Fill in the descriptor */
1207 hw->control = copy;
1210 * If this is not the first descriptor, chain the
1211 * current descriptor after the previous descriptor
1213 * For the first DMA_MEM_TO_DEV transfer, set SOP
1215 if (!first) {
1216 first = new;
1217 if (direction == DMA_MEM_TO_DEV) {
1218 hw->control |= XILINX_DMA_BD_SOP;
1219 #ifdef TEST_DMA_WITH_LOOPBACK
1220 hw->app_4 = total_len;
1221 #endif
1223 } else {
1224 prev_hw = &(prev->hw);
1225 prev_hw->next_desc = new->async_tx.phys;
1228 new->async_tx.cookie = 0;
1229 async_tx_ack(&new->async_tx);
1231 prev = new;
1232 sg_used += copy;
1234 /* Insert the link descriptor into the LD ring */
1235 list_add_tail(&new->node, &first->tx_list);
1239 /* Link the last BD with the first BD */
1240 hw->next_desc = first->async_tx.phys;
1242 if (direction == DMA_MEM_TO_DEV)
1243 hw->control |= XILINX_DMA_BD_EOP;
1245 /* All scatter gather list entries have length == 0 */
1246 if (!first || !new)
1247 return NULL;
1249 new->async_tx.flags = flags;
1250 new->async_tx.cookie = -EBUSY;
1252 /* Set EOP to the last link descriptor of new list */
1253 hw->control |= XILINX_DMA_BD_EOP;
1255 return &first->async_tx;
1257 fail:
1258 /* If first was not set, then we failed to allocate the very first
1259 * descriptor, and we're done */
1260 if (!first)
1261 return NULL;
1264 * First is set, so all of the descriptors we allocated have been added
1265 * to first->tx_list, INCLUDING "first" itself. Therefore we
1266 * must traverse the list backwards freeing each descriptor in turn
1268 xilinx_dma_free_desc_list_reverse(chan, &first->tx_list);
1270 return NULL;
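/*
 * Sketch of building a one-entry scatterlist for the routine above;
 * 'buf'/'len' are a hypothetical virtually contiguous buffer:
 *
 *	struct scatterlist sg;
 *	int nents;
 *
 *	sg_init_one(&sg, buf, len);
 *	nents = dma_map_sg(dchan->device->dev, &sg, 1, DMA_TO_DEVICE);
 *	if (nents)
 *		txd = dchan->device->device_prep_slave_sg(dchan, &sg, nents,
 *				DMA_MEM_TO_DEV, DMA_PREP_INTERRUPT, NULL);
 */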
1274 * xilinx_vdma_prep_slave_sg - prepare descriptors for a DMA_SLAVE transaction
1275 * @chan: VDMA channel
1276 * @sgl: scatterlist to transfer to/from
1277 * @sg_len: number of entries in @scatterlist
1278 * @direction: DMA direction
1279 * @flags: transfer ack flags
1280  */
1281 static struct dma_async_tx_descriptor *xilinx_vdma_prep_slave_sg(
1282 struct dma_chan *dchan, struct scatterlist *sgl, unsigned int sg_len,
1283 enum dma_transfer_direction direction, unsigned long flags,
1284 void *context)
1286 struct xilinx_dma_chan *chan;
1287 struct xilinx_dma_desc_sw *first = NULL, *prev = NULL, *new = NULL;
1288 struct xilinx_dma_desc_hw *hw = NULL, *prev_hw = NULL;
1289 int i;
1290 struct scatterlist *sg;
1291 dma_addr_t dma_src;
1293 if (!dchan)
1294 return NULL;
1296 chan = to_xilinx_chan(dchan);
1298 if (chan->direction != direction)
1299 return NULL;
1301 /* Enforce one sg entry for one frame */
1302 if (sg_len != chan->num_frms) {
1303 dev_err(chan->dev, "number of entries %d not the "
1304 "same as num stores %d\n", sg_len, chan->num_frms);
1306 return NULL;
1309 if (!chan->has_SG) {
1310 DMA_OUT(&chan->addr_regs->hsize, chan->config.hsize);
1311 DMA_OUT(&chan->addr_regs->frmdly_stride,
1312 chan->config.frm_dly << XILINX_VDMA_FRMDLY_SHIFT |
1313 chan->config.stride);
1316 /* Build transactions using information in the scatter gather list
1318 for_each_sg(sgl, sg, sg_len, i) {
1320 /* Allocate the link descriptor from DMA pool */
1321 new = xilinx_dma_alloc_descriptor(chan);
1322 if (!new) {
1323 dev_err(chan->dev, "No free memory for "
1324 "link descriptor\n");
1325 goto fail;
1329 * Calculate the maximum number of bytes to transfer,
1330 * making sure it is less than the hw limit
1332 hw = &(new->hw);
1334 dma_src = sg_dma_address(sg);
1335 if (chan->has_SG) {
1336 hw->buf_addr = dma_src;
1338 /* Fill in the descriptor */
1339 hw->addr_vsize = chan->config.vsize;
1340 hw->hsize = chan->config.hsize;
1341 hw->control = (chan->config.frm_dly <<
1342 XILINX_VDMA_FRMDLY_SHIFT) |
1343 chan->config.stride;
1344 } else {
1345 /* Update the registers */
1346 DMA_OUT(&(chan->addr_regs->buf_addr[i]), dma_src);
1349 /* If this is not the first descriptor, chain the
1350 * current descriptor after the previous descriptor
1352 if (!first) {
1353 first = new;
1354 } else {
1355 prev_hw = &(prev->hw);
1356 prev_hw->next_desc = new->async_tx.phys;
1359 new->async_tx.cookie = 0;
1360 async_tx_ack(&new->async_tx);
1362 prev = new;
1364 /* Insert the link descriptor into the list */
1365 list_add_tail(&new->node, &first->tx_list);
1368 /* Link the last BD with the first BD */
1369 hw->next_desc = first->async_tx.phys;
1371 if (!first || !new)
1372 return NULL;
1374 new->async_tx.flags = flags;
1375 new->async_tx.cookie = -EBUSY;
1377 return &first->async_tx;
1379 fail:
1380 /* If first was not set, then we failed to allocate the very first
1381 * descriptor, and we're done */
1382 if (!first)
1383 return NULL;
1385 /* First is set, so all of the descriptors we allocated have been added
1386 * to first->tx_list, INCLUDING "first" itself. Therefore we
1387 * must traverse the list backwards freeing each descriptor in turn
1389 xilinx_dma_free_desc_list_reverse(chan, &first->tx_list);
1390 return NULL;
1393 /* Run-time device configuration for Axi DMA and Axi CDMA */
1394 static int xilinx_dma_device_control(struct dma_chan *dchan,
1395 enum dma_ctrl_cmd cmd, unsigned long arg)
1397 struct xilinx_dma_chan *chan;
1398 unsigned long flags;
1400 if (!dchan)
1401 return -EINVAL;
1403 chan = to_xilinx_chan(dchan);
1405 if (cmd == DMA_TERMINATE_ALL) {
1406 /* Halt the DMA engine */
1407 dma_halt(chan);
1409 spin_lock_irqsave(&chan->lock, flags);
1411 /* Remove and free all of the descriptors in the lists */
1412 xilinx_dma_free_desc_list(chan, &chan->pending_list);
1413 xilinx_dma_free_desc_list(chan, &chan->active_list);
1415 spin_unlock_irqrestore(&chan->lock, flags);
1416 return 0;
1417 } else if (cmd == DMA_SLAVE_CONFIG) {
1418 /* Configure interrupt coalescing and delay counter
1419 * Use value XILINX_DMA_NO_CHANGE to signal no change
1421 struct xilinx_dma_config *cfg = (struct xilinx_dma_config *)arg;
1422 u32 reg = DMA_IN(&chan->regs->cr);
1424 if (cfg->coalesc <= XILINX_DMA_COALESCE_MAX) {
1425 reg &= ~XILINX_DMA_XR_COALESCE_MASK;
1426 reg |= cfg->coalesc << XILINX_DMA_COALESCE_SHIFT;
1428 chan->config.coalesc = cfg->coalesc;
1431 if (cfg->delay <= XILINX_DMA_DELAY_MAX) {
1432 reg &= ~XILINX_DMA_XR_DELAY_MASK;
1433 reg |= cfg->delay << XILINX_DMA_DELAY_SHIFT;
1434 chan->config.delay = cfg->delay;
1437 DMA_OUT(&chan->regs->cr, reg);
1439 return 0;
1440 } else
1441 return -ENXIO;
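/*
 * Sketch of a client tuning interrupt coalescing and the delay timeout
 * through the control hook above; the values are illustrative:
 *
 *	struct xilinx_dma_config cfg = {
 *		.coalesc = 4,
 *		.delay	 = 0,
 *	};
 *
 *	dchan->device->device_control(dchan, DMA_SLAVE_CONFIG,
 *				      (unsigned long)&cfg);
 */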
1444 /* Run-time configuration for Axi VDMA, supports:
1445 * . halt the channel
1446 * . configure interrupt coalescing and inter-packet delay threshold
1447 * . start/stop parking
1448 * . enable genlock
1449 * . set transfer information using config struct
1450  */
1451 static int xilinx_vdma_device_control(struct dma_chan *dchan,
1452 enum dma_ctrl_cmd cmd, unsigned long arg)
1454 struct xilinx_dma_chan *chan;
1455 unsigned long flags;
1457 if (!dchan)
1458 return -EINVAL;
1460 chan = to_xilinx_chan(dchan);
1462 if (cmd == DMA_TERMINATE_ALL) {
1463 /* Halt the DMA engine */
1464 dma_halt(chan);
1466 spin_lock_irqsave(&chan->lock, flags);
1468 /* Remove and free all of the descriptors in the lists */
1469 xilinx_dma_free_desc_list(chan, &chan->pending_list);
1470 xilinx_dma_free_desc_list(chan, &chan->active_list);
1472 spin_unlock_irqrestore(&chan->lock, flags);
1473 return 0;
1474 } else if (cmd == DMA_SLAVE_CONFIG) {
1475 struct xilinx_dma_config *cfg = (struct xilinx_dma_config *)arg;
1476 u32 reg;
1478 if (cfg->reset)
1479 dma_init(chan);
1481 reg = DMA_IN(&chan->regs->cr);
1483 /* If vsize is -1, it is park-related operations */
1484 if (cfg->vsize == -1) {
1485 if (cfg->park)
1486 reg &= ~XILINX_VDMA_CIRC_EN;
1487 else
1488 reg |= XILINX_VDMA_CIRC_EN;
1490 DMA_OUT(&chan->regs->cr, reg);
1491 return 0;
1494 /* If hsize is -1, it is interrupt threshold settings */
1495 if (cfg->hsize == -1) {
1496 if (cfg->coalesc <= XILINX_DMA_COALESCE_MAX) {
1497 reg &= ~XILINX_DMA_XR_COALESCE_MASK;
1498 reg |= cfg->coalesc <<
1499 XILINX_DMA_COALESCE_SHIFT;
1500 chan->config.coalesc = cfg->coalesc;
1503 if (cfg->delay <= XILINX_DMA_DELAY_MAX) {
1504 reg &= ~XILINX_DMA_XR_DELAY_MASK;
1505 reg |= cfg->delay << XILINX_DMA_DELAY_SHIFT;
1506 chan->config.delay = cfg->delay;
1509 DMA_OUT(&chan->regs->cr, reg);
1510 return 0;
1513 /* Transfer information */
1514 chan->config.vsize = cfg->vsize;
1515 chan->config.hsize = cfg->hsize;
1516 chan->config.stride = cfg->stride;
1517 chan->config.frm_dly = cfg->frm_dly;
1518 chan->config.park = cfg->park;
1519 chan->config.direction = cfg->direction;
1521 /* genlock settings */
1522 chan->config.gen_lock = cfg->gen_lock;
1523 chan->config.master = cfg->master;
1525 if (cfg->gen_lock) {
1526 if (chan->genlock) {
1527 reg |= XILINX_VDMA_SYNC_EN;
1528 reg |= cfg->master << XILINX_VDMA_MSTR_SHIFT;
1532 chan->config.frm_cnt_en = cfg->frm_cnt_en;
1533 if (cfg->park)
1534 chan->config.park_frm = cfg->park_frm;
1535 else
1536 chan->config.park_frm = -1;
1538 chan->config.coalesc = cfg->coalesc;
1539 chan->config.delay = cfg->delay;
1540 if (cfg->coalesc <= XILINX_DMA_COALESCE_MAX) {
1541 reg |= cfg->coalesc << XILINX_DMA_COALESCE_SHIFT;
1542 chan->config.coalesc = cfg->coalesc;
1545 if (cfg->delay <= XILINX_DMA_DELAY_MAX) {
1546 reg |= cfg->delay << XILINX_DMA_DELAY_SHIFT;
1547 chan->config.delay = cfg->delay;
1550 chan->config.disable_intr = cfg->disable_intr;
1552 if (cfg->ext_fsync)
1553 reg |= cfg->ext_fsync << XILINX_VDMA_EXTFSYNC_SHIFT;
1555 DMA_OUT(&chan->regs->cr, reg);
1556 return 0;
1557 } else
1558 return -ENXIO;
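/*
 * Sketch of configuring a VDMA write channel and then queueing one
 * scatterlist entry per frame buffer (the VDMA prep routine above
 * enforces sg_len == number of frame stores). W/H/bpp are illustrative
 * frame dimensions:
 *
 *	struct xilinx_dma_config cfg = {
 *		.vsize	   = H,
 *		.hsize	   = W * bpp,
 *		.stride	   = W * bpp,
 *		.frm_dly   = 0,
 *		.park	   = 0,
 *		.direction = DMA_MEM_TO_DEV,
 *	};
 *
 *	dchan->device->device_control(dchan, DMA_SLAVE_CONFIG,
 *				      (unsigned long)&cfg);
 *	txd = dchan->device->device_prep_slave_sg(dchan, sgl, num_frames,
 *			DMA_MEM_TO_DEV, 0, NULL);
 */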
1562 /* Logarithm function to compute alignment shift
1564 * Only deals with values less than 4096.
1565  */
1566 static int my_log(int value)
1568 int i = 0;
1569 while ((1 << i) < value) {
1570 i++;
1572 if (i >= 12)
1573 return 0;
1576 return i;
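/*
 * For example my_log(4) == 2 and my_log(8) == 3; the result is used
 * below to set common.copy_align from the bus width (in bytes) when a
 * channel is built without the Data Realignment Engine.
 */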
1579 #ifdef CONFIG_OF
1581 static void xilinx_dma_chan_remove(struct xilinx_dma_chan *chan)
1583 irq_dispose_mapping(chan->irq);
1584 list_del(&chan->common.device_node);
1585 kfree(chan);
1589 * Probing channels
1591 * . Get channel features from the device tree entry
1592 * . Initialize special channel handling routines
1593  */
1594 static int __devinit xilinx_dma_chan_probe(struct xilinx_dma_device *xdev,
1595 struct device_node *node, u32 feature)
1597 struct xilinx_dma_chan *chan;
1598 int err;
1599 int *value;
1600 u32 width = 0, device_id = 0, flush_fsync = 0;
1602 /* alloc channel */
1603 chan = kzalloc(sizeof(*chan), GFP_KERNEL);
1604 if (!chan) {
1605 dev_err(xdev->dev, "no free memory for DMA channels!\n");
1606 err = -ENOMEM;
1607 goto out_return;
1610 chan->feature = feature;
1611 chan->is_lite = 0;
1612 chan->has_DRE = 0;
1613 chan->has_SG = 0;
1614 chan->max_len = XILINX_DMA_MAX_TRANS_LEN;
1616 value = (int *)of_get_property(node, "xlnx,include-dre",
1617 NULL);
1618 if (value) {
1619 if (be32_to_cpup(value) == 1)
1620 chan->has_DRE = 1;
1623 value = (int *)of_get_property(node, "xlnx,genlock-mode",
1624 NULL);
1625 if (value) {
1626 if (be32_to_cpup(value) == 1)
1627 chan->genlock = 1;
1630 value = (int *)of_get_property(node,
1631 "xlnx,datawidth",
1632 NULL);
1633 if (value) {
1634 width = be32_to_cpup(value) >> 3; /* convert bits to bytes */
1636 /* If data width is greater than 8 bytes, DRE is not in hw */
1637 if (width > 8)
1638 chan->has_DRE = 0;
1640 chan->feature |= width - 1;
1643 value = (int *)of_get_property(node, "xlnx,device-id", NULL);
1644 if (value)
1645 device_id = be32_to_cpup(value);
1647 flush_fsync = (xdev->feature & XILINX_VDMA_FTR_FLUSH_MASK) >>
1648 XILINX_VDMA_FTR_FLUSH_SHIFT;
1650 if (feature & XILINX_DMA_IP_CDMA) {
1651 chan->direction = DMA_MEM_TO_MEM;
1652 chan->start_transfer = xilinx_cdma_start_transfer;
1654 chan->has_SG = (xdev->feature & XILINX_DMA_FTR_HAS_SG) >>
1655 XILINX_DMA_FTR_HAS_SG_SHIFT;
1657 value = (int *)of_get_property(node,
1658 "xlnx,lite-mode", NULL);
1659 if (value) {
1660 if (be32_to_cpup(value) == 1) {
1661 chan->is_lite = 1;
1662 value = (int *)of_get_property(node,
1663 "xlnx,max-burst-len", NULL);
1664 if (value) {
1665 if (!width) {
1666 dev_err(xdev->dev,
1667 "Lite mode without data width property\n");
1668 goto out_free_chan;
1670 chan->max_len = width *
1671 be32_to_cpup(value);
1677 if (feature & XILINX_DMA_IP_DMA) {
1678 chan->has_SG = (xdev->feature & XILINX_DMA_FTR_HAS_SG) >>
1679 XILINX_DMA_FTR_HAS_SG_SHIFT;
1681 chan->start_transfer = xilinx_dma_start_transfer;
1683 if (of_device_is_compatible(node,
1684 "xlnx,axi-dma-mm2s-channel"))
1685 chan->direction = DMA_MEM_TO_DEV;
1687 if (of_device_is_compatible(node,
1688 "xlnx,axi-dma-s2mm-channel"))
1689 chan->direction = DMA_DEV_TO_MEM;
1693 if (feature & XILINX_DMA_IP_VDMA) {
1694 chan->start_transfer = xilinx_vdma_start_transfer;
1696 chan->has_SG = (xdev->feature & XILINX_DMA_FTR_HAS_SG) >>
1697 XILINX_DMA_FTR_HAS_SG_SHIFT;
1699 if (of_device_is_compatible(node,
1700 "xlnx,axi-vdma-mm2s-channel")) {
1701 chan->direction = DMA_MEM_TO_DEV;
1702 if (!chan->has_SG) {
1703 chan->addr_regs = (struct vdma_addr_regs *)
1704 ((u32)xdev->regs +
1705 XILINX_VDMA_DIRECT_REG_OFFSET);
1707 if (flush_fsync == XILINX_VDMA_FLUSH_BOTH ||
1708 flush_fsync == XILINX_VDMA_FLUSH_MM2S)
1709 chan->flush_fsync = 1;
1712 if (of_device_is_compatible(node,
1713 "xlnx,axi-vdma-s2mm-channel")) {
1714 chan->direction = DMA_DEV_TO_MEM;
1715 if (!chan->has_SG) {
1716 chan->addr_regs = (struct vdma_addr_regs *)
1717 ((u32)xdev->regs +
1718 XILINX_VDMA_DIRECT_REG_OFFSET +
1719 XILINX_VDMA_CHAN_DIRECT_REG_SIZE);
1721 if (flush_fsync == XILINX_VDMA_FLUSH_BOTH ||
1722 flush_fsync == XILINX_VDMA_FLUSH_S2MM)
1723 chan->flush_fsync = 1;
1727 chan->regs = (struct xdma_regs *)xdev->regs;
1728 chan->id = 0;
1730 if (chan->direction == DMA_DEV_TO_MEM) {
1731 chan->regs = (struct xdma_regs *)((u32)xdev->regs +
1732 XILINX_DMA_RX_CHANNEL_OFFSET);
1733 chan->id = 1;
1736 /* Used by dmatest channel matching in slave transfers
1737 * Can change it to be a structure to have more matching information
1739 chan->private = (chan->direction & 0xFF) |
1740 (chan->feature & XILINX_DMA_IP_MASK) |
1741 (device_id << XILINX_DMA_DEVICE_ID_SHIFT);
1742 chan->common.private = (void *)&(chan->private);
1744 if (!chan->has_DRE)
1745 xdev->common.copy_align = my_log(width);
1747 chan->dev = xdev->dev;
1748 xdev->chan[chan->id] = chan;
1750 tasklet_init(&chan->tasklet, dma_do_tasklet, (unsigned long)chan);
1752 /* Initialize the channel */
1753 if (dma_init(chan)) {
1754 dev_err(xdev->dev, "Reset channel failed\n");
1755 goto out_free_chan;
1759 spin_lock_init(&chan->lock);
1760 INIT_LIST_HEAD(&chan->pending_list);
1761 INIT_LIST_HEAD(&chan->active_list);
1763 chan->common.device = &xdev->common;
1765 /* find the IRQ line, if it exists in the device tree */
1766 chan->irq = irq_of_parse_and_map(node, 0);
1767 err = request_irq(chan->irq, dma_intr_handler, IRQF_SHARED,
1768 "xilinx-dma-controller", chan);
1769 if (err) {
1770 dev_err(xdev->dev, "unable to request IRQ\n");
1771 goto out_free_irq;
1774 /* Add the channel to DMA device channel list */
1775 list_add_tail(&chan->common.device_node, &xdev->common.channels);
1776 xdev->common.chancnt++;
1778 return 0;
1780 out_free_irq:
1781 irq_dispose_mapping(chan->irq);
1782 out_free_chan:
1783 kfree(chan);
1784 out_return:
1785 return err;
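/*
 * Sketch of a dmatest-style filter matching the per-channel 'private'
 * word assembled above; 'mask' and 'device_id' are set up by the caller:
 *
 *	static bool xdma_filter(struct dma_chan *dchan, void *param)
 *	{
 *		return dchan->private &&
 *		       *(u32 *)dchan->private == *(u32 *)param;
 *	}
 *
 *	u32 match = DMA_MEM_TO_DEV | XILINX_DMA_IP_DMA |
 *		    (device_id << XILINX_DMA_DEVICE_ID_SHIFT);
 *	chan = dma_request_channel(mask, xdma_filter, &match);
 */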
1788 static int __devinit xilinx_dma_of_probe(struct platform_device *op)
1790 struct xilinx_dma_device *xdev;
1791 struct device_node *child, *node;
1792 int err;
1793 int *value;
1794 int num_frames = 0;
1796 dev_info(&op->dev, "Probing xilinx axi dma engines\n");
1798 xdev = kzalloc(sizeof(struct xilinx_dma_device), GFP_KERNEL);
1799 if (!xdev) {
1800 dev_err(&op->dev, "Not enough memory for device\n");
1801 err = -ENOMEM;
1802 goto out_return;
1805 xdev->dev = &(op->dev);
1806 INIT_LIST_HEAD(&xdev->common.channels);
1808 node = op->dev.of_node;
1809 xdev->feature = 0;
1811 /* iomap registers */
1812 xdev->regs = of_iomap(node, 0);
1813 if (!xdev->regs) {
1814 dev_err(&op->dev, "unable to iomap registers\n");
1815 err = -ENOMEM;
1816 goto out_free_xdev;
1819 /* Axi CDMA only does memcpy
1821 if (of_device_is_compatible(node, "xlnx,axi-cdma")) {
1822 xdev->feature |= XILINX_DMA_IP_CDMA;
1824 value = (int *)of_get_property(node, "xlnx,include-sg",
1825 NULL);
1826 if (value) {
1827 if (be32_to_cpup(value) == 1)
1828 xdev->feature |= XILINX_DMA_FTR_HAS_SG;
1831 dma_cap_set(DMA_MEMCPY, xdev->common.cap_mask);
1832 xdev->common.device_prep_dma_memcpy = xilinx_dma_prep_memcpy;
1833 xdev->common.device_control = xilinx_dma_device_control;
1834 xdev->common.device_issue_pending = xilinx_cdma_issue_pending;
1837 /* Axi DMA and VDMA only do slave transfers
1839 if (of_device_is_compatible(node, "xlnx,axi-dma")) {
1841 xdev->feature |= XILINX_DMA_IP_DMA;
1842 value = (int *)of_get_property(node,
1843 "xlnx,sg-include-stscntrl-strm",
1844 NULL);
1845 if (value) {
1846 if (be32_to_cpup(value) == 1) {
1847 xdev->feature |= (XILINX_DMA_FTR_STSCNTRL_STRM |
1848 XILINX_DMA_FTR_HAS_SG);
1852 dma_cap_set(DMA_SLAVE, xdev->common.cap_mask);
1853 dma_cap_set(DMA_PRIVATE, xdev->common.cap_mask);
1854 xdev->common.device_prep_slave_sg = xilinx_dma_prep_slave_sg;
1855 xdev->common.device_control = xilinx_dma_device_control;
1856 xdev->common.device_issue_pending = xilinx_dma_issue_pending;
1859 if (of_device_is_compatible(node, "xlnx,axi-vdma")) {
1860 xdev->feature |= XILINX_DMA_IP_VDMA;
1862 value = (int *)of_get_property(node, "xlnx,include-sg",
1863 NULL);
1864 if (value) {
1865 if (be32_to_cpup(value) == 1)
1866 xdev->feature |= XILINX_DMA_FTR_HAS_SG;
1869 value = (int *)of_get_property(node, "xlnx,num-fstores",
1870 NULL);
1871 if (value)
1872 num_frames = be32_to_cpup(value);
1874 value = (int *)of_get_property(node, "xlnx,flush-fsync", NULL);
1875 if (value)
1876 xdev->feature |= be32_to_cpup(value) <<
1877 XILINX_VDMA_FTR_FLUSH_SHIFT;
1879 dma_cap_set(DMA_SLAVE, xdev->common.cap_mask);
1880 dma_cap_set(DMA_PRIVATE, xdev->common.cap_mask);
1881 xdev->common.device_prep_slave_sg = xilinx_vdma_prep_slave_sg;
1882 xdev->common.device_control = xilinx_vdma_device_control;
1883 xdev->common.device_issue_pending = xilinx_vdma_issue_pending;
1886 xdev->common.device_alloc_chan_resources =
1887 xilinx_dma_alloc_chan_resources;
1888 xdev->common.device_free_chan_resources =
1889 xilinx_dma_free_chan_resources;
1890 xdev->common.device_tx_status = xilinx_tx_status;
1891 xdev->common.dev = &op->dev;
1893 dev_set_drvdata(&op->dev, xdev);
1895 for_each_child_of_node(node, child) {
1896 xilinx_dma_chan_probe(xdev, child, xdev->feature);
1899 if (xdev->feature & XILINX_DMA_IP_VDMA) {
1900 int i;
1902 for (i = 0; i < XILINX_DMA_MAX_CHANS_PER_DEVICE; i++) {
1903 if (xdev->chan[i])
1904 xdev->chan[i]->num_frms = num_frames;
1908 dma_async_device_register(&xdev->common);
1910 return 0;
1912 out_free_xdev:
1913 kfree(xdev);
1915 out_return:
1916 return err;
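/*
 * Hypothetical device tree fragment matching the properties parsed by
 * the OF probe above; register, interrupt and property values are
 * placeholders, not taken from a real design:
 *
 *	axi_vdma_0: axivdma@44a00000 {
 *		compatible = "xlnx,axi-vdma";
 *		reg = <0x44a00000 0x1000>;
 *		xlnx,include-sg = <0x1>;
 *		xlnx,num-fstores = <0x3>;
 *		xlnx,flush-fsync = <0x1>;
 *
 *		dma-channel@44a00000 {
 *			compatible = "xlnx,axi-vdma-mm2s-channel";
 *			interrupts = <0 59 4>;
 *			xlnx,datawidth = <0x40>;
 *			xlnx,include-dre = <0x0>;
 *			xlnx,genlock-mode = <0x0>;
 *			xlnx,device-id = <0x0>;
 *		};
 *	};
 */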
1919 static int __devexit xilinx_dma_of_remove(struct platform_device *op)
1921 struct xilinx_dma_device *xdev;
1922 int i;
1924 xdev = dev_get_drvdata(&op->dev);
1925 dma_async_device_unregister(&xdev->common);
1927 for (i = 0; i < XILINX_DMA_MAX_CHANS_PER_DEVICE; i++) {
1928 if (xdev->chan[i])
1929 xilinx_dma_chan_remove(xdev->chan[i]);
1932 iounmap(xdev->regs);
1933 dev_set_drvdata(&op->dev, NULL);
1934 kfree(xdev);
1936 return 0;
1939 static const struct of_device_id xilinx_dma_of_ids[] = {
1940 { .compatible = "xlnx,axi-cdma",},
1941 { .compatible = "xlnx,axi-dma",},
1942 { .compatible = "xlnx,axi-vdma",},
1946 static struct platform_driver xilinx_dma_of_driver = {
1947 .driver = {
1948 .name = "xilinx-dma",
1949 .owner = THIS_MODULE,
1950 .of_match_table = xilinx_dma_of_ids,
1952 .probe = xilinx_dma_of_probe,
1953 .remove = __devexit_p(xilinx_dma_of_remove),
1956 /*----------------------------------------------------------------------------*/
1957 /* Module Init / Exit */
1958 /*----------------------------------------------------------------------------*/
1960 static __init int xilinx_dma_init(void)
1962 int ret;
1964 pr_info("Xilinx DMA driver\n");
1966 ret = platform_driver_register(&xilinx_dma_of_driver);
1967 if (ret)
1968 pr_err("xilinx_dma: failed to register platform driver\n");
1970 return ret;
1973 static void __exit xilinx_dma_exit(void)
1975 platform_driver_unregister(&xilinx_dma_of_driver);
1978 subsys_initcall(xilinx_dma_init);
1979 module_exit(xilinx_dma_exit);
1981 #else
1983 /**************************************************/
1984 /* Platform bus to support ARM before device tree */
1985 /**************************************************/
1987 /* The following probe and chan_probe functions were
1988 copied from the OF section above, then modified
1989 to use platform data.
1990 */
1992 static void xilinx_dma_chan_remove(struct xilinx_dma_chan *chan)
1994 free_irq(chan->irq, chan);
1995 list_del(&chan->common.device_node);
1996 kfree(chan);
2000 * Probing channels
2002 * . Get channel features from the platform data
2003 * . Initialize special channel handling routines
2004  */
2005 static int __devinit xilinx_dma_chan_probe(struct platform_device *pdev,
2006 struct xilinx_dma_device *xdev,
2007 struct dma_channel_config *channel_config,
2008 int channel_num, u32 feature)
2010 struct xilinx_dma_chan *chan;
2011 int err;
2012 u32 width = 0;
2013 struct resource *res;
2015 /* alloc channel */
2018 chan = kzalloc(sizeof(*chan), GFP_KERNEL);
2019 if (!chan) {
2020 dev_err(xdev->dev, "no free memory for DMA channels!\n");
2021 err = -ENOMEM;
2022 goto out_return;
2025 chan->feature = feature;
2026 chan->is_lite = 0;
2027 chan->has_DRE = 0;
2028 chan->has_SG = 0;
2029 chan->max_len = XILINX_DMA_MAX_TRANS_LEN;
2031 if (channel_config->include_dre)
2032 chan->has_DRE = 1;
2034 if (channel_config->genlock_mode)
2035 chan->genlock = 1;
2037 width = channel_config->datawidth >> 3;
2038 chan->feature |= width - 1;
2040 if (feature & XILINX_DMA_IP_CDMA) {
2042 chan->direction = DMA_MEM_TO_MEM;
2043 chan->start_transfer = xilinx_cdma_start_transfer;
2045 chan->has_SG = (xdev->feature & XILINX_DMA_FTR_HAS_SG) >>
2046 XILINX_DMA_FTR_HAS_SG_SHIFT;
2048 if (channel_config->lite_mode) {
2049 chan->is_lite = 1;
2050 chan->max_len = width * channel_config->max_burst_len;
2054 if (feature & XILINX_DMA_IP_DMA) {
2055 chan->has_SG = 1;
2056 chan->start_transfer = xilinx_dma_start_transfer;
2058 if (!strcmp(channel_config->type, "axi-dma-mm2s-channel"))
2059 chan->direction = DMA_MEM_TO_DEV;
2061 if (!strcmp(channel_config->type, "axi-dma-s2mm-channel"))
2062 chan->direction = DMA_DEV_TO_MEM;
2065 if (feature & XILINX_DMA_IP_VDMA) {
2067 chan->start_transfer = xilinx_vdma_start_transfer;
2069 chan->has_SG = (xdev->feature & XILINX_DMA_FTR_HAS_SG) >>
2070 XILINX_DMA_FTR_HAS_SG_SHIFT;
2072 if (!strcmp(channel_config->type, "axi-vdma-mm2s-channel")) {
2074 printk(KERN_INFO "axi-vdma-mm2s-channel found\n");
2076 chan->direction = DMA_MEM_TO_DEV;
2077 if (!chan->has_SG) {
2078 chan->addr_regs = (struct vdma_addr_regs *)
2079 ((u32)xdev->regs +
2080 XILINX_VDMA_DIRECT_REG_OFFSET);
2084 if (!strcmp(channel_config->type, "axi-vdma-s2mm-channel")) {
2086 printk(KERN_INFO "axi-vdma-s2mm-channel found\n");
2088 chan->direction = DMA_DEV_TO_MEM;
2089 if (!chan->has_SG) {
2090 chan->addr_regs = (struct vdma_addr_regs *)
2091 ((u32)xdev->regs +
2092 XILINX_VDMA_DIRECT_REG_OFFSET +
2093 XILINX_VDMA_CHAN_DIRECT_REG_SIZE);
2098 chan->regs = (struct xdma_regs *)xdev->regs;
2099 chan->id = 0;
2101 if (chan->direction == DMA_DEV_TO_MEM) {
2102 chan->regs = (struct xdma_regs *)((u32)xdev->regs +
2103 XILINX_DMA_RX_CHANNEL_OFFSET);
2104 chan->id = 1;
2107 /* Used by dmatest channel matching in slave transfers
2108 * Can change it to be a structure to have more matching information
2110 chan->private = (chan->direction & 0xFF) |
2111 (chan->feature & XILINX_DMA_IP_MASK);
2112 chan->common.private = (void *)&(chan->private);
2114 if (!chan->has_DRE)
2115 xdev->common.copy_align = my_log(width);
2117 chan->dev = xdev->dev;
2118 xdev->chan[chan->id] = chan;
2120 tasklet_init(&chan->tasklet, dma_do_tasklet, (unsigned long)chan);
2122 /* Initialize the channel */
2123 if (dma_init(chan)) {
2124 dev_err(xdev->dev, "Reset channel failed\n");
2125 goto out_free_chan;
2129 spin_lock_init(&chan->lock);
2130 INIT_LIST_HEAD(&chan->pending_list);
2131 INIT_LIST_HEAD(&chan->active_list);
2133 chan->common.device = &xdev->common;
2135 /* setup the interrupt for the channel */
2137 res = platform_get_resource(pdev, IORESOURCE_IRQ, channel_num);
2138 chan->irq = res->start;
2140 err = request_irq(chan->irq, dma_intr_handler, IRQF_SHARED,
2141 "xilinx-dma-controller", chan);
2142 if (err) {
2143 dev_err(xdev->dev, "unable to request IRQ\n");
2144 goto out_free_irq;
2145 } else
2146 dev_info(&pdev->dev, "using irq %d\n", chan->irq);
2148 /* Add the channel to DMA device channel list */
2149 list_add_tail(&chan->common.device_node, &xdev->common.channels);
2150 xdev->common.chancnt++;
2152 return 0;
2154 out_free_irq:
2155 free_irq(chan->irq, chan);
2156 out_free_chan:
2157 kfree(chan);
2158 out_return:
2159 return err;
2162 static int __devinit xilinx_dma_probe(struct platform_device *pdev)
2164 struct xilinx_dma_device *xdev;
2165 int err;
2166 int num_frames = 0;
2167 struct resource *res;
2168 struct device *dev = &pdev->dev;
2169 struct dma_device_config *dma_config;
2170 int channel;
2172 dev_info(&pdev->dev, "Probing xilinx axi dma engines\n");
2174 xdev = kzalloc(sizeof(struct xilinx_dma_device), GFP_KERNEL);
2175 if (!xdev) {
2176 dev_err(&pdev->dev, "Not enough memory for device\n");
2177 err = -ENOMEM;
2178 goto out_return;
2181 xdev->dev = &(pdev->dev);
2182 INIT_LIST_HEAD(&xdev->common.channels);
2184 xdev->feature = 0;
2186 /* iomap registers */
2187 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
2188 if (!res) {
2189 printk(KERN_ERR "get_resource for MEM resource for dev %d "
2190 "failed\n", pdev->id);
2191 err = -ENOMEM;
2192 goto out_return;
2193 } else {
2194 dev_info(&pdev->dev, "device %d actual base is %x\n",
2195 pdev->id, (unsigned int)res->start);
2197 if (!request_mem_region(res->start, 0x1000, "xilinx_axidma")) {
2198 printk(KERN_ERR "memory request failure for base %x\n",
2199 (unsigned int)res->start);
2200 err = -ENOMEM;
2201 goto out_return;
2204 xdev->regs = ioremap(res->start, 0x1000);
2205 pr_info("dma base remapped: %lx\n", (unsigned long)xdev->regs);
2206 if (!xdev->regs) {
2207 dev_err(&pdev->dev, "unable to iomap registers\n");
2208 err = -ENOMEM;
2209 goto out_free_xdev;
2212 dma_config = (struct dma_device_config *)dev->platform_data;
2214 /* Axi CDMA only does memcpy
2216 if (!strcmp(dma_config->type, "axi-cdma")) {
2218 pr_info("found an axi-cdma configuration\n");
2219 xdev->feature |= XILINX_DMA_IP_CDMA;
2221 if (dma_config->include_sg)
2222 xdev->feature |= XILINX_DMA_FTR_HAS_SG;
2224 dma_cap_set(DMA_MEMCPY, xdev->common.cap_mask);
2225 xdev->common.device_prep_dma_memcpy = xilinx_dma_prep_memcpy;
2226 xdev->common.device_control = xilinx_dma_device_control;
2227 xdev->common.device_issue_pending = xilinx_cdma_issue_pending;
2230 /* Axi DMA and VDMA only do slave transfers
2232 if (!strcmp(dma_config->type, "axi-dma")) {
2234 pr_info("found an axi-dma configuration\n");
2236 xdev->feature |= XILINX_DMA_IP_DMA;
2237 if (dma_config->sg_include_stscntrl_strm)
2238 xdev->feature |= XILINX_DMA_FTR_STSCNTRL_STRM;
2240 dma_cap_set(DMA_SLAVE, xdev->common.cap_mask);
2241 dma_cap_set(DMA_PRIVATE, xdev->common.cap_mask);
2242 xdev->common.device_prep_slave_sg = xilinx_dma_prep_slave_sg;
2243 xdev->common.device_control = xilinx_dma_device_control;
2244 xdev->common.device_issue_pending = xilinx_dma_issue_pending;
2247 if (!strcmp(dma_config->type, "axi-vdma")) {
2249 pr_info("found an axi-vdma configuration\n");
2251 xdev->feature |= XILINX_DMA_IP_VDMA;
2253 if (dma_config->include_sg)
2254 xdev->feature |= XILINX_DMA_FTR_HAS_SG;
2256 num_frames = dma_config->num_fstores;
2258 dma_cap_set(DMA_SLAVE, xdev->common.cap_mask);
2259 dma_cap_set(DMA_PRIVATE, xdev->common.cap_mask);
2260 xdev->common.device_prep_slave_sg = xilinx_vdma_prep_slave_sg;
2261 xdev->common.device_control = xilinx_vdma_device_control;
2262 xdev->common.device_issue_pending = xilinx_vdma_issue_pending;
2265 xdev->common.device_alloc_chan_resources =
2266 xilinx_dma_alloc_chan_resources;
2267 xdev->common.device_free_chan_resources =
2268 xilinx_dma_free_chan_resources;
2269 xdev->common.device_tx_status = xilinx_tx_status;
2270 xdev->common.dev = &pdev->dev;
2272 dev_set_drvdata(&pdev->dev, xdev);
2274 for (channel = 0; channel < dma_config->channel_count; channel++)
2275 xilinx_dma_chan_probe(pdev, xdev,
2276 &dma_config->channel_config[channel],
2277 channel, xdev->feature);
2279 if (xdev->feature & XILINX_DMA_IP_VDMA) {
2280 int i;
2282 for (i = 0; i < XILINX_DMA_MAX_CHANS_PER_DEVICE; i++) {
2283 if (xdev->chan[i])
2284 xdev->chan[i]->num_frms = num_frames;
2288 dma_async_device_register(&xdev->common);
2290 return 0;
2292 out_free_xdev:
2293 kfree(xdev);
2295 out_return:
2296 return err;
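/*
 * Sketch of the platform data this non-OF path consumes; the field
 * names mirror the dma_config/channel_config accesses above, the values
 * are illustrative, and the MEM/IRQ resources must still be supplied
 * with the platform device as usual:
 *
 *	static struct dma_channel_config axidma_chan_cfg[] = {
 *		{ .type = "axi-dma-mm2s-channel", .datawidth = 32, },
 *		{ .type = "axi-dma-s2mm-channel", .datawidth = 32, },
 *	};
 *
 *	static struct dma_device_config axidma_cfg = {
 *		.type		= "axi-dma",
 *		.include_sg	= 1,
 *		.channel_count	= 2,
 *		.channel_config	= axidma_chan_cfg,
 *	};
 *
 *	pdev->dev.platform_data = &axidma_cfg;
 */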
2300 static int __exit xilinx_dma_remove(struct platform_device *pdev)
2302 struct xilinx_dma_device *xdev = platform_get_drvdata(pdev);
2303 int i;
2305 dma_async_device_unregister(&xdev->common);
2307 for (i = 0; i < XILINX_DMA_MAX_CHANS_PER_DEVICE; i++) {
2308 if (xdev->chan[i])
2309 xilinx_dma_chan_remove(xdev->chan[i]);
2312 iounmap(xdev->regs);
2313 dev_set_drvdata(&pdev->dev, NULL);
2314 kfree(xdev);
2316 return 0;
2319 static void xilinx_dma_shutdown(struct platform_device *pdev)
2321 struct xilinx_dma_device *xdev = platform_get_drvdata(pdev);
2322 int i;
2324 for (i = 0; i < XILINX_DMA_MAX_CHANS_PER_DEVICE; i++)
2325 if (xdev->chan[i])
2326 dma_halt(xdev->chan[i]);
2328 static struct platform_driver xilinx_dma_driver = {
2329 .probe = xilinx_dma_probe,
2330 .remove = __exit_p(xilinx_dma_remove),
2331 .shutdown = xilinx_dma_shutdown,
2332 .driver = {
2333 .owner = THIS_MODULE,
2334 .name = "xilinx-axidma",
2338 /*----------------------------------------------------------------------------*/
2339 /* Module Init / Exit */
2340 /*----------------------------------------------------------------------------*/
2342 static __init int xilinx_dma_init(void)
2344 int status;
2345 status = platform_driver_register(&xilinx_dma_driver);
2346 return status;
2348 module_init(xilinx_dma_init);
2350 static void __exit xilinx_dma_exit(void)
2352 platform_driver_unregister(&xilinx_dma_driver);
2355 module_exit(xilinx_dma_exit);
2356 #endif
2358 MODULE_DESCRIPTION("Xilinx DMA/CDMA/VDMA driver");
2359 MODULE_LICENSE("GPL");