/*
 * Xilinx DMA Engine support
 *
 * Copyright (C) 2010 Xilinx, Inc. All rights reserved.
 *
 * Based on the Freescale DMA driver.
 *
 * This driver supports three Xilinx DMA engines:
 *  . Axi CDMA engine, it does transfers between memory and memory, it
 *    only has one channel.
 *  . Axi DMA engine, it does transfers between memory and device. It can be
 *    configured to have one channel or two channels. If configured as two
 *    channels, one is to transmit to a device and another is to receive from
 *    a device.
 *  . Axi VDMA engine, it does transfers between memory and video devices.
 *    It can be configured to have one channel or two channels. If configured
 *    as two channels, one is to transmit to the video device and another is
 *    to receive from the video device.
 *
 * This is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 */
28 #include <linux/init.h>
29 #include <linux/module.h>
30 #include <linux/slab.h>
31 #include <linux/interrupt.h>
32 #include <linux/dmapool.h>
35 #include <linux/of_platform.h>
36 #include <linux/platform_device.h>
37 #include <linux/of_address.h>
38 #include <linux/amba/xilinx_dma.h>
39 #include <linux/of_irq.h>
/* Hw specific definitions */
43 #define XILINX_DMA_MAX_CHANS_PER_DEVICE 0x2
44 #define XILINX_DMA_MAX_TRANS_LEN 0x7FFFFF
/* General register bits definitions */
48 #define XILINX_DMA_CR_RESET_MASK 0x00000004 /* Reset DMA engine */
49 #define XILINX_DMA_CR_RUNSTOP_MASK 0x00000001 /* Start/stop DMA engine */
51 #define XILINX_DMA_SR_HALTED_MASK 0x00000001 /* DMA channel halted */
52 #define XILINX_DMA_SR_IDLE_MASK 0x00000002 /* DMA channel idle */
54 #define XILINX_DMA_SR_ERR_INTERNAL_MASK 0x00000010 /* Datamover internal err */
55 #define XILINX_DMA_SR_ERR_SLAVE_MASK 0x00000020 /* Datamover slave err */
56 #define XILINX_DMA_SR_ERR_DECODE_MASK 0x00000040 /* Datamover decode err */
57 #define XILINX_DMA_SR_ERR_SG_INT_MASK 0x00000100 /* SG internal err */
58 #define XILINX_DMA_SR_ERR_SG_SLV_MASK 0x00000200 /* SG slave err */
59 #define XILINX_DMA_SR_ERR_SG_DEC_MASK 0x00000400 /* SG decode err */
60 #define XILINX_DMA_SR_ERR_ALL_MASK 0x00000770 /* All errors */
62 #define XILINX_DMA_XR_IRQ_IOC_MASK 0x00001000 /* Completion interrupt */
63 #define XILINX_DMA_XR_IRQ_DELAY_MASK 0x00002000 /* Delay interrupt */
64 #define XILINX_DMA_XR_IRQ_ERROR_MASK 0x00004000 /* Error interrupt */
65 #define XILINX_DMA_XR_IRQ_ALL_MASK 0x00007000 /* All interrupts */
67 #define XILINX_DMA_XR_DELAY_MASK 0xFF000000 /* Delay timeout counter */
68 #define XILINX_DMA_XR_COALESCE_MASK 0x00FF0000 /* Coalesce counter */
70 #define XILINX_DMA_IRQ_SHIFT 12
71 #define XILINX_DMA_DELAY_SHIFT 24
72 #define XILINX_DMA_COALESCE_SHIFT 16
74 #define XILINX_DMA_DELAY_MAX 0xFF /**< Maximum delay counter value */
75 #define XILINX_DMA_COALESCE_MAX 0xFF /**< Maximum coalescing counter value */
77 #define XILINX_DMA_RX_CHANNEL_OFFSET 0x30
/* Axi CDMA special register bits */
81 #define XILINX_CDMA_CR_SGMODE_MASK 0x00000008 /**< Scatter gather mode */
83 #define XILINX_CDMA_SR_SGINCLD_MASK 0x00000008 /**< Hybrid build */
#define XILINX_CDMA_XR_IRQ_SIMPLE_ALL_MASK 0x00005000 /**< All interrupts for
							    simple transfers */
/* Axi VDMA special register bits */
88 #define XILINX_VDMA_CIRC_EN 0x00000002 /* Circular mode */
89 #define XILINX_VDMA_SYNC_EN 0x00000008 /* Sync enable mode */
90 #define XILINX_VDMA_FRMCNT_EN 0x00000010 /* Frm Cnt enable mode */
91 #define XILINX_VDMA_MSTR_MASK 0x00000F00 /* Master in control */
93 #define XILINX_VDMA_EXTFSYNC_SHIFT 6
94 #define XILINX_VDMA_MSTR_SHIFT 8
95 #define XILINX_VDMA_WR_REF_SHIFT 8
97 #define XILINX_VDMA_FRMDLY_SHIFT 24
99 #define XILINX_VDMA_DIRECT_REG_OFFSET 0x50
100 #define XILINX_VDMA_CHAN_DIRECT_REG_SIZE 0x50
102 #define XILINX_VDMA_PARK_REG_OFFSET 0x28
/* Axi VDMA Specific Error bits */
#define XILINX_VDMA_SR_ERR_FSIZE_LESS_MASK 0x00000080 /* FSize Less
							 mismatch err */
#define XILINX_VDMA_SR_ERR_LSIZE_LESS_MASK 0x00000100 /* LSize Less
							 mismatch err */
#define XILINX_VDMA_SR_ERR_FSIZE_MORE_MASK 0x00000800 /* FSize
							 more mismatch err */
/* Recoverable errors are DMA Internal error, FSize Less, LSize Less
 * and FSize More mismatch errors. These are only recoverable
 * when C_FLUSH_ON_FSYNC is enabled in the hardware system.
 */
#define XILINX_VDMA_SR_ERR_RECOVER_MASK 0x00000990 /* Recoverable errs */
/* Axi VDMA Flush on Fsync bits */
121 #define XILINX_VDMA_FLUSH_S2MM 3
122 #define XILINX_VDMA_FLUSH_MM2S 2
123 #define XILINX_VDMA_FLUSH_BOTH 1
/* BD definitions for Axi Dma and Axi Cdma */
127 #define XILINX_DMA_BD_STS_COMPL_MASK 0x80000000
128 #define XILINX_DMA_BD_STS_ERR_MASK 0x70000000
129 #define XILINX_DMA_BD_STS_ALL_MASK 0xF0000000
/* Axi DMA BD special bits definitions */
133 #define XILINX_DMA_BD_SOP 0x08000000 /* Start of packet bit */
134 #define XILINX_DMA_BD_EOP 0x04000000 /* End of packet bit */
138 #define XILINX_DMA_FTR_DATA_WIDTH_MASK 0x000000FF /* Data width mask, 1024 */
139 #define XILINX_DMA_FTR_HAS_SG 0x00000100 /* Has SG */
140 #define XILINX_DMA_FTR_HAS_SG_SHIFT 8 /* Has SG shift */
141 #define XILINX_DMA_FTR_STSCNTRL_STRM 0x00010000 /* Optional feature for dma */
/* Feature encodings for VDMA */
145 #define XILINX_VDMA_FTR_FLUSH_MASK 0x00000600 /* Flush-on-FSync Mask */
146 #define XILINX_VDMA_FTR_FLUSH_SHIFT 9 /* Flush-on-FSync shift */
/* Delay loop counter to prevent hardware failure */
150 #define XILINX_DMA_RESET_LOOP 1000000
151 #define XILINX_DMA_HALT_LOOP 1000000
/* Device Id in the private structure */
155 #define XILINX_DMA_DEVICE_ID_SHIFT 28
/* MMIO accessors for the DMA control/status register block.
 * @addr is an __iomem register address; all registers are 32 bits wide.
 */
#define DMA_OUT(addr, val)	(iowrite32(val, addr))
#define DMA_IN(addr)		(ioread32(addr))
/* Hardware descriptor
 * shared by all Xilinx DMA engines
 */
166 struct xilinx_dma_desc_hw
{
167 u32 next_desc
; /* 0x00 */
169 u32 buf_addr
; /* 0x08 */
171 u32 addr_vsize
; /* 0x10 */
172 u32 hsize
; /* 0x14 */
173 u32 control
; /* 0x18 */
174 u32 status
; /* 0x1C */
175 u32 app_0
; /* 0x20 */
176 u32 app_1
; /* 0x24 */
177 u32 app_2
; /* 0x28 */
178 u32 app_3
; /* 0x2C */
179 u32 app_4
; /* 0x30 */
180 } __attribute__((aligned(64)));
182 struct xilinx_dma_desc_sw
{
183 struct xilinx_dma_desc_hw hw
;
184 struct list_head node
;
185 struct list_head tx_list
;
186 struct dma_async_tx_descriptor async_tx
;
187 } __attribute__((aligned(64)));
190 u32 cr
; /* 0x00 Control Register */
191 u32 sr
; /* 0x04 Status Register */
192 u32 cdr
; /* 0x08 Current Descriptor Register */
194 u32 tdr
; /* 0x10 Tail Descriptor Register */
196 u32 src
; /* 0x18 Source Address Register (cdma) */
198 u32 dst
; /* 0x20 Destination Address Register (cdma) */
200 u32 btt_ref
;/* 0x28 Bytes To Transfer (cdma) or park_ref (vdma) */
201 u32 version
; /* 0x2c version (vdma) */
204 struct vdma_addr_regs
{
205 u32 vsize
; /* 0x0 Vertical size */
206 u32 hsize
; /* 0x4 Horizontal size */
207 u32 frmdly_stride
; /* 0x8 Frame delay and stride */
208 u32 buf_addr
[16]; /* 0xC - 0x48 Src addresses */
/* Per DMA specific operations should be embedded in the channel structure */
213 struct xilinx_dma_chan
{
214 struct xdma_regs __iomem
*regs
; /* Control status registers */
215 struct vdma_addr_regs
*addr_regs
; /* Direct address registers */
216 dma_cookie_t completed_cookie
; /* The maximum cookie completed */
217 dma_cookie_t cookie
; /* The current cookie */
218 spinlock_t lock
; /* Descriptor operation lock */
219 bool sg_waiting
; /* Scatter gather transfer waiting */
220 struct list_head active_list
; /* Active descriptors */
221 struct list_head pending_list
; /* Descriptors waiting */
222 struct dma_chan common
; /* DMA common channel */
223 struct dma_pool
*desc_pool
; /* Descriptors pool */
224 struct device
*dev
; /* The dma device */
225 int irq
; /* Channel IRQ */
226 int id
; /* Channel ID */
227 enum dma_transfer_direction direction
;/* Transfer direction */
228 int max_len
; /* Maximum data len per transfer */
229 int is_lite
; /* Whether is light build */
230 int num_frms
; /* Number of frames */
231 int has_SG
; /* Support scatter transfers */
232 int has_DRE
; /* Support unaligned transfers */
233 int genlock
; /* Support genlock mode */
234 int err
; /* Channel has errors */
235 struct tasklet_struct tasklet
; /* Cleanup work after irq */
236 u32 feature
; /* IP feature */
237 u32
private; /* Match info for channel request */
238 void (*start_transfer
)(struct xilinx_dma_chan
*chan
);
239 struct xilinx_dma_config config
; /* Device configuration info */
240 u32 flush_fsync
; /* Flush on Fsync */
243 struct xilinx_dma_device
{
246 struct dma_device common
;
247 struct xilinx_dma_chan
*chan
[XILINX_DMA_MAX_CHANS_PER_DEVICE
];
252 #define to_xilinx_chan(chan) container_of(chan, struct xilinx_dma_chan, common)
/* Required functions */
256 static int xilinx_dma_alloc_chan_resources(struct dma_chan
*dchan
)
258 struct xilinx_dma_chan
*chan
= to_xilinx_chan(dchan
);
260 /* Has this channel already been allocated? */
265 * We need the descriptor to be aligned to 64bytes
266 * for meeting Xilinx DMA specification requirement.
268 chan
->desc_pool
= dma_pool_create("xilinx_dma_desc_pool",
270 sizeof(struct xilinx_dma_desc_sw
),
271 __alignof__(struct xilinx_dma_desc_sw
), 0);
272 if (!chan
->desc_pool
) {
273 dev_err(chan
->dev
, "unable to allocate channel %d "
274 "descriptor pool\n", chan
->id
);
278 chan
->completed_cookie
= 1;
281 /* there is at least one descriptor free to be allocated */
285 static void xilinx_dma_free_desc_list(struct xilinx_dma_chan
*chan
,
286 struct list_head
*list
)
288 struct xilinx_dma_desc_sw
*desc
, *_desc
;
290 list_for_each_entry_safe(desc
, _desc
, list
, node
) {
291 list_del(&desc
->node
);
292 dma_pool_free(chan
->desc_pool
, desc
, desc
->async_tx
.phys
);
296 static void xilinx_dma_free_desc_list_reverse(struct xilinx_dma_chan
*chan
,
297 struct list_head
*list
)
299 struct xilinx_dma_desc_sw
*desc
, *_desc
;
301 list_for_each_entry_safe_reverse(desc
, _desc
, list
, node
) {
302 list_del(&desc
->node
);
303 dma_pool_free(chan
->desc_pool
, desc
, desc
->async_tx
.phys
);
307 static void xilinx_dma_free_chan_resources(struct dma_chan
*dchan
)
309 struct xilinx_dma_chan
*chan
= to_xilinx_chan(dchan
);
312 dev_dbg(chan
->dev
, "Free all channel resources.\n");
313 spin_lock_irqsave(&chan
->lock
, flags
);
314 xilinx_dma_free_desc_list(chan
, &chan
->active_list
);
315 xilinx_dma_free_desc_list(chan
, &chan
->pending_list
);
316 spin_unlock_irqrestore(&chan
->lock
, flags
);
318 dma_pool_destroy(chan
->desc_pool
);
319 chan
->desc_pool
= NULL
;
322 static enum dma_status
xilinx_dma_desc_status(struct xilinx_dma_chan
*chan
,
323 struct xilinx_dma_desc_sw
*desc
)
325 return dma_async_is_complete(desc
->async_tx
.cookie
,
326 chan
->completed_cookie
,
330 static void xilinx_chan_desc_cleanup(struct xilinx_dma_chan
*chan
)
332 struct xilinx_dma_desc_sw
*desc
, *_desc
;
335 spin_lock_irqsave(&chan
->lock
, flags
);
337 list_for_each_entry_safe(desc
, _desc
, &chan
->active_list
, node
) {
338 dma_async_tx_callback callback
;
339 void *callback_param
;
341 if (xilinx_dma_desc_status(chan
, desc
) == DMA_IN_PROGRESS
)
344 /* Remove from the list of running transactions */
345 list_del(&desc
->node
);
347 /* Run the link descriptor callback function */
348 callback
= desc
->async_tx
.callback
;
349 callback_param
= desc
->async_tx
.callback_param
;
351 spin_unlock_irqrestore(&chan
->lock
, flags
);
352 callback(callback_param
);
353 spin_lock_irqsave(&chan
->lock
, flags
);
356 /* Run any dependencies, then free the descriptor */
357 dma_run_dependencies(&desc
->async_tx
);
358 dma_pool_free(chan
->desc_pool
, desc
, desc
->async_tx
.phys
);
361 spin_unlock_irqrestore(&chan
->lock
, flags
);
364 static enum dma_status
xilinx_tx_status(struct dma_chan
*dchan
,
366 struct dma_tx_state
*txstate
)
368 struct xilinx_dma_chan
*chan
= to_xilinx_chan(dchan
);
369 dma_cookie_t last_used
;
370 dma_cookie_t last_complete
;
372 xilinx_chan_desc_cleanup(chan
);
374 last_used
= dchan
->cookie
;
375 last_complete
= chan
->completed_cookie
;
377 dma_set_tx_state(txstate
, last_complete
, last_used
, 0);
379 return dma_async_is_complete(cookie
, last_complete
, last_used
);
382 static int dma_is_running(struct xilinx_dma_chan
*chan
)
384 return !(DMA_IN(&chan
->regs
->sr
) & XILINX_DMA_SR_HALTED_MASK
) &&
385 (DMA_IN(&chan
->regs
->cr
) & XILINX_DMA_CR_RUNSTOP_MASK
);
388 static int dma_is_idle(struct xilinx_dma_chan
*chan
)
390 return DMA_IN(&chan
->regs
->sr
) & XILINX_DMA_SR_IDLE_MASK
;
393 /* Only needed for Axi CDMA v2_00_a or earlier core
395 static void dma_sg_toggle(struct xilinx_dma_chan
*chan
)
398 DMA_OUT(&chan
->regs
->cr
,
399 DMA_IN(&chan
->regs
->cr
) & ~XILINX_CDMA_CR_SGMODE_MASK
);
401 DMA_OUT(&chan
->regs
->cr
,
402 DMA_IN(&chan
->regs
->cr
) | XILINX_CDMA_CR_SGMODE_MASK
);
405 #define XILINX_DMA_DRIVER_DEBUG 0
407 #if (XILINX_DMA_DRIVER_DEBUG == 1)
408 static void desc_dump(struct xilinx_dma_desc_hw
*hw
)
410 printk(KERN_INFO
"hw desc %x:\n", (unsigned int)hw
);
411 printk(KERN_INFO
"\tnext_desc %x\n", hw
->next_desc
);
412 printk(KERN_INFO
"\tbuf_addr %x\n", hw
->buf_addr
);
413 printk(KERN_INFO
"\taddr_vsize %x\n", hw
->addr_vsize
);
414 printk(KERN_INFO
"\thsize %x\n", hw
->hsize
);
415 printk(KERN_INFO
"\tcontrol %x\n", hw
->control
);
416 printk(KERN_INFO
"\tstatus %x\n", hw
->status
);
421 static void xilinx_cdma_start_transfer(struct xilinx_dma_chan
*chan
)
424 struct xilinx_dma_desc_sw
*desch
, *desct
;
425 struct xilinx_dma_desc_hw
*hw
;
430 spin_lock_irqsave(&chan
->lock
, flags
);
432 if (list_empty(&chan
->pending_list
))
435 /* If hardware is busy, cannot submit
437 if (!dma_is_idle(chan
)) {
438 dev_dbg(chan
->dev
, "DMA controller still busy %x\n",
439 DMA_IN(&chan
->regs
->sr
));
445 DMA_OUT(&chan
->regs
->cr
,
446 DMA_IN(&chan
->regs
->cr
) | XILINX_DMA_XR_IRQ_ALL_MASK
);
448 desch
= list_first_entry(&chan
->pending_list
, struct xilinx_dma_desc_sw
,
453 /* If hybrid mode, append pending list to active list
455 desct
= container_of(chan
->pending_list
.prev
,
456 struct xilinx_dma_desc_sw
, node
);
458 list_splice_tail_init(&chan
->pending_list
, &chan
->active_list
);
460 /* If hardware is idle, then all descriptors on the active list
461 * are done, start new transfers
465 DMA_OUT(&chan
->regs
->cdr
, desch
->async_tx
.phys
);
467 /* Update tail ptr register and start the transfer
469 DMA_OUT(&chan
->regs
->tdr
, desct
->async_tx
.phys
);
475 list_del(&desch
->node
);
476 list_add_tail(&desch
->node
, &chan
->active_list
);
480 DMA_OUT(&chan
->regs
->src
, hw
->buf_addr
);
481 DMA_OUT(&chan
->regs
->dst
, hw
->addr_vsize
);
483 /* Start the transfer
485 DMA_OUT(&chan
->regs
->btt_ref
,
486 hw
->control
& XILINX_DMA_MAX_TRANS_LEN
);
489 spin_unlock_irqrestore(&chan
->lock
, flags
);
/* dmaengine device_issue_pending hook for Axi CDMA.
 *
 * If sg mode, link the pending list to the running list; if simple mode,
 * get the head of the pending list and submit it to hw. Both cases are
 * handled inside xilinx_cdma_start_transfer().
 */
static void xilinx_cdma_issue_pending(struct dma_chan *dchan)
{
	struct xilinx_dma_chan *chan = to_xilinx_chan(dchan);

	xilinx_cdma_start_transfer(chan);
}
501 /* Stop the hardware, the ongoing transfer will be finished */
502 static void dma_halt(struct xilinx_dma_chan
*chan
)
504 int loop
= XILINX_DMA_HALT_LOOP
;
506 DMA_OUT(&chan
->regs
->cr
,
507 DMA_IN(&chan
->regs
->cr
) & ~XILINX_DMA_CR_RUNSTOP_MASK
);
509 /* Wait for the hardware to halt
512 if (!(DMA_IN(&chan
->regs
->cr
) & XILINX_DMA_CR_RUNSTOP_MASK
))
519 pr_debug("Cannot stop channel %x: %x\n",
521 (unsigned int)DMA_IN(&chan
->regs
->cr
));
528 /* Start the hardware. Transfers are not started yet */
529 static void dma_start(struct xilinx_dma_chan
*chan
)
531 int loop
= XILINX_DMA_HALT_LOOP
;
533 DMA_OUT(&chan
->regs
->cr
,
534 DMA_IN(&chan
->regs
->cr
) | XILINX_DMA_CR_RUNSTOP_MASK
);
536 /* Wait for the hardware to start
539 if (DMA_IN(&chan
->regs
->cr
) & XILINX_DMA_CR_RUNSTOP_MASK
)
546 pr_debug("Cannot start channel %x: %x\n",
548 (unsigned int)DMA_IN(&chan
->regs
->cr
));
557 static void xilinx_dma_start_transfer(struct xilinx_dma_chan
*chan
)
560 struct xilinx_dma_desc_sw
*desch
, *desct
;
561 struct xilinx_dma_desc_hw
*hw
;
566 spin_lock_irqsave(&chan
->lock
, flags
);
568 if (list_empty(&chan
->pending_list
))
571 /* If hardware is busy, cannot submit
573 if (dma_is_running(chan
) && !dma_is_idle(chan
)) {
574 dev_dbg(chan
->dev
, "DMA controller still busy\n");
578 /* If hardware is idle, then all descriptors on active list are
579 * done, start new transfers
587 desch
= list_first_entry(&chan
->pending_list
,
588 struct xilinx_dma_desc_sw
, node
);
590 desct
= container_of(chan
->pending_list
.prev
,
591 struct xilinx_dma_desc_sw
, node
);
593 DMA_OUT(&chan
->regs
->cdr
, desch
->async_tx
.phys
);
599 list_splice_tail_init(&chan
->pending_list
, &chan
->active_list
);
603 DMA_OUT(&chan
->regs
->cr
,
604 DMA_IN(&chan
->regs
->cr
) | XILINX_DMA_XR_IRQ_ALL_MASK
);
606 /* Update tail ptr register and start the transfer
608 DMA_OUT(&chan
->regs
->tdr
, desct
->async_tx
.phys
);
620 printk(KERN_INFO
"xilinx_dma_start_transfer::simple DMA mode\n");
622 desch
= list_first_entry(&chan
->pending_list
,
623 struct xilinx_dma_desc_sw
, node
);
625 list_del(&desch
->node
);
626 list_add_tail(&desch
->node
, &chan
->active_list
);
637 DMA_OUT(&chan
->regs
->cr
,
638 DMA_IN(&chan
->regs
->cr
) | XILINX_DMA_XR_IRQ_ALL_MASK
);
640 DMA_OUT(&chan
->regs
->src
, hw
->buf_addr
);
642 /* Start the transfer
644 DMA_OUT(&chan
->regs
->btt_ref
,
645 hw
->control
& XILINX_DMA_MAX_TRANS_LEN
);
648 spin_unlock_irqrestore(&chan
->lock
, flags
);
/* dmaengine device_issue_pending hook for Axi DMA: kick off whatever is
 * queued on the channel's pending list.
 */
static void xilinx_dma_issue_pending(struct dma_chan *dchan)
{
	struct xilinx_dma_chan *chan = to_xilinx_chan(dchan);

	xilinx_dma_start_transfer(chan);
}
657 static void xilinx_vdma_start_transfer(struct xilinx_dma_chan
*chan
)
660 struct xilinx_dma_desc_sw
*desch
, *desct
= NULL
;
661 struct xilinx_dma_config
*config
;
668 spin_lock_irqsave(&chan
->lock
, flags
);
670 if (list_empty(&chan
->pending_list
))
673 /* If it is SG mode and hardware is busy, cannot submit
675 if (chan
->has_SG
&& dma_is_running(chan
) && !dma_is_idle(chan
)) {
676 dev_dbg(chan
->dev
, "DMA controller still busy\n");
680 /* If hardware is idle, then all descriptors on the running lists are
681 * done, start new transfers
687 desch
= list_first_entry(&chan
->pending_list
,
688 struct xilinx_dma_desc_sw
, node
);
690 desct
= container_of(chan
->pending_list
.prev
,
691 struct xilinx_dma_desc_sw
, node
);
693 DMA_OUT(&chan
->regs
->cdr
, desch
->async_tx
.phys
);
696 /* Configure the hardware using info in the config structure */
697 config
= &(chan
->config
);
698 reg
= DMA_IN(&chan
->regs
->cr
);
700 if (config
->frm_cnt_en
)
701 reg
|= XILINX_VDMA_FRMCNT_EN
;
703 reg
&= ~XILINX_VDMA_FRMCNT_EN
;
705 /* With SG, start with circular mode, so that BDs can be fetched.
706 * In direct register mode, if not parking, enable circular mode */
707 if ((chan
->has_SG
) || (!config
->park
))
708 reg
|= XILINX_VDMA_CIRC_EN
;
711 reg
&= ~XILINX_VDMA_CIRC_EN
;
713 DMA_OUT(&chan
->regs
->cr
, reg
);
715 if ((config
->park_frm
>= 0) && (config
->park_frm
< chan
->num_frms
)) {
716 if (config
->direction
== DMA_MEM_TO_DEV
) {
717 chan_base
= (char *)chan
->regs
;
718 DMA_OUT((chan_base
+ XILINX_VDMA_PARK_REG_OFFSET
),
721 chan_base
= ((char *)chan
->regs
-
722 XILINX_DMA_RX_CHANNEL_OFFSET
);
723 DMA_OUT((chan_base
+ XILINX_VDMA_PARK_REG_OFFSET
),
724 config
->park_frm
<< XILINX_VDMA_WR_REF_SHIFT
);
728 /* Start the hardware
734 list_splice_tail_init(&chan
->pending_list
, &chan
->active_list
);
738 * park/genlock testing does not use interrupts */
739 if (!chan
->config
.disable_intr
) {
740 DMA_OUT(&chan
->regs
->cr
,
741 DMA_IN(&chan
->regs
->cr
) | XILINX_DMA_XR_IRQ_ALL_MASK
);
743 DMA_OUT(&chan
->regs
->cr
,
744 (DMA_IN(&chan
->regs
->cr
) | XILINX_DMA_XR_IRQ_ALL_MASK
) &
745 ~((chan
->config
.disable_intr
<< XILINX_DMA_IRQ_SHIFT
)));
748 /* Start the transfer
751 DMA_OUT(&chan
->regs
->tdr
, desct
->async_tx
.phys
);
753 DMA_OUT(&chan
->addr_regs
->vsize
, config
->vsize
);
756 spin_unlock_irqrestore(&chan
->lock
, flags
);
/* dmaengine device_issue_pending hook for Axi VDMA: kick off whatever is
 * queued on the channel's pending list.
 */
static void xilinx_vdma_issue_pending(struct dma_chan *dchan)
{
	struct xilinx_dma_chan *chan = to_xilinx_chan(dchan);

	xilinx_vdma_start_transfer(chan);
}
766 * xilinx_dma_update_completed_cookie - Update the completed cookie.
767 * @chan : xilinx DMA channel
771 static void xilinx_dma_update_completed_cookie(struct xilinx_dma_chan
*chan
)
773 struct xilinx_dma_desc_sw
*desc
= NULL
;
774 struct xilinx_dma_desc_hw
*hw
= NULL
;
776 dma_cookie_t cookie
= -EBUSY
;
779 spin_lock_irqsave(&chan
->lock
, flags
);
781 if (list_empty(&chan
->active_list
)) {
782 dev_dbg(chan
->dev
, "no running descriptors\n");
786 /* Get the last completed descriptor, update the cookie to that */
787 list_for_each_entry(desc
, &chan
->active_list
, node
) {
788 if ((!(chan
->feature
& XILINX_DMA_IP_VDMA
)) && chan
->has_SG
) {
791 /* If a BD has no status bits set, hw has it */
792 if (!(hw
->status
& XILINX_DMA_BD_STS_ALL_MASK
)) {
796 cookie
= desc
->async_tx
.cookie
;
799 /* In non-SG mode, all active entries are done */
801 cookie
= desc
->async_tx
.cookie
;
806 chan
->completed_cookie
= cookie
;
809 spin_unlock_irqrestore(&chan
->lock
, flags
);
814 static int dma_init(struct xilinx_dma_chan
*chan
)
816 int loop
= XILINX_DMA_RESET_LOOP
;
819 DMA_OUT(&chan
->regs
->cr
,
820 DMA_IN(&chan
->regs
->cr
) | XILINX_DMA_CR_RESET_MASK
);
822 tmp
= DMA_IN(&chan
->regs
->cr
) & XILINX_DMA_CR_RESET_MASK
;
824 /* Wait for the hardware to finish reset
826 while (loop
&& tmp
) {
827 tmp
= DMA_IN(&chan
->regs
->cr
) & XILINX_DMA_CR_RESET_MASK
;
832 dev_err(chan
->dev
, "reset timeout, cr %x, sr %x\n",
833 DMA_IN(&chan
->regs
->cr
), DMA_IN(&chan
->regs
->sr
));
837 /* For Axi CDMA, always do sg transfers if sg mode is built in
839 if ((chan
->feature
& XILINX_DMA_IP_CDMA
) && chan
->has_SG
)
840 DMA_OUT(&chan
->regs
->cr
, tmp
| XILINX_CDMA_CR_SGMODE_MASK
);
846 static irqreturn_t
dma_intr_handler(int irq
, void *data
)
848 struct xilinx_dma_chan
*chan
= data
;
849 int update_cookie
= 0;
853 reg
= DMA_IN(&chan
->regs
->cr
);
857 DMA_OUT(&chan
->regs
->cr
,
858 reg
& ~XILINX_DMA_XR_IRQ_ALL_MASK
);
860 stat
= DMA_IN(&chan
->regs
->sr
);
861 if (!(stat
& XILINX_DMA_XR_IRQ_ALL_MASK
))
864 /* Ack the interrupts
866 DMA_OUT(&chan
->regs
->sr
, XILINX_DMA_XR_IRQ_ALL_MASK
);
868 /* Check for only the interrupts which are enabled
870 stat
&= (reg
& XILINX_DMA_XR_IRQ_ALL_MASK
);
872 if (stat
& XILINX_DMA_XR_IRQ_ERROR_MASK
) {
873 if ((chan
->feature
& XILINX_DMA_IP_VDMA
)
874 && chan
->flush_fsync
) {
875 /* VDMA Recoverable Errors, only when
876 C_FLUSH_ON_FSYNC is enabled */
877 u32 error
= DMA_IN(&chan
->regs
->sr
) &
878 XILINX_VDMA_SR_ERR_RECOVER_MASK
;
880 DMA_OUT(&chan
->regs
->sr
, error
);
885 "Channel %x has errors %x, cdr %x tdr %x\n",
887 (unsigned int)DMA_IN(&chan
->regs
->sr
),
888 (unsigned int)DMA_IN(&chan
->regs
->cdr
),
889 (unsigned int)DMA_IN(&chan
->regs
->tdr
));
894 /* Device takes too long to do the transfer when user requires
897 if (stat
& XILINX_DMA_XR_IRQ_DELAY_MASK
)
898 dev_dbg(chan
->dev
, "Inter-packet latency too long\n");
900 if (stat
& XILINX_DMA_XR_IRQ_IOC_MASK
) {
906 xilinx_dma_update_completed_cookie(chan
);
909 chan
->start_transfer(chan
);
911 tasklet_schedule(&chan
->tasklet
);
/* Tasklet body scheduled from the IRQ handler: run completion callbacks
 * and free finished descriptors outside hard-interrupt context.
 * @data is the channel pointer stashed at tasklet_init() time.
 */
static void dma_do_tasklet(unsigned long data)
{
	struct xilinx_dma_chan *chan = (struct xilinx_dma_chan *)data;

	xilinx_chan_desc_cleanup(chan);
}
922 /* Append the descriptor list to the pending list */
923 static void append_desc_queue(struct xilinx_dma_chan
*chan
,
924 struct xilinx_dma_desc_sw
*desc
)
926 struct xilinx_dma_desc_sw
*tail
= container_of(chan
->pending_list
.prev
,
927 struct xilinx_dma_desc_sw
, node
);
928 struct xilinx_dma_desc_hw
*hw
;
930 if (list_empty(&chan
->pending_list
))
933 /* Add the hardware descriptor to the chain of hardware descriptors
934 * that already exists in memory.
937 hw
->next_desc
= (u32
)desc
->async_tx
.phys
;
939 /* Add the software descriptor and all children to the list
940 * of pending transactions
943 list_splice_tail_init(&desc
->tx_list
, &chan
->pending_list
);
946 /* Assign cookie to each descriptor, and append the descriptors to the pending
949 static dma_cookie_t
xilinx_dma_tx_submit(struct dma_async_tx_descriptor
*tx
)
951 struct xilinx_dma_chan
*chan
= to_xilinx_chan(tx
->chan
);
952 struct xilinx_dma_desc_sw
*desc
= container_of(tx
,
953 struct xilinx_dma_desc_sw
, async_tx
);
954 struct xilinx_dma_desc_sw
*child
;
956 dma_cookie_t cookie
= -EBUSY
;
959 /* If reset fails, need to hard reset the system.
960 * Channel is no longer functional
968 spin_lock_irqsave(&chan
->lock
, flags
);
971 * assign cookies to all of the software descriptors
972 * that make up this transaction
974 cookie
= chan
->cookie
;
975 list_for_each_entry(child
, &desc
->tx_list
, node
) {
978 cookie
= DMA_MIN_COOKIE
;
980 child
->async_tx
.cookie
= cookie
;
983 chan
->cookie
= cookie
;
986 /* put this transaction onto the tail of the pending queue */
987 append_desc_queue(chan
, desc
);
989 spin_unlock_irqrestore(&chan
->lock
, flags
);
994 static struct xilinx_dma_desc_sw
*xilinx_dma_alloc_descriptor(
995 struct xilinx_dma_chan
*chan
)
997 struct xilinx_dma_desc_sw
*desc
;
1000 desc
= dma_pool_alloc(chan
->desc_pool
, GFP_ATOMIC
, &pdesc
);
1002 dev_dbg(chan
->dev
, "out of memory for desc\n");
1006 memset(desc
, 0, sizeof(*desc
));
1007 INIT_LIST_HEAD(&desc
->tx_list
);
1008 dma_async_tx_descriptor_init(&desc
->async_tx
, &chan
->common
);
1009 desc
->async_tx
.tx_submit
= xilinx_dma_tx_submit
;
1010 desc
->async_tx
.phys
= pdesc
;
1016 * xilinx_dma_prep_memcpy - prepare descriptors for a memcpy transaction
1017 * @dchan: DMA channel
1018 * @dma_dst: destination address
1019 * @dma_src: source address
1020 * @len: transfer length
1021 * @flags: transfer ack flags
1023 static struct dma_async_tx_descriptor
*xilinx_dma_prep_memcpy(
1024 struct dma_chan
*dchan
, dma_addr_t dma_dst
, dma_addr_t dma_src
,
1025 size_t len
, unsigned long flags
)
1027 struct xilinx_dma_chan
*chan
;
1028 struct xilinx_dma_desc_sw
*first
= NULL
, *prev
= NULL
, *new;
1029 struct xilinx_dma_desc_hw
*hw
, *prev_hw
;
1031 dma_addr_t src
= dma_src
;
1032 dma_addr_t dst
= dma_dst
;
1040 chan
= to_xilinx_chan(dchan
);
1044 /* If reset fails, need to hard reset the system.
1045 * Channel is no longer functional
1047 if (!dma_init(chan
))
1053 /* If build does not have Data Realignment Engine (DRE),
1054 * src has to be aligned
1056 if (!chan
->has_DRE
) {
1058 (chan
->feature
& XILINX_DMA_FTR_DATA_WIDTH_MASK
)) ||
1060 (chan
->feature
& XILINX_DMA_FTR_DATA_WIDTH_MASK
))) {
1063 "Source or destination address not aligned when no DRE\n");
1071 /* Allocate descriptor from DMA pool */
1072 new = xilinx_dma_alloc_descriptor(chan
);
1075 "No free memory for link descriptor\n");
1079 copy
= min(len
, (size_t)chan
->max_len
);
1081 /* if lite build, transfer cannot cross page boundary
1084 copy
= min(copy
, (size_t)(PAGE_MASK
-
1085 (src
& PAGE_MASK
)));
1089 "Got zero transfer length for %x\n",
1096 (hw
->control
& ~XILINX_DMA_MAX_TRANS_LEN
) | copy
;
1098 hw
->addr_vsize
= dst
;
1103 prev_hw
= &(prev
->hw
);
1104 prev_hw
->next_desc
= new->async_tx
.phys
;
1107 new->async_tx
.cookie
= 0;
1108 async_tx_ack(&new->async_tx
);
1115 /* Insert the descriptor to the list */
1116 list_add_tail(&new->node
, &first
->tx_list
);
1119 /* Link the last BD with the first BD */
1120 hw
->next_desc
= first
->async_tx
.phys
;
1122 new->async_tx
.flags
= flags
; /* client is in control of this ack */
1123 new->async_tx
.cookie
= -EBUSY
;
1125 return &first
->async_tx
;
1131 xilinx_dma_free_desc_list_reverse(chan
, &first
->tx_list
);
1136 * xilinx_dma_prep_slave_sg - prepare descriptors for a DMA_SLAVE transaction
1137 * @chan: DMA channel
1138 * @sgl: scatterlist to transfer to/from
1139 * @sg_len: number of entries in @scatterlist
1140 * @direction: DMA direction
1141 * @flags: transfer ack flags
1143 static struct dma_async_tx_descriptor
*xilinx_dma_prep_slave_sg(
1144 struct dma_chan
*dchan
, struct scatterlist
*sgl
, unsigned int sg_len
,
1145 enum dma_transfer_direction direction
, unsigned long flags
,
1148 struct xilinx_dma_chan
*chan
;
1149 struct xilinx_dma_desc_sw
*first
= NULL
, *prev
= NULL
, *new = NULL
;
1150 struct xilinx_dma_desc_hw
*hw
= NULL
, *prev_hw
= NULL
;
1155 struct scatterlist
*sg
;
1159 #ifdef TEST_DMA_WITH_LOOPBACK
1165 chan
= to_xilinx_chan(dchan
);
1167 if (chan
->direction
!= direction
)
1170 #ifdef TEST_DMA_WITH_LOOPBACK
1173 for_each_sg(sgl
, sg
, sg_len
, i
) {
1174 total_len
+= sg_dma_len(sg
);
1178 * Build transactions using information in the scatter gather list
1180 for_each_sg(sgl
, sg
, sg_len
, i
) {
1183 /* Loop until the entire scatterlist entry is used */
1184 while (sg_used
< sg_dma_len(sg
)) {
1186 /* Allocate the link descriptor from DMA pool */
1187 new = xilinx_dma_alloc_descriptor(chan
);
1189 dev_err(chan
->dev
, "No free memory for "
1190 "link descriptor\n");
1195 * Calculate the maximum number of bytes to transfer,
1196 * making sure it is less than the hw limit
1198 copy
= min((size_t)(sg_dma_len(sg
) - sg_used
),
1199 (size_t)chan
->max_len
);
1202 dma_src
= sg_dma_address(sg
) + sg_used
;
1204 hw
->buf_addr
= dma_src
;
1206 /* Fill in the descriptor */
1210 * If this is not the first descriptor, chain the
1211 * current descriptor after the previous descriptor
1213 * For the first DMA_MEM_TO_DEV transfer, set SOP
1217 if (direction
== DMA_MEM_TO_DEV
) {
1218 hw
->control
|= XILINX_DMA_BD_SOP
;
1219 #ifdef TEST_DMA_WITH_LOOPBACK
1220 hw
->app_4
= total_len
;
1224 prev_hw
= &(prev
->hw
);
1225 prev_hw
->next_desc
= new->async_tx
.phys
;
1228 new->async_tx
.cookie
= 0;
1229 async_tx_ack(&new->async_tx
);
1234 /* Insert the link descriptor into the LD ring */
1235 list_add_tail(&new->node
, &first
->tx_list
);
1239 /* Link the last BD with the first BD */
1240 hw
->next_desc
= first
->async_tx
.phys
;
1242 if (direction
== DMA_MEM_TO_DEV
)
1243 hw
->control
|= XILINX_DMA_BD_EOP
;
1245 /* All scatter gather list entries has length == 0 */
1249 new->async_tx
.flags
= flags
;
1250 new->async_tx
.cookie
= -EBUSY
;
1252 /* Set EOP to the last link descriptor of new list */
1253 hw
->control
|= XILINX_DMA_BD_EOP
;
1255 return &first
->async_tx
;
1258 /* If first was not set, then we failed to allocate the very first
1259 * descriptor, and we're done */
1264 * First is set, so all of the descriptors we allocated have been added
1265 * to first->tx_list, INCLUDING "first" itself. Therefore we
1266 * must traverse the list backwards freeing each descriptor in turn
1268 xilinx_dma_free_desc_list_reverse(chan
, &first
->tx_list
);
1274 * xilinx_vdma_prep_slave_sg - prepare descriptors for a DMA_SLAVE transaction
1275 * @chan: VDMA channel
1276 * @sgl: scatterlist to transfer to/from
1277 * @sg_len: number of entries in @scatterlist
1278 * @direction: DMA direction
1279 * @flags: transfer ack flags
1281 static struct dma_async_tx_descriptor
*xilinx_vdma_prep_slave_sg(
1282 struct dma_chan
*dchan
, struct scatterlist
*sgl
, unsigned int sg_len
,
1283 enum dma_transfer_direction direction
, unsigned long flags
,
1286 struct xilinx_dma_chan
*chan
;
1287 struct xilinx_dma_desc_sw
*first
= NULL
, *prev
= NULL
, *new = NULL
;
1288 struct xilinx_dma_desc_hw
*hw
= NULL
, *prev_hw
= NULL
;
1290 struct scatterlist
*sg
;
1296 chan
= to_xilinx_chan(dchan
);
1298 if (chan
->direction
!= direction
)
1301 /* Enforce one sg entry for one frame */
1302 if (sg_len
!= chan
->num_frms
) {
1303 dev_err(chan
->dev
, "number of entries %d not the "
1304 "same as num stores %d\n", sg_len
, chan
->num_frms
);
1309 if (!chan
->has_SG
) {
1310 DMA_OUT(&chan
->addr_regs
->hsize
, chan
->config
.hsize
);
1311 DMA_OUT(&chan
->addr_regs
->frmdly_stride
,
1312 chan
->config
.frm_dly
<< XILINX_VDMA_FRMDLY_SHIFT
|
1313 chan
->config
.stride
);
1316 /* Build transactions using information in the scatter gather list
1318 for_each_sg(sgl
, sg
, sg_len
, i
) {
1320 /* Allocate the link descriptor from DMA pool */
1321 new = xilinx_dma_alloc_descriptor(chan
);
1323 dev_err(chan
->dev
, "No free memory for "
1324 "link descriptor\n");
1329 * Calculate the maximum number of bytes to transfer,
1330 * making sure it is less than the hw limit
1334 dma_src
= sg_dma_address(sg
);
1336 hw
->buf_addr
= dma_src
;
1338 /* Fill in the descriptor */
1339 hw
->addr_vsize
= chan
->config
.vsize
;
1340 hw
->hsize
= chan
->config
.hsize
;
1341 hw
->control
= (chan
->config
.frm_dly
<<
1342 XILINX_VDMA_FRMDLY_SHIFT
) |
1343 chan
->config
.stride
;
1345 /* Update the registers */
1346 DMA_OUT(&(chan
->addr_regs
->buf_addr
[i
]), dma_src
);
1349 /* If this is not the first descriptor, chain the
1350 * current descriptor after the previous descriptor
1355 prev_hw
= &(prev
->hw
);
1356 prev_hw
->next_desc
= new->async_tx
.phys
;
1359 new->async_tx
.cookie
= 0;
1360 async_tx_ack(&new->async_tx
);
1364 /* Insert the link descriptor into the list */
1365 list_add_tail(&new->node
, &first
->tx_list
);
1368 /* Link the last BD with the first BD */
1369 hw
->next_desc
= first
->async_tx
.phys
;
1374 new->async_tx
.flags
= flags
;
1375 new->async_tx
.cookie
= -EBUSY
;
1377 return &first
->async_tx
;
1380 /* If first was not set, then we failed to allocate the very first
1381 * descriptor, and we're done */
1385 /* First is set, so all of the descriptors we allocated have been added
1386 * to first->tx_list, INCLUDING "first" itself. Therefore we
1387 * must traverse the list backwards freeing each descriptor in turn
1389 xilinx_dma_free_desc_list_reverse(chan
, &first
->tx_list
);
1393 /* Run-time device configuration for Axi DMA and Axi CDMA */
1394 static int xilinx_dma_device_control(struct dma_chan
*dchan
,
1395 enum dma_ctrl_cmd cmd
, unsigned long arg
)
1397 struct xilinx_dma_chan
*chan
;
1398 unsigned long flags
;
1403 chan
= to_xilinx_chan(dchan
);
1405 if (cmd
== DMA_TERMINATE_ALL
) {
1406 /* Halt the DMA engine */
1409 spin_lock_irqsave(&chan
->lock
, flags
);
1411 /* Remove and free all of the descriptors in the lists */
1412 xilinx_dma_free_desc_list(chan
, &chan
->pending_list
);
1413 xilinx_dma_free_desc_list(chan
, &chan
->active_list
);
1415 spin_unlock_irqrestore(&chan
->lock
, flags
);
1417 } else if (cmd
== DMA_SLAVE_CONFIG
) {
1418 /* Configure interrupt coalescing and delay counter
1419 * Use value XILINX_DMA_NO_CHANGE to signal no change
1421 struct xilinx_dma_config
*cfg
= (struct xilinx_dma_config
*)arg
;
1422 u32 reg
= DMA_IN(&chan
->regs
->cr
);
1424 if (cfg
->coalesc
<= XILINX_DMA_COALESCE_MAX
) {
1425 reg
&= ~XILINX_DMA_XR_COALESCE_MASK
;
1426 reg
|= cfg
->coalesc
<< XILINX_DMA_COALESCE_SHIFT
;
1428 chan
->config
.coalesc
= cfg
->coalesc
;
1431 if (cfg
->delay
<= XILINX_DMA_DELAY_MAX
) {
1432 reg
&= ~XILINX_DMA_XR_DELAY_MASK
;
1433 reg
|= cfg
->delay
<< XILINX_DMA_DELAY_SHIFT
;
1434 chan
->config
.delay
= cfg
->delay
;
1437 DMA_OUT(&chan
->regs
->cr
, reg
);
1444 /* Run-time configuration for Axi VDMA, supports:
1445 * . halt the channel
1446 * . configure interrupt coalescing and inter-packet delay threshold
1447 * . start/stop parking
1449 * . set transfer information using config struct
1451 static int xilinx_vdma_device_control(struct dma_chan
*dchan
,
1452 enum dma_ctrl_cmd cmd
, unsigned long arg
)
1454 struct xilinx_dma_chan
*chan
;
1455 unsigned long flags
;
1460 chan
= to_xilinx_chan(dchan
);
1462 if (cmd
== DMA_TERMINATE_ALL
) {
1463 /* Halt the DMA engine */
1466 spin_lock_irqsave(&chan
->lock
, flags
);
1468 /* Remove and free all of the descriptors in the lists */
1469 xilinx_dma_free_desc_list(chan
, &chan
->pending_list
);
1470 xilinx_dma_free_desc_list(chan
, &chan
->active_list
);
1472 spin_unlock_irqrestore(&chan
->lock
, flags
);
1474 } else if (cmd
== DMA_SLAVE_CONFIG
) {
1475 struct xilinx_dma_config
*cfg
= (struct xilinx_dma_config
*)arg
;
1481 reg
= DMA_IN(&chan
->regs
->cr
);
1483 /* If vsize is -1, it is park-related operations */
1484 if (cfg
->vsize
== -1) {
1486 reg
&= ~XILINX_VDMA_CIRC_EN
;
1488 reg
|= XILINX_VDMA_CIRC_EN
;
1490 DMA_OUT(&chan
->regs
->cr
, reg
);
1494 /* If hsize is -1, it is interrupt threshold settings */
1495 if (cfg
->hsize
== -1) {
1496 if (cfg
->coalesc
<= XILINX_DMA_COALESCE_MAX
) {
1497 reg
&= ~XILINX_DMA_XR_COALESCE_MASK
;
1498 reg
|= cfg
->coalesc
<<
1499 XILINX_DMA_COALESCE_SHIFT
;
1500 chan
->config
.coalesc
= cfg
->coalesc
;
1503 if (cfg
->delay
<= XILINX_DMA_DELAY_MAX
) {
1504 reg
&= ~XILINX_DMA_XR_DELAY_MASK
;
1505 reg
|= cfg
->delay
<< XILINX_DMA_DELAY_SHIFT
;
1506 chan
->config
.delay
= cfg
->delay
;
1509 DMA_OUT(&chan
->regs
->cr
, reg
);
1513 /* Transfer information */
1514 chan
->config
.vsize
= cfg
->vsize
;
1515 chan
->config
.hsize
= cfg
->hsize
;
1516 chan
->config
.stride
= cfg
->stride
;
1517 chan
->config
.frm_dly
= cfg
->frm_dly
;
1518 chan
->config
.park
= cfg
->park
;
1519 chan
->config
.direction
= cfg
->direction
;
1521 /* genlock settings */
1522 chan
->config
.gen_lock
= cfg
->gen_lock
;
1523 chan
->config
.master
= cfg
->master
;
1525 if (cfg
->gen_lock
) {
1526 if (chan
->genlock
) {
1527 reg
|= XILINX_VDMA_SYNC_EN
;
1528 reg
|= cfg
->master
<< XILINX_VDMA_MSTR_SHIFT
;
1532 chan
->config
.frm_cnt_en
= cfg
->frm_cnt_en
;
1534 chan
->config
.park_frm
= cfg
->park_frm
;
1536 chan
->config
.park_frm
= -1;
1538 chan
->config
.coalesc
= cfg
->coalesc
;
1539 chan
->config
.delay
= cfg
->delay
;
1540 if (cfg
->coalesc
<= XILINX_DMA_COALESCE_MAX
) {
1541 reg
|= cfg
->coalesc
<< XILINX_DMA_COALESCE_SHIFT
;
1542 chan
->config
.coalesc
= cfg
->coalesc
;
1545 if (cfg
->delay
<= XILINX_DMA_DELAY_MAX
) {
1546 reg
|= cfg
->delay
<< XILINX_DMA_DELAY_SHIFT
;
1547 chan
->config
.delay
= cfg
->delay
;
1550 chan
->config
.disable_intr
= cfg
->disable_intr
;
1553 reg
|= cfg
->ext_fsync
<< XILINX_VDMA_EXTFSYNC_SHIFT
;
1555 DMA_OUT(&chan
->regs
->cr
, reg
);
/* Logarithm function to compute alignment shift
 *
 * Returns the smallest shift i such that (1 << i) >= value.
 * Only deals with value less than 4096; the loop is additionally
 * bounded at 31 so that a bogus large input can never shift into
 * the sign bit (undefined behavior).
 */
static int my_log(int value)
{
	int i = 0;

	/* Non-positive values need no alignment shift */
	if (value <= 0)
		return 0;

	while (i < 31 && (1 << i) < value)
		i++;

	return i;
}
1581 static void xilinx_dma_chan_remove(struct xilinx_dma_chan
*chan
)
1583 irq_dispose_mapping(chan
->irq
);
1584 list_del(&chan
->common
.device_node
);
1591 * . Get channel features from the device tree entry
1592 * . Initialize special channel handling routines
1594 static int __devinit
xilinx_dma_chan_probe(struct xilinx_dma_device
*xdev
,
1595 struct device_node
*node
, u32 feature
)
1597 struct xilinx_dma_chan
*chan
;
1600 u32 width
= 0, device_id
= 0, flush_fsync
= 0;
1603 chan
= kzalloc(sizeof(*chan
), GFP_KERNEL
);
1605 dev_err(xdev
->dev
, "no free memory for DMA channels!\n");
1610 chan
->feature
= feature
;
1614 chan
->max_len
= XILINX_DMA_MAX_TRANS_LEN
;
1616 value
= (int *)of_get_property(node
, "xlnx,include-dre",
1619 if (be32_to_cpup(value
) == 1)
1623 value
= (int *)of_get_property(node
, "xlnx,genlock-mode",
1626 if (be32_to_cpup(value
) == 1)
1630 value
= (int *)of_get_property(node
,
1634 width
= be32_to_cpup(value
) >> 3; /* convert bits to bytes */
1636 /* If data width is greater than 8 bytes, DRE is not in hw */
1640 chan
->feature
|= width
- 1;
1643 value
= (int *)of_get_property(node
, "xlnx,device-id", NULL
);
1645 device_id
= be32_to_cpup(value
);
1647 flush_fsync
= (xdev
->feature
& XILINX_VDMA_FTR_FLUSH_MASK
) >>
1648 XILINX_VDMA_FTR_FLUSH_SHIFT
;
1650 if (feature
& XILINX_DMA_IP_CDMA
) {
1651 chan
->direction
= DMA_MEM_TO_MEM
;
1652 chan
->start_transfer
= xilinx_cdma_start_transfer
;
1654 chan
->has_SG
= (xdev
->feature
& XILINX_DMA_FTR_HAS_SG
) >>
1655 XILINX_DMA_FTR_HAS_SG_SHIFT
;
1657 value
= (int *)of_get_property(node
,
1658 "xlnx,lite-mode", NULL
);
1660 if (be32_to_cpup(value
) == 1) {
1662 value
= (int *)of_get_property(node
,
1663 "xlnx,max-burst-len", NULL
);
1667 "Lite mode without data width property\n");
1670 chan
->max_len
= width
*
1671 be32_to_cpup(value
);
1677 if (feature
& XILINX_DMA_IP_DMA
) {
1678 chan
->has_SG
= (xdev
->feature
& XILINX_DMA_FTR_HAS_SG
) >>
1679 XILINX_DMA_FTR_HAS_SG_SHIFT
;
1681 chan
->start_transfer
= xilinx_dma_start_transfer
;
1683 if (of_device_is_compatible(node
,
1684 "xlnx,axi-dma-mm2s-channel"))
1685 chan
->direction
= DMA_MEM_TO_DEV
;
1687 if (of_device_is_compatible(node
,
1688 "xlnx,axi-dma-s2mm-channel"))
1689 chan
->direction
= DMA_DEV_TO_MEM
;
1693 if (feature
& XILINX_DMA_IP_VDMA
) {
1694 chan
->start_transfer
= xilinx_vdma_start_transfer
;
1696 chan
->has_SG
= (xdev
->feature
& XILINX_DMA_FTR_HAS_SG
) >>
1697 XILINX_DMA_FTR_HAS_SG_SHIFT
;
1699 if (of_device_is_compatible(node
,
1700 "xlnx,axi-vdma-mm2s-channel")) {
1701 chan
->direction
= DMA_MEM_TO_DEV
;
1702 if (!chan
->has_SG
) {
1703 chan
->addr_regs
= (struct vdma_addr_regs
*)
1705 XILINX_VDMA_DIRECT_REG_OFFSET
);
1707 if (flush_fsync
== XILINX_VDMA_FLUSH_BOTH
||
1708 flush_fsync
== XILINX_VDMA_FLUSH_MM2S
)
1709 chan
->flush_fsync
= 1;
1712 if (of_device_is_compatible(node
,
1713 "xlnx,axi-vdma-s2mm-channel")) {
1714 chan
->direction
= DMA_DEV_TO_MEM
;
1715 if (!chan
->has_SG
) {
1716 chan
->addr_regs
= (struct vdma_addr_regs
*)
1718 XILINX_VDMA_DIRECT_REG_OFFSET
+
1719 XILINX_VDMA_CHAN_DIRECT_REG_SIZE
);
1721 if (flush_fsync
== XILINX_VDMA_FLUSH_BOTH
||
1722 flush_fsync
== XILINX_VDMA_FLUSH_S2MM
)
1723 chan
->flush_fsync
= 1;
1727 chan
->regs
= (struct xdma_regs
*)xdev
->regs
;
1730 if (chan
->direction
== DMA_DEV_TO_MEM
) {
1731 chan
->regs
= (struct xdma_regs
*)((u32
)xdev
->regs
+
1732 XILINX_DMA_RX_CHANNEL_OFFSET
);
1736 /* Used by dmatest channel matching in slave transfers
1737 * Can change it to be a structure to have more matching information
1739 chan
->private = (chan
->direction
& 0xFF) |
1740 (chan
->feature
& XILINX_DMA_IP_MASK
) |
1741 (device_id
<< XILINX_DMA_DEVICE_ID_SHIFT
);
1742 chan
->common
.private = (void *)&(chan
->private);
1745 xdev
->common
.copy_align
= my_log(width
);
1747 chan
->dev
= xdev
->dev
;
1748 xdev
->chan
[chan
->id
] = chan
;
1750 tasklet_init(&chan
->tasklet
, dma_do_tasklet
, (unsigned long)chan
);
1752 /* Initialize the channel */
1753 if (dma_init(chan
)) {
1754 dev_err(xdev
->dev
, "Reset channel failed\n");
1759 spin_lock_init(&chan
->lock
);
1760 INIT_LIST_HEAD(&chan
->pending_list
);
1761 INIT_LIST_HEAD(&chan
->active_list
);
1763 chan
->common
.device
= &xdev
->common
;
1765 /* find the IRQ line, if it exists in the device tree */
1766 chan
->irq
= irq_of_parse_and_map(node
, 0);
1767 err
= request_irq(chan
->irq
, dma_intr_handler
, IRQF_SHARED
,
1768 "xilinx-dma-controller", chan
);
1770 dev_err(xdev
->dev
, "unable to request IRQ\n");
1774 /* Add the channel to DMA device channel list */
1775 list_add_tail(&chan
->common
.device_node
, &xdev
->common
.channels
);
1776 xdev
->common
.chancnt
++;
1781 irq_dispose_mapping(chan
->irq
);
1788 static int __devinit
xilinx_dma_of_probe(struct platform_device
*op
)
1790 struct xilinx_dma_device
*xdev
;
1791 struct device_node
*child
, *node
;
1796 dev_info(&op
->dev
, "Probing xilinx axi dma engines\n");
1798 xdev
= kzalloc(sizeof(struct xilinx_dma_device
), GFP_KERNEL
);
1800 dev_err(&op
->dev
, "Not enough memory for device\n");
1805 xdev
->dev
= &(op
->dev
);
1806 INIT_LIST_HEAD(&xdev
->common
.channels
);
1808 node
= op
->dev
.of_node
;
1811 /* iomap registers */
1812 xdev
->regs
= of_iomap(node
, 0);
1814 dev_err(&op
->dev
, "unable to iomap registers\n");
1819 /* Axi CDMA only does memcpy
1821 if (of_device_is_compatible(node
, "xlnx,axi-cdma")) {
1822 xdev
->feature
|= XILINX_DMA_IP_CDMA
;
1824 value
= (int *)of_get_property(node
, "xlnx,include-sg",
1827 if (be32_to_cpup(value
) == 1)
1828 xdev
->feature
|= XILINX_DMA_FTR_HAS_SG
;
1831 dma_cap_set(DMA_MEMCPY
, xdev
->common
.cap_mask
);
1832 xdev
->common
.device_prep_dma_memcpy
= xilinx_dma_prep_memcpy
;
1833 xdev
->common
.device_control
= xilinx_dma_device_control
;
1834 xdev
->common
.device_issue_pending
= xilinx_cdma_issue_pending
;
1837 /* Axi DMA and VDMA only do slave transfers
1839 if (of_device_is_compatible(node
, "xlnx,axi-dma")) {
1841 xdev
->feature
|= XILINX_DMA_IP_DMA
;
1842 value
= (int *)of_get_property(node
,
1843 "xlnx,sg-include-stscntrl-strm",
1846 if (be32_to_cpup(value
) == 1) {
1847 xdev
->feature
|= (XILINX_DMA_FTR_STSCNTRL_STRM
|
1848 XILINX_DMA_FTR_HAS_SG
);
1852 dma_cap_set(DMA_SLAVE
, xdev
->common
.cap_mask
);
1853 dma_cap_set(DMA_PRIVATE
, xdev
->common
.cap_mask
);
1854 xdev
->common
.device_prep_slave_sg
= xilinx_dma_prep_slave_sg
;
1855 xdev
->common
.device_control
= xilinx_dma_device_control
;
1856 xdev
->common
.device_issue_pending
= xilinx_dma_issue_pending
;
1859 if (of_device_is_compatible(node
, "xlnx,axi-vdma")) {
1860 xdev
->feature
|= XILINX_DMA_IP_VDMA
;
1862 value
= (int *)of_get_property(node
, "xlnx,include-sg",
1865 if (be32_to_cpup(value
) == 1)
1866 xdev
->feature
|= XILINX_DMA_FTR_HAS_SG
;
1869 value
= (int *)of_get_property(node
, "xlnx,num-fstores",
1872 num_frames
= be32_to_cpup(value
);
1874 value
= (int *)of_get_property(node
, "xlnx,flush-fsync", NULL
);
1876 xdev
->feature
|= be32_to_cpup(value
) <<
1877 XILINX_VDMA_FTR_FLUSH_SHIFT
;
1879 dma_cap_set(DMA_SLAVE
, xdev
->common
.cap_mask
);
1880 dma_cap_set(DMA_PRIVATE
, xdev
->common
.cap_mask
);
1881 xdev
->common
.device_prep_slave_sg
= xilinx_vdma_prep_slave_sg
;
1882 xdev
->common
.device_control
= xilinx_vdma_device_control
;
1883 xdev
->common
.device_issue_pending
= xilinx_vdma_issue_pending
;
1886 xdev
->common
.device_alloc_chan_resources
=
1887 xilinx_dma_alloc_chan_resources
;
1888 xdev
->common
.device_free_chan_resources
=
1889 xilinx_dma_free_chan_resources
;
1890 xdev
->common
.device_tx_status
= xilinx_tx_status
;
1891 xdev
->common
.dev
= &op
->dev
;
1893 dev_set_drvdata(&op
->dev
, xdev
);
1895 for_each_child_of_node(node
, child
) {
1896 xilinx_dma_chan_probe(xdev
, child
, xdev
->feature
);
1899 if (xdev
->feature
& XILINX_DMA_IP_VDMA
) {
1902 for (i
= 0; i
< XILINX_DMA_MAX_CHANS_PER_DEVICE
; i
++) {
1904 xdev
->chan
[i
]->num_frms
= num_frames
;
1908 dma_async_device_register(&xdev
->common
);
1919 static int __devexit
xilinx_dma_of_remove(struct platform_device
*op
)
1921 struct xilinx_dma_device
*xdev
;
1924 xdev
= dev_get_drvdata(&op
->dev
);
1925 dma_async_device_unregister(&xdev
->common
);
1927 for (i
= 0; i
< XILINX_DMA_MAX_CHANS_PER_DEVICE
; i
++) {
1929 xilinx_dma_chan_remove(xdev
->chan
[i
]);
1932 iounmap(xdev
->regs
);
1933 dev_set_drvdata(&op
->dev
, NULL
);
1939 static const struct of_device_id xilinx_dma_of_ids
[] = {
1940 { .compatible
= "xlnx,axi-cdma",},
1941 { .compatible
= "xlnx,axi-dma",},
1942 { .compatible
= "xlnx,axi-vdma",},
1946 static struct platform_driver xilinx_dma_of_driver
= {
1948 .name
= "xilinx-dma",
1949 .owner
= THIS_MODULE
,
1950 .of_match_table
= xilinx_dma_of_ids
,
1952 .probe
= xilinx_dma_of_probe
,
1953 .remove
= __devexit_p(xilinx_dma_of_remove
),
1956 /*----------------------------------------------------------------------------*/
1957 /* Module Init / Exit */
1958 /*----------------------------------------------------------------------------*/
1960 static __init
int xilinx_dma_init(void)
1964 pr_info("Xilinx DMA driver\n");
1966 ret
= platform_driver_register(&xilinx_dma_of_driver
);
1968 pr_err("xilinx_dma: failed to register platform driver\n");
1973 static void __exit
xilinx_dma_exit(void)
1975 platform_driver_unregister(&xilinx_dma_of_driver
);
1978 subsys_initcall(xilinx_dma_init
);
1979 module_exit(xilinx_dma_exit
);
1983 /**************************************************/
1984 /* Platform bus to support ARM before device tree */
1985 /**************************************************/
1987 /* The following probe and chan_probe functions were
1988 copied from the OF section above, then modified
1989 to use platform data.
1992 static void xilinx_dma_chan_remove(struct xilinx_dma_chan
*chan
)
1994 free_irq(chan
->irq
, chan
);
1995 list_del(&chan
->common
.device_node
);
2002 * . Get channel features from the device tree entry
2003 * . Initialize special channel handling routines
2005 static int __devinit
xilinx_dma_chan_probe(struct platform_device
*pdev
,
2006 struct xilinx_dma_device
*xdev
,
2007 struct dma_channel_config
*channel_config
,
2008 int channel_num
, u32 feature
)
2010 struct xilinx_dma_chan
*chan
;
2013 struct resource
*res
;
2018 chan
= kzalloc(sizeof(*chan
), GFP_KERNEL
);
2020 dev_err(xdev
->dev
, "no free memory for DMA channels!\n");
2025 chan
->feature
= feature
;
2029 chan
->max_len
= XILINX_DMA_MAX_TRANS_LEN
;
2031 if (channel_config
->include_dre
)
2034 if (channel_config
->genlock_mode
)
2037 width
= channel_config
->datawidth
>> 3;
2038 chan
->feature
|= width
- 1;
2040 if (feature
& XILINX_DMA_IP_CDMA
) {
2042 chan
->direction
= DMA_MEM_TO_MEM
;
2043 chan
->start_transfer
= xilinx_cdma_start_transfer
;
2045 chan
->has_SG
= (xdev
->feature
& XILINX_DMA_FTR_HAS_SG
) >>
2046 XILINX_DMA_FTR_HAS_SG_SHIFT
;
2048 if (channel_config
->lite_mode
) {
2050 chan
->max_len
= width
* channel_config
->max_burst_len
;
2054 if (feature
& XILINX_DMA_IP_DMA
) {
2056 chan
->start_transfer
= xilinx_dma_start_transfer
;
2058 if (!strcmp(channel_config
->type
, "axi-dma-mm2s-channel"))
2059 chan
->direction
= DMA_MEM_TO_DEV
;
2061 if (!strcmp(channel_config
->type
, "axi-dma-s2mm-channel"))
2062 chan
->direction
= DMA_DEV_TO_MEM
;
2065 if (feature
& XILINX_DMA_IP_VDMA
) {
2067 chan
->start_transfer
= xilinx_vdma_start_transfer
;
2069 chan
->has_SG
= (xdev
->feature
& XILINX_DMA_FTR_HAS_SG
) >>
2070 XILINX_DMA_FTR_HAS_SG_SHIFT
;
2072 if (!strcmp(channel_config
->type
, "axi-vdma-mm2s-channel")) {
2074 printk(KERN_INFO
, "axi-vdma-mm2s-channel found\n");
2076 chan
->direction
= DMA_MEM_TO_DEV
;
2077 if (!chan
->has_SG
) {
2078 chan
->addr_regs
= (struct vdma_addr_regs
*)
2080 XILINX_VDMA_DIRECT_REG_OFFSET
);
2084 if (!strcmp(channel_config
->type
, "axi-vdma-s2mm-channel")) {
2086 printk(KERN_INFO
, "axi-vdma-s2mm-channel found\n");
2088 chan
->direction
= DMA_DEV_TO_MEM
;
2089 if (!chan
->has_SG
) {
2090 chan
->addr_regs
= (struct vdma_addr_regs
*)
2092 XILINX_VDMA_DIRECT_REG_OFFSET
+
2093 XILINX_VDMA_CHAN_DIRECT_REG_SIZE
);
2098 chan
->regs
= (struct xdma_regs
*)xdev
->regs
;
2101 if (chan
->direction
== DMA_DEV_TO_MEM
) {
2102 chan
->regs
= (struct xdma_regs
*)((u32
)xdev
->regs
+
2103 XILINX_DMA_RX_CHANNEL_OFFSET
);
2107 /* Used by dmatest channel matching in slave transfers
2108 * Can change it to be a structure to have more matching information
2110 chan
->private = (chan
->direction
& 0xFF) |
2111 (chan
->feature
& XILINX_DMA_IP_MASK
);
2112 chan
->common
.private = (void *)&(chan
->private);
2115 xdev
->common
.copy_align
= my_log(width
);
2117 chan
->dev
= xdev
->dev
;
2118 xdev
->chan
[chan
->id
] = chan
;
2120 tasklet_init(&chan
->tasklet
, dma_do_tasklet
, (unsigned long)chan
);
2122 /* Initialize the channel */
2123 if (dma_init(chan
)) {
2124 dev_err(xdev
->dev
, "Reset channel failed\n");
2129 spin_lock_init(&chan
->lock
);
2130 INIT_LIST_HEAD(&chan
->pending_list
);
2131 INIT_LIST_HEAD(&chan
->active_list
);
2133 chan
->common
.device
= &xdev
->common
;
2135 /* setup the interrupt for the channel */
2137 res
= platform_get_resource(pdev
, IORESOURCE_IRQ
, channel_num
);
2138 chan
->irq
= res
->start
;
2140 err
= request_irq(chan
->irq
, dma_intr_handler
, IRQF_SHARED
,
2141 "xilinx-dma-controller", chan
);
2143 dev_err(xdev
->dev
, "unable to request IRQ\n");
2146 dev_info(&pdev
->dev
, "using irq %d\n", chan
->irq
);
2148 /* Add the channel to DMA device channel list */
2149 list_add_tail(&chan
->common
.device_node
, &xdev
->common
.channels
);
2150 xdev
->common
.chancnt
++;
2155 free_irq(chan
->irq
, chan
);
2162 static int __devinit
xilinx_dma_probe(struct platform_device
*pdev
)
2164 struct xilinx_dma_device
*xdev
;
2167 struct resource
*res
;
2168 struct device
*dev
= &pdev
->dev
;
2169 struct dma_device_config
*dma_config
;
2172 dev_info(&pdev
->dev
, "Probing xilinx axi dma engines\n");
2174 xdev
= kzalloc(sizeof(struct xilinx_dma_device
), GFP_KERNEL
);
2176 dev_err(&pdev
->dev
, "Not enough memory for device\n");
2181 xdev
->dev
= &(pdev
->dev
);
2182 INIT_LIST_HEAD(&xdev
->common
.channels
);
2186 /* iomap registers */
2187 res
= platform_get_resource(pdev
, IORESOURCE_MEM
, 0);
2189 printk(KERN_ERR
"get_resource for MEM resource for dev %d "
2190 "failed\n", pdev
->id
);
2194 dev_info(&pdev
->dev
, "device %d actual base is %x\n",
2195 pdev
->id
, (unsigned int)res
->start
);
2197 if (!request_mem_region(res
->start
, 0x1000, "xilinx_axidma")) {
2198 printk(KERN_ERR
"memory request failue for base %x\n",
2199 (unsigned int)res
->start
);
2204 xdev
->regs
= ioremap(res
->start
, 0x1000);
2205 pr_info("dma base remapped: %lx\n", (unsigned long)xdev
->regs
);
2207 dev_err(&pdev
->dev
, "unable to iomap registers\n");
2212 dma_config
= (struct dma_device_config
*)dev
->platform_data
;
2214 /* Axi CDMA only does memcpy
2216 if (!strcmp(dma_config
->type
, "axi-cdma")) {
2218 pr_info("found an axi-cdma configuration\n");
2219 xdev
->feature
|= XILINX_DMA_IP_CDMA
;
2221 if (dma_config
->include_sg
)
2222 xdev
->feature
|= XILINX_DMA_FTR_HAS_SG
;
2224 dma_cap_set(DMA_MEMCPY
, xdev
->common
.cap_mask
);
2225 xdev
->common
.device_prep_dma_memcpy
= xilinx_dma_prep_memcpy
;
2226 xdev
->common
.device_control
= xilinx_dma_device_control
;
2227 xdev
->common
.device_issue_pending
= xilinx_cdma_issue_pending
;
2230 /* Axi DMA and VDMA only do slave transfers
2232 if (!strcmp(dma_config
->type
, "axi-dma")) {
2234 pr_info("found an axi-dma configuration\n");
2236 xdev
->feature
|= XILINX_DMA_IP_DMA
;
2237 if (dma_config
->sg_include_stscntrl_strm
)
2238 xdev
->feature
|= XILINX_DMA_FTR_STSCNTRL_STRM
;
2240 dma_cap_set(DMA_SLAVE
, xdev
->common
.cap_mask
);
2241 dma_cap_set(DMA_PRIVATE
, xdev
->common
.cap_mask
);
2242 xdev
->common
.device_prep_slave_sg
= xilinx_dma_prep_slave_sg
;
2243 xdev
->common
.device_control
= xilinx_dma_device_control
;
2244 xdev
->common
.device_issue_pending
= xilinx_dma_issue_pending
;
2247 if (!strcmp(dma_config
->type
, "axi-vdma")) {
2249 pr_info("found an axi-vdma configuration\n");
2251 xdev
->feature
|= XILINX_DMA_IP_VDMA
;
2253 if (dma_config
->include_sg
)
2254 xdev
->feature
|= XILINX_DMA_FTR_HAS_SG
;
2256 num_frames
= dma_config
->num_fstores
;
2258 dma_cap_set(DMA_SLAVE
, xdev
->common
.cap_mask
);
2259 dma_cap_set(DMA_PRIVATE
, xdev
->common
.cap_mask
);
2260 xdev
->common
.device_prep_slave_sg
= xilinx_vdma_prep_slave_sg
;
2261 xdev
->common
.device_control
= xilinx_vdma_device_control
;
2262 xdev
->common
.device_issue_pending
= xilinx_vdma_issue_pending
;
2265 xdev
->common
.device_alloc_chan_resources
=
2266 xilinx_dma_alloc_chan_resources
;
2267 xdev
->common
.device_free_chan_resources
=
2268 xilinx_dma_free_chan_resources
;
2269 xdev
->common
.device_tx_status
= xilinx_tx_status
;
2270 xdev
->common
.dev
= &pdev
->dev
;
2272 dev_set_drvdata(&pdev
->dev
, xdev
);
2274 for (channel
= 0; channel
< dma_config
->channel_count
; channel
++)
2275 xilinx_dma_chan_probe(pdev
, xdev
,
2276 &dma_config
->channel_config
[channel
],
2277 channel
, xdev
->feature
);
2279 if (xdev
->feature
& XILINX_DMA_IP_VDMA
) {
2282 for (i
= 0; i
< XILINX_DMA_MAX_CHANS_PER_DEVICE
; i
++) {
2284 xdev
->chan
[i
]->num_frms
= num_frames
;
2288 dma_async_device_register(&xdev
->common
);
2300 static int __exit
xilinx_dma_remove(struct platform_device
*pdev
)
2302 struct xilinx_dma_device
*xdev
= platform_get_drvdata(pdev
);
2305 dma_async_device_unregister(&xdev
->common
);
2307 for (i
= 0; i
< 2; i
++) {
2309 xilinx_dma_chan_remove(xdev
->chan
[i
]);
2312 iounmap(xdev
->regs
);
2313 dev_set_drvdata(&pdev
->dev
, NULL
);
2319 static void xilinx_dma_shutdown(struct platform_device
*pdev
)
2321 struct xilinx_dma_device
*xdev
= platform_get_drvdata(pdev
);
2324 for (i
= 0; i
< 2; i
++)
2325 dma_halt(xdev
->chan
[i
]);
2328 static struct platform_driver xilinx_dma_driver
= {
2329 .probe
= xilinx_dma_probe
,
2330 .remove
= __exit_p(xilinx_dma_remove
),
2331 .shutdown
= xilinx_dma_shutdown
,
2333 .owner
= THIS_MODULE
,
2334 .name
= "xilinx-axidma",
2338 /*----------------------------------------------------------------------------*/
2339 /* Module Init / Exit */
2340 /*----------------------------------------------------------------------------*/
2342 static __init
int xilinx_dma_init(void)
2345 status
= platform_driver_register(&xilinx_dma_driver
);
2348 module_init(xilinx_dma_init
);
2350 static void __exit
xilinx_dma_exit(void)
2352 platform_driver_unregister(&xilinx_dma_driver
);
2355 module_exit(xilinx_dma_exit
);
2358 MODULE_DESCRIPTION("Xilinx DMA/CDMA/VDMA driver");
2359 MODULE_LICENSE("GPL");