/*
 * driver/dma/coh901318.c
 *
 * Copyright (C) 2007-2009 ST-Ericsson
 * License terms: GNU General Public License (GPL) version 2
 * DMA driver for COH 901 318
 * Author: Per Friden <per.friden@stericsson.com>
 */

#include <linux/init.h>
#include <linux/module.h>
#include <linux/kernel.h> /* printk() */
#include <linux/fs.h> /* everything... */
#include <linux/slab.h> /* kmalloc() */
#include <linux/dmaengine.h>
#include <linux/platform_device.h>
#include <linux/device.h>
#include <linux/irqreturn.h>
#include <linux/interrupt.h>
#include <linux/io.h> /* readl()/writel()/ioremap() */
#include <linux/uaccess.h>
#include <linux/debugfs.h>
#include <mach/coh901318.h>

#include "coh901318_lli.h"

#define COHC_2_DEV(cohc) (&cohc->chan.dev->device)

#ifdef VERBOSE_DEBUG
#define COH_DBG(x) ({ if (1) x; 0; })
#else
#define COH_DBG(x) ({ if (0) x; 0; })
#endif
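
/*
 * COH_DBG() keeps its argument syntax-checked but compiles it away
 * unless VERBOSE_DEBUG is defined, e.g.:
 *	COH_DBG(coh901318_list_print(cohc, data));
 * as used by the prep routines below.
 */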

struct coh901318_desc {
	struct dma_async_tx_descriptor desc;
	struct list_head node;
	struct scatterlist *sg;
	unsigned int sg_len;
	struct coh901318_lli *data;
	enum dma_data_direction dir;
	int pending_irqs;
	unsigned long flags;
};

struct coh901318_base {
	struct device *dev;
	void __iomem *virtbase;
	struct coh901318_pool pool;
	struct powersave pm;
	struct dma_device dma_slave;
	struct dma_device dma_memcpy;
	struct coh901318_chan *chans;
	struct coh901318_platform *platform;
};

struct coh901318_chan {
	spinlock_t lock;
	int allocated;
	int completed;
	int id;
	int stopped;

	struct work_struct free_work;
	struct dma_chan chan;

	struct tasklet_struct tasklet;

	struct list_head active;
	struct list_head queue;
	struct list_head free;

	int pending_irqs;
	unsigned long nbr_active_done;
	unsigned long busy;

	struct coh901318_base *base;
};

static void coh901318_list_print(struct coh901318_chan *cohc,
				 struct coh901318_lli *lli)
{
	struct coh901318_lli *l;
	dma_addr_t addr = virt_to_phys(lli);
	int i = 0;

	/* Walk the physically linked lli chain and dump each element */
	while (addr) {
		l = phys_to_virt(addr);
		dev_vdbg(COHC_2_DEV(cohc), "i %d, lli %p, ctrl 0x%x, src 0x%x"
			 ", dst 0x%x, link 0x%x link_virt 0x%p\n",
			 i, l, l->control, l->src_addr, l->dst_addr,
			 l->link_addr, phys_to_virt(l->link_addr));
		i++;
		addr = l->link_addr;
	}
}

#ifdef CONFIG_DEBUG_FS

#define COH901318_DEBUGFS_ASSIGN(x, y) (x = y)

static struct coh901318_base *debugfs_dma_base;
static struct dentry *dma_dentry;

static int coh901318_debugfs_open(struct inode *inode, struct file *file)
{
	file->private_data = inode->i_private;
	return 0;
}

static int coh901318_debugfs_read(struct file *file, char __user *buf,
				  size_t count, loff_t *f_pos)
{
	u64 started_channels = debugfs_dma_base->pm.started_channels;
	int pool_count = debugfs_dma_base->pool.debugfs_pool_counter;
	int i;
	int ret = 0;
	char *dev_buf;
	char *tmp;
	int dev_size;

	dev_buf = kmalloc(4*1024, GFP_KERNEL);
	if (dev_buf == NULL)
		return -ENOMEM;
	tmp = dev_buf;

	tmp += sprintf(tmp, "DMA -- enable dma channels\n");

	for (i = 0; i < debugfs_dma_base->platform->max_channels; i++)
		if (started_channels & (1 << i))
			tmp += sprintf(tmp, "channel %d\n", i);

	tmp += sprintf(tmp, "Pool alloc nbr %d\n", pool_count);
	dev_size = tmp - dev_buf;

	/* No more to read if offset != 0 */
	if (*f_pos > dev_size)
		goto out;

	if (count > dev_size - *f_pos)
		count = dev_size - *f_pos;

	if (copy_to_user(buf, dev_buf + *f_pos, count))
		ret = -EINVAL;
	else {
		ret = count;
		*f_pos += count;
	}

 out:
	kfree(dev_buf);
	return ret;
}

static const struct file_operations coh901318_debugfs_status_operations = {
	.owner		= THIS_MODULE,
	.open		= coh901318_debugfs_open,
	.read		= coh901318_debugfs_read,
};

static int __init init_coh901318_debugfs(void)
{
	dma_dentry = debugfs_create_dir("dma", NULL);

	(void) debugfs_create_file("status",
				   S_IFREG | S_IRUGO,
				   dma_dentry, NULL,
				   &coh901318_debugfs_status_operations);

	return 0;
}

static void __exit exit_coh901318_debugfs(void)
{
	debugfs_remove_recursive(dma_dentry);
}

module_init(init_coh901318_debugfs);
module_exit(exit_coh901318_debugfs);

#else

#define COH901318_DEBUGFS_ASSIGN(x, y)

#endif /* CONFIG_DEBUG_FS */

static inline struct coh901318_chan *to_coh901318_chan(struct dma_chan *chan)
{
	return container_of(chan, struct coh901318_chan, chan);
}

static inline dma_addr_t
cohc_dev_addr(struct coh901318_chan *cohc)
{
	return cohc->base->platform->chan_conf[cohc->id].dev_addr;
}

static inline const struct coh901318_params *
cohc_chan_param(struct coh901318_chan *cohc)
{
	return &cohc->base->platform->chan_conf[cohc->id].param;
}

static inline const struct coh_dma_channel *
cohc_chan_conf(struct coh901318_chan *cohc)
{
	return &cohc->base->platform->chan_conf[cohc->id];
}

static void enable_powersave(struct coh901318_chan *cohc)
{
	unsigned long flags;
	struct powersave *pm = &cohc->base->pm;

	spin_lock_irqsave(&pm->lock, flags);

	pm->started_channels &= ~(1ULL << cohc->id);

	if (!pm->started_channels) {
		/* DMA no longer intends to access memory */
		cohc->base->platform->access_memory_state(cohc->base->dev,
							  false);
	}

	spin_unlock_irqrestore(&pm->lock, flags);
}

static void disable_powersave(struct coh901318_chan *cohc)
{
	unsigned long flags;
	struct powersave *pm = &cohc->base->pm;

	spin_lock_irqsave(&pm->lock, flags);

	if (!pm->started_channels) {
		/* DMA intends to access memory */
		cohc->base->platform->access_memory_state(cohc->base->dev,
							  true);
	}

	pm->started_channels |= (1ULL << cohc->id);

	spin_unlock_irqrestore(&pm->lock, flags);
}

static inline int coh901318_set_ctrl(struct coh901318_chan *cohc, u32 control)
{
	int channel = cohc->id;
	void __iomem *virtbase = cohc->base->virtbase;

	writel(control,
	       virtbase + COH901318_CX_CTRL +
	       COH901318_CX_CTRL_SPACING * channel);

	return 0;
}

static inline int coh901318_set_conf(struct coh901318_chan *cohc, u32 conf)
{
	int channel = cohc->id;
	void __iomem *virtbase = cohc->base->virtbase;

	writel(conf,
	       virtbase + COH901318_CX_CFG +
	       COH901318_CX_CFG_SPACING * channel);

	return 0;
}

static int coh901318_start(struct coh901318_chan *cohc)
{
	u32 val;
	int channel = cohc->id;
	void __iomem *virtbase = cohc->base->virtbase;

	disable_powersave(cohc);

	val = readl(virtbase + COH901318_CX_CFG +
		    COH901318_CX_CFG_SPACING * channel);

	/* Enable channel */
	val |= COH901318_CX_CFG_CH_ENABLE;
	writel(val, virtbase + COH901318_CX_CFG +
	       COH901318_CX_CFG_SPACING * channel);

	return 0;
}

static int coh901318_prep_linked_list(struct coh901318_chan *cohc,
				      struct coh901318_lli *data)
{
	int channel = cohc->id;
	void __iomem *virtbase = cohc->base->virtbase;

	BUG_ON(readl(virtbase + COH901318_CX_STAT +
		     COH901318_CX_STAT_SPACING * channel) &
	       COH901318_CX_STAT_ACTIVE);

	writel(data->src_addr,
	       virtbase + COH901318_CX_SRC_ADDR +
	       COH901318_CX_SRC_ADDR_SPACING * channel);

	writel(data->dst_addr, virtbase +
	       COH901318_CX_DST_ADDR +
	       COH901318_CX_DST_ADDR_SPACING * channel);

	writel(data->link_addr, virtbase + COH901318_CX_LNK_ADDR +
	       COH901318_CX_LNK_ADDR_SPACING * channel);

	writel(data->control, virtbase + COH901318_CX_CTRL +
	       COH901318_CX_CTRL_SPACING * channel);

	return 0;
}

static dma_cookie_t
coh901318_assign_cookie(struct coh901318_chan *cohc,
			struct coh901318_desc *cohd)
{
	dma_cookie_t cookie = cohc->chan.cookie;

	/* Cookies increase monotonically; skip the error range < 1 */
	if (++cookie < 0)
		cookie = 1;

	cohc->chan.cookie = cookie;
	cohd->desc.cookie = cookie;

	return cookie;
}

static struct coh901318_desc *
coh901318_desc_get(struct coh901318_chan *cohc)
{
	struct coh901318_desc *desc;

	if (list_empty(&cohc->free)) {
		/* alloc new desc because we're out of used ones
		 * TODO: alloc a pile of descs instead of just one,
		 * avoid many small allocations.
		 */
		desc = kmalloc(sizeof(struct coh901318_desc), GFP_NOWAIT);
		if (desc == NULL)
			return NULL;
		INIT_LIST_HEAD(&desc->node);
	} else {
		/* Reuse an old desc. */
		desc = list_first_entry(&cohc->free,
					struct coh901318_desc,
					node);
		list_del(&desc->node);
	}

	return desc;
}

static void
coh901318_desc_free(struct coh901318_chan *cohc, struct coh901318_desc *cohd)
{
	list_add_tail(&cohd->node, &cohc->free);
}

/* call with irq lock held */
static void
coh901318_desc_submit(struct coh901318_chan *cohc, struct coh901318_desc *desc)
{
	list_add_tail(&desc->node, &cohc->active);

	BUG_ON(cohc->pending_irqs != 0);

	cohc->pending_irqs = desc->pending_irqs;
}

static struct coh901318_desc *
coh901318_first_active_get(struct coh901318_chan *cohc)
{
	struct coh901318_desc *d;

	if (list_empty(&cohc->active))
		return NULL;

	d = list_first_entry(&cohc->active,
			     struct coh901318_desc,
			     node);
	return d;
}

static void
coh901318_desc_remove(struct coh901318_desc *cohd)
{
	list_del(&cohd->node);
}

static void
coh901318_desc_queue(struct coh901318_chan *cohc, struct coh901318_desc *desc)
{
	list_add_tail(&desc->node, &cohc->queue);
}

static struct coh901318_desc *
coh901318_first_queued(struct coh901318_chan *cohc)
{
	struct coh901318_desc *d;

	if (list_empty(&cohc->queue))
		return NULL;

	d = list_first_entry(&cohc->queue,
			     struct coh901318_desc,
			     node);
	return d;
}

/*
 * DMA start/stop controls
 */

u32 coh901318_get_bytes_left(struct dma_chan *chan)
{
	unsigned long flags;
	u32 ret;
	struct coh901318_chan *cohc = to_coh901318_chan(chan);

	spin_lock_irqsave(&cohc->lock, flags);

	/* Read transfer count value */
	ret = readl(cohc->base->virtbase +
		    COH901318_CX_CTRL + COH901318_CX_CTRL_SPACING *
		    cohc->id) & COH901318_CX_CTRL_TC_VALUE_MASK;

	spin_unlock_irqrestore(&cohc->lock, flags);

	return ret;
}
EXPORT_SYMBOL(coh901318_get_bytes_left);

/* Stops a transfer without losing data. Enables power save.
 * Use this function in conjunction with coh901318_continue(..)
 */
void coh901318_stop(struct dma_chan *chan)
{
	u32 val;
	unsigned long flags;
	struct coh901318_chan *cohc = to_coh901318_chan(chan);
	int channel = cohc->id;
	void __iomem *virtbase = cohc->base->virtbase;

	spin_lock_irqsave(&cohc->lock, flags);

	/* Disable channel in HW */
	val = readl(virtbase + COH901318_CX_CFG +
		    COH901318_CX_CFG_SPACING * channel);

	/* Stopping an infinite transfer */
	if ((val & COH901318_CX_CTRL_TC_ENABLE) == 0 &&
	    (val & COH901318_CX_CFG_CH_ENABLE))
		cohc->stopped = 1;

	val &= ~COH901318_CX_CFG_CH_ENABLE;
	/* Write the disable twice, HW bug workaround */
	writel(val, virtbase + COH901318_CX_CFG +
	       COH901318_CX_CFG_SPACING * channel);
	writel(val, virtbase + COH901318_CX_CFG +
	       COH901318_CX_CFG_SPACING * channel);

	/* Spin-wait for it to actually go inactive */
	while (readl(virtbase + COH901318_CX_STAT + COH901318_CX_STAT_SPACING *
		     channel) & COH901318_CX_STAT_ACTIVE)
		cpu_relax();

	/* Check if we stopped an active job */
	if ((readl(virtbase + COH901318_CX_CTRL + COH901318_CX_CTRL_SPACING *
		   channel) & COH901318_CX_CTRL_TC_VALUE_MASK) > 0)
		cohc->stopped = 1;

	enable_powersave(cohc);

	spin_unlock_irqrestore(&cohc->lock, flags);
}
EXPORT_SYMBOL(coh901318_stop);

/* Continues a transfer that has been stopped via coh901318_stop(..).
 * Power save is handled.
 */
void coh901318_continue(struct dma_chan *chan)
{
	u32 val;
	unsigned long flags;
	struct coh901318_chan *cohc = to_coh901318_chan(chan);
	int channel = cohc->id;

	spin_lock_irqsave(&cohc->lock, flags);

	disable_powersave(cohc);

	if (cohc->stopped) {
		/* Enable channel in HW */
		val = readl(cohc->base->virtbase + COH901318_CX_CFG +
			    COH901318_CX_CFG_SPACING * channel);

		val |= COH901318_CX_CFG_CH_ENABLE;

		writel(val, cohc->base->virtbase + COH901318_CX_CFG +
		       COH901318_CX_CFG_SPACING * channel);

		cohc->stopped = 0;
	}

	spin_unlock_irqrestore(&cohc->lock, flags);
}
EXPORT_SYMBOL(coh901318_continue);
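
/*
 * A minimal usage sketch (illustrative, not part of this file): a client
 * owning a channel can pause the transfer, inspect progress and resume:
 *
 *	coh901318_stop(chan);
 *	pr_info("bytes left: %u\n", coh901318_get_bytes_left(chan));
 *	coh901318_continue(chan);
 *
 * stop and continue are meant to be used as a pair; stop leaves the
 * channel disabled with its transfer count preserved.
 */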

bool coh901318_filter_id(struct dma_chan *chan, void *chan_id)
{
	unsigned int ch_nr = (unsigned int) chan_id;

	if (ch_nr == to_coh901318_chan(chan)->id)
		return true;

	return false;
}
EXPORT_SYMBOL(coh901318_filter_id);
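
/*
 * Sketch of how a client might request a specific channel through the
 * dmaengine API using this filter (channel number 5 is hypothetical):
 *
 *	dma_cap_mask_t mask;
 *
 *	dma_cap_zero(mask);
 *	dma_cap_set(DMA_SLAVE, mask);
 *	chan = dma_request_channel(mask, coh901318_filter_id, (void *) 5);
 */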

/*
 * DMA channel allocation
 */
static int coh901318_config(struct coh901318_chan *cohc,
			    struct coh901318_params *param)
{
	unsigned long flags;
	const struct coh901318_params *p;
	int channel = cohc->id;
	void __iomem *virtbase = cohc->base->virtbase;

	spin_lock_irqsave(&cohc->lock, flags);

	if (param)
		p = param;
	else
		p = &cohc->base->platform->chan_conf[channel].param;

	/* Clear any pending BE or TC interrupt */
	if (channel < 32) {
		writel(1 << channel, virtbase + COH901318_BE_INT_CLEAR1);
		writel(1 << channel, virtbase + COH901318_TC_INT_CLEAR1);
	} else {
		writel(1 << (channel - 32), virtbase +
		       COH901318_BE_INT_CLEAR2);
		writel(1 << (channel - 32), virtbase +
		       COH901318_TC_INT_CLEAR2);
	}

	coh901318_set_conf(cohc, p->config);
	coh901318_set_ctrl(cohc, p->ctrl_lli_last);

	spin_unlock_irqrestore(&cohc->lock, flags);

	return 0;
}

/* must lock when calling this function
 * start queued jobs, if any
 * TODO: start all queued jobs in one go
 *
 * Returns descriptor if queued job is started otherwise NULL.
 * If the queue is empty NULL is returned.
 */
static struct coh901318_desc *coh901318_queue_start(struct coh901318_chan *cohc)
{
	struct coh901318_desc *cohd_que;

	/* start queued jobs, if any
	 * TODO: transmit all queued jobs in one go
	 */
	cohd_que = coh901318_first_queued(cohc);

	if (cohd_que != NULL) {
		/* Remove from queue */
		coh901318_desc_remove(cohd_que);
		/* initiate DMA job */
		cohc->busy = 1;

		coh901318_desc_submit(cohc, cohd_que);

		coh901318_prep_linked_list(cohc, cohd_que->data);

		/* start dma job */
		coh901318_start(cohc);
	}

	return cohd_que;
}

static void dma_tasklet(unsigned long data)
{
	struct coh901318_chan *cohc = (struct coh901318_chan *) data;
	struct coh901318_desc *cohd_fin;
	unsigned long flags;
	dma_async_tx_callback callback;
	void *callback_param;

	spin_lock_irqsave(&cohc->lock, flags);

	/* get first active entry from list */
	cohd_fin = coh901318_first_active_get(cohc);

	/* check for NULL before dereferencing in the BUG_ON below */
	if (cohd_fin == NULL)
		goto err;

	BUG_ON(cohd_fin->pending_irqs == 0);

	cohd_fin->pending_irqs--;
	cohc->completed = cohd_fin->desc.cookie;

	if (cohc->nbr_active_done == 0) {
		spin_unlock_irqrestore(&cohc->lock, flags);
		return;
	}

	if (!cohd_fin->pending_irqs) {
		/* release the lli allocation */
		coh901318_lli_free(&cohc->base->pool, &cohd_fin->data);
	}

	dev_vdbg(COHC_2_DEV(cohc), "[%s] chan_id %d pending_irqs %d"
		 " nbr_active_done %ld\n", __func__,
		 cohc->id, cohc->pending_irqs, cohc->nbr_active_done);

	/* callback to client */
	callback = cohd_fin->desc.callback;
	callback_param = cohd_fin->desc.callback_param;

	if (!cohd_fin->pending_irqs) {
		coh901318_desc_remove(cohd_fin);

		/* return desc to free-list */
		coh901318_desc_free(cohc, cohd_fin);
	}

	if (cohc->nbr_active_done)
		cohc->nbr_active_done--;

	if (cohc->nbr_active_done) {
		if (cohc_chan_conf(cohc)->priority_high)
			tasklet_hi_schedule(&cohc->tasklet);
		else
			tasklet_schedule(&cohc->tasklet);
	}
	spin_unlock_irqrestore(&cohc->lock, flags);

	if (callback)
		callback(callback_param);

	return;

 err:
	spin_unlock_irqrestore(&cohc->lock, flags);
	dev_err(COHC_2_DEV(cohc), "[%s] No active dma desc\n", __func__);
}

/* called from interrupt context */
static void dma_tc_handle(struct coh901318_chan *cohc)
{
	BUG_ON(!cohc->allocated && (list_empty(&cohc->active) ||
				    list_empty(&cohc->queue)));

	if (!cohc->allocated)
		return;

	BUG_ON(cohc->pending_irqs == 0);

	cohc->pending_irqs--;
	cohc->nbr_active_done++;

	if (cohc->pending_irqs == 0 && coh901318_queue_start(cohc) == NULL)
		cohc->busy = 0;

	BUG_ON(list_empty(&cohc->active));

	if (cohc_chan_conf(cohc)->priority_high)
		tasklet_hi_schedule(&cohc->tasklet);
	else
		tasklet_schedule(&cohc->tasklet);
}
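
/*
 * The controller reports up to 64 channels through two 32-bit status
 * registers: channels 0-31 via INT_STATUS1 and channels 32-63 via
 * INT_STATUS2. The handler below therefore runs the same per-channel
 * logic twice, offsetting the channel number by 32 for the second bank.
 */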

static irqreturn_t dma_irq_handler(int irq, void *dev_id)
{
	u32 status1;
	u32 status2;
	int i;
	int ch;
	struct coh901318_base *base = dev_id;
	struct coh901318_chan *cohc;
	void __iomem *virtbase = base->virtbase;

	status1 = readl(virtbase + COH901318_INT_STATUS1);
	status2 = readl(virtbase + COH901318_INT_STATUS2);

	if (unlikely(status1 == 0 && status2 == 0)) {
		dev_warn(base->dev, "spurious DMA IRQ from no channel!\n");
		return IRQ_HANDLED;
	}

	/* TODO: consider handling the IRQ in a tasklet here to
	 * minimize interrupt latency */

	/* Check the first 32 DMA channels for IRQ */
	while (status1) {
		/* Find first bit set, return as a number. */
		i = ffs(status1) - 1;
		ch = i;

		cohc = &base->chans[ch];
		spin_lock(&cohc->lock);

		/* Mask off this bit */
		status1 &= ~(1 << i);
		/* Check the individual channel bits */
		if (test_bit(i, virtbase + COH901318_BE_INT_STATUS1)) {
			dev_crit(COHC_2_DEV(cohc),
				 "DMA bus error on channel %d!\n", ch);
			/* Clear BE interrupt */
			__set_bit(i, virtbase + COH901318_BE_INT_CLEAR1);
		} else if (unlikely(!test_bit(i, virtbase +
					      COH901318_TC_INT_STATUS1))) {
			/* Caused by TC, really? */
			dev_warn(COHC_2_DEV(cohc),
				 "ignoring interrupt not caused by terminal count on channel %d\n", ch);
			/* Clear TC interrupt */
			__set_bit(i, virtbase + COH901318_TC_INT_CLEAR1);
		} else {
			/* Enable powersave if transfer has finished */
			if (!(readl(virtbase + COH901318_CX_STAT +
				    COH901318_CX_STAT_SPACING * ch) &
			      COH901318_CX_STAT_ENABLED))
				enable_powersave(cohc);

			/* Must clear TC interrupt before calling
			 * dma_tc_handle, in case tc_handle initiates
			 * a new dma job
			 */
			__set_bit(i, virtbase + COH901318_TC_INT_CLEAR1);

			dma_tc_handle(cohc);
		}
		spin_unlock(&cohc->lock);
	}

	/* Check the remaining 32 DMA channels for IRQ */
	while (status2) {
		/* Find first bit set, return as a number. */
		i = ffs(status2) - 1;
		ch = i + 32;
		cohc = &base->chans[ch];
		spin_lock(&cohc->lock);

		/* Mask off this bit */
		status2 &= ~(1 << i);
		/* Check the individual channel bits */
		if (test_bit(i, virtbase + COH901318_BE_INT_STATUS2)) {
			dev_crit(COHC_2_DEV(cohc),
				 "DMA bus error on channel %d!\n", ch);
			/* Clear BE interrupt */
			__set_bit(i, virtbase + COH901318_BE_INT_CLEAR2);
		} else if (unlikely(!test_bit(i, virtbase +
					      COH901318_TC_INT_STATUS2))) {
			/* Caused by TC, really? */
			dev_warn(COHC_2_DEV(cohc),
				 "ignoring interrupt not caused by terminal count on channel %d\n", ch);
			/* Clear TC interrupt */
			__set_bit(i, virtbase + COH901318_TC_INT_CLEAR2);
		} else {
			/* Enable powersave if transfer has finished */
			if (!(readl(virtbase + COH901318_CX_STAT +
				    COH901318_CX_STAT_SPACING * ch) &
			      COH901318_CX_STAT_ENABLED))
				enable_powersave(cohc);

			/* Must clear TC interrupt before calling
			 * dma_tc_handle, in case tc_handle initiates
			 * a new dma job
			 */
			__set_bit(i, virtbase + COH901318_TC_INT_CLEAR2);

			dma_tc_handle(cohc);
		}
		spin_unlock(&cohc->lock);
	}

	return IRQ_HANDLED;
}

static int coh901318_alloc_chan_resources(struct dma_chan *chan)
{
	struct coh901318_chan *cohc = to_coh901318_chan(chan);

	dev_vdbg(COHC_2_DEV(cohc), "[%s] DMA channel %d\n",
		 __func__, cohc->id);

	if (chan->client_count > 1)
		return -EBUSY;

	coh901318_config(cohc, NULL);

	cohc->allocated = 1;
	cohc->completed = chan->cookie = 1;

	return 1;
}

static void
coh901318_free_chan_resources(struct dma_chan *chan)
{
	struct coh901318_chan *cohc = to_coh901318_chan(chan);
	int channel = cohc->id;
	unsigned long flags;

	spin_lock_irqsave(&cohc->lock, flags);

	/* Disable HW */
	writel(0x00000000U, cohc->base->virtbase + COH901318_CX_CFG +
	       COH901318_CX_CFG_SPACING * channel);
	writel(0x00000000U, cohc->base->virtbase + COH901318_CX_CTRL +
	       COH901318_CX_CTRL_SPACING * channel);

	cohc->allocated = 0;

	spin_unlock_irqrestore(&cohc->lock, flags);

	chan->device->device_terminate_all(chan);
}

static dma_cookie_t
coh901318_tx_submit(struct dma_async_tx_descriptor *tx)
{
	struct coh901318_desc *cohd = container_of(tx, struct coh901318_desc,
						   desc);
	struct coh901318_chan *cohc = to_coh901318_chan(tx->chan);
	unsigned long flags;

	spin_lock_irqsave(&cohc->lock, flags);

	tx->cookie = coh901318_assign_cookie(cohc, cohd);

	coh901318_desc_queue(cohc, cohd);

	spin_unlock_irqrestore(&cohc->lock, flags);

	return tx->cookie;
}

static struct dma_async_tx_descriptor *
coh901318_prep_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
		      size_t size, unsigned long flags)
{
	struct coh901318_lli *data;
	struct coh901318_desc *cohd;
	unsigned long flg;
	struct coh901318_chan *cohc = to_coh901318_chan(chan);
	int lli_len;
	u32 ctrl_last = cohc_chan_param(cohc)->ctrl_lli_last;

	spin_lock_irqsave(&cohc->lock, flg);

	dev_vdbg(COHC_2_DEV(cohc),
		 "[%s] channel %d src 0x%x dest 0x%x size %d\n",
		 __func__, cohc->id, src, dest, size);

	if (flags & DMA_PREP_INTERRUPT)
		/* Trigger interrupt after last lli */
		ctrl_last |= COH901318_CX_CTRL_TC_IRQ_ENABLE;

	/* Round the lli count up so it covers the whole transfer */
	lli_len = size >> MAX_DMA_PACKET_SIZE_SHIFT;
	if ((lli_len << MAX_DMA_PACKET_SIZE_SHIFT) < size)
		lli_len++;

	data = coh901318_lli_alloc(&cohc->base->pool, lli_len);
	if (data == NULL)
		goto err;

	cohd = coh901318_desc_get(cohc);
	if (cohd == NULL)
		goto err;

	cohd->data = data;
	cohd->flags = flags;

	/* Set the number of irqs to occur */
	cohd->pending_irqs =
		coh901318_lli_fill_memcpy(
			&cohc->base->pool, data, src, size, dest,
			cohc_chan_param(cohc)->ctrl_lli_chained,
			ctrl_last);

	COH_DBG(coh901318_list_print(cohc, data));

	dma_async_tx_descriptor_init(&cohd->desc, chan);

	cohd->desc.tx_submit = coh901318_tx_submit;

	spin_unlock_irqrestore(&cohc->lock, flg);

	return &cohd->desc;
 err:
	spin_unlock_irqrestore(&cohc->lock, flg);
	return NULL;
}
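
/*
 * Worked example of the lli_len rounding above (numbers illustrative;
 * the real MAX_DMA_PACKET_SIZE_SHIFT comes from <mach/coh901318.h>):
 * assuming a shift of 11 (2 KiB packets), a 5000 byte copy gives
 * lli_len = 5000 >> 11 = 2; since 2 << 11 = 4096 < 5000 it is bumped
 * to 3 llis covering 2048 + 2048 + 904 bytes.
 */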

static struct dma_async_tx_descriptor *
coh901318_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
			unsigned int sg_len, enum dma_data_direction direction,
			unsigned long flags)
{
	struct coh901318_chan *cohc = to_coh901318_chan(chan);
	struct coh901318_lli *data;
	struct coh901318_desc *cohd;
	struct scatterlist *sg;
	int len = 0;
	int size;
	int i;
	unsigned long flg;
	u32 ctrl_chained = cohc_chan_param(cohc)->ctrl_lli_chained;
	u32 ctrl = cohc_chan_param(cohc)->ctrl_lli;
	u32 ctrl_last = cohc_chan_param(cohc)->ctrl_lli_last;

	if (!sgl)
		goto out;
	if (sgl->length == 0)
		goto out;

	spin_lock_irqsave(&cohc->lock, flg);

	dev_vdbg(COHC_2_DEV(cohc), "[%s] sg_len %d dir %d\n",
		 __func__, sg_len, direction);

	if (flags & DMA_PREP_INTERRUPT)
		/* Trigger interrupt after last lli */
		ctrl_last |= COH901318_CX_CTRL_TC_IRQ_ENABLE;

	cohd = coh901318_desc_get(cohc);
	if (cohd == NULL)
		goto err_desc_get;

	cohd->dir = direction;

	if (direction == DMA_TO_DEVICE) {
		u32 tx_flags = COH901318_CX_CTRL_PRDD_SOURCE |
			COH901318_CX_CTRL_SRC_ADDR_INC_ENABLE;

		ctrl_chained |= tx_flags;
		ctrl_last |= tx_flags;
		ctrl |= tx_flags;
	} else if (direction == DMA_FROM_DEVICE) {
		u32 rx_flags = COH901318_CX_CTRL_PRDD_DEST |
			COH901318_CX_CTRL_DST_ADDR_INC_ENABLE;

		ctrl_chained |= rx_flags;
		ctrl_last |= rx_flags;
		ctrl |= rx_flags;
	} else
		goto err_direction;

	dma_async_tx_descriptor_init(&cohd->desc, chan);

	cohd->desc.tx_submit = coh901318_tx_submit;

	/* The dma only supports transmitting packages up to
	 * MAX_DMA_PACKET_SIZE. Calculate the total number of
	 * dma elements required to send the entire sg list
	 */
	for_each_sg(sgl, sg, sg_len, i) {
		unsigned int factor;

		size = sg_dma_len(sg);

		if (size <= MAX_DMA_PACKET_SIZE) {
			len++;
			continue;
		}

		factor = size >> MAX_DMA_PACKET_SIZE_SHIFT;
		if ((factor << MAX_DMA_PACKET_SIZE_SHIFT) < size)
			factor++;

		len += factor;
	}

	data = coh901318_lli_alloc(&cohc->base->pool, len);
	if (data == NULL)
		goto err_dma_alloc;

	/* initiate allocated data list */
	cohd->pending_irqs =
		coh901318_lli_fill_sg(&cohc->base->pool, data, sgl, sg_len,
				      cohc_dev_addr(cohc),
				      ctrl_chained,
				      ctrl,
				      ctrl_last,
				      direction, COH901318_CX_CTRL_TC_IRQ_ENABLE);
	cohd->data = data;

	cohd->flags = flags;

	COH_DBG(coh901318_list_print(cohc, data));

	spin_unlock_irqrestore(&cohc->lock, flg);

	return &cohd->desc;
 err_dma_alloc:
 err_direction:
	/* cohd is not on any list yet, just return it to the free list */
	coh901318_desc_free(cohc, cohd);
 err_desc_get:
	spin_unlock_irqrestore(&cohc->lock, flg);
 out:
	return NULL;
}

static enum dma_status
coh901318_is_tx_complete(struct dma_chan *chan,
			 dma_cookie_t cookie, dma_cookie_t *done,
			 dma_cookie_t *used)
{
	struct coh901318_chan *cohc = to_coh901318_chan(chan);
	dma_cookie_t last_used;
	dma_cookie_t last_complete;
	int ret;

	last_complete = cohc->completed;
	last_used = chan->cookie;

	ret = dma_async_is_complete(cookie, last_complete, last_used);

	if (done)
		*done = last_complete;
	if (used)
		*used = last_used;

	return ret;
}

static void
coh901318_issue_pending(struct dma_chan *chan)
{
	struct coh901318_chan *cohc = to_coh901318_chan(chan);
	unsigned long flags;

	spin_lock_irqsave(&cohc->lock, flags);

	/* Busy means that pending jobs are already being processed */
	if (!cohc->busy)
		coh901318_queue_start(cohc);

	spin_unlock_irqrestore(&cohc->lock, flags);
}

static void
coh901318_terminate_all(struct dma_chan *chan)
{
	unsigned long flags;
	struct coh901318_chan *cohc = to_coh901318_chan(chan);
	struct coh901318_desc *cohd;
	void __iomem *virtbase = cohc->base->virtbase;

	coh901318_stop(chan);

	spin_lock_irqsave(&cohc->lock, flags);

	/* Clear any pending BE or TC interrupt */
	if (cohc->id < 32) {
		writel(1 << cohc->id, virtbase + COH901318_BE_INT_CLEAR1);
		writel(1 << cohc->id, virtbase + COH901318_TC_INT_CLEAR1);
	} else {
		writel(1 << (cohc->id - 32), virtbase +
		       COH901318_BE_INT_CLEAR2);
		writel(1 << (cohc->id - 32), virtbase +
		       COH901318_TC_INT_CLEAR2);
	}

	enable_powersave(cohc);

	while ((cohd = coh901318_first_active_get(cohc))) {
		/* release the lli allocation */
		coh901318_lli_free(&cohc->base->pool, &cohd->data);

		coh901318_desc_remove(cohd);

		/* return desc to free-list */
		coh901318_desc_free(cohc, cohd);
	}

	while ((cohd = coh901318_first_queued(cohc))) {
		/* release the lli allocation */
		coh901318_lli_free(&cohc->base->pool, &cohd->data);

		coh901318_desc_remove(cohd);

		/* return desc to free-list */
		coh901318_desc_free(cohc, cohd);
	}

	cohc->nbr_active_done = 0;
	cohc->busy = 0;
	cohc->pending_irqs = 0;

	spin_unlock_irqrestore(&cohc->lock, flags);
}

void coh901318_base_init(struct dma_device *dma, const int *pick_chans,
			 struct coh901318_base *base)
{
	int chans_i;
	int i = 0;
	struct coh901318_chan *cohc;

	INIT_LIST_HEAD(&dma->channels);

	for (chans_i = 0; pick_chans[chans_i] != -1; chans_i += 2) {
		for (i = pick_chans[chans_i]; i <= pick_chans[chans_i+1]; i++) {
			cohc = &base->chans[i];

			cohc->base = base;
			cohc->chan.device = dma;
			cohc->id = i;

			/* TODO: do we really need this lock if only one
			 * client is connected to each channel?
			 */
			spin_lock_init(&cohc->lock);

			cohc->pending_irqs = 0;
			cohc->nbr_active_done = 0;
			cohc->busy = 0;
			INIT_LIST_HEAD(&cohc->free);
			INIT_LIST_HEAD(&cohc->active);
			INIT_LIST_HEAD(&cohc->queue);

			tasklet_init(&cohc->tasklet, dma_tasklet,
				     (unsigned long) cohc);

			list_add_tail(&cohc->chan.device_node,
				      &dma->channels);
		}
	}
}

static int __init coh901318_probe(struct platform_device *pdev)
{
	int err = 0;
	struct coh901318_platform *pdata;
	struct coh901318_base *base;
	int irq;
	struct resource *io;

	io = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!io) {
		err = -ENODEV;
		goto err_get_resource;
	}

	/* Map DMA controller registers to virtual memory */
	if (request_mem_region(io->start,
			       resource_size(io),
			       pdev->dev.driver->name) == NULL) {
		err = -EBUSY;
		goto err_request_mem;
	}

	pdata = pdev->dev.platform_data;
	if (!pdata) {
		err = -ENODEV;
		goto err_no_platformdata;
	}

	base = kmalloc(ALIGN(sizeof(struct coh901318_base), 4) +
		       pdata->max_channels *
		       sizeof(struct coh901318_chan),
		       GFP_KERNEL);
	if (!base) {
		err = -ENOMEM;
		goto err_alloc_coh_dma_channels;
	}

	base->chans = ((void *)base) + ALIGN(sizeof(struct coh901318_base), 4);

	base->virtbase = ioremap(io->start, resource_size(io));
	if (!base->virtbase) {
		err = -ENOMEM;
		goto err_no_ioremap;
	}

	base->dev = &pdev->dev;
	base->platform = pdata;
	spin_lock_init(&base->pm.lock);
	base->pm.started_channels = 0;

	COH901318_DEBUGFS_ASSIGN(debugfs_dma_base, base);

	platform_set_drvdata(pdev, base);

	irq = platform_get_irq(pdev, 0);
	if (irq < 0) {
		err = irq;
		goto err_no_irq;
	}

	err = request_irq(irq, dma_irq_handler, IRQF_DISABLED,
			  "coh901318", base);
	if (err) {
		dev_crit(&pdev->dev,
			 "Cannot allocate IRQ for DMA controller!\n");
		goto err_request_irq;
	}

	err = coh901318_pool_create(&base->pool, &pdev->dev,
				    sizeof(struct coh901318_lli),
				    32);
	if (err)
		goto err_pool_create;

	/* init channels for device transfers */
	coh901318_base_init(&base->dma_slave, base->platform->chans_slave,
			    base);

	dma_cap_zero(base->dma_slave.cap_mask);
	dma_cap_set(DMA_SLAVE, base->dma_slave.cap_mask);

	base->dma_slave.device_alloc_chan_resources = coh901318_alloc_chan_resources;
	base->dma_slave.device_free_chan_resources = coh901318_free_chan_resources;
	base->dma_slave.device_prep_slave_sg = coh901318_prep_slave_sg;
	base->dma_slave.device_is_tx_complete = coh901318_is_tx_complete;
	base->dma_slave.device_issue_pending = coh901318_issue_pending;
	base->dma_slave.device_terminate_all = coh901318_terminate_all;
	base->dma_slave.dev = &pdev->dev;

	err = dma_async_device_register(&base->dma_slave);
	if (err)
		goto err_register_slave;

	/* init channels for memcpy */
	coh901318_base_init(&base->dma_memcpy, base->platform->chans_memcpy,
			    base);

	dma_cap_zero(base->dma_memcpy.cap_mask);
	dma_cap_set(DMA_MEMCPY, base->dma_memcpy.cap_mask);

	base->dma_memcpy.device_alloc_chan_resources = coh901318_alloc_chan_resources;
	base->dma_memcpy.device_free_chan_resources = coh901318_free_chan_resources;
	base->dma_memcpy.device_prep_dma_memcpy = coh901318_prep_memcpy;
	base->dma_memcpy.device_is_tx_complete = coh901318_is_tx_complete;
	base->dma_memcpy.device_issue_pending = coh901318_issue_pending;
	base->dma_memcpy.device_terminate_all = coh901318_terminate_all;
	base->dma_memcpy.dev = &pdev->dev;

	err = dma_async_device_register(&base->dma_memcpy);
	if (err)
		goto err_register_memcpy;

	dev_dbg(&pdev->dev, "Initialized COH901318 DMA on virtual base 0x%08x\n",
		(u32) base->virtbase);

	return err;

 err_register_memcpy:
	dma_async_device_unregister(&base->dma_slave);
 err_register_slave:
	coh901318_pool_destroy(&base->pool);
 err_pool_create:
	free_irq(platform_get_irq(pdev, 0), base);
 err_request_irq:
 err_no_irq:
	iounmap(base->virtbase);
 err_no_ioremap:
	kfree(base);
 err_alloc_coh_dma_channels:
 err_no_platformdata:
	release_mem_region(pdev->resource->start,
			   resource_size(pdev->resource));
 err_request_mem:
 err_get_resource:
	return err;
}

static int __exit coh901318_remove(struct platform_device *pdev)
{
	struct coh901318_base *base = platform_get_drvdata(pdev);

	dma_async_device_unregister(&base->dma_memcpy);
	dma_async_device_unregister(&base->dma_slave);
	coh901318_pool_destroy(&base->pool);
	free_irq(platform_get_irq(pdev, 0), base);
	iounmap(base->virtbase);
	kfree(base);
	release_mem_region(pdev->resource->start,
			   resource_size(pdev->resource));
	return 0;
}

static struct platform_driver coh901318_driver = {
	.remove = __exit_p(coh901318_remove),
	.driver = {
		.name	= "coh901318",
	},
};

int __init coh901318_init(void)
{
	return platform_driver_probe(&coh901318_driver, coh901318_probe);
}
subsys_initcall(coh901318_init);

void __exit coh901318_exit(void)
{
	platform_driver_unregister(&coh901318_driver);
}
module_exit(coh901318_exit);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Per Friden");