/*
 * OMAP DMAengine support
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/omap-dma.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/of_dma.h>
#include <linux/of_device.h>

#include "virt-dma.h"
struct omap_dmadev {
        struct dma_device ddev;
        spinlock_t lock;
        struct tasklet_struct task;
        struct list_head pending;
};
struct omap_chan {
        struct virt_dma_chan vc;
        struct list_head node;

        struct dma_slave_config cfg;
        unsigned dma_sig;
        bool cyclic;
        bool paused;

        int dma_ch;
        struct omap_desc *desc;
        unsigned sgidx;
};
struct omap_sg {
        dma_addr_t addr;
        uint32_t en;            /* number of elements (24-bit) */
        uint32_t fn;            /* number of frames (16-bit) */
};
struct omap_desc {
        struct virt_dma_desc vd;
        enum dma_transfer_direction dir;
        dma_addr_t dev_addr;

        int16_t fi;             /* for OMAP_DMA_SYNC_PACKET */
        uint8_t es;             /* OMAP_DMA_DATA_TYPE_xxx */
        uint8_t sync_mode;      /* OMAP_DMA_SYNC_xxx */
        uint8_t sync_type;      /* OMAP_DMA_xxx_SYNC* */
        uint8_t periph_port;    /* Peripheral port */

        unsigned sglen;
        struct omap_sg sg[0];
};
static const unsigned es_bytes[] = {
        [OMAP_DMA_DATA_TYPE_S8] = 1,
        [OMAP_DMA_DATA_TYPE_S16] = 2,
        [OMAP_DMA_DATA_TYPE_S32] = 4,
};
static struct of_dma_filter_info omap_dma_info = {
        .filter_fn = omap_dma_filter_fn,
};
static inline struct omap_dmadev *to_omap_dma_dev(struct dma_device *d)
{
        return container_of(d, struct omap_dmadev, ddev);
}
static inline struct omap_chan *to_omap_dma_chan(struct dma_chan *c)
{
        return container_of(c, struct omap_chan, vc.chan);
}
static inline struct omap_desc *to_omap_dma_desc(struct dma_async_tx_descriptor *t)
{
        return container_of(t, struct omap_desc, vd.tx);
}
static void omap_dma_desc_free(struct virt_dma_desc *vd)
{
        kfree(container_of(vd, struct omap_desc, vd));
}
static void omap_dma_start_sg(struct omap_chan *c, struct omap_desc *d,
        unsigned idx)
{
        struct omap_sg *sg = d->sg + idx;

        if (d->dir == DMA_DEV_TO_MEM)
                omap_set_dma_dest_params(c->dma_ch, OMAP_DMA_PORT_EMIFF,
                        OMAP_DMA_AMODE_POST_INC, sg->addr, 0, 0);
        else
                omap_set_dma_src_params(c->dma_ch, OMAP_DMA_PORT_EMIFF,
                        OMAP_DMA_AMODE_POST_INC, sg->addr, 0, 0);

        omap_set_dma_transfer_params(c->dma_ch, d->es, sg->en, sg->fn,
                d->sync_mode, c->dma_sig, d->sync_type);

        omap_start_dma(c->dma_ch);
}
static void omap_dma_start_desc(struct omap_chan *c)
{
        struct virt_dma_desc *vd = vchan_next_desc(&c->vc);
        struct omap_desc *d;

        if (!vd) {
                c->desc = NULL;
                return;
        }

        list_del(&vd->node);

        c->desc = d = to_omap_dma_desc(&vd->tx);
        c->sgidx = 0;

        if (d->dir == DMA_DEV_TO_MEM)
                omap_set_dma_src_params(c->dma_ch, d->periph_port,
                        OMAP_DMA_AMODE_CONSTANT, d->dev_addr, 0, d->fi);
        else
                omap_set_dma_dest_params(c->dma_ch, d->periph_port,
                        OMAP_DMA_AMODE_CONSTANT, d->dev_addr, 0, d->fi);

        omap_dma_start_sg(c, d, 0);
}
static void omap_dma_callback(int ch, u16 status, void *data)
{
        struct omap_chan *c = data;
        struct omap_desc *d;
        unsigned long flags;

        spin_lock_irqsave(&c->vc.lock, flags);
        d = c->desc;
        if (d) {
                if (!c->cyclic) {
                        if (++c->sgidx < d->sglen) {
                                omap_dma_start_sg(c, d, c->sgidx);
                        } else {
                                omap_dma_start_desc(c);
                                vchan_cookie_complete(&d->vd);
                        }
                } else {
                        vchan_cyclic_callback(&d->vd);
                }
        }
        spin_unlock_irqrestore(&c->vc.lock, flags);
}
/*
 * This callback schedules all pending channels.  We could be more
 * clever here by postponing allocation of the real DMA channels to
 * this point, and freeing them when our virtual channel becomes idle.
 *
 * We would then need to deal with 'all channels in-use'.
 */
static void omap_dma_sched(unsigned long data)
{
        struct omap_dmadev *d = (struct omap_dmadev *)data;
        LIST_HEAD(head);

        spin_lock_irq(&d->lock);
        list_splice_tail_init(&d->pending, &head);
        spin_unlock_irq(&d->lock);

        while (!list_empty(&head)) {
                struct omap_chan *c = list_first_entry(&head,
                        struct omap_chan, node);

                spin_lock_irq(&c->vc.lock);
                list_del_init(&c->node);
                omap_dma_start_desc(c);
                spin_unlock_irq(&c->vc.lock);
        }
}
static int omap_dma_alloc_chan_resources(struct dma_chan *chan)
{
        struct omap_chan *c = to_omap_dma_chan(chan);

        dev_info(c->vc.chan.device->dev, "allocating channel for %u\n", c->dma_sig);

        return omap_request_dma(c->dma_sig, "DMA engine",
                omap_dma_callback, c, &c->dma_ch);
}
static void omap_dma_free_chan_resources(struct dma_chan *chan)
{
        struct omap_chan *c = to_omap_dma_chan(chan);

        vchan_free_chan_resources(&c->vc);
        omap_free_dma(c->dma_ch);

        dev_info(c->vc.chan.device->dev, "freeing channel for %u\n", c->dma_sig);
}
static size_t omap_dma_sg_size(struct omap_sg *sg)
{
        return sg->en * sg->fn;
}
static size_t omap_dma_desc_size(struct omap_desc *d)
{
        unsigned i;
        size_t size;

        for (size = i = 0; i < d->sglen; i++)
                size += omap_dma_sg_size(&d->sg[i]);

        return size * es_bytes[d->es];
}
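/*
 * Worked example (illustrative numbers only): a descriptor with two SG
 * entries of en=16/fn=8 and en=16/fn=4 using 4-byte elements
 * (OMAP_DMA_DATA_TYPE_S32) reports (16*8 + 16*4) * 4 = 768 bytes.
 */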
static size_t omap_dma_desc_size_pos(struct omap_desc *d, dma_addr_t addr)
{
        unsigned i;
        size_t size, es_size = es_bytes[d->es];

        for (size = i = 0; i < d->sglen; i++) {
                size_t this_size = omap_dma_sg_size(&d->sg[i]) * es_size;

                if (size)
                        size += this_size;
                else if (addr >= d->sg[i].addr &&
                         addr < d->sg[i].addr + this_size)
                        size += d->sg[i].addr + this_size - addr;
        }
        return size;
}
static enum dma_status omap_dma_tx_status(struct dma_chan *chan,
        dma_cookie_t cookie, struct dma_tx_state *txstate)
{
        struct omap_chan *c = to_omap_dma_chan(chan);
        struct virt_dma_desc *vd;
        enum dma_status ret;
        unsigned long flags;

        ret = dma_cookie_status(chan, cookie, txstate);
        if (ret == DMA_SUCCESS || !txstate)
                return ret;

        spin_lock_irqsave(&c->vc.lock, flags);
        vd = vchan_find_desc(&c->vc, cookie);
        if (vd) {
                txstate->residue = omap_dma_desc_size(to_omap_dma_desc(&vd->tx));
        } else if (c->desc && c->desc->vd.tx.cookie == cookie) {
                struct omap_desc *d = c->desc;
                dma_addr_t pos;

                if (d->dir == DMA_MEM_TO_DEV)
                        pos = omap_get_dma_src_pos(c->dma_ch);
                else if (d->dir == DMA_DEV_TO_MEM)
                        pos = omap_get_dma_dst_pos(c->dma_ch);
                else
                        pos = 0;

                txstate->residue = omap_dma_desc_size_pos(d, pos);
        } else {
                txstate->residue = 0;
        }
        spin_unlock_irqrestore(&c->vc.lock, flags);

        return ret;
}
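/*
 * Hedged usage sketch (client side, not part of this driver): the
 * residue computed above reaches users through the generic helper:
 *
 *      struct dma_tx_state state;
 *
 *      if (dmaengine_tx_status(chan, cookie, &state) == DMA_IN_PROGRESS)
 *              pr_debug("bytes remaining: %u\n", state.residue);
 */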
static void omap_dma_issue_pending(struct dma_chan *chan)
{
        struct omap_chan *c = to_omap_dma_chan(chan);
        unsigned long flags;

        spin_lock_irqsave(&c->vc.lock, flags);
        if (vchan_issue_pending(&c->vc) && !c->desc) {
                /*
                 * c->cyclic is used only by audio, and in this case the
                 * DMA needs to be started without delay.
                 */
                if (!c->cyclic) {
                        struct omap_dmadev *d = to_omap_dma_dev(chan->device);

                        spin_lock(&d->lock);
                        if (list_empty(&c->node))
                                list_add_tail(&c->node, &d->pending);
                        spin_unlock(&d->lock);
                        tasklet_schedule(&d->task);
                } else {
                        omap_dma_start_desc(c);
                }
        }
        spin_unlock_irqrestore(&c->vc.lock, flags);
}
static struct dma_async_tx_descriptor *omap_dma_prep_slave_sg(
        struct dma_chan *chan, struct scatterlist *sgl, unsigned sglen,
        enum dma_transfer_direction dir, unsigned long tx_flags, void *context)
{
        struct omap_chan *c = to_omap_dma_chan(chan);
        enum dma_slave_buswidth dev_width;
        struct scatterlist *sgent;
        struct omap_desc *d;
        dma_addr_t dev_addr;
        unsigned i, j = 0, es, en, frame_bytes, sync_type;
        u32 burst;

        if (dir == DMA_DEV_TO_MEM) {
                dev_addr = c->cfg.src_addr;
                dev_width = c->cfg.src_addr_width;
                burst = c->cfg.src_maxburst;
                sync_type = OMAP_DMA_SRC_SYNC;
        } else if (dir == DMA_MEM_TO_DEV) {
                dev_addr = c->cfg.dst_addr;
                dev_width = c->cfg.dst_addr_width;
                burst = c->cfg.dst_maxburst;
                sync_type = OMAP_DMA_DST_SYNC;
        } else {
                dev_err(chan->device->dev, "%s: bad direction?\n", __func__);
                return NULL;
        }

        /* Bus width translates to the element size (ES) */
        switch (dev_width) {
        case DMA_SLAVE_BUSWIDTH_1_BYTE:
                es = OMAP_DMA_DATA_TYPE_S8;
                break;
        case DMA_SLAVE_BUSWIDTH_2_BYTES:
                es = OMAP_DMA_DATA_TYPE_S16;
                break;
        case DMA_SLAVE_BUSWIDTH_4_BYTES:
                es = OMAP_DMA_DATA_TYPE_S32;
                break;
        default: /* not reached */
                return NULL;
        }
        /* Now allocate and setup the descriptor. */
        d = kzalloc(sizeof(*d) + sglen * sizeof(d->sg[0]), GFP_ATOMIC);
        if (!d)
                return NULL;

        d->dir = dir;
        d->dev_addr = dev_addr;
        d->es = es;
        d->sync_mode = OMAP_DMA_SYNC_FRAME;
        d->sync_type = sync_type;
        d->periph_port = OMAP_DMA_PORT_TIPB;
        /*
         * Build our scatterlist entries: each contains the address,
         * the number of elements (EN) in each frame, and the number of
         * frames (FN).  Number of bytes for this entry = ES * EN * FN.
         *
         * Burst size translates to number of elements with frame sync.
         * Note: the DMA engine defines burst to be the number of
         * dev-width transfers.
         */
        en = burst;
        frame_bytes = es_bytes[es] * en;
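        /*
         * Illustrative numbers (assumed, not from the driver): with a
         * 4-byte bus width (ES = 4 bytes) and maxburst = 16 elements,
         * frame_bytes = 64, so a 4096-byte SG entry becomes FN = 64
         * frames of EN = 16 elements each.
         */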
        for_each_sg(sgl, sgent, sglen, i) {
                d->sg[j].addr = sg_dma_address(sgent);
                d->sg[j].en = en;
                d->sg[j].fn = sg_dma_len(sgent) / frame_bytes;
                j++;
        }

        d->sglen = j;
        return vchan_tx_prep(&c->vc, &d->vd, tx_flags);
}
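/*
 * Hedged client-side sketch (my_dev and my_callback are hypothetical):
 * a peripheral driver maps its buffer and prepares a slave_sg
 * transaction through the generic dmaengine API:
 *
 *      nents = dma_map_sg(my_dev, sgl, sglen, DMA_TO_DEVICE);
 *      txd = dmaengine_prep_slave_sg(chan, sgl, nents, DMA_MEM_TO_DEV,
 *                                    DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
 *      if (txd) {
 *              txd->callback = my_callback;
 *              cookie = dmaengine_submit(txd);
 *              dma_async_issue_pending(chan);
 *      }
 */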
static struct dma_async_tx_descriptor *omap_dma_prep_dma_cyclic(
        struct dma_chan *chan, dma_addr_t buf_addr, size_t buf_len,
        size_t period_len, enum dma_transfer_direction dir, unsigned long flags,
        void *context)
{
        struct omap_chan *c = to_omap_dma_chan(chan);
        enum dma_slave_buswidth dev_width;
        struct omap_desc *d;
        dma_addr_t dev_addr;
        unsigned es, sync_type;
        u32 burst;

        if (dir == DMA_DEV_TO_MEM) {
                dev_addr = c->cfg.src_addr;
                dev_width = c->cfg.src_addr_width;
                burst = c->cfg.src_maxburst;
                sync_type = OMAP_DMA_SRC_SYNC;
        } else if (dir == DMA_MEM_TO_DEV) {
                dev_addr = c->cfg.dst_addr;
                dev_width = c->cfg.dst_addr_width;
                burst = c->cfg.dst_maxburst;
                sync_type = OMAP_DMA_DST_SYNC;
        } else {
                dev_err(chan->device->dev, "%s: bad direction?\n", __func__);
                return NULL;
        }

        /* Bus width translates to the element size (ES) */
        switch (dev_width) {
        case DMA_SLAVE_BUSWIDTH_1_BYTE:
                es = OMAP_DMA_DATA_TYPE_S8;
                break;
        case DMA_SLAVE_BUSWIDTH_2_BYTES:
                es = OMAP_DMA_DATA_TYPE_S16;
                break;
        case DMA_SLAVE_BUSWIDTH_4_BYTES:
                es = OMAP_DMA_DATA_TYPE_S32;
                break;
        default: /* not reached */
                return NULL;
        }

        /* Now allocate and setup the descriptor. */
        d = kzalloc(sizeof(*d) + sizeof(d->sg[0]), GFP_ATOMIC);
        if (!d)
                return NULL;
        d->dir = dir;
        d->dev_addr = dev_addr;
        d->fi = burst;
        d->es = es;
        if (burst)
                d->sync_mode = OMAP_DMA_SYNC_PACKET;
        else
                d->sync_mode = OMAP_DMA_SYNC_ELEMENT;
        d->sync_type = sync_type;
        d->periph_port = OMAP_DMA_PORT_MPUI;
        d->sg[0].addr = buf_addr;
        d->sg[0].en = period_len / es_bytes[es];
        d->sg[0].fn = buf_len / period_len;
        d->sglen = 1;
        if (!c->cyclic) {
                c->cyclic = true;
                omap_dma_link_lch(c->dma_ch, c->dma_ch);

                if (flags & DMA_PREP_INTERRUPT)
                        omap_enable_dma_irq(c->dma_ch, OMAP_DMA_FRAME_IRQ);

                omap_disable_dma_irq(c->dma_ch, OMAP_DMA_BLOCK_IRQ);
        }
        if (dma_omap2plus()) {
                omap_set_dma_src_burst_mode(c->dma_ch, OMAP_DMA_DATA_BURST_16);
                omap_set_dma_dest_burst_mode(c->dma_ch, OMAP_DMA_DATA_BURST_16);
        }

        return vchan_tx_prep(&c->vc, &d->vd, flags);
}
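/*
 * Hedged client-side sketch (buffer sizes assumed): audio is the
 * typical cyclic user, with period_len dividing buf_len exactly:
 *
 *      txd = dmaengine_prep_dma_cyclic(chan, buf_dma, 4 * 4096, 4096,
 *                                      DMA_MEM_TO_DEV, DMA_PREP_INTERRUPT);
 */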
static int omap_dma_slave_config(struct omap_chan *c, struct dma_slave_config *cfg)
{
        if (cfg->src_addr_width == DMA_SLAVE_BUSWIDTH_8_BYTES ||
            cfg->dst_addr_width == DMA_SLAVE_BUSWIDTH_8_BYTES)
                return -EINVAL;

        memcpy(&c->cfg, cfg, sizeof(c->cfg));

        return 0;
}
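/*
 * Hedged client-side sketch (fifo_phys is a hypothetical device FIFO
 * address): the fields validated above are filled in by the peripheral
 * driver before any transfer is prepared:
 *
 *      struct dma_slave_config cfg = {
 *              .direction      = DMA_MEM_TO_DEV,
 *              .dst_addr       = fifo_phys,
 *              .dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
 *              .dst_maxburst   = 16,
 *      };
 *
 *      dmaengine_slave_config(chan, &cfg);
 */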
static int omap_dma_terminate_all(struct omap_chan *c)
{
        struct omap_dmadev *d = to_omap_dma_dev(c->vc.chan.device);
        unsigned long flags;
        LIST_HEAD(head);

        spin_lock_irqsave(&c->vc.lock, flags);

        /* Prevent this channel being scheduled */
        spin_lock(&d->lock);
        list_del_init(&c->node);
        spin_unlock(&d->lock);

        /*
         * Stop DMA activity: we assume the callback will not be called
         * after omap_stop_dma() returns (even if it does, it will see
         * c->desc is NULL and exit.)
         */
        if (c->desc) {
                c->desc = NULL;
                /* Avoid stopping the DMA twice */
                if (!c->paused)
                        omap_stop_dma(c->dma_ch);
        }

        if (c->cyclic) {
                c->cyclic = false;
                c->paused = false;
                omap_dma_unlink_lch(c->dma_ch, c->dma_ch);
        }

        vchan_get_all_descriptors(&c->vc, &head);
        spin_unlock_irqrestore(&c->vc.lock, flags);
        vchan_dma_desc_free_list(&c->vc, &head);

        return 0;
}
static int omap_dma_pause(struct omap_chan *c)
{
        /* Pause/Resume only allowed with cyclic mode */
        if (!c->cyclic)
                return -EINVAL;

        if (!c->paused) {
                omap_stop_dma(c->dma_ch);
                c->paused = true;
        }

        return 0;
}
static int omap_dma_resume(struct omap_chan *c)
{
        /* Pause/Resume only allowed with cyclic mode */
        if (!c->cyclic)
                return -EINVAL;

        if (c->paused) {
                omap_start_dma(c->dma_ch);
                c->paused = false;
        }

        return 0;
}
static int omap_dma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
        unsigned long arg)
{
        struct omap_chan *c = to_omap_dma_chan(chan);
        int ret;

        switch (cmd) {
        case DMA_SLAVE_CONFIG:
                ret = omap_dma_slave_config(c, (struct dma_slave_config *)arg);
                break;

        case DMA_TERMINATE_ALL:
                ret = omap_dma_terminate_all(c);
                break;

        case DMA_PAUSE:
                ret = omap_dma_pause(c);
                break;

        case DMA_RESUME:
                ret = omap_dma_resume(c);
                break;

        default:
                ret = -ENXIO;
                break;
        }

        return ret;
}
static int omap_dma_chan_init(struct omap_dmadev *od, int dma_sig)
{
        struct omap_chan *c;

        c = kzalloc(sizeof(*c), GFP_KERNEL);
        if (!c)
                return -ENOMEM;

        c->dma_sig = dma_sig;
        c->vc.desc_free = omap_dma_desc_free;
        vchan_init(&c->vc, &od->ddev);
        INIT_LIST_HEAD(&c->node);

        od->ddev.chancnt++;

        return 0;
}
static void omap_dma_free(struct omap_dmadev *od)
{
        tasklet_kill(&od->task);
        while (!list_empty(&od->ddev.channels)) {
                struct omap_chan *c = list_first_entry(&od->ddev.channels,
                        struct omap_chan, vc.chan.device_node);

                list_del(&c->vc.chan.device_node);
                tasklet_kill(&c->vc.task);
                kfree(c);
        }
        kfree(od);
}
static int omap_dma_probe(struct platform_device *pdev)
{
        struct omap_dmadev *od;
        int rc, i;

        od = kzalloc(sizeof(*od), GFP_KERNEL);
        if (!od)
                return -ENOMEM;

        dma_cap_set(DMA_SLAVE, od->ddev.cap_mask);
        dma_cap_set(DMA_CYCLIC, od->ddev.cap_mask);
        od->ddev.device_alloc_chan_resources = omap_dma_alloc_chan_resources;
        od->ddev.device_free_chan_resources = omap_dma_free_chan_resources;
        od->ddev.device_tx_status = omap_dma_tx_status;
        od->ddev.device_issue_pending = omap_dma_issue_pending;
        od->ddev.device_prep_slave_sg = omap_dma_prep_slave_sg;
        od->ddev.device_prep_dma_cyclic = omap_dma_prep_dma_cyclic;
        od->ddev.device_control = omap_dma_control;
        od->ddev.dev = &pdev->dev;
        INIT_LIST_HEAD(&od->ddev.channels);
        INIT_LIST_HEAD(&od->pending);
        spin_lock_init(&od->lock);

        tasklet_init(&od->task, omap_dma_sched, (unsigned long)od);

        for (i = 0; i < 127; i++) {
                rc = omap_dma_chan_init(od, i);
                if (rc) {
                        omap_dma_free(od);
                        return rc;
                }
        }

        rc = dma_async_device_register(&od->ddev);
        if (rc) {
                pr_warn("OMAP-DMA: failed to register slave DMA engine device: %d\n",
                        rc);
                omap_dma_free(od);
                return rc;
        }

        platform_set_drvdata(pdev, od);

        if (pdev->dev.of_node) {
                omap_dma_info.dma_cap = od->ddev.cap_mask;

                /* Device-tree DMA controller registration */
                rc = of_dma_controller_register(pdev->dev.of_node,
                                of_dma_simple_xlate, &omap_dma_info);
                if (rc) {
                        pr_warn("OMAP-DMA: failed to register DMA controller\n");
                        dma_async_device_unregister(&od->ddev);
                        omap_dma_free(od);
                }
        }

        dev_info(&pdev->dev, "OMAP DMA engine driver\n");

        return rc;
}
static int omap_dma_remove(struct platform_device *pdev)
{
        struct omap_dmadev *od = platform_get_drvdata(pdev);

        if (pdev->dev.of_node)
                of_dma_controller_free(pdev->dev.of_node);

        dma_async_device_unregister(&od->ddev);
        omap_dma_free(od);

        return 0;
}
static const struct of_device_id omap_dma_match[] = {
        { .compatible = "ti,omap2420-sdma", },
        { .compatible = "ti,omap2430-sdma", },
        { .compatible = "ti,omap3430-sdma", },
        { .compatible = "ti,omap3630-sdma", },
        { .compatible = "ti,omap4430-sdma", },
        {},
};
MODULE_DEVICE_TABLE(of, omap_dma_match);
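/*
 * Illustrative device-tree fragment (register address and cell values
 * assumed; consult the SoC binding for the exact node):
 *
 *      sdma: dma-controller@48056000 {
 *              compatible = "ti,omap4430-sdma";
 *              reg = <0x48056000 0x1000>;
 *              #dma-cells = <1>;
 *      };
 *
 * A client node then names its request lines:
 *
 *      dmas = <&sdma 61>, <&sdma 62>;
 *      dma-names = "tx", "rx";
 */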
static struct platform_driver omap_dma_driver = {
        .probe  = omap_dma_probe,
        .remove = omap_dma_remove,
        .driver = {
                .name = "omap-dma-engine",
                .owner = THIS_MODULE,
                .of_match_table = of_match_ptr(omap_dma_match),
        },
};
bool omap_dma_filter_fn(struct dma_chan *chan, void *param)
{
        if (chan->device->dev->driver == &omap_dma_driver.driver) {
                struct omap_chan *c = to_omap_dma_chan(chan);
                unsigned req = *(unsigned *)param;

                return req == c->dma_sig;
        }
        return false;
}
EXPORT_SYMBOL_GPL(omap_dma_filter_fn);
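/*
 * Hedged usage sketch (the request line number is hypothetical): on
 * non-DT platforms a client selects a channel by sDMA request line:
 *
 *      dma_cap_mask_t mask;
 *      unsigned sig = 61;
 *      struct dma_chan *chan;
 *
 *      dma_cap_zero(mask);
 *      dma_cap_set(DMA_SLAVE, mask);
 *      chan = dma_request_channel(mask, omap_dma_filter_fn, &sig);
 */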
static int omap_dma_init(void)
{
        return platform_driver_register(&omap_dma_driver);
}
subsys_initcall(omap_dma_init);
static void __exit omap_dma_exit(void)
{
        platform_driver_unregister(&omap_dma_driver);
}
module_exit(omap_dma_exit);
MODULE_AUTHOR("Russell King");
MODULE_LICENSE("GPL");