/*
 * OMAP DMAengine support
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/omap-dma.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

#include "virt-dma.h"

#include <plat/cpu.h>
#include <plat/dma.h>

struct omap_dmadev {
	struct dma_device ddev;
	spinlock_t lock;
	struct tasklet_struct task;
	struct list_head pending;
};
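
/*
 * Channels with newly issued work are queued on 'pending' under 'lock';
 * the 'task' tasklet (omap_dma_sched) then drains the list and starts
 * each channel's next descriptor.
 */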

struct omap_chan {
	struct virt_dma_chan vc;
	struct list_head node;

	struct dma_slave_config cfg;
	unsigned dma_sig;
	bool cyclic;
	bool paused;

	int dma_ch;
	struct omap_desc *desc;
	unsigned sgidx;
};

struct omap_sg {
	dma_addr_t addr;
	uint32_t en;		/* number of elements (24-bit) */
	uint32_t fn;		/* number of frames (16-bit) */
};
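
/*
 * A transfer is expressed as 'fn' frames of 'en' elements each, with the
 * element size given by the descriptor's 'es' field.  A worked example
 * with illustrative numbers: for 32-bit elements (es_bytes[] entry 4),
 * en = 16 and fn = 8 move 4 * 16 * 8 = 512 bytes for the entry.
 */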

struct omap_desc {
	struct virt_dma_desc vd;
	enum dma_transfer_direction dir;
	dma_addr_t dev_addr;

	int16_t fi;		/* for OMAP_DMA_SYNC_PACKET */
	uint8_t es;		/* OMAP_DMA_DATA_TYPE_xxx */
	uint8_t sync_mode;	/* OMAP_DMA_SYNC_xxx */
	uint8_t sync_type;	/* OMAP_DMA_xxx_SYNC* */
	uint8_t periph_port;	/* Peripheral port */

	unsigned sglen;
	struct omap_sg sg[0];
};
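
/*
 * 'sg' is a flexible array: descriptors are allocated with their entries
 * appended, e.g. kzalloc(sizeof(*d) + sglen * sizeof(d->sg[0]), ...),
 * so one allocation covers the descriptor and its scatterlist copy.
 */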

static const unsigned es_bytes[] = {
	[OMAP_DMA_DATA_TYPE_S8] = 1,
	[OMAP_DMA_DATA_TYPE_S16] = 2,
	[OMAP_DMA_DATA_TYPE_S32] = 4,
};

static inline struct omap_dmadev *to_omap_dma_dev(struct dma_device *d)
{
	return container_of(d, struct omap_dmadev, ddev);
}

static inline struct omap_chan *to_omap_dma_chan(struct dma_chan *c)
{
	return container_of(c, struct omap_chan, vc.chan);
}

static inline struct omap_desc *to_omap_dma_desc(struct dma_async_tx_descriptor *t)
{
	return container_of(t, struct omap_desc, vd.tx);
}

static void omap_dma_desc_free(struct virt_dma_desc *vd)
{
	kfree(container_of(vd, struct omap_desc, vd));
}

static void omap_dma_start_sg(struct omap_chan *c, struct omap_desc *d,
	unsigned idx)
{
	struct omap_sg *sg = d->sg + idx;

	if (d->dir == DMA_DEV_TO_MEM)
		omap_set_dma_dest_params(c->dma_ch, OMAP_DMA_PORT_EMIFF,
			OMAP_DMA_AMODE_POST_INC, sg->addr, 0, 0);
	else
		omap_set_dma_src_params(c->dma_ch, OMAP_DMA_PORT_EMIFF,
			OMAP_DMA_AMODE_POST_INC, sg->addr, 0, 0);

	omap_set_dma_transfer_params(c->dma_ch, d->es, sg->en, sg->fn,
		d->sync_mode, c->dma_sig, d->sync_type);

	omap_start_dma(c->dma_ch);
}

static void omap_dma_start_desc(struct omap_chan *c)
{
	struct virt_dma_desc *vd = vchan_next_desc(&c->vc);
	struct omap_desc *d;

	if (!vd) {
		c->desc = NULL;
		return;
	}

	list_del(&vd->node);

	c->desc = d = to_omap_dma_desc(&vd->tx);
	c->sgidx = 0;

	if (d->dir == DMA_DEV_TO_MEM)
		omap_set_dma_src_params(c->dma_ch, d->periph_port,
			OMAP_DMA_AMODE_CONSTANT, d->dev_addr, 0, d->fi);
	else
		omap_set_dma_dest_params(c->dma_ch, d->periph_port,
			OMAP_DMA_AMODE_CONSTANT, d->dev_addr, 0, d->fi);

	omap_dma_start_sg(c, d, 0);
}

static void omap_dma_callback(int ch, u16 status, void *data)
{
	struct omap_chan *c = data;
	struct omap_desc *d;
	unsigned long flags;

	spin_lock_irqsave(&c->vc.lock, flags);
	d = c->desc;
	if (d) {
		if (!c->cyclic) {
			if (++c->sgidx < d->sglen) {
				omap_dma_start_sg(c, d, c->sgidx);
			} else {
				omap_dma_start_desc(c);
				vchan_cookie_complete(&d->vd);
			}
		} else {
			vchan_cyclic_callback(&d->vd);
		}
	}
	spin_unlock_irqrestore(&c->vc.lock, flags);
}

/*
 * This callback schedules all pending channels.  We could be more
 * clever here by postponing allocation of the real DMA channels to
 * this point, and freeing them when our virtual channel becomes idle.
 *
 * We would then need to deal with 'all channels in-use'
 */
static void omap_dma_sched(unsigned long data)
{
	struct omap_dmadev *d = (struct omap_dmadev *)data;
	LIST_HEAD(head);

	spin_lock_irq(&d->lock);
	list_splice_tail_init(&d->pending, &head);
	spin_unlock_irq(&d->lock);

	while (!list_empty(&head)) {
		struct omap_chan *c = list_first_entry(&head,
			struct omap_chan, node);

		spin_lock_irq(&c->vc.lock);
		list_del_init(&c->node);
		omap_dma_start_desc(c);
		spin_unlock_irq(&c->vc.lock);
	}
}

static int omap_dma_alloc_chan_resources(struct dma_chan *chan)
{
	struct omap_chan *c = to_omap_dma_chan(chan);

	dev_info(c->vc.chan.device->dev, "allocating channel for %u\n", c->dma_sig);

	return omap_request_dma(c->dma_sig, "DMA engine",
		omap_dma_callback, c, &c->dma_ch);
}

static void omap_dma_free_chan_resources(struct dma_chan *chan)
{
	struct omap_chan *c = to_omap_dma_chan(chan);

	vchan_free_chan_resources(&c->vc);
	omap_free_dma(c->dma_ch);

	dev_info(c->vc.chan.device->dev, "freeing channel for %u\n", c->dma_sig);
}
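
/*
 * Residue helpers: omap_dma_sg_size() counts elements (en * fn);
 * callers scale by es_bytes[] where a byte count is needed.
 */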
static size_t omap_dma_sg_size(struct omap_sg *sg)
{
	return sg->en * sg->fn;
}

static size_t omap_dma_desc_size(struct omap_desc *d)
{
	unsigned i;
	size_t size;

	for (size = i = 0; i < d->sglen; i++)
		size += omap_dma_sg_size(&d->sg[i]);

	return size * es_bytes[d->es];
}
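
/*
 * As omap_dma_desc_size(), but only counting from the current hardware
 * position 'addr' onwards: entries already completed contribute nothing,
 * the entry containing 'addr' contributes its remaining tail, and every
 * entry after it contributes in full.
 */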
static size_t omap_dma_desc_size_pos(struct omap_desc *d, dma_addr_t addr)
{
	unsigned i;
	size_t size, es_size = es_bytes[d->es];

	for (size = i = 0; i < d->sglen; i++) {
		size_t this_size = omap_dma_sg_size(&d->sg[i]) * es_size;

		if (size)
			size += this_size;
		else if (addr >= d->sg[i].addr &&
			 addr < d->sg[i].addr + this_size)
			size += d->sg[i].addr + this_size - addr;
	}
	return size;
}

static enum dma_status omap_dma_tx_status(struct dma_chan *chan,
	dma_cookie_t cookie, struct dma_tx_state *txstate)
{
	struct omap_chan *c = to_omap_dma_chan(chan);
	struct virt_dma_desc *vd;
	enum dma_status ret;
	unsigned long flags;

	ret = dma_cookie_status(chan, cookie, txstate);
	if (ret == DMA_SUCCESS || !txstate)
		return ret;

	spin_lock_irqsave(&c->vc.lock, flags);
	vd = vchan_find_desc(&c->vc, cookie);
	if (vd) {
		txstate->residue = omap_dma_desc_size(to_omap_dma_desc(&vd->tx));
	} else if (c->desc && c->desc->vd.tx.cookie == cookie) {
		struct omap_desc *d = c->desc;
		dma_addr_t pos;

		if (d->dir == DMA_MEM_TO_DEV)
			pos = omap_get_dma_src_pos(c->dma_ch);
		else if (d->dir == DMA_DEV_TO_MEM)
			pos = omap_get_dma_dst_pos(c->dma_ch);
		else
			pos = 0;

		txstate->residue = omap_dma_desc_size_pos(d, pos);
	} else {
		txstate->residue = 0;
	}
	spin_unlock_irqrestore(&c->vc.lock, flags);

	return ret;
}

static void omap_dma_issue_pending(struct dma_chan *chan)
{
	struct omap_chan *c = to_omap_dma_chan(chan);
	unsigned long flags;

	spin_lock_irqsave(&c->vc.lock, flags);
	if (vchan_issue_pending(&c->vc) && !c->desc) {
		struct omap_dmadev *d = to_omap_dma_dev(chan->device);
		spin_lock(&d->lock);
		if (list_empty(&c->node))
			list_add_tail(&c->node, &d->pending);
		spin_unlock(&d->lock);
		tasklet_schedule(&d->task);
	}
	spin_unlock_irqrestore(&c->vc.lock, flags);
}

static struct dma_async_tx_descriptor *omap_dma_prep_slave_sg(
	struct dma_chan *chan, struct scatterlist *sgl, unsigned sglen,
	enum dma_transfer_direction dir, unsigned long tx_flags, void *context)
{
	struct omap_chan *c = to_omap_dma_chan(chan);
	enum dma_slave_buswidth dev_width;
	struct scatterlist *sgent;
	struct omap_desc *d;
	dma_addr_t dev_addr;
	unsigned i, j = 0, es, en, frame_bytes, sync_type;
	u32 burst;

	if (dir == DMA_DEV_TO_MEM) {
		dev_addr = c->cfg.src_addr;
		dev_width = c->cfg.src_addr_width;
		burst = c->cfg.src_maxburst;
		sync_type = OMAP_DMA_SRC_SYNC;
	} else if (dir == DMA_MEM_TO_DEV) {
		dev_addr = c->cfg.dst_addr;
		dev_width = c->cfg.dst_addr_width;
		burst = c->cfg.dst_maxburst;
		sync_type = OMAP_DMA_DST_SYNC;
	} else {
		dev_err(chan->device->dev, "%s: bad direction?\n", __func__);
		return NULL;
	}

	/* Bus width translates to the element size (ES) */
	switch (dev_width) {
	case DMA_SLAVE_BUSWIDTH_1_BYTE:
		es = OMAP_DMA_DATA_TYPE_S8;
		break;
	case DMA_SLAVE_BUSWIDTH_2_BYTES:
		es = OMAP_DMA_DATA_TYPE_S16;
		break;
	case DMA_SLAVE_BUSWIDTH_4_BYTES:
		es = OMAP_DMA_DATA_TYPE_S32;
		break;
	default: /* not reached */
		return NULL;
	}

	/* Now allocate and setup the descriptor. */
	d = kzalloc(sizeof(*d) + sglen * sizeof(d->sg[0]), GFP_ATOMIC);
	if (!d)
		return NULL;

	d->dir = dir;
	d->dev_addr = dev_addr;
	d->es = es;
	d->sync_mode = OMAP_DMA_SYNC_FRAME;
	d->sync_type = sync_type;
	d->periph_port = OMAP_DMA_PORT_TIPB;

	/*
	 * Build our scatterlist entries: each contains the address,
	 * the number of elements (EN) in each frame, and the number of
	 * frames (FN).  Number of bytes for this entry = ES * EN * FN.
	 *
	 * Burst size translates to number of elements with frame sync.
	 * Note: DMA engine defines burst to be the number of dev-width
	 * transfers.
	 */
	en = burst;
	frame_bytes = es_bytes[es] * en;
	for_each_sg(sgl, sgent, sglen, i) {
		d->sg[j].addr = sg_dma_address(sgent);
		d->sg[j].en = en;
		d->sg[j].fn = sg_dma_len(sgent) / frame_bytes;
		j++;
	}

	d->sglen = j;

	return vchan_tx_prep(&c->vc, &d->vd, tx_flags);
}
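
/*
 * A minimal client-side sketch of driving this path (my_xfer_done and
 * my_ctx are hypothetical; assumes 'chan' came from dma_request_channel()
 * and 'sgl' was already mapped with dma_map_sg()):
 *
 *	struct dma_async_tx_descriptor *tx;
 *
 *	tx = dmaengine_prep_slave_sg(chan, sgl, nents, DMA_MEM_TO_DEV,
 *				     DMA_PREP_INTERRUPT);
 *	if (tx) {
 *		tx->callback = my_xfer_done;
 *		tx->callback_param = my_ctx;
 *		dmaengine_submit(tx);
 *		dma_async_issue_pending(chan);
 *	}
 */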

static struct dma_async_tx_descriptor *omap_dma_prep_dma_cyclic(
	struct dma_chan *chan, dma_addr_t buf_addr, size_t buf_len,
	size_t period_len, enum dma_transfer_direction dir, unsigned long flags,
	void *context)
{
	struct omap_chan *c = to_omap_dma_chan(chan);
	enum dma_slave_buswidth dev_width;
	struct omap_desc *d;
	dma_addr_t dev_addr;
	unsigned es, sync_type;
	u32 burst;

	if (dir == DMA_DEV_TO_MEM) {
		dev_addr = c->cfg.src_addr;
		dev_width = c->cfg.src_addr_width;
		burst = c->cfg.src_maxburst;
		sync_type = OMAP_DMA_SRC_SYNC;
	} else if (dir == DMA_MEM_TO_DEV) {
		dev_addr = c->cfg.dst_addr;
		dev_width = c->cfg.dst_addr_width;
		burst = c->cfg.dst_maxburst;
		sync_type = OMAP_DMA_DST_SYNC;
	} else {
		dev_err(chan->device->dev, "%s: bad direction?\n", __func__);
		return NULL;
	}

	/* Bus width translates to the element size (ES) */
	switch (dev_width) {
	case DMA_SLAVE_BUSWIDTH_1_BYTE:
		es = OMAP_DMA_DATA_TYPE_S8;
		break;
	case DMA_SLAVE_BUSWIDTH_2_BYTES:
		es = OMAP_DMA_DATA_TYPE_S16;
		break;
	case DMA_SLAVE_BUSWIDTH_4_BYTES:
		es = OMAP_DMA_DATA_TYPE_S32;
		break;
	default: /* not reached */
		return NULL;
	}

	/* Now allocate and setup the descriptor. */
	d = kzalloc(sizeof(*d) + sizeof(d->sg[0]), GFP_ATOMIC);
	if (!d)
		return NULL;

	d->dir = dir;
	d->dev_addr = dev_addr;
	d->fi = burst;
	d->es = es;
	if (burst)
		d->sync_mode = OMAP_DMA_SYNC_PACKET;
	else
		d->sync_mode = OMAP_DMA_SYNC_ELEMENT;
	d->sync_type = sync_type;
	d->periph_port = OMAP_DMA_PORT_MPUI;
	d->sg[0].addr = buf_addr;
	d->sg[0].en = period_len / es_bytes[es];
	d->sg[0].fn = buf_len / period_len;
	d->sglen = 1;

	if (!c->cyclic) {
		c->cyclic = true;
		omap_dma_link_lch(c->dma_ch, c->dma_ch);

		if (flags & DMA_PREP_INTERRUPT)
			omap_enable_dma_irq(c->dma_ch, OMAP_DMA_FRAME_IRQ);

		omap_disable_dma_irq(c->dma_ch, OMAP_DMA_BLOCK_IRQ);
	}

	if (!cpu_class_is_omap1()) {
		omap_set_dma_src_burst_mode(c->dma_ch, OMAP_DMA_DATA_BURST_16);
		omap_set_dma_dest_burst_mode(c->dma_ch, OMAP_DMA_DATA_BURST_16);
	}

	return vchan_tx_prep(&c->vc, &d->vd, flags);
}
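
/*
 * Worked example of the cyclic mapping above (illustrative numbers):
 * an 8192-byte ring of four 2048-byte periods with 32-bit elements
 * gives en = 2048 / 4 = 512 elements per frame and fn = 8192 / 2048 = 4
 * frames, so each frame-complete interrupt corresponds to one period.
 */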

static int omap_dma_slave_config(struct omap_chan *c, struct dma_slave_config *cfg)
{
	if (cfg->src_addr_width == DMA_SLAVE_BUSWIDTH_8_BYTES ||
	    cfg->dst_addr_width == DMA_SLAVE_BUSWIDTH_8_BYTES)
		return -EINVAL;

	memcpy(&c->cfg, cfg, sizeof(c->cfg));

	return 0;
}
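
/*
 * A configuration from the client side might look like this (a sketch
 * with illustrative values; 'dev_fifo' is a hypothetical device FIFO
 * address):
 *
 *	struct dma_slave_config cfg = {
 *		.direction	= DMA_MEM_TO_DEV,
 *		.dst_addr	= dev_fifo,
 *		.dst_addr_width	= DMA_SLAVE_BUSWIDTH_4_BYTES,
 *		.dst_maxburst	= 16,
 *	};
 *
 *	dmaengine_slave_config(chan, &cfg);
 */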

static int omap_dma_terminate_all(struct omap_chan *c)
{
	struct omap_dmadev *d = to_omap_dma_dev(c->vc.chan.device);
	unsigned long flags;
	LIST_HEAD(head);

	spin_lock_irqsave(&c->vc.lock, flags);

	/* Prevent this channel being scheduled */
	spin_lock(&d->lock);
	list_del_init(&c->node);
	spin_unlock(&d->lock);

	/*
	 * Stop DMA activity: we assume the callback will not be called
	 * after omap_stop_dma() returns (even if it does, it will see
	 * c->desc is NULL and exit.)
	 */
	if (c->desc) {
		c->desc = NULL;
		/* Avoid stopping the dma twice */
		if (!c->paused)
			omap_stop_dma(c->dma_ch);
	}

	if (c->cyclic) {
		c->cyclic = false;
		c->paused = false;
		omap_dma_unlink_lch(c->dma_ch, c->dma_ch);
	}

	vchan_get_all_descriptors(&c->vc, &head);
	spin_unlock_irqrestore(&c->vc.lock, flags);
	vchan_dma_desc_free_list(&c->vc, &head);

	return 0;
}

static int omap_dma_pause(struct omap_chan *c)
{
	/* Pause/Resume only allowed with cyclic mode */
	if (!c->cyclic)
		return -EINVAL;

	if (!c->paused) {
		omap_stop_dma(c->dma_ch);
		c->paused = true;
	}

	return 0;
}

static int omap_dma_resume(struct omap_chan *c)
{
	/* Pause/Resume only allowed with cyclic mode */
	if (!c->cyclic)
		return -EINVAL;

	if (c->paused) {
		omap_start_dma(c->dma_ch);
		c->paused = false;
	}

	return 0;
}

static int omap_dma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
	unsigned long arg)
{
	struct omap_chan *c = to_omap_dma_chan(chan);
	int ret;

	switch (cmd) {
	case DMA_SLAVE_CONFIG:
		ret = omap_dma_slave_config(c, (struct dma_slave_config *)arg);
		break;

	case DMA_TERMINATE_ALL:
		ret = omap_dma_terminate_all(c);
		break;

	case DMA_PAUSE:
		ret = omap_dma_pause(c);
		break;

	case DMA_RESUME:
		ret = omap_dma_resume(c);
		break;

	default:
		ret = -ENXIO;
		break;
	}

	return ret;
}
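
/*
 * Clients normally reach the cases above through the dmaengine wrappers
 * rather than calling device_control directly: dmaengine_slave_config(),
 * dmaengine_terminate_all(), dmaengine_pause() and dmaengine_resume().
 */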

static int omap_dma_chan_init(struct omap_dmadev *od, int dma_sig)
{
	struct omap_chan *c;

	c = kzalloc(sizeof(*c), GFP_KERNEL);
	if (!c)
		return -ENOMEM;

	c->dma_sig = dma_sig;
	c->vc.desc_free = omap_dma_desc_free;
	vchan_init(&c->vc, &od->ddev);
	INIT_LIST_HEAD(&c->node);

	od->ddev.chancnt++;

	return 0;
}

static void omap_dma_free(struct omap_dmadev *od)
{
	tasklet_kill(&od->task);
	while (!list_empty(&od->ddev.channels)) {
		struct omap_chan *c = list_first_entry(&od->ddev.channels,
			struct omap_chan, vc.chan.device_node);

		list_del(&c->vc.chan.device_node);
		tasklet_kill(&c->vc.task);
		kfree(c);
	}
	kfree(od);
}

static int omap_dma_probe(struct platform_device *pdev)
{
	struct omap_dmadev *od;
	int rc, i;

	od = kzalloc(sizeof(*od), GFP_KERNEL);
	if (!od)
		return -ENOMEM;

	dma_cap_set(DMA_SLAVE, od->ddev.cap_mask);
	dma_cap_set(DMA_CYCLIC, od->ddev.cap_mask);
	od->ddev.device_alloc_chan_resources = omap_dma_alloc_chan_resources;
	od->ddev.device_free_chan_resources = omap_dma_free_chan_resources;
	od->ddev.device_tx_status = omap_dma_tx_status;
	od->ddev.device_issue_pending = omap_dma_issue_pending;
	od->ddev.device_prep_slave_sg = omap_dma_prep_slave_sg;
	od->ddev.device_prep_dma_cyclic = omap_dma_prep_dma_cyclic;
	od->ddev.device_control = omap_dma_control;
	od->ddev.dev = &pdev->dev;
	INIT_LIST_HEAD(&od->ddev.channels);
	INIT_LIST_HEAD(&od->pending);
	spin_lock_init(&od->lock);

	tasklet_init(&od->task, omap_dma_sched, (unsigned long)od);

	for (i = 0; i < 127; i++) {
		rc = omap_dma_chan_init(od, i);
		if (rc) {
			omap_dma_free(od);
			return rc;
		}
	}

	rc = dma_async_device_register(&od->ddev);
	if (rc) {
		pr_warn("OMAP-DMA: failed to register slave DMA engine device: %d\n",
			rc);
		omap_dma_free(od);
	} else {
		platform_set_drvdata(pdev, od);
	}

	dev_info(&pdev->dev, "OMAP DMA engine driver\n");

	return rc;
}

static int omap_dma_remove(struct platform_device *pdev)
{
	struct omap_dmadev *od = platform_get_drvdata(pdev);

	dma_async_device_unregister(&od->ddev);
	omap_dma_free(od);

	return 0;
}

static struct platform_driver omap_dma_driver = {
	.probe	= omap_dma_probe,
	.remove	= omap_dma_remove,
	.driver = {
		.name	= "omap-dma-engine",
		.owner	= THIS_MODULE,
	},
};

bool omap_dma_filter_fn(struct dma_chan *chan, void *param)
{
	if (chan->device->dev->driver == &omap_dma_driver.driver) {
		struct omap_chan *c = to_omap_dma_chan(chan);
		unsigned req = *(unsigned *)param;

		return req == c->dma_sig;
	}
	return false;
}
EXPORT_SYMBOL_GPL(omap_dma_filter_fn);
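
/*
 * Typical use of the filter, sketched from the client side (the request
 * line number 'sig' would come from the client's platform data):
 *
 *	dma_cap_mask_t mask;
 *	unsigned sig = ...;
 *	struct dma_chan *chan;
 *
 *	dma_cap_zero(mask);
 *	dma_cap_set(DMA_SLAVE, mask);
 *	chan = dma_request_channel(mask, omap_dma_filter_fn, &sig);
 */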

static struct platform_device *pdev;

static const struct platform_device_info omap_dma_dev_info = {
	.name = "omap-dma-engine",
	.id = -1,
	.dma_mask = DMA_BIT_MASK(32),
};

static int omap_dma_init(void)
{
	int rc = platform_driver_register(&omap_dma_driver);

	if (rc == 0) {
		pdev = platform_device_register_full(&omap_dma_dev_info);
		if (IS_ERR(pdev)) {
			platform_driver_unregister(&omap_dma_driver);
			rc = PTR_ERR(pdev);
		}
	}

	return rc;
}
subsys_initcall(omap_dma_init);
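
/*
 * Registered at subsys_initcall level rather than module_init,
 * presumably so the engine is available before client drivers probe.
 */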

static void __exit omap_dma_exit(void)
{
	platform_device_unregister(pdev);
	platform_driver_unregister(&omap_dma_driver);
}
module_exit(omap_dma_exit);

MODULE_AUTHOR("Russell King");
MODULE_LICENSE("GPL");