drivers/dma/timb_dma.c
/*
 * timb_dma.c timberdale FPGA DMA driver
 * Copyright (c) 2010 Intel Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */

/* Supports:
 * Timberdale FPGA DMA engine
 */
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/slab.h>

#include <linux/timb_dma.h>

#include "dmaengine.h"
#define DRIVER_NAME "timb-dma"

/* Global DMA registers */
#define TIMBDMA_ACR		0x34
#define TIMBDMA_32BIT_ADDR	0x01

#define TIMBDMA_ISR		0x080000
#define TIMBDMA_IPR		0x080004
#define TIMBDMA_IER		0x080008

/* Channel specific registers */
/* RX instances base addresses are 0x00, 0x40, 0x80 ...
 * TX instances base addresses are 0x18, 0x58, 0x98 ...
 */
#define TIMBDMA_INSTANCE_OFFSET		0x40
#define TIMBDMA_INSTANCE_TX_OFFSET	0x18

/* RX registers, relative to the instance base */
#define TIMBDMA_OFFS_RX_DHAR	0x00
#define TIMBDMA_OFFS_RX_DLAR	0x04
#define TIMBDMA_OFFS_RX_LR	0x0C
#define TIMBDMA_OFFS_RX_BLR	0x10
#define TIMBDMA_OFFS_RX_ER	0x14
#define TIMBDMA_RX_EN		0x01
/* bytes per row, video specific register
 * which is placed after the TX registers...
 */
#define TIMBDMA_OFFS_RX_BPRR	0x30

/* TX registers, relative to the instance base */
#define TIMBDMA_OFFS_TX_DHAR	0x00
#define TIMBDMA_OFFS_TX_DLAR	0x04
#define TIMBDMA_OFFS_TX_BLR	0x0C
#define TIMBDMA_OFFS_TX_LR	0x14
#define TIMB_DMA_DESC_SIZE	8
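/*
 * Each software descriptor (struct timb_dma_desc below) carries a chain of
 * 8-byte hardware descriptor elements in desc_list. td_fill_desc() packs one
 * element per scatterlist entry: bytes 4-7 hold the DMA address (LSB first),
 * bytes 2-3 the length (LSB first), byte 1 is written as 0x00 and byte 0
 * holds the control flags ("tran, valid" = 0x21, with 0x02 set on the last
 * element of the chain).
 */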
struct timb_dma_desc {
	struct list_head		desc_node;
	struct dma_async_tx_descriptor	txd;
	u8				*desc_list;
	unsigned int			desc_list_len;
	bool				interrupt;
};

struct timb_dma_chan {
	struct dma_chan		chan;
	void __iomem		*membase;
	spinlock_t		lock; /* Used to protect data structures,
					especially the lists and descriptors,
					from races between the tasklet and calls
					from above */
	bool			ongoing;
	struct list_head	active_list;
	struct list_head	queue;
	struct list_head	free_list;
	unsigned int		bytes_per_line;
	enum dma_transfer_direction	direction;
	unsigned int		descs; /* Descriptors to allocate */
	unsigned int		desc_elems; /* number of elems per descriptor */
};

struct timb_dma {
	struct dma_device	dma;
	void __iomem		*membase;
	struct tasklet_struct	tasklet;
	struct timb_dma_chan	channels[0];
};
static struct device *chan2dev(struct dma_chan *chan)
{
	return &chan->dev->device;
}

static struct device *chan2dmadev(struct dma_chan *chan)
{
	return chan2dev(chan)->parent->parent;
}
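/*
 * The channels[] array is allocated immediately after struct timb_dma (see
 * the kzalloc() in td_probe()), so the owning struct timb_dma can be
 * recovered from a channel pointer by stepping back over the preceding
 * channels and the struct timb_dma header itself.
 */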
static struct timb_dma *tdchantotd(struct timb_dma_chan *td_chan)
{
	int id = td_chan->chan.chan_id;
	return (struct timb_dma *)((u8 *)td_chan -
		id * sizeof(struct timb_dma_chan) - sizeof(struct timb_dma));
}
/* Must be called with the spinlock held */
static void __td_enable_chan_irq(struct timb_dma_chan *td_chan)
{
	int id = td_chan->chan.chan_id;
	struct timb_dma *td = tdchantotd(td_chan);
	u32 ier;

	/* enable interrupt for this channel */
	ier = ioread32(td->membase + TIMBDMA_IER);
	ier |= 1 << id;
	dev_dbg(chan2dev(&td_chan->chan), "Enabling irq: %d, IER: 0x%x\n", id,
		ier);
	iowrite32(ier, td->membase + TIMBDMA_IER);
}
/* Should be called with the spinlock held */
static bool __td_dma_done_ack(struct timb_dma_chan *td_chan)
{
	int id = td_chan->chan.chan_id;
	struct timb_dma *td = tdchantotd(td_chan);
	u32 isr;
	bool done = false;

	dev_dbg(chan2dev(&td_chan->chan), "Checking irq: %d, td: %p\n", id, td);

	isr = ioread32(td->membase + TIMBDMA_ISR) & (1 << id);
	if (isr) {
		iowrite32(isr, td->membase + TIMBDMA_ISR);
		done = true;
	}

	return done;
}
static void __td_unmap_desc(struct timb_dma_chan *td_chan, const u8 *dma_desc,
	bool single)
{
	dma_addr_t addr;
	int len;

	addr = (dma_desc[7] << 24) | (dma_desc[6] << 16) | (dma_desc[5] << 8) |
		dma_desc[4];

	len = (dma_desc[3] << 8) | dma_desc[2];

	if (single)
		dma_unmap_single(chan2dev(&td_chan->chan), addr, len,
			DMA_TO_DEVICE);
	else
		dma_unmap_page(chan2dev(&td_chan->chan), addr, len,
			DMA_TO_DEVICE);
}
static void __td_unmap_descs(struct timb_dma_desc *td_desc, bool single)
{
	struct timb_dma_chan *td_chan = container_of(td_desc->txd.chan,
		struct timb_dma_chan, chan);
	u8 *descs;

	for (descs = td_desc->desc_list; ; descs += TIMB_DMA_DESC_SIZE) {
		__td_unmap_desc(td_chan, descs, single);
		if (descs[0] & 0x02)
			break;
	}
}
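/*
 * Pack a single scatterlist entry into the 8-byte hardware descriptor
 * element at dma_desc. The element length is limited to USHRT_MAX and must
 * be a multiple of 32 bits; "last" marks the final element of the chain.
 */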
static int td_fill_desc(struct timb_dma_chan *td_chan, u8 *dma_desc,
	struct scatterlist *sg, bool last)
{
	if (sg_dma_len(sg) > USHRT_MAX) {
		dev_err(chan2dev(&td_chan->chan), "Too big sg element\n");
		return -EINVAL;
	}

	/* length must be word aligned */
	if (sg_dma_len(sg) % sizeof(u32)) {
		dev_err(chan2dev(&td_chan->chan), "Incorrect length: %d\n",
			sg_dma_len(sg));
		return -EINVAL;
	}

	dev_dbg(chan2dev(&td_chan->chan), "desc: %p, addr: 0x%llx\n",
		dma_desc, (unsigned long long)sg_dma_address(sg));

	dma_desc[7] = (sg_dma_address(sg) >> 24) & 0xff;
	dma_desc[6] = (sg_dma_address(sg) >> 16) & 0xff;
	dma_desc[5] = (sg_dma_address(sg) >> 8) & 0xff;
	dma_desc[4] = (sg_dma_address(sg) >> 0) & 0xff;

	dma_desc[3] = (sg_dma_len(sg) >> 8) & 0xff;
	dma_desc[2] = (sg_dma_len(sg) >> 0) & 0xff;

	dma_desc[1] = 0x00;
	dma_desc[0] = 0x21 | (last ? 0x02 : 0); /* tran, valid */

	return 0;
}
/* Must be called with the spinlock held */
static void __td_start_dma(struct timb_dma_chan *td_chan)
{
	struct timb_dma_desc *td_desc;

	if (td_chan->ongoing) {
		dev_err(chan2dev(&td_chan->chan),
			"Transfer already ongoing\n");
		return;
	}

	td_desc = list_entry(td_chan->active_list.next, struct timb_dma_desc,
		desc_node);

	dev_dbg(chan2dev(&td_chan->chan),
		"td_chan: %p, chan: %d, membase: %p\n",
		td_chan, td_chan->chan.chan_id, td_chan->membase);

	if (td_chan->direction == DMA_DEV_TO_MEM) {

		/* descriptor address */
		iowrite32(0, td_chan->membase + TIMBDMA_OFFS_RX_DHAR);
		iowrite32(td_desc->txd.phys, td_chan->membase +
			TIMBDMA_OFFS_RX_DLAR);
		/* Bytes per line */
		iowrite32(td_chan->bytes_per_line, td_chan->membase +
			TIMBDMA_OFFS_RX_BPRR);
		/* enable RX */
		iowrite32(TIMBDMA_RX_EN, td_chan->membase + TIMBDMA_OFFS_RX_ER);
	} else {
		/* address high */
		iowrite32(0, td_chan->membase + TIMBDMA_OFFS_TX_DHAR);
		iowrite32(td_desc->txd.phys, td_chan->membase +
			TIMBDMA_OFFS_TX_DLAR);
	}

	td_chan->ongoing = true;

	if (td_desc->interrupt)
		__td_enable_chan_irq(td_chan);
}
static void __td_finish(struct timb_dma_chan *td_chan)
{
	dma_async_tx_callback callback;
	void *param;
	struct dma_async_tx_descriptor *txd;
	struct timb_dma_desc *td_desc;

	/* can happen if the descriptor is canceled */
	if (list_empty(&td_chan->active_list))
		return;

	td_desc = list_entry(td_chan->active_list.next, struct timb_dma_desc,
		desc_node);
	txd = &td_desc->txd;

	dev_dbg(chan2dev(&td_chan->chan), "descriptor %u complete\n",
		txd->cookie);

	/* make sure to stop the transfer */
	if (td_chan->direction == DMA_DEV_TO_MEM)
		iowrite32(0, td_chan->membase + TIMBDMA_OFFS_RX_ER);
	/* Currently no support for stopping DMA transfers
	else
		iowrite32(0, td_chan->membase + TIMBDMA_OFFS_TX_DLAR);
	*/
	dma_cookie_complete(txd);
	td_chan->ongoing = false;

	callback = txd->callback;
	param = txd->callback_param;

	list_move(&td_desc->desc_node, &td_chan->free_list);

	if (!(txd->flags & DMA_COMPL_SKIP_SRC_UNMAP))
		__td_unmap_descs(td_desc,
			txd->flags & DMA_COMPL_SRC_UNMAP_SINGLE);

	/*
	 * The API requires that no submissions are done from a
	 * callback, so we don't need to drop the lock here
	 */
	if (callback)
		callback(param);
}
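/*
 * Build the interrupt enable mask: a channel's bit is set only if it has an
 * ongoing transfer whose active descriptor requested an interrupt
 * (DMA_PREP_INTERRUPT).
 */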
static u32 __td_ier_mask(struct timb_dma *td)
{
	int i;
	u32 ret = 0;

	for (i = 0; i < td->dma.chancnt; i++) {
		struct timb_dma_chan *td_chan = td->channels + i;
		if (td_chan->ongoing) {
			struct timb_dma_desc *td_desc =
				list_entry(td_chan->active_list.next,
					struct timb_dma_desc, desc_node);
			if (td_desc->interrupt)
				ret |= 1 << i;
		}
	}

	return ret;
}
static void __td_start_next(struct timb_dma_chan *td_chan)
{
	struct timb_dma_desc *td_desc;

	BUG_ON(list_empty(&td_chan->queue));
	BUG_ON(td_chan->ongoing);

	td_desc = list_entry(td_chan->queue.next, struct timb_dma_desc,
		desc_node);

	dev_dbg(chan2dev(&td_chan->chan), "%s: started %u\n",
		__func__, td_desc->txd.cookie);

	list_move(&td_desc->desc_node, &td_chan->active_list);
	__td_start_dma(td_chan);
}
static dma_cookie_t td_tx_submit(struct dma_async_tx_descriptor *txd)
{
	struct timb_dma_desc *td_desc = container_of(txd, struct timb_dma_desc,
		txd);
	struct timb_dma_chan *td_chan = container_of(txd->chan,
		struct timb_dma_chan, chan);
	dma_cookie_t cookie;

	spin_lock_bh(&td_chan->lock);
	cookie = dma_cookie_assign(txd);

	if (list_empty(&td_chan->active_list)) {
		dev_dbg(chan2dev(txd->chan), "%s: started %u\n", __func__,
			txd->cookie);
		list_add_tail(&td_desc->desc_node, &td_chan->active_list);
		__td_start_dma(td_chan);
	} else {
		dev_dbg(chan2dev(txd->chan), "tx_submit: queued %u\n",
			txd->cookie);

		list_add_tail(&td_desc->desc_node, &td_chan->queue);
	}

	spin_unlock_bh(&td_chan->lock);

	return cookie;
}
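/*
 * Allocate one software descriptor together with its hardware element list
 * and map the list for device access; the mapping is kept until
 * td_free_desc() tears the descriptor down.
 */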
static struct timb_dma_desc *td_alloc_init_desc(struct timb_dma_chan *td_chan)
{
	struct dma_chan *chan = &td_chan->chan;
	struct timb_dma_desc *td_desc;
	int err;

	td_desc = kzalloc(sizeof(struct timb_dma_desc), GFP_KERNEL);
	if (!td_desc) {
		dev_err(chan2dev(chan), "Failed to alloc descriptor\n");
		goto out;
	}

	td_desc->desc_list_len = td_chan->desc_elems * TIMB_DMA_DESC_SIZE;

	td_desc->desc_list = kzalloc(td_desc->desc_list_len, GFP_KERNEL);
	if (!td_desc->desc_list) {
		dev_err(chan2dev(chan), "Failed to alloc descriptor list\n");
		goto err;
	}

	dma_async_tx_descriptor_init(&td_desc->txd, chan);
	td_desc->txd.tx_submit = td_tx_submit;
	td_desc->txd.flags = DMA_CTRL_ACK;

	td_desc->txd.phys = dma_map_single(chan2dmadev(chan),
		td_desc->desc_list, td_desc->desc_list_len, DMA_TO_DEVICE);

	err = dma_mapping_error(chan2dmadev(chan), td_desc->txd.phys);
	if (err) {
		dev_err(chan2dev(chan), "DMA mapping error: %d\n", err);
		goto err;
	}

	return td_desc;
err:
	kfree(td_desc->desc_list);
	kfree(td_desc);
out:
	return NULL;
}
static void td_free_desc(struct timb_dma_desc *td_desc)
{
	dev_dbg(chan2dev(td_desc->txd.chan), "Freeing desc: %p\n", td_desc);
	dma_unmap_single(chan2dmadev(td_desc->txd.chan), td_desc->txd.phys,
		td_desc->desc_list_len, DMA_TO_DEVICE);

	kfree(td_desc->desc_list);
	kfree(td_desc);
}
static void td_desc_put(struct timb_dma_chan *td_chan,
	struct timb_dma_desc *td_desc)
{
	dev_dbg(chan2dev(&td_chan->chan), "Putting desc: %p\n", td_desc);

	spin_lock_bh(&td_chan->lock);
	list_add(&td_desc->desc_node, &td_chan->free_list);
	spin_unlock_bh(&td_chan->lock);
}
static struct timb_dma_desc *td_desc_get(struct timb_dma_chan *td_chan)
{
	struct timb_dma_desc *td_desc, *_td_desc;
	struct timb_dma_desc *ret = NULL;

	spin_lock_bh(&td_chan->lock);
	list_for_each_entry_safe(td_desc, _td_desc, &td_chan->free_list,
		desc_node) {
		if (async_tx_test_ack(&td_desc->txd)) {
			list_del(&td_desc->desc_node);
			ret = td_desc;
			break;
		}
		dev_dbg(chan2dev(&td_chan->chan), "desc %p not ACKed\n",
			td_desc);
	}
	spin_unlock_bh(&td_chan->lock);

	return ret;
}
static int td_alloc_chan_resources(struct dma_chan *chan)
{
	struct timb_dma_chan *td_chan =
		container_of(chan, struct timb_dma_chan, chan);
	int i;

	dev_dbg(chan2dev(chan), "%s: entry\n", __func__);

	BUG_ON(!list_empty(&td_chan->free_list));
	for (i = 0; i < td_chan->descs; i++) {
		struct timb_dma_desc *td_desc = td_alloc_init_desc(td_chan);
		if (!td_desc) {
			if (i)
				break;
			else {
				dev_err(chan2dev(chan),
					"Couldn't allocate any descriptors\n");
				return -ENOMEM;
			}
		}

		td_desc_put(td_chan, td_desc);
	}

	spin_lock_bh(&td_chan->lock);
	dma_cookie_init(chan);
	spin_unlock_bh(&td_chan->lock);

	return 0;
}
static void td_free_chan_resources(struct dma_chan *chan)
{
	struct timb_dma_chan *td_chan =
		container_of(chan, struct timb_dma_chan, chan);
	struct timb_dma_desc *td_desc, *_td_desc;
	LIST_HEAD(list);

	dev_dbg(chan2dev(chan), "%s: Entry\n", __func__);

	/* check that all descriptors are free */
	BUG_ON(!list_empty(&td_chan->active_list));
	BUG_ON(!list_empty(&td_chan->queue));

	spin_lock_bh(&td_chan->lock);
	list_splice_init(&td_chan->free_list, &list);
	spin_unlock_bh(&td_chan->lock);

	list_for_each_entry_safe(td_desc, _td_desc, &list, desc_node) {
		dev_dbg(chan2dev(chan), "%s: Freeing desc: %p\n", __func__,
			td_desc);
		td_free_desc(td_desc);
	}
}
static enum dma_status td_tx_status(struct dma_chan *chan, dma_cookie_t cookie,
	struct dma_tx_state *txstate)
{
	enum dma_status ret;

	dev_dbg(chan2dev(chan), "%s: Entry\n", __func__);

	ret = dma_cookie_status(chan, cookie, txstate);

	dev_dbg(chan2dev(chan), "%s: exit, ret: %d\n", __func__, ret);

	return ret;
}
static void td_issue_pending(struct dma_chan *chan)
{
	struct timb_dma_chan *td_chan =
		container_of(chan, struct timb_dma_chan, chan);

	dev_dbg(chan2dev(chan), "%s: Entry\n", __func__);
	spin_lock_bh(&td_chan->lock);

	if (!list_empty(&td_chan->active_list))
		/* transfer ongoing */
		if (__td_dma_done_ack(td_chan))
			__td_finish(td_chan);

	if (list_empty(&td_chan->active_list) && !list_empty(&td_chan->queue))
		__td_start_next(td_chan);

	spin_unlock_bh(&td_chan->lock);
}
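/*
 * Prepare a slave transfer: take a free software descriptor, fill one
 * hardware element per scatterlist entry and sync the element list to the
 * device so the controller sees the updated chain.
 */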
static struct dma_async_tx_descriptor *td_prep_slave_sg(struct dma_chan *chan,
	struct scatterlist *sgl, unsigned int sg_len,
	enum dma_transfer_direction direction, unsigned long flags,
	void *context)
{
	struct timb_dma_chan *td_chan =
		container_of(chan, struct timb_dma_chan, chan);
	struct timb_dma_desc *td_desc;
	struct scatterlist *sg;
	unsigned int i;
	unsigned int desc_usage = 0;

	if (!sgl || !sg_len) {
		dev_err(chan2dev(chan), "%s: No SG list\n", __func__);
		return NULL;
	}

	/* even channels are for RX, odd for TX */
	if (td_chan->direction != direction) {
		dev_err(chan2dev(chan),
			"Requesting channel in wrong direction\n");
		return NULL;
	}

	td_desc = td_desc_get(td_chan);
	if (!td_desc) {
		dev_err(chan2dev(chan), "Not enough descriptors available\n");
		return NULL;
	}

	td_desc->interrupt = (flags & DMA_PREP_INTERRUPT) != 0;

	for_each_sg(sgl, sg, sg_len, i) {
		int err;
		if (desc_usage >= td_desc->desc_list_len) {
			dev_err(chan2dev(chan), "No descriptor space\n");
			td_desc_put(td_chan, td_desc);
			return NULL;
		}

		err = td_fill_desc(td_chan, td_desc->desc_list + desc_usage, sg,
			i == (sg_len - 1));
		if (err) {
			dev_err(chan2dev(chan), "Failed to update desc: %d\n",
				err);
			td_desc_put(td_chan, td_desc);
			return NULL;
		}
		desc_usage += TIMB_DMA_DESC_SIZE;
	}

	dma_sync_single_for_device(chan2dmadev(chan), td_desc->txd.phys,
		td_desc->desc_list_len, DMA_TO_DEVICE);

	return &td_desc->txd;
}
static int td_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
	unsigned long arg)
{
	struct timb_dma_chan *td_chan =
		container_of(chan, struct timb_dma_chan, chan);
	struct timb_dma_desc *td_desc, *_td_desc;

	dev_dbg(chan2dev(chan), "%s: Entry\n", __func__);

	if (cmd != DMA_TERMINATE_ALL)
		return -ENXIO;

	/* first the easy part, put the queue into the free list */
	spin_lock_bh(&td_chan->lock);
	list_for_each_entry_safe(td_desc, _td_desc, &td_chan->queue,
		desc_node)
		list_move(&td_desc->desc_node, &td_chan->free_list);

	/* now tear down the running transfer */
	__td_finish(td_chan);
	spin_unlock_bh(&td_chan->lock);

	return 0;
}
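/*
 * Deferred interrupt handling: ack the pending channel interrupts, complete
 * the active descriptor on each signalled channel, start the next queued
 * transfer and re-enable the interrupts that are still wanted.
 */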
static void td_tasklet(unsigned long data)
{
	struct timb_dma *td = (struct timb_dma *)data;
	u32 isr;
	u32 ipr;
	u32 ier;
	int i;

	isr = ioread32(td->membase + TIMBDMA_ISR);
	ipr = isr & __td_ier_mask(td);

	/* ack the interrupts */
	iowrite32(ipr, td->membase + TIMBDMA_ISR);

	for (i = 0; i < td->dma.chancnt; i++)
		if (ipr & (1 << i)) {
			struct timb_dma_chan *td_chan = td->channels + i;
			spin_lock(&td_chan->lock);
			__td_finish(td_chan);
			if (!list_empty(&td_chan->queue))
				__td_start_next(td_chan);
			spin_unlock(&td_chan->lock);
		}

	ier = __td_ier_mask(td);
	iowrite32(ier, td->membase + TIMBDMA_IER);
}
static irqreturn_t td_irq(int irq, void *devid)
{
	struct timb_dma *td = devid;
	u32 ipr = ioread32(td->membase + TIMBDMA_IPR);

	if (ipr) {
		/* disable interrupts, will be re-enabled in tasklet */
		iowrite32(0, td->membase + TIMBDMA_IER);

		tasklet_schedule(&td->tasklet);

		return IRQ_HANDLED;
	} else
		return IRQ_NONE;
}
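/*
 * Probe: map the register window, switch the controller to 32-bit
 * addressing, hook up the dmaengine callbacks and set up one channel per
 * platform-data entry. Even channels are RX, odd are TX; each RX/TX pair
 * shares an instance, with the TX registers at TIMBDMA_INSTANCE_TX_OFFSET
 * within it.
 */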
static int td_probe(struct platform_device *pdev)
{
	struct timb_dma_platform_data *pdata = pdev->dev.platform_data;
	struct timb_dma *td;
	struct resource *iomem;
	int irq;
	int err;
	int i;

	if (!pdata) {
		dev_err(&pdev->dev, "No platform data\n");
		return -EINVAL;
	}

	iomem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!iomem)
		return -EINVAL;

	irq = platform_get_irq(pdev, 0);
	if (irq < 0)
		return irq;

	if (!request_mem_region(iomem->start, resource_size(iomem),
		DRIVER_NAME))
		return -EBUSY;

	td = kzalloc(sizeof(struct timb_dma) +
		sizeof(struct timb_dma_chan) * pdata->nr_channels, GFP_KERNEL);
	if (!td) {
		err = -ENOMEM;
		goto err_release_region;
	}

	dev_dbg(&pdev->dev, "Allocated TD: %p\n", td);

	td->membase = ioremap(iomem->start, resource_size(iomem));
	if (!td->membase) {
		dev_err(&pdev->dev, "Failed to remap I/O memory\n");
		err = -ENOMEM;
		goto err_free_mem;
	}

	/* 32bit addressing */
	iowrite32(TIMBDMA_32BIT_ADDR, td->membase + TIMBDMA_ACR);

	/* disable and clear any interrupts */
	iowrite32(0x0, td->membase + TIMBDMA_IER);
	iowrite32(0xFFFFFFFF, td->membase + TIMBDMA_ISR);

	tasklet_init(&td->tasklet, td_tasklet, (unsigned long)td);

	err = request_irq(irq, td_irq, IRQF_SHARED, DRIVER_NAME, td);
	if (err) {
		dev_err(&pdev->dev, "Failed to request IRQ\n");
		goto err_tasklet_kill;
	}

	td->dma.device_alloc_chan_resources = td_alloc_chan_resources;
	td->dma.device_free_chan_resources = td_free_chan_resources;
	td->dma.device_tx_status = td_tx_status;
	td->dma.device_issue_pending = td_issue_pending;

	dma_cap_set(DMA_SLAVE, td->dma.cap_mask);
	dma_cap_set(DMA_PRIVATE, td->dma.cap_mask);
	td->dma.device_prep_slave_sg = td_prep_slave_sg;
	td->dma.device_control = td_control;

	td->dma.dev = &pdev->dev;

	INIT_LIST_HEAD(&td->dma.channels);

	for (i = 0; i < pdata->nr_channels; i++) {
		struct timb_dma_chan *td_chan = &td->channels[i];
		struct timb_dma_platform_data_channel *pchan =
			pdata->channels + i;

		/* even channels are RX, odd are TX */
		if ((i % 2) == pchan->rx) {
			dev_err(&pdev->dev, "Wrong channel configuration\n");
			err = -EINVAL;
			goto err_free_irq;
		}

		td_chan->chan.device = &td->dma;
		dma_cookie_init(&td_chan->chan);
		spin_lock_init(&td_chan->lock);
		INIT_LIST_HEAD(&td_chan->active_list);
		INIT_LIST_HEAD(&td_chan->queue);
		INIT_LIST_HEAD(&td_chan->free_list);

		td_chan->descs = pchan->descriptors;
		td_chan->desc_elems = pchan->descriptor_elements;
		td_chan->bytes_per_line = pchan->bytes_per_line;
		td_chan->direction = pchan->rx ? DMA_DEV_TO_MEM :
			DMA_MEM_TO_DEV;

		td_chan->membase = td->membase +
			(i / 2) * TIMBDMA_INSTANCE_OFFSET +
			(pchan->rx ? 0 : TIMBDMA_INSTANCE_TX_OFFSET);

		dev_dbg(&pdev->dev, "Chan: %d, membase: %p\n",
			i, td_chan->membase);

		list_add_tail(&td_chan->chan.device_node, &td->dma.channels);
	}

	err = dma_async_device_register(&td->dma);
	if (err) {
		dev_err(&pdev->dev, "Failed to register async device\n");
		goto err_free_irq;
	}

	platform_set_drvdata(pdev, td);

	dev_dbg(&pdev->dev, "Probe result: %d\n", err);
	return err;

err_free_irq:
	free_irq(irq, td);
err_tasklet_kill:
	tasklet_kill(&td->tasklet);
	iounmap(td->membase);
err_free_mem:
	kfree(td);
err_release_region:
	release_mem_region(iomem->start, resource_size(iomem));

	return err;
}
static int td_remove(struct platform_device *pdev)
{
	struct timb_dma *td = platform_get_drvdata(pdev);
	struct resource *iomem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	int irq = platform_get_irq(pdev, 0);

	dma_async_device_unregister(&td->dma);
	free_irq(irq, td);
	tasklet_kill(&td->tasklet);
	iounmap(td->membase);
	kfree(td);
	release_mem_region(iomem->start, resource_size(iomem));

	dev_dbg(&pdev->dev, "Removed...\n");
	return 0;
}
static struct platform_driver td_driver = {
	.driver = {
		.name	= DRIVER_NAME,
		.owner	= THIS_MODULE,
	},
	.probe	= td_probe,
	.remove	= td_remove,
};

module_platform_driver(td_driver);

MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("Timberdale DMA controller driver");
MODULE_AUTHOR("Pelagicore AB <info@pelagicore.com>");
MODULE_ALIAS("platform:"DRIVER_NAME);