/*
 * timb_dma.c timberdale FPGA DMA driver
 * Copyright (c) 2010 Intel Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */

/* Supports:
 * Timberdale FPGA DMA engine
 */

#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/slab.h>

#include <linux/timb_dma.h>

#include "dmaengine.h"

#define DRIVER_NAME "timb-dma"

/* Global DMA registers */
#define TIMBDMA_ACR		0x34
#define TIMBDMA_32BIT_ADDR	0x01

#define TIMBDMA_ISR		0x080000
#define TIMBDMA_IPR		0x080004
#define TIMBDMA_IER		0x080008

/* Channel specific registers */
/* RX instances base addresses are 0x00, 0x40, 0x80 ...
 * TX instances base addresses are 0x18, 0x58, 0x98 ...
 */
#define TIMBDMA_INSTANCE_OFFSET		0x40
#define TIMBDMA_INSTANCE_TX_OFFSET	0x18

/* RX registers, relative the instance base */
#define TIMBDMA_OFFS_RX_DHAR	0x00
#define TIMBDMA_OFFS_RX_DLAR	0x04
#define TIMBDMA_OFFS_RX_LR	0x0C
#define TIMBDMA_OFFS_RX_BLR	0x10
#define TIMBDMA_OFFS_RX_ER	0x14
#define TIMBDMA_RX_EN		0x01
/* bytes per Row, video specific register
 * which is placed after the TX registers...
 */
#define TIMBDMA_OFFS_RX_BPRR	0x30

/* TX registers, relative the instance base */
#define TIMBDMA_OFFS_TX_DHAR	0x00
#define TIMBDMA_OFFS_TX_DLAR	0x04
#define TIMBDMA_OFFS_TX_BLR	0x0C
#define TIMBDMA_OFFS_TX_LR	0x14

#define TIMB_DMA_DESC_SIZE	8

struct timb_dma_desc {
	struct list_head		desc_node;
	struct dma_async_tx_descriptor	txd;
	u8				*desc_list;
	unsigned int			desc_list_len;
	bool				interrupt;
};

struct timb_dma_chan {
	struct dma_chan		chan;
	void __iomem		*membase;
	spinlock_t		lock; /* Used to protect data structures,
					especially the lists and descriptors,
					from races between the tasklet and calls
					from above */
	bool			ongoing;
	struct list_head	active_list;
	struct list_head	queue;
	struct list_head	free_list;
	unsigned int		bytes_per_line;
	enum dma_transfer_direction	direction;
	unsigned int		descs; /* Descriptors to allocate */
	unsigned int		desc_elems; /* number of elems per descriptor */
};

struct timb_dma {
	struct dma_device	dma;
	void __iomem		*membase;
	struct tasklet_struct	tasklet;
	struct timb_dma_chan	channels[0];
};

static struct device *chan2dev(struct dma_chan *chan)
{
	return &chan->dev->device;
}

static struct device *chan2dmadev(struct dma_chan *chan)
{
	return chan2dev(chan)->parent->parent;
}

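/* The channels live in one contiguous allocation directly after
 * struct timb_dma, so the parent device struct can be recovered by
 * stepping back chan_id channel structs plus the device struct itself.
 */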
static struct timb_dma *tdchantotd(struct timb_dma_chan *td_chan)
{
	int id = td_chan->chan.chan_id;
	return (struct timb_dma *)((u8 *)td_chan -
		id * sizeof(struct timb_dma_chan) - sizeof(struct timb_dma));
}

/* Must be called with the spinlock held */
static void __td_enable_chan_irq(struct timb_dma_chan *td_chan)
{
	int id = td_chan->chan.chan_id;
	struct timb_dma *td = tdchantotd(td_chan);
	u32 ier;

	/* enable interrupt for this channel */
	ier = ioread32(td->membase + TIMBDMA_IER);
	ier |= 1 << id;
	dev_dbg(chan2dev(&td_chan->chan), "Enabling irq: %d, IER: 0x%x\n", id,
		ier);
	iowrite32(ier, td->membase + TIMBDMA_IER);
}

/* Should be called with the spinlock held */
static bool __td_dma_done_ack(struct timb_dma_chan *td_chan)
{
	int id = td_chan->chan.chan_id;
	struct timb_dma *td = (struct timb_dma *)((u8 *)td_chan -
		id * sizeof(struct timb_dma_chan) - sizeof(struct timb_dma));
	u32 isr;
	bool done = false;

	dev_dbg(chan2dev(&td_chan->chan), "Checking irq: %d, td: %p\n", id, td);

	isr = ioread32(td->membase + TIMBDMA_ISR) & (1 << id);
	if (isr) {
		iowrite32(isr, td->membase + TIMBDMA_ISR);
		done = true;
	}

	return done;
}

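/* Each hardware descriptor is TIMB_DMA_DESC_SIZE (8) bytes, built
 * byte-wise: bytes 4-7 hold the 32-bit DMA address, bytes 2-3 the
 * 16-bit length (hence the USHRT_MAX check), byte 1 is reserved and
 * byte 0 carries the control flags.
 */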
static int td_fill_desc(struct timb_dma_chan *td_chan, u8 *dma_desc,
	struct scatterlist *sg, bool last)
{
	if (sg_dma_len(sg) > USHRT_MAX) {
		dev_err(chan2dev(&td_chan->chan), "Too big sg element\n");
		return -EINVAL;
	}

	/* length must be word aligned */
	if (sg_dma_len(sg) % sizeof(u32)) {
		dev_err(chan2dev(&td_chan->chan), "Incorrect length: %d\n",
			sg_dma_len(sg));
		return -EINVAL;
	}

	dev_dbg(chan2dev(&td_chan->chan), "desc: %p, addr: 0x%llx\n",
		dma_desc, (unsigned long long)sg_dma_address(sg));

	dma_desc[7] = (sg_dma_address(sg) >> 24) & 0xff;
	dma_desc[6] = (sg_dma_address(sg) >> 16) & 0xff;
	dma_desc[5] = (sg_dma_address(sg) >> 8) & 0xff;
	dma_desc[4] = (sg_dma_address(sg) >> 0) & 0xff;

	dma_desc[3] = (sg_dma_len(sg) >> 8) & 0xff;
	dma_desc[2] = (sg_dma_len(sg) >> 0) & 0xff;

	dma_desc[1] = 0x00;
	dma_desc[0] = 0x21 | (last ? 0x02 : 0);	/* tran, valid */

	return 0;
}

/* Must be called with the spinlock held */
static void __td_start_dma(struct timb_dma_chan *td_chan)
{
	struct timb_dma_desc *td_desc;

	if (td_chan->ongoing) {
		dev_err(chan2dev(&td_chan->chan),
			"Transfer already ongoing\n");
		return;
	}

	td_desc = list_entry(td_chan->active_list.next, struct timb_dma_desc,
		desc_node);

	dev_dbg(chan2dev(&td_chan->chan),
		"td_chan: %p, chan: %d, membase: %p\n",
		td_chan, td_chan->chan.chan_id, td_chan->membase);

	if (td_chan->direction == DMA_DEV_TO_MEM) {

		/* descriptor address */
		iowrite32(0, td_chan->membase + TIMBDMA_OFFS_RX_DHAR);
		iowrite32(td_desc->txd.phys, td_chan->membase +
			TIMBDMA_OFFS_RX_DLAR);
		/* Bytes per line */
		iowrite32(td_chan->bytes_per_line, td_chan->membase +
			TIMBDMA_OFFS_RX_BPRR);
		/* enable RX */
		iowrite32(TIMBDMA_RX_EN, td_chan->membase + TIMBDMA_OFFS_RX_ER);
	} else {
		/* address high */
		iowrite32(0, td_chan->membase + TIMBDMA_OFFS_TX_DHAR);
		iowrite32(td_desc->txd.phys, td_chan->membase +
			TIMBDMA_OFFS_TX_DLAR);
	}

	td_chan->ongoing = true;

	if (td_desc->interrupt)
		__td_enable_chan_irq(td_chan);
}

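/* Must be called with the spinlock held; completes the descriptor at
 * the head of the active list, moves it to the free list and runs its
 * completion callback, if any.
 */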
static void __td_finish(struct timb_dma_chan *td_chan)
{
	dma_async_tx_callback callback;
	void *param;
	struct dma_async_tx_descriptor *txd;
	struct timb_dma_desc *td_desc;

	/* can happen if the descriptor is canceled */
	if (list_empty(&td_chan->active_list))
		return;

	td_desc = list_entry(td_chan->active_list.next, struct timb_dma_desc,
		desc_node);
	txd = &td_desc->txd;

	dev_dbg(chan2dev(&td_chan->chan), "descriptor %u complete\n",
		txd->cookie);

	/* make sure to stop the transfer */
	if (td_chan->direction == DMA_DEV_TO_MEM)
		iowrite32(0, td_chan->membase + TIMBDMA_OFFS_RX_ER);
/* Currently no support for stopping DMA transfers
	else
		iowrite32(0, td_chan->membase + TIMBDMA_OFFS_TX_DLAR);
*/
	dma_cookie_complete(txd);
	td_chan->ongoing = false;

	callback = txd->callback;
	param = txd->callback_param;

	list_move(&td_desc->desc_node, &td_chan->free_list);

	dma_descriptor_unmap(txd);
	/*
	 * The API requires that no submissions are done from a
	 * callback, so we don't need to drop the lock here
	 */
	if (callback)
		callback(param);
}

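/* Build the IER mask: one bit per channel that has an ongoing transfer
 * whose current descriptor asked for an interrupt on completion.
 */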
static u32 __td_ier_mask(struct timb_dma *td)
{
	int i;
	u32 ret = 0;

	for (i = 0; i < td->dma.chancnt; i++) {
		struct timb_dma_chan *td_chan = td->channels + i;
		if (td_chan->ongoing) {
			struct timb_dma_desc *td_desc =
				list_entry(td_chan->active_list.next,
				struct timb_dma_desc, desc_node);
			if (td_desc->interrupt)
				ret |= 1 << i;
		}
	}

	return ret;
}

static void __td_start_next(struct timb_dma_chan *td_chan)
{
	struct timb_dma_desc *td_desc;

	BUG_ON(list_empty(&td_chan->queue));
	BUG_ON(td_chan->ongoing);

	td_desc = list_entry(td_chan->queue.next, struct timb_dma_desc,
		desc_node);

	dev_dbg(chan2dev(&td_chan->chan), "%s: started %u\n",
		__func__, td_desc->txd.cookie);

	list_move(&td_desc->desc_node, &td_chan->active_list);
	__td_start_dma(td_chan);
}

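/* Assign a cookie and either start the transfer right away (idle
 * channel) or park the descriptor on the queue for later.
 */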
static dma_cookie_t td_tx_submit(struct dma_async_tx_descriptor *txd)
{
	struct timb_dma_desc *td_desc = container_of(txd, struct timb_dma_desc,
		txd);
	struct timb_dma_chan *td_chan = container_of(txd->chan,
		struct timb_dma_chan, chan);
	dma_cookie_t cookie;

	spin_lock_bh(&td_chan->lock);
	cookie = dma_cookie_assign(txd);

	if (list_empty(&td_chan->active_list)) {
		dev_dbg(chan2dev(txd->chan), "%s: started %u\n", __func__,
			txd->cookie);
		list_add_tail(&td_desc->desc_node, &td_chan->active_list);
		__td_start_dma(td_chan);
	} else {
		dev_dbg(chan2dev(txd->chan), "tx_submit: queued %u\n",
			txd->cookie);

		list_add_tail(&td_desc->desc_node, &td_chan->queue);
	}

	spin_unlock_bh(&td_chan->lock);

	return cookie;
}

static struct timb_dma_desc *td_alloc_init_desc(struct timb_dma_chan *td_chan)
{
	struct dma_chan *chan = &td_chan->chan;
	struct timb_dma_desc *td_desc;
	int err;

	td_desc = kzalloc(sizeof(struct timb_dma_desc), GFP_KERNEL);
	if (!td_desc) {
		dev_err(chan2dev(chan), "Failed to alloc descriptor\n");
		goto out;
	}

	td_desc->desc_list_len = td_chan->desc_elems * TIMB_DMA_DESC_SIZE;

	td_desc->desc_list = kzalloc(td_desc->desc_list_len, GFP_KERNEL);
	if (!td_desc->desc_list) {
		dev_err(chan2dev(chan), "Failed to alloc descriptor\n");
		goto err;
	}

	dma_async_tx_descriptor_init(&td_desc->txd, chan);
	td_desc->txd.tx_submit = td_tx_submit;
	td_desc->txd.flags = DMA_CTRL_ACK;

	td_desc->txd.phys = dma_map_single(chan2dmadev(chan),
		td_desc->desc_list, td_desc->desc_list_len, DMA_TO_DEVICE);

	err = dma_mapping_error(chan2dmadev(chan), td_desc->txd.phys);
	if (err) {
		dev_err(chan2dev(chan), "DMA mapping error: %d\n", err);
		goto err;
	}

	return td_desc;
err:
	kfree(td_desc->desc_list);
	kfree(td_desc);
out:
	return NULL;
}

static void td_free_desc(struct timb_dma_desc *td_desc)
{
	dev_dbg(chan2dev(td_desc->txd.chan), "Freeing desc: %p\n", td_desc);
	dma_unmap_single(chan2dmadev(td_desc->txd.chan), td_desc->txd.phys,
		td_desc->desc_list_len, DMA_TO_DEVICE);

	kfree(td_desc->desc_list);
	kfree(td_desc);
}

static void td_desc_put(struct timb_dma_chan *td_chan,
	struct timb_dma_desc *td_desc)
{
	dev_dbg(chan2dev(&td_chan->chan), "Putting desc: %p\n", td_desc);

	spin_lock_bh(&td_chan->lock);
	list_add(&td_desc->desc_node, &td_chan->free_list);
	spin_unlock_bh(&td_chan->lock);
}

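/* Take the first free descriptor that the client has ACKed;
 * descriptors still awaiting an ACK are skipped and left on the
 * free list.
 */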
static struct timb_dma_desc *td_desc_get(struct timb_dma_chan *td_chan)
{
	struct timb_dma_desc *td_desc, *_td_desc;
	struct timb_dma_desc *ret = NULL;

	spin_lock_bh(&td_chan->lock);
	list_for_each_entry_safe(td_desc, _td_desc, &td_chan->free_list,
		desc_node) {
		if (async_tx_test_ack(&td_desc->txd)) {
			list_del(&td_desc->desc_node);
			ret = td_desc;
			break;
		}
		dev_dbg(chan2dev(&td_chan->chan), "desc %p not ACKed\n",
			td_desc);
	}
	spin_unlock_bh(&td_chan->lock);

	return ret;
}

static int td_alloc_chan_resources(struct dma_chan *chan)
{
	struct timb_dma_chan *td_chan =
		container_of(chan, struct timb_dma_chan, chan);
	int i;

	dev_dbg(chan2dev(chan), "%s: entry\n", __func__);

	BUG_ON(!list_empty(&td_chan->free_list));
	for (i = 0; i < td_chan->descs; i++) {
		struct timb_dma_desc *td_desc = td_alloc_init_desc(td_chan);
		if (!td_desc) {
			if (i)
				break;
			else {
				dev_err(chan2dev(chan),
					"Couldn't allocate any descriptors\n");
				return -ENOMEM;
			}
		}

		td_desc_put(td_chan, td_desc);
	}

	spin_lock_bh(&td_chan->lock);
	dma_cookie_init(chan);
	spin_unlock_bh(&td_chan->lock);

	return 0;
}

static void td_free_chan_resources(struct dma_chan *chan)
{
	struct timb_dma_chan *td_chan =
		container_of(chan, struct timb_dma_chan, chan);
	struct timb_dma_desc *td_desc, *_td_desc;
	LIST_HEAD(list);

	dev_dbg(chan2dev(chan), "%s: Entry\n", __func__);

	/* check that all descriptors are free */
	BUG_ON(!list_empty(&td_chan->active_list));
	BUG_ON(!list_empty(&td_chan->queue));

	spin_lock_bh(&td_chan->lock);
	list_splice_init(&td_chan->free_list, &list);
	spin_unlock_bh(&td_chan->lock);

	list_for_each_entry_safe(td_desc, _td_desc, &list, desc_node) {
		dev_dbg(chan2dev(chan), "%s: Freeing desc: %p\n", __func__,
			td_desc);
		td_free_desc(td_desc);
	}
}

static enum dma_status td_tx_status(struct dma_chan *chan, dma_cookie_t cookie,
				    struct dma_tx_state *txstate)
{
	enum dma_status ret;

	dev_dbg(chan2dev(chan), "%s: Entry\n", __func__);

	ret = dma_cookie_status(chan, cookie, txstate);

	dev_dbg(chan2dev(chan), "%s: exit, ret: %d\n", __func__, ret);

	return ret;
}

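/* Reap a finished transfer, if the hardware has acked one, then kick
 * off the next queued descriptor on an idle channel.
 */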
static void td_issue_pending(struct dma_chan *chan)
{
	struct timb_dma_chan *td_chan =
		container_of(chan, struct timb_dma_chan, chan);

	dev_dbg(chan2dev(chan), "%s: Entry\n", __func__);
	spin_lock_bh(&td_chan->lock);

	if (!list_empty(&td_chan->active_list))
		/* transfer ongoing */
		if (__td_dma_done_ack(td_chan))
			__td_finish(td_chan);

	if (list_empty(&td_chan->active_list) && !list_empty(&td_chan->queue))
		__td_start_next(td_chan);

	spin_unlock_bh(&td_chan->lock);
}

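/* Translate a scatterlist into the channel's hardware descriptor list
 * and hand back a tx descriptor ready for submission.
 */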
static struct dma_async_tx_descriptor *td_prep_slave_sg(struct dma_chan *chan,
	struct scatterlist *sgl, unsigned int sg_len,
	enum dma_transfer_direction direction, unsigned long flags,
	void *context)
{
	struct timb_dma_chan *td_chan =
		container_of(chan, struct timb_dma_chan, chan);
	struct timb_dma_desc *td_desc;
	struct scatterlist *sg;
	unsigned int i;
	unsigned int desc_usage = 0;

	if (!sgl || !sg_len) {
		dev_err(chan2dev(chan), "%s: No SG list\n", __func__);
		return NULL;
	}

	/* even channels are for RX, odd for TX */
	if (td_chan->direction != direction) {
		dev_err(chan2dev(chan),
			"Requesting channel in wrong direction\n");
		return NULL;
	}

	td_desc = td_desc_get(td_chan);
	if (!td_desc) {
		dev_err(chan2dev(chan), "Not enough descriptors available\n");
		return NULL;
	}

	td_desc->interrupt = (flags & DMA_PREP_INTERRUPT) != 0;

	for_each_sg(sgl, sg, sg_len, i) {
		int err;
		if (desc_usage > td_desc->desc_list_len) {
			dev_err(chan2dev(chan), "No descriptor space\n");
			/* return the descriptor to the free list */
			td_desc_put(td_chan, td_desc);
			return NULL;
		}

		err = td_fill_desc(td_chan, td_desc->desc_list + desc_usage, sg,
			i == (sg_len - 1));
		if (err) {
			dev_err(chan2dev(chan), "Failed to update desc: %d\n",
				err);
			td_desc_put(td_chan, td_desc);
			return NULL;
		}
		desc_usage += TIMB_DMA_DESC_SIZE;
	}

	dma_sync_single_for_device(chan2dmadev(chan), td_desc->txd.phys,
		td_desc->desc_list_len, DMA_MEM_TO_DEV);

	return &td_desc->txd;
}

static int td_terminate_all(struct dma_chan *chan)
{
	struct timb_dma_chan *td_chan =
		container_of(chan, struct timb_dma_chan, chan);
	struct timb_dma_desc *td_desc, *_td_desc;

	dev_dbg(chan2dev(chan), "%s: Entry\n", __func__);

	/* first the easy part, put the queue into the free list */
	spin_lock_bh(&td_chan->lock);
	list_for_each_entry_safe(td_desc, _td_desc, &td_chan->queue,
		desc_node)
		list_move(&td_desc->desc_node, &td_chan->free_list);

	/* now tear down the running */
	__td_finish(td_chan);
	spin_unlock_bh(&td_chan->lock);

	return 0;
}

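/* Bottom half: ack and handle the per-channel completions that the irq
 * handler deferred, then re-enable the interrupts it masked off.
 */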
static void td_tasklet(unsigned long data)
{
	struct timb_dma *td = (struct timb_dma *)data;
	u32 isr;
	u32 ipr;
	u32 ier;
	int i;

	isr = ioread32(td->membase + TIMBDMA_ISR);
	ipr = isr & __td_ier_mask(td);

	/* ack the interrupts */
	iowrite32(ipr, td->membase + TIMBDMA_ISR);

	for (i = 0; i < td->dma.chancnt; i++)
		if (ipr & (1 << i)) {
			struct timb_dma_chan *td_chan = td->channels + i;
			spin_lock(&td_chan->lock);
			__td_finish(td_chan);
			if (!list_empty(&td_chan->queue))
				__td_start_next(td_chan);
			spin_unlock(&td_chan->lock);
		}

	ier = __td_ier_mask(td);
	iowrite32(ier, td->membase + TIMBDMA_IER);
}

static irqreturn_t td_irq(int irq, void *devid)
{
	struct timb_dma *td = devid;
	u32 ipr = ioread32(td->membase + TIMBDMA_IPR);

	if (ipr) {
		/* disable interrupts, will be re-enabled in tasklet */
		iowrite32(0, td->membase + TIMBDMA_IER);

		tasklet_schedule(&td->tasklet);

		return IRQ_HANDLED;
	} else
		return IRQ_NONE;
}

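/* Probe: map the register window, hook up the shared interrupt and
 * register one DMA channel per entry in the platform data, RX on even
 * indices and TX on odd.
 */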
static int td_probe(struct platform_device *pdev)
{
	struct timb_dma_platform_data *pdata = dev_get_platdata(&pdev->dev);
	struct timb_dma *td;
	struct resource *iomem;
	int irq;
	int err;
	int i;

	if (!pdata) {
		dev_err(&pdev->dev, "No platform data\n");
		return -EINVAL;
	}

	iomem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!iomem)
		return -EINVAL;

	irq = platform_get_irq(pdev, 0);
	if (irq < 0)
		return irq;

	if (!request_mem_region(iomem->start, resource_size(iomem),
		DRIVER_NAME))
		return -EBUSY;

	td = kzalloc(sizeof(struct timb_dma) +
		sizeof(struct timb_dma_chan) * pdata->nr_channels, GFP_KERNEL);
	if (!td) {
		err = -ENOMEM;
		goto err_release_region;
	}

	dev_dbg(&pdev->dev, "Allocated TD: %p\n", td);

	td->membase = ioremap(iomem->start, resource_size(iomem));
	if (!td->membase) {
		dev_err(&pdev->dev, "Failed to remap I/O memory\n");
		err = -ENOMEM;
		goto err_free_mem;
	}

	/* 32bit addressing */
	iowrite32(TIMBDMA_32BIT_ADDR, td->membase + TIMBDMA_ACR);

	/* disable and clear any interrupts */
	iowrite32(0x0, td->membase + TIMBDMA_IER);
	iowrite32(0xFFFFFFFF, td->membase + TIMBDMA_ISR);

	tasklet_init(&td->tasklet, td_tasklet, (unsigned long)td);

	err = request_irq(irq, td_irq, IRQF_SHARED, DRIVER_NAME, td);
	if (err) {
		dev_err(&pdev->dev, "Failed to request IRQ\n");
		goto err_tasklet_kill;
	}

	td->dma.device_alloc_chan_resources = td_alloc_chan_resources;
	td->dma.device_free_chan_resources = td_free_chan_resources;
	td->dma.device_tx_status = td_tx_status;
	td->dma.device_issue_pending = td_issue_pending;

	dma_cap_set(DMA_SLAVE, td->dma.cap_mask);
	dma_cap_set(DMA_PRIVATE, td->dma.cap_mask);
	td->dma.device_prep_slave_sg = td_prep_slave_sg;
	td->dma.device_terminate_all = td_terminate_all;

	td->dma.dev = &pdev->dev;

	INIT_LIST_HEAD(&td->dma.channels);

	for (i = 0; i < pdata->nr_channels; i++) {
		struct timb_dma_chan *td_chan = &td->channels[i];
		struct timb_dma_platform_data_channel *pchan =
			pdata->channels + i;

		/* even channels are RX, odd are TX */
		if ((i % 2) == pchan->rx) {
			dev_err(&pdev->dev, "Wrong channel configuration\n");
			err = -EINVAL;
			goto err_free_irq;
		}

		td_chan->chan.device = &td->dma;
		dma_cookie_init(&td_chan->chan);
		spin_lock_init(&td_chan->lock);
		INIT_LIST_HEAD(&td_chan->active_list);
		INIT_LIST_HEAD(&td_chan->queue);
		INIT_LIST_HEAD(&td_chan->free_list);

		td_chan->descs = pchan->descriptors;
		td_chan->desc_elems = pchan->descriptor_elements;
		td_chan->bytes_per_line = pchan->bytes_per_line;
		td_chan->direction = pchan->rx ? DMA_DEV_TO_MEM :
			DMA_MEM_TO_DEV;

		td_chan->membase = td->membase +
			(i / 2) * TIMBDMA_INSTANCE_OFFSET +
			(pchan->rx ? 0 : TIMBDMA_INSTANCE_TX_OFFSET);

		dev_dbg(&pdev->dev, "Chan: %d, membase: %p\n",
			i, td_chan->membase);

		list_add_tail(&td_chan->chan.device_node, &td->dma.channels);
	}

	err = dma_async_device_register(&td->dma);
	if (err) {
		dev_err(&pdev->dev, "Failed to register async device\n");
		goto err_free_irq;
	}

	platform_set_drvdata(pdev, td);

	dev_dbg(&pdev->dev, "Probe result: %d\n", err);
	return err;

err_free_irq:
	free_irq(irq, td);
err_tasklet_kill:
	tasklet_kill(&td->tasklet);
	iounmap(td->membase);
err_free_mem:
	kfree(td);
err_release_region:
	release_mem_region(iomem->start, resource_size(iomem));

	return err;
}

static int td_remove(struct platform_device *pdev)
{
	struct timb_dma *td = platform_get_drvdata(pdev);
	struct resource *iomem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	int irq = platform_get_irq(pdev, 0);

	dma_async_device_unregister(&td->dma);
	free_irq(irq, td);
	tasklet_kill(&td->tasklet);
	iounmap(td->membase);
	kfree(td);
	release_mem_region(iomem->start, resource_size(iomem));

	dev_dbg(&pdev->dev, "Removed...\n");
	return 0;
}

static struct platform_driver td_driver = {
	.driver = {
		.name	= DRIVER_NAME,
	},
	.probe	= td_probe,
	.remove	= td_remove,
};

module_platform_driver(td_driver);

MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("Timberdale DMA controller driver");
MODULE_AUTHOR("Pelagicore AB <info@pelagicore.com>");
MODULE_ALIAS("platform:"DRIVER_NAME);