/*
 * Renesas SuperH DMA Engine support
 *
 * base is drivers/dma/fsldma.c
 *
 * Copyright (C) 2009 Nobuhiro Iwamatsu <iwamatsu.nobuhiro@renesas.com>
 * Copyright (C) 2009 Renesas Solutions, Inc. All rights reserved.
 * Copyright (C) 2007 Freescale Semiconductor, Inc. All rights reserved.
 *
 * This is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * - DMA of SuperH does not have Hardware DMA chain mode.
 * - MAX DMA size is 16MB.
 *
 */
#include <linux/init.h>
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/dmaengine.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/platform_device.h>
#include <cpu/dma.h>
#include <asm/dma-sh.h>
#include "shdma.h"

/* DMA descriptor control */
enum sh_dmae_desc_status {
        DESC_IDLE,
        DESC_PREPARED,
        DESC_SUBMITTED,
        DESC_COMPLETED, /* completed, have to call callback */
        DESC_WAITING,   /* callback called, waiting for ack / re-submit */
};

#define NR_DESCS_PER_CHANNEL 32

/*
 * Define the default configuration for dual address memory-memory transfer.
 * The 0x400 value represents auto-request, external->external.
 *
 * This driver sets 4-byte burst mode.
 * To change the mode, change the value of RS_DEFAULT,
 * e.g. for 1-byte burst mode: (RS_DUAL & ~TS_32).
 */
#define RS_DEFAULT      (RS_DUAL)

static void sh_dmae_chan_ld_cleanup(struct sh_dmae_chan *sh_chan, bool all);

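/*
 * Per-channel register access helpers. Each channel's register block is
 * looked up by channel id through dma_base_addr[], so SAR/DAR/TCR/CHCR
 * accesses below are always relative to the owning channel.
 */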
#define SH_DMAC_CHAN_BASE(id) (dma_base_addr[id])
static void sh_dmae_writel(struct sh_dmae_chan *sh_dc, u32 data, u32 reg)
{
        ctrl_outl(data, (SH_DMAC_CHAN_BASE(sh_dc->id) + reg));
}

static u32 sh_dmae_readl(struct sh_dmae_chan *sh_dc, u32 reg)
{
        return ctrl_inl((SH_DMAC_CHAN_BASE(sh_dc->id) + reg));
}

static void dmae_init(struct sh_dmae_chan *sh_chan)
{
        u32 chcr = RS_DEFAULT; /* default is DUAL mode */
        sh_dmae_writel(sh_chan, chcr, CHCR);
}

/*
 * Reset DMA controller
 *
 * SH7780 has two DMAOR registers
 */
static void sh_dmae_ctl_stop(int id)
{
        unsigned short dmaor = dmaor_read_reg(id);

        dmaor &= ~(DMAOR_NMIF | DMAOR_AE);
        dmaor_write_reg(id, dmaor);
}

static int sh_dmae_rst(int id)
{
        unsigned short dmaor;

        sh_dmae_ctl_stop(id);
        dmaor = dmaor_read_reg(id) | DMAOR_INIT;

        dmaor_write_reg(id, dmaor);
        if (dmaor_read_reg(id) & (DMAOR_AE | DMAOR_NMIF)) {
                pr_warning("dma-sh: Can't initialize DMAOR.\n");
                return -EINVAL;
        }
        return 0;
}

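/*
 * A channel is busy while CHCR_DE is set and CHCR_TE (transfer end) has not
 * been raised yet; in that state the channel registers must not be updated.
 */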
static int dmae_is_busy(struct sh_dmae_chan *sh_chan)
{
        u32 chcr = sh_dmae_readl(sh_chan, CHCR);
        if (chcr & CHCR_DE) {
                if (!(chcr & CHCR_TE))
                        return -EBUSY; /* working */
        }
        return 0; /* waiting */
}

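/*
 * The TS bits in CHCR select the transfer unit; ts_shift[] maps them to a
 * power-of-two shift, i.e. bytes per transfer = 1 << shift.
 */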
static inline unsigned int calc_xmit_shift(struct sh_dmae_chan *sh_chan)
{
        u32 chcr = sh_dmae_readl(sh_chan, CHCR);
        return ts_shift[(chcr & CHCR_TS_MASK) >> CHCR_TS_SHIFT];
}

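/*
 * Program one hardware descriptor: TCR counts transfer units, not bytes,
 * hence the byte length is shifted down by the current transfer-size shift.
 */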
static void dmae_set_reg(struct sh_dmae_chan *sh_chan, struct sh_dmae_regs *hw)
{
        sh_dmae_writel(sh_chan, hw->sar, SAR);
        sh_dmae_writel(sh_chan, hw->dar, DAR);
        sh_dmae_writel(sh_chan, hw->tcr >> calc_xmit_shift(sh_chan), TCR);
}

static void dmae_start(struct sh_dmae_chan *sh_chan)
{
        u32 chcr = sh_dmae_readl(sh_chan, CHCR);

        chcr |= CHCR_DE | CHCR_IE;
        sh_dmae_writel(sh_chan, chcr, CHCR);
}

static void dmae_halt(struct sh_dmae_chan *sh_chan)
{
        u32 chcr = sh_dmae_readl(sh_chan, CHCR);

        chcr &= ~(CHCR_DE | CHCR_TE | CHCR_IE);
        sh_dmae_writel(sh_chan, chcr, CHCR);
}

static int dmae_set_chcr(struct sh_dmae_chan *sh_chan, u32 val)
{
        int ret = dmae_is_busy(sh_chan);
        /* CHCR must not be written while the DMA channel is busy */
        if (ret)
                return ret;

        sh_dmae_writel(sh_chan, val, CHCR);
        return 0;
}

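/*
 * The DMARS registers select the peripheral request source. Each 16-bit
 * register serves a pair of channels; the odd channel of the pair uses the
 * upper byte, hence the DMARS_SHIFT below.
 */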
#define DMARS1_ADDR     0x04
#define DMARS2_ADDR     0x08
#define DMARS_SHIFT     8
#define DMARS_CHAN_MSK  0x01
static int dmae_set_dmars(struct sh_dmae_chan *sh_chan, u16 val)
{
        u32 addr;
        int shift = 0;
        int ret = dmae_is_busy(sh_chan);
        if (ret)
                return ret;

        if (sh_chan->id & DMARS_CHAN_MSK)
                shift = DMARS_SHIFT;

        switch (sh_chan->id) {
        /* DMARS0 */
        case 0:
        case 1:
                addr = SH_DMARS_BASE;
                break;
        /* DMARS1 */
        case 2:
        case 3:
                addr = (SH_DMARS_BASE + DMARS1_ADDR);
                break;
        /* DMARS2 */
        case 4:
        case 5:
                addr = (SH_DMARS_BASE + DMARS2_ADDR);
                break;
        default:
                return -EINVAL;
        }

        ctrl_outw((val << shift) |
                  (ctrl_inw(addr) & (shift ? 0x00FF : 0xFF00)),
                  addr);

        return 0;
}

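/*
 * Assign the next cookie to this transaction and mark every chunk belonging
 * to it as submitted, moving the chunks from ld_free to ld_queue. Only the
 * last chunk keeps the user's callback.
 */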
static dma_cookie_t sh_dmae_tx_submit(struct dma_async_tx_descriptor *tx)
{
        struct sh_desc *desc = tx_to_sh_desc(tx), *chunk, *last = desc, *c;
        struct sh_dmae_chan *sh_chan = to_sh_chan(tx->chan);
        dma_async_tx_callback callback = tx->callback;
        dma_cookie_t cookie;

        spin_lock_bh(&sh_chan->desc_lock);

        cookie = sh_chan->common.cookie;
        cookie++;
        if (cookie < 0)
                cookie = 1;

        sh_chan->common.cookie = cookie;
        tx->cookie = cookie;

        /* Mark all chunks of this descriptor as submitted, move to the queue */
        list_for_each_entry_safe(chunk, c, desc->node.prev, node) {
                /*
                 * All chunks are on the global ld_free, so, we have to find
                 * the end of the chain ourselves
                 */
                if (chunk != desc && (chunk->mark == DESC_IDLE ||
                                      chunk->async_tx.cookie > 0 ||
                                      chunk->async_tx.cookie == -EBUSY ||
                                      &chunk->node == &sh_chan->ld_free))
                        break;
                chunk->mark = DESC_SUBMITTED;
                /* Callback goes to the last chunk */
                chunk->async_tx.callback = NULL;
                chunk->cookie = cookie;
                list_move_tail(&chunk->node, &sh_chan->ld_queue);
                last = chunk;
        }

        last->async_tx.callback = callback;
        last->async_tx.callback_param = tx->callback_param;

        dev_dbg(sh_chan->dev, "submit #%d@%p on %d: %x[%d] -> %x\n",
                tx->cookie, &last->async_tx, sh_chan->id,
                desc->hw.sar, desc->hw.tcr, desc->hw.dar);

        spin_unlock_bh(&sh_chan->desc_lock);

        return cookie;
}

/* Called with desc_lock held */
static struct sh_desc *sh_dmae_get_desc(struct sh_dmae_chan *sh_chan)
{
        struct sh_desc *desc;

        list_for_each_entry(desc, &sh_chan->ld_free, node)
                if (desc->mark != DESC_PREPARED) {
                        BUG_ON(desc->mark != DESC_IDLE);
                        list_del(&desc->node);
                        return desc;
                }

        return NULL;
}

static int sh_dmae_alloc_chan_resources(struct dma_chan *chan)
{
        struct sh_dmae_chan *sh_chan = to_sh_chan(chan);
        struct sh_desc *desc;

        spin_lock_bh(&sh_chan->desc_lock);
        while (sh_chan->descs_allocated < NR_DESCS_PER_CHANNEL) {
                spin_unlock_bh(&sh_chan->desc_lock);
                desc = kzalloc(sizeof(struct sh_desc), GFP_KERNEL);
                if (!desc) {
                        spin_lock_bh(&sh_chan->desc_lock);
                        break;
                }
                dma_async_tx_descriptor_init(&desc->async_tx,
                                             &sh_chan->common);
                desc->async_tx.tx_submit = sh_dmae_tx_submit;
                desc->mark = DESC_IDLE;

                spin_lock_bh(&sh_chan->desc_lock);
                list_add(&desc->node, &sh_chan->ld_free);
                sh_chan->descs_allocated++;
        }
        spin_unlock_bh(&sh_chan->desc_lock);

        return sh_chan->descs_allocated;
}

/*
 * sh_dmae_free_chan_resources - Free all resources of the channel.
 */
static void sh_dmae_free_chan_resources(struct dma_chan *chan)
{
        struct sh_dmae_chan *sh_chan = to_sh_chan(chan);
        struct sh_desc *desc, *_desc;
        LIST_HEAD(list);

        /* Prepared and not submitted descriptors can still be on the queue */
        if (!list_empty(&sh_chan->ld_queue))
                sh_dmae_chan_ld_cleanup(sh_chan, true);

        spin_lock_bh(&sh_chan->desc_lock);

        list_splice_init(&sh_chan->ld_free, &list);
        sh_chan->descs_allocated = 0;

        spin_unlock_bh(&sh_chan->desc_lock);

        list_for_each_entry_safe(desc, _desc, &list, node)
                kfree(desc);
}

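/*
 * Illustrative only (not part of this driver): a dmaengine client would
 * typically exercise the memcpy path below roughly like this, assuming
 * `src_dma' and `dst_dma' are already DMA-mapped addresses of length `len':
 *
 *      dma_cap_mask_t mask;
 *      struct dma_chan *chan;
 *      struct dma_async_tx_descriptor *tx;
 *      dma_cookie_t cookie;
 *
 *      dma_cap_zero(mask);
 *      dma_cap_set(DMA_MEMCPY, mask);
 *      chan = dma_request_channel(mask, NULL, NULL);
 *      if (!chan)
 *              return;
 *      tx = chan->device->device_prep_dma_memcpy(chan, dst_dma, src_dma,
 *                                                len, DMA_CTRL_ACK);
 *      cookie = tx->tx_submit(tx);
 *      dma_async_issue_pending(chan);
 *      while (dma_async_is_tx_complete(chan, cookie, NULL, NULL) !=
 *             DMA_SUCCESS)
 *              cpu_relax();
 *      dma_release_channel(chan);
 */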
static struct dma_async_tx_descriptor *sh_dmae_prep_memcpy(
        struct dma_chan *chan, dma_addr_t dma_dest, dma_addr_t dma_src,
        size_t len, unsigned long flags)
{
        struct sh_dmae_chan *sh_chan;
        struct sh_desc *first = NULL, *prev = NULL, *new;
        size_t copy_size;
        LIST_HEAD(tx_list);
        int chunks = (len + SH_DMA_TCR_MAX) / (SH_DMA_TCR_MAX + 1);

        if (!chan)
                return NULL;

        if (!len)
                return NULL;

        sh_chan = to_sh_chan(chan);

        /* Have to lock the whole loop to protect against concurrent release */
        spin_lock_bh(&sh_chan->desc_lock);

        /*
         * Chaining:
         * first descriptor is what user is dealing with in all API calls, its
         *      cookie is at first set to -EBUSY, at tx-submit to a positive
         *      number
         * if more than one chunk is needed further chunks have cookie = -EINVAL
         * the last chunk, if not equal to the first, has cookie = -ENOSPC
         * all chunks are linked onto the tx_list head with their .node heads
         *      only during this function, then they are immediately spliced
         *      back onto the free list in form of a chain
         */
        do {
                /* Allocate the link descriptor from the free list */
                new = sh_dmae_get_desc(sh_chan);
                if (!new) {
                        dev_err(sh_chan->dev,
                                "No free memory for link descriptor\n");
                        list_for_each_entry(new, &tx_list, node)
                                new->mark = DESC_IDLE;
                        list_splice(&tx_list, &sh_chan->ld_free);
                        spin_unlock_bh(&sh_chan->desc_lock);
                        return NULL;
                }

                copy_size = min(len, (size_t)SH_DMA_TCR_MAX + 1);

                new->hw.sar = dma_src;
                new->hw.dar = dma_dest;
                new->hw.tcr = copy_size;
                if (!first) {
                        /* First desc */
                        new->async_tx.cookie = -EBUSY;
                        first = new;
                } else {
                        /* Other desc - invisible to the user */
                        new->async_tx.cookie = -EINVAL;
                }

                dev_dbg(sh_chan->dev,
                        "chaining %u of %u with %p, dst %x, cookie %d\n",
                        copy_size, len, &new->async_tx, dma_dest,
                        new->async_tx.cookie);

                new->mark = DESC_PREPARED;
                new->async_tx.flags = flags;
                new->chunks = chunks--;

                prev = new;
                len -= copy_size;
                dma_src += copy_size;
                dma_dest += copy_size;
                /* Insert the link descriptor to the LD ring */
                list_add_tail(&new->node, &tx_list);
        } while (len);

        if (new != first)
                new->async_tx.cookie = -ENOSPC;

        /* Put them back on the free list, so, they don't get lost */
        list_splice_tail(&tx_list, &sh_chan->ld_free);

        spin_unlock_bh(&sh_chan->desc_lock);

        return &first->async_tx;
}

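/*
 * Walk ld_queue once: retire completed descriptors, update completed_cookie,
 * and run at most one user callback per invocation. Returns the callback that
 * was run (or NULL), so the caller can loop until the queue is clean.
 */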
static dma_async_tx_callback __ld_cleanup(struct sh_dmae_chan *sh_chan, bool all)
{
        struct sh_desc *desc, *_desc;
        /* Is the "exposed" head of a chain acked? */
        bool head_acked = false;
        dma_cookie_t cookie = 0;
        dma_async_tx_callback callback = NULL;
        void *param = NULL;

        spin_lock_bh(&sh_chan->desc_lock);
        list_for_each_entry_safe(desc, _desc, &sh_chan->ld_queue, node) {
                struct dma_async_tx_descriptor *tx = &desc->async_tx;

                BUG_ON(tx->cookie > 0 && tx->cookie != desc->cookie);
                BUG_ON(desc->mark != DESC_SUBMITTED &&
                       desc->mark != DESC_COMPLETED &&
                       desc->mark != DESC_WAITING);

                /*
                 * queue is ordered, and we use this loop to (1) clean up all
                 * completed descriptors, and to (2) update descriptor flags of
                 * any chunks in a (partially) completed chain
                 */
                if (!all && desc->mark == DESC_SUBMITTED &&
                    desc->cookie != cookie)
                        break;

                if (tx->cookie > 0)
                        cookie = tx->cookie;

                if (desc->mark == DESC_COMPLETED && desc->chunks == 1) {
                        BUG_ON(sh_chan->completed_cookie != desc->cookie - 1);
                        sh_chan->completed_cookie = desc->cookie;
                }

                /* Call callback on the last chunk */
                if (desc->mark == DESC_COMPLETED && tx->callback) {
                        desc->mark = DESC_WAITING;
                        callback = tx->callback;
                        param = tx->callback_param;
                        dev_dbg(sh_chan->dev, "descriptor #%d@%p on %d callback\n",
                                tx->cookie, tx, sh_chan->id);
                        BUG_ON(desc->chunks != 1);
                        break;
                }

                if (tx->cookie > 0 || tx->cookie == -EBUSY) {
                        if (desc->mark == DESC_COMPLETED) {
                                BUG_ON(tx->cookie < 0);
                                desc->mark = DESC_WAITING;
                        }
                        head_acked = async_tx_test_ack(tx);
                } else {
                        switch (desc->mark) {
                        case DESC_COMPLETED:
                                desc->mark = DESC_WAITING;
                                /* Fall through */
                        case DESC_WAITING:
                                if (head_acked)
                                        async_tx_ack(&desc->async_tx);
                        }
                }

                dev_dbg(sh_chan->dev, "descriptor %p #%d completed.\n",
                        tx, tx->cookie);

                if (((desc->mark == DESC_COMPLETED ||
                      desc->mark == DESC_WAITING) &&
                     async_tx_test_ack(&desc->async_tx)) || all) {
                        /* Remove from ld_queue list */
                        desc->mark = DESC_IDLE;
                        list_move(&desc->node, &sh_chan->ld_free);
                }
        }
        spin_unlock_bh(&sh_chan->desc_lock);

        if (callback)
                callback(param);

        return callback;
}

/*
 * sh_dmae_chan_ld_cleanup - Clean up link descriptors
 *
 * This function cleans up the ld_queue of the DMA channel.
 */
static void sh_dmae_chan_ld_cleanup(struct sh_dmae_chan *sh_chan, bool all)
{
        while (__ld_cleanup(sh_chan, all))
                ;
}

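/*
 * Start the hardware on the first still-submitted descriptor in ld_queue,
 * unless a transfer is already in flight.
 */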
static void sh_chan_xfer_ld_queue(struct sh_dmae_chan *sh_chan)
{
        struct sh_desc *sd;

        spin_lock_bh(&sh_chan->desc_lock);
        /* DMA work check */
        if (dmae_is_busy(sh_chan)) {
                spin_unlock_bh(&sh_chan->desc_lock);
                return;
        }

        /* Find the first descriptor not yet transferred */
        list_for_each_entry(sd, &sh_chan->ld_queue, node)
                if (sd->mark == DESC_SUBMITTED) {
                        /* Get the ld start address from ld_queue */
                        dmae_set_reg(sh_chan, &sd->hw);
                        dmae_start(sh_chan);
                        break;
                }

        spin_unlock_bh(&sh_chan->desc_lock);
}

static void sh_dmae_memcpy_issue_pending(struct dma_chan *chan)
{
        struct sh_dmae_chan *sh_chan = to_sh_chan(chan);
        sh_chan_xfer_ld_queue(sh_chan);
}

static enum dma_status sh_dmae_is_complete(struct dma_chan *chan,
                                           dma_cookie_t cookie,
                                           dma_cookie_t *done,
                                           dma_cookie_t *used)
{
        struct sh_dmae_chan *sh_chan = to_sh_chan(chan);
        dma_cookie_t last_used;
        dma_cookie_t last_complete;

        sh_dmae_chan_ld_cleanup(sh_chan, false);

        last_used = chan->cookie;
        last_complete = sh_chan->completed_cookie;
        BUG_ON(last_complete < 0);

        if (done)
                *done = last_complete;

        if (used)
                *used = last_used;

        return dma_async_is_complete(cookie, last_complete, last_used);
}

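/*
 * Transfer-end interrupt: stop the channel and defer descriptor bookkeeping
 * to the tasklet.
 */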
static irqreturn_t sh_dmae_interrupt(int irq, void *data)
{
        irqreturn_t ret = IRQ_NONE;
        struct sh_dmae_chan *sh_chan = (struct sh_dmae_chan *)data;
        u32 chcr = sh_dmae_readl(sh_chan, CHCR);

        if (chcr & CHCR_TE) {
                /* DMA stop */
                dmae_halt(sh_chan);

                ret = IRQ_HANDLED;
                tasklet_schedule(&sh_chan->tasklet);
        }

        return ret;
}

#if defined(CONFIG_CPU_SH4)
static irqreturn_t sh_dmae_err(int irq, void *data)
{
        int err = 0;
        struct sh_dmae_device *shdev = (struct sh_dmae_device *)data;

        /* IRQ Multi */
        if (shdev->pdata.mode & SHDMA_MIX_IRQ) {
                int cnt = 0;
                switch (irq) {
#if defined(DMTE6_IRQ) && defined(DMAE1_IRQ)
                case DMTE6_IRQ:
                        cnt++;
#endif
                case DMTE0_IRQ:
                        if (dmaor_read_reg(cnt) & (DMAOR_NMIF | DMAOR_AE)) {
                                disable_irq(irq);
                                return IRQ_HANDLED;
                        }
                default:
                        return IRQ_NONE;
                }
        } else {
                /* reset dma controller */
                err = sh_dmae_rst(0);
                if (err)
                        return err;
#ifdef SH_DMAC_BASE1
                if (shdev->pdata.mode & SHDMA_DMAOR1) {
                        err = sh_dmae_rst(1);
                        if (err)
                                return err;
                }
#endif
                disable_irq(irq);
                return IRQ_HANDLED;
        }
}
#endif

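/*
 * Tasklet: match the current SAR against the in-flight descriptor to mark it
 * completed, then kick the next submitted descriptor and clean up the queue.
 */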
static void dmae_do_tasklet(unsigned long data)
{
        struct sh_dmae_chan *sh_chan = (struct sh_dmae_chan *)data;
        struct sh_desc *desc;
        u32 sar_buf = sh_dmae_readl(sh_chan, SAR);

        spin_lock(&sh_chan->desc_lock);
        list_for_each_entry(desc, &sh_chan->ld_queue, node) {
                if ((desc->hw.sar + desc->hw.tcr) == sar_buf &&
                    desc->mark == DESC_SUBMITTED) {
                        dev_dbg(sh_chan->dev, "done #%d@%p dst %u\n",
                                desc->async_tx.cookie, &desc->async_tx,
                                desc->hw.dar);
                        desc->mark = DESC_COMPLETED;
                        break;
                }
        }
        spin_unlock(&sh_chan->desc_lock);

        /* Next desc */
        sh_chan_xfer_ld_queue(sh_chan);
        sh_dmae_chan_ld_cleanup(sh_chan, false);
}

static unsigned int get_dmae_irq(unsigned int id)
{
        unsigned int irq = 0;
        if (id < ARRAY_SIZE(dmte_irq_map))
                irq = dmte_irq_map[id];
        return irq;
}

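/*
 * Allocate and initialise one DMA channel, add it to the device's channel
 * list and request its transfer-end interrupt.
 */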
static int __devinit sh_dmae_chan_probe(struct sh_dmae_device *shdev, int id)
{
        int err;
        unsigned int irq = get_dmae_irq(id);
        unsigned long irqflags = IRQF_DISABLED;
        struct sh_dmae_chan *new_sh_chan;

        /* alloc channel */
        new_sh_chan = kzalloc(sizeof(struct sh_dmae_chan), GFP_KERNEL);
        if (!new_sh_chan) {
                dev_err(shdev->common.dev,
                        "No free memory for allocating dma channels!\n");
                return -ENOMEM;
        }

        new_sh_chan->dev = shdev->common.dev;
        new_sh_chan->id = id;

        /* Init DMA tasklet */
        tasklet_init(&new_sh_chan->tasklet, dmae_do_tasklet,
                     (unsigned long)new_sh_chan);

        /* Init the channel */
        dmae_init(new_sh_chan);

        spin_lock_init(&new_sh_chan->desc_lock);

        /* Init descriptor management lists */
        INIT_LIST_HEAD(&new_sh_chan->ld_queue);
        INIT_LIST_HEAD(&new_sh_chan->ld_free);

        /* copy struct dma_device */
        new_sh_chan->common.device = &shdev->common;

        /* Add the channel to DMA device channel list */
        list_add_tail(&new_sh_chan->common.device_node,
                      &shdev->common.channels);
        shdev->common.chancnt++;

        if (shdev->pdata.mode & SHDMA_MIX_IRQ) {
                irqflags = IRQF_SHARED;
#if defined(DMTE6_IRQ)
                if (irq >= DMTE6_IRQ)
                        irq = DMTE6_IRQ;
                else
#endif
                        irq = DMTE0_IRQ;
        }

        snprintf(new_sh_chan->dev_id, sizeof(new_sh_chan->dev_id),
                 "sh-dmae%d", new_sh_chan->id);

        /* set up channel irq */
        err = request_irq(irq, &sh_dmae_interrupt, irqflags,
                          new_sh_chan->dev_id, new_sh_chan);
        if (err) {
                dev_err(shdev->common.dev, "DMA channel %d request_irq error "
                        "with return %d\n", id, err);
                goto err_no_irq;
        }

        /* CHCR register control function */
        new_sh_chan->set_chcr = dmae_set_chcr;
        /* DMARS register control function */
        new_sh_chan->set_dmars = dmae_set_dmars;

        shdev->chan[id] = new_sh_chan;
        return 0;

err_no_irq:
        /* remove from dmaengine device node */
        list_del(&new_sh_chan->common.device_node);
        kfree(new_sh_chan);
        return err;
}

static void sh_dmae_chan_remove(struct sh_dmae_device *shdev)
{
        int i;

        for (i = shdev->common.chancnt - 1 ; i >= 0 ; i--) {
                if (shdev->chan[i]) {
                        struct sh_dmae_chan *shchan = shdev->chan[i];
                        if (!(shdev->pdata.mode & SHDMA_MIX_IRQ))
                                free_irq(dmte_irq_map[i], shchan);

                        list_del(&shchan->common.device_node);
                        kfree(shchan);
                        shdev->chan[i] = NULL;
                }
        }
        shdev->common.chancnt = 0;
}

static int __init sh_dmae_probe(struct platform_device *pdev)
{
        int err = 0, cnt, ecnt;
        unsigned long irqflags = IRQF_DISABLED;
#if defined(CONFIG_CPU_SH4)
        int eirq[] = { DMAE0_IRQ,
#if defined(DMAE1_IRQ)
                       DMAE1_IRQ
#endif
        };
#endif
        struct sh_dmae_device *shdev;

        /* get platform data */
        if (!pdev->dev.platform_data)
                return -ENODEV;

        shdev = kzalloc(sizeof(struct sh_dmae_device), GFP_KERNEL);
        if (!shdev) {
                dev_err(&pdev->dev, "Not enough memory\n");
                return -ENOMEM;
        }

        /* platform data */
        memcpy(&shdev->pdata, pdev->dev.platform_data,
               sizeof(struct sh_dmae_pdata));

        /* reset dma controller */
        err = sh_dmae_rst(0);
        if (err)
                goto rst_err;

        /* SH7780/85/23 has DMAOR1 */
        if (shdev->pdata.mode & SHDMA_DMAOR1) {
                err = sh_dmae_rst(1);
                if (err)
                        goto rst_err;
        }

        INIT_LIST_HEAD(&shdev->common.channels);

        dma_cap_set(DMA_MEMCPY, shdev->common.cap_mask);
        shdev->common.device_alloc_chan_resources
                = sh_dmae_alloc_chan_resources;
        shdev->common.device_free_chan_resources = sh_dmae_free_chan_resources;
        shdev->common.device_prep_dma_memcpy = sh_dmae_prep_memcpy;
        shdev->common.device_is_tx_complete = sh_dmae_is_complete;
        shdev->common.device_issue_pending = sh_dmae_memcpy_issue_pending;
        shdev->common.dev = &pdev->dev;
        /* Default transfer size of 32 bytes requires 32-byte alignment */
        shdev->common.copy_align = 5;

#if defined(CONFIG_CPU_SH4)
        /* Non Mix IRQ mode SH7722/SH7730 etc... */
        if (shdev->pdata.mode & SHDMA_MIX_IRQ) {
                irqflags = IRQF_SHARED;
                eirq[0] = DMTE0_IRQ;
#if defined(DMTE6_IRQ) && defined(DMAE1_IRQ)
                eirq[1] = DMTE6_IRQ;
#endif
        }

        for (ecnt = 0 ; ecnt < ARRAY_SIZE(eirq); ecnt++) {
                err = request_irq(eirq[ecnt], sh_dmae_err, irqflags,
                                  "DMAC Address Error", shdev);
                if (err) {
                        dev_err(&pdev->dev, "DMA device request_irq "
                                "error (irq %d) with return %d\n",
                                eirq[ecnt], err);
                        goto eirq_err;
                }
        }
#endif /* CONFIG_CPU_SH4 */

        /* Create DMA Channel */
        for (cnt = 0 ; cnt < MAX_DMA_CHANNELS ; cnt++) {
                err = sh_dmae_chan_probe(shdev, cnt);
                if (err)
                        goto chan_probe_err;
        }

        platform_set_drvdata(pdev, shdev);
        dma_async_device_register(&shdev->common);

        return err;

chan_probe_err:
        sh_dmae_chan_remove(shdev);

eirq_err:
        for (ecnt-- ; ecnt >= 0; ecnt--)
                free_irq(eirq[ecnt], shdev);

rst_err:
        kfree(shdev);

        return err;
}

static int __exit sh_dmae_remove(struct platform_device *pdev)
{
        struct sh_dmae_device *shdev = platform_get_drvdata(pdev);

        dma_async_device_unregister(&shdev->common);

        if (shdev->pdata.mode & SHDMA_MIX_IRQ) {
                free_irq(DMTE0_IRQ, shdev);
#if defined(DMTE6_IRQ)
                free_irq(DMTE6_IRQ, shdev);
#endif
        }

        /* channel data remove */
        sh_dmae_chan_remove(shdev);

        if (!(shdev->pdata.mode & SHDMA_MIX_IRQ)) {
                free_irq(DMAE0_IRQ, shdev);
#if defined(DMAE1_IRQ)
                free_irq(DMAE1_IRQ, shdev);
#endif
        }
        kfree(shdev);

        return 0;
}

static void sh_dmae_shutdown(struct platform_device *pdev)
{
        struct sh_dmae_device *shdev = platform_get_drvdata(pdev);
        sh_dmae_ctl_stop(0);
        if (shdev->pdata.mode & SHDMA_DMAOR1)
                sh_dmae_ctl_stop(1);
}

static struct platform_driver sh_dmae_driver = {
        .remove         = __exit_p(sh_dmae_remove),
        .shutdown       = sh_dmae_shutdown,
        .driver = {
                .name   = "sh-dma-engine",
        },
};

static int __init sh_dmae_init(void)
{
        return platform_driver_probe(&sh_dmae_driver, sh_dmae_probe);
}
module_init(sh_dmae_init);

static void __exit sh_dmae_exit(void)
{
        platform_driver_unregister(&sh_dmae_driver);
}
module_exit(sh_dmae_exit);

MODULE_AUTHOR("Nobuhiro Iwamatsu <iwamatsu.nobuhiro@renesas.com>");
MODULE_DESCRIPTION("Renesas SH DMA Engine driver");
MODULE_LICENSE("GPL");