SH: constify multiple DMA related objects and references to them
drivers/dma/shdma.c
/*
 * Renesas SuperH DMA Engine support
 *
 * base is drivers/dma/fsldma.c
 *
 * Copyright (C) 2009 Nobuhiro Iwamatsu <iwamatsu.nobuhiro@renesas.com>
 * Copyright (C) 2009 Renesas Solutions, Inc. All rights reserved.
 * Copyright (C) 2007 Freescale Semiconductor, Inc. All rights reserved.
 *
 * This is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * - DMA of SuperH does not have Hardware DMA chain mode.
 * - MAX DMA size is 16MB.
 *
 */
#include <linux/init.h>
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/dmaengine.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/sh_dma.h>

#include "shdma.h"
/* DMA descriptor control */
enum sh_dmae_desc_status {
	DESC_IDLE,
	DESC_PREPARED,
	DESC_SUBMITTED,
	DESC_COMPLETED,	/* completed, have to call callback */
	DESC_WAITING,	/* callback called, waiting for ack / re-submit */
};
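
/*
 * A descriptor's life cycle, as implemented below: IDLE on ld_free ->
 * PREPARED by sh_dmae_prep_sg() -> SUBMITTED onto ld_queue by
 * sh_dmae_tx_submit() -> COMPLETED in dmae_do_tasklet() -> WAITING once
 * the callback has run -> back to IDLE on ld_free after the client acks.
 */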

#define NR_DESCS_PER_CHANNEL 32
/* Default MEMCPY transfer size = 2^2 = 4 bytes */
#define LOG2_DEFAULT_XFER_SIZE 2

/* A bitmask with bits enough for enum sh_dmae_slave_chan_id */
static unsigned long sh_dmae_slave_used[BITS_TO_LONGS(SH_DMA_SLAVE_NUMBER)];

static void sh_dmae_chan_ld_cleanup(struct sh_dmae_chan *sh_chan, bool all);

static void sh_dmae_writel(struct sh_dmae_chan *sh_dc, u32 data, u32 reg)
{
	__raw_writel(data, sh_dc->base + reg / sizeof(u32));
}

static u32 sh_dmae_readl(struct sh_dmae_chan *sh_dc, u32 reg)
{
	return __raw_readl(sh_dc->base + reg / sizeof(u32));
}

static u16 dmaor_read(struct sh_dmae_device *shdev)
{
	return __raw_readw(shdev->chan_reg + DMAOR / sizeof(u32));
}

static void dmaor_write(struct sh_dmae_device *shdev, u16 data)
{
	__raw_writew(data, shdev->chan_reg + DMAOR / sizeof(u32));
}
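
/*
 * Note: sh_chan->base and shdev->chan_reg are u32 __iomem pointers (see
 * shdma.h), which is why the datasheet byte offsets SAR, DAR, TCR, CHCR
 * and DMAOR are scaled down by sizeof(u32) before the pointer addition
 * in the accessors above.
 */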

/*
 * Reset DMA controller
 *
 * SH7780 has two DMAOR registers
 */
static void sh_dmae_ctl_stop(struct sh_dmae_device *shdev)
{
	unsigned short dmaor = dmaor_read(shdev);

	dmaor_write(shdev, dmaor & ~(DMAOR_NMIF | DMAOR_AE | DMAOR_DME));
}

static int sh_dmae_rst(struct sh_dmae_device *shdev)
{
	unsigned short dmaor;

	sh_dmae_ctl_stop(shdev);
	dmaor = dmaor_read(shdev) | shdev->pdata->dmaor_init;

	dmaor_write(shdev, dmaor);
	if (dmaor_read(shdev) & (DMAOR_AE | DMAOR_NMIF)) {
		pr_warning("dma-sh: Can't initialize DMAOR.\n");
		return -EINVAL;
	}
	return 0;
}

static bool dmae_is_busy(struct sh_dmae_chan *sh_chan)
{
	u32 chcr = sh_dmae_readl(sh_chan, CHCR);

	if ((chcr & (CHCR_DE | CHCR_TE)) == CHCR_DE)
		return true; /* working */

	return false; /* waiting */
}

static unsigned int calc_xmit_shift(struct sh_dmae_chan *sh_chan, u32 chcr)
{
	struct sh_dmae_device *shdev = container_of(sh_chan->common.device,
						struct sh_dmae_device, common);
	struct sh_dmae_pdata *pdata = shdev->pdata;
	int cnt = ((chcr & pdata->ts_low_mask) >> pdata->ts_low_shift) |
		((chcr & pdata->ts_high_mask) >> pdata->ts_high_shift);

	if (cnt >= pdata->ts_shift_num)
		cnt = 0;

	return pdata->ts_shift[cnt];
}

static u32 log2size_to_chcr(struct sh_dmae_chan *sh_chan, int l2size)
{
	struct sh_dmae_device *shdev = container_of(sh_chan->common.device,
						struct sh_dmae_device, common);
	struct sh_dmae_pdata *pdata = shdev->pdata;
	int i;

	for (i = 0; i < pdata->ts_shift_num; i++)
		if (pdata->ts_shift[i] == l2size)
			break;

	if (i == pdata->ts_shift_num)
		i = 0;

	return ((i << pdata->ts_low_shift) & pdata->ts_low_mask) |
		((i << pdata->ts_high_shift) & pdata->ts_high_mask);
}
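
/*
 * calc_xmit_shift() and log2size_to_chcr() are inverses of each other: the
 * CHCR TS bits, split across a low and a high field, index the
 * platform-provided pdata->ts_shift[] table, whose entries are log2 of the
 * transfer size in bytes. As an illustration (table contents assumed), with
 * ts_shift[] = { 3, 2, 0, 1 } a TS index of 1 selects 2^2 = 4-byte units,
 * and log2size_to_chcr(sh_chan, 2) encodes index 1 back into the TS fields.
 */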

static void dmae_set_reg(struct sh_dmae_chan *sh_chan, struct sh_dmae_regs *hw)
{
	sh_dmae_writel(sh_chan, hw->sar, SAR);
	sh_dmae_writel(sh_chan, hw->dar, DAR);
	sh_dmae_writel(sh_chan, hw->tcr >> sh_chan->xmit_shift, TCR);
}
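
/*
 * Note that TCR is programmed in transfer units, not bytes: hw->tcr holds
 * the byte count and is shifted right by xmit_shift (log2 of the unit
 * size) on its way into the register.
 */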

static void dmae_start(struct sh_dmae_chan *sh_chan)
{
	u32 chcr = sh_dmae_readl(sh_chan, CHCR);

	chcr |= CHCR_DE | CHCR_IE;
	sh_dmae_writel(sh_chan, chcr & ~CHCR_TE, CHCR);
}

static void dmae_halt(struct sh_dmae_chan *sh_chan)
{
	u32 chcr = sh_dmae_readl(sh_chan, CHCR);

	chcr &= ~(CHCR_DE | CHCR_TE | CHCR_IE);
	sh_dmae_writel(sh_chan, chcr, CHCR);
}

static void dmae_init(struct sh_dmae_chan *sh_chan)
{
	/*
	 * Default configuration for dual address memory-memory transfer.
	 * 0x400 represents auto-request.
	 */
	u32 chcr = DM_INC | SM_INC | 0x400 | log2size_to_chcr(sh_chan,
						   LOG2_DEFAULT_XFER_SIZE);
	sh_chan->xmit_shift = calc_xmit_shift(sh_chan, chcr);
	sh_dmae_writel(sh_chan, chcr, CHCR);
}

static int dmae_set_chcr(struct sh_dmae_chan *sh_chan, u32 val)
{
	/* CHCR cannot be written while the DMA is running */
	if (dmae_is_busy(sh_chan))
		return -EBUSY;

	sh_chan->xmit_shift = calc_xmit_shift(sh_chan, val);
	sh_dmae_writel(sh_chan, val, CHCR);

	return 0;
}

static int dmae_set_dmars(struct sh_dmae_chan *sh_chan, u16 val)
{
	struct sh_dmae_device *shdev = container_of(sh_chan->common.device,
						struct sh_dmae_device, common);
	struct sh_dmae_pdata *pdata = shdev->pdata;
	const struct sh_dmae_channel *chan_pdata = &pdata->channel[sh_chan->id];
	u16 __iomem *addr = shdev->dmars + chan_pdata->dmars / sizeof(u16);
	int shift = chan_pdata->dmars_bit;

	if (dmae_is_busy(sh_chan))
		return -EBUSY;

	__raw_writew((__raw_readw(addr) & (0xff00 >> shift)) | (val << shift),
		     addr);

	return 0;
}
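
/*
 * A 16-bit DMARS register packs the MID/RID settings of two channels;
 * dmars_bit is typically 0 or 8, so (0xff00 >> shift) preserves the
 * neighbouring channel's byte while val is written into this channel's.
 */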

static dma_cookie_t sh_dmae_tx_submit(struct dma_async_tx_descriptor *tx)
{
	struct sh_desc *desc = tx_to_sh_desc(tx), *chunk, *last = desc, *c;
	struct sh_dmae_chan *sh_chan = to_sh_chan(tx->chan);
	dma_async_tx_callback callback = tx->callback;
	dma_cookie_t cookie;

	spin_lock_bh(&sh_chan->desc_lock);

	cookie = sh_chan->common.cookie;
	cookie++;
	if (cookie < 0)
		cookie = 1;

	sh_chan->common.cookie = cookie;
	tx->cookie = cookie;

	/* Mark all chunks of this descriptor as submitted, move to the queue */
	list_for_each_entry_safe(chunk, c, desc->node.prev, node) {
		/*
		 * All chunks are on the global ld_free, so, we have to find
		 * the end of the chain ourselves
		 */
		if (chunk != desc && (chunk->mark == DESC_IDLE ||
				      chunk->async_tx.cookie > 0 ||
				      chunk->async_tx.cookie == -EBUSY ||
				      &chunk->node == &sh_chan->ld_free))
			break;
		chunk->mark = DESC_SUBMITTED;
		/* Callback goes to the last chunk */
		chunk->async_tx.callback = NULL;
		chunk->cookie = cookie;
		list_move_tail(&chunk->node, &sh_chan->ld_queue);
		last = chunk;
	}

	last->async_tx.callback = callback;
	last->async_tx.callback_param = tx->callback_param;

	dev_dbg(sh_chan->dev, "submit #%d@%p on %d: %x[%d] -> %x\n",
		tx->cookie, &last->async_tx, sh_chan->id,
		desc->hw.sar, desc->hw.tcr, desc->hw.dar);

	spin_unlock_bh(&sh_chan->desc_lock);

	return cookie;
}
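
/*
 * In sh_dmae_tx_submit() above the iteration is started at desc->node.prev,
 * so that desc itself is the first entry visited: sh_dmae_prep_sg() left
 * the whole chunk chain as one contiguous run on ld_free.
 */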

/* Called with desc_lock held */
static struct sh_desc *sh_dmae_get_desc(struct sh_dmae_chan *sh_chan)
{
	struct sh_desc *desc;

	list_for_each_entry(desc, &sh_chan->ld_free, node)
		if (desc->mark != DESC_PREPARED) {
			BUG_ON(desc->mark != DESC_IDLE);
			list_del(&desc->node);
			return desc;
		}

	return NULL;
}

static const struct sh_dmae_slave_config *sh_dmae_find_slave(
	struct sh_dmae_chan *sh_chan, struct sh_dmae_slave *param)
{
	struct dma_device *dma_dev = sh_chan->common.device;
	struct sh_dmae_device *shdev = container_of(dma_dev,
					struct sh_dmae_device, common);
	struct sh_dmae_pdata *pdata = shdev->pdata;
	int i;

	if (param->slave_id >= SH_DMA_SLAVE_NUMBER)
		return NULL;

	for (i = 0; i < pdata->slave_num; i++)
		if (pdata->slave[i].slave_id == param->slave_id)
			return pdata->slave + i;

	return NULL;
}

static int sh_dmae_alloc_chan_resources(struct dma_chan *chan)
{
	struct sh_dmae_chan *sh_chan = to_sh_chan(chan);
	struct sh_desc *desc;
	struct sh_dmae_slave *param = chan->private;

	pm_runtime_get_sync(sh_chan->dev);

	/*
	 * This relies on the guarantee from dmaengine that alloc_chan_resources
	 * never runs concurrently with itself or free_chan_resources.
	 */
	if (param) {
		const struct sh_dmae_slave_config *cfg;

		cfg = sh_dmae_find_slave(sh_chan, param);
		if (!cfg) {
			pm_runtime_put(sh_chan->dev);
			return -EINVAL;
		}

		if (test_and_set_bit(param->slave_id, sh_dmae_slave_used)) {
			pm_runtime_put(sh_chan->dev);
			return -EBUSY;
		}

		param->config = cfg;

		dmae_set_dmars(sh_chan, cfg->mid_rid);
		dmae_set_chcr(sh_chan, cfg->chcr);
	} else if ((sh_dmae_readl(sh_chan, CHCR) & 0xf00) != 0x400) {
		dmae_init(sh_chan);
	}

	spin_lock_bh(&sh_chan->desc_lock);
	while (sh_chan->descs_allocated < NR_DESCS_PER_CHANNEL) {
		spin_unlock_bh(&sh_chan->desc_lock);
		desc = kzalloc(sizeof(struct sh_desc), GFP_KERNEL);
		if (!desc) {
			spin_lock_bh(&sh_chan->desc_lock);
			break;
		}
		dma_async_tx_descriptor_init(&desc->async_tx,
					&sh_chan->common);
		desc->async_tx.tx_submit = sh_dmae_tx_submit;
		desc->mark = DESC_IDLE;

		spin_lock_bh(&sh_chan->desc_lock);
		list_add(&desc->node, &sh_chan->ld_free);
		sh_chan->descs_allocated++;
	}
	spin_unlock_bh(&sh_chan->desc_lock);

	if (!sh_chan->descs_allocated)
		pm_runtime_put(sh_chan->dev);

	return sh_chan->descs_allocated;
}
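
/*
 * A sketch of typical slave usage (helper names assumed, not part of this
 * driver): the client passes a struct sh_dmae_slave with ->slave_id set as
 * chan->private via the dma_request_channel() filter, and the code above
 * then programs DMARS/CHCR from the matching platform slave config:
 *
 *	static bool sh_dmae_filter(struct dma_chan *chan, void *arg)
 *	{
 *		chan->private = arg;	// points at a struct sh_dmae_slave
 *		return true;
 *	}
 *
 *	dma_cap_zero(mask);
 *	dma_cap_set(DMA_SLAVE, mask);
 *	chan = dma_request_channel(mask, sh_dmae_filter, &param);
 */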

/*
 * sh_dmae_free_chan_resources - Free all resources of the channel.
 */
static void sh_dmae_free_chan_resources(struct dma_chan *chan)
{
	struct sh_dmae_chan *sh_chan = to_sh_chan(chan);
	struct sh_desc *desc, *_desc;
	LIST_HEAD(list);
	int descs = sh_chan->descs_allocated;

	dmae_halt(sh_chan);

	/* Prepared and not submitted descriptors can still be on the queue */
	if (!list_empty(&sh_chan->ld_queue))
		sh_dmae_chan_ld_cleanup(sh_chan, true);

	if (chan->private) {
		/* The caller is holding dma_list_mutex */
		struct sh_dmae_slave *param = chan->private;
		clear_bit(param->slave_id, sh_dmae_slave_used);
	}

	spin_lock_bh(&sh_chan->desc_lock);

	list_splice_init(&sh_chan->ld_free, &list);
	sh_chan->descs_allocated = 0;

	spin_unlock_bh(&sh_chan->desc_lock);

	if (descs > 0)
		pm_runtime_put(sh_chan->dev);

	list_for_each_entry_safe(desc, _desc, &list, node)
		kfree(desc);
}

/**
 * sh_dmae_add_desc - get, set up and return one transfer descriptor
 * @sh_chan:	DMA channel
 * @flags:	DMA transfer flags
 * @dest:	destination DMA address, incremented when direction equals
 *		DMA_FROM_DEVICE or DMA_BIDIRECTIONAL
 * @src:	source DMA address, incremented when direction equals
 *		DMA_TO_DEVICE or DMA_BIDIRECTIONAL
 * @len:	DMA transfer length
 * @first:	if NULL, set to the current descriptor and cookie set to -EBUSY
 * @direction:	needed for slave DMA to decide which address to keep constant,
 *		equals DMA_BIDIRECTIONAL for MEMCPY
 *
 * Returns the new descriptor or %NULL on failure
 * Locks: called with desc_lock held
 */
static struct sh_desc *sh_dmae_add_desc(struct sh_dmae_chan *sh_chan,
	unsigned long flags, dma_addr_t *dest, dma_addr_t *src, size_t *len,
	struct sh_desc **first, enum dma_data_direction direction)
{
	struct sh_desc *new;
	size_t copy_size;

	if (!*len)
		return NULL;

	/* Allocate the link descriptor from the free list */
	new = sh_dmae_get_desc(sh_chan);
	if (!new) {
		dev_err(sh_chan->dev, "No free link descriptor available\n");
		return NULL;
	}

	copy_size = min(*len, (size_t)SH_DMA_TCR_MAX + 1);

	new->hw.sar = *src;
	new->hw.dar = *dest;
	new->hw.tcr = copy_size;

	if (!*first) {
		/* First desc */
		new->async_tx.cookie = -EBUSY;
		*first = new;
	} else {
		/* Other desc - invisible to the user */
		new->async_tx.cookie = -EINVAL;
	}

	dev_dbg(sh_chan->dev,
		"chaining (%u/%u)@%x -> %x with %p, cookie %d, shift %d\n",
		copy_size, *len, *src, *dest, &new->async_tx,
		new->async_tx.cookie, sh_chan->xmit_shift);

	new->mark = DESC_PREPARED;
	new->async_tx.flags = flags;
	new->direction = direction;

	*len -= copy_size;
	if (direction == DMA_BIDIRECTIONAL || direction == DMA_TO_DEVICE)
		*src += copy_size;
	if (direction == DMA_BIDIRECTIONAL || direction == DMA_FROM_DEVICE)
		*dest += copy_size;

	return new;
}
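
/*
 * Each descriptor moves at most SH_DMA_TCR_MAX + 1 bytes (16MB with the
 * usual 24-bit TCR), so e.g. a 40MB memcpy is split by sh_dmae_prep_sg()
 * below into three chunks of 16 + 16 + 8MB, tied together by the cookie
 * convention described there.
 */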

/*
 * sh_dmae_prep_sg - prepare transfer descriptors from an SG list
 *
 * Common routine for public (MEMCPY) and slave DMA. The MEMCPY case is also
 * converted to scatter-gather to guarantee consistent locking and a correct
 * list manipulation. For slave DMA direction carries the usual meaning, and,
 * logically, the SG list is RAM and the addr variable contains slave address,
 * e.g., the FIFO I/O register. For MEMCPY direction equals DMA_BIDIRECTIONAL
 * and the SG list contains only one element and points at the source buffer.
 */
static struct dma_async_tx_descriptor *sh_dmae_prep_sg(struct sh_dmae_chan *sh_chan,
	struct scatterlist *sgl, unsigned int sg_len, dma_addr_t *addr,
	enum dma_data_direction direction, unsigned long flags)
{
	struct scatterlist *sg;
	struct sh_desc *first = NULL, *new = NULL /* compiler... */;
	LIST_HEAD(tx_list);
	int chunks = 0;
	int i;

	if (!sg_len)
		return NULL;

	for_each_sg(sgl, sg, sg_len, i)
		chunks += (sg_dma_len(sg) + SH_DMA_TCR_MAX) /
			(SH_DMA_TCR_MAX + 1);

	/* Have to lock the whole loop to protect against concurrent release */
	spin_lock_bh(&sh_chan->desc_lock);

	/*
	 * Chaining:
	 * first descriptor is what user is dealing with in all API calls, its
	 *	cookie is at first set to -EBUSY, at tx-submit to a positive
	 *	number
	 * if more than one chunk is needed further chunks have cookie = -EINVAL
	 * the last chunk, if not equal to the first, has cookie = -ENOSPC
	 * all chunks are linked onto the tx_list head with their .node heads
	 *	only during this function, then they are immediately spliced
	 *	back onto the free list in form of a chain
	 */
	for_each_sg(sgl, sg, sg_len, i) {
		dma_addr_t sg_addr = sg_dma_address(sg);
		size_t len = sg_dma_len(sg);

		if (!len)
			goto err_get_desc;

		do {
			dev_dbg(sh_chan->dev, "Add SG #%d@%p[%d], dma %llx\n",
				i, sg, len, (unsigned long long)sg_addr);

			if (direction == DMA_FROM_DEVICE)
				new = sh_dmae_add_desc(sh_chan, flags,
						&sg_addr, addr, &len, &first,
						direction);
			else
				new = sh_dmae_add_desc(sh_chan, flags,
						addr, &sg_addr, &len, &first,
						direction);
			if (!new)
				goto err_get_desc;

			new->chunks = chunks--;
			list_add_tail(&new->node, &tx_list);
		} while (len);
	}

	if (new != first)
		new->async_tx.cookie = -ENOSPC;

	/* Put them back on the free list, so, they don't get lost */
	list_splice_tail(&tx_list, &sh_chan->ld_free);

	spin_unlock_bh(&sh_chan->desc_lock);

	return &first->async_tx;

err_get_desc:
	list_for_each_entry(new, &tx_list, node)
		new->mark = DESC_IDLE;
	list_splice(&tx_list, &sh_chan->ld_free);

	spin_unlock_bh(&sh_chan->desc_lock);

	return NULL;
}

static struct dma_async_tx_descriptor *sh_dmae_prep_memcpy(
	struct dma_chan *chan, dma_addr_t dma_dest, dma_addr_t dma_src,
	size_t len, unsigned long flags)
{
	struct sh_dmae_chan *sh_chan;
	struct scatterlist sg;

	if (!chan || !len)
		return NULL;

	chan->private = NULL;

	sh_chan = to_sh_chan(chan);

	sg_init_table(&sg, 1);
	sg_set_page(&sg, pfn_to_page(PFN_DOWN(dma_src)), len,
		    offset_in_page(dma_src));
	sg_dma_address(&sg) = dma_src;
	sg_dma_len(&sg) = len;

	return sh_dmae_prep_sg(sh_chan, &sg, 1, &dma_dest, DMA_BIDIRECTIONAL,
			       flags);
}

static struct dma_async_tx_descriptor *sh_dmae_prep_slave_sg(
	struct dma_chan *chan, struct scatterlist *sgl, unsigned int sg_len,
	enum dma_data_direction direction, unsigned long flags)
{
	struct sh_dmae_slave *param;
	struct sh_dmae_chan *sh_chan;
	dma_addr_t slave_addr;

	if (!chan)
		return NULL;

	sh_chan = to_sh_chan(chan);
	param = chan->private;

	/* Someone calling slave DMA on a public channel? */
	if (!param || !sg_len) {
		dev_warn(sh_chan->dev, "%s: bad parameter: %p, %d, %d\n",
			 __func__, param, sg_len, param ? param->slave_id : -1);
		return NULL;
	}

	/*
	 * if (param != NULL), this is a successfully requested slave channel,
	 * therefore param->config != NULL too.
	 */
	slave_addr = param->config->addr;

	return sh_dmae_prep_sg(sh_chan, sgl, sg_len, &slave_addr,
			       direction, flags);
}

static void sh_dmae_terminate_all(struct dma_chan *chan)
{
	struct sh_dmae_chan *sh_chan;

	if (!chan)
		return;

	sh_chan = to_sh_chan(chan);

	dmae_halt(sh_chan);

	spin_lock_bh(&sh_chan->desc_lock);
	if (!list_empty(&sh_chan->ld_queue)) {
		/* Record partial transfer */
		struct sh_desc *desc = list_entry(sh_chan->ld_queue.next,
						  struct sh_desc, node);
		desc->partial = (desc->hw.tcr - sh_dmae_readl(sh_chan, TCR)) <<
			sh_chan->xmit_shift;
	}
	spin_unlock_bh(&sh_chan->desc_lock);

	sh_dmae_chan_ld_cleanup(sh_chan, true);
}

static dma_async_tx_callback __ld_cleanup(struct sh_dmae_chan *sh_chan, bool all)
{
	struct sh_desc *desc, *_desc;
	/* Is the "exposed" head of a chain acked? */
	bool head_acked = false;
	dma_cookie_t cookie = 0;
	dma_async_tx_callback callback = NULL;
	void *param = NULL;

	spin_lock_bh(&sh_chan->desc_lock);
	list_for_each_entry_safe(desc, _desc, &sh_chan->ld_queue, node) {
		struct dma_async_tx_descriptor *tx = &desc->async_tx;

		BUG_ON(tx->cookie > 0 && tx->cookie != desc->cookie);
		BUG_ON(desc->mark != DESC_SUBMITTED &&
		       desc->mark != DESC_COMPLETED &&
		       desc->mark != DESC_WAITING);

		/*
		 * queue is ordered, and we use this loop to (1) clean up all
		 * completed descriptors, and to (2) update descriptor flags of
		 * any chunks in a (partially) completed chain
		 */
		if (!all && desc->mark == DESC_SUBMITTED &&
		    desc->cookie != cookie)
			break;

		if (tx->cookie > 0)
			cookie = tx->cookie;

		if (desc->mark == DESC_COMPLETED && desc->chunks == 1) {
			if (sh_chan->completed_cookie != desc->cookie - 1)
				dev_dbg(sh_chan->dev,
					"Completing cookie %d, expected %d\n",
					desc->cookie,
					sh_chan->completed_cookie + 1);
			sh_chan->completed_cookie = desc->cookie;
		}

		/* Call callback on the last chunk */
		if (desc->mark == DESC_COMPLETED && tx->callback) {
			desc->mark = DESC_WAITING;
			callback = tx->callback;
			param = tx->callback_param;
			dev_dbg(sh_chan->dev, "descriptor #%d@%p on %d callback\n",
				tx->cookie, tx, sh_chan->id);
			BUG_ON(desc->chunks != 1);
			break;
		}

		if (tx->cookie > 0 || tx->cookie == -EBUSY) {
			if (desc->mark == DESC_COMPLETED) {
				BUG_ON(tx->cookie < 0);
				desc->mark = DESC_WAITING;
			}
			head_acked = async_tx_test_ack(tx);
		} else {
			switch (desc->mark) {
			case DESC_COMPLETED:
				desc->mark = DESC_WAITING;
				/* Fall through */
			case DESC_WAITING:
				if (head_acked)
					async_tx_ack(&desc->async_tx);
			}
		}

		dev_dbg(sh_chan->dev, "descriptor %p #%d completed.\n",
			tx, tx->cookie);

		if (((desc->mark == DESC_COMPLETED ||
		      desc->mark == DESC_WAITING) &&
		     async_tx_test_ack(&desc->async_tx)) || all) {
			/* Remove from ld_queue list */
			desc->mark = DESC_IDLE;
			list_move(&desc->node, &sh_chan->ld_free);
		}
	}
	spin_unlock_bh(&sh_chan->desc_lock);

	if (callback)
		callback(param);

	return callback;
}

/*
 * sh_dmae_chan_ld_cleanup - Clean up link descriptors
 *
 * This function cleans up the ld_queue of DMA channel.
 */
static void sh_dmae_chan_ld_cleanup(struct sh_dmae_chan *sh_chan, bool all)
{
	while (__ld_cleanup(sh_chan, all))
		;
}

static void sh_chan_xfer_ld_queue(struct sh_dmae_chan *sh_chan)
{
	struct sh_desc *desc;

	spin_lock_bh(&sh_chan->desc_lock);
	/* DMA work check */
	if (dmae_is_busy(sh_chan)) {
		spin_unlock_bh(&sh_chan->desc_lock);
		return;
	}

	/* Find the first not transferred descriptor */
	list_for_each_entry(desc, &sh_chan->ld_queue, node)
		if (desc->mark == DESC_SUBMITTED) {
			dev_dbg(sh_chan->dev, "Queue #%d to %d: %u@%x -> %x\n",
				desc->async_tx.cookie, sh_chan->id,
				desc->hw.tcr, desc->hw.sar, desc->hw.dar);
			/* Get the ld start address from ld_queue */
			dmae_set_reg(sh_chan, &desc->hw);
			dmae_start(sh_chan);
			break;
		}

	spin_unlock_bh(&sh_chan->desc_lock);
}

static void sh_dmae_memcpy_issue_pending(struct dma_chan *chan)
{
	struct sh_dmae_chan *sh_chan = to_sh_chan(chan);
	sh_chan_xfer_ld_queue(sh_chan);
}

static enum dma_status sh_dmae_is_complete(struct dma_chan *chan,
					dma_cookie_t cookie,
					dma_cookie_t *done,
					dma_cookie_t *used)
{
	struct sh_dmae_chan *sh_chan = to_sh_chan(chan);
	dma_cookie_t last_used;
	dma_cookie_t last_complete;
	enum dma_status status;

	sh_dmae_chan_ld_cleanup(sh_chan, false);

	last_used = chan->cookie;
	last_complete = sh_chan->completed_cookie;
	BUG_ON(last_complete < 0);

	if (done)
		*done = last_complete;

	if (used)
		*used = last_used;

	spin_lock_bh(&sh_chan->desc_lock);

	status = dma_async_is_complete(cookie, last_complete, last_used);

	/*
	 * If we don't find cookie on the queue, it has been aborted and we have
	 * to report error
	 */
	if (status != DMA_SUCCESS) {
		struct sh_desc *desc;
		status = DMA_ERROR;
		list_for_each_entry(desc, &sh_chan->ld_queue, node)
			if (desc->cookie == cookie) {
				status = DMA_IN_PROGRESS;
				break;
			}
	}

	spin_unlock_bh(&sh_chan->desc_lock);

	return status;
}
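
/*
 * Clients normally reach this function through dma_async_is_tx_complete(),
 * which dispatches to the device_is_tx_complete hook installed in
 * sh_dmae_probe() below.
 */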

static irqreturn_t sh_dmae_interrupt(int irq, void *data)
{
	irqreturn_t ret = IRQ_NONE;
	struct sh_dmae_chan *sh_chan = (struct sh_dmae_chan *)data;
	u32 chcr = sh_dmae_readl(sh_chan, CHCR);

	if (chcr & CHCR_TE) {
		/* DMA stop */
		dmae_halt(sh_chan);

		ret = IRQ_HANDLED;
		tasklet_schedule(&sh_chan->tasklet);
	}

	return ret;
}

#if defined(CONFIG_CPU_SH4)
static irqreturn_t sh_dmae_err(int irq, void *data)
{
	struct sh_dmae_device *shdev = (struct sh_dmae_device *)data;
	int i;

	/* halt the dma controller */
	sh_dmae_ctl_stop(shdev);

	/* We cannot detect which channel caused the error, have to reset all */
	for (i = 0; i < SH_DMAC_MAX_CHANNELS; i++) {
		struct sh_dmae_chan *sh_chan = shdev->chan[i];
		if (sh_chan) {
			struct sh_desc *desc;
			/* Stop the channel */
			dmae_halt(sh_chan);
			/* Complete all */
			list_for_each_entry(desc, &sh_chan->ld_queue, node) {
				struct dma_async_tx_descriptor *tx = &desc->async_tx;
				desc->mark = DESC_IDLE;
				if (tx->callback)
					tx->callback(tx->callback_param);
			}
			list_splice_init(&sh_chan->ld_queue, &sh_chan->ld_free);
		}
	}
	sh_dmae_rst(shdev);

	return IRQ_HANDLED;
}
#endif

static void dmae_do_tasklet(unsigned long data)
{
	struct sh_dmae_chan *sh_chan = (struct sh_dmae_chan *)data;
	struct sh_desc *desc;
	u32 sar_buf = sh_dmae_readl(sh_chan, SAR);
	u32 dar_buf = sh_dmae_readl(sh_chan, DAR);

	spin_lock(&sh_chan->desc_lock);
	list_for_each_entry(desc, &sh_chan->ld_queue, node) {
		if (desc->mark == DESC_SUBMITTED &&
		    ((desc->direction == DMA_FROM_DEVICE &&
		      (desc->hw.dar + desc->hw.tcr) == dar_buf) ||
		     (desc->hw.sar + desc->hw.tcr) == sar_buf)) {
			dev_dbg(sh_chan->dev, "done #%d@%p dst %u\n",
				desc->async_tx.cookie, &desc->async_tx,
				desc->hw.dar);
			desc->mark = DESC_COMPLETED;
			break;
		}
	}
	spin_unlock(&sh_chan->desc_lock);

	/* Next desc */
	sh_chan_xfer_ld_queue(sh_chan);
	sh_dmae_chan_ld_cleanup(sh_chan, false);
}
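
/*
 * The controller reports no per-descriptor status, so dmae_do_tasklet()
 * identifies the finished chunk by comparing the final SAR/DAR values
 * against each submitted descriptor's expected end address.
 */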

static int __devinit sh_dmae_chan_probe(struct sh_dmae_device *shdev, int id,
					int irq, unsigned long flags)
{
	int err;
	const struct sh_dmae_channel *chan_pdata = &shdev->pdata->channel[id];
	struct platform_device *pdev = to_platform_device(shdev->common.dev);
	struct sh_dmae_chan *new_sh_chan;

	/* alloc channel */
	new_sh_chan = kzalloc(sizeof(struct sh_dmae_chan), GFP_KERNEL);
	if (!new_sh_chan) {
		dev_err(shdev->common.dev,
			"No free memory for allocating dma channels!\n");
		return -ENOMEM;
	}

	/* copy struct dma_device */
	new_sh_chan->common.device = &shdev->common;

	new_sh_chan->dev = shdev->common.dev;
	new_sh_chan->id = id;
	new_sh_chan->irq = irq;
	new_sh_chan->base = shdev->chan_reg + chan_pdata->offset / sizeof(u32);

	/* Init DMA tasklet */
	tasklet_init(&new_sh_chan->tasklet, dmae_do_tasklet,
		     (unsigned long)new_sh_chan);

	/* Init the channel */
	dmae_init(new_sh_chan);

	spin_lock_init(&new_sh_chan->desc_lock);

	/* Init descriptor manage list */
	INIT_LIST_HEAD(&new_sh_chan->ld_queue);
	INIT_LIST_HEAD(&new_sh_chan->ld_free);

	/* Add the channel to DMA device channel list */
	list_add_tail(&new_sh_chan->common.device_node,
		      &shdev->common.channels);
	shdev->common.chancnt++;

	if (pdev->id >= 0)
		snprintf(new_sh_chan->dev_id, sizeof(new_sh_chan->dev_id),
			 "sh-dmae%d.%d", pdev->id, new_sh_chan->id);
	else
		snprintf(new_sh_chan->dev_id, sizeof(new_sh_chan->dev_id),
			 "sh-dma%d", new_sh_chan->id);

	/* set up channel irq */
	err = request_irq(irq, &sh_dmae_interrupt, flags,
			  new_sh_chan->dev_id, new_sh_chan);
	if (err) {
		dev_err(shdev->common.dev, "DMA channel %d request_irq error "
			"with return %d\n", id, err);
		goto err_no_irq;
	}

	shdev->chan[id] = new_sh_chan;
	return 0;

err_no_irq:
	/* remove from dmaengine device node */
	list_del(&new_sh_chan->common.device_node);
	kfree(new_sh_chan);
	return err;
}

static void sh_dmae_chan_remove(struct sh_dmae_device *shdev)
{
	int i;

	for (i = shdev->common.chancnt - 1 ; i >= 0 ; i--) {
		if (shdev->chan[i]) {
			struct sh_dmae_chan *sh_chan = shdev->chan[i];

			free_irq(sh_chan->irq, sh_chan);

			list_del(&sh_chan->common.device_node);
			kfree(sh_chan);
			shdev->chan[i] = NULL;
		}
	}
	shdev->common.chancnt = 0;
}

static int __init sh_dmae_probe(struct platform_device *pdev)
{
	struct sh_dmae_pdata *pdata = pdev->dev.platform_data;
	unsigned long irqflags = IRQF_DISABLED,
		chan_flag[SH_DMAC_MAX_CHANNELS] = {};
	int errirq, chan_irq[SH_DMAC_MAX_CHANNELS];
	int err, i, irq_cnt = 0, irqres = 0;
	struct sh_dmae_device *shdev;
	struct resource *chan, *dmars, *errirq_res, *chanirq_res;

	/* get platform data */
	if (!pdata || !pdata->channel_num)
		return -ENODEV;

	chan = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	/* DMARS area is optional, if absent, this controller cannot do slave DMA */
	dmars = platform_get_resource(pdev, IORESOURCE_MEM, 1);
	/*
	 * IRQ resources:
	 * 1. there always must be at least one IRQ IO-resource. On SH4 it is
	 *    the error IRQ, in which case it is the only IRQ in this resource:
	 *    start == end. If it is the only IRQ resource, all channels also
	 *    use the same IRQ.
	 * 2. DMA channel IRQ resources can be specified one per resource or in
	 *    ranges (start != end)
	 * 3. iff all events (channels and, optionally, error) on this
	 *    controller use the same IRQ, only one IRQ resource can be
	 *    specified, otherwise there must be one IRQ per channel, even if
	 *    some of them are equal
	 * 4. if all IRQs on this controller are equal or if some specific IRQs
	 *    specify IORESOURCE_IRQ_SHAREABLE in their resources, they will be
	 *    requested with the IRQF_SHARED flag
	 */
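	/*
	 * A hypothetical example (addresses and IRQ names invented for
	 * illustration): a six-channel DMAC with a dedicated error IRQ and a
	 * contiguous range of channel IRQs could be described as
	 *
	 *	static struct resource dmae_resources[] = {
	 *		{ .start = 0xfe008020, .end = 0xfe00808f,
	 *		  .flags = IORESOURCE_MEM },
	 *		{ .start = DMAE_IRQ, .end = DMAE_IRQ,
	 *		  .flags = IORESOURCE_IRQ },
	 *		{ .start = DMTE0_IRQ, .end = DMTE5_IRQ,
	 *		  .flags = IORESOURCE_IRQ },
	 *	};
	 */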
	errirq_res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
	if (!chan || !errirq_res)
		return -ENODEV;

	if (!request_mem_region(chan->start, resource_size(chan), pdev->name)) {
		dev_err(&pdev->dev, "DMAC register region already claimed\n");
		return -EBUSY;
	}

	if (dmars && !request_mem_region(dmars->start, resource_size(dmars), pdev->name)) {
		dev_err(&pdev->dev, "DMAC DMARS region already claimed\n");
		err = -EBUSY;
		goto ermrdmars;
	}

	err = -ENOMEM;
	shdev = kzalloc(sizeof(struct sh_dmae_device), GFP_KERNEL);
	if (!shdev) {
		dev_err(&pdev->dev, "Not enough memory\n");
		goto ealloc;
	}

	shdev->chan_reg = ioremap(chan->start, resource_size(chan));
	if (!shdev->chan_reg)
		goto emapchan;
	if (dmars) {
		shdev->dmars = ioremap(dmars->start, resource_size(dmars));
		if (!shdev->dmars)
			goto emapdmars;
	}

	/* platform data */
	shdev->pdata = pdata;

	pm_runtime_enable(&pdev->dev);
	pm_runtime_get_sync(&pdev->dev);

	/* reset dma controller */
	err = sh_dmae_rst(shdev);
	if (err)
		goto rst_err;

	INIT_LIST_HEAD(&shdev->common.channels);

	dma_cap_set(DMA_MEMCPY, shdev->common.cap_mask);
	if (dmars)
		dma_cap_set(DMA_SLAVE, shdev->common.cap_mask);

	shdev->common.device_alloc_chan_resources
		= sh_dmae_alloc_chan_resources;
	shdev->common.device_free_chan_resources = sh_dmae_free_chan_resources;
	shdev->common.device_prep_dma_memcpy = sh_dmae_prep_memcpy;
	shdev->common.device_is_tx_complete = sh_dmae_is_complete;
	shdev->common.device_issue_pending = sh_dmae_memcpy_issue_pending;

	/* Compulsory for DMA_SLAVE fields */
	shdev->common.device_prep_slave_sg = sh_dmae_prep_slave_sg;
	shdev->common.device_terminate_all = sh_dmae_terminate_all;

	shdev->common.dev = &pdev->dev;
	/* Default transfer size of 32 bytes requires 32-byte alignment */
	shdev->common.copy_align = LOG2_DEFAULT_XFER_SIZE;

#if defined(CONFIG_CPU_SH4)
	chanirq_res = platform_get_resource(pdev, IORESOURCE_IRQ, 1);

	if (!chanirq_res)
		chanirq_res = errirq_res;
	else
		irqres++;

	if (chanirq_res == errirq_res ||
	    (errirq_res->flags & IORESOURCE_BITS) == IORESOURCE_IRQ_SHAREABLE)
		irqflags = IRQF_SHARED;

	errirq = errirq_res->start;

	err = request_irq(errirq, sh_dmae_err, irqflags,
			  "DMAC Address Error", shdev);
	if (err) {
		dev_err(&pdev->dev,
			"DMA failed requesting irq #%d, error %d\n",
			errirq, err);
		goto eirq_err;
	}

#else
	chanirq_res = errirq_res;
#endif /* CONFIG_CPU_SH4 */

	if (chanirq_res->start == chanirq_res->end &&
	    !platform_get_resource(pdev, IORESOURCE_IRQ, 1)) {
		/* Special case - all multiplexed */
		for (; irq_cnt < pdata->channel_num; irq_cnt++) {
			chan_irq[irq_cnt] = chanirq_res->start;
			chan_flag[irq_cnt] = IRQF_SHARED;
		}
	} else {
		do {
			for (i = chanirq_res->start; i <= chanirq_res->end; i++) {
				if ((errirq_res->flags & IORESOURCE_BITS) ==
				    IORESOURCE_IRQ_SHAREABLE)
					chan_flag[irq_cnt] = IRQF_SHARED;
				else
					chan_flag[irq_cnt] = IRQF_DISABLED;
				dev_dbg(&pdev->dev,
					"Found IRQ %d for channel %d\n",
					i, irq_cnt);
				chan_irq[irq_cnt++] = i;
			}
			chanirq_res = platform_get_resource(pdev,
						IORESOURCE_IRQ, ++irqres);
		} while (irq_cnt < pdata->channel_num && chanirq_res);
	}

	if (irq_cnt < pdata->channel_num) {
		err = -ENODEV;
		goto eirqres;
	}

	/* Create DMA Channel */
	for (i = 0; i < pdata->channel_num; i++) {
		err = sh_dmae_chan_probe(shdev, i, chan_irq[i], chan_flag[i]);
		if (err)
			goto chan_probe_err;
	}

	pm_runtime_put(&pdev->dev);

	platform_set_drvdata(pdev, shdev);
	dma_async_device_register(&shdev->common);

	return err;

chan_probe_err:
	sh_dmae_chan_remove(shdev);
eirqres:
#if defined(CONFIG_CPU_SH4)
	free_irq(errirq, shdev);
eirq_err:
#endif
rst_err:
	pm_runtime_put(&pdev->dev);
	if (dmars)
		iounmap(shdev->dmars);
emapdmars:
	iounmap(shdev->chan_reg);
emapchan:
	kfree(shdev);
ealloc:
	if (dmars)
		release_mem_region(dmars->start, resource_size(dmars));
ermrdmars:
	release_mem_region(chan->start, resource_size(chan));

	return err;
}

static int __exit sh_dmae_remove(struct platform_device *pdev)
{
	struct sh_dmae_device *shdev = platform_get_drvdata(pdev);
	struct resource *res;
	int errirq = platform_get_irq(pdev, 0);

	dma_async_device_unregister(&shdev->common);

	if (errirq > 0)
		free_irq(errirq, shdev);

	/* channel data remove */
	sh_dmae_chan_remove(shdev);

	pm_runtime_disable(&pdev->dev);

	if (shdev->dmars)
		iounmap(shdev->dmars);
	iounmap(shdev->chan_reg);

	kfree(shdev);

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (res)
		release_mem_region(res->start, resource_size(res));
	res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
	if (res)
		release_mem_region(res->start, resource_size(res));

	return 0;
}

static void sh_dmae_shutdown(struct platform_device *pdev)
{
	struct sh_dmae_device *shdev = platform_get_drvdata(pdev);
	sh_dmae_ctl_stop(shdev);
}

static struct platform_driver sh_dmae_driver = {
	.remove		= __exit_p(sh_dmae_remove),
	.shutdown	= sh_dmae_shutdown,
	.driver = {
		.name	= "sh-dma-engine",
	},
};

static int __init sh_dmae_init(void)
{
	return platform_driver_probe(&sh_dmae_driver, sh_dmae_probe);
}
module_init(sh_dmae_init);

static void __exit sh_dmae_exit(void)
{
	platform_driver_unregister(&sh_dmae_driver);
}
module_exit(sh_dmae_exit);

MODULE_AUTHOR("Nobuhiro Iwamatsu <iwamatsu.nobuhiro@renesas.com>");
MODULE_DESCRIPTION("Renesas SH DMA Engine driver");
MODULE_LICENSE("GPL");