drivers/dma/fsldma.c
/*
 * Freescale MPC85xx, MPC83xx DMA Engine support
 *
 * Copyright (C) 2007 Freescale Semiconductor, Inc. All rights reserved.
 *
 * Author:
 *   Zhang Wei <wei.zhang@freescale.com>, Jul 2007
 *   Ebony Zhu <ebony.zhu@freescale.com>, May 2007
 *
 * Description:
 *   DMA engine driver for the Freescale MPC8540 DMA controller, which also
 *   fits the MPC8560, MPC8555, MPC8548, MPC8641, etc.
 *   Support for the MPC8349 DMA controller is included as well.
 *
 * This is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 */
#include <linux/init.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/interrupt.h>
#include <linux/dmaengine.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/dmapool.h>
#include <linux/of_platform.h>

#include "fsldma.h"
static void dma_init(struct fsl_dma_chan *fsl_chan)
{
	/* Reset the channel */
	DMA_OUT(fsl_chan, &fsl_chan->reg_base->mr, 0, 32);

	switch (fsl_chan->feature & FSL_DMA_IP_MASK) {
	case FSL_DMA_IP_85XX:
		/* Set the channel to below modes:
		 * EIE - Error interrupt enable
		 * EOSIE - End of segments interrupt enable (basic mode)
		 * EOLNIE - End of links interrupt enable
		 */
		DMA_OUT(fsl_chan, &fsl_chan->reg_base->mr, FSL_DMA_MR_EIE
				| FSL_DMA_MR_EOLNIE | FSL_DMA_MR_EOSIE, 32);
		break;
	case FSL_DMA_IP_83XX:
		/* Set the channel to below modes:
		 * EOTIE - End-of-transfer interrupt enable
		 */
		DMA_OUT(fsl_chan, &fsl_chan->reg_base->mr, FSL_DMA_MR_EOTIE,
				32);
		break;
	}
}
static void set_sr(struct fsl_dma_chan *fsl_chan, u32 val)
{
	DMA_OUT(fsl_chan, &fsl_chan->reg_base->sr, val, 32);
}

static u32 get_sr(struct fsl_dma_chan *fsl_chan)
{
	return DMA_IN(fsl_chan, &fsl_chan->reg_base->sr, 32);
}
static void set_desc_cnt(struct fsl_dma_chan *fsl_chan,
				struct fsl_dma_ld_hw *hw, u32 count)
{
	hw->count = CPU_TO_DMA(fsl_chan, count, 32);
}

static void set_desc_src(struct fsl_dma_chan *fsl_chan,
				struct fsl_dma_ld_hw *hw, dma_addr_t src)
{
	u64 snoop_bits;

	snoop_bits = ((fsl_chan->feature & FSL_DMA_IP_MASK) == FSL_DMA_IP_85XX)
		? ((u64)FSL_DMA_SATR_SREADTYPE_SNOOP_READ << 32) : 0;
	hw->src_addr = CPU_TO_DMA(fsl_chan, snoop_bits | src, 64);
}

static void set_desc_dest(struct fsl_dma_chan *fsl_chan,
				struct fsl_dma_ld_hw *hw, dma_addr_t dest)
{
	u64 snoop_bits;

	snoop_bits = ((fsl_chan->feature & FSL_DMA_IP_MASK) == FSL_DMA_IP_85XX)
		? ((u64)FSL_DMA_DATR_DWRITETYPE_SNOOP_WRITE << 32) : 0;
	hw->dst_addr = CPU_TO_DMA(fsl_chan, snoop_bits | dest, 64);
}

static void set_desc_next(struct fsl_dma_chan *fsl_chan,
				struct fsl_dma_ld_hw *hw, dma_addr_t next)
{
	u64 snoop_bits;

	snoop_bits = ((fsl_chan->feature & FSL_DMA_IP_MASK) == FSL_DMA_IP_83XX)
		? FSL_DMA_SNEN : 0;
	hw->next_ln_addr = CPU_TO_DMA(fsl_chan, snoop_bits | next, 64);
}
static void set_cdar(struct fsl_dma_chan *fsl_chan, dma_addr_t addr)
{
	DMA_OUT(fsl_chan, &fsl_chan->reg_base->cdar, addr | FSL_DMA_SNEN, 64);
}

static dma_addr_t get_cdar(struct fsl_dma_chan *fsl_chan)
{
	return DMA_IN(fsl_chan, &fsl_chan->reg_base->cdar, 64) & ~FSL_DMA_SNEN;
}

static void set_ndar(struct fsl_dma_chan *fsl_chan, dma_addr_t addr)
{
	DMA_OUT(fsl_chan, &fsl_chan->reg_base->ndar, addr, 64);
}

static dma_addr_t get_ndar(struct fsl_dma_chan *fsl_chan)
{
	return DMA_IN(fsl_chan, &fsl_chan->reg_base->ndar, 64);
}

static u32 get_bcr(struct fsl_dma_chan *fsl_chan)
{
	return DMA_IN(fsl_chan, &fsl_chan->reg_base->bcr, 32);
}
static int dma_is_idle(struct fsl_dma_chan *fsl_chan)
{
	u32 sr = get_sr(fsl_chan);
	return (!(sr & FSL_DMA_SR_CB)) || (sr & FSL_DMA_SR_CH);
}
static void dma_start(struct fsl_dma_chan *fsl_chan)
{
	u32 mr_set = 0;

	if (fsl_chan->feature & FSL_DMA_CHAN_PAUSE_EXT) {
		DMA_OUT(fsl_chan, &fsl_chan->reg_base->bcr, 0, 32);
		mr_set |= FSL_DMA_MR_EMP_EN;
	} else
		DMA_OUT(fsl_chan, &fsl_chan->reg_base->mr,
			DMA_IN(fsl_chan, &fsl_chan->reg_base->mr, 32)
				& ~FSL_DMA_MR_EMP_EN, 32);

	if (fsl_chan->feature & FSL_DMA_CHAN_START_EXT)
		mr_set |= FSL_DMA_MR_EMS_EN;
	else
		mr_set |= FSL_DMA_MR_CS;

	DMA_OUT(fsl_chan, &fsl_chan->reg_base->mr,
			DMA_IN(fsl_chan, &fsl_chan->reg_base->mr, 32)
			| mr_set, 32);
}
static void dma_halt(struct fsl_dma_chan *fsl_chan)
{
	int i = 0;

	DMA_OUT(fsl_chan, &fsl_chan->reg_base->mr,
		DMA_IN(fsl_chan, &fsl_chan->reg_base->mr, 32) | FSL_DMA_MR_CA,
		32);
	DMA_OUT(fsl_chan, &fsl_chan->reg_base->mr,
		DMA_IN(fsl_chan, &fsl_chan->reg_base->mr, 32) & ~(FSL_DMA_MR_CS
		| FSL_DMA_MR_EMS_EN | FSL_DMA_MR_CA), 32);

	while (!dma_is_idle(fsl_chan) && (i++ < 100))
		udelay(10);
	if (i >= 100 && !dma_is_idle(fsl_chan))
		dev_err(fsl_chan->dev, "DMA halt timeout!\n");
}
static void set_ld_eol(struct fsl_dma_chan *fsl_chan,
			struct fsl_desc_sw *desc)
{
	desc->hw.next_ln_addr = CPU_TO_DMA(fsl_chan,
		DMA_TO_CPU(fsl_chan, desc->hw.next_ln_addr, 64) | FSL_DMA_EOL,
		64);
}
static void append_ld_queue(struct fsl_dma_chan *fsl_chan,
			struct fsl_desc_sw *new_desc)
{
	struct fsl_desc_sw *queue_tail = to_fsl_desc(fsl_chan->ld_queue.prev);

	if (list_empty(&fsl_chan->ld_queue))
		return;

	/* Link to the new descriptor physical address and
	 * enable the End-of-segment interrupt for
	 * the last link descriptor
	 * (the previous node's next link descriptor).
	 *
	 * For FSL_DMA_IP_83xx, the snoop enable bit needs to be set.
	 */
	queue_tail->hw.next_ln_addr = CPU_TO_DMA(fsl_chan,
		new_desc->async_tx.phys | FSL_DMA_EOSIE |
		(((fsl_chan->feature & FSL_DMA_IP_MASK)
			== FSL_DMA_IP_83XX) ? FSL_DMA_SNEN : 0), 64);
}
/**
 * fsl_chan_set_src_loop_size - Set source address hold transfer size
 * @fsl_chan : Freescale DMA channel
 * @size     : Address loop size, 0 to disable the loop
 *
 * Set the source address hold (loop) transfer size. When the DMA transfers
 * data from the source address (SA), it cycles through a window of @size
 * addresses: with a loop size of 4, it reads from SA, SA + 1, SA + 2,
 * SA + 3, then loops back to SA, SA + 1 ... and so on.
 */
static void fsl_chan_set_src_loop_size(struct fsl_dma_chan *fsl_chan, int size)
{
	switch (size) {
	case 0:
		DMA_OUT(fsl_chan, &fsl_chan->reg_base->mr,
			DMA_IN(fsl_chan, &fsl_chan->reg_base->mr, 32) &
			(~FSL_DMA_MR_SAHE), 32);
		break;
	case 1:
	case 2:
	case 4:
	case 8:
		DMA_OUT(fsl_chan, &fsl_chan->reg_base->mr,
			DMA_IN(fsl_chan, &fsl_chan->reg_base->mr, 32) |
			FSL_DMA_MR_SAHE | (__ilog2(size) << 14),
			32);
		break;
	}
}
/**
 * fsl_chan_set_dest_loop_size - Set destination address hold transfer size
 * @fsl_chan : Freescale DMA channel
 * @size     : Address loop size, 0 to disable the loop
 *
 * Set the destination address hold (loop) transfer size. When the DMA
 * transfers data to the destination address (TA), it cycles through a
 * window of @size addresses: with a loop size of 4, it writes to TA,
 * TA + 1, TA + 2, TA + 3, then loops back to TA, TA + 1 ... and so on.
 */
static void fsl_chan_set_dest_loop_size(struct fsl_dma_chan *fsl_chan, int size)
{
	switch (size) {
	case 0:
		DMA_OUT(fsl_chan, &fsl_chan->reg_base->mr,
			DMA_IN(fsl_chan, &fsl_chan->reg_base->mr, 32) &
			(~FSL_DMA_MR_DAHE), 32);
		break;
	case 1:
	case 2:
	case 4:
	case 8:
		DMA_OUT(fsl_chan, &fsl_chan->reg_base->mr,
			DMA_IN(fsl_chan, &fsl_chan->reg_base->mr, 32) |
			FSL_DMA_MR_DAHE | (__ilog2(size) << 16),
			32);
		break;
	}
}
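/*
 * Illustrative sketch (not part of the driver, hence compiled out): how a
 * holder of the channel might combine the two loop-size hooks, which are
 * exported through struct fsl_dma_chan in the probe path below. The
 * scenario (a 4-byte device FIFO at the source address) and the function
 * name are assumptions made for the example.
 */
#if 0
static void example_fifo_read_setup(struct fsl_dma_chan *fsl_chan)
{
	/* Keep rereading SA, SA + 1, SA + 2, SA + 3: a 4-byte FIFO window */
	fsl_chan->set_src_loop_size(fsl_chan, 4);

	/* Let the destination address advance normally */
	fsl_chan->set_dest_loop_size(fsl_chan, 0);
}
#endif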
/**
 * fsl_chan_toggle_ext_pause - Toggle channel external pause status
 * @fsl_chan : Freescale DMA channel
 * @size     : Pause control size, 0 to disable external pause control.
 *             The maximum is 1024.
 *
 * The Freescale DMA channel can be controlled by the external signal DREQ#.
 * The pause control size is how many bytes are allowed to transfer before
 * pausing the channel, after which a new assertion of DREQ# resumes channel
 * operation.
 */
static void fsl_chan_toggle_ext_pause(struct fsl_dma_chan *fsl_chan, int size)
{
	if (size > 1024)
		return;

	if (size) {
		DMA_OUT(fsl_chan, &fsl_chan->reg_base->mr,
			DMA_IN(fsl_chan, &fsl_chan->reg_base->mr, 32)
				| ((__ilog2(size) << 24) & 0x0f000000),
			32);
		fsl_chan->feature |= FSL_DMA_CHAN_PAUSE_EXT;
	} else
		fsl_chan->feature &= ~FSL_DMA_CHAN_PAUSE_EXT;
}
/**
 * fsl_chan_toggle_ext_start - Toggle channel external start status
 * @fsl_chan : Freescale DMA channel
 * @enable   : 0 is disabled, 1 is enabled.
 *
 * When external start is enabled, the channel can be started by the
 * external DMA start pin, so dma_start() does not start the transfer
 * immediately; the DMA channel waits for the control pin to be asserted.
 */
static void fsl_chan_toggle_ext_start(struct fsl_dma_chan *fsl_chan, int enable)
{
	if (enable)
		fsl_chan->feature |= FSL_DMA_CHAN_START_EXT;
	else
		fsl_chan->feature &= ~FSL_DMA_CHAN_START_EXT;
}
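/*
 * Illustrative sketch (compiled out): with external start enabled, a
 * submitted descriptor chain sits armed until the DMA start pin is
 * asserted; dma_start() above then sets EMS_EN instead of CS. The helper
 * name is an assumption made for the example.
 */
#if 0
static void example_arm_external_start(struct fsl_dma_chan *fsl_chan)
{
	fsl_chan->toggle_ext_start(fsl_chan, 1);	/* wait for the pin */
}
#endif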
static dma_cookie_t fsl_dma_tx_submit(struct dma_async_tx_descriptor *tx)
{
	struct fsl_desc_sw *desc = tx_to_fsl_desc(tx);
	struct fsl_dma_chan *fsl_chan = to_fsl_chan(tx->chan);
	unsigned long flags;
	dma_cookie_t cookie;

	/* cookie increment and adding to ld_queue must be atomic */
	spin_lock_irqsave(&fsl_chan->desc_lock, flags);

	cookie = fsl_chan->common.cookie;
	cookie++;
	if (cookie < 0)
		cookie = 1;
	desc->async_tx.cookie = cookie;
	fsl_chan->common.cookie = desc->async_tx.cookie;

	append_ld_queue(fsl_chan, desc);
	list_splice_init(&desc->async_tx.tx_list, fsl_chan->ld_queue.prev);

	spin_unlock_irqrestore(&fsl_chan->desc_lock, flags);

	return cookie;
}
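/*
 * Note on the cookie arithmetic above: dma_cookie_t is a signed int, so
 * once the per-channel counter wraps past INT_MAX the increment goes
 * negative and is reset to 1; zero and negative cookies are reserved by
 * the dmaengine core for "unassigned" and error states.
 */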
/**
 * fsl_dma_alloc_descriptor - Allocate a descriptor from the channel's DMA pool.
 * @fsl_chan : Freescale DMA channel
 *
 * Return - The allocated descriptor, or NULL on failure.
 */
static struct fsl_desc_sw *fsl_dma_alloc_descriptor(
					struct fsl_dma_chan *fsl_chan)
{
	dma_addr_t pdesc;
	struct fsl_desc_sw *desc_sw;

	desc_sw = dma_pool_alloc(fsl_chan->desc_pool, GFP_ATOMIC, &pdesc);
	if (desc_sw) {
		memset(desc_sw, 0, sizeof(struct fsl_desc_sw));
		dma_async_tx_descriptor_init(&desc_sw->async_tx,
						&fsl_chan->common);
		desc_sw->async_tx.tx_submit = fsl_dma_tx_submit;
		INIT_LIST_HEAD(&desc_sw->async_tx.tx_list);
		desc_sw->async_tx.phys = pdesc;
	}

	return desc_sw;
}
/**
 * fsl_dma_alloc_chan_resources - Allocate resources for a DMA channel.
 * @fsl_chan : Freescale DMA channel
 *
 * This function will create a dma pool for descriptor allocation.
 *
 * Return - The number of descriptors allocated.
 */
static int fsl_dma_alloc_chan_resources(struct dma_chan *chan)
{
	struct fsl_dma_chan *fsl_chan = to_fsl_chan(chan);

	/* Has this channel already been allocated? */
	if (fsl_chan->desc_pool)
		return 1;

	/* The descriptor must be aligned to 32 bytes to meet the FSL DMA
	 * specification requirement.
	 */
	fsl_chan->desc_pool = dma_pool_create("fsl_dma_engine_desc_pool",
			fsl_chan->dev, sizeof(struct fsl_desc_sw),
			32, 0);
	if (!fsl_chan->desc_pool) {
		dev_err(fsl_chan->dev, "No memory for channel %d "
			"descriptor dma pool.\n", fsl_chan->id);
		return 0;
	}

	return 1;
}
/**
 * fsl_dma_free_chan_resources - Free all resources of the channel.
 * @fsl_chan : Freescale DMA channel
 */
static void fsl_dma_free_chan_resources(struct dma_chan *chan)
{
	struct fsl_dma_chan *fsl_chan = to_fsl_chan(chan);
	struct fsl_desc_sw *desc, *_desc;
	unsigned long flags;

	dev_dbg(fsl_chan->dev, "Free all channel resources.\n");
	spin_lock_irqsave(&fsl_chan->desc_lock, flags);
	list_for_each_entry_safe(desc, _desc, &fsl_chan->ld_queue, node) {
#ifdef FSL_DMA_LD_DEBUG
		dev_dbg(fsl_chan->dev,
				"LD %p will be released.\n", desc);
#endif
		list_del(&desc->node);
		/* free link descriptor */
		dma_pool_free(fsl_chan->desc_pool, desc, desc->async_tx.phys);
	}
	spin_unlock_irqrestore(&fsl_chan->desc_lock, flags);
	dma_pool_destroy(fsl_chan->desc_pool);

	fsl_chan->desc_pool = NULL;
}
static struct dma_async_tx_descriptor *
fsl_dma_prep_interrupt(struct dma_chan *chan, unsigned long flags)
{
	struct fsl_dma_chan *fsl_chan;
	struct fsl_desc_sw *new;

	if (!chan)
		return NULL;

	fsl_chan = to_fsl_chan(chan);

	new = fsl_dma_alloc_descriptor(fsl_chan);
	if (!new) {
		dev_err(fsl_chan->dev, "No free memory for link descriptor\n");
		return NULL;
	}

	new->async_tx.cookie = -EBUSY;
	new->async_tx.flags = flags;

	/* Insert the link descriptor into the LD ring */
	list_add_tail(&new->node, &new->async_tx.tx_list);

	/* Set End-of-link on the last link descriptor of the new list */
	set_ld_eol(fsl_chan, new);

	return &new->async_tx;
}
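/*
 * Illustrative sketch (compiled out): a DMA_INTERRUPT descriptor is a NULL
 * transfer whose only effect is a completion interrupt (delivered as a
 * Programming Error event; see fsl_dma_chan_do_interrupt below). The
 * function name is an assumption made for the example.
 */
#if 0
static void example_submit_interrupt_descriptor(struct dma_chan *chan)
{
	struct dma_async_tx_descriptor *tx;

	tx = chan->device->device_prep_dma_interrupt(chan, 0);
	if (tx) {
		tx->tx_submit(tx);			/* fsl_dma_tx_submit */
		chan->device->device_issue_pending(chan);
	}
}
#endif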
static struct dma_async_tx_descriptor *fsl_dma_prep_memcpy(
	struct dma_chan *chan, dma_addr_t dma_dest, dma_addr_t dma_src,
	size_t len, unsigned long flags)
{
	struct fsl_dma_chan *fsl_chan;
	struct fsl_desc_sw *first = NULL, *prev = NULL, *new;
	size_t copy;
	LIST_HEAD(link_chain);

	if (!chan)
		return NULL;

	if (!len)
		return NULL;

	fsl_chan = to_fsl_chan(chan);

	do {
		/* Allocate the link descriptor from the DMA pool */
		new = fsl_dma_alloc_descriptor(fsl_chan);
		if (!new) {
			dev_err(fsl_chan->dev,
					"No free memory for link descriptor\n");
			return NULL;
		}
#ifdef FSL_DMA_LD_DEBUG
		dev_dbg(fsl_chan->dev, "new link desc alloc %p\n", new);
#endif

		copy = min(len, (size_t)FSL_DMA_BCR_MAX_CNT);

		set_desc_cnt(fsl_chan, &new->hw, copy);
		set_desc_src(fsl_chan, &new->hw, dma_src);
		set_desc_dest(fsl_chan, &new->hw, dma_dest);

		if (!first)
			first = new;
		else
			set_desc_next(fsl_chan, &prev->hw, new->async_tx.phys);

		new->async_tx.cookie = 0;
		async_tx_ack(&new->async_tx);

		prev = new;
		len -= copy;
		dma_src += copy;
		dma_dest += copy;

		/* Insert the link descriptor into the LD ring */
		list_add_tail(&new->node, &first->async_tx.tx_list);
	} while (len);

	new->async_tx.flags = flags; /* client is in control of this ack */
	new->async_tx.cookie = -EBUSY;

	/* Set End-of-link on the last link descriptor of the new list */
	set_ld_eol(fsl_chan, new);

	return first ? &first->async_tx : NULL;
}
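/*
 * Illustrative sketch (compiled out): the usual client sequence against
 * this driver through the generic dmaengine hooks wired up in
 * of_fsl_dma_probe below. The caller is assumed to supply a channel and
 * DMA-mapped addresses; error handling is minimal and the function name
 * is an assumption made for the example.
 */
#if 0
static dma_cookie_t example_issue_memcpy(struct dma_chan *chan,
		dma_addr_t dst, dma_addr_t src, size_t len)
{
	struct dma_async_tx_descriptor *tx;
	dma_cookie_t cookie;

	/* Build the link descriptor chain (fsl_dma_prep_memcpy above) */
	tx = chan->device->device_prep_dma_memcpy(chan, dst, src, len, 0);
	if (!tx)
		return -ENOMEM;

	/* Assign a cookie and append to ld_queue (fsl_dma_tx_submit) */
	cookie = tx->tx_submit(tx);

	/* Kick the hardware (fsl_dma_memcpy_issue_pending) */
	chan->device->device_issue_pending(chan);

	return cookie;
}
#endif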
/**
 * fsl_dma_update_completed_cookie - Update the completed cookie.
 * @fsl_chan : Freescale DMA channel
 */
static void fsl_dma_update_completed_cookie(struct fsl_dma_chan *fsl_chan)
{
	struct fsl_desc_sw *cur_desc, *desc;
	dma_addr_t ld_phy;

	ld_phy = get_cdar(fsl_chan) & FSL_DMA_NLDA_MASK;

	if (ld_phy) {
		cur_desc = NULL;
		list_for_each_entry(desc, &fsl_chan->ld_queue, node)
			if (desc->async_tx.phys == ld_phy) {
				cur_desc = desc;
				break;
			}

		if (cur_desc && cur_desc->async_tx.cookie) {
			if (dma_is_idle(fsl_chan))
				fsl_chan->completed_cookie =
					cur_desc->async_tx.cookie;
			else
				fsl_chan->completed_cookie =
					cur_desc->async_tx.cookie - 1;
		}
	}
}
/**
 * fsl_chan_ld_cleanup - Clean up link descriptors
 * @fsl_chan : Freescale DMA channel
 *
 * This function cleans up the ld_queue of the DMA channel: every completed
 * link descriptor is removed from the queue, its callback is run, and the
 * descriptor is freed back to the channel's DMA pool.
 */
static void fsl_chan_ld_cleanup(struct fsl_dma_chan *fsl_chan)
{
	struct fsl_desc_sw *desc, *_desc;
	unsigned long flags;

	spin_lock_irqsave(&fsl_chan->desc_lock, flags);

	dev_dbg(fsl_chan->dev, "chan completed_cookie = %d\n",
			fsl_chan->completed_cookie);
	list_for_each_entry_safe(desc, _desc, &fsl_chan->ld_queue, node) {
		dma_async_tx_callback callback;
		void *callback_param;

		if (dma_async_is_complete(desc->async_tx.cookie,
			    fsl_chan->completed_cookie, fsl_chan->common.cookie)
				== DMA_IN_PROGRESS)
			break;

		callback = desc->async_tx.callback;
		callback_param = desc->async_tx.callback_param;

		/* Remove from the ld_queue list */
		list_del(&desc->node);

		dev_dbg(fsl_chan->dev, "link descriptor %p will be recycled.\n",
				desc);
		dma_pool_free(fsl_chan->desc_pool, desc, desc->async_tx.phys);

		/* Run the link descriptor callback function */
		if (callback) {
			spin_unlock_irqrestore(&fsl_chan->desc_lock, flags);
			dev_dbg(fsl_chan->dev, "link descriptor %p callback\n",
					desc);
			callback(callback_param);
			spin_lock_irqsave(&fsl_chan->desc_lock, flags);
		}
	}
	spin_unlock_irqrestore(&fsl_chan->desc_lock, flags);
}
/**
 * fsl_chan_xfer_ld_queue - Transfer link descriptors in the channel ld_queue.
 * @fsl_chan : Freescale DMA channel
 */
static void fsl_chan_xfer_ld_queue(struct fsl_dma_chan *fsl_chan)
{
	struct list_head *ld_node;
	dma_addr_t next_dest_addr;
	unsigned long flags;

	if (!dma_is_idle(fsl_chan))
		return;

	dma_halt(fsl_chan);

	/* If there are link descriptors in the queue that have not been
	 * transferred, we need to start the controller.
	 */
	spin_lock_irqsave(&fsl_chan->desc_lock, flags);

	/* Find the first untransferred descriptor */
	for (ld_node = fsl_chan->ld_queue.next;
		(ld_node != &fsl_chan->ld_queue)
			&& (dma_async_is_complete(
				to_fsl_desc(ld_node)->async_tx.cookie,
				fsl_chan->completed_cookie,
				fsl_chan->common.cookie) == DMA_SUCCESS);
		ld_node = ld_node->next);

	spin_unlock_irqrestore(&fsl_chan->desc_lock, flags);

	if (ld_node != &fsl_chan->ld_queue) {
		/* Get the ld start address from ld_queue */
		next_dest_addr = to_fsl_desc(ld_node)->async_tx.phys;
		dev_dbg(fsl_chan->dev, "xfer LDs starting from %p\n",
				(void *)next_dest_addr);
		set_cdar(fsl_chan, next_dest_addr);
		dma_start(fsl_chan);
	} else {
		set_cdar(fsl_chan, 0);
		set_ndar(fsl_chan, 0);
	}
}
/**
 * fsl_dma_memcpy_issue_pending - Issue the DMA start command
 * @fsl_chan : Freescale DMA channel
 */
static void fsl_dma_memcpy_issue_pending(struct dma_chan *chan)
{
	struct fsl_dma_chan *fsl_chan = to_fsl_chan(chan);

#ifdef FSL_DMA_LD_DEBUG
	struct fsl_desc_sw *ld;
	unsigned long flags;

	spin_lock_irqsave(&fsl_chan->desc_lock, flags);
	if (list_empty(&fsl_chan->ld_queue)) {
		spin_unlock_irqrestore(&fsl_chan->desc_lock, flags);
		return;
	}

	dev_dbg(fsl_chan->dev, "--memcpy issue--\n");
	list_for_each_entry(ld, &fsl_chan->ld_queue, node) {
		int i;
		dev_dbg(fsl_chan->dev, "Ch %d, LD %08x\n",
				fsl_chan->id, ld->async_tx.phys);
		for (i = 0; i < 8; i++)
			dev_dbg(fsl_chan->dev, "LD offset %d: %08x\n",
					i, *(((u32 *)&ld->hw) + i));
	}
	dev_dbg(fsl_chan->dev, "----------------\n");
	spin_unlock_irqrestore(&fsl_chan->desc_lock, flags);
#endif

	fsl_chan_xfer_ld_queue(fsl_chan);
}
/**
 * fsl_dma_is_complete - Determine the DMA status
 * @fsl_chan : Freescale DMA channel
 */
static enum dma_status fsl_dma_is_complete(struct dma_chan *chan,
					dma_cookie_t cookie,
					dma_cookie_t *done,
					dma_cookie_t *used)
{
	struct fsl_dma_chan *fsl_chan = to_fsl_chan(chan);
	dma_cookie_t last_used;
	dma_cookie_t last_complete;

	fsl_chan_ld_cleanup(fsl_chan);

	last_used = chan->cookie;
	last_complete = fsl_chan->completed_cookie;

	if (done)
		*done = last_complete;

	if (used)
		*used = last_used;

	return dma_async_is_complete(cookie, last_complete, last_used);
}
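/*
 * Illustrative sketch (compiled out): polling a submitted cookie. The
 * generic dma_async_is_tx_complete() helper lands in fsl_dma_is_complete
 * above via the device_is_tx_complete hook; busy-waiting like this is
 * acceptable only in example code.
 */
#if 0
static void example_wait_for_cookie(struct dma_chan *chan, dma_cookie_t cookie)
{
	while (dma_async_is_tx_complete(chan, cookie, NULL, NULL)
			== DMA_IN_PROGRESS)
		cpu_relax();
}
#endif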
static irqreturn_t fsl_dma_chan_do_interrupt(int irq, void *data)
{
	struct fsl_dma_chan *fsl_chan = (struct fsl_dma_chan *)data;
	u32 stat;
	int update_cookie = 0;
	int xfer_ld_q = 0;

	stat = get_sr(fsl_chan);
	dev_dbg(fsl_chan->dev, "event: channel %d, stat = 0x%x\n",
						fsl_chan->id, stat);
	set_sr(fsl_chan, stat);		/* Clear the event register */

	stat &= ~(FSL_DMA_SR_CB | FSL_DMA_SR_CH);
	if (!stat)
		return IRQ_NONE;

	if (stat & FSL_DMA_SR_TE)
		dev_err(fsl_chan->dev, "Transfer Error!\n");

	/* Programming Error
	 * The DMA_INTERRUPT async_tx is a NULL transfer, which will
	 * trigger a PE interrupt.
	 */
	if (stat & FSL_DMA_SR_PE) {
		dev_dbg(fsl_chan->dev, "event: Programming Error INT\n");
		if (get_bcr(fsl_chan) == 0) {
			/* BCR register is 0, this is a DMA_INTERRUPT async_tx.
			 * Now, update the completed cookie, and continue the
			 * next uncompleted transfer.
			 */
			update_cookie = 1;
			xfer_ld_q = 1;
		}
		stat &= ~FSL_DMA_SR_PE;
	}

	/* If the link descriptor segment transfer finishes,
	 * we will recycle the used descriptor.
	 */
	if (stat & FSL_DMA_SR_EOSI) {
		dev_dbg(fsl_chan->dev, "event: End-of-segments INT\n");
		dev_dbg(fsl_chan->dev, "event: clndar %p, nlndar %p\n",
			(void *)get_cdar(fsl_chan), (void *)get_ndar(fsl_chan));
		stat &= ~FSL_DMA_SR_EOSI;
		update_cookie = 1;
	}

	/* For MPC8349, the EOCDI event needs to update the cookie
	 * and start the next transfer if one exists.
	 */
	if (stat & FSL_DMA_SR_EOCDI) {
		dev_dbg(fsl_chan->dev, "event: End-of-Chain link INT\n");
		stat &= ~FSL_DMA_SR_EOCDI;
		update_cookie = 1;
		xfer_ld_q = 1;
	}

	/* If the current transfer is the end-of-transfer,
	 * we should clear the Channel Start bit to prepare
	 * for the next transfer.
	 */
	if (stat & FSL_DMA_SR_EOLNI) {
		dev_dbg(fsl_chan->dev, "event: End-of-link INT\n");
		stat &= ~FSL_DMA_SR_EOLNI;
		xfer_ld_q = 1;
	}

	if (update_cookie)
		fsl_dma_update_completed_cookie(fsl_chan);
	if (xfer_ld_q)
		fsl_chan_xfer_ld_queue(fsl_chan);
	if (stat)
		dev_dbg(fsl_chan->dev, "event: unhandled sr 0x%02x\n",
					stat);

	dev_dbg(fsl_chan->dev, "event: Exit\n");
	tasklet_schedule(&fsl_chan->tasklet);
	return IRQ_HANDLED;
}
static irqreturn_t fsl_dma_do_interrupt(int irq, void *data)
{
	struct fsl_dma_device *fdev = (struct fsl_dma_device *)data;
	u32 gsr;
	int ch_nr;

	gsr = (fdev->feature & FSL_DMA_BIG_ENDIAN) ? in_be32(fdev->reg_base)
			: in_le32(fdev->reg_base);
	ch_nr = (32 - ffs(gsr)) / 8;

	return fdev->chan[ch_nr] ? fsl_dma_chan_do_interrupt(irq,
			fdev->chan[ch_nr]) : IRQ_NONE;
}
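/*
 * Worked example for the gsr decoding above: each channel owns one byte of
 * the 32-bit global status, channel 0 in the most significant byte. ffs()
 * returns the 1-based index of the least significant set bit, so for
 * gsr = 0x00900000 (events in channel 1's byte, bits 23..16) ffs() = 21
 * and ch_nr = (32 - 21) / 8 = 1.
 */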
static void dma_do_tasklet(unsigned long data)
{
	struct fsl_dma_chan *fsl_chan = (struct fsl_dma_chan *)data;
	fsl_chan_ld_cleanup(fsl_chan);
}
static int __devinit fsl_dma_chan_probe(struct fsl_dma_device *fdev,
	struct device_node *node, u32 feature, const char *compatible)
{
	struct fsl_dma_chan *new_fsl_chan;
	int err;

	/* alloc channel */
	new_fsl_chan = kzalloc(sizeof(struct fsl_dma_chan), GFP_KERNEL);
	if (!new_fsl_chan) {
		dev_err(fdev->dev, "No free memory for allocating "
				"dma channels!\n");
		return -ENOMEM;
	}

	/* get dma channel register base */
	err = of_address_to_resource(node, 0, &new_fsl_chan->reg);
	if (err) {
		dev_err(fdev->dev, "Can't get %s property 'reg'\n",
				node->full_name);
		goto err_no_reg;
	}

	new_fsl_chan->feature = feature;

	if (!fdev->feature)
		fdev->feature = new_fsl_chan->feature;

	/* If the DMA device's feature is different from its channels',
	 * report the bug.
	 */
	WARN_ON(fdev->feature != new_fsl_chan->feature);

	new_fsl_chan->dev = fdev->dev;
	new_fsl_chan->reg_base = ioremap(new_fsl_chan->reg.start,
			new_fsl_chan->reg.end - new_fsl_chan->reg.start + 1);

	/* Channel register blocks are 0x80 bytes apart, starting at offset
	 * 0x100 from the controller base, so the offset encodes the id. */
	new_fsl_chan->id = ((new_fsl_chan->reg.start - 0x100) & 0xfff) >> 7;
	if (new_fsl_chan->id >= FSL_DMA_MAX_CHANS_PER_DEVICE) {
		dev_err(fdev->dev, "There is no channel %d!\n",
				new_fsl_chan->id);
		err = -EINVAL;
		goto err_no_chan;
	}
	fdev->chan[new_fsl_chan->id] = new_fsl_chan;
	tasklet_init(&new_fsl_chan->tasklet, dma_do_tasklet,
			(unsigned long)new_fsl_chan);

	/* Init the channel */
	dma_init(new_fsl_chan);

	/* Clear cdar registers */
	set_cdar(new_fsl_chan, 0);

	switch (new_fsl_chan->feature & FSL_DMA_IP_MASK) {
	case FSL_DMA_IP_85XX:
		new_fsl_chan->toggle_ext_start = fsl_chan_toggle_ext_start;
		new_fsl_chan->toggle_ext_pause = fsl_chan_toggle_ext_pause;
		/* fall through: 85xx channels also get the loop-size hooks */
	case FSL_DMA_IP_83XX:
		new_fsl_chan->set_src_loop_size = fsl_chan_set_src_loop_size;
		new_fsl_chan->set_dest_loop_size = fsl_chan_set_dest_loop_size;
	}

	spin_lock_init(&new_fsl_chan->desc_lock);
	INIT_LIST_HEAD(&new_fsl_chan->ld_queue);

	new_fsl_chan->common.device = &fdev->common;

	/* Add the channel to the DMA device channel list */
	list_add_tail(&new_fsl_chan->common.device_node,
			&fdev->common.channels);
	fdev->common.chancnt++;

	new_fsl_chan->irq = irq_of_parse_and_map(node, 0);
	if (new_fsl_chan->irq != NO_IRQ) {
		err = request_irq(new_fsl_chan->irq,
					&fsl_dma_chan_do_interrupt, IRQF_SHARED,
					"fsldma-channel", new_fsl_chan);
		if (err) {
			dev_err(fdev->dev, "DMA channel %s request_irq error "
				"with return %d\n", node->full_name, err);
			goto err_no_irq;
		}
	}

	dev_info(fdev->dev, "#%d (%s), irq %d\n", new_fsl_chan->id,
		 compatible,
		 new_fsl_chan->irq != NO_IRQ ? new_fsl_chan->irq : fdev->irq);

	return 0;

err_no_irq:
	list_del(&new_fsl_chan->common.device_node);
err_no_chan:
	iounmap(new_fsl_chan->reg_base);
err_no_reg:
	kfree(new_fsl_chan);
	return err;
}
static void fsl_dma_chan_remove(struct fsl_dma_chan *fchan)
{
	if (fchan->irq != NO_IRQ)
		free_irq(fchan->irq, fchan);
	list_del(&fchan->common.device_node);
	iounmap(fchan->reg_base);
	kfree(fchan);
}
static int __devinit of_fsl_dma_probe(struct of_device *dev,
			const struct of_device_id *match)
{
	int err;
	struct fsl_dma_device *fdev;
	struct device_node *child;

	fdev = kzalloc(sizeof(struct fsl_dma_device), GFP_KERNEL);
	if (!fdev) {
		dev_err(&dev->dev, "Not enough memory for 'priv'\n");
		return -ENOMEM;
	}
	fdev->dev = &dev->dev;
	INIT_LIST_HEAD(&fdev->common.channels);

	/* get DMA controller register base */
	err = of_address_to_resource(dev->node, 0, &fdev->reg);
	if (err) {
		dev_err(&dev->dev, "Can't get %s property 'reg'\n",
				dev->node->full_name);
		goto err_no_reg;
	}

	dev_info(&dev->dev, "Probing the Freescale DMA driver for %s "
			"controller at %p...\n",
			match->compatible, (void *)fdev->reg.start);
	fdev->reg_base = ioremap(fdev->reg.start, fdev->reg.end
						- fdev->reg.start + 1);

	dma_cap_set(DMA_MEMCPY, fdev->common.cap_mask);
	dma_cap_set(DMA_INTERRUPT, fdev->common.cap_mask);
	fdev->common.device_alloc_chan_resources = fsl_dma_alloc_chan_resources;
	fdev->common.device_free_chan_resources = fsl_dma_free_chan_resources;
	fdev->common.device_prep_dma_interrupt = fsl_dma_prep_interrupt;
	fdev->common.device_prep_dma_memcpy = fsl_dma_prep_memcpy;
	fdev->common.device_is_tx_complete = fsl_dma_is_complete;
	fdev->common.device_issue_pending = fsl_dma_memcpy_issue_pending;
	fdev->common.dev = &dev->dev;

	fdev->irq = irq_of_parse_and_map(dev->node, 0);
	if (fdev->irq != NO_IRQ) {
		err = request_irq(fdev->irq, &fsl_dma_do_interrupt, IRQF_SHARED,
					"fsldma-device", fdev);
		if (err) {
			dev_err(&dev->dev, "DMA device request_irq error "
				"with return %d\n", err);
			goto err;
		}
	}

	dev_set_drvdata(&(dev->dev), fdev);

	/* We cannot use of_platform_bus_probe() because there is no
	 * of_platform_bus_remove. Instead, we manually instantiate every DMA
	 * channel object.
	 */
	for_each_child_of_node(dev->node, child) {
		if (of_device_is_compatible(child, "fsl,eloplus-dma-channel"))
			fsl_dma_chan_probe(fdev, child,
				FSL_DMA_IP_85XX | FSL_DMA_BIG_ENDIAN,
				"fsl,eloplus-dma-channel");
		if (of_device_is_compatible(child, "fsl,elo-dma-channel"))
			fsl_dma_chan_probe(fdev, child,
				FSL_DMA_IP_83XX | FSL_DMA_LITTLE_ENDIAN,
				"fsl,elo-dma-channel");
	}

	dma_async_device_register(&fdev->common);
	return 0;

err:
	iounmap(fdev->reg_base);
err_no_reg:
	kfree(fdev);
	return err;
}
static int of_fsl_dma_remove(struct of_device *of_dev)
{
	struct fsl_dma_device *fdev;
	unsigned int i;

	fdev = dev_get_drvdata(&of_dev->dev);

	dma_async_device_unregister(&fdev->common);

	for (i = 0; i < FSL_DMA_MAX_CHANS_PER_DEVICE; i++)
		if (fdev->chan[i])
			fsl_dma_chan_remove(fdev->chan[i]);

	if (fdev->irq != NO_IRQ)
		free_irq(fdev->irq, fdev);

	iounmap(fdev->reg_base);

	kfree(fdev);
	dev_set_drvdata(&of_dev->dev, NULL);

	return 0;
}
static struct of_device_id of_fsl_dma_ids[] = {
	{ .compatible = "fsl,eloplus-dma", },
	{ .compatible = "fsl,elo-dma", },
	{}
};

static struct of_platform_driver of_fsl_dma_driver = {
	.name = "fsl-elo-dma",
	.match_table = of_fsl_dma_ids,
	.probe = of_fsl_dma_probe,
	.remove = of_fsl_dma_remove,
};
static __init int of_fsl_dma_init(void)
{
	int ret;

	pr_info("Freescale Elo / Elo Plus DMA driver\n");

	ret = of_register_platform_driver(&of_fsl_dma_driver);
	if (ret)
		pr_err("fsldma: failed to register platform driver\n");

	return ret;
}

static void __exit of_fsl_dma_exit(void)
{
	of_unregister_platform_driver(&of_fsl_dma_driver);
}

subsys_initcall(of_fsl_dma_init);
module_exit(of_fsl_dma_exit);

MODULE_DESCRIPTION("Freescale Elo / Elo Plus DMA driver");
MODULE_LICENSE("GPL");