/*
 * EDMA3 support for DaVinci
 *
 * Copyright (C) 2006-2009 Texas Instruments.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/platform_device.h>
#include <linux/io.h>

#include <mach/edma.h>
/* Offsets matching "struct edmacc_param" */
#define PARM_OPT		0x00
#define PARM_SRC		0x04
#define PARM_A_B_CNT		0x08
#define PARM_DST		0x0c
#define PARM_SRC_DST_BIDX	0x10
#define PARM_LINK_BCNTRLD	0x14
#define PARM_SRC_DST_CIDX	0x18
#define PARM_CCNT		0x1c

#define PARM_SIZE		0x20
/* Offsets for EDMA CC global channel registers and their shadows */
#define SH_ER		0x00	/* 64 bits */
#define SH_ECR		0x08	/* 64 bits */
#define SH_ESR		0x10	/* 64 bits */
#define SH_CER		0x18	/* 64 bits */
#define SH_EER		0x20	/* 64 bits */
#define SH_EECR		0x28	/* 64 bits */
#define SH_EESR		0x30	/* 64 bits */
#define SH_SER		0x38	/* 64 bits */
#define SH_SECR		0x40	/* 64 bits */
#define SH_IER		0x50	/* 64 bits */
#define SH_IECR		0x58	/* 64 bits */
#define SH_IESR		0x60	/* 64 bits */
#define SH_IPR		0x68	/* 64 bits */
#define SH_ICR		0x70	/* 64 bits */
#define SH_IEVAL	0x78
#define SH_QER		0x80
#define SH_QEER		0x84
#define SH_QEECR	0x88
#define SH_QEESR	0x8c
#define SH_QSER		0x90
#define SH_QSECR	0x94
#define SH_SIZE		0x200
/* Offsets for EDMA CC global registers */
#define EDMA_REV	0x0000
#define EDMA_CCCFG	0x0004
#define EDMA_QCHMAP	0x0200	/* 8 registers */
#define EDMA_DMAQNUM	0x0240	/* 8 registers (4 on OMAP-L1xx) */
#define EDMA_QDMAQNUM	0x0260
#define EDMA_QUETCMAP	0x0280
#define EDMA_QUEPRI	0x0284
#define EDMA_EMR	0x0300	/* 64 bits */
#define EDMA_EMCR	0x0308	/* 64 bits */
#define EDMA_QEMR	0x0310
#define EDMA_QEMCR	0x0314
#define EDMA_CCERR	0x0318
#define EDMA_CCERRCLR	0x031c
#define EDMA_EEVAL	0x0320
#define EDMA_DRAE	0x0340	/* 4 x 64 bits */
#define EDMA_QRAE	0x0380	/* 4 registers */
#define EDMA_QUEEVTENTRY	0x0400	/* 2 x 16 registers */
#define EDMA_QSTAT	0x0600	/* 2 registers */
#define EDMA_QWMTHRA	0x0620
#define EDMA_QWMTHRB	0x0624
#define EDMA_CCSTAT	0x0640

#define EDMA_M		0x1000	/* global channel registers */
#define EDMA_ECR	0x1008
#define EDMA_ECRH	0x100C
#define EDMA_SHADOW0	0x2000	/* 4 regions shadowing global channels */
#define EDMA_PARM	0x4000	/* 128 param entries */

#define PARM_OFFSET(param_no)	(EDMA_PARM + ((param_no) << 5))
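/*
 * For reference, each PaRAM slot is PARM_SIZE (0x20) bytes, so
 * PARM_OFFSET(0) == 0x4000, PARM_OFFSET(1) == 0x4020, and
 * PARM_OFFSET(127) == 0x4fe0, the last of the 128 entries.
 */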
#define EDMA_DCHMAP	0x0100	/* 64 registers */
#define CHMAP_EXIST	BIT(24)

#define EDMA_MAX_DMACH		64
#define EDMA_MAX_PARAMENTRY	512
#define EDMA_MAX_CC		2
/*****************************************************************************/

static void __iomem *edmacc_regs_base[EDMA_MAX_CC];
static inline unsigned int edma_read(unsigned ctlr, int offset)
{
        return (unsigned int)__raw_readl(edmacc_regs_base[ctlr] + offset);
}
static inline void edma_write(unsigned ctlr, int offset, int val)
{
        __raw_writel(val, edmacc_regs_base[ctlr] + offset);
}
static inline void edma_modify(unsigned ctlr, int offset, unsigned and,
                unsigned or)
{
        unsigned val = edma_read(ctlr, offset);
        val &= and;
        val |= or;
        edma_write(ctlr, offset, val);
}
static inline void edma_and(unsigned ctlr, int offset, unsigned and)
{
        unsigned val = edma_read(ctlr, offset);
        val &= and;
        edma_write(ctlr, offset, val);
}
static inline void edma_or(unsigned ctlr, int offset, unsigned or)
{
        unsigned val = edma_read(ctlr, offset);
        val |= or;
        edma_write(ctlr, offset, val);
}
static inline unsigned int edma_read_array(unsigned ctlr, int offset, int i)
{
        return edma_read(ctlr, offset + (i << 2));
}
static inline void edma_write_array(unsigned ctlr, int offset, int i,
                unsigned val)
{
        edma_write(ctlr, offset + (i << 2), val);
}
static inline void edma_modify_array(unsigned ctlr, int offset, int i,
                unsigned and, unsigned or)
{
        edma_modify(ctlr, offset + (i << 2), and, or);
}
static inline void edma_or_array(unsigned ctlr, int offset, int i, unsigned or)
{
        edma_or(ctlr, offset + (i << 2), or);
}
static inline void edma_or_array2(unsigned ctlr, int offset, int i, int j,
                unsigned or)
{
        edma_or(ctlr, offset + ((i*2 + j) << 2), or);
}
static inline void edma_write_array2(unsigned ctlr, int offset, int i, int j,
                unsigned val)
{
        edma_write(ctlr, offset + ((i*2 + j) << 2), val);
}
static inline unsigned int edma_shadow0_read(unsigned ctlr, int offset)
{
        return edma_read(ctlr, EDMA_SHADOW0 + offset);
}
static inline unsigned int edma_shadow0_read_array(unsigned ctlr, int offset,
                int i)
{
        return edma_read(ctlr, EDMA_SHADOW0 + offset + (i << 2));
}
static inline void edma_shadow0_write(unsigned ctlr, int offset, unsigned val)
{
        edma_write(ctlr, EDMA_SHADOW0 + offset, val);
}
static inline void edma_shadow0_write_array(unsigned ctlr, int offset, int i,
                unsigned val)
{
        edma_write(ctlr, EDMA_SHADOW0 + offset + (i << 2), val);
}
static inline unsigned int edma_parm_read(unsigned ctlr, int offset,
                int param_no)
{
        return edma_read(ctlr, EDMA_PARM + offset + (param_no << 5));
}
static inline void edma_parm_write(unsigned ctlr, int offset, int param_no,
                unsigned val)
{
        edma_write(ctlr, EDMA_PARM + offset + (param_no << 5), val);
}
static inline void edma_parm_modify(unsigned ctlr, int offset, int param_no,
                unsigned and, unsigned or)
{
        edma_modify(ctlr, EDMA_PARM + offset + (param_no << 5), and, or);
}
static inline void edma_parm_and(unsigned ctlr, int offset, int param_no,
                unsigned and)
{
        edma_and(ctlr, EDMA_PARM + offset + (param_no << 5), and);
}
static inline void edma_parm_or(unsigned ctlr, int offset, int param_no,
                unsigned or)
{
        edma_or(ctlr, EDMA_PARM + offset + (param_no << 5), or);
}
/*****************************************************************************/
/* actual number of DMA channels and slots on this silicon */
struct edma {
        /* how many dma resources of each type */
        unsigned        num_channels;
        unsigned        num_region;
        unsigned        num_slots;
        unsigned        num_tc;
        unsigned        num_cc;
        enum dma_event_q        default_queue;

        /* list of channels with no event trigger; terminated by "-1" */
        const s8        *noevent;

        /* The edma_inuse bit for each PaRAM slot is clear unless the
         * channel is in use ... by ARM or DSP, for QDMA, or whatever.
         */
        DECLARE_BITMAP(edma_inuse, EDMA_MAX_PARAMENTRY);

        /* The edma_noevent bit for each channel is clear unless
         * it doesn't trigger DMA events on this platform.  It uses a
         * bit of SOC-specific initialization code.
         */
        DECLARE_BITMAP(edma_noevent, EDMA_MAX_DMACH);

        unsigned        irq_res_start;
        unsigned        irq_res_end;

        struct dma_interrupt_data {
                void (*callback)(unsigned channel, unsigned short ch_status,
                                void *data);
                void *data;
        } intr_data[EDMA_MAX_DMACH];
};

static struct edma *edma_info[EDMA_MAX_CC];
static int arch_num_cc;
/* dummy param set used to (re)initialize parameter RAM slots */
static const struct edmacc_param dummy_paramset = {
        .link_bcntrld = 0xffff,
        .ccnt = 1,
};
/*****************************************************************************/
static void map_dmach_queue(unsigned ctlr, unsigned ch_no,
                enum dma_event_q queue_no)
{
        int bit = (ch_no & 0x7) * 4;

        /* default to low priority queue */
        if (queue_no == EVENTQ_DEFAULT)
                queue_no = edma_info[ctlr]->default_queue;

        queue_no &= 7;
        edma_modify_array(ctlr, EDMA_DMAQNUM, (ch_no >> 3),
                        ~(0x7 << bit), queue_no << bit);
}
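/*
 * Worked example: for ch_no = 10, bit = (10 & 7) * 4 = 8 and the array
 * index is 10 >> 3 = 1, so the call above rewrites bits 8..10 of
 * DMAQNUM1 -- the event-queue select field for channel 10.
 */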
static void __init map_queue_tc(unsigned ctlr, int queue_no, int tc_no)
{
        int bit = queue_no * 4;
        edma_modify(ctlr, EDMA_QUETCMAP, ~(0x7 << bit), ((tc_no & 0x7) << bit));
}
static void __init assign_priority_to_queue(unsigned ctlr, int queue_no,
                int priority)
{
        int bit = queue_no * 4;
        edma_modify(ctlr, EDMA_QUEPRI, ~(0x7 << bit),
                        ((priority & 0x7) << bit));
}
/**
 * map_dmach_param - Maps channel number to param entry number
 *
 * This maps the dma channel number to param entry number.  In
 * other words, using the DMA channel mapping registers a param entry
 * can be mapped to any channel.
 *
 * Callers are responsible for ensuring the channel mapping logic is
 * included in that particular EDMA variant (e.g. dm646x).
 */
static void __init map_dmach_param(unsigned ctlr)
{
        int i;
        for (i = 0; i < EDMA_MAX_DMACH; i++)
                edma_write_array(ctlr, EDMA_DCHMAP, i, (i << 5));
}
static void setup_dma_interrupt(unsigned lch,
        void (*callback)(unsigned channel, u16 ch_status, void *data),
        void *data)
{
        unsigned ctlr;

        ctlr = EDMA_CTLR(lch);
        lch = EDMA_CHAN_SLOT(lch);

        if (!callback)
                edma_shadow0_write_array(ctlr, SH_IECR, lch >> 5,
                                (1 << (lch & 0x1f)));

        edma_info[ctlr]->intr_data[lch].callback = callback;
        edma_info[ctlr]->intr_data[lch].data = data;

        if (callback) {
                edma_shadow0_write_array(ctlr, SH_ICR, lch >> 5,
                                (1 << (lch & 0x1f)));
                edma_shadow0_write_array(ctlr, SH_IESR, lch >> 5,
                                (1 << (lch & 0x1f)));
        }
}
static int irq2ctlr(int irq)
{
        if (irq >= edma_info[0]->irq_res_start &&
                irq <= edma_info[0]->irq_res_end)
                return 0;
        else if (irq >= edma_info[1]->irq_res_start &&
                irq <= edma_info[1]->irq_res_end)
                return 1;

        return -1;
}
/******************************************************************************
 *
 * DMA interrupt handler
 *
 *****************************************************************************/
static irqreturn_t dma_irq_handler(int irq, void *data)
{
        int i;
        unsigned ctlr;
        unsigned int cnt = 0;

        ctlr = irq2ctlr(irq);

        dev_dbg(data, "dma_irq_handler\n");

        if ((edma_shadow0_read_array(ctlr, SH_IPR, 0) == 0)
            && (edma_shadow0_read_array(ctlr, SH_IPR, 1) == 0))
                return IRQ_NONE;

        while (1) {
                int j;

                if (edma_shadow0_read_array(ctlr, SH_IPR, 0))
                        j = 0;
                else if (edma_shadow0_read_array(ctlr, SH_IPR, 1))
                        j = 1;
                else
                        break;
                dev_dbg(data, "IPR%d %08x\n", j,
                                edma_shadow0_read_array(ctlr, SH_IPR, j));
                for (i = 0; i < 32; i++) {
                        int k = (j << 5) + i;

                        if (edma_shadow0_read_array(ctlr, SH_IPR, j) &
                                                        (1 << i)) {
                                /* Clear the corresponding IPR bits */
                                edma_shadow0_write_array(ctlr, SH_ICR, j,
                                                        (1 << i));
                                if (edma_info[ctlr]->intr_data[k].callback)
                                        edma_info[ctlr]->intr_data[k].callback(
                                                k, DMA_COMPLETE,
                                                edma_info[ctlr]->intr_data[k].data);
                        }
                }
                cnt++;
                if (cnt > 10)
                        break;
        }
        edma_shadow0_write(ctlr, SH_IEVAL, 1);
        return IRQ_HANDLED;
}
/******************************************************************************
 *
 * DMA error interrupt handler
 *
 *****************************************************************************/
static irqreturn_t dma_ccerr_handler(int irq, void *data)
{
        int i;
        unsigned ctlr;
        unsigned int cnt = 0;

        ctlr = irq2ctlr(irq);

        dev_dbg(data, "dma_ccerr_handler\n");

        if ((edma_read_array(ctlr, EDMA_EMR, 0) == 0) &&
            (edma_read_array(ctlr, EDMA_EMR, 1) == 0) &&
            (edma_read(ctlr, EDMA_QEMR) == 0) &&
            (edma_read(ctlr, EDMA_CCERR) == 0))
                return IRQ_NONE;

        while (1) {
                int j = -1;

                if (edma_read_array(ctlr, EDMA_EMR, 0))
                        j = 0;
                else if (edma_read_array(ctlr, EDMA_EMR, 1))
                        j = 1;
                if (j >= 0) {
                        dev_dbg(data, "EMR%d %08x\n", j,
                                        edma_read_array(ctlr, EDMA_EMR, j));
                        for (i = 0; i < 32; i++) {
                                int k = (j << 5) + i;

                                if (edma_read_array(ctlr, EDMA_EMR, j) &
                                                        (1 << i)) {
                                        /* Clear the corresponding EMR bits */
                                        edma_write_array(ctlr, EDMA_EMCR, j,
                                                        1 << i);
                                        /* Clear any SER */
                                        edma_shadow0_write_array(ctlr, SH_SECR,
                                                        j, (1 << i));
                                        if (edma_info[ctlr]->intr_data[k].callback)
                                                edma_info[ctlr]->intr_data[k].callback(
                                                        k, DMA_CC_ERROR,
                                                        edma_info[ctlr]->intr_data[k].data);
                                }
                        }
                } else if (edma_read(ctlr, EDMA_QEMR)) {
                        dev_dbg(data, "QEMR %02x\n",
                                edma_read(ctlr, EDMA_QEMR));
                        for (i = 0; i < 8; i++) {
                                if (edma_read(ctlr, EDMA_QEMR) & (1 << i)) {
                                        /* Clear the corresponding QEMR bits */
                                        edma_write(ctlr, EDMA_QEMCR, 1 << i);
                                        edma_shadow0_write(ctlr, SH_QSECR,
                                                        (1 << i));

                                        /* NOTE:  not reported!! */
                                }
                        }
                } else if (edma_read(ctlr, EDMA_CCERR)) {
                        dev_dbg(data, "CCERR %08x\n",
                                edma_read(ctlr, EDMA_CCERR));
                        /* FIXME:  CCERR.BIT(16) ignored!  much better
                         * to just write CCERRCLR with CCERR value...
                         */
                        for (i = 0; i < 8; i++) {
                                if (edma_read(ctlr, EDMA_CCERR) & (1 << i)) {
                                        /* Clear the corresponding CCERR bits */
                                        edma_write(ctlr, EDMA_CCERRCLR, 1 << i);

                                        /* NOTE:  not reported!! */
                                }
                        }
                }
                if ((edma_read_array(ctlr, EDMA_EMR, 0) == 0)
                    && (edma_read_array(ctlr, EDMA_EMR, 1) == 0)
                    && (edma_read(ctlr, EDMA_QEMR) == 0)
                    && (edma_read(ctlr, EDMA_CCERR) == 0)) {
                        break;
                }
                cnt++;
                if (cnt > 10)
                        break;
        }
        edma_write(ctlr, EDMA_EEVAL, 1);
        return IRQ_HANDLED;
}
/******************************************************************************
 *
 * Transfer controller error interrupt handlers
 *
 *****************************************************************************/

#define tc_errs_handled	false	/* disabled as long as they're NOPs */

static irqreturn_t dma_tc0err_handler(int irq, void *data)
{
        dev_dbg(data, "dma_tc0err_handler\n");
        return IRQ_HANDLED;
}

static irqreturn_t dma_tc1err_handler(int irq, void *data)
{
        dev_dbg(data, "dma_tc1err_handler\n");
        return IRQ_HANDLED;
}
static int reserve_contiguous_slots(int ctlr, unsigned int id,
                                     unsigned int num_slots,
                                     unsigned int start_slot)
{
        int i, j;
        unsigned int count = num_slots;
        int stop_slot = start_slot;
        DECLARE_BITMAP(tmp_inuse, EDMA_MAX_PARAMENTRY);

        for (i = start_slot; i < edma_info[ctlr]->num_slots; ++i) {
                j = EDMA_CHAN_SLOT(i);
                if (!test_and_set_bit(j, edma_info[ctlr]->edma_inuse)) {
                        /* Record our current beginning slot */
                        if (count == num_slots)
                                stop_slot = i;

                        count--;
                        set_bit(j, tmp_inuse);

                        if (count == 0)
                                break;
                } else {
                        clear_bit(j, tmp_inuse);

                        if (id == EDMA_CONT_PARAMS_FIXED_EXACT) {
                                stop_slot = i;
                                break;
                        } else
                                count = num_slots;
                }
        }

        /*
         * We have to clear any bits that we set
         * if we run out of parameter RAM slots, i.e. we do find a set
         * of contiguous parameter RAM slots but do not find the exact number
         * requested as we may reach the total number of parameter RAM slots
         */
        if (i == edma_info[ctlr]->num_slots)
                stop_slot = i;

        for (j = start_slot; j < stop_slot; j++)
                if (test_bit(j, tmp_inuse))
                        clear_bit(j, edma_info[ctlr]->edma_inuse);

        if (count)
                return -EBUSY;

        for (j = i - num_slots + 1; j <= i; ++j)
                memcpy_toio(edmacc_regs_base[ctlr] + PARM_OFFSET(j),
                        &dummy_paramset, PARM_SIZE);

        return EDMA_CTLR_CHAN(ctlr, i - num_slots + 1);
}
/*-----------------------------------------------------------------------*/

/* Resource alloc/free:  dma channels, parameter RAM slots */
/**
 * edma_alloc_channel - allocate DMA channel and paired parameter RAM
 * @channel: specific channel to allocate; negative for "any unmapped channel"
 * @callback: optional; to be issued on DMA completion or errors
 * @data: passed to callback
 * @eventq_no: an EVENTQ_* constant, used to choose which Transfer
 *	Controller (TC) executes requests using this channel.  Use
 *	EVENTQ_DEFAULT unless you really need a high priority queue.
 *
 * This allocates a DMA channel and its associated parameter RAM slot.
 * The parameter RAM is initialized to hold a dummy transfer.
 *
 * Normal use is to pass a specific channel number as @channel, to make
 * use of hardware events mapped to that channel.  When the channel will
 * be used only for software triggering or event chaining, channels not
 * mapped to hardware events (or mapped to unused events) are preferable.
 *
 * DMA transfers start from a channel using edma_start(), or by
 * chaining.  When the transfer described in that channel's parameter RAM
 * slot completes, that slot's data may be reloaded through a link.
 *
 * DMA errors are only reported to the @callback associated with the
 * channel driving that transfer, but transfer completion callbacks can
 * be sent to another channel under control of the TCC field in
 * the option word of the transfer's parameter RAM set.  Drivers must not
 * use DMA transfer completion callbacks for channels they did not allocate.
 * (The same applies to TCC codes used in transfer chaining.)
 *
 * Returns the number of the channel, else negative errno.
 */
int edma_alloc_channel(int channel,
                void (*callback)(unsigned channel, u16 ch_status, void *data),
                void *data,
                enum dma_event_q eventq_no)
{
        unsigned i, done = 0, ctlr = 0;

        if (channel >= 0) {
                ctlr = EDMA_CTLR(channel);
                channel = EDMA_CHAN_SLOT(channel);
        }

        if (channel < 0) {
                for (i = 0; i < arch_num_cc; i++) {
                        channel = 0;
                        for (;;) {
                                channel = find_next_bit(
                                                edma_info[i]->edma_noevent,
                                                edma_info[i]->num_channels,
                                                channel);
                                if (channel == edma_info[i]->num_channels)
                                        break;
                                if (!test_and_set_bit(channel,
                                                edma_info[i]->edma_inuse)) {
                                        done = 1;
                                        ctlr = i;
                                        break;
                                }
                                channel++;
                        }
                        if (done)
                                break;
                }
                if (!done)
                        return -ENOMEM;
        } else if (channel >= edma_info[ctlr]->num_channels) {
                return -EINVAL;
        } else if (test_and_set_bit(channel, edma_info[ctlr]->edma_inuse)) {
                return -EBUSY;
        }

        /* ensure access through shadow region 0 */
        edma_or_array2(ctlr, EDMA_DRAE, 0, channel >> 5, 1 << (channel & 0x1f));

        /* ensure no events are pending */
        edma_stop(EDMA_CTLR_CHAN(ctlr, channel));
        memcpy_toio(edmacc_regs_base[ctlr] + PARM_OFFSET(channel),
                        &dummy_paramset, PARM_SIZE);

        if (callback)
                setup_dma_interrupt(EDMA_CTLR_CHAN(ctlr, channel),
                                        callback, data);

        map_dmach_queue(ctlr, channel, eventq_no);

        return EDMA_CTLR_CHAN(ctlr, channel);
}
EXPORT_SYMBOL(edma_alloc_channel);
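/*
 * Usage sketch (illustrative, not part of the driver): a client
 * allocating the channel tied to one of its hardware events.  The
 * event number "dev_event_no", the callback name, and "host" are
 * hypothetical; real event numbers come from <mach/edma.h> for the
 * SoC in question, and DMA_COMPLETE is the completion status passed
 * by dma_irq_handler() above.
 *
 *	static void xfer_done(unsigned channel, u16 ch_status, void *data)
 *	{
 *		if (ch_status != DMA_COMPLETE)
 *			pr_err("edma: error on channel %u\n", channel);
 *	}
 *
 *	int ch = edma_alloc_channel(dev_event_no, xfer_done, host,
 *				EVENTQ_DEFAULT);
 *	if (ch < 0)
 *		return ch;
 *	...
 *	edma_free_channel(ch);
 */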
/**
 * edma_free_channel - deallocate DMA channel
 * @channel: dma channel returned from edma_alloc_channel()
 *
 * This deallocates the DMA channel and associated parameter RAM slot
 * allocated by edma_alloc_channel().
 *
 * Callers are responsible for ensuring the channel is inactive, and
 * will not be reactivated by linking, chaining, or software calls to
 * edma_start().
 */
void edma_free_channel(unsigned channel)
{
        unsigned ctlr;

        ctlr = EDMA_CTLR(channel);
        channel = EDMA_CHAN_SLOT(channel);

        if (channel >= edma_info[ctlr]->num_channels)
                return;

        setup_dma_interrupt(channel, NULL, NULL);
        /* REVISIT should probably take out of shadow region 0 */

        memcpy_toio(edmacc_regs_base[ctlr] + PARM_OFFSET(channel),
                        &dummy_paramset, PARM_SIZE);
        clear_bit(channel, edma_info[ctlr]->edma_inuse);
}
EXPORT_SYMBOL(edma_free_channel);
/**
 * edma_alloc_slot - allocate DMA parameter RAM
 * @slot: specific slot to allocate; negative for "any unused slot"
 *
 * This allocates a parameter RAM slot, initializing it to hold a
 * dummy transfer.  Slots allocated using this routine have not been
 * mapped to a hardware DMA channel, and will normally be used by
 * linking to them from a slot associated with a DMA channel.
 *
 * Normal use is to pass EDMA_SLOT_ANY as the @slot, but specific
 * slots may be allocated on behalf of DSP firmware.
 *
 * Returns the number of the slot, else negative errno.
 */
int edma_alloc_slot(unsigned ctlr, int slot)
{
        if (slot >= 0)
                slot = EDMA_CHAN_SLOT(slot);

        if (slot < 0) {
                slot = edma_info[ctlr]->num_channels;
                for (;;) {
                        slot = find_next_zero_bit(edma_info[ctlr]->edma_inuse,
                                        edma_info[ctlr]->num_slots, slot);
                        if (slot == edma_info[ctlr]->num_slots)
                                return -ENOMEM;
                        if (!test_and_set_bit(slot,
                                        edma_info[ctlr]->edma_inuse))
                                break;
                }
        } else if (slot < edma_info[ctlr]->num_channels ||
                        slot >= edma_info[ctlr]->num_slots) {
                return -EINVAL;
        } else if (test_and_set_bit(slot, edma_info[ctlr]->edma_inuse)) {
                return -EBUSY;
        }

        memcpy_toio(edmacc_regs_base[ctlr] + PARM_OFFSET(slot),
                        &dummy_paramset, PARM_SIZE);

        return EDMA_CTLR_CHAN(ctlr, slot);
}
EXPORT_SYMBOL(edma_alloc_slot);
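/*
 * Usage sketch (illustrative, not part of the driver): pair an "any"
 * slot with a channel so the channel reloads from it on completion.
 * "ch" and "template" are hypothetical; see edma_write_slot() and
 * edma_link() below.
 *
 *	int slot = edma_alloc_slot(EDMA_CTLR(ch), EDMA_SLOT_ANY);
 *	if (slot >= 0) {
 *		edma_write_slot(slot, &template);
 *		edma_link(ch, slot);
 *	}
 */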
/**
 * edma_free_slot - deallocate DMA parameter RAM
 * @slot: parameter RAM slot returned from edma_alloc_slot()
 *
 * This deallocates the parameter RAM slot allocated by edma_alloc_slot().
 * Callers are responsible for ensuring the slot is inactive, and will
 * not be activated.
 */
void edma_free_slot(unsigned slot)
{
        unsigned ctlr;

        ctlr = EDMA_CTLR(slot);
        slot = EDMA_CHAN_SLOT(slot);

        if (slot < edma_info[ctlr]->num_channels ||
                slot >= edma_info[ctlr]->num_slots)
                return;

        memcpy_toio(edmacc_regs_base[ctlr] + PARM_OFFSET(slot),
                        &dummy_paramset, PARM_SIZE);
        clear_bit(slot, edma_info[ctlr]->edma_inuse);
}
EXPORT_SYMBOL(edma_free_slot);
/**
 * edma_alloc_cont_slots - allocate contiguous parameter RAM slots
 * The API will return the starting point of a set of
 * contiguous parameter RAM slots that have been requested
 *
 * @id: can only be EDMA_CONT_PARAMS_ANY or EDMA_CONT_PARAMS_FIXED_EXACT
 * or EDMA_CONT_PARAMS_FIXED_NOT_EXACT
 * @count: number of contiguous Parameter RAM slots
 * @slot: the start value of Parameter RAM slot that should be passed if id
 * is EDMA_CONT_PARAMS_FIXED_EXACT or EDMA_CONT_PARAMS_FIXED_NOT_EXACT
 *
 * If id is EDMA_CONT_PARAMS_ANY then the API starts looking for a set of
 * contiguous Parameter RAM slots from parameter RAM 64 in the case of
 * DaVinci SOCs and 32 in the case of DA8xx SOCs.
 *
 * If id is EDMA_CONT_PARAMS_FIXED_EXACT then the API starts looking for a
 * set of contiguous parameter RAM slots from the "slot" that is passed as an
 * argument to the API.
 *
 * If id is EDMA_CONT_PARAMS_FIXED_NOT_EXACT then the API initially tries
 * to find a set of contiguous parameter RAM slots from the "slot" that is
 * passed as an argument to the API.  On failure the API will try to find a
 * set of contiguous Parameter RAM slots from the remaining Parameter RAM
 * slots
 */
int edma_alloc_cont_slots(unsigned ctlr, unsigned int id, int slot, int count)
{
        /*
         * The start slot requested should be greater than
         * the number of channels and lesser than the total number
         * of slots
         */
        if ((id != EDMA_CONT_PARAMS_ANY) &&
                (slot < edma_info[ctlr]->num_channels ||
                slot >= edma_info[ctlr]->num_slots))
                return -EINVAL;

        /*
         * The number of parameter RAM slots requested cannot be less than 1
         * and cannot be more than the number of slots minus the number of
         * channels
         */
        if (count < 1 || count >
                (edma_info[ctlr]->num_slots - edma_info[ctlr]->num_channels))
                return -EINVAL;

        switch (id) {
        case EDMA_CONT_PARAMS_ANY:
                return reserve_contiguous_slots(ctlr, id, count,
                                edma_info[ctlr]->num_channels);
        case EDMA_CONT_PARAMS_FIXED_EXACT:
        case EDMA_CONT_PARAMS_FIXED_NOT_EXACT:
                return reserve_contiguous_slots(ctlr, id, count, slot);
        default:
                return -EINVAL;
        }
}
EXPORT_SYMBOL(edma_alloc_cont_slots);
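/*
 * Usage sketch (illustrative): request eight contiguous slots anywhere
 * past the channel area of controller "ctlr", then release them with
 * edma_free_cont_slots() below.
 *
 *	int first = edma_alloc_cont_slots(ctlr, EDMA_CONT_PARAMS_ANY, 0, 8);
 *	if (first >= 0)
 *		edma_free_cont_slots(first, 8);
 */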
/**
 * edma_free_cont_slots - deallocate DMA parameter RAM slots
 * @slot: first parameter RAM of a set of parameter RAM slots to be freed
 * @count: the number of contiguous parameter RAM slots to be freed
 *
 * This deallocates the parameter RAM slots allocated by
 * edma_alloc_cont_slots.
 * Callers/applications need to keep track of sets of contiguous
 * parameter RAM slots that have been allocated using the edma_alloc_cont_slots
 * API.
 * Callers are responsible for ensuring the slots are inactive, and will
 * not be activated.
 */
int edma_free_cont_slots(unsigned slot, int count)
{
        unsigned ctlr, slot_to_free;
        int i;

        ctlr = EDMA_CTLR(slot);
        slot = EDMA_CHAN_SLOT(slot);

        if (slot < edma_info[ctlr]->num_channels ||
                slot >= edma_info[ctlr]->num_slots ||
                count < 1)
                return -EINVAL;

        for (i = slot; i < slot + count; ++i) {
                ctlr = EDMA_CTLR(i);
                slot_to_free = EDMA_CHAN_SLOT(i);

                memcpy_toio(edmacc_regs_base[ctlr] + PARM_OFFSET(slot_to_free),
                        &dummy_paramset, PARM_SIZE);
                clear_bit(slot_to_free, edma_info[ctlr]->edma_inuse);
        }

        return 0;
}
EXPORT_SYMBOL(edma_free_cont_slots);
/*-----------------------------------------------------------------------*/

/* Parameter RAM operations (i) -- read/write partial slots */
/**
 * edma_set_src - set initial DMA source address in parameter RAM slot
 * @slot: parameter RAM slot being configured
 * @src_port: physical address of source (memory, controller FIFO, etc)
 * @addressMode: INCR, except in very rare cases
 * @fifoWidth: ignored unless @addressMode is FIFO, else specifies the
 *	width to use when addressing the fifo (e.g. W8BIT, W32BIT)
 *
 * Note that the source address is modified during the DMA transfer
 * according to edma_set_src_index().
 */
void edma_set_src(unsigned slot, dma_addr_t src_port,
                enum address_mode mode, enum fifo_width width)
{
        unsigned ctlr;

        ctlr = EDMA_CTLR(slot);
        slot = EDMA_CHAN_SLOT(slot);

        if (slot < edma_info[ctlr]->num_slots) {
                unsigned int i = edma_parm_read(ctlr, PARM_OPT, slot);

                if (mode) {
                        /* set SAM and program FWID */
                        i = (i & ~(EDMA_FWID)) | (SAM | ((width & 0x7) << 8));
                } else {
                        /* clear SAM */
                        i &= ~SAM;
                }
                edma_parm_write(ctlr, PARM_OPT, slot, i);

                /* set the source port address
                   in source register of param structure */
                edma_parm_write(ctlr, PARM_SRC, slot, src_port);
        }
}
EXPORT_SYMBOL(edma_set_src);
/**
 * edma_set_dest - set initial DMA destination address in parameter RAM slot
 * @slot: parameter RAM slot being configured
 * @dest_port: physical address of destination (memory, controller FIFO, etc)
 * @addressMode: INCR, except in very rare cases
 * @fifoWidth: ignored unless @addressMode is FIFO, else specifies the
 *	width to use when addressing the fifo (e.g. W8BIT, W32BIT)
 *
 * Note that the destination address is modified during the DMA transfer
 * according to edma_set_dest_index().
 */
void edma_set_dest(unsigned slot, dma_addr_t dest_port,
                enum address_mode mode, enum fifo_width width)
{
        unsigned ctlr;

        ctlr = EDMA_CTLR(slot);
        slot = EDMA_CHAN_SLOT(slot);

        if (slot < edma_info[ctlr]->num_slots) {
                unsigned int i = edma_parm_read(ctlr, PARM_OPT, slot);

                if (mode) {
                        /* set DAM and program FWID */
                        i = (i & ~(EDMA_FWID)) | (DAM | ((width & 0x7) << 8));
                } else {
                        /* clear DAM */
                        i &= ~DAM;
                }
                edma_parm_write(ctlr, PARM_OPT, slot, i);
                /* set the destination port address
                   in dest register of param structure */
                edma_parm_write(ctlr, PARM_DST, slot, dest_port);
        }
}
EXPORT_SYMBOL(edma_set_dest);
/**
 * edma_get_position - returns the current transfer points
 * @slot: parameter RAM slot being examined
 * @src: pointer to source port position
 * @dst: pointer to destination port position
 *
 * Returns current source and destination addresses for a particular
 * parameter RAM slot.  Its channel should not be active when this is called.
 */
void edma_get_position(unsigned slot, dma_addr_t *src, dma_addr_t *dst)
{
        struct edmacc_param temp;
        unsigned ctlr;

        ctlr = EDMA_CTLR(slot);
        slot = EDMA_CHAN_SLOT(slot);

        edma_read_slot(EDMA_CTLR_CHAN(ctlr, slot), &temp);
        if (src != NULL)
                *src = temp.src;
        if (dst != NULL)
                *dst = temp.dst;
}
EXPORT_SYMBOL(edma_get_position);
/**
 * edma_set_src_index - configure DMA source address indexing
 * @slot: parameter RAM slot being configured
 * @src_bidx: byte offset between source arrays in a frame
 * @src_cidx: byte offset between source frames in a block
 *
 * Offsets are specified to support either contiguous or discontiguous
 * memory transfers, or repeated access to a hardware register, as needed.
 * When accessing hardware registers, both offsets are normally zero.
 */
void edma_set_src_index(unsigned slot, s16 src_bidx, s16 src_cidx)
{
        unsigned ctlr;

        ctlr = EDMA_CTLR(slot);
        slot = EDMA_CHAN_SLOT(slot);

        if (slot < edma_info[ctlr]->num_slots) {
                edma_parm_modify(ctlr, PARM_SRC_DST_BIDX, slot,
                                0xffff0000, src_bidx);
                edma_parm_modify(ctlr, PARM_SRC_DST_CIDX, slot,
                                0xffff0000, src_cidx);
        }
}
EXPORT_SYMBOL(edma_set_src_index);
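/*
 * Worked example (illustrative): for a fully contiguous memory source
 * transferred as acnt bytes per array and bcnt arrays per frame, use
 * src_bidx = acnt and src_cidx = acnt * bcnt, so consecutive arrays
 * and frames follow each other in memory; for a fixed device register,
 * use 0 for both so the address never moves.
 */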
/**
 * edma_set_dest_index - configure DMA destination address indexing
 * @slot: parameter RAM slot being configured
 * @dest_bidx: byte offset between destination arrays in a frame
 * @dest_cidx: byte offset between destination frames in a block
 *
 * Offsets are specified to support either contiguous or discontiguous
 * memory transfers, or repeated access to a hardware register, as needed.
 * When accessing hardware registers, both offsets are normally zero.
 */
void edma_set_dest_index(unsigned slot, s16 dest_bidx, s16 dest_cidx)
{
        unsigned ctlr;

        ctlr = EDMA_CTLR(slot);
        slot = EDMA_CHAN_SLOT(slot);

        if (slot < edma_info[ctlr]->num_slots) {
                edma_parm_modify(ctlr, PARM_SRC_DST_BIDX, slot,
                                0x0000ffff, dest_bidx << 16);
                edma_parm_modify(ctlr, PARM_SRC_DST_CIDX, slot,
                                0x0000ffff, dest_cidx << 16);
        }
}
EXPORT_SYMBOL(edma_set_dest_index);
/**
 * edma_set_transfer_params - configure DMA transfer parameters
 * @slot: parameter RAM slot being configured
 * @acnt: how many bytes per array (at least one)
 * @bcnt: how many arrays per frame (at least one)
 * @ccnt: how many frames per block (at least one)
 * @bcnt_rld: used only for A-Synchronized transfers; this specifies
 *	the value to reload into bcnt when it decrements to zero
 * @sync_mode: ASYNC or ABSYNC
 *
 * See the EDMA3 documentation to understand how to configure and link
 * transfers using the fields in PaRAM slots.  If you are not doing it
 * all at once with edma_write_slot(), you will use this routine
 * plus two calls each for source and destination, setting the initial
 * address and saying how to index that address.
 *
 * An example of an A-Synchronized transfer is a serial link using a
 * single word shift register.  In that case, @acnt would be equal to
 * that word size; the serial controller issues a DMA synchronization
 * event to transfer each word, and memory access by the DMA transfer
 * controller will be word-at-a-time.
 *
 * An example of an AB-Synchronized transfer is a device using a FIFO.
 * In that case, @acnt equals the FIFO width and @bcnt equals its depth.
 * The controller with the FIFO issues DMA synchronization events when
 * the FIFO threshold is reached, and the DMA transfer controller will
 * transfer one frame to (or from) the FIFO.  It will probably use
 * efficient burst modes to access memory.
 */
void edma_set_transfer_params(unsigned slot,
                u16 acnt, u16 bcnt, u16 ccnt,
                u16 bcnt_rld, enum sync_dimension sync_mode)
{
        unsigned ctlr;

        ctlr = EDMA_CTLR(slot);
        slot = EDMA_CHAN_SLOT(slot);

        if (slot < edma_info[ctlr]->num_slots) {
                edma_parm_modify(ctlr, PARM_LINK_BCNTRLD, slot,
                                0x0000ffff, bcnt_rld << 16);
                if (sync_mode == ASYNC)
                        edma_parm_and(ctlr, PARM_OPT, slot, ~SYNCDIM);
                else
                        edma_parm_or(ctlr, PARM_OPT, slot, SYNCDIM);
                /* Set the acount, bcount, ccount registers */
                edma_parm_write(ctlr, PARM_A_B_CNT, slot, (bcnt << 16) | acnt);
                edma_parm_write(ctlr, PARM_CCNT, slot, ccnt);
        }
}
EXPORT_SYMBOL(edma_set_transfer_params);
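/*
 * Worked example (illustrative): moving a 512 byte block through a
 * 32-bit wide, 16-entry FIFO with AB-synchronized transfers:
 * acnt = 4 (FIFO width), bcnt = 16 (FIFO depth), and
 * ccnt = 512 / (4 * 16) = 8 frames, one frame per device event.
 *
 *	edma_set_transfer_params(slot, 4, 16, 8, 0, ABSYNC);
 */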
/**
 * edma_link - link one parameter RAM slot to another
 * @from: parameter RAM slot originating the link
 * @to: parameter RAM slot which is the link target
 *
 * The originating slot should not be part of any active DMA transfer.
 */
void edma_link(unsigned from, unsigned to)
{
        unsigned ctlr_from, ctlr_to;

        ctlr_from = EDMA_CTLR(from);
        from = EDMA_CHAN_SLOT(from);
        ctlr_to = EDMA_CTLR(to);
        to = EDMA_CHAN_SLOT(to);

        if (from >= edma_info[ctlr_from]->num_slots)
                return;
        if (to >= edma_info[ctlr_to]->num_slots)
                return;
        edma_parm_modify(ctlr_from, PARM_LINK_BCNTRLD, from, 0xffff0000,
                                PARM_OFFSET(to));
}
EXPORT_SYMBOL(edma_link);
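/*
 * Usage sketch (illustrative): a classic ping-pong arrangement, where
 * "channel" was set up with the ping buffer's parameters and the two
 * reload slots ("ping_slot"/"pong_slot", hypothetical names) link to
 * each other so transfers alternate between the two buffers.
 *
 *	edma_link(channel, pong_slot);
 *	edma_link(pong_slot, ping_slot);
 *	edma_link(ping_slot, pong_slot);
 */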
/**
 * edma_unlink - cut link from one parameter RAM slot
 * @from: parameter RAM slot originating the link
 *
 * The originating slot should not be part of any active DMA transfer.
 * Its link is set to 0xffff.
 */
void edma_unlink(unsigned from)
{
        unsigned ctlr;

        ctlr = EDMA_CTLR(from);
        from = EDMA_CHAN_SLOT(from);

        if (from >= edma_info[ctlr]->num_slots)
                return;
        edma_parm_or(ctlr, PARM_LINK_BCNTRLD, from, 0xffff);
}
EXPORT_SYMBOL(edma_unlink);
/*-----------------------------------------------------------------------*/

/* Parameter RAM operations (ii) -- read/write whole parameter sets */

/**
 * edma_write_slot - write parameter RAM data for slot
 * @slot: number of parameter RAM slot being modified
 * @param: data to be written into parameter RAM slot
 *
 * Use this to assign all parameters of a transfer at once.  This
 * allows more efficient setup of transfers than issuing multiple
 * calls to set up those parameters in small pieces, and provides
 * complete control over all transfer options.
 */
void edma_write_slot(unsigned slot, const struct edmacc_param *param)
{
        unsigned ctlr;

        ctlr = EDMA_CTLR(slot);
        slot = EDMA_CHAN_SLOT(slot);

        if (slot >= edma_info[ctlr]->num_slots)
                return;
        memcpy_toio(edmacc_regs_base[ctlr] + PARM_OFFSET(slot), param,
                        PARM_SIZE);
}
EXPORT_SYMBOL(edma_write_slot);
/**
 * edma_read_slot - read parameter RAM data from slot
 * @slot: number of parameter RAM slot being copied
 * @param: where to store copy of parameter RAM data
 *
 * Use this to read data from a parameter RAM slot, perhaps to
 * save it as a template for later reuse.
 */
void edma_read_slot(unsigned slot, struct edmacc_param *param)
{
        unsigned ctlr;

        ctlr = EDMA_CTLR(slot);
        slot = EDMA_CHAN_SLOT(slot);

        if (slot >= edma_info[ctlr]->num_slots)
                return;
        memcpy_fromio(param, edmacc_regs_base[ctlr] + PARM_OFFSET(slot),
                        PARM_SIZE);
}
EXPORT_SYMBOL(edma_read_slot);
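/*
 * Usage sketch (illustrative): save a configured slot as a template,
 * tweak one field, and write it into another slot.  "slot_a" and
 * "slot_b" are hypothetical.
 *
 *	struct edmacc_param tmpl;
 *
 *	edma_read_slot(slot_a, &tmpl);
 *	tmpl.ccnt = 1;
 *	edma_write_slot(slot_b, &tmpl);
 */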
/*-----------------------------------------------------------------------*/

/* Various EDMA channel control operations */

/**
 * edma_pause - pause dma on a channel
 * @channel: on which edma_start() has been called
 *
 * This temporarily disables EDMA hardware events on the specified channel,
 * preventing them from triggering new transfers on its behalf.
 */
void edma_pause(unsigned channel)
{
        unsigned ctlr;

        ctlr = EDMA_CTLR(channel);
        channel = EDMA_CHAN_SLOT(channel);

        if (channel < edma_info[ctlr]->num_channels) {
                unsigned int mask = (1 << (channel & 0x1f));

                edma_shadow0_write_array(ctlr, SH_EECR, channel >> 5, mask);
        }
}
EXPORT_SYMBOL(edma_pause);
/**
 * edma_resume - resumes dma on a paused channel
 * @channel: on which edma_pause() has been called
 *
 * This re-enables EDMA hardware events on the specified channel.
 */
void edma_resume(unsigned channel)
{
        unsigned ctlr;

        ctlr = EDMA_CTLR(channel);
        channel = EDMA_CHAN_SLOT(channel);

        if (channel < edma_info[ctlr]->num_channels) {
                unsigned int mask = (1 << (channel & 0x1f));

                edma_shadow0_write_array(ctlr, SH_EESR, channel >> 5, mask);
        }
}
EXPORT_SYMBOL(edma_resume);
/**
 * edma_start - start dma on a channel
 * @channel: channel being activated
 *
 * Channels with event associations will be triggered by their hardware
 * events, and channels without such associations will be triggered by
 * software.  (At this writing there is no interface for using software
 * triggers except with channels that don't support hardware triggers.)
 *
 * Returns zero on success, else negative errno.
 */
int edma_start(unsigned channel)
{
        unsigned ctlr;

        ctlr = EDMA_CTLR(channel);
        channel = EDMA_CHAN_SLOT(channel);

        if (channel < edma_info[ctlr]->num_channels) {
                int j = channel >> 5;
                unsigned int mask = (1 << (channel & 0x1f));

                /* EDMA channels without event association */
                if (test_bit(channel, edma_info[ctlr]->edma_noevent)) {
                        pr_debug("EDMA: ESR%d %08x\n", j,
                                edma_shadow0_read_array(ctlr, SH_ESR, j));
                        edma_shadow0_write_array(ctlr, SH_ESR, j, mask);
                        return 0;
                }

                /* EDMA channel with event association */
                pr_debug("EDMA: ER%d %08x\n", j,
                        edma_shadow0_read_array(ctlr, SH_ER, j));
                /* Clear any pending error */
                edma_write_array(ctlr, EDMA_EMCR, j, mask);
                /* Clear any SER */
                edma_shadow0_write_array(ctlr, SH_SECR, j, mask);
                edma_shadow0_write_array(ctlr, SH_EESR, j, mask);
                pr_debug("EDMA: EER%d %08x\n", j,
                        edma_shadow0_read_array(ctlr, SH_EER, j));
                return 0;
        }

        return -EINVAL;
}
EXPORT_SYMBOL(edma_start);
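/*
 * Usage sketch (illustrative): once the channel's PaRAM slot is set up,
 * arm it; hardware events (or chaining) then trigger transfers until
 * edma_stop() is called.
 *
 *	edma_start(channel);
 *	...
 *	edma_stop(channel);
 */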
/**
 * edma_stop - stops dma on the channel passed
 * @channel: channel being deactivated
 *
 * When @channel is a channel, any active transfer is paused and
 * all pending hardware events are cleared.  The current transfer
 * may not be resumed, and the channel's Parameter RAM should be
 * reinitialized before being reused.
 */
void edma_stop(unsigned channel)
{
        unsigned ctlr;

        ctlr = EDMA_CTLR(channel);
        channel = EDMA_CHAN_SLOT(channel);

        if (channel < edma_info[ctlr]->num_channels) {
                int j = channel >> 5;
                unsigned int mask = (1 << (channel & 0x1f));

                edma_shadow0_write_array(ctlr, SH_EECR, j, mask);
                edma_shadow0_write_array(ctlr, SH_ECR, j, mask);
                edma_shadow0_write_array(ctlr, SH_SECR, j, mask);
                edma_write_array(ctlr, EDMA_EMCR, j, mask);

                pr_debug("EDMA: EER%d %08x\n", j,
                                edma_shadow0_read_array(ctlr, SH_EER, j));

                /* REVISIT:  consider guarding against inappropriate event
                 * chaining by overwriting with dummy_paramset.
                 */
        }
}
EXPORT_SYMBOL(edma_stop);
/******************************************************************************
 *
 * It cleans the ParamEntry and brings EDMA back to its initial state if
 * media has been removed before EDMA has finished.  It is useful for
 * removable media.
 * Arguments:
 *      channel - channel number
 *
 * Return: zero on success, or corresponding error no on failure
 *
 * FIXME this should not be needed ... edma_stop() should suffice.
 *
 *****************************************************************************/

void edma_clean_channel(unsigned channel)
{
        unsigned ctlr;

        ctlr = EDMA_CTLR(channel);
        channel = EDMA_CHAN_SLOT(channel);

        if (channel < edma_info[ctlr]->num_channels) {
                int j = (channel >> 5);
                unsigned int mask = 1 << (channel & 0x1f);

                pr_debug("EDMA: EMR%d %08x\n", j,
                                edma_read_array(ctlr, EDMA_EMR, j));
                edma_shadow0_write_array(ctlr, SH_ECR, j, mask);
                /* Clear the corresponding EMR bits */
                edma_write_array(ctlr, EDMA_EMCR, j, mask);
                /* Clear any SER */
                edma_shadow0_write_array(ctlr, SH_SECR, j, mask);
                edma_write(ctlr, EDMA_CCERRCLR, (1 << 16) | 0x3);
        }
}
EXPORT_SYMBOL(edma_clean_channel);
/*
 * edma_clear_event - clear an outstanding event on the DMA channel
 *
 * channel - channel number
 */
void edma_clear_event(unsigned channel)
{
        unsigned ctlr;

        ctlr = EDMA_CTLR(channel);
        channel = EDMA_CHAN_SLOT(channel);

        if (channel >= edma_info[ctlr]->num_channels)
                return;
        if (channel < 32)
                edma_write(ctlr, EDMA_ECR, 1 << channel);
        else
                edma_write(ctlr, EDMA_ECRH, 1 << (channel - 32));
}
EXPORT_SYMBOL(edma_clear_event);
/*-----------------------------------------------------------------------*/

static int __init edma_probe(struct platform_device *pdev)
{
        struct edma_soc_info    *info = pdev->dev.platform_data;
        const s8        (*queue_priority_mapping)[2];
        const s8        (*queue_tc_mapping)[2];
        int             i, j, found = 0;
        int             status = -1;
        const s8        *noevent;
        int             irq[EDMA_MAX_CC] = {0, 0};
        int             err_irq[EDMA_MAX_CC] = {0, 0};
        struct resource *r[EDMA_MAX_CC] = {NULL};
        resource_size_t len[EDMA_MAX_CC];
        char            res_name[10];
        char            irq_name[10];

        if (!info)
                return -ENODEV;

        for (j = 0; j < EDMA_MAX_CC; j++) {
                sprintf(res_name, "edma_cc%d", j);
                r[j] = platform_get_resource_byname(pdev, IORESOURCE_MEM,
                                                res_name);
                if (!r[j]) {
                        if (found)
                                break;
                        else
                                return -ENODEV;
                } else {
                        found = 1;
                }

                len[j] = resource_size(r[j]);

                r[j] = request_mem_region(r[j]->start, len[j],
                        dev_name(&pdev->dev));
                if (!r[j]) {
                        status = -EBUSY;
                        goto fail1;
                }

                edmacc_regs_base[j] = ioremap(r[j]->start, len[j]);
                if (!edmacc_regs_base[j]) {
                        status = -EBUSY;
                        goto fail1;
                }

                edma_info[j] = kmalloc(sizeof(struct edma), GFP_KERNEL);
                if (!edma_info[j]) {
                        status = -ENOMEM;
                        goto fail1;
                }
                memset(edma_info[j], 0, sizeof(struct edma));

                edma_info[j]->num_channels = min_t(unsigned, info[j].n_channel,
                                                        EDMA_MAX_DMACH);
                edma_info[j]->num_slots = min_t(unsigned, info[j].n_slot,
                                                        EDMA_MAX_PARAMENTRY);
                edma_info[j]->num_cc = min_t(unsigned, info[j].n_cc,
                                                        EDMA_MAX_CC);

                edma_info[j]->default_queue = info[j].default_queue;
                if (!edma_info[j]->default_queue)
                        edma_info[j]->default_queue = EVENTQ_1;

                dev_dbg(&pdev->dev, "DMA REG BASE ADDR=%p\n",
                        edmacc_regs_base[j]);

                for (i = 0; i < edma_info[j]->num_slots; i++)
                        memcpy_toio(edmacc_regs_base[j] + PARM_OFFSET(i),
                                        &dummy_paramset, PARM_SIZE);

                noevent = info[j].noevent;
                if (noevent) {
                        while (*noevent != -1)
                                set_bit(*noevent++,
                                        edma_info[j]->edma_noevent);
                }

                sprintf(irq_name, "edma%d", j);
                irq[j] = platform_get_irq_byname(pdev, irq_name);
                edma_info[j]->irq_res_start = irq[j];
                status = request_irq(irq[j], dma_irq_handler, 0, "edma",
                                        &pdev->dev);
                if (status < 0) {
                        dev_dbg(&pdev->dev, "request_irq %d failed --> %d\n",
                                irq[j], status);
                        goto fail;
                }

                sprintf(irq_name, "edma%d_err", j);
                err_irq[j] = platform_get_irq_byname(pdev, irq_name);
                edma_info[j]->irq_res_end = err_irq[j];
                status = request_irq(err_irq[j], dma_ccerr_handler, 0,
                                        "edma_error", &pdev->dev);
                if (status < 0) {
                        dev_dbg(&pdev->dev, "request_irq %d failed --> %d\n",
                                err_irq[j], status);
                        goto fail;
                }

                /* Everything lives on transfer controller 1 until otherwise
                 * specified. This way, long transfers on the low priority queue
                 * started by the codec engine will not cause audio defects.
                 */
                for (i = 0; i < edma_info[j]->num_channels; i++)
                        map_dmach_queue(j, i, EVENTQ_1);

                queue_tc_mapping = info[j].queue_tc_mapping;
                queue_priority_mapping = info[j].queue_priority_mapping;

                /* Event queue to TC mapping */
                for (i = 0; queue_tc_mapping[i][0] != -1; i++)
                        map_queue_tc(j, queue_tc_mapping[i][0],
                                        queue_tc_mapping[i][1]);

                /* Event queue priority mapping */
                for (i = 0; queue_priority_mapping[i][0] != -1; i++)
                        assign_priority_to_queue(j,
                                        queue_priority_mapping[i][0],
                                        queue_priority_mapping[i][1]);

                /* Map the channel to param entry if channel mapping logic
                 * exists
                 */
                if (edma_read(j, EDMA_CCCFG) & CHMAP_EXIST)
                        map_dmach_param(j);

                for (i = 0; i < info[j].n_region; i++) {
                        edma_write_array2(j, EDMA_DRAE, i, 0, 0x0);
                        edma_write_array2(j, EDMA_DRAE, i, 1, 0x0);
                        edma_write_array(j, EDMA_QRAE, i, 0x0);
                }
                arch_num_cc++;
        }

        if (tc_errs_handled) {
                status = request_irq(IRQ_TCERRINT0, dma_tc0err_handler, 0,
                                        "edma_tc0", &pdev->dev);
                if (status < 0) {
                        dev_dbg(&pdev->dev, "request_irq %d failed --> %d\n",
                                IRQ_TCERRINT0, status);
                        return status;
                }
                status = request_irq(IRQ_TCERRINT, dma_tc1err_handler, 0,
                                        "edma_tc1", &pdev->dev);
                if (status < 0) {
                        dev_dbg(&pdev->dev, "request_irq %d --> %d\n",
                                IRQ_TCERRINT, status);
                        return status;
                }
        }

        return 0;

fail:
        for (i = 0; i < EDMA_MAX_CC; i++) {
                if (err_irq[i])
                        free_irq(err_irq[i], &pdev->dev);
                if (irq[i])
                        free_irq(irq[i], &pdev->dev);
        }
fail1:
        for (i = 0; i < EDMA_MAX_CC; i++) {
                if (r[i])
                        release_mem_region(r[i]->start, len[i]);
                if (edmacc_regs_base[i])
                        iounmap(edmacc_regs_base[i]);
                kfree(edma_info[i]);
        }

        return status;
}
static struct platform_driver edma_driver = {
        .driver.name    = "edma",
};

static int __init edma_init(void)
{
        return platform_driver_probe(&edma_driver, edma_probe);
}
arch_initcall(edma_init);