2 /******************************************************************************
4 * XILINX IS PROVIDING THIS DESIGN, CODE, OR INFORMATION "AS IS"
5 * AS A COURTESY TO YOU, SOLELY FOR USE IN DEVELOPING PROGRAMS AND
6 * SOLUTIONS FOR XILINX DEVICES. BY PROVIDING THIS DESIGN, CODE,
7 * OR INFORMATION AS ONE POSSIBLE IMPLEMENTATION OF THIS FEATURE,
8 * APPLICATION OR STANDARD, XILINX IS MAKING NO REPRESENTATION
9 * THAT THIS IMPLEMENTATION IS FREE FROM ANY CLAIMS OF INFRINGEMENT,
10 * AND YOU ARE RESPONSIBLE FOR OBTAINING ANY RIGHTS YOU MAY REQUIRE
11 * FOR YOUR IMPLEMENTATION. XILINX EXPRESSLY DISCLAIMS ANY
12 * WARRANTY WHATSOEVER WITH RESPECT TO THE ADEQUACY OF THE
13 * IMPLEMENTATION, INCLUDING BUT NOT LIMITED TO ANY WARRANTIES OR
14 * REPRESENTATIONS THAT THIS IMPLEMENTATION IS FREE FROM CLAIMS OF
15 * INFRINGEMENT, IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
16 * FOR A PARTICULAR PURPOSE.
18 * (c) Copyright 2006 Xilinx Inc.
19 * All rights reserved.
20 * This program is free software; you can redistribute it and/or modify it
21 * under the terms of the GNU General Public License as published by the
22 * Free Software Foundation; either version 2 of the License, or (at your
23 * option) any later version.
25 * You should have received a copy of the GNU General Public License
26 * along with this program; if not, write to the Free Software
27 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
29 ******************************************************************************/
30 /*****************************************************************************/
35 * This file implements Scatter-Gather DMA (SGDMA) related functions. For more
36 * information on this driver, see xdmav3.h.
39 * MODIFICATION HISTORY:
41 * Ver Who Date Changes
42 * ----- ---- -------- -------------------------------------------------------
43 * 3.00a rmm 03/11/06 First release
44 * rmm 06/22/06 Fixed C++ compiler warnings
46 ******************************************************************************/
48 /***************************** Include Files *********************************/
50 #include <linux/string.h>
51 #include <asm/delay.h>
55 /************************** Constant Definitions *****************************/
58 /**************************** Type Definitions *******************************/
61 /***************** Macros (Inline Functions) Definitions *********************/
63 /****************************************************************************
64 * These cache macros are used throughout this source code file to show
65 * users where cache operations should occur if BDs were to be placed in
66 * a cached memory region. Caching BD regions, however, is not common.
68 * The macros are implemented as NULL operations, but may be hooked into
69 * XENV macros in future revisions of this driver.
70 ****************************************************************************/
/* No-op cache hooks (see note above): BDs are assumed to live in
 * non-cached memory. Hook these into XENV cache primitives if BDs are
 * ever placed in a cached region.
 */
#define XDMAV3_CACHE_FLUSH(BdPtr)
#define XDMAV3_CACHE_INVALIDATE(BdPtr)
74 /****************************************************************************
75 * Compute the virtual address of a descriptor from its physical address
77 * @param Ring is the ring BdPtr appears in
78 * @param BdPtr is the physical address of the BD
80 * @returns Virtual address of BdPtr
82 * @note Assume BdPtr is always a valid BD in the ring
83 ****************************************************************************/
/* Macro arguments are fully parenthesized to avoid operator-precedence
 * surprises when callers pass expressions.
 */
#define XDMAV3_PHYS_TO_VIRT(Ring, BdPtr) \
	((u32)(BdPtr) + ((Ring)->BaseAddr - (Ring)->PhysBaseAddr))
87 /****************************************************************************
88 * Compute the physical address of a descriptor from its virtual address
90 * @param Ring is the ring BdPtr appears in
91 * @param BdPtr is the virtual address of the BD
93 * @returns Physical address of BdPtr
95 * @note Assume BdPtr is always a valid BD in the ring
96 ****************************************************************************/
/* Macro arguments are fully parenthesized to avoid operator-precedence
 * surprises when callers pass expressions.
 */
#define XDMAV3_VIRT_TO_PHYS(Ring, BdPtr) \
	((u32)(BdPtr) - ((Ring)->BaseAddr - (Ring)->PhysBaseAddr))
100 /****************************************************************************
101 * Clear or set the SGS bit of the DMACR register
102 ****************************************************************************/
/* NOTE: these macros expand using a local `InstancePtr` that must be in
 * scope at the point of use (they are only used inside this file's
 * functions). They read-modify-write the DMACR register.
 */
#define XDMAV3_HW_SGS_CLEAR						\
	XDmaV3_mWriteReg(InstancePtr->RegBase, XDMAV3_DMACR_OFFSET,	\
			 XDmaV3_mReadReg(InstancePtr->RegBase,		\
					 XDMAV3_DMACR_OFFSET)		\
			 & ~XDMAV3_DMACR_SGS_MASK)

#define XDMAV3_HW_SGS_SET						\
	XDmaV3_mWriteReg(InstancePtr->RegBase, XDMAV3_DMACR_OFFSET,	\
			 XDmaV3_mReadReg(InstancePtr->RegBase,		\
					 XDMAV3_DMACR_OFFSET)		\
			 | XDMAV3_DMACR_SGS_MASK)
113 /****************************************************************************
114 * Move the BdPtr argument ahead an arbitrary number of BDs wrapping around
115 * to the beginning of the ring if needed.
117 * We know a wraparound should occur if the new BdPtr is greater than
118 * the high address in the ring OR if the new BdPtr crosses over the
119 * 0xFFFFFFFF to 0 boundary. The latter test is a valid one since we do not
120 * allow a BD space to span this boundary.
122 * @param Ring is the ring BdPtr appears in
123 * @param BdPtr on input is the starting BD position and on output is the
125 * @param NumBd is the number of BD spaces to increment
127 ****************************************************************************/
/* Advance BdPtr by NumBd BD spaces, wrapping to the ring base if the new
 * address passes HighAddr or wraps past 0xFFFFFFFF (BD space never spans
 * that boundary, so unsigned wraparound implies a ring wrap).
 * NOTE(review): the enclosing braces were lost in extraction and have
 * been restored; statement content is unchanged.
 */
#define XDMAV3_RING_SEEKAHEAD(Ring, BdPtr, NumBd)			\
	{								\
		u32 Addr = (u32)(BdPtr);				\
									\
		Addr += ((Ring)->Separation * (NumBd));			\
		if ((Addr > (Ring)->HighAddr) || ((u32)(BdPtr) > Addr)) { \
			Addr -= (Ring)->Length;				\
		}							\
									\
		BdPtr = (XDmaBdV3*)Addr;				\
	}
141 /****************************************************************************
142 * Move the BdPtr argument backwards an arbitrary number of BDs wrapping
143 * around to the end of the ring if needed.
145 * We know a wraparound should occur if the new BdPtr is less than
146 * the base address in the ring OR if the new BdPtr crosses over the
147 * 0xFFFFFFFF to 0 boundary. The latter test is a valid one since we do not
148 * allow a BD space to span this boundary.
150 * @param Ring is the ring BdPtr appears in
151 * @param BdPtr on input is the starting BD position and on output is the
153 * @param NumBd is the number of BD spaces to increment
155 ****************************************************************************/
/* Move BdPtr backwards by NumBd BD spaces, wrapping to the ring end if
 * the new address drops below BaseAddr or underflows past 0 (BD space
 * never spans the 0xFFFFFFFF/0 boundary, so unsigned underflow implies a
 * ring wrap).
 * NOTE(review): the enclosing braces were lost in extraction and have
 * been restored; statement content is unchanged.
 */
#define XDMAV3_RING_SEEKBACK(Ring, BdPtr, NumBd)			\
	{								\
		u32 Addr = (u32)(BdPtr);				\
									\
		Addr -= ((Ring)->Separation * (NumBd));			\
		if ((Addr < (Ring)->BaseAddr) || ((u32)(BdPtr) < Addr)) { \
			Addr += (Ring)->Length;				\
		}							\
									\
		BdPtr = (XDmaBdV3*)Addr;				\
	}
170 /************************** Function Prototypes ******************************/
172 static int IsSgDmaChannel(XDmaV3
* InstancePtr
);
175 /************************** Variable Definitions *****************************/
177 /******************************************************************************/
179 * Start the SGDMA channel.
181 * @param InstancePtr is a pointer to the instance to be started.
184 * - XST_SUCCESS if channel was started.
185 * - XST_DMA_SG_NO_LIST if the channel has no initialized BD ring.
187 ******************************************************************************/
188 int XDmaV3_SgStart(XDmaV3
* InstancePtr
)
190 XDmaV3_BdRing
*Ring
= &InstancePtr
->BdRing
;
193 /* BD list has yet to be created for this channel */
194 if (Ring
->AllCnt
== 0) {
195 return (XST_DMA_SG_NO_LIST
);
198 /* Do nothing if already started */
199 if (Ring
->RunState
== XST_DMA_SG_IS_STARTED
) {
200 return (XST_SUCCESS
);
203 /* Note as started */
204 Ring
->RunState
= XST_DMA_SG_IS_STARTED
;
207 XDmaV3_mWriteReg(InstancePtr
->RegBase
, XDMAV3_BDA_OFFSET
,
210 /* If there are unprocessed BDs then we want to channel to begin processing
213 if ((XDmaV3_mReadBd(XDMAV3_PHYS_TO_VIRT(Ring
, Ring
->BdaRestart
),
214 XDMAV3_BD_DMASR_OFFSET
) & XDMAV3_DMASR_DMADONE_MASK
)
220 /* To start, clear SWCR.DSGAR, and set SWCR.SGE */
221 Swcr
= XDmaV3_mReadReg(InstancePtr
->RegBase
, XDMAV3_SWCR_OFFSET
);
222 Swcr
&= ~XDMAV3_SWCR_DSGAR_MASK
;
223 Swcr
|= XDMAV3_SWCR_SGE_MASK
;
224 XDmaV3_mWriteReg(InstancePtr
->RegBase
, XDMAV3_SWCR_OFFSET
, Swcr
);
226 return (XST_SUCCESS
);
230 /******************************************************************************/
232 * Stop the SGDMA or Simple SGDMA channel gracefully. Any DMA operation
233 * currently in progress is allowed to finish.
235 * An interrupt may be generated as the DMA engine finishes the packet in
236 * process. To prevent this (if desired) then disabled DMA interrupts prior to
237 * invoking this function.
239 * If after stopping the channel, new BDs are enqueued with XDmaV3_SgBdToHw(),
240 * then those BDs will not be processed until after XDmaV3_SgStart() is called.
242 * @param InstancePtr is a pointer to the instance to be stopped.
244 * @note This function will block until the HW indicates that DMA has stopped.
246 ******************************************************************************/
247 void XDmaV3_SgStop(XDmaV3
* InstancePtr
)
251 XDmaV3_BdRing
*Ring
= &InstancePtr
->BdRing
;
254 /* Save the contents of the interrupt enable register then disable
255 * interrupts. This register will be restored at the end of the function
257 Ier
= XDmaV3_mReadReg(InstancePtr
->RegBase
, XDMAV3_IER_OFFSET
);
258 XDmaV3_mWriteReg(InstancePtr
->RegBase
, XDMAV3_IER_OFFSET
, 0);
260 /* Stopping the HW is a three step process:
262 * 2. Wait for SWCR.SGE=0
263 * 3. Set SWCR.DSGAR=0 and SWCR.SGE=1
265 * Once we've successfully gone through this process, the HW is fully
266 * stopped. To restart we must give the HW a new BDA.
268 Swcr
= XDmaV3_mReadReg(InstancePtr
->RegBase
, XDMAV3_SWCR_OFFSET
);
270 /* If the channel is currently active, stop it by setting SWCR.SGD=1
271 * and waiting for SWCR.SGE to toggle to 0
273 if (Swcr
& XDMAV3_SWCR_SGE_MASK
) {
274 Swcr
|= XDMAV3_SWCR_SGD_MASK
;
275 XDmaV3_mWriteReg(InstancePtr
->RegBase
, XDMAV3_SWCR_OFFSET
,
278 while (Swcr
& XDMAV3_SWCR_SGE_MASK
) {
279 Swcr
= XDmaV3_mReadReg(InstancePtr
->RegBase
,
284 /* Note as stopped */
285 Ring
->RunState
= XST_DMA_SG_IS_STOPPED
;
287 /* Save the BDA to restore when channel is restarted */
289 (XDmaBdV3
*) XDmaV3_mReadReg(InstancePtr
->RegBase
,
292 /* If this is a receive channel, then the BDA restore may require a more
293 * complex treatment. If the channel stopped without processing a packet,
294 * then DMASR.SGDONE will be clear. The BDA we've already read in this case
295 * is really BDA->BDA so we need to backup one BDA to get the correct
298 Dmasr
= XDmaV3_mReadReg(InstancePtr
->RegBase
, XDMAV3_DMASR_OFFSET
);
299 if ((Dmasr
& XDMAV3_DMASR_DMACNFG_MASK
) ==
300 XDMAV3_DMASR_DMACNFG_SGDMARX_MASK
) {
301 if (!(Dmasr
& XDMAV3_DMASR_SGDONE_MASK
)) {
303 (XDmaBdV3
*) XDMAV3_PHYS_TO_VIRT(Ring
,
307 XDmaV3_mSgBdPrev(InstancePtr
, Ring
->BdaRestart
);
309 (XDmaBdV3
*) XDMAV3_VIRT_TO_PHYS(Ring
,
315 Swcr
|= XDMAV3_SWCR_DSGAR_MASK
;
316 Swcr
&= ~XDMAV3_SWCR_SGD_MASK
;
317 XDmaV3_mWriteReg(InstancePtr
->RegBase
, XDMAV3_SWCR_OFFSET
, Swcr
);
319 /* Restore interrupt enables. If an interrupt occurs due to this function
320 * stopping the channel then it will happen right here
322 XDmaV3_mWriteReg(InstancePtr
->RegBase
, XDMAV3_IER_OFFSET
, Ier
);
326 /******************************************************************************/
328 * Set the packet threshold for this SGDMA channel. This has the effect of
329 * delaying processor interrupts until the given number of packets (not BDs)
330 * have been processed.
332 * @param InstancePtr is a pointer to the instance to be worked on.
333 * @param Threshold is the packet threshold to set. If 0 is specified, then
334 * this feature is disabled. Maximum threshold is 2^12 - 1.
337 * - XST_SUCCESS if threshold set properly.
338 * - XST_NO_FEATURE if this function was called on a DMA channel that does not
339 * have interrupt coalescing capabilities.
341 * @note This function should not be prempted by another XDmaV3 function.
343 ******************************************************************************/
344 int XDmaV3_SgSetPktThreshold(XDmaV3
* InstancePtr
, u16 Threshold
)
348 /* Is this a SGDMA channel */
349 Reg
= XDmaV3_mReadReg(InstancePtr
->RegBase
, XDMAV3_DMASR_OFFSET
);
350 if (!IsSgDmaChannel(InstancePtr
)) {
351 return (XST_NO_FEATURE
);
354 /* Replace the pkt threshold field in the SWCR */
355 Reg
= XDmaV3_mReadReg(InstancePtr
->RegBase
, XDMAV3_SWCR_OFFSET
);
356 Reg
&= ~XDMAV3_SWCR_PCT_MASK
;
357 Reg
|= ((Threshold
<< XDMAV3_SWCR_PCT_SHIFT
) & XDMAV3_SWCR_PCT_MASK
);
358 XDmaV3_mWriteReg(InstancePtr
->RegBase
, XDMAV3_SWCR_OFFSET
, Reg
);
361 return (XST_SUCCESS
);
365 /******************************************************************************/
367 * Set the packet waitbound timer for this SGDMA channel. See xdmav3.h for more
368 * information on interrupt coalescing and the effects of the waitbound timer.
370 * @param InstancePtr is a pointer to the instance to be worked on.
371 * @param TimerVal is the waitbound period to set. If 0 is specified, then
372 * this feature is disabled. Maximum waitbound is 2^12 - 1. LSB is
373 * 1 millisecond (approx).
376 * - XST_SUCCESS if waitbound set properly.
377 * - XST_NO_FEATURE if this function was called on a DMA channel that does not
378 * have interrupt coalescing capabilities.
380 * @note This function should not be prempted by another XDmaV3 function.
382 ******************************************************************************/
383 int XDmaV3_SgSetPktWaitbound(XDmaV3
* InstancePtr
, u16 TimerVal
)
387 /* Is this a SGDMA channel */
388 Reg
= XDmaV3_mReadReg(InstancePtr
->RegBase
, XDMAV3_DMASR_OFFSET
);
389 if (!IsSgDmaChannel(InstancePtr
)) {
390 return (XST_NO_FEATURE
);
393 /* Replace the waitbound field in the SWCR */
394 Reg
= XDmaV3_mReadReg(InstancePtr
->RegBase
, XDMAV3_SWCR_OFFSET
);
395 Reg
&= ~XDMAV3_SWCR_PWB_MASK
;
396 Reg
|= ((TimerVal
<< XDMAV3_SWCR_PWB_SHIFT
) & XDMAV3_SWCR_PWB_MASK
);
397 XDmaV3_mWriteReg(InstancePtr
->RegBase
, XDMAV3_SWCR_OFFSET
, Reg
);
400 return (XST_SUCCESS
);
404 /******************************************************************************/
406 * Get the packet threshold for this channel that was set with
407 * XDmaV3_SgSetPktThreshold().
409 * @param InstancePtr is a pointer to the instance to be worked on.
411 * @return Current packet threshold as reported by HW. If the channel does not
412 * include interrupt coalescing, then the return value will always be 0.
413 ******************************************************************************/
414 u16
XDmaV3_SgGetPktThreshold(XDmaV3
* InstancePtr
)
418 /* Is this a SGDMA channel */
419 Reg
= XDmaV3_mReadReg(InstancePtr
->RegBase
, XDMAV3_DMASR_OFFSET
);
420 if (!IsSgDmaChannel(InstancePtr
)) {
424 /* Get the threshold */
425 Reg
= XDmaV3_mReadReg(InstancePtr
->RegBase
, XDMAV3_SWCR_OFFSET
);
426 Reg
&= XDMAV3_SWCR_PCT_MASK
;
427 Reg
>>= XDMAV3_SWCR_PCT_SHIFT
;
432 /******************************************************************************/
434 * Get the waitbound timer for this channel that was set with
435 * XDmaV3_SgSetPktWaitbound().
437 * @param InstancePtr is a pointer to the instance to be worked on.
439 * @return Current waitbound timer as reported by HW. If the channel does not
440 * include interrupt coalescing, then the return value will always be 0.
441 ******************************************************************************/
442 u16
XDmaV3_SgGetPktWaitbound(XDmaV3
* InstancePtr
)
446 /* Is this a SGDMA channel */
447 Reg
= XDmaV3_mReadReg(InstancePtr
->RegBase
, XDMAV3_DMASR_OFFSET
);
448 if (!IsSgDmaChannel(InstancePtr
)) {
452 /* Get the threshold */
453 Reg
= XDmaV3_mReadReg(InstancePtr
->RegBase
, XDMAV3_SWCR_OFFSET
);
454 Reg
&= XDMAV3_SWCR_PWB_MASK
;
455 Reg
>>= XDMAV3_SWCR_PWB_SHIFT
;
460 /******************************************************************************/
462 * Using a memory segment allocated by the caller, create and setup the BD list
463 * for the given SGDMA channel.
465 * @param InstancePtr is the instance to be worked on.
466 * @param PhysAddr is the physical base address of user memory region.
467 * @param VirtAddr is the virtual base address of the user memory region. If
468 * address translation is not being utilized, then VirtAddr should be
469 * equivalent to PhysAddr.
470 * @param Alignment governs the byte alignment of individual BDs. This function
471 * will enforce a minimum alignment of 4 bytes with no maximum as long as
472 * it is specified as a power of 2.
473 * @param BdCount is the number of BDs to setup in the user memory region. It is
474 * assumed the region is large enough to contain the BDs. Refer to the
475 * "SGDMA List Creation" section in xdmav3.h for more information on
480 * - XST_SUCCESS if initialization was successful
481 * - XST_NO_FEATURE if the provided instance is a non SGDMA type of DMA
483 * - XST_INVALID_PARAM under any of the following conditions: 1) PhysAddr and/or
484 * VirtAddr are not aligned to the given Alignment parameter; 2) Alignment
485 * parameter does not meet minimum requirements or is not a power of 2 value;
487 * - XST_DMA_SG_LIST_ERROR if the memory segment containing the list spans
488 * over address 0x00000000 in virtual address space.
492 * Some DMA HW requires 8 or more byte alignments of BDs. Make sure the correct
493 * value is passed into the Alignment parameter to meet individual DMA HW
496 ******************************************************************************/
497 int XDmaV3_SgListCreate(XDmaV3
* InstancePtr
, u32 PhysAddr
, u32 VirtAddr
,
498 u32 Alignment
, unsigned BdCount
)
503 XDmaV3_BdRing
*Ring
= &InstancePtr
->BdRing
;
505 /* In case there is a failure prior to creating list, make sure the following
506 * attributes are 0 to prevent calls to other SG functions from doing anything
514 /* Is this a SGDMA channel */
515 if (!IsSgDmaChannel(InstancePtr
)) {
516 return (XST_NO_FEATURE
);
519 /* Make sure Alignment parameter meets minimum requirements */
520 if (Alignment
< XDMABDV3_MINIMUM_ALIGNMENT
) {
521 return (XST_INVALID_PARAM
);
524 /* Make sure Alignment is a power of 2 */
525 if ((Alignment
- 1) & Alignment
) {
526 return (XST_INVALID_PARAM
);
529 /* Make sure PhysAddr and VirtAddr are on same Alignment */
530 if ((PhysAddr
% Alignment
) || (VirtAddr
% Alignment
)) {
531 return (XST_INVALID_PARAM
);
534 /* Is BdCount reasonable? */
536 return (XST_INVALID_PARAM
);
539 /* Parameters are sane. Stop the HW just to be safe */
540 XDmaV3_SgStop(InstancePtr
);
542 /* Figure out how many bytes will be between the start of adjacent BDs */
544 (sizeof(XDmaBdV3
) + (Alignment
- 1)) & ~(Alignment
- 1);
546 /* Must make sure the ring doesn't span address 0x00000000. If it does,
547 * then the next/prev BD traversal macros will fail.
549 if (VirtAddr
> (VirtAddr
+ (Ring
->Separation
* BdCount
) - 1)) {
550 return (XST_DMA_SG_LIST_ERROR
);
553 /* Initial ring setup:
554 * - Clear the entire space
555 * - Setup each BD's BDA field with the physical address of the next BD
556 * - Set each BD's DMASR.DMADONE bit
558 memset((void *) VirtAddr
, 0, (Ring
->Separation
* BdCount
));
561 BdP
= PhysAddr
+ Ring
->Separation
;
562 for (i
= 1; i
< BdCount
; i
++) {
563 XDmaV3_mWriteBd(BdV
, XDMAV3_BD_BDA_OFFSET
, BdP
);
564 XDmaV3_mWriteBd(BdV
, XDMAV3_BD_DMASR_OFFSET
,
565 XDMAV3_DMASR_DMADONE_MASK
);
566 XDMAV3_CACHE_FLUSH(BdV
);
567 BdV
+= Ring
->Separation
;
568 BdP
+= Ring
->Separation
;
571 /* At the end of the ring, link the last BD back to the top */
572 XDmaV3_mWriteBd(BdV
, XDMAV3_BD_BDA_OFFSET
, PhysAddr
);
574 /* Setup and initialize pointers and counters */
575 InstancePtr
->BdRing
.RunState
= XST_DMA_SG_IS_STOPPED
;
576 Ring
->BaseAddr
= VirtAddr
;
577 Ring
->PhysBaseAddr
= PhysAddr
;
578 Ring
->HighAddr
= BdV
;
579 Ring
->Length
= Ring
->HighAddr
- Ring
->BaseAddr
+ Ring
->Separation
;
580 Ring
->AllCnt
= BdCount
;
581 Ring
->FreeCnt
= BdCount
;
582 Ring
->FreeHead
= (XDmaBdV3
*) VirtAddr
;
583 Ring
->PreHead
= (XDmaBdV3
*) VirtAddr
;
584 Ring
->HwHead
= (XDmaBdV3
*) VirtAddr
;
585 Ring
->HwTail
= (XDmaBdV3
*) VirtAddr
;
586 Ring
->PostHead
= (XDmaBdV3
*) VirtAddr
;
587 Ring
->BdaRestart
= (XDmaBdV3
*) PhysAddr
;
589 /* Make sure the DMACR.SGS is 1 so that no DMA operations proceed until
590 * the start function is called.
594 return (XST_SUCCESS
);
598 /******************************************************************************/
600 * Clone the given BD into every BD in the list. Except for XDMAV3_BD_BDA_OFFSET,
601 * every field of the source BD is replicated in every BD of the list.
603 * This function can be called only when all BDs are in the free group such as
604 * they are immediately after initialization with XDmaV3_SgListCreate(). This
605 * prevents modification of BDs while they are in use by HW or the user.
607 * @param InstancePtr is the instance to be worked on.
608 * @param SrcBdPtr is the source BD template to be cloned into the list. This BD
612 * - XST_SUCCESS if the list was modified.
613 * - XST_DMA_SG_NO_LIST if a list has not been created.
614 * - XST_DMA_SG_LIST_ERROR if some of the BDs in this channel are under HW
616 * - XST_DEVICE_IS_STARTED if the DMA channel has not been stopped.
618 ******************************************************************************/
619 int XDmaV3_SgListClone(XDmaV3
* InstancePtr
, XDmaBdV3
* SrcBdPtr
)
624 XDmaV3_BdRing
*Ring
= &InstancePtr
->BdRing
;
626 /* Can't do this function if there isn't a ring */
627 if (Ring
->AllCnt
== 0) {
628 return (XST_DMA_SG_NO_LIST
);
631 /* Can't do this function with the channel running */
632 if (Ring
->RunState
== XST_DMA_SG_IS_STARTED
) {
633 return (XST_DEVICE_IS_STARTED
);
636 /* Can't do this function with some of the BDs in use */
637 if (Ring
->FreeCnt
!= Ring
->AllCnt
) {
638 return (XST_DMA_SG_LIST_ERROR
);
641 /* Modify the template by setting DMASR.DMADONE */
642 Save
= XDmaV3_mReadBd(SrcBdPtr
, XDMAV3_BD_DMASR_OFFSET
);
643 Save
|= XDMAV3_DMASR_DMADONE_MASK
;
644 XDmaV3_mWriteBd(SrcBdPtr
, XDMAV3_BD_DMASR_OFFSET
, Save
);
646 /* Starting from the top of the ring, save BD.Next, overwrite the entire BD
647 * with the template, then restore BD.Next
649 for (i
= 0, CurBd
= Ring
->BaseAddr
;
650 i
< Ring
->AllCnt
; i
++, CurBd
+= Ring
->Separation
) {
651 Save
= XDmaV3_mReadBd(CurBd
, XDMAV3_BD_BDA_OFFSET
);
652 memcpy((void *) CurBd
, SrcBdPtr
, sizeof(XDmaBdV3
));
653 XDmaV3_mWriteBd(CurBd
, XDMAV3_BD_BDA_OFFSET
, Save
);
654 XDMAV3_CACHE_FLUSH(CurBd
);
657 return (XST_SUCCESS
);
661 /******************************************************************************/
663 * Reserve locations in the BD list. The set of returned BDs may be modified in
664 * preparation for future DMA transaction(s). Once the BDs are ready to be
665 * submitted to HW, the user must call XDmaV3_SgBdToHw() in the same order which
666 * they were allocated here. Example:
670 * Status = XDmaV3_SgBdAlloc(MyDmaInstPtr, NumBd, &MyBdSet);
672 * if (Status != XST_SUCCESS)
674 * // Not enough BDs available for the request
678 * for (i=0; i<NumBd; i++)
680 * // Prepare CurBd.....
683 * CurBd = XDmaV3_mSgBdNext(MyDmaInstPtr, CurBd);
687 * Status = XDmaV3_SgBdToHw(MyDmaInstPtr, NumBd, MyBdSet);
690 * A more advanced use of this function may allocate multiple sets of BDs.
691 * They must be allocated and given to HW in the correct sequence:
694 * XDmaV3_SgBdAlloc(MyDmaInstPtr, NumBd1, &MySet1);
695 * XDmaV3_SgBdToHw(MyDmaInstPtr, NumBd1, MySet1);
698 * XDmaV3_SgBdAlloc(MyDmaInstPtr, NumBd1, &MySet1);
699 * XDmaV3_SgBdAlloc(MyDmaInstPtr, NumBd2, &MySet2);
700 * XDmaV3_SgBdToHw(MyDmaInstPtr, NumBd1, MySet1);
701 * XDmaV3_SgBdToHw(MyDmaInstPtr, NumBd2, MySet2);
704 * XDmaV3_SgBdAlloc(MyDmaInstPtr, NumBd1, &MySet1);
705 * XDmaV3_SgBdAlloc(MyDmaInstPtr, NumBd2, &MySet2);
706 * XDmaV3_SgBdToHw(MyDmaInstPtr, NumBd2, MySet2);
707 * XDmaV3_SgBdToHw(MyDmaInstPtr, NumBd1, MySet1);
710 * Use the API defined in xdmabdv3.h to modify individual BDs. Traversal of the
711 * BD set can be done using XDmaV3_mSgBdNext() and XDmaV3_mSgBdPrev().
713 * @param InstancePtr is a pointer to the instance to be worked on.
714 * @param NumBd is the number of BDs to allocate
715 * @param BdSetPtr is an output parameter, it points to the first BD available
719 * - XST_SUCCESS if the requested number of BDs was returned in the BdSetPtr
721 * - XST_FAILURE if there were not enough free BDs to satisfy the request.
723 * @note This function should not be preempted by another XDmaV3 function call
724 * that modifies the BD space. It is the caller's responsibility to
725 * provide a mutual exclusion mechanism.
727 * @note Do not modify more BDs than the number requested with the NumBd
728 * parameter. Doing so will lead to data corruption and system
731 ******************************************************************************/
732 int XDmaV3_SgBdAlloc(XDmaV3
* InstancePtr
, unsigned NumBd
, XDmaBdV3
** BdSetPtr
)
734 XDmaV3_BdRing
*Ring
= &InstancePtr
->BdRing
;
736 /* Enough free BDs available for the request? */
737 if (Ring
->FreeCnt
< NumBd
) {
738 return (XST_FAILURE
);
741 /* Set the return argument and move FreeHead forward */
742 *BdSetPtr
= Ring
->FreeHead
;
743 XDMAV3_RING_SEEKAHEAD(Ring
, Ring
->FreeHead
, NumBd
);
744 Ring
->FreeCnt
-= NumBd
;
745 Ring
->PreCnt
+= NumBd
;
746 return (XST_SUCCESS
);
749 /******************************************************************************/
751 * Fully or partially undo an XDmaV3_SgBdAlloc() operation. Use this function
752 * if all the BDs allocated by XDmaV3_SgBdAlloc() could not be transferred to
753 * HW with XDmaV3_SgBdToHw().
755 * This function helps out in situations when an unrelated error occurs after
756 * BDs have been allocated but before they have been given to HW. An example of
757 * this type of error would be an OS running out of resources.
759 * This function is not the same as XDmaV3_SgBdFree(). The Free function returns
760 * BDs to the free list after they have been processed by HW, while UnAlloc
761 * returns them before being processed by HW.
763 * There are two scenarios where this function can be used. Full UnAlloc or
764 * Partial UnAlloc. A Full UnAlloc means all the BDs Alloc'd will be returned:
767 * Status = XDmaV3_SgBdAlloc(Inst, 10, &BdPtr);
772 * Status = XDmaV3_SgBdUnAlloc(Inst, 10, &BdPtr);
776 * A partial UnAlloc means some of the BDs Alloc'd will be returned:
779 * Status = XDmaV3_SgBdAlloc(Inst, 10, &BdPtr);
787 * Status = XDmaV3_SgBdUnAlloc(Inst, BdsLeft, CurBdPtr);
790 * CurBdPtr = XDmaV3_SgBdNext(Inst, CurBdPtr);
795 * A partial UnAlloc must include the last BD in the list that was Alloc'd.
797 * @param InstancePtr is a pointer to the instance to be worked on.
798 * @param NumBd is the number of BDs to allocate
799 * @param BdSetPtr is an output parameter, it points to the first BD available
803 * - XST_SUCCESS if the BDs were unallocated.
804 * - XST_FAILURE if NumBd parameter was greater that the number of BDs in the
805 * preprocessing state.
807 * @note This function should not be preempted by another XDmaV3 function call
808 * that modifies the BD space. It is the caller's responsibility to
809 * provide a mutual exclusion mechanism.
811 ******************************************************************************/
812 int XDmaV3_SgBdUnAlloc(XDmaV3
* InstancePtr
, unsigned NumBd
,
815 XDmaV3_BdRing
*Ring
= &InstancePtr
->BdRing
;
817 /* Enough BDs in the free state for the request? */
818 if (Ring
->PreCnt
< NumBd
) {
819 return (XST_FAILURE
);
822 /* Set the return argument and move FreeHead backward */
823 XDMAV3_RING_SEEKBACK(Ring
, Ring
->FreeHead
, NumBd
);
824 Ring
->FreeCnt
+= NumBd
;
825 Ring
->PreCnt
-= NumBd
;
826 return (XST_SUCCESS
);
830 /******************************************************************************/
832 * Enqueue a set of BDs to HW that were previously allocated by
833 * XDmaV3_SgBdAlloc(). Once this function returns, the argument BD set goes
834 * under HW control. Any changes made to these BDs after this point will corrupt
835 * the BD list leading to data corruption and system instability.
837 * The set will be rejected if the last BD of the set does not mark the end of
838 * a packet (see XDmaBdV3_mSetLast()).
840 * @param InstancePtr is a pointer to the instance to be worked on.
841 * @param NumBd is the number of BDs in the set.
842 * @param BdSetPtr is the first BD of the set to commit to HW.
845 * - XST_SUCCESS if the set of BDs was accepted and enqueued to HW.
846 * - XST_FAILURE if the set of BDs was rejected because the last BD of the set
847 * did not have its "last" bit set.
848 * - XST_DMA_SG_LIST_ERROR if this function was called out of sequence with
849 * XDmaV3_SgBdAlloc().
851 * @note This function should not be preempted by another XDmaV3 function call
852 * that modifies the BD space. It is the caller's responsibility to
853 * provide a mutual exclusion mechanism.
855 ******************************************************************************/
856 int XDmaV3_SgBdToHw(XDmaV3
* InstancePtr
, unsigned NumBd
, XDmaBdV3
* BdSetPtr
)
858 XDmaV3_BdRing
*Ring
= &InstancePtr
->BdRing
;
864 /* Make sure we are in sync with XDmaV3_SgBdAlloc() */
865 if ((Ring
->PreCnt
< NumBd
) || (Ring
->PreHead
!= BdSetPtr
)) {
866 return (XST_DMA_SG_LIST_ERROR
);
869 /* For all BDs in this set (except the last one)
870 * - Clear DMASR except for DMASR.DMABSY
873 * For the last BD in this set
874 * - Clear DMASR except for DMASR.DMABSY
875 * - Set DMACR.SGS (marks the end of the new active list)
877 LastBdPtr
= BdSetPtr
;
878 for (i
= 1; i
< NumBd
; i
++) {
879 XDmaV3_mWriteBd(LastBdPtr
, XDMAV3_BD_DMASR_OFFSET
,
880 XDMAV3_DMASR_DMABSY_MASK
);
882 Dmacr
= XDmaV3_mReadBd(LastBdPtr
, XDMAV3_BD_DMACR_OFFSET
);
883 XDmaV3_mWriteBd(LastBdPtr
, XDMAV3_BD_DMACR_OFFSET
, /* DMACR.SGS = 0 */
884 Dmacr
& ~XDMAV3_DMACR_SGS_MASK
);
885 XDMAV3_CACHE_FLUSH(LastBdPtr
);
887 LastBdPtr
= XDmaV3_mSgBdNext(InstancePtr
, LastBdPtr
);
891 XDmaV3_mWriteBd(LastBdPtr
, XDMAV3_BD_DMASR_OFFSET
,
892 XDMAV3_DMASR_DMABSY_MASK
);
894 Dmacr
= XDmaV3_mReadBd(LastBdPtr
, XDMAV3_BD_DMACR_OFFSET
);
895 XDmaV3_mWriteBd(LastBdPtr
, XDMAV3_BD_DMACR_OFFSET
, /* DMACR.SGS = 1 */
896 Dmacr
| XDMAV3_DMACR_SGS_MASK
);
897 XDMAV3_CACHE_FLUSH(LastBdPtr
);
899 /* The last BD should have DMACR.LAST set */
900 if (!(Dmacr
& XDMAV3_DMACR_LAST_MASK
)) {
901 return (XST_FAILURE
);
904 /* This set has completed pre-processing, adjust ring pointers & counters */
905 XDMAV3_RING_SEEKAHEAD(Ring
, Ring
->PreHead
, NumBd
);
906 Ring
->PreCnt
-= NumBd
;
908 /* If it is running, tell the DMA engine to pause */
909 Swcr
= XDmaV3_mReadReg(InstancePtr
->RegBase
, XDMAV3_SWCR_OFFSET
);
910 if (Ring
->RunState
== XST_DMA_SG_IS_STARTED
) {
911 Swcr
|= XDMAV3_SWCR_SGD_MASK
;
912 XDmaV3_mWriteReg(InstancePtr
->RegBase
, XDMAV3_SWCR_OFFSET
,
916 /* Transfer control of the BDs to the DMA engine. There are two cases to
919 * 1) No currently active list.
920 * In this case, just resume the engine.
923 * In this case, the last BD in the current list should have DMACR.SGS
924 * cleared so the engine will never stop there. The new stopping
925 * point is at the end of the extended list. Once the SGS bits are
926 * changed, resume the engine.
928 if (Ring
->HwCnt
!= 0) {
930 Dmacr
= XDmaV3_mReadBd(Ring
->HwTail
, XDMAV3_BD_DMACR_OFFSET
);
931 Dmacr
&= ~XDMAV3_DMACR_SGS_MASK
;
932 XDmaV3_mWriteBd(Ring
->HwTail
, XDMAV3_BD_DMACR_OFFSET
, Dmacr
);
933 XDMAV3_CACHE_FLUSH(Ring
->HwTail
);
936 /* Adjust Hw pointers and counters. XDMAV3_RING_SEEKAHEAD could be used to
937 * advance HwTail, but it will always evaluate to LastBdPtr
939 Ring
->HwTail
= LastBdPtr
;
940 Ring
->HwCnt
+= NumBd
;
942 /* If it was enabled, tell the engine to resume */
943 if (Ring
->RunState
== XST_DMA_SG_IS_STARTED
) {
944 Swcr
&= ~XDMAV3_SWCR_SGD_MASK
;
945 Swcr
|= XDMAV3_SWCR_SGE_MASK
;
946 XDmaV3_mWriteReg(InstancePtr
->RegBase
, XDMAV3_SWCR_OFFSET
,
950 return (XST_SUCCESS
);
954 /******************************************************************************/
956 * Returns a set of BD(s) that have been processed by HW. The returned BDs may
957 * be examined to determine the outcome of the DMA transaction(s). Once the BDs
958 * have been examined, the user must call XDmaV3_SgBdFree() in the same order
 959 * in which they were retrieved here. Example:
962 * MaxBd = 0xFFFFFFFF; // Ensure we get all that are ready
964 * NumBd = XDmaV3_SgBdFromHw(MyDmaInstPtr, MaxBd, &MyBdSet);
968 * // HW has nothing ready for us yet
972 * for (i=0; i<NumBd; i++)
974 * // Examine CurBd for post processing.....
977 * CurBd = XDmaV3_mSgBdNext(MyDmaInstPtr, CurBd);
980 * XDmaV3_SgBdFree(MyDmaInstPtr, NumBd, MyBdSet); // Return the list
984 * A more advanced use of this function may allocate multiple sets of BDs.
985 * They must be retrieved from HW and freed in the correct sequence:
988 * XDmaV3_SgBdFromHw(MyDmaInstPtr, NumBd1, &MySet1);
989 * XDmaV3_SgBdFree(MyDmaInstPtr, NumBd1, MySet1);
992 * XDmaV3_SgBdFromHw(MyDmaInstPtr, NumBd1, &MySet1);
993 * XDmaV3_SgBdFromHw(MyDmaInstPtr, NumBd2, &MySet2);
994 * XDmaV3_SgBdFree(MyDmaInstPtr, NumBd1, MySet1);
995 * XDmaV3_SgBdFree(MyDmaInstPtr, NumBd2, MySet2);
998 * XDmaV3_SgBdFromHw(MyDmaInstPtr, NumBd1, &MySet1);
999 * XDmaV3_SgBdFromHw(MyDmaInstPtr, NumBd2, &MySet2);
1000 * XDmaV3_SgBdFree(MyDmaInstPtr, NumBd2, MySet2);
1001 * XDmaV3_SgBdFree(MyDmaInstPtr, NumBd1, MySet1);
1004 * If HW has only partially completed a packet spanning multiple BDs, then none
1005 * of the BDs for that packet will be included in the results.
1007 * @param InstancePtr is a pointer to the instance to be worked on.
1008 * @param BdLimit is the maximum number of BDs to return in the set.
1009 * @param BdSetPtr is an output parameter, it points to the first BD available
1013 * The number of BDs processed by HW. A value of 0 indicates that no data
1014 * is available. No more than BdLimit BDs will be returned.
1016 * @note Treat BDs returned by this function as read-only.
1018 * @note This function should not be preempted by another XDmaV3 function call
1019 * that modifies the BD space. It is the caller's responsibility to
1020 * provide a mutual exclusion mechanism.
1022 ******************************************************************************/
1023 unsigned XDmaV3_SgBdFromHw(XDmaV3
* InstancePtr
, unsigned BdLimit
,
1024 XDmaBdV3
** BdSetPtr
)
1026 XDmaV3_BdRing
*Ring
= &InstancePtr
->BdRing
;
1029 unsigned BdPartialCount
;
1032 CurBd
= Ring
->HwHead
;
1036 /* If no BDs in work group, then there's nothing to search */
1037 if (Ring
->HwCnt
== 0) {
1042 /* Starting at HwHead, keep moving forward in the list until:
1043 * - A BD is encountered with its DMASR.DMABSY bit set which means HW has
1044 * not completed processing of that BD.
1045 * - Ring->HwTail is reached
1046 * - The number of requested BDs has been processed
1048 while (BdCount
< BdLimit
) {
1049 /* Read the status */
1050 XDMAV3_CACHE_INVALIDATE(CurBd
);
1051 Dmasr
= XDmaV3_mReadBd(CurBd
, XDMAV3_BD_DMASR_OFFSET
);
1053 /* If the HW still hasn't processed this BD then we are done */
1054 if (Dmasr
& XDMAV3_DMASR_DMABSY_MASK
) {
1060 /* HW has processed this BD so check the "last" bit. If it is clear,
1061 * then there are more BDs for the current packet. Keep a count of
1062 * these partial packet BDs.
1064 if (Dmasr
& XDMAV3_DMASR_LAST_MASK
) {
1071 /* Reached the end of the work group */
1072 if (CurBd
== Ring
->HwTail
) {
1076 /* Move on to next BD in work group */
1077 CurBd
= XDmaV3_mSgBdNext(InstancePtr
, CurBd
);
1080 /* Subtract off any partial packet BDs found */
1081 BdCount
-= BdPartialCount
;
1083 /* If BdCount is non-zero then BDs were found to return. Set return
1084 * parameters, update pointers and counters, return success
1087 *BdSetPtr
= Ring
->HwHead
;
1088 Ring
->HwCnt
-= BdCount
;
1089 Ring
->PostCnt
+= BdCount
;
1090 XDMAV3_RING_SEEKAHEAD(Ring
, Ring
->HwHead
, BdCount
);
1100 /******************************************************************************/
1102 * Frees a set of BDs that had been previously retrieved with XDmaV3_SgBdFromHw().
1104 * @param InstancePtr is a pointer to the instance to be worked on.
1105 * @param NumBd is the number of BDs to free.
1106 * @param BdSetPtr is the head of a list of BDs returned by XDmaV3_SgBdFromHw().
1109 * - XST_SUCCESS if the set of BDs was freed.
1110 * - XST_DMA_SG_LIST_ERROR if this function was called out of sequence with
1111 * XDmaV3_SgBdFromHw().
1113 * @note This function should not be preempted by another XDmaV3 function call
1114 * that modifies the BD space. It is the caller's responsibility to
1115 * provide a mutual exclusion mechanism.
1117 ******************************************************************************/
1118 int XDmaV3_SgBdFree(XDmaV3
* InstancePtr
, unsigned NumBd
, XDmaBdV3
* BdSetPtr
)
1120 XDmaV3_BdRing
*Ring
= &InstancePtr
->BdRing
;
1122 /* Make sure we are in sync with XDmaV3_SgBdFromHw() */
1123 if ((Ring
->PostCnt
< NumBd
) || (Ring
->PostHead
!= BdSetPtr
)) {
1124 return (XST_DMA_SG_LIST_ERROR
);
1127 /* Update pointers and counters */
1128 Ring
->FreeCnt
+= NumBd
;
1129 Ring
->PostCnt
-= NumBd
;
1130 XDMAV3_RING_SEEKAHEAD(Ring
, Ring
->PostHead
, NumBd
);
1131 return (XST_SUCCESS
);
1135 /******************************************************************************/
1137 * Check the internal data structures of the BD ring for the provided channel.
1138 * The following checks are made:
1140 * - Is the BD ring linked correctly in physical address space.
1141 * - Do the internal pointers point to BDs in the ring.
1142 * - Do the internal counters add up.
1144 * The channel should be stopped prior to calling this function.
1146 * @param InstancePtr is a pointer to the instance to be worked on.
 1149 * - XST_SUCCESS if no errors were found.
1150 * - XST_DMA_SG_NO_LIST if the list has not been created.
1151 * - XST_IS_STARTED if the channel is not stopped.
1152 * - XST_DMA_SG_LIST_ERROR if a problem is found with the internal data
1153 * structures. If this value is returned, the channel should be reset to
1154 * avoid data corruption or system instability.
1156 * @note This function should not be preempted by another XDmaV3 function call
1157 * that modifies the BD space. It is the caller's responsibility to
1158 * provide a mutual exclusion mechanism.
1160 ******************************************************************************/
1161 int XDmaV3_SgCheck(XDmaV3
* InstancePtr
)
1163 XDmaV3_BdRing
*RingPtr
= &InstancePtr
->BdRing
;
1167 /* Is the list created */
1168 if (RingPtr
->AllCnt
== 0) {
1169 return (XST_DMA_SG_NO_LIST
);
1172 /* Can't check if channel is running */
1173 if (RingPtr
->RunState
== XST_DMA_SG_IS_STARTED
) {
1174 return (XST_IS_STARTED
);
1177 /* RunState doesn't make sense */
1178 else if (RingPtr
->RunState
!= XST_DMA_SG_IS_STOPPED
) {
1179 return (XST_DMA_SG_LIST_ERROR
);
1182 /* Verify internal pointers point to correct memory space */
1183 AddrV
= (u32
) RingPtr
->FreeHead
;
1184 if ((AddrV
< RingPtr
->BaseAddr
) || (AddrV
> RingPtr
->HighAddr
)) {
1185 return (XST_DMA_SG_LIST_ERROR
);
1188 AddrV
= (u32
) RingPtr
->PreHead
;
1189 if ((AddrV
< RingPtr
->BaseAddr
) || (AddrV
> RingPtr
->HighAddr
)) {
1190 return (XST_DMA_SG_LIST_ERROR
);
1193 AddrV
= (u32
) RingPtr
->HwHead
;
1194 if ((AddrV
< RingPtr
->BaseAddr
) || (AddrV
> RingPtr
->HighAddr
)) {
1195 return (XST_DMA_SG_LIST_ERROR
);
1198 AddrV
= (u32
) RingPtr
->HwTail
;
1199 if ((AddrV
< RingPtr
->BaseAddr
) || (AddrV
> RingPtr
->HighAddr
)) {
1200 return (XST_DMA_SG_LIST_ERROR
);
1203 AddrV
= (u32
) RingPtr
->PostHead
;
1204 if ((AddrV
< RingPtr
->BaseAddr
) || (AddrV
> RingPtr
->HighAddr
)) {
1205 return (XST_DMA_SG_LIST_ERROR
);
1208 /* Verify internal counters add up */
1209 if ((RingPtr
->HwCnt
+ RingPtr
->PreCnt
+ RingPtr
->FreeCnt
+
1210 RingPtr
->PostCnt
) != RingPtr
->AllCnt
) {
1211 return (XST_DMA_SG_LIST_ERROR
);
1214 /* Verify BDs are linked correctly */
1215 AddrV
= RingPtr
->BaseAddr
;
1216 AddrP
= RingPtr
->PhysBaseAddr
+ RingPtr
->Separation
;
1217 for (i
= 1; i
< RingPtr
->AllCnt
; i
++) {
1218 /* Check BDA for this BD. It should point to next physical addr */
1219 if (XDmaV3_mReadBd(AddrV
, XDMAV3_BD_BDA_OFFSET
) != AddrP
) {
1220 return (XST_DMA_SG_LIST_ERROR
);
1223 /* Move on to next BD */
1224 AddrV
+= RingPtr
->Separation
;
1225 AddrP
+= RingPtr
->Separation
;
1228 /* Last BD should point back to the beginning of ring */
1229 if (XDmaV3_mReadBd(AddrV
, XDMAV3_BD_BDA_OFFSET
) !=
1230 RingPtr
->PhysBaseAddr
) {
1231 return (XST_DMA_SG_LIST_ERROR
);
1234 /* No problems found */
1235 return (XST_SUCCESS
);
1239 /******************************************************************************
1240 * Verify given channel is of the SGDMA variety.
1242 * @param InstancePtr is a pointer to the instance to be worked on.
1245 * - 1 if channel is of type SGDMA
1246 * - 0 if channel is not of type SGDMA
1247 ******************************************************************************/
1248 static int IsSgDmaChannel(XDmaV3
* InstancePtr
)
1252 Dmasr
= XDmaV3_mReadReg(InstancePtr
->RegBase
, XDMAV3_DMASR_OFFSET
);
1253 if (Dmasr
& (XDMAV3_DMASR_DMACNFG_SGDMARX_MASK
|
1254 XDMAV3_DMASR_DMACNFG_SGDMATX_MASK
|
1255 XDMAV3_DMASR_DMACNFG_SSGDMA_MASK
)) {