/*
 * drivers/xilinx_common/xlldma_bdring.c
 * (imported from the linux-2.6-xlnx.git tree)
 */
1 /* $Id: */
2 /******************************************************************************
4 * XILINX IS PROVIDING THIS DESIGN, CODE, OR INFORMATION "AS IS"
5 * AS A COURTESY TO YOU, SOLELY FOR USE IN DEVELOPING PROGRAMS AND
6 * SOLUTIONS FOR XILINX DEVICES. BY PROVIDING THIS DESIGN, CODE,
7 * OR INFORMATION AS ONE POSSIBLE IMPLEMENTATION OF THIS FEATURE,
8 * APPLICATION OR STANDARD, XILINX IS MAKING NO REPRESENTATION
9 * THAT THIS IMPLEMENTATION IS FREE FROM ANY CLAIMS OF INFRINGEMENT,
10 * AND YOU ARE RESPONSIBLE FOR OBTAINING ANY RIGHTS YOU MAY REQUIRE
11 * FOR YOUR IMPLEMENTATION. XILINX EXPRESSLY DISCLAIMS ANY
12 * WARRANTY WHATSOEVER WITH RESPECT TO THE ADEQUACY OF THE
13 * IMPLEMENTATION, INCLUDING BUT NOT LIMITED TO ANY WARRANTIES OR
14 * REPRESENTATIONS THAT THIS IMPLEMENTATION IS FREE FROM CLAIMS OF
15 * INFRINGEMENT, IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
16 * FOR A PARTICULAR PURPOSE.
18 * (c) Copyright 2007-2008 Xilinx Inc.
19 * All rights reserved.
20 * This program is free software; you can redistribute it and/or modify it
21 * under the terms of the GNU General Public License as published by the
22 * Free Software Foundation; either version 2 of the License, or (at your
23 * option) any later version.
25 * You should have received a copy of the GNU General Public License
26 * along with this program; if not, write to the Free Software
27 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
29 ******************************************************************************/
30 /*****************************************************************************/
31 /**
33 * @file xlldma_bdring.c
35 * This file implements buffer descriptor ring related functions. For more
36 * information on this driver, see xlldma.h.
38 * <pre>
39 * MODIFICATION HISTORY:
41 * Ver Who Date Changes
42 * ----- ---- -------- -------------------------------------------------------
43 * 1.00a xd 12/21/06 First release
44 * </pre>
45 ******************************************************************************/
47 /***************************** Include Files *********************************/
49 #include <linux/string.h>
51 #include "xlldma.h"
52 #include "xenv.h"
54 /************************** Constant Definitions *****************************/
57 /**************************** Type Definitions *******************************/
60 /***************** Macros (Inline Functions) Definitions *********************/
62 /******************************************************************************
63 * Define methods to flush and invalidate cache for BDs should they be
64 * located in cached memory. These macros may NOPs if the underlying
65 * XCACHE_FLUSH_DCACHE_RANGE and XCACHE_INVALIDATE_DCACHE_RANGE macros are not
66 * implemented or they do nothing.
67 *****************************************************************************/
/*
 * Cache maintenance helpers for BDs that may live in cached memory.
 * When the platform does not provide XCACHE_FLUSH_DCACHE_RANGE /
 * XCACHE_INVALIDATE_DCACHE_RANGE these expand to nothing, which is
 * correct for coherent (non-cached) BD memory.
 */
#ifdef XCACHE_FLUSH_DCACHE_RANGE
#  define XLLDMA_CACHE_FLUSH(BdPtr)				\
	XCACHE_FLUSH_DCACHE_RANGE((BdPtr), XLLDMA_BD_HW_NUM_BYTES)
#else
#  define XLLDMA_CACHE_FLUSH(BdPtr)
#endif

#ifdef XCACHE_INVALIDATE_DCACHE_RANGE
#  define XLLDMA_CACHE_INVALIDATE(BdPtr)			\
	XCACHE_INVALIDATE_DCACHE_RANGE((BdPtr), XLLDMA_BD_HW_NUM_BYTES)
#else
#  define XLLDMA_CACHE_INVALIDATE(BdPtr)
#endif
82 /******************************************************************************
83 * Compute the virtual address of a descriptor from its physical address
85 * @param BdPtr is the physical address of the BD
87 * @returns Virtual address of BdPtr
89 * @note Assume BdPtr is always a valid BD in the ring
90 * @note RingPtr is an implicit parameter
91 *****************************************************************************/
/* Translate a BD physical address to its virtual address by applying the
 * ring's constant virtual-physical offset.  RingPtr is an implicit
 * parameter; BdPtr must be a valid BD inside the ring.
 * NOTE(review): the (u32) casts assume a 32-bit address space (the
 * original PowerPC/MicroBlaze targets) -- confirm before reusing on
 * 64-bit platforms.
 */
#define XLLDMA_PHYS_TO_VIRT(BdPtr) \
	((u32)(BdPtr) + (RingPtr->FirstBdAddr - RingPtr->FirstBdPhysAddr))
95 /******************************************************************************
96 * Compute the physical address of a descriptor from its virtual address
98 * @param BdPtr is the virtual address of the BD
100 * @returns Physical address of BdPtr
102 * @note Assume BdPtr is always a valid BD in the ring
103 * @note RingPtr is an implicit parameter
104 *****************************************************************************/
/* Translate a BD virtual address to its physical address (inverse of
 * XLLDMA_PHYS_TO_VIRT).  RingPtr is an implicit parameter; BdPtr must be
 * a valid BD inside the ring.
 */
#define XLLDMA_VIRT_TO_PHYS(BdPtr) \
	((u32)(BdPtr) - (RingPtr->FirstBdAddr - RingPtr->FirstBdPhysAddr))
108 /******************************************************************************
109 * Move the BdPtr argument ahead an arbitrary number of BDs wrapping around
110 * to the beginning of the ring if needed.
112 * We know if a wraparound should occur if the new BdPtr is greater than
113 * the high address in the ring OR if the new BdPtr crosses the 0xFFFFFFFF
114 * to 0 boundary.
116 * @param RingPtr is the ring BdPtr appears in
117 * @param BdPtr on input is the starting BD position and on output is the
118 * final BD position
119 * @param NumBd is the number of BD spaces to increment
121 *****************************************************************************/
/* Advance BdPtr by NumBd descriptor slots, wrapping to the start of the
 * ring when the new address passes LastBdAddr or wraps past 0xFFFFFFFF
 * (detected by the unsigned sum going backwards).
 * Wrapped in do { } while (0) so the macro behaves as a single statement
 * in all contexts (e.g. an unbraced if/else arm).
 */
#define XLLDMA_RING_SEEKAHEAD(RingPtr, BdPtr, NumBd)			\
	do {								\
		u32 Addr = (u32)(BdPtr);				\
									\
		Addr += ((RingPtr)->Separation * (NumBd));		\
		if ((Addr > (RingPtr)->LastBdAddr) ||			\
		    ((u32)(BdPtr) > Addr)) {				\
			Addr -= (RingPtr)->Length;			\
		}							\
									\
		(BdPtr) = (XLlDma_Bd *) Addr;				\
	} while (0)
135 /******************************************************************************
136 * Move the BdPtr argument backwards an arbitrary number of BDs wrapping
137 * around to the end of the ring if needed.
139 * We know if a wraparound should occur if the new BdPtr is less than
140 * the base address in the ring OR if the new BdPtr crosses the 0xFFFFFFFF
141 * to 0 boundary.
143 * @param RingPtr is the ring BdPtr appears in
144 * @param BdPtr on input is the starting BD position and on output is the
145 * final BD position
146 * @param NumBd is the number of BD spaces to increment
148 *****************************************************************************/
/* Move BdPtr backwards by NumBd descriptor slots, wrapping to the end of
 * the ring when the new address drops below FirstBdAddr or underflows
 * past 0 (detected by the unsigned difference going forwards).
 * Wrapped in do { } while (0) so the macro behaves as a single statement.
 */
#define XLLDMA_RING_SEEKBACK(RingPtr, BdPtr, NumBd)			\
	do {								\
		u32 Addr = (u32)(BdPtr);				\
									\
		Addr -= ((RingPtr)->Separation * (NumBd));		\
		if ((Addr < (RingPtr)->FirstBdAddr) ||			\
		    ((u32)(BdPtr) < Addr)) {				\
			Addr += (RingPtr)->Length;			\
		}							\
									\
		(BdPtr) = (XLlDma_Bd *) Addr;				\
	} while (0)
163 /************************** Function Prototypes ******************************/
166 /************************** Variable Definitions *****************************/
169 /*****************************************************************************/
171 * Using a memory segment allocated by the caller, create and setup the BD list
172 * for the given SGDMA ring.
174 * @param InstancePtr is the instance to be worked on.
175 * @param PhysAddr is the physical base address of application memory region.
176 * @param VirtAddr is the virtual base address of the application memory
177 * region.If address translation is not being utilized, then VirtAddr
178 * should be equivalent to PhysAddr.
179 * @param Alignment governs the byte alignment of individual BDs. This function
180 * will enforce a minimum alignment of XLLDMA_BD_MINIMUM_ALIGNMENT bytes
181 * with no maximum as long as it is specified as a power of 2.
182 * @param BdCount is the number of BDs to setup in the application memory
183 * region. It is assumed the region is large enough to contain the BDs.
184 * Refer to the "SGDMA Ring Creation" section in xlldma.h for more
185 * information. The minimum valid value for this parameter is 1.
187 * @return
189 * - XST_SUCCESS if initialization was successful
190 * - XST_NO_FEATURE if the provided instance is a non SGDMA type of DMA
191 * channel.
192 * - XST_INVALID_PARAM under any of the following conditions: 1) PhysAddr
193 * and/or VirtAddr are not aligned to the given Alignment parameter;
194 * 2) Alignment parameter does not meet minimum requirements or is not a
195 * power of 2 value; 3) BdCount is 0.
196 * - XST_DMA_SG_LIST_ERROR if the memory segment containing the list spans
197 * over address 0x00000000 in virtual address space.
199 *****************************************************************************/
200 int XLlDma_BdRingCreate(XLlDma_BdRing * RingPtr, u32 PhysAddr,
201 u32 VirtAddr, u32 Alignment, unsigned BdCount)
203 unsigned i;
204 u32 BdVirtAddr;
205 u32 BdPhysAddr;
207 /* In case there is a failure prior to creating list, make sure the
208 * following attributes are 0 to prevent calls to other SG functions
209 * from doing anything
211 RingPtr->AllCnt = 0;
212 RingPtr->FreeCnt = 0;
213 RingPtr->HwCnt = 0;
214 RingPtr->PreCnt = 0;
215 RingPtr->PostCnt = 0;
217 /* Make sure Alignment parameter meets minimum requirements */
218 if (Alignment < XLLDMA_BD_MINIMUM_ALIGNMENT) {
219 return (XST_INVALID_PARAM);
222 /* Make sure Alignment is a power of 2 */
223 if ((Alignment - 1) & Alignment) {
224 return (XST_INVALID_PARAM);
227 /* Make sure PhysAddr and VirtAddr are on same Alignment */
228 if ((PhysAddr % Alignment) || (VirtAddr % Alignment)) {
229 return (XST_INVALID_PARAM);
232 /* Is BdCount reasonable? */
233 if (BdCount == 0) {
234 return (XST_INVALID_PARAM);
237 /* Compute how many bytes will be between the start of adjacent BDs */
238 RingPtr->Separation =
239 (sizeof(XLlDma_Bd) + (Alignment - 1)) & ~(Alignment - 1);
241 /* Must make sure the ring doesn't span address 0x00000000. If it does,
242 * then the next/prev BD traversal macros will fail.
244 if (VirtAddr > (VirtAddr + (RingPtr->Separation * BdCount) - 1)) {
245 return (XST_DMA_SG_LIST_ERROR);
248 /* Initial ring setup:
249 * - Clear the entire space
250 * - Setup each BD's next pointer with the physical address of the
251 * next BD
252 * - Set each BD's DMA complete status bit
254 memset((void *) VirtAddr, 0, (RingPtr->Separation * BdCount));
256 BdVirtAddr = VirtAddr;
257 BdPhysAddr = PhysAddr + RingPtr->Separation;
258 for (i = 1; i < BdCount; i++) {
259 XLlDma_mBdWrite(BdVirtAddr, XLLDMA_BD_NDESC_OFFSET, BdPhysAddr);
260 XLlDma_mBdWrite(BdVirtAddr, XLLDMA_BD_STSCTRL_USR0_OFFSET,
261 XLLDMA_BD_STSCTRL_COMPLETED_MASK);
262 XLLDMA_CACHE_FLUSH(BdVirtAddr);
263 BdVirtAddr += RingPtr->Separation;
264 BdPhysAddr += RingPtr->Separation;
267 /* At the end of the ring, link the last BD back to the top */
268 XLlDma_mBdWrite(BdVirtAddr, XLLDMA_BD_NDESC_OFFSET, PhysAddr);
269 XLLDMA_CACHE_FLUSH(BdVirtAddr);
271 /* Setup and initialize pointers and counters */
272 RingPtr->RunState = XST_DMA_SG_IS_STOPPED;
273 RingPtr->FirstBdAddr = VirtAddr;
274 RingPtr->FirstBdPhysAddr = PhysAddr;
275 RingPtr->LastBdAddr = BdVirtAddr;
276 RingPtr->Length = RingPtr->LastBdAddr - RingPtr->FirstBdAddr +
277 RingPtr->Separation;
278 RingPtr->AllCnt = BdCount;
279 RingPtr->FreeCnt = BdCount;
280 RingPtr->FreeHead = (XLlDma_Bd *) VirtAddr;
281 RingPtr->PreHead = (XLlDma_Bd *) VirtAddr;
282 RingPtr->HwHead = (XLlDma_Bd *) VirtAddr;
283 RingPtr->HwTail = (XLlDma_Bd *) VirtAddr;
284 RingPtr->PostHead = (XLlDma_Bd *) VirtAddr;
285 RingPtr->BdaRestart = (XLlDma_Bd *) PhysAddr;
287 return (XST_SUCCESS);
291 /*****************************************************************************/
293 * Clone the given BD into every BD in the ring. Except for
294 * XLLDMA_BD_NDESC_OFFSET, every field of the source BD is replicated in every
295 * BD in the ring.
297 * This function can be called only when all BDs are in the free group such as
298 * they are immediately after creation of the ring. This prevents modification
299 * of BDs while they are in use by hardware or the application.
301 * @param InstancePtr is the instance to be worked on.
302 * @param SrcBdPtr is the source BD template to be cloned into the list.
304 * @return
305 * - XST_SUCCESS if the list was modified.
306 * - XST_DMA_SG_NO_LIST if a list has not been created.
307 * - XST_DMA_SG_LIST_ERROR if some of the BDs in this channel are under
308 * hardware or application control.
309 * - XST_DEVICE_IS_STARTED if the DMA channel has not been stopped.
311 *****************************************************************************/
312 int XLlDma_BdRingClone(XLlDma_BdRing * RingPtr, XLlDma_Bd * SrcBdPtr)
314 unsigned i;
315 u32 CurBd;
316 u32 Save;
317 XLlDma_Bd TmpBd;
319 /* Can't do this function if there isn't a ring */
320 if (RingPtr->AllCnt == 0) {
321 return (XST_DMA_SG_NO_LIST);
324 /* Can't do this function with the channel running */
325 if (RingPtr->RunState == XST_DMA_SG_IS_STARTED) {
326 return (XST_DEVICE_IS_STARTED);
329 /* Can't do this function with some of the BDs in use */
330 if (RingPtr->FreeCnt != RingPtr->AllCnt) {
331 return (XST_DMA_SG_LIST_ERROR);
335 /* Make a copy of the template then modify it by setting complete bit
336 * in status/control field
338 memcpy(&TmpBd, SrcBdPtr, sizeof(XLlDma_Bd));
339 Save = XLlDma_mBdRead(&TmpBd, XLLDMA_BD_STSCTRL_USR0_OFFSET);
340 Save |= XLLDMA_BD_STSCTRL_COMPLETED_MASK;
341 XLlDma_mBdWrite(&TmpBd, XLLDMA_BD_STSCTRL_USR0_OFFSET, Save);
343 /* Starting from the top of the ring, save BD.Next, overwrite the
344 * entire BD with the template, then restore BD.Next
346 for (i = 0, CurBd = RingPtr->FirstBdAddr;
347 i < RingPtr->AllCnt; i++, CurBd += RingPtr->Separation) {
348 Save = XLlDma_mBdRead(CurBd, XLLDMA_BD_NDESC_OFFSET);
349 memcpy((void *) CurBd, (void *) &TmpBd, sizeof(XLlDma_Bd));
350 XLlDma_mBdWrite(CurBd, XLLDMA_BD_NDESC_OFFSET, Save);
351 XLLDMA_CACHE_FLUSH(CurBd);
354 return (XST_SUCCESS);
358 /*****************************************************************************/
360 * Allow DMA transactions to commence on the given channels if descriptors are
361 * ready to be processed.
363 * @param RingPtr is a pointer to the descriptor ring instance to be worked on.
365 * @return
366 * - XST_SUCCESS if the channel) were started.
367 * - XST_DMA_SG_NO_LIST if the channel) have no initialized BD ring.
369 *****************************************************************************/
370 int XLlDma_BdRingStart(XLlDma_BdRing * RingPtr)
372 /* BD list has yet to be created for this channel */
373 if (RingPtr->AllCnt == 0) {
374 return (XST_DMA_SG_NO_LIST);
377 /* Do nothing if already started */
378 if (RingPtr->RunState == XST_DMA_SG_IS_STARTED) {
379 return (XST_SUCCESS);
382 /* Sync hardware and driver with the last unprocessed BD or the 1st BD
383 * in the ring if this is the first time starting the channel
385 XLlDma_mWriteReg(RingPtr->ChanBase, XLLDMA_CDESC_OFFSET,
386 (u32) RingPtr->BdaRestart);
388 /* Note as started */
389 RingPtr->RunState = XST_DMA_SG_IS_STARTED;
391 /* If there are unprocessed BDs then we want to channel to begin
392 * processing right away
394 if (RingPtr->HwCnt > 0) {
395 XLLDMA_CACHE_INVALIDATE(RingPtr->HwTail);
397 if ((XLlDma_mBdRead(RingPtr->HwTail,
398 XLLDMA_BD_STSCTRL_USR0_OFFSET) &
399 XLLDMA_BD_STSCTRL_COMPLETED_MASK) == 0) {
400 XLlDma_mWriteReg(RingPtr->ChanBase,
401 XLLDMA_TDESC_OFFSET,
402 XLLDMA_VIRT_TO_PHYS(RingPtr->HwTail));
406 return (XST_SUCCESS);
410 /*****************************************************************************/
412 * Set interrupt coalescing parameters for the given descriptor ring channel.
414 * @param RingPtr is a pointer to the descriptor ring instance to be worked on.
415 * @param Counter sets the packet counter on the channel. Valid range is
416 * 1..255, or XLLDMA_NO_CHANGE to leave this setting unchanged.
417 * @param Timer sets the waitbound timer on the channel. Valid range is
418 * 1..255, or XLLDMA_NO_CHANGE to leave this setting unchanged. LSB is
419 * in units of 1 / (local link clock).
421 * @return
422 * - XST_SUCCESS if interrupt coalescing settings updated
423 * - XST_FAILURE if Counter or Timer parameters are out of range
424 *****************************************************************************/
425 int XLlDma_BdRingSetCoalesce(XLlDma_BdRing * RingPtr, u32 Counter, u32 Timer)
427 u32 Cr = XLlDma_mReadReg(RingPtr->ChanBase, XLLDMA_CR_OFFSET);
429 if (Counter != XLLDMA_NO_CHANGE) {
430 if ((Counter == 0) || (Counter > 0xFF)) {
431 return (XST_FAILURE);
434 Cr = (Cr & ~XLLDMA_CR_IRQ_COUNT_MASK) |
435 (Counter << XLLDMA_CR_IRQ_COUNT_SHIFT);
436 Cr |= XLLDMA_CR_LD_IRQ_CNT_MASK;
439 if (Timer != XLLDMA_NO_CHANGE) {
440 if ((Timer == 0) || (Timer > 0xFF)) {
441 return (XST_FAILURE);
444 Cr = (Cr & ~XLLDMA_CR_IRQ_TIMEOUT_MASK) |
445 (Timer << XLLDMA_CR_IRQ_TIMEOUT_SHIFT);
446 Cr |= XLLDMA_CR_LD_IRQ_CNT_MASK;
449 XLlDma_mWriteReg(RingPtr->ChanBase, XLLDMA_CR_OFFSET, Cr);
450 return (XST_SUCCESS);
454 /*****************************************************************************/
456 * Retrieve current interrupt coalescing parameters from the given descriptor
457 * ring channel.
459 * @param RingPtr is a pointer to the descriptor ring instance to be worked on.
460 * @param CounterPtr points to a memory location where the current packet
461 * counter will be written.
462 * @param TimerPtr points to a memory location where the current waitbound
463 * timer will be written.
464 *****************************************************************************/
465 void XLlDma_BdRingGetCoalesce(XLlDma_BdRing * RingPtr,
466 u32 *CounterPtr, u32 *TimerPtr)
468 u32 Cr = XLlDma_mReadReg(RingPtr->ChanBase, XLLDMA_CR_OFFSET);
470 *CounterPtr =
471 ((Cr & XLLDMA_CR_IRQ_COUNT_MASK) >> XLLDMA_CR_IRQ_COUNT_SHIFT);
472 *TimerPtr =
473 ((Cr & XLLDMA_CR_IRQ_TIMEOUT_MASK) >>
474 XLLDMA_CR_IRQ_TIMEOUT_SHIFT);
478 /*****************************************************************************/
480 * Reserve locations in the BD ring. The set of returned BDs may be modified in
481 * preparation for future DMA transactions). Once the BDs are ready to be
482 * submitted to hardware, the application must call XLlDma_BdRingToHw() in the
483 * same order which they were allocated here. Example:
485 * <pre>
486 * NumBd = 2;
487 * Status = XDsma_RingBdAlloc(MyRingPtr, NumBd, &MyBdSet);
489 * if (Status != XST_SUCCESS)
491 * // Not enough BDs available for the request
494 * CurBd = MyBdSet;
495 * for (i=0; i<NumBd; i++)
497 * // Prepare CurBd.....
499 * // Onto next BD
500 * CurBd = XLlDma_mBdRingNext(MyRingPtr, CurBd);
503 * // Give list to hardware
504 * Status = XLlDma_BdRingToHw(MyRingPtr, NumBd, MyBdSet);
505 * </pre>
507 * A more advanced use of this function may allocate multiple sets of BDs.
508 * They must be allocated and given to hardware in the correct sequence:
509 * <pre>
510 * // Legal
511 * XLlDma_BdRingAlloc(MyRingPtr, NumBd1, &MySet1);
512 * XLlDma_BdRingToHw(MyRingPtr, NumBd1, MySet1);
514 * // Legal
515 * XLlDma_BdRingAlloc(MyRingPtr, NumBd1, &MySet1);
516 * XLlDma_BdRingAlloc(MyRingPtr, NumBd2, &MySet2);
517 * XLlDma_BdRingToHw(MyRingPtr, NumBd1, MySet1);
518 * XLlDma_BdRingToHw(MyRingPtr, NumBd2, MySet2);
520 * // Not legal
521 * XLlDma_BdRingAlloc(MyRingPtr, NumBd1, &MySet1);
522 * XLlDma_BdRingAlloc(MyRingPtr, NumBd2, &MySet2);
523 * XLlDma_BdRingToHw(MyRingPtr, NumBd2, MySet2);
524 * XLlDma_BdRingToHw(MyRingPtr, NumBd1, MySet1);
525 * </pre>
527 * Use the API defined in xlldmabd.h to modify individual BDs. Traversal of the
528 * BD set can be done using XLlDma_mBdRingNext() and XLlDma_mBdRingPrev().
530 * @param RingPtr is a pointer to the descriptor ring instance to be worked on.
531 * @param NumBd is the number of BDs to allocate
532 * @param BdSetPtr is an output parameter, it points to the first BD available
533 * for modification.
535 * @return
536 * - XST_SUCCESS if the requested number of BDs was returned in the BdSetPtr
537 * parameter.
538 * - XST_FAILURE if there were not enough free BDs to satisfy the request.
540 * @note This function should not be preempted by another XLlDma_BdRing
541 * function call that modifies the BD space. It is the caller's
542 * responsibility to provide a mutual exclusion mechanism.
544 * @note Do not modify more BDs than the number requested with the NumBd
545 * parameter. Doing so will lead to data corruption and system
546 * instability.
548 *****************************************************************************/
549 int XLlDma_BdRingAlloc(XLlDma_BdRing * RingPtr, unsigned NumBd,
550 XLlDma_Bd ** BdSetPtr)
552 /* Enough free BDs available for the request? */
553 if (RingPtr->FreeCnt < NumBd) {
554 return (XST_FAILURE);
557 /* Set the return argument and move FreeHead forward */
558 *BdSetPtr = RingPtr->FreeHead;
559 XLLDMA_RING_SEEKAHEAD(RingPtr, RingPtr->FreeHead, NumBd);
560 RingPtr->FreeCnt -= NumBd;
561 RingPtr->PreCnt += NumBd;
563 return (XST_SUCCESS);
567 /*****************************************************************************/
569 * Fully or partially undo an XLlDma_BdRingAlloc() operation. Use this function
570 * if all the BDs allocated by XLlDma_BdRingAlloc() could not be transferred to
571 * hardware with XLlDma_BdRingToHw().
573 * This function helps out in situations when an unrelated error occurs after
574 * BDs have been allocated but before they have been given to hardware.
576 * This function is not the same as XLlDma_BdRingFree(). The Free function
577 * returns BDs to the free list after they have been processed by hardware,
578 * while UnAlloc returns them before being processed by hardware.
580 * There are two scenarios where this function can be used. Full UnAlloc or
581 * Partial UnAlloc. A Full UnAlloc means all the BDs Alloc'd will be returned:
583 * <pre>
584 * Status = XLlDma_BdRingAlloc(MyRingPtr, 10, &BdPtr);
587 * if (Error)
589 * Status = XLlDma_BdRingUnAlloc(MyRingPtr, 10, &BdPtr);
591 * </pre>
593 * A partial UnAlloc means some of the BDs Alloc'd will be returned:
595 * <pre>
596 * Status = XLlDma_BdRingAlloc(MyRingPtr, 10, &BdPtr);
597 * BdsLeft = 10;
598 * CurBdPtr = BdPtr;
600 * while (BdsLeft)
602 * if (Error)
604 * Status = XLlDma_BdRingUnAlloc(MyRingPtr, BdsLeft, CurBdPtr);
607 * CurBdPtr = XLlDma_mBdRingNext(MyRingPtr, CurBdPtr);
608 * BdsLeft--;
610 * </pre>
612 * A partial UnAlloc must include the last BD in the list that was Alloc'd.
614 * @param RingPtr is a pointer to the descriptor ring instance to be worked on.
615 * @param NumBd is the number of BDs to unallocate
616 * @param BdSetPtr points to the first of the BDs to be returned.
618 * @return
619 * - XST_SUCCESS if the BDs were unallocated.
620 * - XST_FAILURE if NumBd parameter was greater that the number of BDs in the
621 * preprocessing state.
623 * @note This function should not be preempted by another XLlDma ring function
624 * call that modifies the BD space. It is the caller's responsibility to
625 * provide a mutual exclusion mechanism.
627 *****************************************************************************/
628 int XLlDma_BdRingUnAlloc(XLlDma_BdRing * RingPtr, unsigned NumBd,
629 XLlDma_Bd * BdSetPtr)
631 /* Enough BDs in the free state for the request? */
632 if (RingPtr->PreCnt < NumBd) {
633 return (XST_FAILURE);
636 /* Set the return argument and move FreeHead backward */
637 XLLDMA_RING_SEEKBACK(RingPtr, RingPtr->FreeHead, NumBd);
638 RingPtr->FreeCnt += NumBd;
639 RingPtr->PreCnt -= NumBd;
641 return (XST_SUCCESS);
645 /*****************************************************************************/
647 * Enqueue a set of BDs to hardware that were previously allocated by
648 * XLlDma_BdRingAlloc(). Once this function returns, the argument BD set goes
649 * under hardware control. Any changes made to these BDs after this point will
650 * corrupt the BD list leading to data corruption and system instability.
652 * The set will be rejected if the last BD of the set does not mark the end of
653 * a packet.
655 * @param RingPtr is a pointer to the descriptor ring instance to be worked on.
656 * @param NumBd is the number of BDs in the set.
657 * @param BdSetPtr is the first BD of the set to commit to hardware.
659 * @return
660 * - XST_SUCCESS if the set of BDs was accepted and enqueued to hardware
661 * - XST_FAILURE if the set of BDs was rejected because the first BD
662 * did not have its start-of-packet bit set, the last BD did not have
663 * its end-of-packet bit set, or any one of the BD set has 0 as length
664 * value
665 * - XST_DMA_SG_LIST_ERROR if this function was called out of sequence with
666 * XLlDma_BdRingAlloc()
668 * @note This function should not be preempted by another XLlDma ring function
669 * call that modifies the BD space. It is the caller's responsibility to
670 * provide a mutual exclusion mechanism.
672 *****************************************************************************/
673 int XLlDma_BdRingToHw(XLlDma_BdRing * RingPtr, unsigned NumBd,
674 XLlDma_Bd * BdSetPtr)
676 XLlDma_Bd *CurBdPtr;
677 unsigned i;
678 u32 BdStsCr;
680 /* If the commit set is empty, do nothing */
681 if (NumBd == 0) {
682 return (XST_SUCCESS);
685 /* Make sure we are in sync with XLlDma_BdRingAlloc() */
686 if ((RingPtr->PreCnt < NumBd) || (RingPtr->PreHead != BdSetPtr)) {
687 return (XST_DMA_SG_LIST_ERROR);
690 CurBdPtr = BdSetPtr;
691 BdStsCr = XLlDma_mBdRead(CurBdPtr, XLLDMA_BD_STSCTRL_USR0_OFFSET);
693 /* The first BD should have been marked as start-of-packet */
694 if (!(BdStsCr & XLLDMA_BD_STSCTRL_SOP_MASK)) {
695 return (XST_FAILURE);
698 /* For each BD being submitted except the last one, clear the completed
699 * bit and stop_on_end bit in the status word
701 for (i = 0; i < NumBd - 1; i++) {
703 /* Make sure the length value in the BD is non-zero. */
704 if (XLlDma_mBdGetLength(CurBdPtr) == 0) {
705 return (XST_FAILURE);
708 BdStsCr &=
709 ~(XLLDMA_BD_STSCTRL_COMPLETED_MASK |
710 XLLDMA_BD_STSCTRL_SOE_MASK);
711 XLlDma_mBdWrite(CurBdPtr, XLLDMA_BD_STSCTRL_USR0_OFFSET,
712 BdStsCr);
714 /* In RX channel case, the current BD should have the
715 * XLLDMA_USERIP_APPWORD_OFFSET initialized to
716 * XLLDMA_USERIP_APPWORD_INITVALUE
718 if (RingPtr->IsRxChannel) {
719 XLlDma_mBdWrite(CurBdPtr, XLLDMA_USERIP_APPWORD_OFFSET,
720 XLLDMA_USERIP_APPWORD_INITVALUE);
723 /* Flush the current BD so DMA core could see the updates */
724 XLLDMA_CACHE_FLUSH(CurBdPtr);
726 CurBdPtr = XLlDma_mBdRingNext(RingPtr, CurBdPtr);
727 BdStsCr =
728 XLlDma_mBdRead(CurBdPtr, XLLDMA_BD_STSCTRL_USR0_OFFSET);
731 /* The last BD should have end-of-packet bit set */
732 if (!(BdStsCr & XLLDMA_BD_STSCTRL_EOP_MASK)) {
733 return (XST_FAILURE);
736 /* Make sure the length value in the last BD is non-zero. */
737 if (XLlDma_mBdGetLength(CurBdPtr) == 0) {
738 return (XST_FAILURE);
741 /* The last BD should also have the completed and stop-on-end bits
742 * cleared
744 BdStsCr &=
745 ~(XLLDMA_BD_STSCTRL_COMPLETED_MASK |
746 XLLDMA_BD_STSCTRL_SOE_MASK);
747 XLlDma_mBdWrite(CurBdPtr, XLLDMA_BD_STSCTRL_USR0_OFFSET, BdStsCr);
749 /* In RX channel case, the last BD should have the
750 * XLLDMA_USERIP_APPWORD_OFFSET initialized to
751 * XLLDMA_USERIP_APPWORD_INITVALUE
753 if (RingPtr->IsRxChannel) {
754 XLlDma_mBdWrite(CurBdPtr, XLLDMA_USERIP_APPWORD_OFFSET,
755 XLLDMA_USERIP_APPWORD_INITVALUE);
758 /* Flush the last BD so DMA core could see the updates */
759 XLLDMA_CACHE_FLUSH(CurBdPtr);
761 /* This set has completed pre-processing, adjust ring pointers and
762 * counters
764 XLLDMA_RING_SEEKAHEAD(RingPtr, RingPtr->PreHead, NumBd);
765 RingPtr->PreCnt -= NumBd;
766 RingPtr->HwTail = CurBdPtr;
767 RingPtr->HwCnt += NumBd;
769 /* If it was enabled, tell the engine to begin processing */
770 if (RingPtr->RunState == XST_DMA_SG_IS_STARTED) {
771 XLlDma_mWriteReg(RingPtr->ChanBase, XLLDMA_TDESC_OFFSET,
772 XLLDMA_VIRT_TO_PHYS(RingPtr->HwTail));
774 return (XST_SUCCESS);
778 /*****************************************************************************/
780 * Returns a set of BD(s) that have been processed by hardware. The returned
781 * BDs may be examined by the application to determine the outcome of the DMA
782 * transactions). Once the BDs have been examined, the application must call
783 * XLlDma_BdRingFree() in the same order which they were retrieved here.
785 * Example:
787 * <pre>
788 * NumBd = XLlDma_BdRingFromHw(MyRingPtr, XLLDMA_ALL_BDS, &MyBdSet);
790 * if (NumBd == 0)
792 * // hardware has nothing ready for us yet
795 * CurBd = MyBdSet;
796 * for (i=0; i<NumBd; i++)
798 * // Examine CurBd for post processing.....
800 * // Onto next BD
801 * CurBd = XLlDma_mBdRingNext(MyRingPtr, CurBd);
804 * XLlDma_BdRingFree(MyRingPtr, NumBd, MyBdSet); // Return the list
805 * </pre>
807 * A more advanced use of this function may allocate multiple sets of BDs.
808 * They must be retrieved from hardware and freed in the correct sequence:
809 * <pre>
810 * // Legal
811 * XLlDma_BdRingFromHw(MyRingPtr, NumBd1, &MySet1);
812 * XLlDma_BdRingFree(MyRingPtr, NumBd1, MySet1);
814 * // Legal
815 * XLlDma_BdRingFromHw(MyRingPtr, NumBd1, &MySet1);
816 * XLlDma_BdRingFromHw(MyRingPtr, NumBd2, &MySet2);
817 * XLlDma_BdRingFree(MyRingPtr, NumBd1, MySet1);
818 * XLlDma_BdRingFree(MyRingPtr, NumBd2, MySet2);
820 * // Not legal
821 * XLlDma_BdRingFromHw(MyRingPtr, NumBd1, &MySet1);
822 * XLlDma_BdRingFromHw(MyRingPtr, NumBd2, &MySet2);
823 * XLlDma_BdRingFree(MyRingPtr, NumBd2, MySet2);
824 * XLlDma_BdRingFree(MyRingPtr, NumBd1, MySet1);
825 * </pre>
827 * If hardware has partially completed a packet spanning multiple BDs, then
828 * none of the BDs for that packet will be included in the results.
830 * @param RingPtr is a pointer to the descriptor ring instance to be worked on.
831 * @param BdLimit is the maximum number of BDs to return in the set. Use
832 * XLLDMA_ALL_BDS to return all BDs that have been processed.
833 * @param BdSetPtr is an output parameter, it points to the first BD available
834 * for examination.
836 * @return
837 * The number of BDs processed by hardware. A value of 0 indicates that no
838 * data is available. No more than BdLimit BDs will be returned.
840 * @note Treat BDs returned by this function as read-only.
842 * @note This function should not be preempted by another XLlDma ring function
843 * call that modifies the BD space. It is the caller's responsibility to
844 * provide a mutual exclusion mechanism.
846 *****************************************************************************/
847 unsigned XLlDma_BdRingFromHw(XLlDma_BdRing * RingPtr, unsigned BdLimit,
848 XLlDma_Bd ** BdSetPtr)
850 XLlDma_Bd *CurBdPtr;
851 unsigned BdCount;
852 unsigned BdPartialCount;
853 u32 BdStsCr;
854 u32 UserIpAppWord;
856 CurBdPtr = RingPtr->HwHead;
857 BdCount = 0;
858 BdPartialCount = 0;
860 /* If no BDs in work group, then there's nothing to search */
861 if (RingPtr->HwCnt == 0) {
862 *BdSetPtr = NULL;
863 return (0);
866 /* Starting at HwHead, keep moving forward in the list until:
867 * - A BD is encountered with its completed bit clear in the status
868 * word which means hardware has not completed processing of that
869 * BD.
870 * - A BD is encountered with its XLLDMA_USERIP_APPWORD_OFFSET field
871 * with value XLLDMA_USERIP_APPWORD_INITVALUE which means hardware
872 * has not completed updating the BD structure.
873 * - RingPtr->HwTail is reached
874 * - The number of requested BDs has been processed
876 while (BdCount < BdLimit) {
877 /* Read the status */
878 XLLDMA_CACHE_INVALIDATE(CurBdPtr);
879 BdStsCr = XLlDma_mBdRead(CurBdPtr,
880 XLLDMA_BD_STSCTRL_USR0_OFFSET);
882 /* If the hardware still hasn't processed this BD then we are
883 * done
885 if (!(BdStsCr & XLLDMA_BD_STSCTRL_COMPLETED_MASK)) {
886 break;
889 /* In RX channel case, check if XLLDMA_USERIP_APPWORD_OFFSET
890 * field of the BD has been updated. If not, RX channel has
891 * not completed updating the BD structure and we delay
892 * the processing of this BD to next time
894 if (RingPtr->IsRxChannel) {
895 UserIpAppWord = XLlDma_mBdRead(CurBdPtr,
896 XLLDMA_USERIP_APPWORD_OFFSET);
897 if (UserIpAppWord == XLLDMA_USERIP_APPWORD_INITVALUE) {
898 break;
903 BdCount++;
905 /* Hardware has processed this BD so check the "last" bit. If
906 * it is clear, then there are more BDs for the current packet.
907 * Keep a count of these partial packet BDs.
909 if (BdStsCr & XLLDMA_BD_STSCTRL_EOP_MASK) {
910 BdPartialCount = 0;
912 else {
913 BdPartialCount++;
916 /* Reached the end of the work group */
917 if (CurBdPtr == RingPtr->HwTail) {
918 break;
921 /* Move on to next BD in work group */
922 CurBdPtr = XLlDma_mBdRingNext(RingPtr, CurBdPtr);
925 /* Subtract off any partial packet BDs found */
926 BdCount -= BdPartialCount;
928 /* If BdCount is non-zero then BDs were found to return. Set return
929 * parameters, update pointers and counters, return success
931 if (BdCount) {
932 *BdSetPtr = RingPtr->HwHead;
933 RingPtr->HwCnt -= BdCount;
934 RingPtr->PostCnt += BdCount;
935 XLLDMA_RING_SEEKAHEAD(RingPtr, RingPtr->HwHead, BdCount);
936 return (BdCount);
938 else {
939 *BdSetPtr = NULL;
940 return (0);
945 /*****************************************************************************/
947 * Frees a set of BDs that had been previously retrieved with
948 * XLlDma_BdRingFromHw().
950 * @param RingPtr is a pointer to the descriptor ring instance to be worked on.
951 * @param NumBd is the number of BDs to free.
952 * @param BdSetPtr is the head of a list of BDs returned by
953 * XLlDma_BdRingFromHw().
955 * @return
956 * - XST_SUCCESS if the set of BDs was freed.
957 * - XST_DMA_SG_LIST_ERROR if this function was called out of sequence with
958 * XLlDma_BdRingFromHw().
960 * @note This function should not be preempted by another XLlDma function call
961 * that modifies the BD space. It is the caller's responsibility to
962 * provide a mutual exclusion mechanism.
964 * @internal
965 * This Interrupt handler provided by application MUST clear pending
966 * interrupts before handling them by calling the call back. Otherwise
967 * the following corner case could raise some issue:
 * - A packet was transmitted and asserted a TX interrupt, and if
 * this interrupt handler calls the call back before clearing the
 * interrupt, another packet could get transmitted (and assert the
 * interrupt) between when the call back function returned and when
 * the interrupt clearing operation begins; the interrupt
 * clearing operation will then clear the interrupt raised by the
 * second packet, and its corresponding buffer descriptors will never
 * be processed until a new interrupt occurs.
978 * Changing the sequence to "Clear interrupts, then handle" solve this
 * issue. If the interrupt raised by the second packet arrives before
 * the interrupt clearing operation, the descriptors associated with
981 * the second packet must have been finished by hardware and ready for
982 * the handling by the call back; otherwise, the interrupt raised by
983 * the second packet is after the interrupt clearing operation,
984 * the packet's buffer descriptors will be handled by the call back in
985 * current pass, if the descriptors are finished before the call back
986 * is invoked, or next pass otherwise.
988 * Please note that if the second packet is handled by the call back
989 * in current pass, the next pass could find no buffer descriptor
990 * finished by the hardware. (i.e., XLlDma_BdRingFromHw() returns 0).
991 * As XLlDma_BdRingFromHw() and XLlDma_BdRingFree() are used in pair,
992 * XLlDma_BdRingFree() covers this situation by checking if the BD
993 * list to free is empty
994 *****************************************************************************/
995 int XLlDma_BdRingFree(XLlDma_BdRing * RingPtr, unsigned NumBd,
996 XLlDma_Bd * BdSetPtr)
998 /* If the BD Set to free is empty, return immediately with value
999 * XST_SUCCESS. See the @internal comment block above for detailed
1000 * information
1002 if (NumBd == 0) {
1003 return XST_SUCCESS;
1006 /* Make sure we are in sync with XLlDma_BdRingFromHw() */
1007 if ((RingPtr->PostCnt < NumBd) || (RingPtr->PostHead != BdSetPtr)) {
1008 return (XST_DMA_SG_LIST_ERROR);
1011 /* Update pointers and counters */
1012 RingPtr->FreeCnt += NumBd;
1013 RingPtr->PostCnt -= NumBd;
1014 XLLDMA_RING_SEEKAHEAD(RingPtr, RingPtr->PostHead, NumBd);
1016 return (XST_SUCCESS);
1020 /*****************************************************************************/
1022 * Check the internal data structures of the BD ring for the provided channel.
1023 * The following checks are made:
1025 * - Is the BD ring linked correctly in physical address space.
1026 * - Do the internal pointers point to BDs in the ring.
1027 * - Do the internal counters add up.
1029 * The channel should be stopped prior to calling this function.
1031 * @param RingPtr is a pointer to the descriptor ring to be worked on.
1033 * @return
1034 * - XST_SUCCESS if no errors were found.
1035 * - XST_DMA_SG_NO_LIST if the ring has not been created.
1036 * - XST_IS_STARTED if the channel is not stopped.
1037 * - XST_DMA_SG_LIST_ERROR if a problem is found with the internal data
1038 * structures. If this value is returned, the channel should be reset to
1039 * avoid data corruption or system instability.
1041 * @note This function should not be preempted by another XLlDma ring function
1042 * call that modifies the BD space. It is the caller's responsibility to
1043 * provide a mutual exclusion mechanism.
1045 *****************************************************************************/
1046 int XLlDma_BdRingCheck(XLlDma_BdRing * RingPtr)
1048 u32 AddrV, AddrP;
1049 unsigned i;
1051 /* Is the list created */
1052 if (RingPtr->AllCnt == 0) {
1053 return (XST_DMA_SG_NO_LIST);
1056 /* Can't check if channel is running */
1057 if (RingPtr->RunState == XST_DMA_SG_IS_STARTED) {
1058 return (XST_IS_STARTED);
1061 /* RunState doesn't make sense */
1062 else if (RingPtr->RunState != XST_DMA_SG_IS_STOPPED) {
1063 return (XST_DMA_SG_LIST_ERROR);
1066 /* Verify internal pointers point to correct memory space */
1067 AddrV = (u32) RingPtr->FreeHead;
1068 if ((AddrV < RingPtr->FirstBdAddr) || (AddrV > RingPtr->LastBdAddr)) {
1069 return (XST_DMA_SG_LIST_ERROR);
1072 AddrV = (u32) RingPtr->PreHead;
1073 if ((AddrV < RingPtr->FirstBdAddr) || (AddrV > RingPtr->LastBdAddr)) {
1074 return (XST_DMA_SG_LIST_ERROR);
1077 AddrV = (u32) RingPtr->HwHead;
1078 if ((AddrV < RingPtr->FirstBdAddr) || (AddrV > RingPtr->LastBdAddr)) {
1079 return (XST_DMA_SG_LIST_ERROR);
1082 AddrV = (u32) RingPtr->HwTail;
1083 if ((AddrV < RingPtr->FirstBdAddr) || (AddrV > RingPtr->LastBdAddr)) {
1084 return (XST_DMA_SG_LIST_ERROR);
1087 AddrV = (u32) RingPtr->PostHead;
1088 if ((AddrV < RingPtr->FirstBdAddr) || (AddrV > RingPtr->LastBdAddr)) {
1089 return (XST_DMA_SG_LIST_ERROR);
1092 /* Verify internal counters add up */
1093 if ((RingPtr->HwCnt + RingPtr->PreCnt + RingPtr->FreeCnt +
1094 RingPtr->PostCnt) != RingPtr->AllCnt) {
1095 return (XST_DMA_SG_LIST_ERROR);
1098 /* Verify BDs are linked correctly */
1099 AddrV = RingPtr->FirstBdAddr;
1100 AddrP = RingPtr->FirstBdPhysAddr + RingPtr->Separation;
1101 for (i = 1; i < RingPtr->AllCnt; i++) {
1102 XLLDMA_CACHE_INVALIDATE(AddrV);
1103 /* Check next pointer for this BD. It should equal to the
1104 * physical address of next BD
1106 if (XLlDma_mBdRead(AddrV, XLLDMA_BD_NDESC_OFFSET) != AddrP) {
1107 return (XST_DMA_SG_LIST_ERROR);
1110 /* Move on to next BD */
1111 AddrV += RingPtr->Separation;
1112 AddrP += RingPtr->Separation;
1115 XLLDMA_CACHE_INVALIDATE(AddrV);
1116 /* Last BD should point back to the beginning of ring */
1117 if (XLlDma_mBdRead(AddrV, XLLDMA_BD_NDESC_OFFSET) !=
1118 RingPtr->FirstBdPhysAddr) {
1119 return (XST_DMA_SG_LIST_ERROR);
1122 /* No problems found */
1123 return (XST_SUCCESS);