/*
 * This file is part of Cleanflight and Betaflight.
 *
 * Cleanflight and Betaflight are free software. You can redistribute
 * this software and/or modify this software under the terms of the
 * GNU General Public License as published by the Free Software
 * Foundation, either version 3 of the License, or (at your option)
 * any later version.
 *
 * Cleanflight and Betaflight are distributed in the hope that they
 * will be useful, but WITHOUT ANY WARRANTY; without even the implied
 * warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
 * See the GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this software.
 *
 * If not, see <http://www.gnu.org/licenses/>.
 */
27 #include "build/atomic.h"
31 #include "drivers/bus.h"
32 #include "drivers/bus_spi.h"
33 #include "drivers/bus_spi_impl.h"
34 #include "drivers/dma_reqmap.h"
35 #include "drivers/exti.h"
36 #include "drivers/io.h"
37 #include "drivers/motor.h"
38 #include "drivers/rcc.h"
40 #include "pg/bus_spi.h"
// Maximum number of segment lists that may be chained on one bus queue
#define NUM_QUEUE_SEGS 5

// Targets without a combined completion interrupt need a dedicated TX DMA
// completion handler (used for TX-only DMA, e.g. OSD)
#if !defined(STM32G4) && !defined(STM32H7) && !defined(AT32F435)
#define USE_TX_IRQ_HANDLER
#endif
// Count of devices registered via spiBusDeviceRegister()
static uint8_t spiRegisteredDeviceCount = 0;

// Per-SPI-peripheral hardware configuration
spiDevice_t spiDevice[SPIDEV_COUNT];
// Per-SPI-peripheral bus state (segment queue, DMA descriptors, ...)
busDevice_t spiBusDevice[SPIDEV_COUNT];
53 SPIDevice
spiDeviceByInstance(SPI_TypeDef
*instance
)
55 #ifdef USE_SPI_DEVICE_1
56 if (instance
== SPI1
) {
61 #ifdef USE_SPI_DEVICE_2
62 if (instance
== SPI2
) {
67 #ifdef USE_SPI_DEVICE_3
68 if (instance
== SPI3
) {
73 #ifdef USE_SPI_DEVICE_4
74 if (instance
== SPI4
) {
79 #ifdef USE_SPI_DEVICE_5
80 if (instance
== SPI5
) {
85 #ifdef USE_SPI_DEVICE_6
86 if (instance
== SPI6
) {
94 SPI_TypeDef
*spiInstanceByDevice(SPIDevice device
)
96 if (device
== SPIINVALID
|| device
>= SPIDEV_COUNT
) {
100 return spiDevice
[device
].dev
;
103 bool spiInit(SPIDevice device
)
110 #ifdef USE_SPI_DEVICE_1
111 spiInitDevice(device
);
118 #ifdef USE_SPI_DEVICE_2
119 spiInitDevice(device
);
126 #if defined(USE_SPI_DEVICE_3)
127 spiInitDevice(device
);
134 #if defined(USE_SPI_DEVICE_4)
135 spiInitDevice(device
);
142 #if defined(USE_SPI_DEVICE_5)
143 spiInitDevice(device
);
150 #if defined(USE_SPI_DEVICE_6)
151 spiInitDevice(device
);
160 // Return true if DMA engine is busy
161 bool spiIsBusy(const extDevice_t
*dev
)
163 return (dev
->bus
->curSegment
!= (busSegment_t
*)BUS_SPI_FREE
);
166 // Wait for DMA completion
167 void spiWait(const extDevice_t
*dev
)
169 // Wait for completion
170 while (spiIsBusy(dev
));
173 // Negate CS if held asserted after a transfer
174 void spiRelease(const extDevice_t
*dev
)
176 // Negate Chip Select
177 IOHi(dev
->busType_u
.spi
.csnPin
);
180 // Wait for bus to become free, then read/write block of data
181 void spiReadWriteBuf(const extDevice_t
*dev
, uint8_t *txData
, uint8_t *rxData
, int len
)
183 // This routine blocks so no need to use static data
184 busSegment_t segments
[] = {
185 {.u
.buffers
= {txData
, rxData
}, len
, true, NULL
},
186 {.u
.link
= {NULL
, NULL
}, 0, true, NULL
},
189 spiSequence(dev
, &segments
[0]);
194 // Read/Write a block of data, returning false if the bus is busy
195 bool spiReadWriteBufRB(const extDevice_t
*dev
, uint8_t *txData
, uint8_t *rxData
, int length
)
197 // Ensure any prior DMA has completed before continuing
198 if (spiIsBusy(dev
)) {
202 spiReadWriteBuf(dev
, txData
, rxData
, length
);
207 // Wait for bus to become free, then read/write a single byte
208 uint8_t spiReadWrite(const extDevice_t
*dev
, uint8_t data
)
212 // This routine blocks so no need to use static data
213 busSegment_t segments
[] = {
214 {.u
.buffers
= {&data
, &retval
}, sizeof(data
), true, NULL
},
215 {.u
.link
= {NULL
, NULL
}, 0, true, NULL
},
218 spiSequence(dev
, &segments
[0]);
225 // Wait for bus to become free, then read/write a single byte from a register
226 uint8_t spiReadWriteReg(const extDevice_t
*dev
, uint8_t reg
, uint8_t data
)
230 // This routine blocks so no need to use static data
231 busSegment_t segments
[] = {
232 {.u
.buffers
= {®
, NULL
}, sizeof(reg
), false, NULL
},
233 {.u
.buffers
= {&data
, &retval
}, sizeof(data
), true, NULL
},
234 {.u
.link
= {NULL
, NULL
}, 0, true, NULL
},
237 spiSequence(dev
, &segments
[0]);
244 // Wait for bus to become free, then write a single byte
245 void spiWrite(const extDevice_t
*dev
, uint8_t data
)
247 // This routine blocks so no need to use static data
248 busSegment_t segments
[] = {
249 {.u
.buffers
= {&data
, NULL
}, sizeof(data
), true, NULL
},
250 {.u
.link
= {NULL
, NULL
}, 0, true, NULL
},
253 spiSequence(dev
, &segments
[0]);
258 // Write data to a register
259 void spiWriteReg(const extDevice_t
*dev
, uint8_t reg
, uint8_t data
)
261 // This routine blocks so no need to use static data
262 busSegment_t segments
[] = {
263 {.u
.buffers
= {®
, NULL
}, sizeof(reg
), false, NULL
},
264 {.u
.buffers
= {&data
, NULL
}, sizeof(data
), true, NULL
},
265 {.u
.link
= {NULL
, NULL
}, 0, true, NULL
},
268 spiSequence(dev
, &segments
[0]);
273 // Write data to a register, returning false if the bus is busy
274 bool spiWriteRegRB(const extDevice_t
*dev
, uint8_t reg
, uint8_t data
)
276 // Ensure any prior DMA has completed before continuing
277 if (spiIsBusy(dev
)) {
281 spiWriteReg(dev
, reg
, data
);
286 // Read a block of data from a register
287 void spiReadRegBuf(const extDevice_t
*dev
, uint8_t reg
, uint8_t *data
, uint8_t length
)
289 // This routine blocks so no need to use static data
290 busSegment_t segments
[] = {
291 {.u
.buffers
= {®
, NULL
}, sizeof(reg
), false, NULL
},
292 {.u
.buffers
= {NULL
, data
}, length
, true, NULL
},
293 {.u
.link
= {NULL
, NULL
}, 0, true, NULL
},
296 spiSequence(dev
, &segments
[0]);
301 // Read a block of data from a register, returning false if the bus is busy
302 bool spiReadRegBufRB(const extDevice_t
*dev
, uint8_t reg
, uint8_t *data
, uint8_t length
)
304 // Ensure any prior DMA has completed before continuing
305 if (spiIsBusy(dev
)) {
309 spiReadRegBuf(dev
, reg
, data
, length
);
314 // Read a block of data where the register is ORed with 0x80, returning false if the bus is busy
315 bool spiReadRegMskBufRB(const extDevice_t
*dev
, uint8_t reg
, uint8_t *data
, uint8_t length
)
317 return spiReadRegBufRB(dev
, reg
| 0x80, data
, length
);
320 // Wait for bus to become free, then write a block of data to a register
321 void spiWriteRegBuf(const extDevice_t
*dev
, uint8_t reg
, uint8_t *data
, uint32_t length
)
323 // This routine blocks so no need to use static data
324 busSegment_t segments
[] = {
325 {.u
.buffers
= {®
, NULL
}, sizeof(reg
), false, NULL
},
326 {.u
.buffers
= {data
, NULL
}, length
, true, NULL
},
327 {.u
.link
= {NULL
, NULL
}, 0, true, NULL
},
330 spiSequence(dev
, &segments
[0]);
335 // Wait for bus to become free, then read a byte from a register
336 uint8_t spiReadReg(const extDevice_t
*dev
, uint8_t reg
)
339 // This routine blocks so no need to use static data
340 busSegment_t segments
[] = {
341 {.u
.buffers
= {®
, NULL
}, sizeof(reg
), false, NULL
},
342 {.u
.buffers
= {NULL
, &data
}, sizeof(data
), true, NULL
},
343 {.u
.link
= {NULL
, NULL
}, 0, true, NULL
},
346 spiSequence(dev
, &segments
[0]);
353 // Wait for bus to become free, then read a byte of data where the register is ORed with 0x80
354 uint8_t spiReadRegMsk(const extDevice_t
*dev
, uint8_t reg
)
356 return spiReadReg(dev
, reg
| 0x80);
359 uint16_t spiCalculateDivider(uint32_t freq
)
361 #if defined(STM32F4) || defined(STM32G4) || defined(STM32F7)
362 uint32_t spiClk
= SystemCoreClock
/ 2;
363 #elif defined(STM32H7)
364 uint32_t spiClk
= 100000000;
365 #elif defined(AT32F4)
370 uint32_t spiClk
= system_core_clock
/ 2;
372 #error "Base SPI clock not defined for this architecture"
375 uint16_t divisor
= 2;
379 for (; (spiClk
> freq
) && (divisor
< 256); divisor
<<= 1, spiClk
>>= 1);
384 uint32_t spiCalculateClock(uint16_t spiClkDivisor
)
386 #if defined(STM32F4) || defined(STM32G4) || defined(STM32F7)
387 uint32_t spiClk
= SystemCoreClock
/ 2;
388 #elif defined(STM32H7)
389 uint32_t spiClk
= 100000000;
390 #elif defined(AT32F4)
391 uint32_t spiClk
= system_core_clock
/ 2;
393 if ((spiClk
/ spiClkDivisor
) > 36000000){
398 #error "Base SPI clock not defined for this architecture"
401 return spiClk
/ spiClkDivisor
;
404 // Interrupt handler for SPI receive DMA completion
405 FAST_IRQ_HANDLER
static void spiIrqHandler(const extDevice_t
*dev
)
407 busDevice_t
*bus
= dev
->bus
;
408 busSegment_t
*nextSegment
;
410 if (bus
->curSegment
->callback
) {
411 switch(bus
->curSegment
->callback(dev
->callbackArg
)) {
413 // Repeat the last DMA segment
415 // Reinitialise the cached init values as segment is not progressing
416 spiInternalInitStream(dev
, true);
420 bus
->curSegment
= (busSegment_t
*)BUS_SPI_FREE
;
425 // Advance to the next DMA segment
430 // Advance through the segment list
431 // OK to discard the volatile qualifier here
432 nextSegment
= (busSegment_t
*)bus
->curSegment
+ 1;
434 if (nextSegment
->len
== 0) {
435 // If a following transaction has been linked, start it
436 if (nextSegment
->u
.link
.dev
) {
437 const extDevice_t
*nextDev
= nextSegment
->u
.link
.dev
;
438 busSegment_t
*nextSegments
= (busSegment_t
*)nextSegment
->u
.link
.segments
;
439 // The end of the segment list has been reached
440 bus
->curSegment
= nextSegments
;
441 nextSegment
->u
.link
.dev
= NULL
;
442 nextSegment
->u
.link
.segments
= NULL
;
443 spiSequenceStart(nextDev
);
445 // The end of the segment list has been reached, so mark transactions as complete
446 bus
->curSegment
= (busSegment_t
*)BUS_SPI_FREE
;
449 // Do as much processing as possible before asserting CS to avoid violating minimum high time
450 bool negateCS
= bus
->curSegment
->negateCS
;
452 bus
->curSegment
= nextSegment
;
454 // After the completion of the first segment setup the init structure for the subsequent segment
455 if (bus
->initSegment
) {
456 spiInternalInitStream(dev
, false);
457 bus
->initSegment
= false;
461 // Assert Chip Select - it's costly so only do so if necessary
462 IOLo(dev
->busType_u
.spi
.csnPin
);
465 // Launch the next transfer
466 spiInternalStartDMA(dev
);
468 // Prepare the init structures ready for the next segment to reduce inter-segment time
469 spiInternalInitStream(dev
, true);
473 // Interrupt handler for SPI receive DMA completion
474 FAST_IRQ_HANDLER
static void spiRxIrqHandler(dmaChannelDescriptor_t
* descriptor
)
476 const extDevice_t
*dev
= (const extDevice_t
*)descriptor
->userParam
;
482 busDevice_t
*bus
= dev
->bus
;
484 if (bus
->curSegment
->negateCS
) {
485 // Negate Chip Select
486 IOHi(dev
->busType_u
.spi
.csnPin
);
489 spiInternalStopDMA(dev
);
491 #ifdef __DCACHE_PRESENT
493 if (bus
->curSegment
->u
.buffers
.rxData
&&
494 ((bus
->curSegment
->u
.buffers
.rxData
< &_dmaram_start__
) || (bus
->curSegment
->u
.buffers
.rxData
>= &_dmaram_end__
))) {
496 if (bus
->curSegment
->u
.buffers
.rxData
) {
498 // Invalidate the D cache covering the area into which data has been read
499 SCB_InvalidateDCache_by_Addr(
500 (uint32_t *)((uint32_t)bus
->curSegment
->u
.buffers
.rxData
& ~CACHE_LINE_MASK
),
501 (((uint32_t)bus
->curSegment
->u
.buffers
.rxData
& CACHE_LINE_MASK
) +
502 bus
->curSegment
->len
- 1 + CACHE_LINE_SIZE
) & ~CACHE_LINE_MASK
);
504 #endif // __DCACHE_PRESENT
#ifdef USE_TX_IRQ_HANDLER
// Interrupt handler for SPI transmit DMA completion (used when only a TX
// DMA channel could be allocated for the bus)
FAST_IRQ_HANDLER static void spiTxIrqHandler(dmaChannelDescriptor_t* descriptor)
{
    const extDevice_t *dev = (const extDevice_t *)descriptor->userParam;

    if (!dev) {
        return;
    }

    busDevice_t *bus = dev->bus;

    spiInternalStopDMA(dev);

    if (bus->curSegment->negateCS) {
        // Negate Chip Select
        IOHi(dev->busType_u.spi.csnPin);
    }

    spiIrqHandler(dev);
}
#endif
532 // Mark this bus as being SPI and record the first owner to use it
533 bool spiSetBusInstance(extDevice_t
*dev
, uint32_t device
)
535 if ((device
== 0) || (device
> SPIDEV_COUNT
)) {
539 dev
->bus
= &spiBusDevice
[SPI_CFG_TO_DEV(device
)];
541 // By default each device should use SPI DMA if the bus supports it
544 if (dev
->bus
->busType
== BUS_TYPE_SPI
) {
545 // This bus has already been initialised
546 dev
->bus
->deviceCount
++;
550 busDevice_t
*bus
= dev
->bus
;
552 bus
->busType_u
.spi
.instance
= spiInstanceByDevice(SPI_CFG_TO_DEV(device
));
554 if (bus
->busType_u
.spi
.instance
== NULL
) {
558 bus
->busType
= BUS_TYPE_SPI
;
560 bus
->deviceCount
= 1;
561 bus
->initTx
= &dev
->initTx
;
562 bus
->initRx
= &dev
->initRx
;
567 void spiInitBusDMA(void)
570 #if defined(STM32F4) && defined(USE_DSHOT_BITBANG)
571 /* Check https://www.st.com/resource/en/errata_sheet/dm00037591-stm32f405407xx-and-stm32f415417xx-device-limitations-stmicroelectronics.pdf
572 * section 2.1.10 which reports an errata that corruption may occurs on DMA2 if AHB peripherals (eg GPIO ports) are
573 * access concurrently with APB peripherals (eg SPI busses). Bitbang DSHOT uses DMA2 to write to GPIO ports. If this
574 * is enabled, then don't enable DMA on an SPI bus using DMA2
576 const bool dshotBitbangActive
= isDshotBitbangActive(&motorConfig()->dev
);
579 for (device
= 0; device
< SPIDEV_COUNT
; device
++) {
580 busDevice_t
*bus
= &spiBusDevice
[device
];
582 if (bus
->busType
!= BUS_TYPE_SPI
) {
583 // This bus is not in use
587 dmaIdentifier_e dmaTxIdentifier
= DMA_NONE
;
588 dmaIdentifier_e dmaRxIdentifier
= DMA_NONE
;
590 int8_t txDmaopt
= spiPinConfig(device
)->txDmaopt
;
591 uint8_t txDmaoptMin
= 0;
592 uint8_t txDmaoptMax
= MAX_PERIPHERAL_DMA_OPTIONS
- 1;
594 if (txDmaopt
!= -1) {
595 txDmaoptMin
= txDmaopt
;
596 txDmaoptMax
= txDmaopt
;
599 for (uint8_t opt
= txDmaoptMin
; opt
<= txDmaoptMax
; opt
++) {
600 const dmaChannelSpec_t
*dmaTxChannelSpec
= dmaGetChannelSpecByPeripheral(DMA_PERIPH_SPI_SDO
, device
, opt
);
602 if (dmaTxChannelSpec
) {
603 dmaTxIdentifier
= dmaGetIdentifier(dmaTxChannelSpec
->ref
);
604 #if defined(STM32F4) && defined(USE_DSHOT_BITBANG)
605 if (dshotBitbangActive
&& (DMA_DEVICE_NO(dmaTxIdentifier
) == 2)) {
606 dmaTxIdentifier
= DMA_NONE
;
610 if (!dmaAllocate(dmaTxIdentifier
, OWNER_SPI_SDO
, device
+ 1)) {
611 dmaTxIdentifier
= DMA_NONE
;
614 bus
->dmaTx
= dmaGetDescriptorByIdentifier(dmaTxIdentifier
);
615 #if defined(STM32F4) || defined(STM32F7) || defined(STM32G4) || defined(STM32H7)
616 bus
->dmaTx
->stream
= DMA_DEVICE_INDEX(dmaTxIdentifier
);
617 bus
->dmaTx
->channel
= dmaTxChannelSpec
->channel
;
620 dmaEnable(dmaTxIdentifier
);
621 #if defined(USE_ATBSP_DRIVER)
622 dmaMuxEnable(dmaTxIdentifier
,dmaTxChannelSpec
->dmaMuxId
);
628 int8_t rxDmaopt
= spiPinConfig(device
)->rxDmaopt
;
629 uint8_t rxDmaoptMin
= 0;
630 uint8_t rxDmaoptMax
= MAX_PERIPHERAL_DMA_OPTIONS
- 1;
632 if (rxDmaopt
!= -1) {
633 rxDmaoptMin
= rxDmaopt
;
634 rxDmaoptMax
= rxDmaopt
;
637 for (uint8_t opt
= rxDmaoptMin
; opt
<= rxDmaoptMax
; opt
++) {
638 const dmaChannelSpec_t
*dmaRxChannelSpec
= dmaGetChannelSpecByPeripheral(DMA_PERIPH_SPI_SDI
, device
, opt
);
640 if (dmaRxChannelSpec
) {
641 dmaRxIdentifier
= dmaGetIdentifier(dmaRxChannelSpec
->ref
);
642 #if defined(STM32F4) && defined(USE_DSHOT_BITBANG)
643 if (dshotBitbangActive
&& (DMA_DEVICE_NO(dmaRxIdentifier
) == 2)) {
644 dmaRxIdentifier
= DMA_NONE
;
648 if (!dmaAllocate(dmaRxIdentifier
, OWNER_SPI_SDI
, device
+ 1)) {
649 dmaRxIdentifier
= DMA_NONE
;
652 bus
->dmaRx
= dmaGetDescriptorByIdentifier(dmaRxIdentifier
);
653 #if defined(STM32F4) || defined(STM32F7) || defined(STM32G4) || defined(STM32H7)
654 bus
->dmaRx
->stream
= DMA_DEVICE_INDEX(dmaRxIdentifier
);
655 bus
->dmaRx
->channel
= dmaRxChannelSpec
->channel
;
658 dmaEnable(dmaRxIdentifier
);
659 #if defined(USE_ATBSP_DRIVER)
660 dmaMuxEnable(dmaRxIdentifier
,dmaRxChannelSpec
->dmaMuxId
);
666 if (dmaTxIdentifier
&& dmaRxIdentifier
) {
667 // Ensure streams are disabled
668 spiInternalResetStream(bus
->dmaRx
);
669 spiInternalResetStream(bus
->dmaTx
);
671 spiInternalResetDescriptors(bus
);
673 /* Note that this driver may be called both from the normal thread of execution, or from USB interrupt
674 * handlers, so the DMA completion interrupt must be at a higher priority
676 dmaSetHandler(dmaRxIdentifier
, spiRxIrqHandler
, NVIC_PRIO_SPI_DMA
, 0);
679 #ifdef USE_TX_IRQ_HANDLER
680 } else if (dmaTxIdentifier
) {
681 // Transmit on DMA is adequate for OSD so worth having
682 bus
->dmaTx
= dmaGetDescriptorByIdentifier(dmaTxIdentifier
);
683 bus
->dmaRx
= (dmaChannelDescriptor_t
*)NULL
;
685 // Ensure streams are disabled
686 spiInternalResetStream(bus
->dmaTx
);
688 spiInternalResetDescriptors(bus
);
690 dmaSetHandler(dmaTxIdentifier
, spiTxIrqHandler
, NVIC_PRIO_SPI_DMA
, 0);
695 // Disassociate channels from bus
696 bus
->dmaRx
= (dmaChannelDescriptor_t
*)NULL
;
697 bus
->dmaTx
= (dmaChannelDescriptor_t
*)NULL
;
702 void spiSetClkDivisor(const extDevice_t
*dev
, uint16_t divisor
)
704 ((extDevice_t
*)dev
)->busType_u
.spi
.speed
= divisor
;
707 // Set the clock phase/polarity to be used for accesses by the given device
708 void spiSetClkPhasePolarity(const extDevice_t
*dev
, bool leadingEdge
)
710 ((extDevice_t
*)dev
)->busType_u
.spi
.leadingEdge
= leadingEdge
;
713 // Enable/disable DMA on a specific device. Enabled by default.
714 void spiDmaEnable(const extDevice_t
*dev
, bool enable
)
716 ((extDevice_t
*)dev
)->useDMA
= enable
;
719 bool spiUseDMA(const extDevice_t
*dev
)
721 // Full DMA only requires both transmit and receive}
722 return dev
->bus
->useDMA
&& dev
->bus
->dmaRx
&& dev
->useDMA
;
725 bool spiUseSDO_DMA(const extDevice_t
*dev
)
727 return dev
->bus
->useDMA
&& dev
->useDMA
;
730 void spiBusDeviceRegister(const extDevice_t
*dev
)
734 spiRegisteredDeviceCount
++;
737 uint8_t spiGetRegisteredDeviceCount(void)
739 return spiRegisteredDeviceCount
;
742 uint8_t spiGetExtDeviceCount(const extDevice_t
*dev
)
744 return dev
->bus
->deviceCount
;
747 // Link two segment lists
748 // Note that there is no need to unlink segment lists as this is done automatically as they are processed
749 void spiLinkSegments(const extDevice_t
*dev
, busSegment_t
*firstSegment
, busSegment_t
*secondSegment
)
751 busSegment_t
*endSegment
;
753 // Find the last segment of the new transfer
754 for (endSegment
= firstSegment
; endSegment
->len
; endSegment
++);
756 endSegment
->u
.link
.dev
= dev
;
757 endSegment
->u
.link
.segments
= secondSegment
;
760 // DMA transfer setup and start
761 void spiSequence(const extDevice_t
*dev
, busSegment_t
*segments
)
763 busDevice_t
*bus
= dev
->bus
;
765 ATOMIC_BLOCK(NVIC_PRIO_MAX
) {
766 if (spiIsBusy(dev
)) {
767 busSegment_t
*endSegment
;
769 // Defer this transfer to be triggered upon completion of the current transfer
771 // Find the last segment of the new transfer
772 for (endSegment
= segments
; endSegment
->len
; endSegment
++);
774 // Safe to discard the volatile qualifier as we're in an atomic block
775 busSegment_t
*endCmpSegment
= (busSegment_t
*)bus
->curSegment
;
779 // Find the last segment of the current transfer
780 for (; endCmpSegment
->len
; endCmpSegment
++);
782 if (endCmpSegment
== endSegment
) {
783 /* Attempt to use the new segment list twice in the same queue. Abort.
784 * Note that this can only happen with non-blocking transfers so drivers must take
785 * care to avoid this.
790 if (endCmpSegment
->u
.link
.dev
== NULL
) {
791 // End of the segment list queue reached
794 // Follow the link to the next queued segment list
795 endCmpSegment
= (busSegment_t
*)endCmpSegment
->u
.link
.segments
;
799 // Record the dev and segments parameters in the terminating segment entry
800 endCmpSegment
->u
.link
.dev
= dev
;
801 endCmpSegment
->u
.link
.segments
= segments
;
806 // Claim the bus with this list of segments
807 bus
->curSegment
= segments
;
811 spiSequenceStart(dev
);