Merge pull request #11939 from blckmn/flash-fix
[betaflight.git] / src / main / drivers / bus_spi.c
/*
 * This file is part of Cleanflight and Betaflight.
 *
 * Cleanflight and Betaflight are free software. You can redistribute
 * this software and/or modify this software under the terms of the
 * GNU General Public License as published by the Free Software
 * Foundation, either version 3 of the License, or (at your option)
 * any later version.
 *
 * Cleanflight and Betaflight are distributed in the hope that they
 * will be useful, but WITHOUT ANY WARRANTY; without even the implied
 * warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
 * See the GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this software.
 *
 * If not, see <http://www.gnu.org/licenses/>.
 */
#include <stdbool.h>
#include <stdint.h>
#include <string.h>

#include "platform.h"

#include "build/atomic.h"

#ifdef USE_SPI

#include "drivers/bus.h"
#include "drivers/bus_spi.h"
#include "drivers/bus_spi_impl.h"
#include "drivers/dma_reqmap.h"
#include "drivers/exti.h"
#include "drivers/io.h"
#include "drivers/motor.h"
#include "drivers/rcc.h"
#include "nvic.h"
#include "pg/bus_spi.h"

#define NUM_QUEUE_SEGS 5

static uint8_t spiRegisteredDeviceCount = 0;

spiDevice_t spiDevice[SPIDEV_COUNT];
busDevice_t spiBusDevice[SPIDEV_COUNT];
SPIDevice spiDeviceByInstance(SPI_TypeDef *instance)
{
#ifdef USE_SPI_DEVICE_1
    if (instance == SPI1) {
        return SPIDEV_1;
    }
#endif

#ifdef USE_SPI_DEVICE_2
    if (instance == SPI2) {
        return SPIDEV_2;
    }
#endif

#ifdef USE_SPI_DEVICE_3
    if (instance == SPI3) {
        return SPIDEV_3;
    }
#endif

#ifdef USE_SPI_DEVICE_4
    if (instance == SPI4) {
        return SPIDEV_4;
    }
#endif

#ifdef USE_SPI_DEVICE_5
    if (instance == SPI5) {
        return SPIDEV_5;
    }
#endif

#ifdef USE_SPI_DEVICE_6
    if (instance == SPI6) {
        return SPIDEV_6;
    }
#endif

    return SPIINVALID;
}
SPI_TypeDef *spiInstanceByDevice(SPIDevice device)
{
    if (device == SPIINVALID || device >= SPIDEV_COUNT) {
        return NULL;
    }

    return spiDevice[device].dev;
}
bool spiInit(SPIDevice device)
{
    switch (device) {
    case SPIINVALID:
        return false;

    case SPIDEV_1:
#ifdef USE_SPI_DEVICE_1
        spiInitDevice(device);
        return true;
#else
        break;
#endif

    case SPIDEV_2:
#ifdef USE_SPI_DEVICE_2
        spiInitDevice(device);
        return true;
#else
        break;
#endif

    case SPIDEV_3:
#if defined(USE_SPI_DEVICE_3)
        spiInitDevice(device);
        return true;
#else
        break;
#endif

    case SPIDEV_4:
#if defined(USE_SPI_DEVICE_4)
        spiInitDevice(device);
        return true;
#else
        break;
#endif

    case SPIDEV_5:
#if defined(USE_SPI_DEVICE_5)
        spiInitDevice(device);
        return true;
#else
        break;
#endif

    case SPIDEV_6:
#if defined(USE_SPI_DEVICE_6)
        spiInitDevice(device);
        return true;
#else
        break;
#endif
    }
    return false;
}
// Return true if DMA engine is busy
bool spiIsBusy(const extDevice_t *dev)
{
    return (dev->bus->curSegment != (busSegment_t *)BUS_SPI_FREE);
}

// Wait for DMA completion
void spiWait(const extDevice_t *dev)
{
    // Wait for completion
    while (dev->bus->curSegment != (busSegment_t *)BUS_SPI_FREE);
}
// Wait for bus to become free, then read/write block of data
void spiReadWriteBuf(const extDevice_t *dev, uint8_t *txData, uint8_t *rxData, int len)
{
    // This routine blocks so no need to use static data
    busSegment_t segments[] = {
        {.u.buffers = {txData, rxData}, len, true, NULL},
        {.u.link = {NULL, NULL}, 0, true, NULL},
    };

    spiSequence(dev, &segments[0]);

    spiWait(dev);
}
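
/*
 * Usage sketch: because this call blocks until the transfer has completed, callers may
 * safely pass stack buffers. The command byte below is illustrative, not part of this
 * driver:
 *
 *     uint8_t txBuf[3] = { 0x9f, 0x00, 0x00 };  // hypothetical command plus two dummy bytes
 *     uint8_t rxBuf[3];
 *
 *     spiReadWriteBuf(dev, txBuf, rxBuf, sizeof(txBuf));
 *     // rxBuf[1] and rxBuf[2] now hold whatever the device clocked out
 */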
// Read/Write a block of data, returning false if the bus is busy
bool spiReadWriteBufRB(const extDevice_t *dev, uint8_t *txData, uint8_t *rxData, int length)
{
    // Ensure any prior DMA has completed before continuing
    if (spiIsBusy(dev)) {
        return false;
    }

    spiReadWriteBuf(dev, txData, rxData, length);

    return true;
}

// Wait for bus to become free, then read/write a single byte
uint8_t spiReadWrite(const extDevice_t *dev, uint8_t data)
{
    uint8_t retval;

    // This routine blocks so no need to use static data
    busSegment_t segments[] = {
        {.u.buffers = {&data, &retval}, sizeof(data), true, NULL},
        {.u.link = {NULL, NULL}, 0, true, NULL},
    };

    spiSequence(dev, &segments[0]);

    spiWait(dev);

    return retval;
}
// Wait for bus to become free, then read/write a single byte from a register
uint8_t spiReadWriteReg(const extDevice_t *dev, uint8_t reg, uint8_t data)
{
    uint8_t retval;

    // This routine blocks so no need to use static data
    busSegment_t segments[] = {
        {.u.buffers = {&reg, NULL}, sizeof(reg), false, NULL},
        {.u.buffers = {&data, &retval}, sizeof(data), true, NULL},
        {.u.link = {NULL, NULL}, 0, true, NULL},
    };

    spiSequence(dev, &segments[0]);

    spiWait(dev);

    return retval;
}

// Wait for bus to become free, then write a single byte
void spiWrite(const extDevice_t *dev, uint8_t data)
{
    // This routine blocks so no need to use static data
    busSegment_t segments[] = {
        {.u.buffers = {&data, NULL}, sizeof(data), true, NULL},
        {.u.link = {NULL, NULL}, 0, true, NULL},
    };

    spiSequence(dev, &segments[0]);

    spiWait(dev);
}
// Write data to a register
void spiWriteReg(const extDevice_t *dev, uint8_t reg, uint8_t data)
{
    // This routine blocks so no need to use static data
    busSegment_t segments[] = {
        {.u.buffers = {&reg, NULL}, sizeof(reg), false, NULL},
        {.u.buffers = {&data, NULL}, sizeof(data), true, NULL},
        {.u.link = {NULL, NULL}, 0, true, NULL},
    };

    spiSequence(dev, &segments[0]);

    spiWait(dev);
}

// Write data to a register, returning false if the bus is busy
bool spiWriteRegRB(const extDevice_t *dev, uint8_t reg, uint8_t data)
{
    // Ensure any prior DMA has completed before continuing
    if (spiIsBusy(dev)) {
        return false;
    }

    spiWriteReg(dev, reg, data);

    return true;
}
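
/*
 * Usage sketch: the *RB variants let a periodic task skip a register access rather than
 * stall while an earlier DMA transfer is still in flight. Register name and value are
 * illustrative:
 *
 *     if (!spiWriteRegRB(dev, FOO_REG_CTRL, 0x01)) {
 *         // Bus still busy with a previous transfer; retry on the next scheduler cycle
 *     }
 */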
// Read a block of data from a register
void spiReadRegBuf(const extDevice_t *dev, uint8_t reg, uint8_t *data, uint8_t length)
{
    // This routine blocks so no need to use static data
    busSegment_t segments[] = {
        {.u.buffers = {&reg, NULL}, sizeof(reg), false, NULL},
        {.u.buffers = {NULL, data}, length, true, NULL},
        {.u.link = {NULL, NULL}, 0, true, NULL},
    };

    spiSequence(dev, &segments[0]);

    spiWait(dev);
}

// Read a block of data from a register, returning false if the bus is busy
bool spiReadRegBufRB(const extDevice_t *dev, uint8_t reg, uint8_t *data, uint8_t length)
{
    // Ensure any prior DMA has completed before continuing
    if (spiIsBusy(dev)) {
        return false;
    }

    spiReadRegBuf(dev, reg, data, length);

    return true;
}

// Read a block of data where the register is ORed with 0x80, returning false if the bus is busy
bool spiReadRegMskBufRB(const extDevice_t *dev, uint8_t reg, uint8_t *data, uint8_t length)
{
    return spiReadRegBufRB(dev, reg | 0x80, data, length);
}

// Wait for bus to become free, then write a block of data to a register
void spiWriteRegBuf(const extDevice_t *dev, uint8_t reg, uint8_t *data, uint32_t length)
{
    // This routine blocks so no need to use static data
    busSegment_t segments[] = {
        {.u.buffers = {&reg, NULL}, sizeof(reg), false, NULL},
        {.u.buffers = {data, NULL}, length, true, NULL},
        {.u.link = {NULL, NULL}, 0, true, NULL},
    };

    spiSequence(dev, &segments[0]);

    spiWait(dev);
}
// Wait for bus to become free, then read a byte from a register
uint8_t spiReadReg(const extDevice_t *dev, uint8_t reg)
{
    uint8_t data;
    // This routine blocks so no need to use static data
    busSegment_t segments[] = {
        {.u.buffers = {&reg, NULL}, sizeof(reg), false, NULL},
        {.u.buffers = {NULL, &data}, sizeof(data), true, NULL},
        {.u.link = {NULL, NULL}, 0, true, NULL},
    };

    spiSequence(dev, &segments[0]);

    spiWait(dev);

    return data;
}

// Wait for bus to become free, then read a byte of data where the register is ORed with 0x80
uint8_t spiReadRegMsk(const extDevice_t *dev, uint8_t reg)
{
    return spiReadReg(dev, reg | 0x80);
}
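
/*
 * Usage sketch: many SPI sensors signal a register read by setting bit 7 of the register
 * address, which is exactly what the *Msk helpers do. Register name and expected value
 * are illustrative:
 *
 *     uint8_t whoAmI = spiReadRegMsk(dev, FOO_REG_WHO_AM_I);  // transmits FOO_REG_WHO_AM_I | 0x80
 *     if (whoAmI != FOO_WHO_AM_I_VALUE) {
 *         return false;  // device not detected
 *     }
 */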
uint16_t spiCalculateDivider(uint32_t freq)
{
#if defined(STM32F4) || defined(STM32G4) || defined(STM32F7)
    uint32_t spiClk = SystemCoreClock / 2;
#elif defined(STM32H7)
    uint32_t spiClk = 100000000;
#else
#error "Base SPI clock not defined for this architecture"
#endif

    uint16_t divisor = 2;

    spiClk >>= 1;

    for (; (spiClk > freq) && (divisor < 256); divisor <<= 1, spiClk >>= 1);

    return divisor;
}
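
/*
 * Worked example (assuming an STM32F4 part with SystemCoreClock = 168 MHz, giving an
 * 84 MHz SPI kernel clock): the loop walks 42 MHz (divisor 2), 21 MHz (4), 10.5 MHz (8),
 * 5.25 MHz (16) and stops at the first clock that does not exceed the request, so
 *
 *     spiCalculateDivider(10000000);   // returns 16 -> 84 MHz / 16 = 5.25 MHz
 *     spiCalculateDivider(10500000);   // returns 8  -> 84 MHz / 8  = 10.5 MHz
 */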
uint32_t spiCalculateClock(uint16_t spiClkDivisor)
{
#if defined(STM32F4) || defined(STM32G4) || defined(STM32F7)
    uint32_t spiClk = SystemCoreClock / 2;
#elif defined(STM32H7)
    uint32_t spiClk = 100000000;
#else
#error "Base SPI clock not defined for this architecture"
#endif

    return spiClk / spiClkDivisor;
}
// Handle the completion of a DMA segment; called from the Rx (and Tx-only) DMA completion interrupt handlers
static void spiIrqHandler(const extDevice_t *dev)
{
    busDevice_t *bus = dev->bus;
    busSegment_t *nextSegment;

    if (bus->curSegment->callback) {
        switch(bus->curSegment->callback(dev->callbackArg)) {
        case BUS_BUSY:
            // Repeat the last DMA segment
            bus->curSegment--;
            // Reinitialise the cached init values as segment is not progressing
            spiInternalInitStream(dev, true);
            break;

        case BUS_ABORT:
            bus->curSegment = (busSegment_t *)BUS_SPI_FREE;
            return;

        case BUS_READY:
        default:
            // Advance to the next DMA segment
            break;
        }
    }

    // Advance through the segment list
    // OK to discard the volatile qualifier here
    nextSegment = (busSegment_t *)bus->curSegment + 1;

    if (nextSegment->len == 0) {
        // If a following transaction has been linked, start it
        if (nextSegment->u.link.dev) {
            const extDevice_t *nextDev = nextSegment->u.link.dev;
            busSegment_t *nextSegments = (busSegment_t *)nextSegment->u.link.segments;
            // The end of the segment list has been reached
            bus->curSegment = nextSegments;
            nextSegment->u.link.dev = NULL;
            nextSegment->u.link.segments = NULL;
            spiSequenceStart(nextDev);
        } else {
            // The end of the segment list has been reached, so mark transactions as complete
            bus->curSegment = (busSegment_t *)BUS_SPI_FREE;
        }
    } else {
        // Do as much processing as possible before asserting CS to avoid violating minimum high time
        bool negateCS = bus->curSegment->negateCS;

        bus->curSegment = nextSegment;

        // After the completion of the first segment setup the init structure for the subsequent segment
        if (bus->initSegment) {
            spiInternalInitStream(dev, false);
            bus->initSegment = false;
        }

        if (negateCS) {
            // Assert Chip Select - it's costly so only do so if necessary
            IOLo(dev->busType_u.spi.csnPin);
        }

        // Launch the next transfer
        spiInternalStartDMA(dev);

        // Prepare the init structures ready for the next segment to reduce inter-segment time
        spiInternalInitStream(dev, true);
    }
}
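
/*
 * Sketch of a segment completion callback as invoked above; the signature is inferred
 * from the call site (a uint32_t argument taken from dev->callbackArg). The return value
 * steers the handler: BUS_READY advances to the next segment, BUS_BUSY repeats the
 * segment just completed, and BUS_ABORT releases the bus. Names other than the
 * busStatus_e values are illustrative:
 *
 *     static busStatus_e fooSegmentComplete(uint32_t arg)
 *     {
 *         fooDevice_t *foo = (fooDevice_t *)arg;  // hypothetical driver state registered as callbackArg
 *
 *         if (!fooMoreDataExpected(foo)) {
 *             return BUS_ABORT;                   // stop here and free the bus
 *         }
 *
 *         return BUS_READY;                       // allow the next segment to start
 *     }
 */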
// Interrupt handler for SPI receive DMA completion
static void spiRxIrqHandler(dmaChannelDescriptor_t* descriptor)
{
    const extDevice_t *dev = (const extDevice_t *)descriptor->userParam;

    if (!dev) {
        return;
    }

    busDevice_t *bus = dev->bus;

    if (bus->curSegment->negateCS) {
        // Negate Chip Select
        IOHi(dev->busType_u.spi.csnPin);
    }

    spiInternalStopDMA(dev);

#ifdef __DCACHE_PRESENT
#ifdef STM32H7
    if (bus->curSegment->u.buffers.rxData &&
        ((bus->curSegment->u.buffers.rxData < &_dmaram_start__) || (bus->curSegment->u.buffers.rxData >= &_dmaram_end__))) {
#else
    if (bus->curSegment->u.buffers.rxData) {
#endif
        // Invalidate the D cache covering the area into which data has been read
        SCB_InvalidateDCache_by_Addr(
            (uint32_t *)((uint32_t)bus->curSegment->u.buffers.rxData & ~CACHE_LINE_MASK),
            (((uint32_t)bus->curSegment->u.buffers.rxData & CACHE_LINE_MASK) +
              bus->curSegment->len - 1 + CACHE_LINE_SIZE) & ~CACHE_LINE_MASK);
    }
#endif // __DCACHE_PRESENT

    spiIrqHandler(dev);
}
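
/*
 * Worked example of the invalidation maths above, assuming the usual 32-byte Cortex-M7
 * D-cache line (CACHE_LINE_SIZE 32, CACHE_LINE_MASK 0x1f): for rxData = 0x24000104 and
 * len = 10, the address is rounded down to 0x24000100 and the size becomes
 * ((0x04 + 10 - 1 + 32) & ~0x1f) = 32, so the single line holding bytes
 * 0x24000104..0x2400010d is invalidated before the CPU reads the freshly DMA'd data.
 */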
#if !defined(STM32G4) && !defined(STM32H7)
// Interrupt handler for SPI transmit DMA completion
static void spiTxIrqHandler(dmaChannelDescriptor_t* descriptor)
{
    const extDevice_t *dev = (const extDevice_t *)descriptor->userParam;

    if (!dev) {
        return;
    }

    busDevice_t *bus = dev->bus;

    spiInternalStopDMA(dev);

    if (bus->curSegment->negateCS) {
        // Negate Chip Select
        IOHi(dev->busType_u.spi.csnPin);
    }

    spiIrqHandler(dev);
}
#endif
// Mark this bus as being SPI and record the first owner to use it
bool spiSetBusInstance(extDevice_t *dev, uint32_t device)
{
    if ((device == 0) || (device > SPIDEV_COUNT)) {
        return false;
    }

    dev->bus = &spiBusDevice[SPI_CFG_TO_DEV(device)];

    // By default each device should use SPI DMA if the bus supports it
    dev->useDMA = true;

    if (dev->bus->busType == BUS_TYPE_SPI) {
        // This bus has already been initialised
        dev->bus->deviceCount++;
        return true;
    }

    busDevice_t *bus = dev->bus;

    bus->busType_u.spi.instance = spiInstanceByDevice(SPI_CFG_TO_DEV(device));

    if (bus->busType_u.spi.instance == NULL) {
        return false;
    }

    bus->busType = BUS_TYPE_SPI;
    bus->useDMA = false;
    bus->deviceCount = 1;
    bus->initTx = &dev->initTx;
    bus->initRx = &dev->initRx;

    return true;
}
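
/*
 * Usage sketch: a device driver would typically claim its bus and set per-device
 * parameters before issuing transfers. 'device' is the 1-based bus number from
 * configuration; the clock figure and phase/polarity choice are illustrative:
 *
 *     if (!spiSetBusInstance(dev, device)) {
 *         return false;
 *     }
 *     spiSetClkDivisor(dev, spiCalculateDivider(10000000));  // cap the device at 10 MHz
 *     spiSetClkPhasePolarity(dev, true);                     // leading-edge sampling, if the device needs it
 */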
void spiInitBusDMA(void)
{
    uint32_t device;
#if defined(STM32F4) && defined(USE_DSHOT_BITBANG)
    /* Check https://www.st.com/resource/en/errata_sheet/dm00037591-stm32f405407xx-and-stm32f415417xx-device-limitations-stmicroelectronics.pdf
     * section 2.1.10 which reports an errata that corruption may occur on DMA2 if AHB peripherals (eg GPIO ports) are
     * accessed concurrently with APB peripherals (eg SPI busses). Bitbang DSHOT uses DMA2 to write to GPIO ports. If this
     * is enabled, then don't enable DMA on an SPI bus using DMA2
     */
    const bool dshotBitbangActive = isDshotBitbangActive(&motorConfig()->dev);
#endif

    for (device = 0; device < SPIDEV_COUNT; device++) {
        busDevice_t *bus = &spiBusDevice[device];

        if (bus->busType != BUS_TYPE_SPI) {
            // This bus is not in use
            continue;
        }

        dmaIdentifier_e dmaTxIdentifier = DMA_NONE;
        dmaIdentifier_e dmaRxIdentifier = DMA_NONE;

        int8_t txDmaopt = spiPinConfig(device)->txDmaopt;
        uint8_t txDmaoptMin = 0;
        uint8_t txDmaoptMax = MAX_PERIPHERAL_DMA_OPTIONS - 1;

        if (txDmaopt != -1) {
            txDmaoptMin = txDmaopt;
            txDmaoptMax = txDmaopt;
        }

        for (uint8_t opt = txDmaoptMin; opt <= txDmaoptMax; opt++) {
            const dmaChannelSpec_t *dmaTxChannelSpec = dmaGetChannelSpecByPeripheral(DMA_PERIPH_SPI_MOSI, device, opt);

            if (dmaTxChannelSpec) {
                dmaTxIdentifier = dmaGetIdentifier(dmaTxChannelSpec->ref);
#if defined(STM32F4) && defined(USE_DSHOT_BITBANG)
                if (dshotBitbangActive && (DMA_DEVICE_NO(dmaTxIdentifier) == 2)) {
                    dmaTxIdentifier = DMA_NONE;
                    break;
                }
#endif
                if (!dmaAllocate(dmaTxIdentifier, OWNER_SPI_MOSI, device + 1)) {
                    dmaTxIdentifier = DMA_NONE;
                    continue;
                }
                bus->dmaTx = dmaGetDescriptorByIdentifier(dmaTxIdentifier);
                bus->dmaTx->stream = DMA_DEVICE_INDEX(dmaTxIdentifier);
                bus->dmaTx->channel = dmaTxChannelSpec->channel;

                dmaEnable(dmaTxIdentifier);

                break;
            }
        }

        int8_t rxDmaopt = spiPinConfig(device)->rxDmaopt;
        uint8_t rxDmaoptMin = 0;
        uint8_t rxDmaoptMax = MAX_PERIPHERAL_DMA_OPTIONS - 1;

        if (rxDmaopt != -1) {
            rxDmaoptMin = rxDmaopt;
            rxDmaoptMax = rxDmaopt;
        }

        for (uint8_t opt = rxDmaoptMin; opt <= rxDmaoptMax; opt++) {
            const dmaChannelSpec_t *dmaRxChannelSpec = dmaGetChannelSpecByPeripheral(DMA_PERIPH_SPI_MISO, device, opt);

            if (dmaRxChannelSpec) {
                dmaRxIdentifier = dmaGetIdentifier(dmaRxChannelSpec->ref);
#if defined(STM32F4) && defined(USE_DSHOT_BITBANG)
                if (dshotBitbangActive && (DMA_DEVICE_NO(dmaRxIdentifier) == 2)) {
                    dmaRxIdentifier = DMA_NONE;
                    break;
                }
#endif
                if (!dmaAllocate(dmaRxIdentifier, OWNER_SPI_MISO, device + 1)) {
                    dmaRxIdentifier = DMA_NONE;
                    continue;
                }
                bus->dmaRx = dmaGetDescriptorByIdentifier(dmaRxIdentifier);
                bus->dmaRx->stream = DMA_DEVICE_INDEX(dmaRxIdentifier);
                bus->dmaRx->channel = dmaRxChannelSpec->channel;

                dmaEnable(dmaRxIdentifier);

                break;
            }
        }

        if (dmaTxIdentifier && dmaRxIdentifier) {
            // Ensure streams are disabled
            spiInternalResetStream(bus->dmaRx);
            spiInternalResetStream(bus->dmaTx);

            spiInternalResetDescriptors(bus);

            /* Note that this driver may be called both from the normal thread of execution, or from USB interrupt
             * handlers, so the DMA completion interrupt must be at a higher priority
             */
            dmaSetHandler(dmaRxIdentifier, spiRxIrqHandler, NVIC_PRIO_SPI_DMA, 0);

            bus->useDMA = true;
#if !defined(STM32G4) && !defined(STM32H7)
        } else if (dmaTxIdentifier) {
            // Transmit on DMA is adequate for OSD so worth having
            bus->dmaTx = dmaGetDescriptorByIdentifier(dmaTxIdentifier);
            bus->dmaRx = (dmaChannelDescriptor_t *)NULL;

            // Ensure streams are disabled
            spiInternalResetStream(bus->dmaTx);

            spiInternalResetDescriptors(bus);

            dmaSetHandler(dmaTxIdentifier, spiTxIrqHandler, NVIC_PRIO_SPI_DMA, 0);

            bus->useDMA = true;
#endif
        } else {
            // Disassociate channels from bus
            bus->dmaRx = (dmaChannelDescriptor_t *)NULL;
            bus->dmaTx = (dmaChannelDescriptor_t *)NULL;
        }
    }
}
// Set the clock divisor to be used for accesses by the given device
void spiSetClkDivisor(const extDevice_t *dev, uint16_t divisor)
{
    ((extDevice_t *)dev)->busType_u.spi.speed = divisor;
}

// Set the clock phase/polarity to be used for accesses by the given device
void spiSetClkPhasePolarity(const extDevice_t *dev, bool leadingEdge)
{
    ((extDevice_t *)dev)->busType_u.spi.leadingEdge = leadingEdge;
}

// Enable/disable DMA on a specific device. Enabled by default.
void spiDmaEnable(const extDevice_t *dev, bool enable)
{
    ((extDevice_t *)dev)->useDMA = enable;
}
bool spiUseDMA(const extDevice_t *dev)
{
    // Full DMA is only used when both transmit and receive DMA channels are available
    return dev->bus->useDMA && dev->bus->dmaRx && dev->useDMA;
}

bool spiUseMOSI_DMA(const extDevice_t *dev)
{
    return dev->bus->useDMA && dev->useDMA;
}
void spiBusDeviceRegister(const extDevice_t *dev)
{
    UNUSED(dev);

    spiRegisteredDeviceCount++;
}

uint8_t spiGetRegisteredDeviceCount(void)
{
    return spiRegisteredDeviceCount;
}

uint8_t spiGetExtDeviceCount(const extDevice_t *dev)
{
    return dev->bus->deviceCount;
}
// DMA transfer setup and start
void spiSequence(const extDevice_t *dev, busSegment_t *segments)
{
    busDevice_t *bus = dev->bus;

    ATOMIC_BLOCK(NVIC_PRIO_MAX) {
        if (spiIsBusy(dev)) {
            busSegment_t *endSegment;

            // Defer this transfer to be triggered upon completion of the current transfer

            // Find the last segment of the new transfer
            for (endSegment = segments; endSegment->len; endSegment++);

            // Safe to discard the volatile qualifier as we're in an atomic block
            busSegment_t *endCmpSegment = (busSegment_t *)bus->curSegment;

            if (endCmpSegment) {
                while (true) {
                    // Find the last segment of the current transfer
                    for (; endCmpSegment->len; endCmpSegment++);

                    if (endCmpSegment == endSegment) {
                        /* Attempt to use the new segment list twice in the same queue. Abort.
                         * Note that this can only happen with non-blocking transfers so drivers must take
                         * care to avoid this.
                         * */
                        return;
                    }

                    if (endCmpSegment->u.link.dev == NULL) {
                        // End of the segment list queue reached
                        break;
                    } else {
                        // Follow the link to the next queued segment list
                        endCmpSegment = (busSegment_t *)endCmpSegment->u.link.segments;
                    }
                }
            }

            // Record the dev and segments parameters in the terminating segment entry
            endCmpSegment->u.link.dev = dev;
            endCmpSegment->u.link.segments = segments;

            return;
        } else {
            // Claim the bus with this list of segments
            bus->curSegment = segments;
        }
    }

    spiSequenceStart(dev);
}
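
/*
 * Usage sketch for a non-blocking transfer: spiSequence() only queues the segment list
 * and returns, so (unlike the blocking helpers above) the segments and buffers must
 * outlive the call, typically in static storage. Names are illustrative:
 *
 *     static uint8_t fooCmd = 0x42;                  // hypothetical command byte
 *     static uint8_t fooResult[6];
 *     static busSegment_t fooSegments[] = {
 *         {.u.buffers = {&fooCmd, NULL}, sizeof(fooCmd), false, NULL},
 *         {.u.buffers = {NULL, fooResult}, sizeof(fooResult), true, fooSegmentComplete},
 *         {.u.link = {NULL, NULL}, 0, true, NULL},
 *     };
 *
 *     spiSequence(dev, fooSegments);                 // returns immediately; completion is signalled via DMA interrupt
 *     // ...later, check spiIsBusy(dev) or use the callback before reusing the buffers
 */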
#endif