/* SPDX-License-Identifier: GPL-2.0-only */
/* NVIDIA Tegra SPI controller (T114 and later) */

#include <arch/cache.h>
#include <device/mmio.h>
#include <assert.h>
#include <console/console.h>
#include <delay.h>
#include <spi-generic.h>
#include <spi_flash.h>
#include <soc/addressmap.h>
#include <soc/dma.h>
#include <soc/spi.h>
#include <symbols.h>
#include <types.h>

#if defined(CONFIG_DEBUG_SPI) && CONFIG_DEBUG_SPI
/* Forward the variadic arguments so format specifiers actually work. */
# define DEBUG_SPI(x, ...) printk(BIOS_DEBUG, "TEGRA_SPI: " x, ##__VA_ARGS__)
#else
# define DEBUG_SPI(x, ...)
#endif
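
/*
 * Usage sketch (illustrative only; "slave" and "bytes" are hypothetical
 * locals): with CONFIG_DEBUG_SPI enabled, the macro forwards its
 * arguments to printk(), e.g.:
 *
 *	DEBUG_SPI("bus %u: %u bytes\n", slave->bus, bytes);
 */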

/*
 * 64 packets in FIFO mode, BLOCK_SIZE packets in DMA mode. Packets can vary
 * in size from 4 to 32 bits. To keep things simple we'll use 8-bit packets.
 */
#define SPI_PACKET_SIZE_BYTES		1
#define SPI_MAX_TRANSFER_BYTES_FIFO	(64 * SPI_PACKET_SIZE_BYTES)
#define SPI_MAX_TRANSFER_BYTES_DMA	(65535 * SPI_PACKET_SIZE_BYTES)

/*
 * This is used to work around an issue seen where it may take some time for
 * packets to show up in the FIFO after they have been received and the
 * BLOCK_COUNT has been incremented.
 */
#define SPI_FIFO_XFER_TIMEOUT_US	1000

/* COMMAND1 */
#define SPI_CMD1_GO			(1 << 31)
#define SPI_CMD1_M_S			(1 << 30)
#define SPI_CMD1_MODE_MASK		0x3
#define SPI_CMD1_MODE_SHIFT		28
#define SPI_CMD1_CS_SEL_MASK		0x3
#define SPI_CMD1_CS_SEL_SHIFT		26
#define SPI_CMD1_CS_POL_INACTIVE3	(1 << 25)
#define SPI_CMD1_CS_POL_INACTIVE2	(1 << 24)
#define SPI_CMD1_CS_POL_INACTIVE1	(1 << 23)
#define SPI_CMD1_CS_POL_INACTIVE0	(1 << 22)
#define SPI_CMD1_CS_SW_HW		(1 << 21)
#define SPI_CMD1_CS_SW_VAL		(1 << 20)
#define SPI_CMD1_IDLE_SDA_MASK		0x3
#define SPI_CMD1_IDLE_SDA_SHIFT		18
#define SPI_CMD1_BIDIR			(1 << 17)
#define SPI_CMD1_LSBI_FE		(1 << 16)
#define SPI_CMD1_LSBY_FE		(1 << 15)
#define SPI_CMD1_BOTH_EN_BIT		(1 << 14)
#define SPI_CMD1_BOTH_EN_BYTE		(1 << 13)
#define SPI_CMD1_RX_EN			(1 << 12)
#define SPI_CMD1_TX_EN			(1 << 11)
#define SPI_CMD1_PACKED			(1 << 5)
#define SPI_CMD1_BIT_LEN_MASK		0x1f
#define SPI_CMD1_BIT_LEN_SHIFT		0

/* COMMAND2 */
#define SPI_CMD2_TX_CLK_TAP_DELAY	(1 << 6)
#define SPI_CMD2_TX_CLK_TAP_DELAY_MASK	(0x3F << 6)
#define SPI_CMD2_RX_CLK_TAP_DELAY	(1 << 0)
#define SPI_CMD2_RX_CLK_TAP_DELAY_MASK	(0x3F << 0)

/* SPI_TRANS_STATUS */
#define SPI_STATUS_RDY			(1 << 30)
#define SPI_STATUS_SLV_IDLE_COUNT_MASK	0xff
#define SPI_STATUS_SLV_IDLE_COUNT_SHIFT	16
#define SPI_STATUS_BLOCK_COUNT		0xffff
#define SPI_STATUS_BLOCK_COUNT_SHIFT	0

/* SPI_FIFO_STATUS */
#define SPI_FIFO_STATUS_CS_INACTIVE			(1 << 31)
#define SPI_FIFO_STATUS_FRAME_END			(1 << 30)
#define SPI_FIFO_STATUS_RX_FIFO_FULL_COUNT_MASK		0x7f
#define SPI_FIFO_STATUS_RX_FIFO_FULL_COUNT_SHIFT	23
#define SPI_FIFO_STATUS_TX_FIFO_EMPTY_COUNT_MASK	0x7f
#define SPI_FIFO_STATUS_TX_FIFO_EMPTY_COUNT_SHIFT	16
#define SPI_FIFO_STATUS_RX_FIFO_FLUSH			(1 << 15)
#define SPI_FIFO_STATUS_TX_FIFO_FLUSH			(1 << 14)
#define SPI_FIFO_STATUS_ERR				(1 << 8)
#define SPI_FIFO_STATUS_TX_FIFO_OVF			(1 << 7)
#define SPI_FIFO_STATUS_TX_FIFO_UNR			(1 << 6)
#define SPI_FIFO_STATUS_RX_FIFO_OVF			(1 << 5)
#define SPI_FIFO_STATUS_RX_FIFO_UNR			(1 << 4)
#define SPI_FIFO_STATUS_TX_FIFO_FULL			(1 << 3)
#define SPI_FIFO_STATUS_TX_FIFO_EMPTY			(1 << 2)
#define SPI_FIFO_STATUS_RX_FIFO_FULL			(1 << 1)
#define SPI_FIFO_STATUS_RX_FIFO_EMPTY			(1 << 0)

/* SPI_DMA_CTL */
#define SPI_DMA_CTL_DMA			(1 << 31)
#define SPI_DMA_CTL_CONT		(1 << 30)
#define SPI_DMA_CTL_IE_RX		(1 << 29)
#define SPI_DMA_CTL_IE_TX		(1 << 28)
#define SPI_DMA_CTL_RX_TRIG_MASK	0x3
#define SPI_DMA_CTL_RX_TRIG_SHIFT	19
#define SPI_DMA_CTL_TX_TRIG_MASK	0x3
#define SPI_DMA_CTL_TX_TRIG_SHIFT	15

/* SPI_DMA_BLK */
#define SPI_DMA_CTL_BLOCK_SIZE_MASK	0xffff
#define SPI_DMA_CTL_BLOCK_SIZE_SHIFT	0
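
/*
 * Illustrative sketch (not code used by this driver): the multi-bit fields
 * above compose with their shift/mask pairs. For example, a COMMAND1 value
 * for SPI mode 0 on chip-select 0 with 8-bit packets could be built as:
 *
 *	u32 cmd1 = (0 << SPI_CMD1_MODE_SHIFT) |
 *		   (0 << SPI_CMD1_CS_SEL_SHIFT) |
 *		   (7 << SPI_CMD1_BIT_LEN_SHIFT);	// BIT_LEN holds n-1 bits
 */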

static struct tegra_spi_channel tegra_spi_channels[] = {
	/*
	 * Note: Tegra pinmux must be set up for the corresponding SPI channel
	 * in order for its registers to be accessible. If pinmux has not been
	 * set up, access to the channel's registers will simply hang.
	 *
	 * TODO(dhendrix): Clarify or remove this comment (is clock setup
	 * necessary first, or just pinmux, or both?)
	 */
	{
		.slave = { .bus = 1, },
		.regs = (struct tegra_spi_regs *)TEGRA_SPI1_BASE,
		.req_sel = APBDMA_SLAVE_SL2B1,
	},
	{
		.slave = { .bus = 2, },
		.regs = (struct tegra_spi_regs *)TEGRA_SPI2_BASE,
		.req_sel = APBDMA_SLAVE_SL2B2,
	},
	{
		.slave = { .bus = 3, },
		.regs = (struct tegra_spi_regs *)TEGRA_SPI3_BASE,
		.req_sel = APBDMA_SLAVE_SL2B3,
	},
	{
		.slave = { .bus = 4, },
		.regs = (struct tegra_spi_regs *)TEGRA_SPI4_BASE,
		.req_sel = APBDMA_SLAVE_SL2B4,
	},
	{
		.slave = { .bus = 5, },
		.regs = (struct tegra_spi_regs *)TEGRA_SPI5_BASE,
		.req_sel = APBDMA_SLAVE_SL2B5,
	},
	{
		.slave = { .bus = 6, },
		.regs = (struct tegra_spi_regs *)TEGRA_SPI6_BASE,
		.req_sel = APBDMA_SLAVE_SL2B6,
	},
	{
		.slave = { .bus = 7, },
		.regs = (struct tegra_spi_regs *)TEGRA_QSPI_BASE,
		.req_sel = APBDMA_SLAVE_QSPI,
	},
};

enum spi_direction {
	SPI_SEND,
	SPI_RECEIVE,
};

struct tegra_spi_channel *tegra_spi_init(unsigned int bus)
{
	int i;
	struct tegra_spi_channel *spi = NULL;

	for (i = 0; i < ARRAY_SIZE(tegra_spi_channels); i++) {
		if (tegra_spi_channels[i].slave.bus == bus) {
			spi = &tegra_spi_channels[i];
			break;
		}
	}
	if (!spi)
		return NULL;

	/* software drives chip-select, set value to high */
	setbits32(&spi->regs->command1,
		  SPI_CMD1_CS_SW_HW | SPI_CMD1_CS_SW_VAL);

	/* 8-bit transfers, unpacked mode, most significant bit first */
	clrbits32(&spi->regs->command1,
		  SPI_CMD1_BIT_LEN_MASK | SPI_CMD1_PACKED);
	setbits32(&spi->regs->command1, 7 << SPI_CMD1_BIT_LEN_SHIFT);

	return spi;
}
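
/*
 * Example usage (hypothetical caller; assumes bus 4 has its pinmux set up
 * per the note above the channel table):
 *
 *	struct tegra_spi_channel *channel = tegra_spi_init(4);
 *	if (!channel)
 *		printk(BIOS_ERR, "SPI bus 4 not available\n");
 */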

static struct tegra_spi_channel * const to_tegra_spi(int bus)
{
	return &tegra_spi_channels[bus - 1];
}

static int spi_ctrlr_claim_bus(const struct spi_slave *slave)
{
	struct tegra_spi_regs *regs = to_tegra_spi(slave->bus)->regs;
	u32 val;

	tegra_spi_init(slave->bus);

	val = read32(&regs->command1);

	/* select appropriate chip-select line */
	val &= ~(SPI_CMD1_CS_SEL_MASK << SPI_CMD1_CS_SEL_SHIFT);
	val |= (slave->cs << SPI_CMD1_CS_SEL_SHIFT);

	/* drive chip-select with the inverse of the "inactive" value */
	if (val & (SPI_CMD1_CS_POL_INACTIVE0 << slave->cs))
		val &= ~SPI_CMD1_CS_SW_VAL;
	else
		val |= SPI_CMD1_CS_SW_VAL;

	write32(&regs->command1, val);
	return 0;
}

static void spi_ctrlr_release_bus(const struct spi_slave *slave)
{
	struct tegra_spi_regs *regs = to_tegra_spi(slave->bus)->regs;
	u32 val;

	val = read32(&regs->command1);

	/* restore chip-select to its "inactive" value */
	if (val & (SPI_CMD1_CS_POL_INACTIVE0 << slave->cs))
		val |= SPI_CMD1_CS_SW_VAL;
	else
		val &= ~SPI_CMD1_CS_SW_VAL;

	write32(&regs->command1, val);
}

static void dump_fifo_status(struct tegra_spi_channel *spi)
{
	u32 status = read32(&spi->regs->fifo_status);

	printk(BIOS_INFO, "Raw FIFO status: 0x%08x\n", status);
	if (status & SPI_FIFO_STATUS_TX_FIFO_OVF)
		printk(BIOS_INFO, "\tTx overflow detected\n");
	if (status & SPI_FIFO_STATUS_TX_FIFO_UNR)
		printk(BIOS_INFO, "\tTx underrun detected\n");
	if (status & SPI_FIFO_STATUS_RX_FIFO_OVF)
		printk(BIOS_INFO, "\tRx overflow detected\n");
	if (status & SPI_FIFO_STATUS_RX_FIFO_UNR)
		printk(BIOS_INFO, "\tRx underrun detected\n");

	printk(BIOS_INFO, "TX_FIFO: 0x%08x, TX_DATA: 0x%08x\n",
	       read32(&spi->regs->tx_fifo), read32(&spi->regs->tx_data));
	printk(BIOS_INFO, "RX_FIFO: 0x%08x, RX_DATA: 0x%08x\n",
	       read32(&spi->regs->rx_fifo), read32(&spi->regs->rx_data));
}

static void clear_fifo_status(struct tegra_spi_channel *spi)
{
	clrbits32(&spi->regs->fifo_status,
		  SPI_FIFO_STATUS_ERR |
		  SPI_FIFO_STATUS_TX_FIFO_OVF |
		  SPI_FIFO_STATUS_TX_FIFO_UNR |
		  SPI_FIFO_STATUS_RX_FIFO_OVF |
		  SPI_FIFO_STATUS_RX_FIFO_UNR);
}

static void dump_spi_regs(struct tegra_spi_channel *spi)
{
	printk(BIOS_INFO, "SPI regs:\n"
	       "\tdma_blk: 0x%08x\n"
	       "\tcommand1: 0x%08x\n"
	       "\tdma_ctl: 0x%08x\n"
	       "\ttrans_status: 0x%08x\n",
	       read32(&spi->regs->dma_blk),
	       read32(&spi->regs->command1),
	       read32(&spi->regs->dma_ctl),
	       read32(&spi->regs->trans_status));
}

static void dump_dma_regs(struct apb_dma_channel *dma)
{
	if (dma) {
		printk(BIOS_INFO, "DMA regs:\n"
		       "\tahb_ptr: 0x%08x\n"
		       "\tapb_ptr: 0x%08x\n"
		       "\tahb_seq: 0x%08x\n"
		       "\tapb_seq: 0x%08x\n"
		       "\tcsr: 0x%08x\n"
		       "\tcsre: 0x%08x\n"
		       "\twcount: 0x%08x\n"
		       "\tdma_byte_sta: 0x%08x\n"
		       "\tword_transfer: 0x%08x\n",
		       read32(&dma->regs->ahb_ptr),
		       read32(&dma->regs->apb_ptr),
		       read32(&dma->regs->ahb_seq),
		       read32(&dma->regs->apb_seq),
		       read32(&dma->regs->csr),
		       read32(&dma->regs->csre),
		       read32(&dma->regs->wcount),
		       read32(&dma->regs->dma_byte_sta),
		       read32(&dma->regs->word_transfer));
	}
}

static inline unsigned int spi_byte_count(struct tegra_spi_channel *spi)
{
	/* FIXME: Make this take total packet size into account */
	return read32(&spi->regs->trans_status) &
		(SPI_STATUS_BLOCK_COUNT << SPI_STATUS_BLOCK_COUNT_SHIFT);
}

static void tegra_spi_wait(struct tegra_spi_channel *spi)
{
	uint32_t dma_blk_count = 1 + (read32(&spi->regs->dma_blk) &
				      (SPI_DMA_CTL_BLOCK_SIZE_MASK <<
				       SPI_DMA_CTL_BLOCK_SIZE_SHIFT));

	while ((read32(&spi->regs->trans_status) & SPI_STATUS_RDY) !=
	       SPI_STATUS_RDY)
		;

	/*
	 * Once the RDY bit is set, the number of blocks processed should
	 * always equal the number programmed into the dma_blk register.
	 */
	ASSERT(spi_byte_count(spi) == dma_blk_count);
}

static int fifo_error(struct tegra_spi_channel *spi)
{
	return read32(&spi->regs->fifo_status) & SPI_FIFO_STATUS_ERR ? 1 : 0;
}

static void flush_fifos(struct tegra_spi_channel *spi)
{
	const uint32_t flush_mask = SPI_FIFO_STATUS_TX_FIFO_FLUSH |
		SPI_FIFO_STATUS_RX_FIFO_FLUSH;

	/*
	 * Set the flush bits; hardware clears them once the FIFOs are empty.
	 * (The original code also read fifo_status into a local variable and
	 * OR'ed in the flush bits, but never used the result: a dead store.)
	 */
	write32(&spi->regs->fifo_status, flush_mask);

	while (read32(&spi->regs->fifo_status) & flush_mask)
		;
}

static int tegra_spi_pio_prepare(struct tegra_spi_channel *spi,
				 unsigned int bytes, enum spi_direction dir)
{
	u8 *p = spi->out_buf;
	unsigned int todo = MIN(bytes, SPI_MAX_TRANSFER_BYTES_FIFO);
	u32 enable_mask;

	flush_fifos(spi);

	if (dir == SPI_SEND)
		enable_mask = SPI_CMD1_TX_EN;
	else
		enable_mask = SPI_CMD1_RX_EN;

	/*
	 * BLOCK_SIZE in the SPI_DMA_BLK register applies to both DMA and
	 * PIO transfers, and it must be programmed before RX_EN or TX_EN
	 * is set.
	 */
	write32(&spi->regs->dma_blk, todo - 1);

	setbits32(&spi->regs->command1, enable_mask);

	if (dir == SPI_SEND) {
		/*
		 * Only fill the FIFO with the clamped byte count ("todo");
		 * pushing the caller's full "bytes" could overflow the
		 * 64-entry FIFO if it exceeds the FIFO limit.
		 */
		unsigned int to_fifo = todo;
		while (to_fifo) {
			write32(&spi->regs->tx_fifo, *p);
			p++;
			to_fifo--;
		}
	}

	return todo;
}

static void tegra_spi_pio_start(struct tegra_spi_channel *spi)
{
	setbits32(&spi->regs->trans_status, SPI_STATUS_RDY);
	/*
	 * Need to let the other register bits stabilize before setting the
	 * GO bit.
	 *
	 * From IAS:
	 * For successful operation at various freq combinations, a minimum
	 * of 4-5 spi_clk cycles of delay might be required before enabling
	 * the PIO or DMA bit. This is needed to overcome the MCP between
	 * core and pad_macro. The worst-case delay can be calculated
	 * considering the slowest qspi_clk of 1 MHz; based on that, a 1 us
	 * delay should be enough before enabling PIO or DMA.
	 */
	udelay(2);
	setbits32(&spi->regs->command1, SPI_CMD1_GO);
	/* Need to wait a few cycles before command1 register is read */
	udelay(1);
	/* Make sure the write to command1 completes. */
	read32(&spi->regs->command1);
}

static inline u32 rx_fifo_count(struct tegra_spi_channel *spi)
{
	return (read32(&spi->regs->fifo_status) >>
		SPI_FIFO_STATUS_RX_FIFO_FULL_COUNT_SHIFT) &
		SPI_FIFO_STATUS_RX_FIFO_FULL_COUNT_MASK;
}

static int tegra_spi_pio_finish(struct tegra_spi_channel *spi)
{
	u8 *p = spi->in_buf;

	clrbits32(&spi->regs->command1, SPI_CMD1_RX_EN | SPI_CMD1_TX_EN);

	ASSERT(rx_fifo_count(spi) == spi_byte_count(spi));

	if (p) {
		while (!(read32(&spi->regs->fifo_status) &
			 SPI_FIFO_STATUS_RX_FIFO_EMPTY)) {
			*p = read8(&spi->regs->rx_fifo);
			p++;
		}
	}

	if (fifo_error(spi)) {
		printk(BIOS_ERR, "%s: ERROR:\n", __func__);
		dump_spi_regs(spi);
		dump_fifo_status(spi);
		return -1;
	}

	return 0;
}

static void setup_dma_params(struct tegra_spi_channel *spi,
			     struct apb_dma_channel *dma)
{
	/* APB bus width = 8-bits, address wrap for each word */
	clrbits32(&dma->regs->apb_seq,
		  APB_BUS_WIDTH_MASK << APB_BUS_WIDTH_SHIFT);
	/* AHB 1 word burst, bus width = 32 bits (fixed in hardware),
	 * no address wrapping */
	clrsetbits32(&dma->regs->ahb_seq,
		     (AHB_BURST_MASK << AHB_BURST_SHIFT),
		     4 << AHB_BURST_SHIFT);

	/* Set ONCE mode to transfer one "block" at a time (64KB) and enable
	 * flow control. */
	clrbits32(&dma->regs->csr,
		  APB_CSR_REQ_SEL_MASK << APB_CSR_REQ_SEL_SHIFT);
	setbits32(&dma->regs->csr, APB_CSR_ONCE | APB_CSR_FLOW |
		  (spi->req_sel << APB_CSR_REQ_SEL_SHIFT));
}

static int tegra_spi_dma_prepare(struct tegra_spi_channel *spi,
				 unsigned int bytes, enum spi_direction dir)
{
	unsigned int todo, wcount;

	/*
	 * For DMA we need to think of things in terms of word count.
	 * AHB width is fixed at 32-bits. To avoid overrunning
	 * the in/out buffers we must align down. (Note: the lowest 2 bits
	 * in the WCOUNT register are ignored, and WCOUNT seems to count
	 * words starting at n-1)
	 *
	 * Example: If "bytes" is 7 and we are transferring 1 byte at a time,
	 * WCOUNT should be 4. The remaining 3 bytes must be transferred
	 * using PIO.
	 */
	todo = MIN(bytes, SPI_MAX_TRANSFER_BYTES_DMA - TEGRA_DMA_ALIGN_BYTES);
	todo = ALIGN_DOWN(todo, TEGRA_DMA_ALIGN_BYTES);
	wcount = ALIGN_DOWN(todo - TEGRA_DMA_ALIGN_BYTES, TEGRA_DMA_ALIGN_BYTES);
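
	/*
	 * Working the example above through this arithmetic (assuming
	 * TEGRA_DMA_ALIGN_BYTES is the 4-byte AHB word size): for bytes = 7,
	 * todo = ALIGN_DOWN(7, 4) = 4 and wcount = ALIGN_DOWN(4 - 4, 4) = 0,
	 * i.e. one 32-bit word, since the register counts from n-1. The
	 * remaining 3 bytes are left for a follow-up PIO transfer.
	 */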

	flush_fifos(spi);

	if (dir == SPI_SEND) {
		spi->dma_out = dma_claim();
		if (!spi->dma_out)
			return -1;

		/* ensure bytes to send will be visible to DMA controller */
		dcache_clean_by_mva(spi->out_buf, bytes);

		write32(&spi->dma_out->regs->apb_ptr,
			(uintptr_t)&spi->regs->tx_fifo);
		write32(&spi->dma_out->regs->ahb_ptr, (uintptr_t)spi->out_buf);
		setbits32(&spi->dma_out->regs->csr, APB_CSR_DIR);
		setup_dma_params(spi, spi->dma_out);
		write32(&spi->dma_out->regs->wcount, wcount);
	} else {
		spi->dma_in = dma_claim();
		if (!spi->dma_in)
			return -1;

		/* avoid data collisions */
		dcache_clean_invalidate_by_mva(spi->in_buf, bytes);

		write32(&spi->dma_in->regs->apb_ptr,
			(uintptr_t)&spi->regs->rx_fifo);
		write32(&spi->dma_in->regs->ahb_ptr, (uintptr_t)spi->in_buf);
		clrbits32(&spi->dma_in->regs->csr, APB_CSR_DIR);
		setup_dma_params(spi, spi->dma_in);
		write32(&spi->dma_in->regs->wcount, wcount);
	}

	/* BLOCK_SIZE starts at n-1 */
	write32(&spi->regs->dma_blk, todo - 1);
	return todo;
}

static void tegra_spi_dma_start(struct tegra_spi_channel *spi)
{
	/*
	 * The RDY bit in SPI_TRANS_STATUS needs to be cleared manually
	 * (set bit to clear) between each transaction. Otherwise the next
	 * transaction does not start.
	 */
	setbits32(&spi->regs->trans_status, SPI_STATUS_RDY);

	struct apb_dma * const apb_dma = (struct apb_dma *)TEGRA_APB_DMA_BASE;

	/*
	 * The DMA triggers have units of packets. As each packet is currently
	 * 1 byte, the triggers need to be set to 4 packets (0b01) to match
	 * the AHB 32-bit (4 byte) transfer. Otherwise FIFO errors can occur.
	 */
	if (spi->dma_out) {
		/* Enable secure access for the channel. */
		setbits32(&apb_dma->security_reg,
			  SECURITY_EN_BIT(spi->dma_out->num));
		clrsetbits32(&spi->regs->dma_ctl,
			     SPI_DMA_CTL_TX_TRIG_MASK << SPI_DMA_CTL_TX_TRIG_SHIFT,
			     1 << SPI_DMA_CTL_TX_TRIG_SHIFT);
		setbits32(&spi->regs->command1, SPI_CMD1_TX_EN);
	}
	if (spi->dma_in) {
		/* Enable secure access for the channel. */
		setbits32(&apb_dma->security_reg,
			  SECURITY_EN_BIT(spi->dma_in->num));
		clrsetbits32(&spi->regs->dma_ctl,
			     SPI_DMA_CTL_RX_TRIG_MASK << SPI_DMA_CTL_RX_TRIG_SHIFT,
			     1 << SPI_DMA_CTL_RX_TRIG_SHIFT);
		setbits32(&spi->regs->command1, SPI_CMD1_RX_EN);
	}

	/*
	 * To avoid underrun conditions, enable APB DMA before SPI DMA for
	 * Tx, and enable SPI DMA before APB DMA for Rx.
	 */
	if (spi->dma_out)
		dma_start(spi->dma_out);
	setbits32(&spi->regs->dma_ctl, SPI_DMA_CTL_DMA);
	if (spi->dma_in)
		dma_start(spi->dma_in);
}

static int tegra_spi_dma_finish(struct tegra_spi_channel *spi)
{
	int ret;
	unsigned int todo;

	struct apb_dma * const apb_dma = (struct apb_dma *)TEGRA_APB_DMA_BASE;

	if (spi->dma_in) {
		todo = read32(&spi->dma_in->regs->wcount);

		while ((read32(&spi->dma_in->regs->dma_byte_sta) < todo) ||
		       dma_busy(spi->dma_in))
			;
		dma_stop(spi->dma_in);
		clrbits32(&spi->regs->command1, SPI_CMD1_RX_EN);
		/* Disable secure access for the channel. */
		clrbits32(&apb_dma->security_reg,
			  SECURITY_EN_BIT(spi->dma_in->num));
		dma_release(spi->dma_in);
	}

	if (spi->dma_out) {
		todo = read32(&spi->dma_out->regs->wcount);

		while ((read32(&spi->dma_out->regs->dma_byte_sta) < todo) ||
		       dma_busy(spi->dma_out))
			;
		clrbits32(&spi->regs->command1, SPI_CMD1_TX_EN);
		dma_stop(spi->dma_out);
		/* Disable secure access for the channel. */
		clrbits32(&apb_dma->security_reg,
			  SECURITY_EN_BIT(spi->dma_out->num));
		dma_release(spi->dma_out);
	}

	if (fifo_error(spi)) {
		printk(BIOS_ERR, "%s: ERROR:\n", __func__);
		dump_dma_regs(spi->dma_out);
		dump_dma_regs(spi->dma_in);
		dump_spi_regs(spi);
		dump_fifo_status(spi);
		ret = -1;
		goto done;
	}

	ret = 0;
done:
	spi->dma_in = NULL;
	spi->dma_out = NULL;
	return ret;
}

/*
 * xfer_setup() prepares a transfer. It does sanity checking, alignment, and
 * sets the transfer mode used by this channel (if not set already).
 *
 * A few caveats to watch out for:
 * - The number of bytes which can be transferred may be smaller than the
 *   number of bytes the caller specifies. The number of bytes ready for
 *   a transfer will be returned (unless an error occurs).
 *
 * - Only one mode can be used for both RX and TX. The transfer mode of the
 *   SPI channel (spi->xfer_mode) is checked each time this function is
 *   called. If conflicting modes are detected, spi->xfer_mode will be set
 *   to XFER_MODE_NONE and an error will be returned.
 *
 * Returns bytes ready for transfer if successful, <0 to indicate error.
 */
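
/*
 * Illustrative walk-through (hypothetical numbers, assuming a 64-byte cache
 * line): receiving 200 bytes into a buffer that starts 8 bytes past a line
 * boundary gets split over three calls as the caller loops:
 *
 *	1. 56 bytes by PIO, up to the first cache-line boundary
 *	2. 128 bytes by DMA (of the 144 aligned bytes remaining,
 *	   144 % 64 = 16 trailing bytes are held back)
 *	3. 16 bytes by PIO, the unaligned tail
 */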

static int xfer_setup(struct tegra_spi_channel *spi, void *buf,
		      unsigned int bytes, enum spi_direction dir)
{
	unsigned int line_size = dcache_line_bytes();
	unsigned int align;
	int ret = -1;

	if (!bytes)
		return 0;

	if (dir == SPI_SEND)
		spi->out_buf = buf;
	else if (dir == SPI_RECEIVE)
		spi->in_buf = buf;

	/*
	 * Alignment considerations:
	 * When we enable caching we'll need to clean/invalidate portions of
	 * memory. So we need to be careful about memory alignment. Also, DMA
	 * likes to operate on 4 bytes at a time on the AHB side. So for
	 * example, if we only want to receive 1 byte, 4 bytes will be
	 * written in memory even if those extra 3 bytes are beyond the
	 * length we want.
	 *
	 * For now we'll use PIO to send/receive unaligned bytes. We may
	 * consider setting aside some space for a kind of bounce buffer to
	 * stay in DMA mode once we have a chance to benchmark the two
	 * approaches.
	 */

	if (bytes < line_size) {
		if (spi->xfer_mode == XFER_MODE_DMA) {
			spi->xfer_mode = XFER_MODE_NONE;
			ret = -1;
		} else {
			spi->xfer_mode = XFER_MODE_PIO;
			ret = tegra_spi_pio_prepare(spi, bytes, dir);
		}
		goto done;
	}

	/* transfer bytes before the aligned boundary */
	align = line_size - ((uintptr_t)buf % line_size);
	if ((align != 0) && (align != line_size)) {
		if (spi->xfer_mode == XFER_MODE_DMA) {
			spi->xfer_mode = XFER_MODE_NONE;
			ret = -1;
		} else {
			spi->xfer_mode = XFER_MODE_PIO;
			ret = tegra_spi_pio_prepare(spi, align, dir);
		}
		goto done;
	}

	/* do aligned DMA transfer */
	align = (((uintptr_t)buf + bytes) % line_size);
	if (bytes - align > 0) {
		unsigned int dma_bytes = bytes - align;

		if (spi->xfer_mode == XFER_MODE_PIO) {
			spi->xfer_mode = XFER_MODE_NONE;
			ret = -1;
		} else {
			spi->xfer_mode = XFER_MODE_DMA;
			ret = tegra_spi_dma_prepare(spi, dma_bytes, dir);
		}
		goto done;
	}

	/* transfer any remaining unaligned bytes */
	if (align) {
		if (spi->xfer_mode == XFER_MODE_DMA) {
			spi->xfer_mode = XFER_MODE_NONE;
			ret = -1;
		} else {
			spi->xfer_mode = XFER_MODE_PIO;
			ret = tegra_spi_pio_prepare(spi, align, dir);
		}
		goto done;
	}

done:
	return ret;
}

static void xfer_start(struct tegra_spi_channel *spi)
{
	if (spi->xfer_mode == XFER_MODE_DMA)
		tegra_spi_dma_start(spi);
	else
		tegra_spi_pio_start(spi);
}

static void xfer_wait(struct tegra_spi_channel *spi)
{
	tegra_spi_wait(spi);
}

static int xfer_finish(struct tegra_spi_channel *spi)
{
	int ret;

	if (spi->xfer_mode == XFER_MODE_DMA)
		ret = tegra_spi_dma_finish(spi);
	else
		ret = tegra_spi_pio_finish(spi);

	spi->xfer_mode = XFER_MODE_NONE;
	return ret;
}

static int spi_ctrlr_xfer(const struct spi_slave *slave, const void *dout,
			  size_t out_bytes, void *din, size_t in_bytes)
{
	struct tegra_spi_channel *spi = to_tegra_spi(slave->bus);
	u8 *out_buf = (u8 *)dout;
	u8 *in_buf = (u8 *)din;
	size_t todo;
	int ret = 0;

	/* tegra bus numbers start at 1 */
	ASSERT(slave->bus >= 1 && slave->bus <= ARRAY_SIZE(tegra_spi_channels));

	while (out_bytes || in_bytes) {
		int x = 0;

		if (out_bytes == 0)
			todo = in_bytes;
		else if (in_bytes == 0)
			todo = out_bytes;
		else
			todo = MIN(out_bytes, in_bytes);

		if (out_bytes) {
			x = xfer_setup(spi, out_buf, todo, SPI_SEND);
			if (x < 0) {
				if (spi->xfer_mode == XFER_MODE_NONE) {
					spi->xfer_mode = XFER_MODE_PIO;
					continue;
				} else {
					ret = -1;
					break;
				}
			}
		}
		if (in_bytes) {
			x = xfer_setup(spi, in_buf, todo, SPI_RECEIVE);
			if (x < 0) {
				if (spi->xfer_mode == XFER_MODE_NONE) {
					spi->xfer_mode = XFER_MODE_PIO;
					continue;
				} else {
					ret = -1;
					break;
				}
			}
		}

		/*
		 * Note: Some devices (such as Chrome EC) are sensitive to
		 * delays, so be careful when adding debug prints not to
		 * cause timeouts between transfers.
		 */
		xfer_start(spi);
		xfer_wait(spi);
		if (xfer_finish(spi)) {
			ret = -1;
			break;
		}

		/* Post-processing. */
		if (out_bytes) {
			out_bytes -= x;
			out_buf += x;
		}
		if (in_bytes) {
			in_bytes -= x;
			in_buf += x;
		}
	}

	if (ret < 0) {
		printk(BIOS_ERR, "%s: Error detected\n", __func__);
		printk(BIOS_ERR, "Transaction size: %zu, bytes remaining: "
		       "%zu out / %zu in\n", todo, out_bytes, in_bytes);
		clear_fifo_status(spi);
	}
	return ret;
}

static const struct spi_ctrlr spi_ctrlr = {
	.claim_bus = spi_ctrlr_claim_bus,
	.release_bus = spi_ctrlr_release_bus,
	.xfer = spi_ctrlr_xfer,
	.max_xfer_size = SPI_CTRLR_DEFAULT_MAX_XFER_SIZE,
};

const struct spi_ctrlr_buses spi_ctrlr_bus_map[] = {
	{
		.ctrlr = &spi_ctrlr,
		.bus_start = 1,
		.bus_end = ARRAY_SIZE(tegra_spi_channels)
	},
};

const size_t spi_ctrlr_bus_map_count = ARRAY_SIZE(spi_ctrlr_bus_map);
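
/*
 * Example usage (a minimal sketch, illustrative only; the bus and
 * chip-select numbers are assumptions): reading a SPI flash JEDEC ID
 * through coreboot's generic SPI API, which routes to this controller
 * via spi_ctrlr_bus_map above.
 *
 *	struct spi_slave slave;
 *	u8 cmd = 0x9f;		// JEDEC Read ID opcode
 *	u8 id[3];
 *
 *	if (spi_setup_slave(7, 0, &slave))
 *		return;
 *	spi_claim_bus(&slave);
 *	spi_xfer(&slave, &cmd, sizeof(cmd), id, sizeof(id));
 *	spi_release_bus(&slave);
 */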