treewide: replace GPLv2 long form headers with SPDX header
[coreboot.git] / src / soc / qualcomm / sdm845 / qspi.c
blob7337fd94da4009a1f21c103f8e1635e4c5a0d812
1 /* This file is part of the coreboot project. */
2 /* SPDX-License-Identifier: GPL-2.0-only */
4 #include <spi-generic.h>
5 #include <spi_flash.h>
6 #include <arch/cache.h>
7 #include <device/mmio.h>
8 #include <soc/addressmap.h>
9 #include <soc/qspi.h>
10 #include <soc/gpio.h>
11 #include <soc/clock.h>
12 #include <symbols.h>
13 #include <assert.h>
14 #include <gpio.h>
15 #include <string.h>
17 #define CACHE_LINE_SIZE 64
/* Index of the most recently allocated descriptor; -1 = chain is empty. */
static int curr_desc_idx = -1;
/*
 * QSPI DMA command descriptor consumed by the controller. The first
 * four 32-bit words form the hardware-defined layout -- do not reorder
 * or repack the bitfields.
 */
struct cmd_desc {
	uint32_t data_address;		/* PA of the transfer buffer */
	uint32_t next_descriptor;	/* PA of next descriptor, 0 = end of chain */
	uint32_t direction:1;		/* MASTER_READ / MASTER_WRITE */
	uint32_t multi_io_mode:3;	/* enum qspi_mode value */
	uint32_t reserved1:4;
	uint32_t fragment:1;		/* 1 = another descriptor follows */
	uint32_t reserved2:7;
	uint32_t length:16;		/* transfer size in bytes */
	//------------------------//
	/*
	 * Fields below are software bookkeeping only (used by
	 * flush_chain() to finish bounced reads); presumably ignored by
	 * the hardware -- the driver never programs them into a register.
	 */
	uint32_t bounce_src;		/* coherent bounce buffer address */
	uint32_t bounce_dst;		/* caller's destination buffer */
	uint32_t bounce_length;		/* bytes to copy; 0 = direct DMA */
	uint64_t padding[5];
};
/*
 * Multi-I/O mode encoding written into cmd_desc.multi_io_mode.
 * Values appear to be hardware-defined (note the gap at 4);
 * SDR = single data rate, DDR = double data rate, N-bit = lane count.
 */
enum qspi_mode {
	SDR_1BIT = 1,
	SDR_2BIT = 2,
	SDR_4BIT = 3,
	DDR_1BIT = 5,
	DDR_2BIT = 6,
	DDR_4BIT = 7,
};
/* Chip-select request passed to cs_change(). */
enum cs_state {
	CS_DEASSERT,
	CS_ASSERT
};
/* Transfer configuration; NOTE(review): not referenced in this file. */
struct xfer_cfg {
	enum qspi_mode mode;
};
/* DMA direction as stored in cmd_desc.direction (read = 0, write = 1). */
enum bus_xfer_direction {
	MASTER_READ = 0,
	MASTER_WRITE = 1,
};
/*
 * DMA work area placed in the cache-coherent region (_dma_coherent):
 * a fixed pool of three descriptors, each paired with one
 * cache-line-sized bounce buffer. Three is exactly the worst case
 * queued by queue_data(): prolog + aligned + epilog.
 */
struct {
	struct cmd_desc descriptors[3];
	uint8_t buffers[3][CACHE_LINE_SIZE];
} *dma = (void *)_dma_coherent;
65 static void dma_transfer_chain(struct cmd_desc *chain)
67 uint32_t mstr_int_status;
69 write32(&sdm845_qspi->mstr_int_sts, 0xFFFFFFFF);
70 write32(&sdm845_qspi->next_dma_desc_addr, (uint32_t)(uintptr_t) chain);
72 while (1) {
73 mstr_int_status = read32(&sdm845_qspi->mstr_int_sts);
74 if (mstr_int_status & DMA_CHAIN_DONE)
75 break;
/*
 * Execute the currently queued descriptor chain, then complete any
 * reads: invalidate dcache over buffers the hardware DMAed into
 * directly, and copy bounced reads out to the caller's buffer.
 * Finally resets the allocator so a new chain can be built.
 */
static void flush_chain(void)
{
	struct cmd_desc *desc = &dma->descriptors[0];
	uint8_t *src;
	uint8_t *dst;

	dma_transfer_chain(desc);

	while (desc) {
		if (desc->direction == MASTER_READ) {
			if (desc->bounce_length == 0)
				/* Direct DMA into caller memory: drop stale
				 * cache lines so the CPU sees the new data. */
				dcache_invalidate_by_mva(
					(void *)(uintptr_t) desc->data_address,
					desc->length);
			else {
				/* Bounced read: copy from the coherent bounce
				 * buffer to the caller's destination. */
				src = (void *)(uintptr_t) desc->bounce_src;
				dst = (void *)(uintptr_t) desc->bounce_dst;
				memcpy(dst, src, desc->bounce_length);
			}
		}
		desc = (void *)(uintptr_t) desc->next_descriptor;
	}
	/* Chain fully consumed; start the next one from descriptor 0. */
	curr_desc_idx = -1;
}
104 static struct cmd_desc *allocate_descriptor(void)
106 struct cmd_desc *current;
107 struct cmd_desc *next;
108 uint8_t index;
110 current = (curr_desc_idx == -1) ?
111 NULL : &dma->descriptors[curr_desc_idx];
113 index = ++curr_desc_idx;
114 next = &dma->descriptors[index];
116 next->data_address = (uint32_t) (uintptr_t) dma->buffers[index];
118 next->next_descriptor = 0;
119 next->direction = MASTER_READ;
120 next->multi_io_mode = 0;
121 next->reserved1 = 0;
122 next->fragment = 0;
123 next->reserved2 = 0;
124 next->length = 0;
125 next->bounce_src = 0;
126 next->bounce_dst = 0;
127 next->bounce_length = 0;
129 if (current) {
130 current->next_descriptor = (uint32_t)(uintptr_t) next;
131 current->fragment = 1;
134 return next;
137 static void cs_change(enum cs_state state)
139 gpio_set(GPIO(90), state == CS_DEASSERT);
/*
 * Route the QSPI pins: GPIO 90 as a software-driven chip-select output
 * (initially high, i.e. deasserted), and GPIOs 91/92/95 muxed to their
 * QSPI data/clock functions with no pull and 2 mA drive strength.
 */
static void configure_gpios(void)
{
	gpio_output(GPIO(90), 1);

	gpio_configure(GPIO(91), GPIO91_FUNC_QSPI_DATA,
		GPIO_NO_PULL, GPIO_2MA, GPIO_ENABLE);

	gpio_configure(GPIO(92), GPIO92_FUNC_QSPI_DATA,
		GPIO_NO_PULL, GPIO_2MA, GPIO_ENABLE);

	gpio_configure(GPIO(95), GPIO95_FUNC_QSPI_CLK,
		GPIO_NO_PULL, GPIO_2MA, GPIO_ENABLE);
}
156 static void queue_bounce_data(uint8_t *data, uint32_t data_bytes,
157 enum qspi_mode data_mode, bool write)
159 struct cmd_desc *desc;
160 uint8_t *ptr;
162 desc = allocate_descriptor();
163 desc->direction = write;
164 desc->multi_io_mode = data_mode;
165 ptr = (void *)(uintptr_t) desc->data_address;
167 if (write) {
168 memcpy(ptr, data, data_bytes);
169 } else {
170 desc->bounce_src = (uint32_t)(uintptr_t) ptr;
171 desc->bounce_dst = (uint32_t)(uintptr_t) data;
172 desc->bounce_length = data_bytes;
175 desc->length = data_bytes;
178 static void queue_direct_data(uint8_t *data, uint32_t data_bytes,
179 enum qspi_mode data_mode, bool write)
181 struct cmd_desc *desc;
183 desc = allocate_descriptor();
184 desc->direction = write;
185 desc->multi_io_mode = data_mode;
186 desc->data_address = (uint32_t)(uintptr_t) data;
187 desc->length = data_bytes;
189 if (write)
190 dcache_clean_by_mva(data, data_bytes);
191 else
192 dcache_invalidate_by_mva(data, data_bytes);
/*
 * Split a transfer into up to three descriptors based on the
 * cache-line alignment of the caller's buffer:
 *   - prolog: unaligned head, bounced through a coherent buffer
 *   - aligned: cache-line-aligned middle, DMAed directly
 *   - epilog: unaligned tail, bounced
 * This way cache maintenance only ever touches whole lines owned by
 * the transfer (direct DMA), while partial lines go via bounce buffers.
 */
static void queue_data(uint8_t *data, uint32_t data_bytes,
			enum qspi_mode data_mode, bool write)
{
	uint8_t *aligned_ptr;
	uint8_t *epilog_ptr;
	uint32_t prolog_bytes, aligned_bytes, epilog_bytes;

	if (data_bytes == 0)
		return;

	aligned_ptr =
		(uint8_t *)ALIGN_UP((uintptr_t)data, CACHE_LINE_SIZE);

	/* MIN() handles the case where the whole buffer sits before the
	 * first cache-line boundary. */
	prolog_bytes = MIN(data_bytes, aligned_ptr - data);
	aligned_bytes = ALIGN_DOWN(data_bytes - prolog_bytes, CACHE_LINE_SIZE);
	epilog_bytes = data_bytes - prolog_bytes - aligned_bytes;

	epilog_ptr = data + prolog_bytes + aligned_bytes;

	if (prolog_bytes)
		queue_bounce_data(data, prolog_bytes, data_mode, write);
	if (aligned_bytes)
		queue_direct_data(aligned_ptr, aligned_bytes, data_mode, write);
	if (epilog_bytes)
		queue_bounce_data(epilog_ptr, epilog_bytes, data_mode, write);
}
/*
 * Program the QSPI master configuration: SPI mode 0, zero TX output
 * enable/data delays, DMA transfer mode, feedback clock and full-cycle
 * sampling enabled. Then mask and clear all interrupts, and reset the
 * read FIFO.
 */
static void reg_init(void)
{
	uint32_t spi_mode;
	uint32_t tx_data_oe_delay, tx_data_delay;
	uint32_t mstr_config;

	spi_mode = 0;

	tx_data_oe_delay = 0;
	tx_data_delay = 0;

	mstr_config = (tx_data_oe_delay << TX_DATA_OE_DELAY_SHIFT) |
		(tx_data_delay << TX_DATA_DELAY_SHIFT) | (SBL_EN) |
		(spi_mode << SPI_MODE_SHIFT) |
		(PIN_HOLDN) |
		(FB_CLK_EN) |
		(DMA_ENABLE) |
		(FULL_CYCLE_MODE);

	write32(&sdm845_qspi->mstr_cfg, mstr_config);
	/* NOTE(review): 0xA42 is an undocumented magic AHB-master config
	 * value -- meaning not derivable from this file. */
	write32(&sdm845_qspi->ahb_mstr_cfg, 0xA42);
	write32(&sdm845_qspi->mstr_int_en, 0x0);
	write32(&sdm845_qspi->mstr_int_sts, 0xFFFFFFFF);
	write32(&sdm845_qspi->rd_fifo_cfg, 0x0);
	write32(&sdm845_qspi->rd_fifo_rst, RESET_FIFO);
}
/*
 * One-time QSPI controller initialization: verify the cache-line-size
 * assumption baked into the DMA bounce buffers, configure the core
 * clock, the pin mux, and the controller registers.
 *
 * The core clock is programmed at 4x the requested SPI frequency --
 * presumably to feed an internal divider; confirm against
 * clock_configure_qspi() in the clock driver.
 */
void quadspi_init(uint32_t hz)
{
	assert(dcache_line_bytes() == CACHE_LINE_SIZE);
	clock_configure_qspi(hz * 4);
	configure_gpios();
	reg_init();
}
257 int sdm845_claim_bus(const struct spi_slave *slave)
259 cs_change(CS_ASSERT);
260 return 0;
263 void sdm845_release_bus(const struct spi_slave *slave)
265 cs_change(CS_DEASSERT);
268 static int xfer(enum qspi_mode mode, const void *dout, size_t out_bytes,
269 void *din, size_t in_bytes)
271 if ((out_bytes && !dout) || (in_bytes && !din) ||
272 (in_bytes && out_bytes)) {
273 return -1;
276 queue_data((uint8_t *) (out_bytes ? dout : din),
277 in_bytes | out_bytes, mode, !!out_bytes);
279 flush_chain();
281 return 0;
284 int sdm845_xfer(const struct spi_slave *slave, const void *dout,
285 size_t out_bytes, void *din, size_t in_bytes)
287 return xfer(SDR_1BIT, dout, out_bytes, din, in_bytes);
290 int sdm845_xfer_dual(const struct spi_slave *slave, const void *dout,
291 size_t out_bytes, void *din, size_t in_bytes)
293 return xfer(SDR_2BIT, dout, out_bytes, din, in_bytes);