2 * This file is part of the coreboot project.
4 * Copyright (C) 2016 Intel Corp.
5 * (Written by Alexandru Gagniuc <alexandrux.gagniuc@intel.com> for Intel Corp.)
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License as published by
9 * the Free Software Foundation; either version 2 of the License, or
10 * (at your option) any later version.
12 * This program is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
18 #define __SIMPLE_DEVICE__
20 #include <arch/early_variables.h>
22 #include <console/console.h>
23 #include <device/device.h>
24 #include <device/pci.h>
25 #include <soc/intel/common/spi_flash.h>
26 #include <soc/pci_devs.h>
28 #include <spi_flash.h>
29 #include <spi-generic.h>
/*
 * Helper to create a SPI context on API entry.
 * Declares a context on the caller's stack and initializes it via
 * _spi_get_ctx(); every public entry point below relies on this, so the
 * final initialization call must be part of the macro (the visible chunk
 * ended in a dangling '\' continuation with the call lost).
 */
#define BOILERPLATE_CREATE_CTX(ctx)		\
	struct spi_ctx real_ctx;		\
	struct spi_ctx *ctx = &real_ctx;	\
	_spi_get_ctx(ctx)
40 * Anything that's not success is <0. Provided solely for readability, as these
41 * constants are not used outside this file.
45 E_NOT_IMPLEMENTED
= -1,
51 /* Reduce data-passing burden by grouping transaction data in a context. */
55 uint32_t hsfsts_on_last_error
;
58 static void _spi_get_ctx(struct spi_ctx
*ctx
)
62 /* FIXME: use device definition */
63 ctx
->pci_dev
= SPI_DEV
;
65 bar
= pci_read_config32(ctx
->pci_dev
, PCI_BASE_ADDRESS_0
);
66 ctx
->mmio_base
= bar
& ~PCI_BASE_ADDRESS_MEM_ATTR_MASK
;
67 ctx
->hsfsts_on_last_error
= 0;
70 /* Read register from the SPI controller. 'reg' is the register offset. */
71 static uint32_t _spi_ctrlr_reg_read(struct spi_ctx
*ctx
, uint16_t reg
)
73 uintptr_t addr
= ALIGN_DOWN(ctx
->mmio_base
+ reg
, 4);
74 return read32((void *)addr
);
77 uint32_t spi_ctrlr_reg_read(uint16_t reg
)
79 BOILERPLATE_CREATE_CTX(ctx
);
80 return _spi_ctrlr_reg_read(ctx
, reg
);
83 /* Write to register in the SPI controller. 'reg' is the register offset. */
84 static void _spi_ctrlr_reg_write(struct spi_ctx
*ctx
, uint16_t reg
,
87 uintptr_t addr
= ALIGN_DOWN(ctx
->mmio_base
+ reg
, 4);
88 write32((void *)addr
, val
);
93 * The hardware datasheet is not clear on what HORD values actually do. It
94 * seems that HORD_SFDP provides access to the first 8 bytes of the SFDP, which
95 * is the signature and revision fields. HORD_JEDEC provides access to the
96 * actual flash parameters, and is most likely what you want to use when
97 * probing the flash from software.
98 * It's okay to rely on SFPD, since the SPI controller requires an SFDP 1.5 or
99 * newer compliant SPI chip.
100 * NOTE: Due to the register layout of the hardware, all accesses will be
101 * aligned to a 4 byte boundary.
103 static uint32_t read_spi_sfdp_param(struct spi_ctx
*ctx
, uint16_t sfdp_reg
)
105 uint32_t ptinx_index
= sfdp_reg
& SPIBAR_PTINX_IDX_MASK
;
106 _spi_ctrlr_reg_write(ctx
, SPIBAR_PTINX
,
107 ptinx_index
| SPIBAR_PTINX_HORD_JEDEC
);
108 return _spi_ctrlr_reg_read(ctx
, SPIBAR_PTDATA
);
111 /* Fill FDATAn FIFO in preparation for a write transaction. */
112 static void fill_xfer_fifo(struct spi_ctx
*ctx
, const void *data
, size_t len
)
114 len
= min(len
, SPIBAR_FDATA_FIFO_SIZE
);
116 /* YES! memcpy() works. FDATAn does not require 32-bit accesses. */
117 memcpy((void*)(ctx
->mmio_base
+ SPIBAR_FDATA(0)), data
, len
);
120 /* Drain FDATAn FIFO after a read transaction populates data. */
121 static void drain_xfer_fifo(struct spi_ctx
*ctx
, void *dest
, size_t len
)
123 len
= min(len
, SPIBAR_FDATA_FIFO_SIZE
);
125 /* YES! memcpy() works. FDATAn does not require 32-bit accesses. */
126 memcpy(dest
, (void*)(ctx
->mmio_base
+ SPIBAR_FDATA(0)), len
);
129 /* Fire up a transfer using the hardware sequencer. */
130 static void start_hwseq_xfer(struct spi_ctx
*ctx
, uint32_t hsfsts_cycle
,
131 uint32_t flash_addr
, size_t len
)
133 /* Make sure all W1C status bits get cleared. */
134 uint32_t hsfsts
= SPIBAR_HSFSTS_W1C_BITS
;
135 /* Set up transaction parameters. */
136 hsfsts
|= hsfsts_cycle
& SPIBAR_HSFSTS_FCYCLE_MASK
;
137 hsfsts
|= SPIBAR_HSFSTS_FBDC(len
- 1);
139 _spi_ctrlr_reg_write(ctx
, SPIBAR_FADDR
, flash_addr
);
140 _spi_ctrlr_reg_write(ctx
, SPIBAR_HSFSTS_CTL
,
141 hsfsts
| SPIBAR_HSFSTS_FGO
);
144 static void print_xfer_error(struct spi_ctx
*ctx
, const char *failure_reason
,
147 printk(BIOS_ERR
, "SPI Transaction %s at flash offset %x.\n"
148 "\tHSFSTS = 0x%08x\n",
149 failure_reason
, flash_addr
, ctx
->hsfsts_on_last_error
);
152 static int wait_for_hwseq_xfer(struct spi_ctx
*ctx
)
156 hsfsts
= _spi_ctrlr_reg_read(ctx
, SPIBAR_HSFSTS_CTL
);
158 if (hsfsts
& SPIBAR_HSFSTS_FCERR
) {
159 ctx
->hsfsts_on_last_error
= hsfsts
;
162 /* TODO: set up timer and abort on timeout */
163 } while (!(hsfsts
& SPIBAR_HSFSTS_FDONE
));
168 /* Execute SPI transfer. This is a blocking call. */
169 static int exec_sync_hwseq_xfer(struct spi_ctx
*ctx
, uint32_t hsfsts_cycle
,
170 uint32_t flash_addr
, size_t len
)
173 start_hwseq_xfer(ctx
, hsfsts_cycle
, flash_addr
, len
);
174 ret
= wait_for_hwseq_xfer(ctx
);
175 if (ret
!= SUCCESS
) {
176 const char *reason
= (ret
== E_TIMEOUT
) ? "timeout" : "error";
177 print_xfer_error(ctx
, reason
, flash_addr
);
182 unsigned int spi_crop_chunk(unsigned int cmd_len
, unsigned int buf_len
)
184 return MIN(buf_len
, SPIBAR_FDATA_FIFO_SIZE
);
188 * Write-protection status for BIOS region (BIOS_CONTROL register):
189 * EISS/WPD bits 00 01 10 11
191 * normal mode RO RW RO RO
192 * SMM mode RO RW RO RW
/*
 * NOTE(review): the enclosing function's name/signature, opening brace and
 * the 'bios_ctl' declaration (original lines ~193-199) are missing from
 * this chunk. Presumably this is the SPI init / BIOS write-enable entry
 * point -- confirm against the full file before editing.
 */
198 BOILERPLATE_CREATE_CTX(ctx
);
/* Read BIOS_CONTROL from the SPI controller's PCI config space. */
200 bios_ctl
= pci_read_config32(ctx
->pci_dev
, SPIBAR_BIOS_CONTROL
);
/* Per the table above: WPD=1, EISS=0 selects the writable (RW) mode. */
201 bios_ctl
|= SPIBAR_BIOS_CONTROL_WPD
;
202 bios_ctl
&= ~SPIBAR_BIOS_CONTROL_EISS
;
204 /* Enable Prefetching and caching. */
205 bios_ctl
|= SPIBAR_BIOS_CONTROL_PREFETCH_ENABLE
;
206 bios_ctl
&= ~SPIBAR_BIOS_CONTROL_CACHE_DISABLE
;
/* Commit the updated control value. */
208 pci_write_config32(ctx
->pci_dev
, SPIBAR_BIOS_CONTROL
, bios_ctl
);
/*
 * NOTE(review): this definition lost several lines in extraction (the
 * 'len' parameter, opening brace, 'erase_size'/'ret' declarations, the
 * erase loop header, error/return paths and 'len' bookkeeping). The
 * comments below annotate only what is visible; confirm against the full
 * file before modifying.
 */
211 static int nuclear_spi_erase(const struct spi_flash
*flash
, uint32_t offset
,
/* Cycle type chosen per iteration: 4 KiB or 64 KiB block erase. */
216 uint32_t erase_cycle
;
218 BOILERPLATE_CREATE_CTX(ctx
);
/* Hardware erases whole sectors only; reject unaligned regions. */
220 if (!IS_ALIGNED(offset
, 4 * KiB
) || !IS_ALIGNED(len
, 4 * KiB
)) {
221 printk(BIOS_ERR
, "BUG! SPI erase region not sector aligned.\n");
/* Prefer the larger 64 KiB erase when offset and remaining length allow. */
226 if (IS_ALIGNED(offset
, 64 * KiB
) && (len
>= 64 * KiB
)) {
227 erase_size
= 64 * KiB
;
228 erase_cycle
= SPIBAR_HSFSTS_CYCLE_64K_ERASE
;
230 erase_size
= 4 * KiB
;
231 erase_cycle
= SPIBAR_HSFSTS_CYCLE_4K_ERASE
;
233 printk(BIOS_SPEW
, "Erasing flash addr %x + %zu KiB\n",
234 offset
, erase_size
/ KiB
);
/* Erase cycles carry no data, so the transfer length field is 0. */
236 ret
= exec_sync_hwseq_xfer(ctx
, erase_cycle
, offset
, 0);
240 offset
+= erase_size
;
248 * Ensure read/write xfer len is not greater than SPIBAR_FDATA_FIFO_SIZE and
249 * that the operation does not cross 256-byte boundary.
251 static size_t get_xfer_len(uint32_t addr
, size_t len
)
253 size_t xfer_len
= min(len
, SPIBAR_FDATA_FIFO_SIZE
);
254 size_t bytes_left
= ALIGN_UP(addr
, 256) - addr
;
257 xfer_len
= min(xfer_len
, bytes_left
);
/*
 * NOTE(review): the transfer loop of this function was lost in extraction
 * (declarations of 'ret'/'xfer_len'/'data', the loop header, the tail of
 * the read-cycle call, error check, pointer/length advancement and final
 * return). Only the visible code is annotated; confirm against the full
 * file before modifying.
 */
262 static int nuclear_spi_read(const struct spi_flash
*flash
, uint32_t addr
,
263 size_t len
, void *buf
)
269 BOILERPLATE_CREATE_CTX(ctx
);
/* Clamp each chunk to the FIFO size and the next 256-byte boundary. */
272 xfer_len
= get_xfer_len(addr
, len
);
274 ret
= exec_sync_hwseq_xfer(ctx
, SPIBAR_HSFSTS_CYCLE_READ
,
279 drain_xfer_fifo(ctx
, data
, xfer_len
);
/*
 * NOTE(review): truncated in extraction -- the loop header, 'ret'/'xfer_len'
 * declarations, the tail of the write-cycle call, error handling,
 * pointer/length advancement and the return are missing. Only visible code
 * is annotated; confirm against the full file before modifying.
 */
289 static int nuclear_spi_write(const struct spi_flash
*flash
, uint32_t addr
,
290 size_t len
, const void *buf
)
294 const uint8_t *data
= buf
;
296 BOILERPLATE_CREATE_CTX(ctx
);
/* Chunk to FIFO size / 256-byte boundary, then stage data in FDATAn
 * before kicking off the write cycle. */
299 xfer_len
= get_xfer_len(addr
, len
);
300 fill_xfer_fifo(ctx
, data
, xfer_len
);
302 ret
= exec_sync_hwseq_xfer(ctx
, SPIBAR_HSFSTS_CYCLE_WRITE
,
/*
 * NOTE(review): truncated in extraction -- the opening brace, 'int ret;',
 * the length argument closing the exec_sync_hwseq_xfer() call, the error
 * check and the return are missing. Only visible code is annotated.
 */
315 static int nuclear_spi_status(const struct spi_flash
*flash
, uint8_t *reg
)
318 BOILERPLATE_CREATE_CTX(ctx
);
/* Issue a read-status cycle; the flash address is irrelevant (0). */
320 ret
= exec_sync_hwseq_xfer(ctx
, SPIBAR_HSFSTS_CYCLE_RD_STATUS
, 0,
325 drain_xfer_fifo(ctx
, reg
, sizeof(*reg
));
/* Backing store for the probed flash descriptor; CAR_GLOBAL keeps it usable
 * while still executing from cache-as-RAM. */
329 static struct spi_flash boot_flash CAR_GLOBAL
;
/*
 * NOTE(review): several lines of this probe routine were lost in extraction
 * (the comment opener, function opening brace, 'flash_bits' declaration,
 * erase/status command setup and the final 'return flash;'). Only the
 * visible code is annotated; confirm against the full file before edits.
 */
332 * We can't use FDOC and FDOD to read FLCOMP, as previous platforms did.
335 * The size of the flash component is always taken from density field in the
336 * SFDP table. FLCOMP.C0DEN is no longer used by the Flash Controller.
338 struct spi_flash
*spi_flash_programmer_probe(struct spi_slave
*spi
, int force
)
340 BOILERPLATE_CREATE_CTX(ctx
);
341 struct spi_flash
*flash
;
344 flash
= car_get_var_ptr(&boot_flash
);
347 * bytes = (bits + 1) / 8;
348 * But we need to do the addition in a way which doesn't overflow for
349 * 4 Gbit devices (flash_bits == 0xffffffff).
351 /* FIXME: Don't hardcode 0x04 ? */
352 flash_bits
= read_spi_sfdp_param(ctx
, 0x04);
353 flash
->size
= (flash_bits
>> 3) + 1;
355 memcpy(&flash
->spi
, spi
, sizeof(*spi
));
357 flash
->name
= "Apollolake hardware sequencer";
359 /* Can erase both 4 KiB and 64 KiB chunks. Declare the smaller size. */
360 flash
->sector_size
= 4 * KiB
;
362 * FIXME: Get erase+cmd, and status_cmd from SFDP.
364 * flash->erase_cmd = ???
365 * flash->status_cmd = ???
/* Hook up the hardware-sequencer backends for all flash operations. */
368 flash
->internal_write
= nuclear_spi_write
;
369 flash
->internal_erase
= nuclear_spi_erase
;
370 flash
->internal_read
= nuclear_spi_read
;
371 flash
->internal_status
= nuclear_spi_status
;
/*
 * NOTE(review): body truncated in extraction -- the opening brace, the
 * rejection return for a non-0/0 bus/CS pair, and the slave-field setup /
 * success return are missing from this chunk. Only visible code annotated.
 */
376 int spi_setup_slave(unsigned int bus
, unsigned int cs
, struct spi_slave
*slave
)
378 BOILERPLATE_CREATE_CTX(ctx
);
380 /* This is special hardware. We expect bus 0 and CS line 0 here. */
381 if ((bus
!= 0) || (cs
!= 0))
391 int spi_read_status(uint8_t *status
)
393 BOILERPLATE_CREATE_CTX(ctx
);
395 if (exec_sync_hwseq_xfer(ctx
, SPIBAR_HSFSTS_CYCLE_RD_STATUS
, 0,
396 sizeof(*status
)) != SUCCESS
)
399 drain_xfer_fifo(ctx
, status
, sizeof(*status
));
404 int spi_flash_get_fpr_info(struct fpr_info
*info
)
406 BOILERPLATE_CREATE_CTX(ctx
);
411 info
->base
= ctx
->mmio_base
+ SPIBAR_FPR_BASE
;
412 info
->max
= SPIBAR_FPR_MAX
;