blob: aead8debc915f722de671918fd6d1567a1130c6e [file] [log] [blame]
Angel Pons0612b272020-04-05 15:46:56 +02001/* SPDX-License-Identifier: GPL-2.0-only */
Barnali Sarkar89331cd2017-02-16 17:22:37 +05302
Kyösti Mälkki13f66502019-03-03 08:01:05 +02003#include <device/mmio.h>
Elyes HAOUASf97c1c92019-12-03 18:22:06 +01004#include <commonlib/helpers.h>
Barnali Sarkar89331cd2017-02-16 17:22:37 +05305#include <console/console.h>
6#include <fast_spi_def.h>
7#include <intelblocks/fast_spi.h>
Barnali Sarkar89331cd2017-02-16 17:22:37 +05308#include <soc/pci_devs.h>
9#include <spi_flash.h>
10#include <string.h>
11#include <timer.h>
12
/*
 * Helper to create a FAST_SPI context on API entry.
 * Declares a stack-allocated struct fast_spi_flash_ctx and a pointer 'ctx'
 * to it, then fills in the controller MMIO base via _fast_spi_flash_get_ctx().
 * Must appear where declarations are legal (top of the function body).
 */
#define BOILERPLATE_CREATE_CTX(ctx) \
	struct fast_spi_flash_ctx real_ctx; \
	struct fast_spi_flash_ctx *ctx = &real_ctx; \
	_fast_spi_flash_get_ctx(ctx)
18
19/*
20 * Anything that's not success is <0. Provided solely for readability, as these
21 * constants are not used outside this file.
22 */
23enum errors {
24 SUCCESS = 0,
25 E_TIMEOUT = -1,
26 E_HW_ERROR = -2,
27 E_ARGUMENT = -3,
28};
29
/* Reduce data-passing burden by grouping transaction data in a context. */
struct fast_spi_flash_ctx {
	uintptr_t mmio_base;	/* Base of the FAST_SPI controller's MMIO window */
};
34
35static void _fast_spi_flash_get_ctx(struct fast_spi_flash_ctx *ctx)
36{
37 ctx->mmio_base = (uintptr_t)fast_spi_get_bar();
38}
39
40/* Read register from the FAST_SPI flash controller. */
41static uint32_t fast_spi_flash_ctrlr_reg_read(struct fast_spi_flash_ctx *ctx,
42 uint16_t reg)
43{
44 uintptr_t addr = ALIGN_DOWN(ctx->mmio_base + reg, sizeof(uint32_t));
45 return read32((void *)addr);
46}
47
48/* Write to register in FAST_SPI flash controller. */
49static void fast_spi_flash_ctrlr_reg_write(struct fast_spi_flash_ctx *ctx,
50 uint16_t reg, uint32_t val)
51{
52 uintptr_t addr = ALIGN_DOWN(ctx->mmio_base + reg, sizeof(uint32_t));
53 write32((void *)addr, val);
54}
55
56/*
57 * The hardware datasheet is not clear on what HORD values actually do. It
58 * seems that HORD_SFDP provides access to the first 8 bytes of the SFDP, which
59 * is the signature and revision fields. HORD_JEDEC provides access to the
60 * actual flash parameters, and is most likely what you want to use when
61 * probing the flash from software.
62 * It's okay to rely on SFDP, since the SPI flash controller requires an SFDP
63 * 1.5 or newer compliant FAST_SPI flash chip.
64 * NOTE: Due to the register layout of the hardware, all accesses will be
65 * aligned to a 4 byte boundary.
66 */
67static uint32_t fast_spi_flash_read_sfdp_param(struct fast_spi_flash_ctx *ctx,
68 uint16_t sfdp_reg)
69{
70 uint32_t ptinx_index = sfdp_reg & SPIBAR_PTINX_IDX_MASK;
71 fast_spi_flash_ctrlr_reg_write(ctx, SPIBAR_PTINX,
72 ptinx_index | SPIBAR_PTINX_HORD_JEDEC);
73 return fast_spi_flash_ctrlr_reg_read(ctx, SPIBAR_PTDATA);
74}
75
76/* Fill FDATAn FIFO in preparation for a write transaction. */
77static void fill_xfer_fifo(struct fast_spi_flash_ctx *ctx, const void *data,
78 size_t len)
79{
80 /* YES! memcpy() works. FDATAn does not require 32-bit accesses. */
81 memcpy((void *)(ctx->mmio_base + SPIBAR_FDATA(0)), data, len);
82}
83
84/* Drain FDATAn FIFO after a read transaction populates data. */
85static void drain_xfer_fifo(struct fast_spi_flash_ctx *ctx, void *dest,
86 size_t len)
87{
88 /* YES! memcpy() works. FDATAn does not require 32-bit accesses. */
89 memcpy(dest, (void *)(ctx->mmio_base + SPIBAR_FDATA(0)), len);
90}
91
92/* Fire up a transfer using the hardware sequencer. */
93static void start_hwseq_xfer(struct fast_spi_flash_ctx *ctx,
94 uint32_t hsfsts_cycle, uint32_t flash_addr, size_t len)
95{
96 /* Make sure all W1C status bits get cleared. */
97 uint32_t hsfsts = SPIBAR_HSFSTS_W1C_BITS;
98 /* Set up transaction parameters. */
99 hsfsts |= hsfsts_cycle & SPIBAR_HSFSTS_FCYCLE_MASK;
100 hsfsts |= SPIBAR_HSFSTS_FDBC(len - 1);
101
102 fast_spi_flash_ctrlr_reg_write(ctx, SPIBAR_FADDR, flash_addr);
103 fast_spi_flash_ctrlr_reg_write(ctx, SPIBAR_HSFSTS_CTL,
104 hsfsts | SPIBAR_HSFSTS_FGO);
105}
106
/*
 * Poll HSFSTS_CTL until the hardware sequencer reports completion, a cycle
 * error, or the timeout elapses. 'flash_addr' is only used for diagnostics.
 * Returns SUCCESS, E_HW_ERROR on FCERR, or E_TIMEOUT on expiry.
 */
static int wait_for_hwseq_xfer(struct fast_spi_flash_ctx *ctx,
	uint32_t flash_addr)
{
	struct stopwatch sw;
	uint32_t hsfsts;

	stopwatch_init_msecs_expire(&sw, SPIBAR_HWSEQ_XFER_TIMEOUT_MS);
	do {
		hsfsts = fast_spi_flash_ctrlr_reg_read(ctx, SPIBAR_HSFSTS_CTL);

		/* Cycle error takes precedence over completion. */
		if (hsfsts & SPIBAR_HSFSTS_FCERR) {
			printk(BIOS_ERR, "SPI Transaction Error at Flash Offset %x HSFSTS = 0x%08x\n",
				flash_addr, hsfsts);
			return E_HW_ERROR;
		}

		if (hsfsts & SPIBAR_HSFSTS_FDONE)
			return SUCCESS;
	} while (!(stopwatch_expired(&sw)));

	/* NOTE(review): status is not re-read after expiry, so a transfer
	   completing between the last read and the expiry check is reported
	   as a timeout — presumed acceptable; confirm before changing. */
	printk(BIOS_ERR, "SPI Transaction Timeout (Exceeded %d ms) at Flash Offset %x HSFSTS = 0x%08x\n",
		SPIBAR_HWSEQ_XFER_TIMEOUT_MS, flash_addr, hsfsts);
	return E_TIMEOUT;
}
131
/*
 * Execute one FAST_SPI flash transfer and block until it finishes.
 * Returns SUCCESS, or a negative error from wait_for_hwseq_xfer().
 */
static int exec_sync_hwseq_xfer(struct fast_spi_flash_ctx *ctx,
				uint32_t hsfsts_cycle, uint32_t flash_addr,
				size_t len)
{
	int status;

	start_hwseq_xfer(ctx, hsfsts_cycle, flash_addr, len);
	status = wait_for_hwseq_xfer(ctx, flash_addr);

	return status;
}
140
141/*
142 * Ensure read/write xfer len is not greater than SPIBAR_FDATA_FIFO_SIZE and
Furquan Shaikhfc1a1232017-05-12 00:19:56 -0700143 * that the operation does not cross page boundary.
Barnali Sarkar89331cd2017-02-16 17:22:37 +0530144 */
Furquan Shaikhfc1a1232017-05-12 00:19:56 -0700145static size_t get_xfer_len(const struct spi_flash *flash, uint32_t addr,
146 size_t len)
Barnali Sarkar89331cd2017-02-16 17:22:37 +0530147{
Elyes HAOUASf97c1c92019-12-03 18:22:06 +0100148 size_t xfer_len = MIN(len, SPIBAR_FDATA_FIFO_SIZE);
Furquan Shaikhfc1a1232017-05-12 00:19:56 -0700149 size_t bytes_left = ALIGN_UP(addr, flash->page_size) - addr;
Barnali Sarkar89331cd2017-02-16 17:22:37 +0530150
151 if (bytes_left)
Elyes HAOUASf97c1c92019-12-03 18:22:06 +0100152 xfer_len = MIN(xfer_len, bytes_left);
Barnali Sarkar89331cd2017-02-16 17:22:37 +0530153
154 return xfer_len;
155}
156
Barnali Sarkar89331cd2017-02-16 17:22:37 +0530157static int fast_spi_flash_erase(const struct spi_flash *flash,
158 uint32_t offset, size_t len)
159{
160 int ret;
161 size_t erase_size;
162 uint32_t erase_cycle;
163
164 BOILERPLATE_CREATE_CTX(ctx);
165
166 if (!IS_ALIGNED(offset, 4 * KiB) || !IS_ALIGNED(len, 4 * KiB)) {
167 printk(BIOS_ERR, "BUG! SPI erase region not sector aligned\n");
168 return E_ARGUMENT;
169 }
170
171 while (len) {
172 if (IS_ALIGNED(offset, 64 * KiB) && (len >= 64 * KiB)) {
173 erase_size = 64 * KiB;
174 erase_cycle = SPIBAR_HSFSTS_CYCLE_64K_ERASE;
175 } else {
176 erase_size = 4 * KiB;
177 erase_cycle = SPIBAR_HSFSTS_CYCLE_4K_ERASE;
178 }
179 printk(BIOS_SPEW, "Erasing flash addr %x + %zu KiB\n",
180 offset, erase_size / KiB);
181
182 ret = exec_sync_hwseq_xfer(ctx, erase_cycle, offset, 0);
183 if (ret != SUCCESS)
184 return ret;
185
186 offset += erase_size;
187 len -= erase_size;
188 }
189
190 return SUCCESS;
191}
192
193static int fast_spi_flash_read(const struct spi_flash *flash,
194 uint32_t addr, size_t len, void *buf)
195{
196 int ret;
197 size_t xfer_len;
198 uint8_t *data = buf;
199
200 BOILERPLATE_CREATE_CTX(ctx);
201
202 while (len) {
Furquan Shaikhfc1a1232017-05-12 00:19:56 -0700203 xfer_len = get_xfer_len(flash, addr, len);
Barnali Sarkar89331cd2017-02-16 17:22:37 +0530204
205 ret = exec_sync_hwseq_xfer(ctx, SPIBAR_HSFSTS_CYCLE_READ,
206 addr, xfer_len);
207 if (ret != SUCCESS)
208 return ret;
209
210 drain_xfer_fifo(ctx, data, xfer_len);
211
212 addr += xfer_len;
213 data += xfer_len;
214 len -= xfer_len;
215 }
216
217 return SUCCESS;
218}
219
220static int fast_spi_flash_write(const struct spi_flash *flash,
221 uint32_t addr, size_t len, const void *buf)
222{
223 int ret;
224 size_t xfer_len;
225 const uint8_t *data = buf;
226
227 BOILERPLATE_CREATE_CTX(ctx);
228
229 while (len) {
Furquan Shaikhfc1a1232017-05-12 00:19:56 -0700230 xfer_len = get_xfer_len(flash, addr, len);
Barnali Sarkar89331cd2017-02-16 17:22:37 +0530231 fill_xfer_fifo(ctx, data, xfer_len);
232
233 ret = exec_sync_hwseq_xfer(ctx, SPIBAR_HSFSTS_CYCLE_WRITE,
234 addr, xfer_len);
235 if (ret != SUCCESS)
236 return ret;
237
238 addr += xfer_len;
239 data += xfer_len;
240 len -= xfer_len;
241 }
242
243 return SUCCESS;
244}
245
246static int fast_spi_flash_status(const struct spi_flash *flash,
247 uint8_t *reg)
248{
249 int ret;
250 BOILERPLATE_CREATE_CTX(ctx);
251
252 ret = exec_sync_hwseq_xfer(ctx, SPIBAR_HSFSTS_CYCLE_RD_STATUS, 0,
253 sizeof(*reg));
254 if (ret != SUCCESS)
255 return ret;
256
257 drain_xfer_fifo(ctx, reg, sizeof(*reg));
258 return ret;
259}
260
Furquan Shaikhe2fc5e22017-05-17 17:26:01 -0700261const struct spi_flash_ops fast_spi_flash_ops = {
262 .read = fast_spi_flash_read,
263 .write = fast_spi_flash_write,
264 .erase = fast_spi_flash_erase,
265 .status = fast_spi_flash_status,
266};
267
Barnali Sarkar89331cd2017-02-16 17:22:37 +0530268/*
269 * We can't use FDOC and FDOD to read FLCOMP, as previous platforms did.
270 * For details see:
271 * Ch 31, SPI: p. 194
272 * The size of the flash component is always taken from density field in the
273 * SFDP table. FLCOMP.C0DEN is no longer used by the Flash Controller.
274 */
Furquan Shaikha1491572017-05-17 19:14:06 -0700275static int fast_spi_flash_probe(const struct spi_slave *dev,
276 struct spi_flash *flash)
Barnali Sarkar89331cd2017-02-16 17:22:37 +0530277{
278 BOILERPLATE_CREATE_CTX(ctx);
Barnali Sarkar89331cd2017-02-16 17:22:37 +0530279 uint32_t flash_bits;
280
Barnali Sarkar89331cd2017-02-16 17:22:37 +0530281 /*
282 * bytes = (bits + 1) / 8;
283 * But we need to do the addition in a way which doesn't overflow for
284 * 4 Gbit devices (flash_bits == 0xffffffff).
285 */
286 flash_bits = fast_spi_flash_read_sfdp_param(ctx, 0x04);
287 flash->size = (flash_bits >> 3) + 1;
288
289 memcpy(&flash->spi, dev, sizeof(*dev));
Barnali Sarkar89331cd2017-02-16 17:22:37 +0530290
291 /* Can erase both 4 KiB and 64 KiB chunks. Declare the smaller size. */
292 flash->sector_size = 4 * KiB;
Furquan Shaikhfc1a1232017-05-12 00:19:56 -0700293 flash->page_size = 256;
Barnali Sarkar89331cd2017-02-16 17:22:37 +0530294 /*
295 * FIXME: Get erase+cmd, and status_cmd from SFDP.
296 *
297 * flash->erase_cmd = ???
298 * flash->status_cmd = ???
299 */
300
Furquan Shaikhe2fc5e22017-05-17 17:26:01 -0700301 flash->ops = &fast_spi_flash_ops;
Furquan Shaikh30221b42017-05-15 14:35:15 -0700302 return 0;
Barnali Sarkar89331cd2017-02-16 17:22:37 +0530303}
304
Furquan Shaikhf1db5fd2017-05-02 19:43:20 -0700305static int fast_spi_flash_ctrlr_setup(const struct spi_slave *dev)
306{
307 if (dev->cs != 0) {
308 printk(BIOS_ERR, "%s: Invalid CS for fast SPI bus=0x%x,cs=0x%x!\n",
309 __func__, dev->bus, dev->cs);
310 return -1;
311 }
312
313 return 0;
314}
315
Aaron Durbin2b96f422017-12-14 15:32:37 -0700316#define SPI_FPR_SHIFT 12
317#define SPI_FPR_MASK 0x7fff
318#define SPI_FPR_BASE_SHIFT 0
319#define SPI_FPR_LIMIT_SHIFT 16
320#define SPI_FPR_RPE (1 << 15) /* Read Protect */
321#define SPI_FPR_WPE (1 << 31) /* Write Protect */
322#define SPI_FPR(base, limit) \
323 (((((limit) >> SPI_FPR_SHIFT) & SPI_FPR_MASK) << SPI_FPR_LIMIT_SHIFT) |\
324 ((((base) >> SPI_FPR_SHIFT) & SPI_FPR_MASK) << SPI_FPR_BASE_SHIFT))
325
326/*
327 * Protect range of SPI flash defined by [start, start+size-1] using Flash
328 * Protected Range (FPR) register if available.
329 */
330static int fast_spi_flash_protect(const struct spi_flash *flash,
Rizwan Qureshif9f50932018-12-31 15:19:16 +0530331 const struct region *region,
332 const enum ctrlr_prot_type type)
Aaron Durbin2b96f422017-12-14 15:32:37 -0700333{
334 u32 start = region_offset(region);
335 u32 end = start + region_sz(region) - 1;
336 u32 reg;
Rizwan Qureshif9f50932018-12-31 15:19:16 +0530337 u32 protect_mask = 0;
Aaron Durbin2b96f422017-12-14 15:32:37 -0700338 int fpr;
339 uintptr_t fpr_base;
340 BOILERPLATE_CREATE_CTX(ctx);
341
342 fpr_base = ctx->mmio_base + SPIBAR_FPR_BASE;
343
344 /* Find first empty FPR */
345 for (fpr = 0; fpr < SPIBAR_FPR_MAX; fpr++) {
346 reg = read32((void *)fpr_base);
347 if (reg == 0)
348 break;
349 fpr_base += sizeof(uint32_t);
350 }
351
352 if (fpr >= SPIBAR_FPR_MAX) {
Julius Wernere9665952022-01-21 17:06:20 -0800353 printk(BIOS_ERR, "No SPI FPR free!\n");
Aaron Durbin2b96f422017-12-14 15:32:37 -0700354 return -1;
355 }
356
Rizwan Qureshif9f50932018-12-31 15:19:16 +0530357 switch (type) {
358 case WRITE_PROTECT:
359 protect_mask |= SPI_FPR_WPE;
360 break;
361 case READ_PROTECT:
362 protect_mask |= SPI_FPR_RPE;
363 break;
364 case READ_WRITE_PROTECT:
365 protect_mask |= (SPI_FPR_RPE | SPI_FPR_WPE);
366 break;
367 default:
Julius Wernere9665952022-01-21 17:06:20 -0800368 printk(BIOS_ERR, "Seeking invalid protection!\n");
Rizwan Qureshif9f50932018-12-31 15:19:16 +0530369 return -1;
370 }
371
Aaron Durbin2b96f422017-12-14 15:32:37 -0700372 /* Set protected range base and limit */
Rizwan Qureshif9f50932018-12-31 15:19:16 +0530373 reg = SPI_FPR(start, end) | protect_mask;
Aaron Durbin2b96f422017-12-14 15:32:37 -0700374
375 /* Set the FPR register and verify it is protected */
376 write32((void *)fpr_base, reg);
377 reg = read32((void *)fpr_base);
Rizwan Qureshif9f50932018-12-31 15:19:16 +0530378 if (!(reg & protect_mask)) {
Julius Wernere9665952022-01-21 17:06:20 -0800379 printk(BIOS_ERR, "Unable to set SPI FPR %d\n", fpr);
Aaron Durbin2b96f422017-12-14 15:32:37 -0700380 return -1;
381 }
382
383 printk(BIOS_INFO, "%s: FPR %d is enabled for range 0x%08x-0x%08x\n",
384 __func__, fpr, start, end);
385 return 0;
386}
387
Furquan Shaikhf1db5fd2017-05-02 19:43:20 -0700388const struct spi_ctrlr fast_spi_flash_ctrlr = {
389 .setup = fast_spi_flash_ctrlr_setup,
Furquan Shaikhde705fa2017-04-19 19:27:28 -0700390 .max_xfer_size = SPI_CTRLR_DEFAULT_MAX_XFER_SIZE,
Furquan Shaikha1491572017-05-17 19:14:06 -0700391 .flash_probe = fast_spi_flash_probe,
Aaron Durbin2b96f422017-12-14 15:32:37 -0700392 .flash_protect = fast_spi_flash_protect,
Furquan Shaikhf1db5fd2017-05-02 19:43:20 -0700393};