/* SPDX-License-Identifier: GPL-2.0-only */

#include <device/mmio.h>
#include <commonlib/helpers.h>
#include <console/console.h>
#include <fast_spi_def.h>
#include <intelblocks/fast_spi.h>
#include <soc/pci_devs.h>
#include <spi_flash.h>
#include <string.h>
#include <timer.h>

/* Helper to create a FAST_SPI context on API entry. */
#define BOILERPLATE_CREATE_CTX(ctx)		\
	struct fast_spi_flash_ctx real_ctx;	\
	struct fast_spi_flash_ctx *ctx = &real_ctx;	\
	_fast_spi_flash_get_ctx(ctx)

/*
 * Anything that's not success is <0. Provided solely for readability, as these
 * constants are not used outside this file.
 */
enum errors {
	SUCCESS = 0,
	E_TIMEOUT = -1,
	E_HW_ERROR = -2,
	E_ARGUMENT = -3,
};

/* Reduce data-passing burden by grouping transaction data in a context. */
struct fast_spi_flash_ctx {
	uintptr_t mmio_base;
};

static void _fast_spi_flash_get_ctx(struct fast_spi_flash_ctx *ctx)
{
	ctx->mmio_base = (uintptr_t)fast_spi_get_bar();
}

/* Read register from the FAST_SPI flash controller. */
static uint32_t fast_spi_flash_ctrlr_reg_read(struct fast_spi_flash_ctx *ctx,
	uint16_t reg)
{
	uintptr_t addr = ALIGN_DOWN(ctx->mmio_base + reg, sizeof(uint32_t));
	return read32p(addr);
}

/* Write to register in FAST_SPI flash controller. */
static void fast_spi_flash_ctrlr_reg_write(struct fast_spi_flash_ctx *ctx,
	uint16_t reg, uint32_t val)
{
	uintptr_t addr = ALIGN_DOWN(ctx->mmio_base + reg, sizeof(uint32_t));
	write32p(addr, val);
}

/*
 * Via the component field (bits 15-14) we can select either the 1st or the
 * 2nd flash (in dual flash setups).
 * Via HORD - Header or Data (bits 13-12) - we can select either:
 * - SFDP Header
 * - Param Table Header
 * - Data (JEDEC params, including density)
 *
 * It's okay to rely on SFDP, since the SPI flash controller requires an SFDP
 * 1.5 or newer compliant FAST_SPI flash chip.
 * NOTE: Due to the register layout of the hardware, all accesses will be
 * aligned to a 4 byte boundary.
 */
static uint32_t fast_spi_flash_read_sfdp(struct fast_spi_flash_ctx *ctx,
	uint32_t ptinx_reg)
{
	fast_spi_flash_ctrlr_reg_write(ctx, SPIBAR_PTINX, ptinx_reg);
	return fast_spi_flash_ctrlr_reg_read(ctx, SPIBAR_PTDATA);
}
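
/*
 * Example (a sketch of a typical call): to read the JEDEC density dword of
 * the first flash component, combine the component and HORD selections with
 * the dword index, e.g.
 *   fast_spi_flash_read_sfdp(ctx,
 *	SPIBAR_PTINX_COMP_0 | SPIBAR_PTINX_HORD_JEDEC | SFDP_PARAM_DENSITY);
 * which is what fast_spi_flash_probe() below does.
 */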

/* Fill FDATAn FIFO in preparation for a write transaction. */
static void fill_xfer_fifo(struct fast_spi_flash_ctx *ctx, const void *data,
	size_t len)
{
	/* YES! memcpy() works. FDATAn does not require 32-bit accesses. */
	memcpy((void *)(ctx->mmio_base + SPIBAR_FDATA(0)), data, len);
}

/* Drain FDATAn FIFO after a read transaction populates data. */
static void drain_xfer_fifo(struct fast_spi_flash_ctx *ctx, void *dest,
	size_t len)
{
	/* YES! memcpy() works. FDATAn does not require 32-bit accesses. */
	memcpy(dest, (void *)(ctx->mmio_base + SPIBAR_FDATA(0)), len);
}

/* Fire up a transfer using the hardware sequencer. */
static void start_hwseq_xfer(struct fast_spi_flash_ctx *ctx,
	uint32_t hsfsts_cycle, uint32_t flash_addr, size_t len)
{
	/* Make sure all W1C status bits get cleared. */
	uint32_t hsfsts = SPIBAR_HSFSTS_W1C_BITS;
	/* Set up transaction parameters. */
	hsfsts |= hsfsts_cycle & SPIBAR_HSFSTS_FCYCLE_MASK;
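	/* FDBC holds (transfer byte count - 1). For erase cycles len is 0 and the field is presumably ignored by hardware. */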
	hsfsts |= SPIBAR_HSFSTS_FDBC(len - 1);

	fast_spi_flash_ctrlr_reg_write(ctx, SPIBAR_FADDR, flash_addr);
	fast_spi_flash_ctrlr_reg_write(ctx, SPIBAR_HSFSTS_CTL,
		hsfsts | SPIBAR_HSFSTS_FGO);
}

static int wait_for_hwseq_xfer(struct fast_spi_flash_ctx *ctx,
	uint32_t flash_addr)
{
	struct stopwatch sw;
	uint32_t hsfsts;

	stopwatch_init_msecs_expire(&sw, SPIBAR_HWSEQ_XFER_TIMEOUT_MS);
	do {
		hsfsts = fast_spi_flash_ctrlr_reg_read(ctx, SPIBAR_HSFSTS_CTL);

		if (hsfsts & SPIBAR_HSFSTS_FCERR) {
			printk(BIOS_ERR, "SPI Transaction Error at Flash Offset %x HSFSTS = 0x%08x\n",
				flash_addr, hsfsts);
			return E_HW_ERROR;
		}

		if (hsfsts & SPIBAR_HSFSTS_FDONE)
			return SUCCESS;
	} while (!(stopwatch_expired(&sw)));

	printk(BIOS_ERR, "SPI Transaction Timeout (Exceeded %d ms) at Flash Offset %x HSFSTS = 0x%08x\n",
		SPIBAR_HWSEQ_XFER_TIMEOUT_MS, flash_addr, hsfsts);
	return E_TIMEOUT;
}

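/* Wait for any SPI cycle already in progress (HSFSTS_CTL.SCIP set) to finish. */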
static int wait_for_hwseq_spi_cycle_complete(struct fast_spi_flash_ctx *ctx)
{
	struct stopwatch sw;
	uint32_t hsfsts;

	stopwatch_init_msecs_expire(&sw, SPIBAR_HWSEQ_XFER_TIMEOUT_MS);
	do {
		hsfsts = fast_spi_flash_ctrlr_reg_read(ctx, SPIBAR_HSFSTS_CTL);

		if (!(hsfsts & SPIBAR_HSFSTS_SCIP))
			return SUCCESS;
	} while (!(stopwatch_expired(&sw)));

	return E_TIMEOUT;
}

/* Execute FAST_SPI flash transfer. This is a blocking call. */
static int exec_sync_hwseq_xfer(struct fast_spi_flash_ctx *ctx,
	uint32_t hsfsts_cycle, uint32_t flash_addr,
	size_t len)
{
	if (wait_for_hwseq_spi_cycle_complete(ctx) != SUCCESS) {
		printk(BIOS_ERR, "SPI Transaction Timeout (Exceeded %d ms) due to prior"
			" operation at Flash Offset %x\n",
			SPIBAR_HWSEQ_XFER_TIMEOUT_MS, flash_addr);
		return E_TIMEOUT;
	}

	start_hwseq_xfer(ctx, hsfsts_cycle, flash_addr, len);
	return wait_for_hwseq_xfer(ctx, flash_addr);
}

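/* Report whether a hardware sequencer cycle is still pending: 0 means the controller is idle, negative on timeout. */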
int fast_spi_cycle_in_progress(void)
{
	BOILERPLATE_CREATE_CTX(ctx);

	int ret = wait_for_hwseq_spi_cycle_complete(ctx);
	if (ret != SUCCESS)
		printk(BIOS_ERR, "SPI Transaction Timeout (Exceeded %d ms) because a prior"
			" operation is still pending\n", SPIBAR_HWSEQ_XFER_TIMEOUT_MS);

	return ret;
}

/*
 * Ensure read/write xfer len is not greater than SPIBAR_FDATA_FIFO_SIZE and
 * that the operation does not cross a page boundary.
 */
static size_t get_xfer_len(const struct spi_flash *flash, uint32_t addr,
	size_t len)
{
	size_t xfer_len = MIN(len, SPIBAR_FDATA_FIFO_SIZE);
	size_t bytes_left = ALIGN_UP(addr, flash->page_size) - addr;

	if (bytes_left)
		xfer_len = MIN(xfer_len, bytes_left);

	return xfer_len;
}
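
/*
 * A worked example of the chunking above (with the 256 byte page size set in
 * fast_spi_flash_probe() and assuming the usual 64 byte SPIBAR_FDATA_FIFO_SIZE):
 * writing 300 bytes starting at 0x10fc is split into transfers of 4, 64, 64,
 * 64, 64 and 40 bytes, so no transfer overruns the FDATAn FIFO or crosses a
 * page boundary.
 */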

static int fast_spi_flash_erase(const struct spi_flash *flash,
	uint32_t offset, size_t len)
{
	int ret;
	size_t erase_size;
	uint32_t erase_cycle;

	BOILERPLATE_CREATE_CTX(ctx);

	if (!IS_ALIGNED(offset, 4 * KiB) || !IS_ALIGNED(len, 4 * KiB)) {
		printk(BIOS_ERR, "BUG! SPI erase region not sector aligned\n");
		return E_ARGUMENT;
	}

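	/*
	 * Prefer 64 KiB block erase cycles whenever the current offset is
	 * 64 KiB aligned and at least 64 KiB remain; otherwise fall back to
	 * 4 KiB sector erases. E.g. erasing 132 KiB starting at offset 60 KiB
	 * issues one 4 KiB cycle followed by two 64 KiB cycles.
	 */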
	while (len) {
		if (IS_ALIGNED(offset, 64 * KiB) && (len >= 64 * KiB)) {
			erase_size = 64 * KiB;
			erase_cycle = SPIBAR_HSFSTS_CYCLE_64K_ERASE;
		} else {
			erase_size = 4 * KiB;
			erase_cycle = SPIBAR_HSFSTS_CYCLE_4K_ERASE;
		}
		printk(BIOS_SPEW, "Erasing flash addr %x + %zu KiB\n",
			offset, erase_size / KiB);

		ret = exec_sync_hwseq_xfer(ctx, erase_cycle, offset, 0);
		if (ret != SUCCESS)
			return ret;

		offset += erase_size;
		len -= erase_size;
	}

	return SUCCESS;
}

static int fast_spi_flash_read(const struct spi_flash *flash,
	uint32_t addr, size_t len, void *buf)
{
	int ret;
	size_t xfer_len;
	uint8_t *data = buf;

	BOILERPLATE_CREATE_CTX(ctx);

	while (len) {
		xfer_len = get_xfer_len(flash, addr, len);

		ret = exec_sync_hwseq_xfer(ctx, SPIBAR_HSFSTS_CYCLE_READ,
			addr, xfer_len);
		if (ret != SUCCESS)
			return ret;

		drain_xfer_fifo(ctx, data, xfer_len);

		addr += xfer_len;
		data += xfer_len;
		len -= xfer_len;
	}

	return SUCCESS;
}

static int fast_spi_flash_write(const struct spi_flash *flash,
	uint32_t addr, size_t len, const void *buf)
{
	int ret;
	size_t xfer_len;
	const uint8_t *data = buf;

	BOILERPLATE_CREATE_CTX(ctx);

	while (len) {
		xfer_len = get_xfer_len(flash, addr, len);
		fill_xfer_fifo(ctx, data, xfer_len);

		ret = exec_sync_hwseq_xfer(ctx, SPIBAR_HSFSTS_CYCLE_WRITE,
			addr, xfer_len);
		if (ret != SUCCESS)
			return ret;

		addr += xfer_len;
		data += xfer_len;
		len -= xfer_len;
	}

	return SUCCESS;
}

static int fast_spi_flash_status(const struct spi_flash *flash,
	uint8_t *reg)
{
	int ret;
	BOILERPLATE_CREATE_CTX(ctx);

	ret = exec_sync_hwseq_xfer(ctx, SPIBAR_HSFSTS_CYCLE_RD_STATUS, 0,
		sizeof(*reg));
	if (ret != SUCCESS)
		return ret;

	drain_xfer_fifo(ctx, reg, sizeof(*reg));
	return ret;
}

const struct spi_flash_ops fast_spi_flash_ops = {
	.read = fast_spi_flash_read,
	.write = fast_spi_flash_write,
	.erase = fast_spi_flash_erase,
	.status = fast_spi_flash_status,
};

/*
 * We can't use FDOC and FDOD to read FLCOMP, as previous platforms did.
 * For details see:
 * Ch 31, SPI: p. 194
 * The size of the flash component is always taken from the density field in
 * the SFDP table. FLCOMP.C0DEN is no longer used by the Flash Controller.
 */
static int fast_spi_flash_probe(const struct spi_slave *dev,
	struct spi_flash *flash)
{
	BOILERPLATE_CREATE_CTX(ctx);
	uint32_t flash_bits;
	uint32_t ptinx_reg;

	/*
	 * bytes = (bits + 1) / 8;
	 * But we need to do the addition in a way which doesn't overflow for
	 * 4 Gbit devices (flash_bits == 0xffffffff).
	 */
	ptinx_reg = SPIBAR_PTINX_COMP_0 | SPIBAR_PTINX_HORD_JEDEC | SFDP_PARAM_DENSITY;
	flash_bits = fast_spi_flash_read_sfdp(ctx, ptinx_reg);
	flash->size = (flash_bits >> 3) + 1;
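	/* E.g. a 256 Mbit component reporting flash_bits == 0x0fffffff yields a size of 32 MiB. */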

	/*
	 * Now check if we have a second flash component.
	 * Check the SFDP header for the SFDP signature. If it is valid, a 2nd
	 * component is present. In that case, determine its size the same way
	 * as for the 1st component and add it to the total flash size.
	 */
	ptinx_reg = SPIBAR_PTINX_COMP_1 | SPIBAR_PTINX_HORD_SFDP | SFDP_HDR_SIG;
	if (fast_spi_flash_read_sfdp(ctx, ptinx_reg) == SFDP_SIGNATURE) {
		ptinx_reg = SPIBAR_PTINX_COMP_1 | SPIBAR_PTINX_HORD_JEDEC | SFDP_PARAM_DENSITY;
		flash_bits = fast_spi_flash_read_sfdp(ctx, ptinx_reg);
		flash->size += ((flash_bits >> 3) + 1);
	}

	memcpy(&flash->spi, dev, sizeof(*dev));

	/* Can erase both 4 KiB and 64 KiB chunks. Declare the smaller size. */
	flash->sector_size = 4 * KiB;
	flash->page_size = 256;
	/*
	 * FIXME: Get erase_cmd and status_cmd from SFDP.
	 *
	 * flash->erase_cmd = ???
	 * flash->status_cmd = ???
	 */

	flash->ops = &fast_spi_flash_ops;
	return 0;
}

static int fast_spi_flash_ctrlr_setup(const struct spi_slave *dev)
{
	if (dev->cs != 0) {
		printk(BIOS_ERR, "%s: Invalid CS for fast SPI bus=0x%x,cs=0x%x!\n",
			__func__, dev->bus, dev->cs);
		return -1;
	}

	return 0;
}

#define SPI_FPR_SHIFT		12
#define SPI_FPR_MASK		0x7fff
#define SPI_FPR_BASE_SHIFT	0
#define SPI_FPR_LIMIT_SHIFT	16
#define SPI_FPR_RPE		(1 << 15) /* Read Protect */
#define SPI_FPR_WPE		(1 << 31) /* Write Protect */
#define SPI_FPR(base, limit)	\
	(((((limit) >> SPI_FPR_SHIFT) & SPI_FPR_MASK) << SPI_FPR_LIMIT_SHIFT) |\
	 ((((base) >> SPI_FPR_SHIFT) & SPI_FPR_MASK) << SPI_FPR_BASE_SHIFT))
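/*
 * Worked example: write-protecting the 4 MiB region starting at 0x400000
 * gives base 0x00400000 and limit 0x007fffff, so SPI_FPR() yields
 * (0x7ff << 16) | 0x400 = 0x07ff0400, and OR-ing in SPI_FPR_WPE stores
 * 0x87ff0400 in the FPR register.
 */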

/*
 * Protect range of SPI flash defined by [start, start+size-1] using Flash
 * Protected Range (FPR) register if available.
 */
static int fast_spi_flash_protect(const struct spi_flash *flash,
	const struct region *region,
	const enum ctrlr_prot_type type)
{
	u32 start = region_offset(region);
	u32 end = start + region_sz(region) - 1;
	u32 reg;
	u32 protect_mask = 0;
	int fpr;
	uintptr_t fpr_base;
	BOILERPLATE_CREATE_CTX(ctx);

	fpr_base = ctx->mmio_base + SPIBAR_FPR_BASE;

	/* Find first empty FPR */
	for (fpr = 0; fpr < SPIBAR_FPR_MAX; fpr++) {
		reg = read32p(fpr_base);
		if (reg == 0)
			break;
		fpr_base += sizeof(uint32_t);
	}

	if (fpr >= SPIBAR_FPR_MAX) {
		printk(BIOS_ERR, "No SPI FPR free!\n");
		return -1;
	}

	switch (type) {
	case WRITE_PROTECT:
		protect_mask |= SPI_FPR_WPE;
		break;
	case READ_PROTECT:
		protect_mask |= SPI_FPR_RPE;
		break;
	case READ_WRITE_PROTECT:
		protect_mask |= (SPI_FPR_RPE | SPI_FPR_WPE);
		break;
	default:
		printk(BIOS_ERR, "Seeking invalid protection!\n");
		return -1;
	}

	/* Set protected range base and limit */
	reg = SPI_FPR(start, end) | protect_mask;

	/* Set the FPR register and verify it is protected */
	write32p(fpr_base, reg);
	reg = read32p(fpr_base);
	if (!(reg & protect_mask)) {
		printk(BIOS_ERR, "Unable to set SPI FPR %d\n", fpr);
		return -1;
	}

	printk(BIOS_INFO, "%s: FPR %d is enabled for range 0x%08x-0x%08x\n",
		__func__, fpr, start, end);
	return 0;
}

const struct spi_ctrlr fast_spi_flash_ctrlr = {
	.setup = fast_spi_flash_ctrlr_setup,
	.max_xfer_size = SPI_CTRLR_DEFAULT_MAX_XFER_SIZE,
	.flash_probe = fast_spi_flash_probe,
	.flash_protect = fast_spi_flash_protect,
};