blob: f896cd9a3ab79c2a7b5fcb1019e6a5397fac97da [file] [log] [blame]
Angel Pons0612b272020-04-05 15:46:56 +02001/* SPDX-License-Identifier: GPL-2.0-only */
Barnali Sarkar89331cd2017-02-16 17:22:37 +05302
Kyösti Mälkki13f66502019-03-03 08:01:05 +02003#include <device/mmio.h>
Elyes HAOUASf97c1c92019-12-03 18:22:06 +01004#include <commonlib/helpers.h>
Barnali Sarkar89331cd2017-02-16 17:22:37 +05305#include <console/console.h>
6#include <fast_spi_def.h>
7#include <intelblocks/fast_spi.h>
Barnali Sarkar89331cd2017-02-16 17:22:37 +05308#include <soc/pci_devs.h>
9#include <spi_flash.h>
10#include <string.h>
11#include <timer.h>
12
/*
 * Helper to create a FAST_SPI context on API entry. Declares a context
 * structure on the caller's stack, points @ctx at it, and fills in the
 * controller MMIO base so register helpers can be used immediately.
 */
#define BOILERPLATE_CREATE_CTX(ctx) \
	struct fast_spi_flash_ctx real_ctx; \
	struct fast_spi_flash_ctx *ctx = &real_ctx; \
	_fast_spi_flash_get_ctx(ctx)
18
/*
 * Anything that's not success is <0. Provided solely for readability, as these
 * constants are not used outside this file.
 */
enum errors {
	SUCCESS = 0,
	E_TIMEOUT = -1,		/* Cycle did not finish within the timeout */
	E_HW_ERROR = -2,	/* Controller reported a cycle error (FCERR) */
	E_ARGUMENT = -3,	/* Caller passed invalid parameters */
};
29
/* Reduce data-passing burden by grouping transaction data in a context. */
struct fast_spi_flash_ctx {
	/* MMIO base of the FAST_SPI controller (SPIBAR). */
	uintptr_t mmio_base;
};
34
/* Populate @ctx with the controller's MMIO base, as reported by the BAR. */
static void _fast_spi_flash_get_ctx(struct fast_spi_flash_ctx *ctx)
{
	ctx->mmio_base = (uintptr_t)fast_spi_get_bar();
}
39
40/* Read register from the FAST_SPI flash controller. */
41static uint32_t fast_spi_flash_ctrlr_reg_read(struct fast_spi_flash_ctx *ctx,
42 uint16_t reg)
43{
44 uintptr_t addr = ALIGN_DOWN(ctx->mmio_base + reg, sizeof(uint32_t));
Elyes Haouasc4fbeac2022-12-04 16:06:02 +010045 return read32p(addr);
Barnali Sarkar89331cd2017-02-16 17:22:37 +053046}
47
48/* Write to register in FAST_SPI flash controller. */
49static void fast_spi_flash_ctrlr_reg_write(struct fast_spi_flash_ctx *ctx,
50 uint16_t reg, uint32_t val)
51{
52 uintptr_t addr = ALIGN_DOWN(ctx->mmio_base + reg, sizeof(uint32_t));
Elyes Haouasc4fbeac2022-12-04 16:06:02 +010053 write32p(addr, val);
Barnali Sarkar89331cd2017-02-16 17:22:37 +053054}
55
56/*
57 * The hardware datasheet is not clear on what HORD values actually do. It
58 * seems that HORD_SFDP provides access to the first 8 bytes of the SFDP, which
59 * is the signature and revision fields. HORD_JEDEC provides access to the
60 * actual flash parameters, and is most likely what you want to use when
61 * probing the flash from software.
62 * It's okay to rely on SFDP, since the SPI flash controller requires an SFDP
63 * 1.5 or newer compliant FAST_SPI flash chip.
64 * NOTE: Due to the register layout of the hardware, all accesses will be
65 * aligned to a 4 byte boundary.
66 */
67static uint32_t fast_spi_flash_read_sfdp_param(struct fast_spi_flash_ctx *ctx,
68 uint16_t sfdp_reg)
69{
70 uint32_t ptinx_index = sfdp_reg & SPIBAR_PTINX_IDX_MASK;
71 fast_spi_flash_ctrlr_reg_write(ctx, SPIBAR_PTINX,
72 ptinx_index | SPIBAR_PTINX_HORD_JEDEC);
73 return fast_spi_flash_ctrlr_reg_read(ctx, SPIBAR_PTDATA);
74}
75
76/* Fill FDATAn FIFO in preparation for a write transaction. */
77static void fill_xfer_fifo(struct fast_spi_flash_ctx *ctx, const void *data,
78 size_t len)
79{
80 /* YES! memcpy() works. FDATAn does not require 32-bit accesses. */
81 memcpy((void *)(ctx->mmio_base + SPIBAR_FDATA(0)), data, len);
82}
83
84/* Drain FDATAn FIFO after a read transaction populates data. */
85static void drain_xfer_fifo(struct fast_spi_flash_ctx *ctx, void *dest,
86 size_t len)
87{
88 /* YES! memcpy() works. FDATAn does not require 32-bit accesses. */
89 memcpy(dest, (void *)(ctx->mmio_base + SPIBAR_FDATA(0)), len);
90}
91
92/* Fire up a transfer using the hardware sequencer. */
93static void start_hwseq_xfer(struct fast_spi_flash_ctx *ctx,
94 uint32_t hsfsts_cycle, uint32_t flash_addr, size_t len)
95{
96 /* Make sure all W1C status bits get cleared. */
97 uint32_t hsfsts = SPIBAR_HSFSTS_W1C_BITS;
98 /* Set up transaction parameters. */
99 hsfsts |= hsfsts_cycle & SPIBAR_HSFSTS_FCYCLE_MASK;
100 hsfsts |= SPIBAR_HSFSTS_FDBC(len - 1);
101
102 fast_spi_flash_ctrlr_reg_write(ctx, SPIBAR_FADDR, flash_addr);
103 fast_spi_flash_ctrlr_reg_write(ctx, SPIBAR_HSFSTS_CTL,
104 hsfsts | SPIBAR_HSFSTS_FGO);
105}
106
107static int wait_for_hwseq_xfer(struct fast_spi_flash_ctx *ctx,
108 uint32_t flash_addr)
109{
110 struct stopwatch sw;
111 uint32_t hsfsts;
112
Angel Pons122cc8c2021-02-15 17:18:55 +0100113 stopwatch_init_msecs_expire(&sw, SPIBAR_HWSEQ_XFER_TIMEOUT_MS);
Barnali Sarkar89331cd2017-02-16 17:22:37 +0530114 do {
115 hsfsts = fast_spi_flash_ctrlr_reg_read(ctx, SPIBAR_HSFSTS_CTL);
116
117 if (hsfsts & SPIBAR_HSFSTS_FCERR) {
118 printk(BIOS_ERR, "SPI Transaction Error at Flash Offset %x HSFSTS = 0x%08x\n",
119 flash_addr, hsfsts);
120 return E_HW_ERROR;
121 }
122
123 if (hsfsts & SPIBAR_HSFSTS_FDONE)
124 return SUCCESS;
125 } while (!(stopwatch_expired(&sw)));
126
127 printk(BIOS_ERR, "SPI Transaction Timeout (Exceeded %d ms) at Flash Offset %x HSFSTS = 0x%08x\n",
Angel Pons122cc8c2021-02-15 17:18:55 +0100128 SPIBAR_HWSEQ_XFER_TIMEOUT_MS, flash_addr, hsfsts);
Barnali Sarkar89331cd2017-02-16 17:22:37 +0530129 return E_TIMEOUT;
130}
131
Subrata Banik4de2c342022-02-11 13:58:31 +0530132static int wait_for_hwseq_spi_cycle_complete(struct fast_spi_flash_ctx *ctx)
133{
134 struct stopwatch sw;
135 uint32_t hsfsts;
136
137 stopwatch_init_msecs_expire(&sw, SPIBAR_HWSEQ_XFER_TIMEOUT_MS);
138 do {
139 hsfsts = fast_spi_flash_ctrlr_reg_read(ctx, SPIBAR_HSFSTS_CTL);
140
141 if (!(hsfsts & SPIBAR_HSFSTS_SCIP))
142 return SUCCESS;
143 } while (!(stopwatch_expired(&sw)));
144
145 return E_TIMEOUT;
146}
147
Barnali Sarkar89331cd2017-02-16 17:22:37 +0530148/* Execute FAST_SPI flash transfer. This is a blocking call. */
149static int exec_sync_hwseq_xfer(struct fast_spi_flash_ctx *ctx,
150 uint32_t hsfsts_cycle, uint32_t flash_addr,
151 size_t len)
152{
Subrata Banik4de2c342022-02-11 13:58:31 +0530153 if (wait_for_hwseq_spi_cycle_complete(ctx) != SUCCESS) {
154 printk(BIOS_ERR, "SPI Transaction Timeout (Exceeded %d ms) due to prior"
155 " operation at Flash Offset %x\n",
156 SPIBAR_HWSEQ_XFER_TIMEOUT_MS, flash_addr);
157 return E_TIMEOUT;
158 }
159
Barnali Sarkar89331cd2017-02-16 17:22:37 +0530160 start_hwseq_xfer(ctx, hsfsts_cycle, flash_addr, len);
161 return wait_for_hwseq_xfer(ctx, flash_addr);
162}
163
Subrata Banika26bb782022-04-13 19:18:34 +0530164int fast_spi_cycle_in_progress(void)
165{
166 BOILERPLATE_CREATE_CTX(ctx);
167
168 int ret = wait_for_hwseq_spi_cycle_complete(ctx);
169 if (ret != SUCCESS)
170 printk(BIOS_ERR, "SPI Transaction Timeout (Exceeded %d ms) due to prior"
171 " operation is pending\n", SPIBAR_HWSEQ_XFER_TIMEOUT_MS);
172
173 return ret;
174}
175
Barnali Sarkar89331cd2017-02-16 17:22:37 +0530176/*
177 * Ensure read/write xfer len is not greater than SPIBAR_FDATA_FIFO_SIZE and
Furquan Shaikhfc1a1232017-05-12 00:19:56 -0700178 * that the operation does not cross page boundary.
Barnali Sarkar89331cd2017-02-16 17:22:37 +0530179 */
Furquan Shaikhfc1a1232017-05-12 00:19:56 -0700180static size_t get_xfer_len(const struct spi_flash *flash, uint32_t addr,
181 size_t len)
Barnali Sarkar89331cd2017-02-16 17:22:37 +0530182{
Elyes HAOUASf97c1c92019-12-03 18:22:06 +0100183 size_t xfer_len = MIN(len, SPIBAR_FDATA_FIFO_SIZE);
Furquan Shaikhfc1a1232017-05-12 00:19:56 -0700184 size_t bytes_left = ALIGN_UP(addr, flash->page_size) - addr;
Barnali Sarkar89331cd2017-02-16 17:22:37 +0530185
186 if (bytes_left)
Elyes HAOUASf97c1c92019-12-03 18:22:06 +0100187 xfer_len = MIN(xfer_len, bytes_left);
Barnali Sarkar89331cd2017-02-16 17:22:37 +0530188
189 return xfer_len;
190}
191
Barnali Sarkar89331cd2017-02-16 17:22:37 +0530192static int fast_spi_flash_erase(const struct spi_flash *flash,
193 uint32_t offset, size_t len)
194{
195 int ret;
196 size_t erase_size;
197 uint32_t erase_cycle;
198
199 BOILERPLATE_CREATE_CTX(ctx);
200
201 if (!IS_ALIGNED(offset, 4 * KiB) || !IS_ALIGNED(len, 4 * KiB)) {
202 printk(BIOS_ERR, "BUG! SPI erase region not sector aligned\n");
203 return E_ARGUMENT;
204 }
205
206 while (len) {
207 if (IS_ALIGNED(offset, 64 * KiB) && (len >= 64 * KiB)) {
208 erase_size = 64 * KiB;
209 erase_cycle = SPIBAR_HSFSTS_CYCLE_64K_ERASE;
210 } else {
211 erase_size = 4 * KiB;
212 erase_cycle = SPIBAR_HSFSTS_CYCLE_4K_ERASE;
213 }
214 printk(BIOS_SPEW, "Erasing flash addr %x + %zu KiB\n",
215 offset, erase_size / KiB);
216
217 ret = exec_sync_hwseq_xfer(ctx, erase_cycle, offset, 0);
218 if (ret != SUCCESS)
219 return ret;
220
221 offset += erase_size;
222 len -= erase_size;
223 }
224
225 return SUCCESS;
226}
227
228static int fast_spi_flash_read(const struct spi_flash *flash,
229 uint32_t addr, size_t len, void *buf)
230{
231 int ret;
232 size_t xfer_len;
233 uint8_t *data = buf;
234
235 BOILERPLATE_CREATE_CTX(ctx);
236
237 while (len) {
Furquan Shaikhfc1a1232017-05-12 00:19:56 -0700238 xfer_len = get_xfer_len(flash, addr, len);
Barnali Sarkar89331cd2017-02-16 17:22:37 +0530239
240 ret = exec_sync_hwseq_xfer(ctx, SPIBAR_HSFSTS_CYCLE_READ,
241 addr, xfer_len);
242 if (ret != SUCCESS)
243 return ret;
244
245 drain_xfer_fifo(ctx, data, xfer_len);
246
247 addr += xfer_len;
248 data += xfer_len;
249 len -= xfer_len;
250 }
251
252 return SUCCESS;
253}
254
255static int fast_spi_flash_write(const struct spi_flash *flash,
256 uint32_t addr, size_t len, const void *buf)
257{
258 int ret;
259 size_t xfer_len;
260 const uint8_t *data = buf;
261
262 BOILERPLATE_CREATE_CTX(ctx);
263
264 while (len) {
Furquan Shaikhfc1a1232017-05-12 00:19:56 -0700265 xfer_len = get_xfer_len(flash, addr, len);
Barnali Sarkar89331cd2017-02-16 17:22:37 +0530266 fill_xfer_fifo(ctx, data, xfer_len);
267
268 ret = exec_sync_hwseq_xfer(ctx, SPIBAR_HSFSTS_CYCLE_WRITE,
269 addr, xfer_len);
270 if (ret != SUCCESS)
271 return ret;
272
273 addr += xfer_len;
274 data += xfer_len;
275 len -= xfer_len;
276 }
277
278 return SUCCESS;
279}
280
281static int fast_spi_flash_status(const struct spi_flash *flash,
282 uint8_t *reg)
283{
284 int ret;
285 BOILERPLATE_CREATE_CTX(ctx);
286
287 ret = exec_sync_hwseq_xfer(ctx, SPIBAR_HSFSTS_CYCLE_RD_STATUS, 0,
288 sizeof(*reg));
289 if (ret != SUCCESS)
290 return ret;
291
292 drain_xfer_fifo(ctx, reg, sizeof(*reg));
293 return ret;
294}
295
/* Flash operations implemented via the hardware sequencer above. */
const struct spi_flash_ops fast_spi_flash_ops = {
	.read = fast_spi_flash_read,
	.write = fast_spi_flash_write,
	.erase = fast_spi_flash_erase,
	.status = fast_spi_flash_status,
};
302
Barnali Sarkar89331cd2017-02-16 17:22:37 +0530303/*
304 * We can't use FDOC and FDOD to read FLCOMP, as previous platforms did.
305 * For details see:
306 * Ch 31, SPI: p. 194
307 * The size of the flash component is always taken from density field in the
308 * SFDP table. FLCOMP.C0DEN is no longer used by the Flash Controller.
309 */
Furquan Shaikha1491572017-05-17 19:14:06 -0700310static int fast_spi_flash_probe(const struct spi_slave *dev,
311 struct spi_flash *flash)
Barnali Sarkar89331cd2017-02-16 17:22:37 +0530312{
313 BOILERPLATE_CREATE_CTX(ctx);
Barnali Sarkar89331cd2017-02-16 17:22:37 +0530314 uint32_t flash_bits;
315
Barnali Sarkar89331cd2017-02-16 17:22:37 +0530316 /*
317 * bytes = (bits + 1) / 8;
318 * But we need to do the addition in a way which doesn't overflow for
319 * 4 Gbit devices (flash_bits == 0xffffffff).
320 */
321 flash_bits = fast_spi_flash_read_sfdp_param(ctx, 0x04);
322 flash->size = (flash_bits >> 3) + 1;
323
324 memcpy(&flash->spi, dev, sizeof(*dev));
Barnali Sarkar89331cd2017-02-16 17:22:37 +0530325
326 /* Can erase both 4 KiB and 64 KiB chunks. Declare the smaller size. */
327 flash->sector_size = 4 * KiB;
Furquan Shaikhfc1a1232017-05-12 00:19:56 -0700328 flash->page_size = 256;
Barnali Sarkar89331cd2017-02-16 17:22:37 +0530329 /*
330 * FIXME: Get erase+cmd, and status_cmd from SFDP.
331 *
332 * flash->erase_cmd = ???
333 * flash->status_cmd = ???
334 */
335
Furquan Shaikhe2fc5e22017-05-17 17:26:01 -0700336 flash->ops = &fast_spi_flash_ops;
Furquan Shaikh30221b42017-05-15 14:35:15 -0700337 return 0;
Barnali Sarkar89331cd2017-02-16 17:22:37 +0530338}
339
Furquan Shaikhf1db5fd2017-05-02 19:43:20 -0700340static int fast_spi_flash_ctrlr_setup(const struct spi_slave *dev)
341{
342 if (dev->cs != 0) {
343 printk(BIOS_ERR, "%s: Invalid CS for fast SPI bus=0x%x,cs=0x%x!\n",
344 __func__, dev->bus, dev->cs);
345 return -1;
346 }
347
348 return 0;
349}
350
/* FPR registers hold 4 KiB granular base/limit fields packed into one dword. */
#define SPI_FPR_SHIFT		12	/* Addresses are in 4 KiB units */
#define SPI_FPR_MASK		0x7fff
#define SPI_FPR_BASE_SHIFT	0
#define SPI_FPR_LIMIT_SHIFT	16
#define SPI_FPR_RPE		(1 << 15) /* Read Protect */
#define SPI_FPR_WPE		(1 << 31) /* Write Protect */
/* Pack inclusive [base, limit] byte addresses into an FPR register value. */
#define SPI_FPR(base, limit)	\
	(((((limit) >> SPI_FPR_SHIFT) & SPI_FPR_MASK) << SPI_FPR_LIMIT_SHIFT) |\
	 ((((base) >> SPI_FPR_SHIFT) & SPI_FPR_MASK) << SPI_FPR_BASE_SHIFT))
360
/*
 * Protect range of SPI flash defined by [start, start+size-1] using Flash
 * Protected Range (FPR) register if available.
 * Returns 0 on success, -1 if no FPR is free, @type is invalid, or the
 * write-back verification fails.
 */
static int fast_spi_flash_protect(const struct spi_flash *flash,
				  const struct region *region,
				  const enum ctrlr_prot_type type)
{
	u32 start = region_offset(region);
	/* Inclusive upper bound of the protected range. */
	u32 end = start + region_sz(region) - 1;
	u32 reg;
	u32 protect_mask = 0;
	int fpr;
	uintptr_t fpr_base;
	BOILERPLATE_CREATE_CTX(ctx);

	fpr_base = ctx->mmio_base + SPIBAR_FPR_BASE;

	/* Find first empty FPR */
	for (fpr = 0; fpr < SPIBAR_FPR_MAX; fpr++) {
		reg = read32p(fpr_base);
		if (reg == 0)
			break;
		fpr_base += sizeof(uint32_t);
	}

	if (fpr >= SPIBAR_FPR_MAX) {
		printk(BIOS_ERR, "No SPI FPR free!\n");
		return -1;
	}

	/* Translate the requested protection type into RPE/WPE bits. */
	switch (type) {
	case WRITE_PROTECT:
		protect_mask |= SPI_FPR_WPE;
		break;
	case READ_PROTECT:
		protect_mask |= SPI_FPR_RPE;
		break;
	case READ_WRITE_PROTECT:
		protect_mask |= (SPI_FPR_RPE | SPI_FPR_WPE);
		break;
	default:
		printk(BIOS_ERR, "Seeking invalid protection!\n");
		return -1;
	}

	/* Set protected range base and limit */
	reg = SPI_FPR(start, end) | protect_mask;

	/* Set the FPR register and verify it is protected */
	write32p(fpr_base, reg);
	reg = read32p(fpr_base);
	if (!(reg & protect_mask)) {
		/* Read-back lacks the protect bits: the write did not stick. */
		printk(BIOS_ERR, "Unable to set SPI FPR %d\n", fpr);
		return -1;
	}

	printk(BIOS_INFO, "%s: FPR %d is enabled for range 0x%08x-0x%08x\n",
	       __func__, fpr, start, end);
	return 0;
}
422
/* SPI controller descriptor registered for the fast SPI flash bus. */
const struct spi_ctrlr fast_spi_flash_ctrlr = {
	.setup = fast_spi_flash_ctrlr_setup,
	.max_xfer_size = SPI_CTRLR_DEFAULT_MAX_XFER_SIZE,
	.flash_probe = fast_spi_flash_probe,
	.flash_protect = fast_spi_flash_protect,
};