/*
 * This file is part of the coreboot project.
 *
 * Copyright (C) 2017 Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; version 2 of the License.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */

#include <arch/early_variables.h>
#include <arch/io.h>
#include <console/console.h>
#include <fast_spi_def.h>
#include <intelblocks/fast_spi.h>
#include <soc/intel/common/spi_flash.h>
#include <soc/pci_devs.h>
#include <spi_flash.h>
#include <string.h>
#include <timer.h>

/* Helper to create a FAST_SPI context on API entry. */
#define BOILERPLATE_CREATE_CTX(ctx)		\
	struct fast_spi_flash_ctx real_ctx;	\
	struct fast_spi_flash_ctx *ctx = &real_ctx;	\
	_fast_spi_flash_get_ctx(ctx)
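/*
 * For reference, at a call site such as fast_spi_flash_read() the macro
 * above expands to roughly:
 *
 *	struct fast_spi_flash_ctx real_ctx;
 *	struct fast_spi_flash_ctx *ctx = &real_ctx;
 *	_fast_spi_flash_get_ctx(ctx);
 *
 * so every public entry point gets a stack-local context holding the
 * controller's MMIO base, with no global state threaded through.
 */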

/*
 * Anything that's not success is <0. Provided solely for readability, as these
 * constants are not used outside this file.
 */
enum errors {
	SUCCESS = 0,
	E_TIMEOUT = -1,
	E_HW_ERROR = -2,
	E_ARGUMENT = -3,
};

/* Reduce data-passing burden by grouping transaction data in a context. */
struct fast_spi_flash_ctx {
	uintptr_t mmio_base;
};

static void _fast_spi_flash_get_ctx(struct fast_spi_flash_ctx *ctx)
{
	ctx->mmio_base = (uintptr_t)fast_spi_get_bar();
}

/* Read register from the FAST_SPI flash controller. */
static uint32_t fast_spi_flash_ctrlr_reg_read(struct fast_spi_flash_ctx *ctx,
	uint16_t reg)
{
	uintptr_t addr = ALIGN_DOWN(ctx->mmio_base + reg, sizeof(uint32_t));
	return read32((void *)addr);
}

/* Write to register in FAST_SPI flash controller. */
static void fast_spi_flash_ctrlr_reg_write(struct fast_spi_flash_ctx *ctx,
	uint16_t reg, uint32_t val)
{
	uintptr_t addr = ALIGN_DOWN(ctx->mmio_base + reg, sizeof(uint32_t));
	write32((void *)addr, val);
}

/*
 * The hardware datasheet is not clear on what HORD values actually do. It
 * seems that HORD_SFDP provides access to the first 8 bytes of the SFDP, which
 * is the signature and revision fields. HORD_JEDEC provides access to the
 * actual flash parameters, and is most likely what you want to use when
 * probing the flash from software.
 * It's okay to rely on SFDP, since the SPI flash controller requires an SFDP
 * 1.5 or newer compliant FAST_SPI flash chip.
 * NOTE: Due to the register layout of the hardware, all accesses will be
 * aligned to a 4 byte boundary.
 */
static uint32_t fast_spi_flash_read_sfdp_param(struct fast_spi_flash_ctx *ctx,
	uint16_t sfdp_reg)
{
	uint32_t ptinx_index = sfdp_reg & SPIBAR_PTINX_IDX_MASK;
	fast_spi_flash_ctrlr_reg_write(ctx, SPIBAR_PTINX,
		ptinx_index | SPIBAR_PTINX_HORD_JEDEC);
	return fast_spi_flash_ctrlr_reg_read(ctx, SPIBAR_PTDATA);
}
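
/*
 * Example of how fast_spi_flash_probe() below uses this helper: SFDP/JEDEC
 * parameter offset 0x04 holds the density dword, i.e. the flash size in
 * bits minus one, so a (hypothetical) 64 Mbit part reads back 0x03ffffff:
 *
 *	flash_bits = fast_spi_flash_read_sfdp_param(ctx, 0x04);
 */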

/* Fill FDATAn FIFO in preparation for a write transaction. */
static void fill_xfer_fifo(struct fast_spi_flash_ctx *ctx, const void *data,
	size_t len)
{
	/* YES! memcpy() works. FDATAn does not require 32-bit accesses. */
	memcpy((void *)(ctx->mmio_base + SPIBAR_FDATA(0)), data, len);
}

/* Drain FDATAn FIFO after a read transaction populates data. */
static void drain_xfer_fifo(struct fast_spi_flash_ctx *ctx, void *dest,
	size_t len)
{
	/* YES! memcpy() works. FDATAn does not require 32-bit accesses. */
	memcpy(dest, (void *)(ctx->mmio_base + SPIBAR_FDATA(0)), len);
}
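
/*
 * Both helpers above treat the FDATAn registers as one contiguous buffer
 * starting at SPIBAR_FDATA(0). Transfer lengths are capped elsewhere at
 * SPIBAR_FDATA_FIFO_SIZE (64 bytes on typical FAST_SPI controllers, though
 * the constant is authoritative), so memcpy() never runs past the FIFO.
 */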

/* Fire up a transfer using the hardware sequencer. */
static void start_hwseq_xfer(struct fast_spi_flash_ctx *ctx,
	uint32_t hsfsts_cycle, uint32_t flash_addr, size_t len)
{
	/* Make sure all W1C status bits get cleared. */
	uint32_t hsfsts = SPIBAR_HSFSTS_W1C_BITS;
	/* Set up transaction parameters. */
	hsfsts |= hsfsts_cycle & SPIBAR_HSFSTS_FCYCLE_MASK;
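	/* FDBC (Flash Data Byte Count) encodes the transfer size as bytes minus one. */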
	hsfsts |= SPIBAR_HSFSTS_FDBC(len - 1);

	fast_spi_flash_ctrlr_reg_write(ctx, SPIBAR_FADDR, flash_addr);
	fast_spi_flash_ctrlr_reg_write(ctx, SPIBAR_HSFSTS_CTL,
		hsfsts | SPIBAR_HSFSTS_FGO);
}

static int wait_for_hwseq_xfer(struct fast_spi_flash_ctx *ctx,
	uint32_t flash_addr)
{
	struct stopwatch sw;
	uint32_t hsfsts;

	stopwatch_init_msecs_expire(&sw, SPIBAR_HWSEQ_XFER_TIMEOUT);
	do {
		hsfsts = fast_spi_flash_ctrlr_reg_read(ctx, SPIBAR_HSFSTS_CTL);

		if (hsfsts & SPIBAR_HSFSTS_FCERR) {
			printk(BIOS_ERR, "SPI Transaction Error at Flash Offset %x HSFSTS = 0x%08x\n",
				flash_addr, hsfsts);
			return E_HW_ERROR;
		}

		if (hsfsts & SPIBAR_HSFSTS_FDONE)
			return SUCCESS;
	} while (!(stopwatch_expired(&sw)));

	printk(BIOS_ERR, "SPI Transaction Timeout (Exceeded %d ms) at Flash Offset %x HSFSTS = 0x%08x\n",
		SPIBAR_HWSEQ_XFER_TIMEOUT, flash_addr, hsfsts);
	return E_TIMEOUT;
}

/* Execute FAST_SPI flash transfer. This is a blocking call. */
static int exec_sync_hwseq_xfer(struct fast_spi_flash_ctx *ctx,
	uint32_t hsfsts_cycle, uint32_t flash_addr,
	size_t len)
{
	start_hwseq_xfer(ctx, hsfsts_cycle, flash_addr, len);
	return wait_for_hwseq_xfer(ctx, flash_addr);
}

/*
 * Ensure read/write xfer len is not greater than SPIBAR_FDATA_FIFO_SIZE and
 * that the operation does not cross a page boundary.
 */
static size_t get_xfer_len(const struct spi_flash *flash, uint32_t addr,
	size_t len)
{
	size_t xfer_len = min(len, SPIBAR_FDATA_FIFO_SIZE);
	size_t bytes_left = ALIGN_UP(addr, flash->page_size) - addr;

	if (bytes_left)
		xfer_len = min(xfer_len, bytes_left);

	return xfer_len;
}
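
/*
 * Worked example (assuming a 64-byte FDATA FIFO and the 256-byte page size
 * set in fast_spi_flash_probe() below): for addr = 0x10fc and len = 0x100,
 * xfer_len is first clamped to 64, then to the 4 bytes remaining before the
 * 0x1100 page boundary, so only 4 bytes move in this iteration and the
 * read/write loops below pick up the remainder.
 */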

static int fast_spi_flash_erase(const struct spi_flash *flash,
	uint32_t offset, size_t len)
{
	int ret;
	size_t erase_size;
	uint32_t erase_cycle;

	BOILERPLATE_CREATE_CTX(ctx);

	if (!IS_ALIGNED(offset, 4 * KiB) || !IS_ALIGNED(len, 4 * KiB)) {
		printk(BIOS_ERR, "BUG! SPI erase region not sector aligned\n");
		return E_ARGUMENT;
	}

	while (len) {
		if (IS_ALIGNED(offset, 64 * KiB) && (len >= 64 * KiB)) {
			erase_size = 64 * KiB;
			erase_cycle = SPIBAR_HSFSTS_CYCLE_64K_ERASE;
		} else {
			erase_size = 4 * KiB;
			erase_cycle = SPIBAR_HSFSTS_CYCLE_4K_ERASE;
		}
		printk(BIOS_SPEW, "Erasing flash addr %x + %zu KiB\n",
			offset, erase_size / KiB);

		ret = exec_sync_hwseq_xfer(ctx, erase_cycle, offset, 0);
		if (ret != SUCCESS)
			return ret;

		offset += erase_size;
		len -= erase_size;
	}

	return SUCCESS;
}
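
/*
 * For instance, erasing offset 0x0 with len 0x21000 (132 KiB) issues three
 * hardware cycles: a 64 KiB erase at 0x0, a 64 KiB erase at 0x10000, and a
 * 4 KiB erase at 0x20000.
 */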

static int fast_spi_flash_read(const struct spi_flash *flash,
	uint32_t addr, size_t len, void *buf)
{
	int ret;
	size_t xfer_len;
	uint8_t *data = buf;

	BOILERPLATE_CREATE_CTX(ctx);

	while (len) {
		xfer_len = get_xfer_len(flash, addr, len);

		ret = exec_sync_hwseq_xfer(ctx, SPIBAR_HSFSTS_CYCLE_READ,
			addr, xfer_len);
		if (ret != SUCCESS)
			return ret;

		drain_xfer_fifo(ctx, data, xfer_len);

		addr += xfer_len;
		data += xfer_len;
		len -= xfer_len;
	}

	return SUCCESS;
}

static int fast_spi_flash_write(const struct spi_flash *flash,
	uint32_t addr, size_t len, const void *buf)
{
	int ret;
	size_t xfer_len;
	const uint8_t *data = buf;

	BOILERPLATE_CREATE_CTX(ctx);

	while (len) {
		xfer_len = get_xfer_len(flash, addr, len);
		fill_xfer_fifo(ctx, data, xfer_len);

		ret = exec_sync_hwseq_xfer(ctx, SPIBAR_HSFSTS_CYCLE_WRITE,
			addr, xfer_len);
		if (ret != SUCCESS)
			return ret;

		addr += xfer_len;
		data += xfer_len;
		len -= xfer_len;
	}

	return SUCCESS;
}

static int fast_spi_flash_status(const struct spi_flash *flash,
	uint8_t *reg)
{
	int ret;
	BOILERPLATE_CREATE_CTX(ctx);

	ret = exec_sync_hwseq_xfer(ctx, SPIBAR_HSFSTS_CYCLE_RD_STATUS, 0,
		sizeof(*reg));
	if (ret != SUCCESS)
		return ret;

	drain_xfer_fifo(ctx, reg, sizeof(*reg));
	return ret;
}

const struct spi_flash_ops fast_spi_flash_ops = {
	.read = fast_spi_flash_read,
	.write = fast_spi_flash_write,
	.erase = fast_spi_flash_erase,
	.status = fast_spi_flash_status,
};
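
/*
 * Note: these handlers are not called directly. Higher layers typically go
 * through the generic spi_flash wrappers (e.g. spi_flash_read(flash, offset,
 * len, buf)), which dispatch through flash->ops as set up in
 * fast_spi_flash_probe() below.
 */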

/*
 * We can't use FDOC and FDOD to read FLCOMP, as previous platforms did.
 * For details see:
 * Ch 31, SPI: p. 194
 * The size of the flash component is always taken from the density field in
 * the SFDP table. FLCOMP.C0DEN is no longer used by the Flash Controller.
 */
static int fast_spi_flash_probe(const struct spi_slave *dev,
	struct spi_flash *flash)
{
	BOILERPLATE_CREATE_CTX(ctx);
	uint32_t flash_bits;

	/*
	 * bytes = (bits + 1) / 8;
	 * But we need to do the addition in a way which doesn't overflow for
	 * 4 Gbit devices (flash_bits == 0xffffffff).
	 */
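	/*
	 * Example: a (hypothetical) 64 Mbit part reports flash_bits =
	 * 0x03ffffff, giving (0x03ffffff >> 3) + 1 = 0x800000 bytes (8 MiB);
	 * a 4 Gbit part reports 0xffffffff, which the shift-then-add below
	 * handles without overflowing 32 bits.
	 */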
	flash_bits = fast_spi_flash_read_sfdp_param(ctx, 0x04);
	flash->size = (flash_bits >> 3) + 1;

	memcpy(&flash->spi, dev, sizeof(*dev));
	flash->name = "FAST_SPI Hardware Sequencer";

	/* Can erase both 4 KiB and 64 KiB chunks. Declare the smaller size. */
	flash->sector_size = 4 * KiB;
	flash->page_size = 256;
	/*
	 * FIXME: Get erase_cmd and status_cmd from SFDP.
	 *
	 * flash->erase_cmd = ???
	 * flash->status_cmd = ???
	 */

	flash->ops = &fast_spi_flash_ops;
	return 0;
}

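/*
 * Report the MMIO location and count of the Flash Protected Range (FPR)
 * registers so callers can program protected regions.
 */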
int spi_flash_get_fpr_info(struct fpr_info *info)
{
	BOILERPLATE_CREATE_CTX(ctx);

	info->base = ctx->mmio_base + SPIBAR_FPR_BASE;
	info->max = SPIBAR_FPR_MAX;
	return 0;
}

/*
 * Minimal set of commands to read WPSR from FAST_SPI.
 * Returns 0 on success, < 0 on failure.
 */
int fast_spi_flash_read_wpsr(u8 *sr)
{
	uint8_t rdsr;
	int ret = 0;

	fast_spi_init();

	/*
	 * Pass NULL for the spi_flash parameter: the HWSEQ status read does
	 * not use it, and this path does not go through probe.
	 */
	ret = fast_spi_flash_status(NULL, &rdsr);
	if (ret) {
		printk(BIOS_ERR, "SPI rdsr failed\n");
		return ret;
	}
	*sr = rdsr & WPSR_MASK_SRP0_BIT;

	return 0;
}

static int fast_spi_flash_ctrlr_setup(const struct spi_slave *dev)
{
	if (dev->cs != 0) {
		printk(BIOS_ERR, "%s: Invalid CS for fast SPI bus=0x%x,cs=0x%x!\n",
			__func__, dev->bus, dev->cs);
		return -1;
	}

	return 0;
}

const struct spi_ctrlr fast_spi_flash_ctrlr = {
	.setup = fast_spi_flash_ctrlr_setup,
	.max_xfer_size = SPI_CTRLR_DEFAULT_MAX_XFER_SIZE,
	.flash_probe = fast_spi_flash_probe,
};