/*
 * This file is part of the coreboot project.
 *
 * Copyright (C) 2017 Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; version 2 of the License.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */

#include <arch/early_variables.h>
#include <arch/io.h>
#include <console/console.h>
#include <fast_spi_def.h>
#include <intelblocks/fast_spi.h>
#include <soc/intel/common/spi_flash.h>
#include <soc/pci_devs.h>
#include <spi_flash.h>
#include <string.h>
#include <timer.h>

/* Helper to create a FAST_SPI context on API entry. */
#define BOILERPLATE_CREATE_CTX(ctx)			\
	struct fast_spi_flash_ctx real_ctx;		\
	struct fast_spi_flash_ctx *ctx = &real_ctx;	\
	_fast_spi_flash_get_ctx(ctx)

/*
 * Anything that's not success is <0. Provided solely for readability, as these
 * constants are not used outside this file.
 */
enum errors {
	SUCCESS = 0,
	E_TIMEOUT = -1,
	E_HW_ERROR = -2,
	E_ARGUMENT = -3,
};

/* Reduce data-passing burden by grouping transaction data in a context. */
struct fast_spi_flash_ctx {
	uintptr_t mmio_base;
};

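/* Fill in the context with the controller's SPIBAR MMIO base. */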
static void _fast_spi_flash_get_ctx(struct fast_spi_flash_ctx *ctx)
{
	ctx->mmio_base = (uintptr_t)fast_spi_get_bar();
}

/* Read register from the FAST_SPI flash controller. */
static uint32_t fast_spi_flash_ctrlr_reg_read(struct fast_spi_flash_ctx *ctx,
	uint16_t reg)
{
	uintptr_t addr = ALIGN_DOWN(ctx->mmio_base + reg, sizeof(uint32_t));
	return read32((void *)addr);
}

/* Write to register in FAST_SPI flash controller. */
static void fast_spi_flash_ctrlr_reg_write(struct fast_spi_flash_ctx *ctx,
	uint16_t reg, uint32_t val)
{
	uintptr_t addr = ALIGN_DOWN(ctx->mmio_base + reg, sizeof(uint32_t));
	write32((void *)addr, val);
}

/*
 * The hardware datasheet is not clear on what HORD values actually do. It
 * seems that HORD_SFDP provides access to the first 8 bytes of the SFDP,
 * i.e. the signature and revision fields. HORD_JEDEC provides access to the
 * actual flash parameters, and is most likely what you want to use when
 * probing the flash from software.
 * It's okay to rely on SFDP, since the SPI flash controller requires an SFDP
 * 1.5 or newer compliant FAST_SPI flash chip.
 * NOTE: Due to the register layout of the hardware, all accesses will be
 * aligned to a 4 byte boundary.
 */
static uint32_t fast_spi_flash_read_sfdp_param(struct fast_spi_flash_ctx *ctx,
	uint16_t sfdp_reg)
{
	uint32_t ptinx_index = sfdp_reg & SPIBAR_PTINX_IDX_MASK;
	fast_spi_flash_ctrlr_reg_write(ctx, SPIBAR_PTINX,
				       ptinx_index | SPIBAR_PTINX_HORD_JEDEC);
	return fast_spi_flash_ctrlr_reg_read(ctx, SPIBAR_PTDATA);
}

/* Fill FDATAn FIFO in preparation for a write transaction. */
static void fill_xfer_fifo(struct fast_spi_flash_ctx *ctx, const void *data,
	size_t len)
{
	/* YES! memcpy() works. FDATAn does not require 32-bit accesses. */
	memcpy((void *)(ctx->mmio_base + SPIBAR_FDATA(0)), data, len);
}

/* Drain FDATAn FIFO after a read transaction populates data. */
static void drain_xfer_fifo(struct fast_spi_flash_ctx *ctx, void *dest,
	size_t len)
{
	/* YES! memcpy() works. FDATAn does not require 32-bit accesses. */
	memcpy(dest, (void *)(ctx->mmio_base + SPIBAR_FDATA(0)), len);
}

/* Fire up a transfer using the hardware sequencer. */
static void start_hwseq_xfer(struct fast_spi_flash_ctx *ctx,
	uint32_t hsfsts_cycle, uint32_t flash_addr, size_t len)
{
	/* Make sure all W1C status bits get cleared. */
	uint32_t hsfsts = SPIBAR_HSFSTS_W1C_BITS;
	/* Set up transaction parameters. */
	hsfsts |= hsfsts_cycle & SPIBAR_HSFSTS_FCYCLE_MASK;
	hsfsts |= SPIBAR_HSFSTS_FDBC(len - 1);

	fast_spi_flash_ctrlr_reg_write(ctx, SPIBAR_FADDR, flash_addr);
	fast_spi_flash_ctrlr_reg_write(ctx, SPIBAR_HSFSTS_CTL,
				       hsfsts | SPIBAR_HSFSTS_FGO);
}

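/* Poll the sequencer until the cycle completes, errors out or times out. */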
static int wait_for_hwseq_xfer(struct fast_spi_flash_ctx *ctx,
	uint32_t flash_addr)
{
	struct stopwatch sw;
	uint32_t hsfsts;

	stopwatch_init_msecs_expire(&sw, SPIBAR_HWSEQ_XFER_TIMEOUT);
	do {
		hsfsts = fast_spi_flash_ctrlr_reg_read(ctx, SPIBAR_HSFSTS_CTL);

		if (hsfsts & SPIBAR_HSFSTS_FCERR) {
			printk(BIOS_ERR, "SPI Transaction Error at Flash Offset %x HSFSTS = 0x%08x\n",
			       flash_addr, hsfsts);
			return E_HW_ERROR;
		}

		if (hsfsts & SPIBAR_HSFSTS_FDONE)
			return SUCCESS;
	} while (!(stopwatch_expired(&sw)));

	printk(BIOS_ERR, "SPI Transaction Timeout (Exceeded %d ms) at Flash Offset %x HSFSTS = 0x%08x\n",
	       SPIBAR_HWSEQ_XFER_TIMEOUT, flash_addr, hsfsts);
	return E_TIMEOUT;
}

/* Execute FAST_SPI flash transfer. This is a blocking call. */
static int exec_sync_hwseq_xfer(struct fast_spi_flash_ctx *ctx,
	uint32_t hsfsts_cycle, uint32_t flash_addr, size_t len)
{
	start_hwseq_xfer(ctx, hsfsts_cycle, flash_addr, len);
	return wait_for_hwseq_xfer(ctx, flash_addr);
}

/*
 * Ensure read/write xfer len is not greater than SPIBAR_FDATA_FIFO_SIZE and
 * that the operation does not cross a 256-byte boundary.
 */
static size_t get_xfer_len(uint32_t addr, size_t len)
{
	size_t xfer_len = min(len, SPIBAR_FDATA_FIFO_SIZE);
	size_t bytes_left = ALIGN_UP(addr, 256) - addr;

	if (bytes_left)
		xfer_len = min(xfer_len, bytes_left);

	return xfer_len;
}

/* Flash device operations. */
static struct spi_flash boot_flash CAR_GLOBAL;

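/* Erase 'len' bytes at 'offset', preferring 64 KiB blocks when possible. */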
static int fast_spi_flash_erase(const struct spi_flash *flash,
	uint32_t offset, size_t len)
{
	int ret;
	size_t erase_size;
	uint32_t erase_cycle;

	BOILERPLATE_CREATE_CTX(ctx);

	if (!IS_ALIGNED(offset, 4 * KiB) || !IS_ALIGNED(len, 4 * KiB)) {
		printk(BIOS_ERR, "BUG! SPI erase region not sector aligned\n");
		return E_ARGUMENT;
	}

	while (len) {
		if (IS_ALIGNED(offset, 64 * KiB) && (len >= 64 * KiB)) {
			erase_size = 64 * KiB;
			erase_cycle = SPIBAR_HSFSTS_CYCLE_64K_ERASE;
		} else {
			erase_size = 4 * KiB;
			erase_cycle = SPIBAR_HSFSTS_CYCLE_4K_ERASE;
		}
		printk(BIOS_SPEW, "Erasing flash addr %x + %zu KiB\n",
		       offset, erase_size / KiB);

		ret = exec_sync_hwseq_xfer(ctx, erase_cycle, offset, 0);
		if (ret != SUCCESS)
			return ret;

		offset += erase_size;
		len -= erase_size;
	}

	return SUCCESS;
}

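/* Read 'len' bytes at 'addr' into 'buf', in FIFO-sized chunks. */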
static int fast_spi_flash_read(const struct spi_flash *flash,
	uint32_t addr, size_t len, void *buf)
{
	int ret;
	size_t xfer_len;
	uint8_t *data = buf;

	BOILERPLATE_CREATE_CTX(ctx);

	while (len) {
		xfer_len = get_xfer_len(addr, len);

		ret = exec_sync_hwseq_xfer(ctx, SPIBAR_HSFSTS_CYCLE_READ,
					   addr, xfer_len);
		if (ret != SUCCESS)
			return ret;

		drain_xfer_fifo(ctx, data, xfer_len);

		addr += xfer_len;
		data += xfer_len;
		len -= xfer_len;
	}

	return SUCCESS;
}

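/* Write 'len' bytes from 'buf' to 'addr', in FIFO-sized chunks. */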
static int fast_spi_flash_write(const struct spi_flash *flash,
	uint32_t addr, size_t len, const void *buf)
{
	int ret;
	size_t xfer_len;
	const uint8_t *data = buf;

	BOILERPLATE_CREATE_CTX(ctx);

	while (len) {
		xfer_len = get_xfer_len(addr, len);
		fill_xfer_fifo(ctx, data, xfer_len);

		ret = exec_sync_hwseq_xfer(ctx, SPIBAR_HSFSTS_CYCLE_WRITE,
					   addr, xfer_len);
		if (ret != SUCCESS)
			return ret;

		addr += xfer_len;
		data += xfer_len;
		len -= xfer_len;
	}

	return SUCCESS;
}

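/* Read the flash chip's status register via the hardware sequencer. */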
static int fast_spi_flash_status(const struct spi_flash *flash,
	uint8_t *reg)
{
	int ret;
	BOILERPLATE_CREATE_CTX(ctx);

	ret = exec_sync_hwseq_xfer(ctx, SPIBAR_HSFSTS_CYCLE_RD_STATUS, 0,
				   sizeof(*reg));
	if (ret != SUCCESS)
		return ret;

	drain_xfer_fifo(ctx, reg, sizeof(*reg));
	return ret;
}

/*
 * We can't use FDOC and FDOD to read FLCOMP, as previous platforms did.
 * For details see:
 * Ch 31, SPI: p. 194
 * The size of the flash component is always taken from the density field in
 * the SFDP table. FLCOMP.C0DEN is no longer used by the Flash Controller.
 */
struct spi_flash *spi_flash_programmer_probe(struct spi_slave *dev, int force)
{
	BOILERPLATE_CREATE_CTX(ctx);
	struct spi_flash *flash;
	uint32_t flash_bits;

	flash = car_get_var_ptr(&boot_flash);

	/*
	 * bytes = (bits + 1) / 8;
	 * But we need to do the addition in a way which doesn't overflow for
	 * 4 Gbit devices (flash_bits == 0xffffffff).
	 */
	flash_bits = fast_spi_flash_read_sfdp_param(ctx, 0x04);
	flash->size = (flash_bits >> 3) + 1;

	memcpy(&flash->spi, dev, sizeof(*dev));
	flash->name = "FAST_SPI Hardware Sequencer";

	/* Can erase both 4 KiB and 64 KiB chunks. Declare the smaller size. */
	flash->sector_size = 4 * KiB;
	/*
	 * FIXME: Get erase_cmd and status_cmd from SFDP.
	 *
	 * flash->erase_cmd = ???
	 * flash->status_cmd = ???
	 */

	flash->internal_write = fast_spi_flash_write;
	flash->internal_erase = fast_spi_flash_erase;
	flash->internal_read = fast_spi_flash_read;
	flash->internal_status = fast_spi_flash_status;

	return flash;
}

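/* Report the MMIO base and count of the Flash Protected Range registers. */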
int spi_flash_get_fpr_info(struct fpr_info *info)
{
	BOILERPLATE_CREATE_CTX(ctx);

	info->base = ctx->mmio_base + SPIBAR_FPR_BASE;
	info->max = SPIBAR_FPR_MAX;
	return 0;
}

/*
 * Minimal set of commands to read WPSR from FAST_SPI.
 * Returns 0 on success, < 0 on failure.
 */
int fast_spi_flash_read_wpsr(u8 *sr)
{
	uint8_t rdsr;
	int ret = 0;

	fast_spi_init();

	/*
	 * Pass NULL for the spi_flash struct parameter, since we are not
	 * calling the HWSEQ read_status() callback via probe.
	 */
	ret = fast_spi_flash_status(NULL, &rdsr);
	if (ret) {
		printk(BIOS_ERR, "SPI rdsr failed\n");
		return ret;
	}
	*sr = rdsr & WPSR_MASK_SRP0_BIT;

	return 0;
}