blob: 799262953e1523efbda15662c3c4b6644c4de682 [file] [log] [blame]
Patrick Georgiac959032020-05-05 22:49:26 +02001/* SPDX-License-Identifier: GPL-2.0-or-later */
Stefan Reinauer1c56d9b2012-05-10 11:27:32 -07002
Furquan Shaikhc28984d2016-11-20 21:04:00 -08003#include <console/console.h>
Elyes HAOUAS361a9352019-12-18 21:26:33 +01004#include <commonlib/helpers.h>
Stefan Reinauer1c56d9b2012-05-10 11:27:32 -07005#include <spi_flash.h>
Furquan Shaikhc28984d2016-11-20 21:04:00 -08006#include <spi-generic.h>
Patrick Rudolphe63a5f12018-03-12 11:34:53 +01007#include <delay.h>
8#include <lib.h>
Edward O'Callaghanc4561e22014-06-26 15:02:40 +10009
Stefan Reinauer1c56d9b2012-05-10 11:27:32 -070010#include "spi_flash_internal.h"
Frans Hendriks31eac4d2019-03-04 15:16:43 +010011#include "spi_winbond.h"
Stefan Reinauer1c56d9b2012-05-10 11:27:32 -070012
/*
 * Winbond Status Register 1 layout.
 * Parts differ in the width of the block-protect field: some use 3 BP bits
 * plus a SEC bit, others use 4 BP bits and no SEC bit. The two struct views
 * below cover both variants; which one applies is selected at runtime via
 * spi_flash_part_id.bp_bits.
 */
union status_reg1 {
	uint8_t u;	/* raw register byte */
	/* View for parts with 3 block-protect bits (bp_bits == 3). */
	struct {
		uint8_t busy : 1;	/* erase/program in progress */
		uint8_t wel : 1;	/* write enable latch */
		uint8_t bp : 3;		/* block protect bits */
		uint8_t tb : 1;		/* top/bottom protect select */
		uint8_t sec : 1;	/* sector protect select */
		uint8_t srp0 : 1;	/* status register protect bit 0 */
	} bp3;
	/* View for parts with 4 block-protect bits (bp_bits == 4, no SEC). */
	struct {
		uint8_t busy : 1;
		uint8_t wel : 1;
		uint8_t bp : 4;
		uint8_t tb : 1;
		uint8_t srp0 : 1;
	} bp4;
};
31
/* Winbond Status Register 2 layout (same on all parts handled here). */
union status_reg2 {
	uint8_t u;	/* raw register byte */
	struct {
		uint8_t srp1 : 1;	/* status register protect bit 1 */
		uint8_t qe : 1;		/* quad enable */
		uint8_t res : 1;	/* reserved */
		uint8_t lb : 3;		/* security register lock bits */
		uint8_t cmp : 1;	/* complement protect */
		uint8_t sus : 1;	/* erase/program suspend status */
	};
};
43
/*
 * Both status registers as they travel in the 16-bit payload of the legacy
 * WRSR command: reg1 in the low byte, reg2 in the high byte. The byte order
 * is swapped on big-endian hosts so that the u16 view always matches the
 * on-wire layout.
 */
struct status_regs {
	union {
		struct {
#if defined(__BIG_ENDIAN)
			union status_reg2 reg2;
			union status_reg1 reg1;
#else
			union status_reg1 reg1;
			union status_reg2 reg2;
#endif
		};
		u16 u;	/* both registers as one 16-bit value */
	};
};
58
/*
 * Known Winbond parts, matched on the two device-ID bytes that follow the
 * vendor ID (see match_id_mask in spi_flash_winbond_vi below).
 * nr_sectors_shift is log2 of the sector count (sector size is set by
 * sector_size_kib_shift in the vendor info). Where the write-protection
 * scheme is supported, protection_granularity_shift/bp_bits describe it;
 * entries without those fields cannot be used with the protection ops.
 */
static const struct spi_flash_part_id flash_table[] = {
	{
		/* W25P80 */
		.id[0] = 0x2014,
		.nr_sectors_shift = 8,
	},
	{
		/* W25P16 */
		.id[0] = 0x2015,
		.nr_sectors_shift = 9,
	},
	{
		/* W25P32 */
		.id[0] = 0x2016,
		.nr_sectors_shift = 10,
	},
	{
		/* W25X80 */
		.id[0] = 0x3014,
		.nr_sectors_shift = 8,
		.fast_read_dual_output_support = 1,
	},
	{
		/* W25X16 */
		.id[0] = 0x3015,
		.nr_sectors_shift = 9,
		.fast_read_dual_output_support = 1,
	},
	{
		/* W25X32 */
		.id[0] = 0x3016,
		.nr_sectors_shift = 10,
		.fast_read_dual_output_support = 1,
	},
	{
		/* W25X64 */
		.id[0] = 0x3017,
		.nr_sectors_shift = 11,
		.fast_read_dual_output_support = 1,
	},
	{
		/* W25Q80_V */
		.id[0] = 0x4014,
		.nr_sectors_shift = 8,
		.fast_read_dual_output_support = 1,
		.fast_read_dual_io_support = 1,
	},
	{
		/* W25Q16_V */
		.id[0] = 0x4015,
		.nr_sectors_shift = 9,
		.fast_read_dual_output_support = 1,
		.fast_read_dual_io_support = 1,
		.protection_granularity_shift = 16,
		.bp_bits = 3,
	},
	{
		/* W25Q16DW */
		.id[0] = 0x6015,
		.nr_sectors_shift = 9,
		.fast_read_dual_output_support = 1,
		.fast_read_dual_io_support = 1,
		.protection_granularity_shift = 16,
		.bp_bits = 3,
	},
	{
		/* W25Q32_V */
		.id[0] = 0x4016,
		.nr_sectors_shift = 10,
		.fast_read_dual_output_support = 1,
		.fast_read_dual_io_support = 1,
		.protection_granularity_shift = 16,
		.bp_bits = 3,
	},
	{
		/* W25Q32DW */
		.id[0] = 0x6016,
		.nr_sectors_shift = 10,
		.fast_read_dual_output_support = 1,
		.fast_read_dual_io_support = 1,
		.protection_granularity_shift = 16,
		.bp_bits = 3,
	},
	{
		/* W25Q64_V */
		.id[0] = 0x4017,
		.nr_sectors_shift = 11,
		.fast_read_dual_output_support = 1,
		.fast_read_dual_io_support = 1,
		.protection_granularity_shift = 17,
		.bp_bits = 3,
	},
	{
		/* W25Q64DW */
		.id[0] = 0x6017,
		.nr_sectors_shift = 11,
		.fast_read_dual_output_support = 1,
		.fast_read_dual_io_support = 1,
		.protection_granularity_shift = 17,
		.bp_bits = 3,
	},
	{
		/* W25Q64JW */
		.id[0] = 0x8017,
		.nr_sectors_shift = 11,
		.fast_read_dual_output_support = 1,
		.fast_read_dual_io_support = 1,
		.protection_granularity_shift = 17,
		.bp_bits = 3,
	},
	{
		/* W25Q128_V */
		.id[0] = 0x4018,
		.nr_sectors_shift = 12,
		.fast_read_dual_output_support = 1,
		.fast_read_dual_io_support = 1,
		.protection_granularity_shift = 18,
		.bp_bits = 3,
	},
	{
		/* W25Q128FW */
		.id[0] = 0x6018,
		.nr_sectors_shift = 12,
		.fast_read_dual_output_support = 1,
		.fast_read_dual_io_support = 1,
		.protection_granularity_shift = 18,
		.bp_bits = 3,
	},
	{
		/* W25Q128J */
		.id[0] = 0x7018,
		.nr_sectors_shift = 12,
		.fast_read_dual_output_support = 1,
		.fast_read_dual_io_support = 1,
		.protection_granularity_shift = 18,
		.bp_bits = 3,
	},
	{
		/* W25Q128JW */
		.id[0] = 0x8018,
		.nr_sectors_shift = 12,
		.fast_read_dual_output_support = 1,
		.fast_read_dual_io_support = 1,
		.protection_granularity_shift = 18,
		.bp_bits = 3,
	},
	{
		/* W25Q512NW-IM */
		.id[0] = 0x8020,
		.nr_sectors_shift = 14,
		.fast_read_dual_output_support = 1,
		.fast_read_dual_io_support = 1,
		.protection_granularity_shift = 16,
		.bp_bits = 4,
	},
	{
		/* W25Q256_V */
		.id[0] = 0x4019,
		.nr_sectors_shift = 13,
		.fast_read_dual_output_support = 1,
		.fast_read_dual_io_support = 1,
		.protection_granularity_shift = 16,
		.bp_bits = 4,
	},
	{
		/* W25Q256J */
		.id[0] = 0x7019,
		.nr_sectors_shift = 13,
		.fast_read_dual_output_support = 1,
		.fast_read_dual_io_support = 1,
		.protection_granularity_shift = 16,
		.bp_bits = 4,
	},
	{
		/* W25Q256JW */
		.id[0] = 0x6019,
		.nr_sectors_shift = 13,
		.fast_read_dual_output_support = 1,
		.fast_read_dual_io_support = 1,
		.protection_granularity_shift = 16,
		.bp_bits = 4,
	},
	{
		/* W25Q256JW_DTR */
		.id[0] = 0x8019,
		.nr_sectors_shift = 13,
		.fast_read_dual_output_support = 1,
		.fast_read_dual_io_support = 1,
		.protection_granularity_shift = 16,
		.bp_bits = 4,
	},
};
251
Patrick Rudolph0f8bf022018-03-09 14:20:25 +0100252/*
253 * Convert BPx, TB and CMP to a region.
254 * SEC (if available) must be zero.
255 */
256static void winbond_bpbits_to_region(const size_t granularity,
257 const u8 bp,
258 bool tb,
259 const bool cmp,
260 const size_t flash_size,
261 struct region *out)
262{
263 size_t protected_size =
Elyes HAOUAS361a9352019-12-18 21:26:33 +0100264 MIN(bp ? granularity << (bp - 1) : 0, flash_size);
Patrick Rudolph0f8bf022018-03-09 14:20:25 +0100265
266 if (cmp) {
267 protected_size = flash_size - protected_size;
268 tb = !tb;
269 }
270
Patrick Rudolphe7360152018-12-03 09:41:06 +0100271 out->offset = tb ? 0 : flash_size - protected_size;
Patrick Rudolph0f8bf022018-03-09 14:20:25 +0100272 out->size = protected_size;
273}
274
/*
 * Available on all devices.
 * Read block protect bits from Status/Status2 Reg.
 * Converts block protection bits to a region.
 *
 * Returns:
 * -1 on error
 * 1 if region is covered by write protection
 * 0 if a part of region isn't covered by write protection
 */
static int winbond_get_write_protection(const struct spi_flash *flash,
					const struct region *region)
{
	const struct spi_flash_part_id *params;
	struct region wp_region;
	union status_reg2 reg2;
	u8 bp, tb;
	int ret;

	params = flash->part;

	/* Without part data we don't know the BP layout. */
	if (!params)
		return -1;

	const size_t granularity = (1 << params->protection_granularity_shift);

	union status_reg1 reg1 = { .u = 0 };

	/* Read status register 1 to get BP/TB (and SEC where present). */
	ret = spi_flash_cmd(&flash->spi, flash->status_cmd, &reg1.u,
			    sizeof(reg1.u));
	if (ret)
		return ret;

	if (params->bp_bits == 3) {
		if (reg1.bp3.sec) {
			// FIXME: sector-granularity protection not supported
			return -1;
		}

		bp = reg1.bp3.bp;
		tb = reg1.bp3.tb;
	} else if (params->bp_bits == 4) {
		bp = reg1.bp4.bp;
		tb = reg1.bp4.tb;
	} else {
		// FIXME: BP layouts other than 3 or 4 bits not supported
		return -1;
	}

	/* Status register 2 carries the CMP (complement) bit. */
	ret = spi_flash_cmd(&flash->spi, CMD_W25_RDSR2, &reg2.u,
			    sizeof(reg2.u));
	if (ret)
		return ret;

	winbond_bpbits_to_region(granularity, bp, tb, reg2.cmp, flash->size,
				 &wp_region);

	if (!region_sz(&wp_region)) {
		printk(BIOS_DEBUG, "WINBOND: flash isn't protected\n");

		return 0;
	}

	printk(BIOS_DEBUG, "WINBOND: flash protected range 0x%08zx-0x%08zx\n",
	       region_offset(&wp_region), region_end(&wp_region));

	/* 1 only if the queried region lies fully inside the protected one. */
	return region_is_subregion(&wp_region, region);
}
343
/**
 * Common method to write some bit of the status register 1 & 2 at the same
 * time. Only change bits that are one in @mask.
 * Compare the final result to make sure that the register isn't locked.
 * Returns early without writing if the masked bits already match @val.
 *
 * @param mask: The bits that are affected by @val
 * @param val: The bits to write
 * @param non_volatile: Make setting permanent
 *
 * @return 0 on success
 */
static int winbond_flash_cmd_status(const struct spi_flash *flash,
				    const u16 mask,
				    const u16 val,
				    const bool non_volatile)
{
	/* WRSR command byte followed by SREG1 (low) and SREG2 (high). */
	struct {
		u8 cmd;
		u16 sreg;
	} __packed cmdbuf;
	u8 reg8;
	int ret;

	if (!flash)
		return -1;

	/* Read current SREG1/SREG2 into the 16-bit working copy. */
	ret = spi_flash_cmd(&flash->spi, CMD_W25_RDSR, &reg8, sizeof(reg8));
	if (ret)
		return ret;

	cmdbuf.sreg = reg8;

	ret = spi_flash_cmd(&flash->spi, CMD_W25_RDSR2, &reg8, sizeof(reg8));
	if (ret)
		return ret;

	cmdbuf.sreg |= reg8 << 8;

	/* Nothing to do if the masked bits already have the desired value. */
	if ((val & mask) == (cmdbuf.sreg & mask))
		return 0;

	/* Enable writing; the volatile variant avoids wearing the NV cells. */
	if (non_volatile) {
		ret = spi_flash_cmd(&flash->spi, CMD_W25_WREN, NULL, 0);
	} else {
		ret = spi_flash_cmd(&flash->spi, CMD_VOLATILE_SREG_WREN, NULL,
				    0);
	}
	if (ret)
		return ret;

	cmdbuf.sreg &= ~mask;
	cmdbuf.sreg |= val & mask;
	cmdbuf.cmd = CMD_W25_WRSR;

	/* Legacy method of writing status register 1 & 2 in one command */
	ret = spi_flash_cmd_write(&flash->spi, (u8 *)&cmdbuf, sizeof(cmdbuf),
				  NULL, 0);
	if (ret)
		return ret;

	if (non_volatile) {
		/* Wait tw (non-volatile status register write cycle time) */
		ret = spi_flash_cmd_wait_ready(flash, WINBOND_FLASH_TIMEOUT);
		if (ret)
			return ret;
	} else {
		/* Wait tSHSL */
		udelay(1);
	}

	/* Now read the status register back to make sure it's not locked */
	ret = spi_flash_cmd(&flash->spi, CMD_W25_RDSR, &reg8, sizeof(reg8));
	if (ret)
		return ret;

	cmdbuf.sreg = reg8;

	ret = spi_flash_cmd(&flash->spi, CMD_W25_RDSR2, &reg8, sizeof(reg8));
	if (ret)
		return ret;

	cmdbuf.sreg |= reg8 << 8;

	printk(BIOS_DEBUG, "WINBOND: SREG=%02x SREG2=%02x\n",
	       cmdbuf.sreg & 0xff,
	       cmdbuf.sreg >> 8);

	/* Compare against expected result; mismatch means SREG is protected */
	if ((val & mask) != (cmdbuf.sreg & mask)) {
		printk(BIOS_ERR, "WINBOND: SREG is locked!\n");
		ret = -1;
	}

	return ret;
}
439
/*
 * Available on all devices.
 * Protect a region starting from start of flash or end of flash.
 * The caller must provide a supported protected region size.
 * SEC isn't supported and set to zero.
 * Write block protect bits to Status/Status2 Reg.
 * Optionally lock the status register if lock_sreg is set with the provided
 * mode.
 *
 * @param flash: The flash to operate on
 * @param region: The region to write protect
 * @param mode: Optional status register lock-down mode
 *
 * @return 0 on success
 */
static int
winbond_set_write_protection(const struct spi_flash *flash,
			     const struct region *region,
			     const enum spi_flash_status_reg_lockdown mode)
{
	const struct spi_flash_part_id *params;
	struct status_regs mask, val;
	struct region wp_region;
	u8 cmp, bp, tb;
	int ret;

	/* Need to touch TOP or BOTTOM: the BP scheme can only anchor there */
	if (region_offset(region) != 0 && region_end(region) != flash->size)
		return -1;

	params = flash->part;

	if (!params)
		return -1;

	if (params->bp_bits != 3 && params->bp_bits != 4) {
		/* FIXME: not implemented */
		return -1;
	}

	wp_region = *region;

	/* TB=1 protects from the bottom (offset 0), TB=0 from the top. */
	if (region_offset(&wp_region) == 0)
		tb = 1;
	else
		tb = 0;

	/*
	 * More than half the flash: express it as the complement (CMP=1) of
	 * the unprotected remainder anchored at the opposite end.
	 */
	if (region_sz(&wp_region) > flash->size / 2) {
		cmp = 1;
		/*
		 * NOTE(review): for tb == 0 this puts the complement's offset
		 * at region_sz rather than 0 (and vice versa), which looks
		 * inverted — but only region_sz(&wp_region) is consumed
		 * below, so the offset value is currently harmless. Confirm
		 * before relying on wp_region.offset here.
		 */
		wp_region.offset = tb ? 0 : region_sz(&wp_region);
		wp_region.size = flash->size - region_sz(&wp_region);
		tb = !tb;
	} else {
		cmp = 0;
	}

	/* BP encodes log2 of the protected size relative to granularity. */
	if (region_sz(&wp_region) == 0) {
		bp = 0;
	} else if (IS_POWER_OF_2(region_sz(&wp_region)) &&
		   (region_sz(&wp_region) >=
		    (1 << params->protection_granularity_shift))) {
		bp = log2(region_sz(&wp_region)) -
		     params->protection_granularity_shift + 1;
	} else {
		printk(BIOS_ERR, "WINBOND: ERROR: unsupported region size\n");
		return -1;
	}

	/* Write block protection bits */

	if (params->bp_bits == 3) {
		val.reg1 = (union status_reg1) {
			.bp3 = { .bp = bp, .tb = tb, .sec = 0 }
		};
		mask.reg1 = (union status_reg1) {
			.bp3 = { .bp = ~0, .tb = 1, .sec = 1 }
		};
	} else {
		val.reg1 = (union status_reg1) {
			.bp4 = { .bp = bp, .tb = tb }
		};
		mask.reg1 = (union status_reg1) {
			.bp4 = { .bp = ~0, .tb = 1 }
		};
	}

	val.reg2 = (union status_reg2) { .cmp = cmp };
	mask.reg2 = (union status_reg2) { .cmp = 1 };

	if (mode != SPI_WRITE_PROTECTION_PRESERVE) {
		u8 srp;
		/* Map lock-down mode to the SRP1:SRP0 bit pair. */
		switch (mode) {
		case SPI_WRITE_PROTECTION_NONE:
			srp = 0;
			break;
		case SPI_WRITE_PROTECTION_PIN:
			srp = 1;
			break;
		case SPI_WRITE_PROTECTION_REBOOT:
			srp = 2;
			break;
		case SPI_WRITE_PROTECTION_PERMANENT:
			srp = 3;
			break;
		default:
			return -1;
		}

		if (params->bp_bits == 3) {
			val.reg1.bp3.srp0 = !!(srp & 1);
			mask.reg1.bp3.srp0 = 1;
		} else {
			val.reg1.bp4.srp0 = !!(srp & 1);
			mask.reg1.bp4.srp0 = 1;
		}

		val.reg2.srp1 = !!(srp & 2);
		mask.reg2.srp1 = 1;
	}

	/* Commit both registers non-volatile so protection survives reset. */
	ret = winbond_flash_cmd_status(flash, mask.u, val.u, true);
	if (ret)
		return ret;

	printk(BIOS_DEBUG, "WINBOND: write-protection set to range "
	       "0x%08zx-0x%08zx\n", region_offset(region), region_end(region));

	return ret;
}
Patrick Rudolph0f8bf022018-03-09 14:20:25 +0100569
/* Write-protection hooks exported through the vendor info below. */
static const struct spi_flash_protection_ops spi_flash_protection_ops = {
	.get_write = winbond_get_write_protection,
	.set_write = winbond_set_write_protection,
};
574
/*
 * Winbond vendor descriptor: 256-byte pages (1 << 8), 4 KiB sectors
 * (1 << 2 KiB), parts matched on the full 16-bit device ID against
 * flash_table, erased with the 0x20 sector-erase opcode.
 */
const struct spi_flash_vendor_info spi_flash_winbond_vi = {
	.id = VENDOR_ID_WINBOND,
	.page_size_shift = 8,
	.sector_size_kib_shift = 2,
	.match_id_mask[0] = 0xffff,
	.ids = flash_table,
	.nr_part_ids = ARRAY_SIZE(flash_table),
	.desc = &spi_flash_pp_0x20_sector_desc,
	.prot_ops = &spi_flash_protection_ops,
};