Angel Pons | 118a9c7 | 2020-04-02 23:48:34 +0200 | [diff] [blame] | 1 | /* SPDX-License-Identifier: GPL-2.0-only */ |
Aaron Durbin | 0424c95 | 2015-03-28 23:56:22 -0500 | [diff] [blame] | 2 | |
| 3 | #include <boot_device.h> |
Julius Werner | cefe89e | 2019-11-06 19:29:44 -0800 | [diff] [blame] | 4 | #include <cbmem.h> |
Aaron Durbin | 0424c95 | 2015-03-28 23:56:22 -0500 | [diff] [blame] | 5 | #include <console/console.h> |
| 6 | #include <fmap.h> |
Aaron Durbin | 0424c95 | 2015-03-28 23:56:22 -0500 | [diff] [blame] | 7 | #include <stddef.h> |
| 8 | #include <string.h> |
Julius Werner | cefe89e | 2019-11-06 19:29:44 -0800 | [diff] [blame] | 9 | #include <symbols.h> |
Aaron Durbin | 0424c95 | 2015-03-28 23:56:22 -0500 | [diff] [blame] | 10 | |
Aaron Durbin | bf1e481 | 2016-05-10 15:12:08 -0500 | [diff] [blame] | 11 | #include "fmap_config.h" |
| 12 | |
Aaron Durbin | 0424c95 | 2015-03-28 23:56:22 -0500 | [diff] [blame] | 13 | /* |
 * See https://code.google.com/archive/p/flashmap for more information on FMAP.
| 15 | */ |
| 16 | |
Arthur Heymans | dba22d2 | 2019-11-20 19:57:49 +0100 | [diff] [blame] | 17 | static int fmap_print_once; |
| 18 | static struct mem_region_device fmap_cache; |
Duncan Laurie | bc2c0a3 | 2016-02-09 09:17:56 -0800 | [diff] [blame] | 19 | |
Julius Werner | cefe89e | 2019-11-06 19:29:44 -0800 | [diff] [blame] | 20 | #define print_once(...) do { \ |
Arthur Heymans | dba22d2 | 2019-11-20 19:57:49 +0100 | [diff] [blame] | 21 | if (!fmap_print_once) \ |
Julius Werner | cefe89e | 2019-11-06 19:29:44 -0800 | [diff] [blame] | 22 | printk(__VA_ARGS__); \ |
| 23 | } while (0) |
| 24 | |
/* Return the offset of the FMAP within the boot flash, as a build-time
   constant taken from fmap_config.h (generated by fmaptool). */
uint64_t get_fmap_flash_offset(void)
{
	return FMAP_OFFSET;
}
| 29 | |
/* Compare the header's signature field against the FMAP magic.
   Returns 0 on a match (memcmp semantics), non-zero otherwise. */
static int check_signature(const struct fmap *fmap)
{
	return memcmp(fmap->signature, FMAP_SIGNATURE, sizeof(fmap->signature));
}
| 34 | |
| 35 | static void report(const struct fmap *fmap) |
| 36 | { |
| 37 | print_once(BIOS_DEBUG, "FMAP: Found \"%s\" version %d.%d at %#x.\n", |
| 38 | fmap->name, fmap->ver_major, fmap->ver_minor, FMAP_OFFSET); |
| 39 | print_once(BIOS_DEBUG, "FMAP: base = %#llx size = %#x #areas = %d\n", |
| 40 | (long long)fmap->base, fmap->size, fmap->nareas); |
Arthur Heymans | dba22d2 | 2019-11-20 19:57:49 +0100 | [diff] [blame] | 41 | fmap_print_once = 1; |
Julius Werner | cefe89e | 2019-11-06 19:29:44 -0800 | [diff] [blame] | 42 | } |
| 43 | |
/*
 * Populate the pre-RAM FMAP cache (the _fmap_cache memlayout region) and
 * register it with the caller's mem_region_device. Only meaningful in
 * romstage or earlier; later stages use the CBMEM copy instead. On any
 * failure the function simply returns without registering, leaving callers
 * to fall back to reading the FMAP from the boot device directly.
 */
static void setup_preram_cache(struct mem_region_device *cache_mrdev)
{
	if (CONFIG(NO_FMAP_CACHE))
		return;

	/* No need to use FMAP cache in SMM */
	if (ENV_SMM)
		return;

	if (!ENV_ROMSTAGE_OR_BEFORE) {
		/* We get here if ramstage makes an FMAP access before calling
		   cbmem_initialize(). We should avoid letting it come to that,
		   so print a warning. */
		print_once(BIOS_WARNING,
			"WARNING: Post-RAM FMAP access too early for cache!\n");
		return;
	}

	/* _fmap_cache is a linker-provided buffer reserved by memlayout. */
	struct fmap *fmap = (struct fmap *)_fmap_cache;
	if (!(ENV_INITIAL_STAGE)) {
		/* NOTE: This assumes that the first stage will make
		   at least one FMAP access (usually from finding CBFS). */
		if (!check_signature(fmap))
			goto register_cache;

		printk(BIOS_ERR, "ERROR: FMAP cache corrupted?!\n");
	}

	/* In case we fail below, make sure the cache is invalid. */
	memset(fmap->signature, 0, sizeof(fmap->signature));

	boot_device_init();
	const struct region_device *boot_rdev = boot_device_ro();
	if (!boot_rdev)
		return;

	/* memlayout statically guarantees that the FMAP_CACHE is big enough. */
	if (rdev_readat(boot_rdev, fmap, FMAP_OFFSET, FMAP_SIZE) != FMAP_SIZE)
		return;
	/* Validate what we just read before advertising it as a cache. */
	if (check_signature(fmap))
		return;
	report(fmap);

register_cache:
	mem_region_device_ro_init(cache_mrdev, fmap, FMAP_SIZE);
}
| 90 | |
Furquan Shaikh | b33a2b0 | 2019-09-26 23:51:46 -0700 | [diff] [blame] | 91 | static int find_fmap_directory(struct region_device *fmrd) |
Aaron Durbin | 0424c95 | 2015-03-28 23:56:22 -0500 | [diff] [blame] | 92 | { |
| 93 | const struct region_device *boot; |
| 94 | struct fmap *fmap; |
Aaron Durbin | bf1e481 | 2016-05-10 15:12:08 -0500 | [diff] [blame] | 95 | size_t offset = FMAP_OFFSET; |
Aaron Durbin | 0424c95 | 2015-03-28 23:56:22 -0500 | [diff] [blame] | 96 | |
Julius Werner | cefe89e | 2019-11-06 19:29:44 -0800 | [diff] [blame] | 97 | /* Try FMAP cache first */ |
Arthur Heymans | dba22d2 | 2019-11-20 19:57:49 +0100 | [diff] [blame] | 98 | if (!region_device_sz(&fmap_cache.rdev)) |
| 99 | setup_preram_cache(&fmap_cache); |
| 100 | if (region_device_sz(&fmap_cache.rdev)) |
| 101 | return rdev_chain_full(fmrd, &fmap_cache.rdev); |
Patrick Rudolph | 6d787c2 | 2019-09-12 13:21:37 +0200 | [diff] [blame] | 102 | |
Aaron Durbin | 0424c95 | 2015-03-28 23:56:22 -0500 | [diff] [blame] | 103 | boot_device_init(); |
| 104 | boot = boot_device_ro(); |
| 105 | |
| 106 | if (boot == NULL) |
| 107 | return -1; |
| 108 | |
Julius Werner | cefe89e | 2019-11-06 19:29:44 -0800 | [diff] [blame] | 109 | fmap = rdev_mmap(boot, offset, sizeof(struct fmap)); |
Aaron Durbin | 0424c95 | 2015-03-28 23:56:22 -0500 | [diff] [blame] | 110 | |
| 111 | if (fmap == NULL) |
| 112 | return -1; |
| 113 | |
Julius Werner | cefe89e | 2019-11-06 19:29:44 -0800 | [diff] [blame] | 114 | if (check_signature(fmap)) { |
Aaron Durbin | 0424c95 | 2015-03-28 23:56:22 -0500 | [diff] [blame] | 115 | printk(BIOS_DEBUG, "No FMAP found at %zx offset.\n", offset); |
| 116 | rdev_munmap(boot, fmap); |
| 117 | return -1; |
| 118 | } |
| 119 | |
Julius Werner | cefe89e | 2019-11-06 19:29:44 -0800 | [diff] [blame] | 120 | report(fmap); |
Aaron Durbin | 0424c95 | 2015-03-28 23:56:22 -0500 | [diff] [blame] | 121 | |
| 122 | rdev_munmap(boot, fmap); |
| 123 | |
Julius Werner | cefe89e | 2019-11-06 19:29:44 -0800 | [diff] [blame] | 124 | return rdev_chain(fmrd, boot, offset, FMAP_SIZE); |
Aaron Durbin | 0424c95 | 2015-03-28 23:56:22 -0500 | [diff] [blame] | 125 | } |
| 126 | |
| 127 | int fmap_locate_area_as_rdev(const char *name, struct region_device *area) |
| 128 | { |
| 129 | struct region ar; |
| 130 | |
| 131 | if (fmap_locate_area(name, &ar)) |
| 132 | return -1; |
| 133 | |
| 134 | return boot_device_ro_subregion(&ar, area); |
| 135 | } |
| 136 | |
Aaron Durbin | bccaab8 | 2016-08-12 12:42:04 -0500 | [diff] [blame] | 137 | int fmap_locate_area_as_rdev_rw(const char *name, struct region_device *area) |
| 138 | { |
| 139 | struct region ar; |
| 140 | |
| 141 | if (fmap_locate_area(name, &ar)) |
| 142 | return -1; |
| 143 | |
| 144 | return boot_device_rw_subregion(&ar, area); |
| 145 | } |
| 146 | |
Aaron Durbin | 0424c95 | 2015-03-28 23:56:22 -0500 | [diff] [blame] | 147 | int fmap_locate_area(const char *name, struct region *ar) |
| 148 | { |
| 149 | struct region_device fmrd; |
| 150 | size_t offset; |
| 151 | |
| 152 | if (find_fmap_directory(&fmrd)) |
| 153 | return -1; |
| 154 | |
| 155 | /* Start reading the areas just after fmap header. */ |
| 156 | offset = sizeof(struct fmap); |
| 157 | |
| 158 | while (1) { |
| 159 | struct fmap_area *area; |
| 160 | |
| 161 | area = rdev_mmap(&fmrd, offset, sizeof(*area)); |
| 162 | |
| 163 | if (area == NULL) |
| 164 | return -1; |
| 165 | |
| 166 | if (strcmp((const char *)area->name, name)) { |
| 167 | rdev_munmap(&fmrd, area); |
| 168 | offset += sizeof(struct fmap_area); |
| 169 | continue; |
| 170 | } |
| 171 | |
Duncan Laurie | bc2c0a3 | 2016-02-09 09:17:56 -0800 | [diff] [blame] | 172 | printk(BIOS_DEBUG, "FMAP: area %s found @ %x (%d bytes)\n", |
| 173 | name, area->offset, area->size); |
Aaron Durbin | 0424c95 | 2015-03-28 23:56:22 -0500 | [diff] [blame] | 174 | |
| 175 | ar->offset = area->offset; |
| 176 | ar->size = area->size; |
| 177 | |
| 178 | rdev_munmap(&fmrd, area); |
| 179 | |
| 180 | return 0; |
| 181 | } |
| 182 | |
| 183 | printk(BIOS_DEBUG, "FMAP: area %s not found\n", name); |
| 184 | |
| 185 | return -1; |
| 186 | } |
Patrick Georgi | 9952690 | 2015-07-09 11:27:44 +0200 | [diff] [blame] | 187 | |
| 188 | int fmap_find_region_name(const struct region * const ar, |
| 189 | char name[FMAP_STRLEN]) |
| 190 | { |
| 191 | struct region_device fmrd; |
| 192 | size_t offset; |
| 193 | |
| 194 | if (find_fmap_directory(&fmrd)) |
| 195 | return -1; |
| 196 | |
| 197 | /* Start reading the areas just after fmap header. */ |
| 198 | offset = sizeof(struct fmap); |
| 199 | |
| 200 | while (1) { |
| 201 | struct fmap_area *area; |
| 202 | |
| 203 | area = rdev_mmap(&fmrd, offset, sizeof(*area)); |
| 204 | |
| 205 | if (area == NULL) |
| 206 | return -1; |
| 207 | |
| 208 | if ((ar->offset != area->offset) || |
| 209 | (ar->size != area->size)) { |
| 210 | rdev_munmap(&fmrd, area); |
| 211 | offset += sizeof(struct fmap_area); |
| 212 | continue; |
| 213 | } |
| 214 | |
| 215 | printk(BIOS_DEBUG, "FMAP: area (%zx, %zx) found, named %s\n", |
| 216 | ar->offset, ar->size, area->name); |
| 217 | |
| 218 | memcpy(name, area->name, FMAP_STRLEN); |
| 219 | |
| 220 | rdev_munmap(&fmrd, area); |
| 221 | |
| 222 | return 0; |
| 223 | } |
| 224 | |
| 225 | printk(BIOS_DEBUG, "FMAP: area (%zx, %zx) not found\n", |
| 226 | ar->offset, ar->size); |
| 227 | |
| 228 | return -1; |
| 229 | } |
T Michael Turney | 19fcc89 | 2019-03-20 14:37:34 -0700 | [diff] [blame] | 230 | |
| 231 | ssize_t fmap_read_area(const char *name, void *buffer, size_t size) |
| 232 | { |
| 233 | struct region_device rdev; |
| 234 | if (fmap_locate_area_as_rdev(name, &rdev)) |
| 235 | return -1; |
| 236 | return rdev_readat(&rdev, buffer, 0, |
| 237 | MIN(size, region_device_sz(&rdev))); |
| 238 | } |
| 239 | |
| 240 | ssize_t fmap_overwrite_area(const char *name, const void *buffer, size_t size) |
| 241 | { |
| 242 | struct region_device rdev; |
| 243 | |
| 244 | if (fmap_locate_area_as_rdev_rw(name, &rdev)) |
| 245 | return -1; |
| 246 | if (size > region_device_sz(&rdev)) |
| 247 | return -1; |
| 248 | if (rdev_eraseat(&rdev, 0, region_device_sz(&rdev)) < 0) |
| 249 | return -1; |
| 250 | return rdev_writeat(&rdev, buffer, 0, size); |
| 251 | } |
Patrick Rudolph | 6d787c2 | 2019-09-12 13:21:37 +0200 | [diff] [blame] | 252 | |
Julius Werner | cefe89e | 2019-11-06 19:29:44 -0800 | [diff] [blame] | 253 | static void fmap_register_cbmem_cache(int unused) |
Patrick Rudolph | 6d787c2 | 2019-09-12 13:21:37 +0200 | [diff] [blame] | 254 | { |
| 255 | const struct cbmem_entry *e; |
Patrick Rudolph | 6d787c2 | 2019-09-12 13:21:37 +0200 | [diff] [blame] | 256 | |
| 257 | /* Find the FMAP cache installed by previous stage */ |
| 258 | e = cbmem_entry_find(CBMEM_ID_FMAP); |
| 259 | /* Don't set fmap_cache so that find_fmap_directory will use regular path */ |
| 260 | if (!e) |
| 261 | return; |
| 262 | |
Arthur Heymans | dba22d2 | 2019-11-20 19:57:49 +0100 | [diff] [blame] | 263 | mem_region_device_ro_init(&fmap_cache, cbmem_entry_start(e), cbmem_entry_size(e)); |
Patrick Rudolph | 6d787c2 | 2019-09-12 13:21:37 +0200 | [diff] [blame] | 264 | } |
| 265 | |
| 266 | /* |
| 267 | * The main reason to copy the FMAP into CBMEM is to make it available to the |
| 268 | * OS on every architecture. As side effect use the CBMEM copy as cache. |
| 269 | */ |
Julius Werner | cefe89e | 2019-11-06 19:29:44 -0800 | [diff] [blame] | 270 | static void fmap_setup_cbmem_cache(int unused) |
Patrick Rudolph | 6d787c2 | 2019-09-12 13:21:37 +0200 | [diff] [blame] | 271 | { |
| 272 | struct region_device fmrd; |
| 273 | |
| 274 | if (find_fmap_directory(&fmrd)) |
| 275 | return; |
| 276 | |
| 277 | /* Reloads the FMAP even on ACPI S3 resume */ |
| 278 | const size_t s = region_device_sz(&fmrd); |
| 279 | struct fmap *fmap = cbmem_add(CBMEM_ID_FMAP, s); |
| 280 | if (!fmap) { |
| 281 | printk(BIOS_ERR, "ERROR: Failed to allocate CBMEM\n"); |
| 282 | return; |
| 283 | } |
| 284 | |
| 285 | const ssize_t ret = rdev_readat(&fmrd, fmap, 0, s); |
| 286 | if (ret != s) { |
| 287 | printk(BIOS_ERR, "ERROR: Failed to read FMAP into CBMEM\n"); |
| 288 | cbmem_entry_remove(cbmem_entry_find(CBMEM_ID_FMAP)); |
| 289 | return; |
| 290 | } |
| 291 | |
| 292 | /* Finally advertise the cache for the current stage */ |
Julius Werner | cefe89e | 2019-11-06 19:29:44 -0800 | [diff] [blame] | 293 | fmap_register_cbmem_cache(unused); |
Patrick Rudolph | 6d787c2 | 2019-09-12 13:21:37 +0200 | [diff] [blame] | 294 | } |
| 295 | |
Julius Werner | cefe89e | 2019-11-06 19:29:44 -0800 | [diff] [blame] | 296 | ROMSTAGE_CBMEM_INIT_HOOK(fmap_setup_cbmem_cache) |
| 297 | RAMSTAGE_CBMEM_INIT_HOOK(fmap_register_cbmem_cache) |
| 298 | POSTCAR_CBMEM_INIT_HOOK(fmap_register_cbmem_cache) |