/* SPDX-License-Identifier: GPL-2.0-only */

#include <assert.h>
#include <boot_device.h>
#include <cbfs.h>
#include <cbfs_private.h>
#include <cbmem.h>
#include <commonlib/bsd/compression.h>
#include <commonlib/endian.h>
#include <console/console.h>
#include <fmap.h>
#include <lib.h>
#include <metadata_hash.h>
#include <security/tpm/tspi/crtm.h>
#include <security/vboot/vboot_common.h>
#include <stdlib.h>
#include <string.h>
#include <symbols.h>
#include <timestamp.h>

cb_err_t cbfs_boot_lookup(const char *name, bool force_ro,
			  union cbfs_mdata *mdata, struct region_device *rdev)
{
	const struct cbfs_boot_device *cbd = cbfs_get_boot_device(force_ro);
	if (!cbd)
		return CB_ERR;

	size_t data_offset;
	cb_err_t err = CB_CBFS_CACHE_FULL;
	if (!CONFIG(NO_CBFS_MCACHE) && !ENV_SMM)
		err = cbfs_mcache_lookup(cbd->mcache, cbd->mcache_size,
					 name, mdata, &data_offset);
	if (err == CB_CBFS_CACHE_FULL) {
		struct vb2_hash *metadata_hash = NULL;
		if (CONFIG(TOCTOU_SAFETY)) {
			if (ENV_SMM)	/* Cannot provide TOCTOU safety for SMM */
				dead_code();
			/* We can only reach this for the RW CBFS -- an mcache
			   overflow in the RO CBFS would have been caught when
			   building the mcache in cbfs_get_boot_device().
			   (Note that TOCTOU_SAFETY implies !NO_CBFS_MCACHE.) */
			assert(cbd == vboot_get_cbfs_boot_device());
			/* TODO: set metadata_hash to RW metadata hash here. */
		}
		err = cbfs_lookup(&cbd->rdev, name, mdata, &data_offset,
				  metadata_hash);
	}

	if (CONFIG(VBOOT_ENABLE_CBFS_FALLBACK) && !force_ro &&
	    err == CB_CBFS_NOT_FOUND) {
		printk(BIOS_INFO, "CBFS: Fall back to RO region for %s\n",
		       name);
		return cbfs_boot_lookup(name, true, mdata, rdev);
	}
	if (err) {
		if (err == CB_CBFS_NOT_FOUND)
			printk(BIOS_WARNING, "CBFS: '%s' not found.\n", name);
		else if (err == CB_CBFS_HASH_MISMATCH)
			printk(BIOS_ERR, "CBFS ERROR: metadata hash mismatch!\n");
		else
			printk(BIOS_ERR,
			       "CBFS ERROR: error %d when looking up '%s'\n",
			       err, name);
		return err;
	}

	if (rdev_chain(rdev, &cbd->rdev, data_offset, be32toh(mdata->h.len)))
		return CB_ERR;

	if (tspi_measure_cbfs_hook(rdev, name, be32toh(mdata->h.type)))
		return CB_ERR;

	return CB_SUCCESS;
}

int cbfs_boot_locate(struct cbfsf *fh, const char *name, uint32_t *type)
{
	if (cbfs_boot_lookup(name, false, &fh->mdata, &fh->data))
		return -1;

	size_t msize = be32toh(fh->mdata.h.offset);
	if (rdev_chain(&fh->metadata, &addrspace_32bit.rdev,
		       (uintptr_t)&fh->mdata, msize))
		return -1;

	if (type) {
		if (!*type)
			*type = be32toh(fh->mdata.h.type);
		else if (*type != be32toh(fh->mdata.h.type))
			return -1;
	}

	return 0;
}
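
/*
 * Usage sketch for cbfs_boot_locate(); the file name and type below are
 * illustrative placeholders, not taken from this file. The data region
 * device filled in by cbfs_boot_lookup() is reached through the handle:
 *
 *	struct cbfsf fh;
 *	uint32_t type = CBFS_TYPE_RAW;
 *	if (cbfs_boot_locate(&fh, "ecrw", &type) == 0) {
 *		size_t size = region_device_sz(&fh.data);
 *		void *p = rdev_mmap_full(&fh.data);
 *		...use size bytes at p, then rdev_munmap(&fh.data, p)...
 *	}
 */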

static void *_cbfs_map(const char *name, size_t *size_out, bool force_ro)
{
	struct region_device rdev;
	union cbfs_mdata mdata;

	if (cbfs_boot_lookup(name, force_ro, &mdata, &rdev))
		return NULL;

	if (size_out != NULL)
		*size_out = region_device_sz(&rdev);

	return rdev_mmap_full(&rdev);
}

void *cbfs_map(const char *name, size_t *size_out)
{
	return _cbfs_map(name, size_out, false);
}

void *cbfs_ro_map(const char *name, size_t *size_out)
{
	return _cbfs_map(name, size_out, true);
}

int cbfs_unmap(void *mapping)
{
	/* This works because munmap() only works on the root rdev and never
	   cares about which chained subregion something was mapped from. */
	return rdev_munmap(boot_device_ro(), mapping);
}
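
/*
 * Typical map/unmap pattern for the functions above (the file name is an
 * illustrative placeholder):
 *
 *	size_t size;
 *	void *p = cbfs_map("logo.bmp", &size);
 *	if (p) {
 *		...consume size bytes at p...
 *		cbfs_unmap(p);
 *	}
 */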

int cbfs_locate_file_in_region(struct cbfsf *fh, const char *region_name,
			       const char *name, uint32_t *type)
{
	struct region_device rdev;
	int ret = 0;
	if (fmap_locate_area_as_rdev(region_name, &rdev)) {
		LOG("%s region not found while looking for %s\n",
		    region_name, name);
		return -1;
	}

	uint32_t dummy_type = 0;
	if (!type)
		type = &dummy_type;

	ret = cbfs_locate(fh, &rdev, name, type);
	if (!ret)
		if (tspi_measure_cbfs_hook(&rdev, name, *type))
			return -1;
	return ret;
}

static inline bool fsps_env(void)
{
	/* FSP-S is assumed to be loaded in ramstage. */
	if (ENV_RAMSTAGE)
		return true;
	return false;
}

static inline bool fspm_env(void)
{
	/* FSP-M is assumed to be loaded in romstage. */
	if (ENV_ROMSTAGE)
		return true;
	return false;
}

static inline bool cbfs_lz4_enabled(void)
{
	if (fsps_env() && CONFIG(FSP_COMPRESS_FSP_S_LZ4))
		return true;
	if (fspm_env() && CONFIG(FSP_COMPRESS_FSP_M_LZ4))
		return true;

	if ((ENV_BOOTBLOCK || ENV_SEPARATE_VERSTAGE) && !CONFIG(COMPRESS_PRERAM_STAGES))
		return false;

	return true;
}

static inline bool cbfs_lzma_enabled(void)
{
	if (fsps_env() && CONFIG(FSP_COMPRESS_FSP_S_LZMA))
		return true;
	if (fspm_env() && CONFIG(FSP_COMPRESS_FSP_M_LZMA))
		return true;
	/* We assume here romstage and postcar are never compressed. */
	if (ENV_BOOTBLOCK || ENV_SEPARATE_VERSTAGE)
		return false;
	if (ENV_ROMSTAGE && CONFIG(POSTCAR_STAGE))
		return false;
	if ((ENV_ROMSTAGE || ENV_POSTCAR)
	    && !CONFIG(COMPRESS_RAMSTAGE))
		return false;
	return true;
}

size_t cbfs_load_and_decompress(const struct region_device *rdev, size_t offset,
	size_t in_size, void *buffer, size_t buffer_size, uint32_t compression)
{
	size_t out_size;
	void *map;

	switch (compression) {
	case CBFS_COMPRESS_NONE:
		if (buffer_size < in_size)
			return 0;
		if (rdev_readat(rdev, buffer, offset, in_size) != in_size)
			return 0;
		return in_size;

	case CBFS_COMPRESS_LZ4:
		if (!cbfs_lz4_enabled())
			return 0;

		/* cbfs_stage_load_and_decompress() takes care of in-place
		   lz4 decompression by setting up the rdev to be in memory. */
		map = rdev_mmap(rdev, offset, in_size);
		if (map == NULL)
			return 0;

		timestamp_add_now(TS_START_ULZ4F);
		out_size = ulz4fn(map, in_size, buffer, buffer_size);
		timestamp_add_now(TS_END_ULZ4F);

		rdev_munmap(rdev, map);

		return out_size;

	case CBFS_COMPRESS_LZMA:
		if (!cbfs_lzma_enabled())
			return 0;
		map = rdev_mmap(rdev, offset, in_size);
		if (map == NULL)
			return 0;

		/* Note: timestamp not useful for memory-mapped media (x86) */
		timestamp_add_now(TS_START_ULZMA);
		out_size = ulzman(map, in_size, buffer, buffer_size);
		timestamp_add_now(TS_END_ULZMA);

		rdev_munmap(rdev, map);

		return out_size;

	default:
		return 0;
	}
}

static size_t cbfs_stage_load_and_decompress(const struct region_device *rdev,
	size_t offset, size_t in_size, void *buffer, size_t buffer_size,
	uint32_t compression)
{
	struct region_device rdev_src;

	if (compression == CBFS_COMPRESS_LZ4) {
		if (!cbfs_lz4_enabled())
			return 0;
		/* Load the compressed image to the end of the available memory
		 * area for in-place decompression. It is the responsibility of
		 * the caller to ensure that buffer_size is large enough
		 * (see compression.h, guaranteed by cbfstool for stages). */
		void *compr_start = buffer + buffer_size - in_size;
		if (rdev_readat(rdev, compr_start, offset, in_size) != in_size)
			return 0;
		/* Create a region device backed by memory. */
		rdev_chain(&rdev_src, &addrspace_32bit.rdev,
			   (uintptr_t)compr_start, in_size);

		return cbfs_load_and_decompress(&rdev_src, 0, in_size, buffer,
						buffer_size, compression);
	}

	/* All other algorithms can use the generic implementation. */
	return cbfs_load_and_decompress(rdev, offset, in_size, buffer,
					buffer_size, compression);
}
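
/*
 * Worked example of the in-place LZ4 layout above, with made-up numbers:
 * for a stage whose buffer_size (memlen) is 0x10000 and whose compressed
 * in_size is 0x6000, the compressed bytes are read to
 * buffer + 0x10000 - 0x6000 = buffer + 0xa000, and decompression then writes
 * forward from the start of buffer, so the output does not catch up with the
 * still-unread compressed data as long as the margin described in
 * compression.h (guaranteed by cbfstool for stages) holds.
 */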

static inline int tohex4(unsigned int c)
{
	return (c <= 9) ? (c + '0') : (c - 10 + 'a');
}

static void tohex8(unsigned int val, char *dest)
{
	dest[0] = tohex4((val >> 4) & 0xf);
	dest[1] = tohex4(val & 0xf);
}

static void tohex16(unsigned int val, char *dest)
{
	dest[0] = tohex4(val >> 12);
	dest[1] = tohex4((val >> 8) & 0xf);
	dest[2] = tohex4((val >> 4) & 0xf);
	dest[3] = tohex4(val & 0xf);
}

void *cbfs_boot_map_optionrom(uint16_t vendor, uint16_t device)
{
	char name[17] = "pciXXXX,XXXX.rom";

	tohex16(vendor, name + 3);
	tohex16(device, name + 8);

	return cbfs_map(name, NULL);
}

void *cbfs_boot_map_optionrom_revision(uint16_t vendor, uint16_t device, uint8_t rev)
{
	char name[20] = "pciXXXX,XXXX,XX.rom";

	tohex16(vendor, name + 3);
	tohex16(device, name + 8);
	tohex8(rev, name + 13);

	return cbfs_map(name, NULL);
}
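
/*
 * Example of the names the two helpers above build (IDs are illustrative):
 * vendor 0x8086 with device 0x1616 yields "pci8086,1616.rom", and adding
 * revision 0x02 yields "pci8086,1616,02.rom"; the resulting name is then
 * looked up in CBFS via cbfs_map().
 */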

static size_t _cbfs_load(const char *name, void *buf, size_t buf_size,
			 bool force_ro)
{
	struct region_device rdev;
	union cbfs_mdata mdata;

	if (cbfs_boot_lookup(name, force_ro, &mdata, &rdev))
		return 0;

	uint32_t compression = CBFS_COMPRESS_NONE;
	const struct cbfs_file_attr_compression *attr = cbfs_find_attr(&mdata,
		CBFS_FILE_ATTR_TAG_COMPRESSION, sizeof(*attr));
	if (attr) {
		compression = be32toh(attr->compression);
		if (buf_size < be32toh(attr->decompressed_size))
			return 0;
	}

	return cbfs_load_and_decompress(&rdev, 0, region_device_sz(&rdev),
					buf, buf_size, compression);
}

size_t cbfs_load(const char *name, void *buf, size_t buf_size)
{
	return _cbfs_load(name, buf, buf_size, false);
}

size_t cbfs_ro_load(const char *name, void *buf, size_t buf_size)
{
	return _cbfs_load(name, buf, buf_size, true);
}
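
/*
 * Usage sketch for cbfs_load(); the file name and buffer size are
 * illustrative placeholders. A return value of 0 covers both "not found"
 * and "does not fit in the buffer":
 *
 *	static uint8_t buf[4096];
 *	size_t size = cbfs_load("vbt.bin", buf, sizeof(buf));
 *	if (!size)
 *		printk(BIOS_ERR, "vbt.bin missing or larger than %zu bytes\n",
 *		       sizeof(buf));
 */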

int cbfs_prog_stage_load(struct prog *pstage)
{
	struct cbfs_stage stage;
	uint8_t *load;
	void *entry;
	size_t fsize;
	size_t foffset;
	const struct region_device *fh = prog_rdev(pstage);

	if (rdev_readat(fh, &stage, 0, sizeof(stage)) != sizeof(stage))
		return -1;

	fsize = region_device_sz(fh);
	fsize -= sizeof(stage);
	foffset = 0;
	foffset += sizeof(stage);

	/* cbfs_stage fields are written in little endian despite the other
	   cbfs data types being encoded in big endian. */
	stage.compression = read_le32(&stage.compression);
	stage.entry = read_le64(&stage.entry);
	stage.load = read_le64(&stage.load);
	stage.len = read_le32(&stage.len);
	stage.memlen = read_le32(&stage.memlen);

	assert(fsize == stage.len);

	load = (void *)(uintptr_t)stage.load;
	entry = (void *)(uintptr_t)stage.entry;

	/* Hacky way to not load programs over read only media. The stages
	 * that would hit this path initialize themselves. */
	if ((ENV_BOOTBLOCK || ENV_SEPARATE_VERSTAGE) &&
	    !CONFIG(NO_XIP_EARLY_STAGES) && CONFIG(BOOT_DEVICE_MEMORY_MAPPED)) {
		void *mapping = rdev_mmap(fh, foffset, fsize);
		rdev_munmap(fh, mapping);
		if (mapping == load)
			goto out;
	}

	fsize = cbfs_stage_load_and_decompress(fh, foffset, fsize, load,
		stage.memlen, stage.compression);
	if (!fsize)
		return -1;

	/* Clear area not covered by file. */
	memset(&load[fsize], 0, stage.memlen - fsize);

	prog_segment_loaded((uintptr_t)load, stage.memlen, SEG_FINAL);

out:
	prog_set_area(pstage, load, stage.memlen);
	prog_set_entry(pstage, entry, NULL);

	return 0;
}

void cbfs_boot_device_find_mcache(struct cbfs_boot_device *cbd, uint32_t id)
{
	if (CONFIG(NO_CBFS_MCACHE) || ENV_SMM)
		return;

	const struct cbmem_entry *entry;
	if (cbmem_possibly_online() &&
	    (entry = cbmem_entry_find(id))) {
		cbd->mcache = cbmem_entry_start(entry);
		cbd->mcache_size = cbmem_entry_size(entry);
	} else if (ENV_ROMSTAGE_OR_BEFORE) {
		u8 *boundary = _ecbfs_mcache - REGION_SIZE(cbfs_mcache) *
			       CONFIG_CBFS_MCACHE_RW_PERCENTAGE / 100;
		boundary = (u8 *)ALIGN_DOWN((uintptr_t)boundary,
					    CBFS_MCACHE_ALIGNMENT);
		if (id == CBMEM_ID_CBFS_RO_MCACHE) {
			cbd->mcache = _cbfs_mcache;
			cbd->mcache_size = boundary - _cbfs_mcache;
		} else if (id == CBMEM_ID_CBFS_RW_MCACHE) {
			cbd->mcache = boundary;
			cbd->mcache_size = _ecbfs_mcache - boundary;
		}
	}
}
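
/*
 * Example of the pre-CBMEM mcache split above, with made-up numbers: given
 * an 8 KiB cbfs_mcache region and CONFIG_CBFS_MCACHE_RW_PERCENTAGE = 25,
 * the boundary lands 2 KiB below _ecbfs_mcache (then aligned down to
 * CBFS_MCACHE_ALIGNMENT), so the RO mcache gets roughly the lower 6 KiB and
 * the RW mcache roughly the upper 2 KiB.
 */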

cb_err_t cbfs_init_boot_device(const struct cbfs_boot_device *cbd,
			       struct vb2_hash *metadata_hash)
{
	/* If we have an mcache, mcache_build() will also check mdata hash. */
	if (!CONFIG(NO_CBFS_MCACHE) && !ENV_SMM && cbd->mcache_size > 0)
		return cbfs_mcache_build(&cbd->rdev, cbd->mcache,
					 cbd->mcache_size, metadata_hash);

	/* No mcache and no verification means we have nothing special to do. */
	if (!CONFIG(CBFS_VERIFICATION) || !metadata_hash)
		return CB_SUCCESS;

	/* Verification only: use cbfs_walk() without a walker() function to
	   just run through the CBFS once, will return NOT_FOUND by default. */
	cb_err_t err = cbfs_walk(&cbd->rdev, NULL, NULL, metadata_hash, 0);
	if (err == CB_CBFS_NOT_FOUND)
		err = CB_SUCCESS;
	return err;
}

const struct cbfs_boot_device *cbfs_get_boot_device(bool force_ro)
{
	static struct cbfs_boot_device ro;

	/* Ensure we always init RO mcache, even if first file is from RW.
	   Otherwise it may not be available when needed in later stages. */
	if (ENV_INITIAL_STAGE && !force_ro && !region_device_sz(&ro.rdev))
		cbfs_get_boot_device(true);

	if (!force_ro) {
		const struct cbfs_boot_device *rw = vboot_get_cbfs_boot_device();
		/* This will return NULL if vboot isn't enabled, didn't run yet
		   or decided to boot into recovery mode. */
		if (rw)
			return rw;
	}

	if (region_device_sz(&ro.rdev))
		return &ro;

	if (fmap_locate_area_as_rdev("COREBOOT", &ro.rdev))
		die("Cannot locate primary CBFS");

	cbfs_boot_device_find_mcache(&ro, CBMEM_ID_CBFS_RO_MCACHE);

	if (ENV_INITIAL_STAGE) {
		cb_err_t err = cbfs_init_boot_device(&ro, metadata_hash_get());
		if (err == CB_CBFS_HASH_MISMATCH)
			die("RO CBFS metadata hash verification failure");
		else if (CONFIG(TOCTOU_SAFETY) && err == CB_CBFS_CACHE_FULL)
			die("RO mcache overflow breaks TOCTOU safety!\n");
		else if (err && err != CB_CBFS_CACHE_FULL)
			die("RO CBFS initialization error: %d", err);
	}

	return &ro;
}

#if !CONFIG(NO_CBFS_MCACHE)
static void mcache_to_cbmem(const struct cbfs_boot_device *cbd, u32 cbmem_id)
{
	if (!cbd)
		return;

	size_t real_size = cbfs_mcache_real_size(cbd->mcache, cbd->mcache_size);
	void *cbmem_mcache = cbmem_add(cbmem_id, real_size);
	if (!cbmem_mcache) {
		printk(BIOS_ERR, "ERROR: Cannot allocate CBMEM mcache %#x (%#zx bytes)!\n",
		       cbmem_id, real_size);
		return;
	}
	memcpy(cbmem_mcache, cbd->mcache, real_size);
}

static void cbfs_mcache_migrate(int unused)
{
	mcache_to_cbmem(vboot_get_cbfs_boot_device(), CBMEM_ID_CBFS_RW_MCACHE);
	mcache_to_cbmem(cbfs_get_boot_device(true), CBMEM_ID_CBFS_RO_MCACHE);
}
ROMSTAGE_CBMEM_INIT_HOOK(cbfs_mcache_migrate)
#endif