/* SPDX-License-Identifier: GPL-2.0-only */

#include <assert.h>
#include <boot_device.h>
#include <cbfs.h>
#include <cbfs_private.h>
#include <cbmem.h>
#include <commonlib/bsd/compression.h>
#include <commonlib/endian.h>
#include <console/console.h>
#include <fmap.h>
#include <lib.h>
#include <metadata_hash.h>
#include <security/tpm/tspi/crtm.h>
#include <security/vboot/vboot_common.h>
#include <stdlib.h>
#include <string.h>
#include <symbols.h>
#include <timestamp.h>

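/*
 * Look up a file by name in the currently active CBFS (the RW region if vboot
 * selected one, otherwise RO). The metadata cache (mcache) is consulted first;
 * only on a cache miss/overflow is the full CBFS directory walked. With
 * VBOOT_ENABLE_CBFS_FALLBACK, a file missing from the RW region is retried in
 * the RO region.
 */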
cb_err_t cbfs_boot_lookup(const char *name, bool force_ro,
			  union cbfs_mdata *mdata, struct region_device *rdev)
{
	const struct cbfs_boot_device *cbd = cbfs_get_boot_device(force_ro);
	if (!cbd)
		return CB_ERR;

	size_t data_offset;
	cb_err_t err = CB_CBFS_CACHE_FULL;
	if (!CONFIG(NO_CBFS_MCACHE) && !ENV_SMM)
		err = cbfs_mcache_lookup(cbd->mcache, cbd->mcache_size,
					 name, mdata, &data_offset);
	if (err == CB_CBFS_CACHE_FULL) {
		struct vb2_hash *metadata_hash = NULL;
		if (CONFIG(TOCTOU_SAFETY)) {
			if (ENV_SMM)	/* Cannot provide TOCTOU safety for SMM */
				dead_code();
			/* We can only reach this for the RW CBFS -- an mcache
			   overflow in the RO CBFS would have been caught when
			   building the mcache in cbfs_get_boot_device().
			   (Note that TOCTOU_SAFETY implies !NO_CBFS_MCACHE.) */
			assert(cbd == vboot_get_cbfs_boot_device());
			/* TODO: set metadata_hash to RW metadata hash here. */
		}
		err = cbfs_lookup(&cbd->rdev, name, mdata, &data_offset,
				  metadata_hash);
	}

	if (CONFIG(VBOOT_ENABLE_CBFS_FALLBACK) && !force_ro &&
	    err == CB_CBFS_NOT_FOUND) {
		printk(BIOS_INFO, "CBFS: Fall back to RO region for %s\n",
		       name);
		return cbfs_boot_lookup(name, true, mdata, rdev);
	}
	if (err)
		return err;

	if (rdev_chain(rdev, &cbd->rdev, data_offset, be32toh(mdata->h.len)))
		return CB_ERR;

	if (tspi_measure_cbfs_hook(rdev, name, be32toh(mdata->h.type)))
		return CB_ERR;

	return CB_SUCCESS;
}

int cbfs_boot_locate(struct cbfsf *fh, const char *name, uint32_t *type)
{
	if (cbfs_boot_lookup(name, false, &fh->mdata, &fh->data))
		return -1;

	size_t msize = be32toh(fh->mdata.h.offset);
	if (rdev_chain(&fh->metadata, &addrspace_32bit.rdev,
		       (uintptr_t)&fh->mdata, msize))
		return -1;

	if (type) {
		if (!*type)
			*type = be32toh(fh->mdata.h.type);
		else if (*type != be32toh(fh->mdata.h.type))
			return -1;
	}

	return 0;
}

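/*
 * The mapping helpers below return a pointer into the boot device (or a
 * bounce buffer on non-memory-mapped media) that should be released with
 * cbfs_unmap(). A minimal usage sketch -- the file name here is purely
 * illustrative, not something this file defines:
 *
 *	size_t size;
 *	void *data = cbfs_map("example/blob", &size);
 *	if (data) {
 *		// ... use `size` bytes at `data` ...
 *		cbfs_unmap(data);
 *	}
 */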
static void *_cbfs_map(const char *name, size_t *size_out, bool force_ro)
{
	struct region_device rdev;
	union cbfs_mdata mdata;

	if (cbfs_boot_lookup(name, force_ro, &mdata, &rdev))
		return NULL;

	if (size_out != NULL)
		*size_out = region_device_sz(&rdev);

	return rdev_mmap_full(&rdev);
}

void *cbfs_map(const char *name, size_t *size_out)
{
	return _cbfs_map(name, size_out, false);
}

void *cbfs_ro_map(const char *name, size_t *size_out)
{
	return _cbfs_map(name, size_out, true);
}

int cbfs_unmap(void *mapping)
{
	/* This works because munmap() only works on the root rdev and never
	   cares about which chained subregion something was mapped from. */
	return rdev_munmap(boot_device_ro(), mapping);
}

int cbfs_locate_file_in_region(struct cbfsf *fh, const char *region_name,
			       const char *name, uint32_t *type)
{
	struct region_device rdev;
	int ret = 0;
	if (fmap_locate_area_as_rdev(region_name, &rdev)) {
		LOG("%s region not found while looking for %s\n",
		    region_name, name);
		return -1;
	}

	uint32_t dummy_type = 0;
	if (!type)
		type = &dummy_type;

	ret = cbfs_locate(fh, &rdev, name, type);
	if (!ret)
		if (tspi_measure_cbfs_hook(&rdev, name, *type))
			return -1;
	return ret;
}

static inline bool fsps_env(void)
{
	/* FSP-S is assumed to be loaded in ramstage. */
	if (ENV_RAMSTAGE)
		return true;
	return false;
}

static inline bool fspm_env(void)
{
	/* FSP-M is assumed to be loaded in romstage. */
	if (ENV_ROMSTAGE)
		return true;
	return false;
}

static inline bool cbfs_lz4_enabled(void)
{
	if (fsps_env() && CONFIG(FSP_COMPRESS_FSP_S_LZ4))
		return true;
	if (fspm_env() && CONFIG(FSP_COMPRESS_FSP_M_LZ4))
		return true;

	if ((ENV_BOOTBLOCK || ENV_SEPARATE_VERSTAGE) && !CONFIG(COMPRESS_PRERAM_STAGES))
		return false;

	return true;
}

static inline bool cbfs_lzma_enabled(void)
{
	if (fsps_env() && CONFIG(FSP_COMPRESS_FSP_S_LZMA))
		return true;
	if (fspm_env() && CONFIG(FSP_COMPRESS_FSP_M_LZMA))
		return true;
	/* We assume here romstage and postcar are never compressed. */
	if (ENV_BOOTBLOCK || ENV_SEPARATE_VERSTAGE)
		return false;
	if (ENV_ROMSTAGE && CONFIG(POSTCAR_STAGE))
		return false;
	if ((ENV_ROMSTAGE || ENV_POSTCAR) && !CONFIG(COMPRESS_RAMSTAGE))
		return false;
	return true;
}

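/*
 * Read a (possibly compressed) file payload from `rdev` into `buffer`.
 * Dispatches on the CBFS compression attribute: raw copy, LZ4 or LZMA.
 * Returns the number of decompressed bytes, or 0 on any error (compression
 * algorithm disabled for this stage, undersized buffer, read or
 * decompression failure).
 */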
size_t cbfs_load_and_decompress(const struct region_device *rdev, size_t offset,
	size_t in_size, void *buffer, size_t buffer_size, uint32_t compression)
{
	size_t out_size;
	void *map;

	switch (compression) {
	case CBFS_COMPRESS_NONE:
		if (buffer_size < in_size)
			return 0;
		if (rdev_readat(rdev, buffer, offset, in_size) != in_size)
			return 0;
		return in_size;

	case CBFS_COMPRESS_LZ4:
		if (!cbfs_lz4_enabled())
			return 0;

		/* cbfs_stage_load_and_decompress() takes care of in-place
		   lz4 decompression by setting up the rdev to be in memory. */
		map = rdev_mmap(rdev, offset, in_size);
		if (map == NULL)
			return 0;

		timestamp_add_now(TS_START_ULZ4F);
		out_size = ulz4fn(map, in_size, buffer, buffer_size);
		timestamp_add_now(TS_END_ULZ4F);

		rdev_munmap(rdev, map);

		return out_size;

	case CBFS_COMPRESS_LZMA:
		if (!cbfs_lzma_enabled())
			return 0;
		map = rdev_mmap(rdev, offset, in_size);
		if (map == NULL)
			return 0;

		/* Note: timestamp not useful for memory-mapped media (x86) */
		timestamp_add_now(TS_START_ULZMA);
		out_size = ulzman(map, in_size, buffer, buffer_size);
		timestamp_add_now(TS_END_ULZMA);

		rdev_munmap(rdev, map);

		return out_size;

	default:
		return 0;
	}
}

static size_t cbfs_stage_load_and_decompress(const struct region_device *rdev,
	size_t offset, size_t in_size, void *buffer, size_t buffer_size,
	uint32_t compression)
{
	struct region_device rdev_src;

	if (compression == CBFS_COMPRESS_LZ4) {
		if (!cbfs_lz4_enabled())
			return 0;
		/* Load the compressed image to the end of the available memory
		 * area for in-place decompression. It is the responsibility of
		 * the caller to ensure that buffer_size is large enough
		 * (see compression.h, guaranteed by cbfstool for stages). */
		void *compr_start = buffer + buffer_size - in_size;
		if (rdev_readat(rdev, compr_start, offset, in_size) != in_size)
			return 0;
		/* Create a region device backed by memory. */
		rdev_chain(&rdev_src, &addrspace_32bit.rdev,
			   (uintptr_t)compr_start, in_size);

		return cbfs_load_and_decompress(&rdev_src, 0, in_size, buffer,
						buffer_size, compression);
	}

	/* All other algorithms can use the generic implementation. */
	return cbfs_load_and_decompress(rdev, offset, in_size, buffer,
					buffer_size, compression);
}

static inline int tohex4(unsigned int c)
{
	return (c <= 9) ? (c + '0') : (c - 10 + 'a');
}

static void tohex8(unsigned int val, char *dest)
{
	dest[0] = tohex4((val >> 4) & 0xf);
	dest[1] = tohex4(val & 0xf);
}

static void tohex16(unsigned int val, char *dest)
{
	dest[0] = tohex4(val >> 12);
	dest[1] = tohex4((val >> 8) & 0xf);
	dest[2] = tohex4((val >> 4) & 0xf);
	dest[3] = tohex4(val & 0xf);
}

void *cbfs_boot_map_optionrom(uint16_t vendor, uint16_t device)
{
	char name[17] = "pciXXXX,XXXX.rom";

	tohex16(vendor, name + 3);
	tohex16(device, name + 8);

	return cbfs_map(name, NULL);
}

void *cbfs_boot_map_optionrom_revision(uint16_t vendor, uint16_t device, uint8_t rev)
{
	char name[20] = "pciXXXX,XXXX,XX.rom";

	tohex16(vendor, name + 3);
	tohex16(device, name + 8);
	tohex8(rev, name + 13);

	return cbfs_map(name, NULL);
}

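/*
 * cbfs_load()/cbfs_ro_load() below copy (and, if necessary, decompress) a
 * file into a caller-provided buffer and return the decompressed size, or 0
 * on failure. A minimal sketch -- the buffer size and file name are
 * illustrative assumptions only:
 *
 *	static uint8_t buf[4096];
 *	size_t size = cbfs_load("example/config", buf, sizeof(buf));
 *	if (!size)
 *		printk(BIOS_ERR, "example/config missing or too large\n");
 */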
static size_t _cbfs_load(const char *name, void *buf, size_t buf_size,
			 bool force_ro)
{
	struct region_device rdev;
	union cbfs_mdata mdata;

	if (cbfs_boot_lookup(name, force_ro, &mdata, &rdev))
		return 0;

	uint32_t compression = CBFS_COMPRESS_NONE;
	const struct cbfs_file_attr_compression *attr = cbfs_find_attr(&mdata,
		CBFS_FILE_ATTR_TAG_COMPRESSION, sizeof(*attr));
	if (attr) {
		compression = be32toh(attr->compression);
		if (buf_size < be32toh(attr->decompressed_size))
			return 0;
	}

	return cbfs_load_and_decompress(&rdev, 0, region_device_sz(&rdev),
					buf, buf_size, compression);
}

size_t cbfs_load(const char *name, void *buf, size_t buf_size)
{
	return _cbfs_load(name, buf, buf_size, false);
}

size_t cbfs_ro_load(const char *name, void *buf, size_t buf_size)
{
	return _cbfs_load(name, buf, buf_size, true);
}

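/*
 * Load a program stage: read the little-endian cbfs_stage header, then either
 * run the stage in place (XIP early stages on memory-mapped boot devices) or
 * copy/decompress it to its link address and zero the rest of memlen.
 */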
int cbfs_prog_stage_load(struct prog *pstage)
{
	struct cbfs_stage stage;
	uint8_t *load;
	void *entry;
	size_t fsize;
	size_t foffset;
	const struct region_device *fh = prog_rdev(pstage);

	if (rdev_readat(fh, &stage, 0, sizeof(stage)) != sizeof(stage))
		return -1;

	fsize = region_device_sz(fh);
	fsize -= sizeof(stage);
	foffset = 0;
	foffset += sizeof(stage);

	/* cbfs_stage fields are written in little endian despite the other
	   cbfs data types being encoded in big endian. */
	stage.compression = read_le32(&stage.compression);
	stage.entry = read_le64(&stage.entry);
	stage.load = read_le64(&stage.load);
	stage.len = read_le32(&stage.len);
	stage.memlen = read_le32(&stage.memlen);

	assert(fsize == stage.len);

	load = (void *)(uintptr_t)stage.load;
	entry = (void *)(uintptr_t)stage.entry;

	/* Hacky way to not load programs over read only media. The stages
	 * that would hit this path initialize themselves. */
	if ((ENV_BOOTBLOCK || ENV_SEPARATE_VERSTAGE) &&
	    !CONFIG(NO_XIP_EARLY_STAGES) && CONFIG(BOOT_DEVICE_MEMORY_MAPPED)) {
		void *mapping = rdev_mmap(fh, foffset, fsize);
		rdev_munmap(fh, mapping);
		if (mapping == load)
			goto out;
	}

	fsize = cbfs_stage_load_and_decompress(fh, foffset, fsize, load,
					       stage.memlen, stage.compression);
	if (!fsize)
		return -1;

	/* Clear area not covered by file. */
	memset(&load[fsize], 0, stage.memlen - fsize);

	prog_segment_loaded((uintptr_t)load, stage.memlen, SEG_FINAL);

out:
	prog_set_area(pstage, load, stage.memlen);
	prog_set_entry(pstage, entry, NULL);

	return 0;
}

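/*
 * Locate the metadata cache buffer for the given CBFS (RO or RW): use the
 * CBMEM copy once CBMEM is online, otherwise split the pre-RAM mcache region
 * in two, giving CONFIG_CBFS_MCACHE_RW_PERCENTAGE percent (aligned down to
 * CBFS_MCACHE_ALIGNMENT) to the RW cache and the remainder to the RO cache.
 */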
void cbfs_boot_device_find_mcache(struct cbfs_boot_device *cbd, uint32_t id)
{
	if (CONFIG(NO_CBFS_MCACHE) || ENV_SMM)
		return;

	const struct cbmem_entry *entry;
	if (cbmem_possibly_online() &&
	    (entry = cbmem_entry_find(id))) {
		cbd->mcache = cbmem_entry_start(entry);
		cbd->mcache_size = cbmem_entry_size(entry);
	} else if (ENV_ROMSTAGE_OR_BEFORE) {
		u8 *boundary = _ecbfs_mcache - REGION_SIZE(cbfs_mcache) *
			CONFIG_CBFS_MCACHE_RW_PERCENTAGE / 100;
		boundary = (u8 *)ALIGN_DOWN((uintptr_t)boundary,
					    CBFS_MCACHE_ALIGNMENT);
		if (id == CBMEM_ID_CBFS_RO_MCACHE) {
			cbd->mcache = _cbfs_mcache;
			cbd->mcache_size = boundary - _cbfs_mcache;
		} else if (id == CBMEM_ID_CBFS_RW_MCACHE) {
			cbd->mcache = boundary;
			cbd->mcache_size = _ecbfs_mcache - boundary;
		}
	}
}

cb_err_t cbfs_init_boot_device(const struct cbfs_boot_device *cbd,
			       struct vb2_hash *metadata_hash)
{
	/* If we have an mcache, mcache_build() will also check mdata hash. */
	if (!CONFIG(NO_CBFS_MCACHE) && !ENV_SMM && cbd->mcache_size > 0)
		return cbfs_mcache_build(&cbd->rdev, cbd->mcache,
					 cbd->mcache_size, metadata_hash);

	/* No mcache and no verification means we have nothing special to do. */
	if (!CONFIG(CBFS_VERIFICATION) || !metadata_hash)
		return CB_SUCCESS;

	/* Verification only: use cbfs_walk() without a walker() function to
	   just run through the CBFS once, will return NOT_FOUND by default. */
	cb_err_t err = cbfs_walk(&cbd->rdev, NULL, NULL, metadata_hash, 0);
	if (err == CB_CBFS_NOT_FOUND)
		err = CB_SUCCESS;
	return err;
}

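/*
 * Return the CBFS boot device to use: the vboot-selected RW device when
 * available, otherwise the RO "COREBOOT" FMAP region. The RO device is
 * initialized lazily and cached in a static; in the initial stage its mcache
 * is built (and, with CBFS_VERIFICATION, its metadata verified) even when the
 * first lookup targets the RW region.
 */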
const struct cbfs_boot_device *cbfs_get_boot_device(bool force_ro)
{
	static struct cbfs_boot_device ro;

	/* Ensure we always init RO mcache, even if first file is from RW.
	   Otherwise it may not be available when needed in later stages. */
	if (ENV_INITIAL_STAGE && !force_ro && !region_device_sz(&ro.rdev))
		cbfs_get_boot_device(true);

	if (!force_ro) {
		const struct cbfs_boot_device *rw = vboot_get_cbfs_boot_device();
		/* This will return NULL if vboot isn't enabled, didn't run yet
		   or decided to boot into recovery mode. */
		if (rw)
			return rw;
	}

	if (region_device_sz(&ro.rdev))
		return &ro;

	if (fmap_locate_area_as_rdev("COREBOOT", &ro.rdev))
		die("Cannot locate primary CBFS");

	cbfs_boot_device_find_mcache(&ro, CBMEM_ID_CBFS_RO_MCACHE);

	if (ENV_INITIAL_STAGE) {
		cb_err_t err = cbfs_init_boot_device(&ro, metadata_hash_get());
		if (err == CB_CBFS_HASH_MISMATCH)
			die("RO CBFS metadata hash verification failure");
		else if (CONFIG(TOCTOU_SAFETY) && err == CB_CBFS_CACHE_FULL)
			die("RO mcache overflow breaks TOCTOU safety!\n");
		else if (err && err != CB_CBFS_CACHE_FULL)
			die("RO CBFS initialization error: %d", err);
	}

	return &ro;
}

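/*
 * The pre-RAM mcache lives in a scratch buffer (_cbfs_mcache) that is not
 * available to later stages, so once CBMEM comes up in romstage the used
 * portion of each mcache is copied into a CBMEM entry for ramstage to find.
 */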
#if !CONFIG(NO_CBFS_MCACHE)
static void mcache_to_cbmem(const struct cbfs_boot_device *cbd, u32 cbmem_id)
{
	if (!cbd)
		return;

	size_t real_size = cbfs_mcache_real_size(cbd->mcache, cbd->mcache_size);
	void *cbmem_mcache = cbmem_add(cbmem_id, real_size);
	if (!cbmem_mcache) {
		printk(BIOS_ERR, "ERROR: Cannot allocate CBMEM mcache %#x (%#zx bytes)!\n",
		       cbmem_id, real_size);
		return;
	}
	memcpy(cbmem_mcache, cbd->mcache, real_size);
}

static void cbfs_mcache_migrate(int unused)
{
	mcache_to_cbmem(vboot_get_cbfs_boot_device(), CBMEM_ID_CBFS_RW_MCACHE);
	mcache_to_cbmem(cbfs_get_boot_device(true), CBMEM_ID_CBFS_RO_MCACHE);
}
ROMSTAGE_CBMEM_INIT_HOOK(cbfs_mcache_migrate)
#endif