/* SPDX-License-Identifier: GPL-2.0-only */

#include <assert.h>
#include <boot_device.h>
#include <cbfs.h>
#include <cbfs_private.h>
#include <cbmem.h>
#include <commonlib/bsd/compression.h>
#include <commonlib/endian.h>
#include <console/console.h>
#include <fmap.h>
#include <lib.h>
#include <metadata_hash.h>
#include <security/tpm/tspi/crtm.h>
#include <security/vboot/vboot_common.h>
#include <stdlib.h>
#include <string.h>
#include <symbols.h>
#include <timestamp.h>

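/*
 * Scratch pool backing cbfs_map()/cbfs_unmap() for compressed files. Before CBMEM comes up
 * it lives in the small pre-RAM cbfs_cache region; the init hook below switches it over to
 * the larger post-RAM buffer once CBMEM is available.
 */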
#if CBFS_CACHE_AVAILABLE
struct mem_pool cbfs_cache = MEM_POOL_INIT(_cbfs_cache, REGION_SIZE(cbfs_cache));

static void switch_to_postram_cache(int unused)
{
	if (_preram_cbfs_cache != _postram_cbfs_cache)
		mem_pool_init(&cbfs_cache, _postram_cbfs_cache,
			      REGION_SIZE(postram_cbfs_cache));
}
ROMSTAGE_CBMEM_INIT_HOOK(switch_to_postram_cache);
#endif

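/*
 * Look up a file by name in the currently active CBFS (RW if vboot selected it, RO
 * otherwise). The metadata cache (mcache) is consulted first; only on a cache miss or
 * overflow is the CBFS on the boot device walked directly. On success the file metadata
 * is copied to |mdata| and |rdev| is chained to the file data.
 */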
cb_err_t cbfs_boot_lookup(const char *name, bool force_ro,
			  union cbfs_mdata *mdata, struct region_device *rdev)
{
	const struct cbfs_boot_device *cbd = cbfs_get_boot_device(force_ro);
	if (!cbd)
		return CB_ERR;

	size_t data_offset;
	cb_err_t err = CB_CBFS_CACHE_FULL;
	if (!CONFIG(NO_CBFS_MCACHE) && !ENV_SMM && cbd->mcache_size)
		err = cbfs_mcache_lookup(cbd->mcache, cbd->mcache_size,
					 name, mdata, &data_offset);
	if (err == CB_CBFS_CACHE_FULL) {
		struct vb2_hash *metadata_hash = NULL;
		if (CONFIG(TOCTOU_SAFETY)) {
			if (ENV_SMM)	/* Cannot provide TOCTOU safety for SMM */
				dead_code();
			if (!cbd->mcache_size)
				die("Cannot access CBFS TOCTOU-safely in " ENV_STRING " before CBMEM init!\n");
			/* We can only reach this for the RW CBFS -- an mcache overflow in the
			   RO CBFS would have been caught when building the mcache in
			   cbfs_get_boot_device(). (Note that TOCTOU_SAFETY implies
			   !NO_CBFS_MCACHE.) */
			assert(cbd == vboot_get_cbfs_boot_device());
			/* TODO: set metadata_hash to RW metadata hash here. */
		}
		err = cbfs_lookup(&cbd->rdev, name, mdata, &data_offset, metadata_hash);
	}

	if (CONFIG(VBOOT_ENABLE_CBFS_FALLBACK) && !force_ro && err == CB_CBFS_NOT_FOUND) {
		printk(BIOS_INFO, "CBFS: Fall back to RO region for %s\n", name);
		return cbfs_boot_lookup(name, true, mdata, rdev);
	}
	if (err) {
		if (err == CB_CBFS_NOT_FOUND)
			printk(BIOS_WARNING, "CBFS: '%s' not found.\n", name);
		else if (err == CB_CBFS_HASH_MISMATCH)
			printk(BIOS_ERR, "CBFS ERROR: metadata hash mismatch!\n");
		else
			printk(BIOS_ERR, "CBFS ERROR: error %d when looking up '%s'\n",
			       err, name);
		return err;
	}

	if (rdev_chain(rdev, &cbd->rdev, data_offset, be32toh(mdata->h.len)))
		return CB_ERR;

	if (tspi_measure_cbfs_hook(rdev, name, be32toh(mdata->h.type))) {
		printk(BIOS_ERR, "CBFS ERROR: error when measuring '%s'\n", name);
	}

	return CB_SUCCESS;
}

int cbfs_boot_locate(struct cbfsf *fh, const char *name, uint32_t *type)
{
	if (cbfs_boot_lookup(name, false, &fh->mdata, &fh->data))
		return -1;

	size_t msize = be32toh(fh->mdata.h.offset);
	if (rdev_chain_mem(&fh->metadata, &fh->mdata, msize))
		return -1;

	if (type) {
		if (!*type)
			*type = be32toh(fh->mdata.h.type);
		else if (*type != be32toh(fh->mdata.h.type))
			return -1;
	}

	return 0;
}

void cbfs_unmap(void *mapping)
{
	/*
	 * This is safe to call with mappings that weren't allocated in the cache (e.g. x86
	 * direct mappings) -- mem_pool_free() just does nothing for addresses it doesn't
	 * recognize. This hardcodes the assumption that if platforms implement an rdev_mmap()
	 * that requires a free() for the boot_device, they need to implement it via the
	 * cbfs_cache mem_pool.
	 */
	if (CBFS_CACHE_AVAILABLE)
		mem_pool_free(&cbfs_cache, mapping);
}

int cbfs_locate_file_in_region(struct cbfsf *fh, const char *region_name,
			       const char *name, uint32_t *type)
{
	struct region_device rdev;
	int ret = 0;
	if (fmap_locate_area_as_rdev(region_name, &rdev)) {
		LOG("%s region not found while looking for %s\n", region_name, name);
		return -1;
	}

	uint32_t dummy_type = 0;
	if (!type)
		type = &dummy_type;

	ret = cbfs_locate(fh, &rdev, name, type);
	if (!ret)
		if (tspi_measure_cbfs_hook(&rdev, name, *type))
			LOG("error measuring %s in region %s\n", name, region_name);
	return ret;
}

static inline bool fsps_env(void)
{
	/* FSP-S is assumed to be loaded in ramstage. */
	if (ENV_RAMSTAGE)
		return true;
	return false;
}

static inline bool fspm_env(void)
{
	/* FSP-M is assumed to be loaded in romstage. */
	if (ENV_ROMSTAGE)
		return true;
	return false;
}

static inline bool cbfs_lz4_enabled(void)
{
	if (fsps_env() && CONFIG(FSP_COMPRESS_FSP_S_LZ4))
		return true;
	if (fspm_env() && CONFIG(FSP_COMPRESS_FSP_M_LZ4))
		return true;

	if ((ENV_BOOTBLOCK || ENV_SEPARATE_VERSTAGE) && !CONFIG(COMPRESS_PRERAM_STAGES))
		return false;

	if (ENV_SMM)
		return false;

	return true;
}

static inline bool cbfs_lzma_enabled(void)
{
	if (fsps_env() && CONFIG(FSP_COMPRESS_FSP_S_LZMA))
		return true;
	if (fspm_env() && CONFIG(FSP_COMPRESS_FSP_M_LZMA))
		return true;
	/* We assume here romstage and postcar are never compressed. */
	if (ENV_BOOTBLOCK || ENV_SEPARATE_VERSTAGE)
		return false;
	if (ENV_ROMSTAGE && CONFIG(POSTCAR_STAGE))
		return false;
	if ((ENV_ROMSTAGE || ENV_POSTCAR) && !CONFIG(COMPRESS_RAMSTAGE))
		return false;
	if (ENV_SMM)
		return false;
	return true;
}

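/*
 * Returns true (and logs a critical error) when CBFS_VERIFICATION is enabled and |buffer|
 * does not match |file_hash|; a missing hash also counts as a mismatch. With verification
 * disabled this is a constant false so the hash code doesn't get linked in.
 */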
static inline bool cbfs_file_hash_mismatch(const void *buffer, size_t size,
					   const struct vb2_hash *file_hash)
{
	/* Avoid linking hash functions when verification is disabled. */
	if (!CONFIG(CBFS_VERIFICATION))
		return false;

	/* If there is no file hash, always count that as a mismatch. */
	if (file_hash && vb2_hash_verify(buffer, size, file_hash) == VB2_SUCCESS)
		return false;

	printk(BIOS_CRIT, "CBFS file hash mismatch!\n");
	return true;
}

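/*
 * Reads the file data described by |rdev| into |buffer| and decompresses it according to
 * |compression|, verifying |file_hash| (when provided and verification is enabled) over the
 * stored file contents. Returns the number of decompressed bytes placed in |buffer|, or 0
 * on any error.
 */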
static size_t cbfs_load_and_decompress(const struct region_device *rdev, void *buffer,
				       size_t buffer_size, uint32_t compression,
				       const struct vb2_hash *file_hash)
{
	size_t in_size = region_device_sz(rdev);
	size_t out_size = 0;
	void *map;

	DEBUG("Decompressing %zu bytes to %p with algo %d\n", in_size, buffer, compression);

	switch (compression) {
	case CBFS_COMPRESS_NONE:
		if (buffer_size < in_size)
			return 0;
		if (rdev_readat(rdev, buffer, 0, in_size) != in_size)
			return 0;
		if (cbfs_file_hash_mismatch(buffer, in_size, file_hash))
			return 0;
		return in_size;

	case CBFS_COMPRESS_LZ4:
		if (!cbfs_lz4_enabled())
			return 0;

		/* cbfs_prog_stage_load() takes care of in-place LZ4 decompression by
		   setting up the rdev to be in memory. */
		map = rdev_mmap_full(rdev);
		if (map == NULL)
			return 0;

		if (!cbfs_file_hash_mismatch(map, in_size, file_hash)) {
			timestamp_add_now(TS_START_ULZ4F);
			out_size = ulz4fn(map, in_size, buffer, buffer_size);
			timestamp_add_now(TS_END_ULZ4F);
		}

		rdev_munmap(rdev, map);

		return out_size;

	case CBFS_COMPRESS_LZMA:
		if (!cbfs_lzma_enabled())
			return 0;
		map = rdev_mmap_full(rdev);
		if (map == NULL)
			return 0;

		if (!cbfs_file_hash_mismatch(map, in_size, file_hash)) {
			/* Note: timestamp not useful for memory-mapped media (x86) */
			timestamp_add_now(TS_START_ULZMA);
			out_size = ulzman(map, in_size, buffer, buffer_size);
			timestamp_add_now(TS_END_ULZMA);
		}

		rdev_munmap(rdev, map);

		return out_size;

	default:
		return 0;
	}
}

static inline int tohex4(unsigned int c)
{
	return (c <= 9) ? (c + '0') : (c - 10 + 'a');
}

static void tohex8(unsigned int val, char *dest)
{
	dest[0] = tohex4((val >> 4) & 0xf);
	dest[1] = tohex4(val & 0xf);
}

static void tohex16(unsigned int val, char *dest)
{
	dest[0] = tohex4(val >> 12);
	dest[1] = tohex4((val >> 8) & 0xf);
	dest[2] = tohex4((val >> 4) & 0xf);
	dest[3] = tohex4(val & 0xf);
}

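/*
 * Option ROMs are stored in CBFS under a name derived from the PCI IDs, e.g. vendor 0x8086
 * and device 0x0406 map to "pci8086,0406.rom" (lower-case hex, as produced by tohex16()).
 */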
void *cbfs_boot_map_optionrom(uint16_t vendor, uint16_t device)
{
	char name[17] = "pciXXXX,XXXX.rom";

	tohex16(vendor, name + 3);
	tohex16(device, name + 8);

	return cbfs_map(name, NULL);
}

void *cbfs_boot_map_optionrom_revision(uint16_t vendor, uint16_t device, uint8_t rev)
{
	char name[20] = "pciXXXX,XXXX,XX.rom";

	tohex16(vendor, name + 3);
	tohex16(device, name + 8);
	tohex8(rev, name + 13);

	return cbfs_map(name, NULL);
}

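/*
 * Common backend behind the cbfs_map()/cbfs_load() wrappers: looks up |name|, optionally
 * checks its type, then obtains a destination buffer from |allocator| (or maps the file
 * directly when |allocator| is NULL) and loads/decompresses the file into it.
 *
 * A minimal sketch of a custom allocator callback (hypothetical caller code, not part of
 * this file -- the names and the alignment choice are illustrative only):
 *
 *	static void *my_allocator(void *arg, size_t size, const union cbfs_mdata *mdata)
 *	{
 *		return memalign(16, size);	// e.g. if the consumer needs 16-byte alignment
 *	}
 *
 *	size_t size;
 *	void *buf = _cbfs_alloc("fallback/payload", my_allocator, NULL, &size, false, NULL);
 */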
void *_cbfs_alloc(const char *name, cbfs_allocator_t allocator, void *arg,
		  size_t *size_out, bool force_ro, enum cbfs_type *type)
{
	struct region_device rdev;
	union cbfs_mdata mdata;
	void *loc;

	DEBUG("%s(name='%s', alloc=%p(%p), force_ro=%s, type=%d)\n", __func__, name, allocator,
	      arg, force_ro ? "true" : "false", type ? *type : -1);

	if (cbfs_boot_lookup(name, force_ro, &mdata, &rdev))
		return NULL;

	if (type) {
		const enum cbfs_type real_type = be32toh(mdata.h.type);
		if (*type == CBFS_TYPE_QUERY)
			*type = real_type;
		else if (*type != real_type) {
			ERROR("'%s' type mismatch (is %u, expected %u)\n",
			      mdata.h.filename, real_type, *type);
			return NULL;
		}
	}

	size_t size = region_device_sz(&rdev);
	uint32_t compression = CBFS_COMPRESS_NONE;
	const struct cbfs_file_attr_compression *cattr = cbfs_find_attr(&mdata,
			CBFS_FILE_ATTR_TAG_COMPRESSION, sizeof(*cattr));
	if (cattr) {
		compression = be32toh(cattr->compression);
		size = be32toh(cattr->decompressed_size);
	}

	if (size_out)
		*size_out = size;

	const struct vb2_hash *file_hash = NULL;
	if (CONFIG(CBFS_VERIFICATION))
		file_hash = cbfs_file_hash(&mdata);

	/* allocator == NULL means do a cbfs_map() */
	if (allocator) {
		loc = allocator(arg, size, &mdata);
	} else if (compression == CBFS_COMPRESS_NONE) {
		void *mapping = rdev_mmap_full(&rdev);
		if (!mapping || cbfs_file_hash_mismatch(mapping, size, file_hash))
			return NULL;
		return mapping;
	} else if (!CBFS_CACHE_AVAILABLE) {
		ERROR("Cannot map compressed file %s on x86\n", mdata.h.filename);
		return NULL;
	} else {
		loc = mem_pool_alloc(&cbfs_cache, size);
	}

	if (!loc) {
		ERROR("'%s' allocation failure\n", mdata.h.filename);
		return NULL;
	}

	size = cbfs_load_and_decompress(&rdev, loc, size, compression, file_hash);
	if (!size)
		return NULL;

	return loc;
}

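/*
 * Default allocator used by the fixed-buffer load path: the caller supplies a buffer via
 * struct _cbfs_default_allocator_arg, and the load fails if the file is larger than it.
 */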
void *_cbfs_default_allocator(void *arg, size_t size, const union cbfs_mdata *unused)
{
	struct _cbfs_default_allocator_arg *darg = arg;
	if (size > darg->buf_size)
		return NULL;
	return darg->buf;
}

void *_cbfs_cbmem_allocator(void *arg, size_t size, const union cbfs_mdata *unused)
{
	return cbmem_add((uintptr_t)arg, size);
}

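/*
 * Loads a stage from CBFS into its link-time location: the stageheader attribute supplies
 * the load address, entry point and memlen. For execute-in-place early stages on
 * memory-mapped boot devices the copy is skipped; LZ4-compressed stages are staged at the
 * end of the destination buffer so they can be decompressed in place. Any remaining space
 * in the destination area is zeroed.
 */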
cb_err_t cbfs_prog_stage_load(struct prog *pstage)
{
	union cbfs_mdata mdata;
	struct region_device rdev;
	cb_err_t err;

	prog_locate_hook(pstage);

	if ((err = cbfs_boot_lookup(prog_name(pstage), false, &mdata, &rdev)))
		return err;

	assert(be32toh(mdata.h.type) == CBFS_TYPE_STAGE);
	pstage->cbfs_type = CBFS_TYPE_STAGE;

	enum cbfs_compression compression = CBFS_COMPRESS_NONE;
	const struct cbfs_file_attr_compression *cattr = cbfs_find_attr(&mdata,
			CBFS_FILE_ATTR_TAG_COMPRESSION, sizeof(*cattr));
	if (cattr)
		compression = be32toh(cattr->compression);

	const struct cbfs_file_attr_stageheader *sattr = cbfs_find_attr(&mdata,
			CBFS_FILE_ATTR_TAG_STAGEHEADER, sizeof(*sattr));
	if (!sattr)
		return CB_ERR;
	prog_set_area(pstage, (void *)(uintptr_t)be64toh(sattr->loadaddr),
		      be32toh(sattr->memlen));
	prog_set_entry(pstage, prog_start(pstage) +
			       be32toh(sattr->entry_offset), NULL);

	const struct vb2_hash *file_hash = NULL;
	if (CONFIG(CBFS_VERIFICATION))
		file_hash = cbfs_file_hash(&mdata);

	/* Hacky way to not load programs over read-only media. The stages
	 * that would hit this path initialize themselves. */
	if ((ENV_BOOTBLOCK || ENV_SEPARATE_VERSTAGE) &&
	    !CONFIG(NO_XIP_EARLY_STAGES) && CONFIG(BOOT_DEVICE_MEMORY_MAPPED)) {
		void *mapping = rdev_mmap_full(&rdev);
		rdev_munmap(&rdev, mapping);
		if (cbfs_file_hash_mismatch(mapping, region_device_sz(&rdev), file_hash))
			return CB_CBFS_HASH_MISMATCH;
		if (mapping == prog_start(pstage))
			return CB_SUCCESS;
	}

	/* LZ4 stages can be decompressed in-place to save mapping scratch space. Load the
	   compressed data to the end of the buffer and point &rdev to that memory location. */
	if (cbfs_lz4_enabled() && compression == CBFS_COMPRESS_LZ4) {
		size_t in_size = region_device_sz(&rdev);
		void *compr_start = prog_start(pstage) + prog_size(pstage) - in_size;
		if (rdev_readat(&rdev, compr_start, 0, in_size) != in_size)
			return CB_ERR;
		rdev_chain_mem(&rdev, compr_start, in_size);
	}

	size_t fsize = cbfs_load_and_decompress(&rdev, prog_start(pstage), prog_size(pstage),
						compression, file_hash);
	if (!fsize)
		return CB_ERR;

	/* Clear area not covered by file. */
	memset(prog_start(pstage) + fsize, 0, prog_size(pstage) - fsize);

	prog_segment_loaded((uintptr_t)prog_start(pstage), prog_size(pstage),
			    SEG_FINAL);

	return CB_SUCCESS;
}

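/*
 * Locates the metadata cache buffer for the given CBFS (RO or RW). After CBMEM is up the
 * cache lives in a CBMEM entry; before that, the static cbfs_mcache region is split between
 * RO and RW according to CONFIG_CBFS_MCACHE_RW_PERCENTAGE. For example (values only
 * illustrative), an 8 KiB region with a 25% RW share gives roughly the last 2 KiB (aligned
 * down to CBFS_MCACHE_ALIGNMENT) to the RW mcache and the rest to the RO mcache.
 */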
void cbfs_boot_device_find_mcache(struct cbfs_boot_device *cbd, uint32_t id)
{
	if (CONFIG(NO_CBFS_MCACHE) || ENV_SMM)
		return;

	if (cbd->mcache_size)
		return;

	const struct cbmem_entry *entry;
	if (cbmem_possibly_online() &&
	    (entry = cbmem_entry_find(id))) {
		cbd->mcache = cbmem_entry_start(entry);
		cbd->mcache_size = cbmem_entry_size(entry);
	} else if (ENV_ROMSTAGE_OR_BEFORE) {
		u8 *boundary = _ecbfs_mcache - REGION_SIZE(cbfs_mcache) *
			       CONFIG_CBFS_MCACHE_RW_PERCENTAGE / 100;
		boundary = (u8 *)ALIGN_DOWN((uintptr_t)boundary, CBFS_MCACHE_ALIGNMENT);
		if (id == CBMEM_ID_CBFS_RO_MCACHE) {
			cbd->mcache = _cbfs_mcache;
			cbd->mcache_size = boundary - _cbfs_mcache;
		} else if (id == CBMEM_ID_CBFS_RW_MCACHE) {
			cbd->mcache = boundary;
			cbd->mcache_size = _ecbfs_mcache - boundary;
		}
	}
}

cb_err_t cbfs_init_boot_device(const struct cbfs_boot_device *cbd,
			       struct vb2_hash *mdata_hash)
{
	/* If we have an mcache, mcache_build() will also check mdata hash. */
	if (!CONFIG(NO_CBFS_MCACHE) && !ENV_SMM && cbd->mcache_size > 0)
		return cbfs_mcache_build(&cbd->rdev, cbd->mcache, cbd->mcache_size, mdata_hash);

	/* No mcache and no verification means we have nothing special to do. */
	if (!CONFIG(CBFS_VERIFICATION) || !mdata_hash)
		return CB_SUCCESS;

	/* Verification only: use cbfs_walk() without a walker() function to just run through
	   the CBFS once, will return NOT_FOUND by default. */
	cb_err_t err = cbfs_walk(&cbd->rdev, NULL, NULL, mdata_hash, 0);
	if (err == CB_CBFS_NOT_FOUND)
		err = CB_SUCCESS;
	return err;
}

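/*
 * Returns the boot device descriptor for the active CBFS: the RW CBFS selected by vboot
 * when applicable, otherwise the RO "COREBOOT" FMAP region. The RO descriptor is cached in
 * a static variable and its mcache is built (and verified) once in the initial stage.
 */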
const struct cbfs_boot_device *cbfs_get_boot_device(bool force_ro)
{
	static struct cbfs_boot_device ro;

	/* Ensure we always init RO mcache, even if the first file is from the RW CBFS.
	   Otherwise it may not be available when needed in later stages. */
	if (ENV_INITIAL_STAGE && !force_ro && !region_device_sz(&ro.rdev))
		cbfs_get_boot_device(true);

	if (!force_ro) {
		const struct cbfs_boot_device *rw = vboot_get_cbfs_boot_device();
		/* This will return NULL if vboot isn't enabled, didn't run yet or decided to
		   boot into recovery mode. */
		if (rw)
			return rw;
	}

	/* In rare cases post-RAM stages may run this before cbmem_initialize(), so we can't
	   lock in the result of find_mcache() on the first try and should keep trying every
	   time until an mcache is found. */
	cbfs_boot_device_find_mcache(&ro, CBMEM_ID_CBFS_RO_MCACHE);

	if (region_device_sz(&ro.rdev))
		return &ro;

	if (fmap_locate_area_as_rdev("COREBOOT", &ro.rdev))
		die("Cannot locate primary CBFS");

	if (ENV_INITIAL_STAGE) {
		cb_err_t err = cbfs_init_boot_device(&ro, metadata_hash_get());
		if (err == CB_CBFS_HASH_MISMATCH)
			die("RO CBFS metadata hash verification failure");
		else if (CONFIG(TOCTOU_SAFETY) && err == CB_CBFS_CACHE_FULL)
			die("RO mcache overflow breaks TOCTOU safety!\n");
		else if (err && err != CB_CBFS_CACHE_FULL)
			die("RO CBFS initialization error: %d", err);
	}

	return &ro;
}

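/* Once CBMEM is available, copy the mcaches built in pre-RAM memory into CBMEM entries so
   that later stages (via the CBMEM path in cbfs_boot_device_find_mcache() above) can keep
   using them. */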
#if !CONFIG(NO_CBFS_MCACHE)
static void mcache_to_cbmem(const struct cbfs_boot_device *cbd, u32 cbmem_id)
{
	if (!cbd)
		return;

	size_t real_size = cbfs_mcache_real_size(cbd->mcache, cbd->mcache_size);
	void *cbmem_mcache = cbmem_add(cbmem_id, real_size);
	if (!cbmem_mcache) {
		printk(BIOS_ERR, "ERROR: Cannot allocate CBMEM mcache %#x (%#zx bytes)!\n",
		       cbmem_id, real_size);
		return;
	}
	memcpy(cbmem_mcache, cbd->mcache, real_size);
}

static void cbfs_mcache_migrate(int unused)
{
	mcache_to_cbmem(vboot_get_cbfs_boot_device(), CBMEM_ID_CBFS_RW_MCACHE);
	mcache_to_cbmem(cbfs_get_boot_device(true), CBMEM_ID_CBFS_RO_MCACHE);
}
ROMSTAGE_CBMEM_INIT_HOOK(cbfs_mcache_migrate)
#endif