/* SPDX-License-Identifier: GPL-2.0-only */

#include <assert.h>
#include <boot_device.h>
#include <cbfs.h>
#include <cbmem.h>
#include <commonlib/bsd/cbfs_private.h>
#include <commonlib/bsd/compression.h>
#include <console/console.h>
#include <fmap.h>
#include <lib.h>
#include <list.h>
#include <metadata_hash.h>
#include <security/tpm/tspi/crtm.h>
#include <security/vboot/vboot_common.h>
#include <stdlib.h>
#include <string.h>
#include <symbols.h>
#include <thread.h>
#include <timestamp.h>

#if ENV_STAGE_HAS_DATA_SECTION
struct mem_pool cbfs_cache =
	MEM_POOL_INIT(_cbfs_cache, REGION_SIZE(cbfs_cache), CONFIG_CBFS_CACHE_ALIGN);
#else
struct mem_pool cbfs_cache = MEM_POOL_INIT(NULL, 0, 0);
#endif

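/* The cbfs_cache is a small scratch mem_pool used to map and decompress files. Early
   stages start out on the pre-RAM cache region; once CBMEM comes online, the hook below
   repoints the pool at the (usually larger) post-RAM cache region. */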
static void switch_to_postram_cache(int unused)
{
	if (_preram_cbfs_cache != _postram_cbfs_cache)
		mem_pool_init(&cbfs_cache, _postram_cbfs_cache, REGION_SIZE(postram_cbfs_cache),
			      CONFIG_CBFS_CACHE_ALIGN);
}
ROMSTAGE_CBMEM_INIT_HOOK(switch_to_postram_cache);

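/* Locate a file in the active CBFS: try the metadata cache (mcache) first and fall back
   to scanning the boot device only when the mcache is unavailable or overflowed. On
   success, fills in the file's metadata and chains *rdev to its data region. */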
cb_err_t _cbfs_boot_lookup(const char *name, bool force_ro,
			   union cbfs_mdata *mdata, struct region_device *rdev)
{
	const struct cbfs_boot_device *cbd = cbfs_get_boot_device(force_ro);
	if (!cbd)
		return CB_ERR;

	size_t data_offset;
	cb_err_t err = CB_CBFS_CACHE_FULL;
	if (!CONFIG(NO_CBFS_MCACHE) && !ENV_SMM && cbd->mcache_size)
		err = cbfs_mcache_lookup(cbd->mcache, cbd->mcache_size,
					 name, mdata, &data_offset);
	if (err == CB_CBFS_CACHE_FULL) {
		struct vb2_hash *metadata_hash = NULL;
		if (CONFIG(TOCTOU_SAFETY)) {
			if (ENV_SMM)	/* Cannot provide TOCTOU safety for SMM */
				dead_code();
			if (!cbd->mcache_size)
				die("Cannot access CBFS TOCTOU-safely in " ENV_STRING " before CBMEM init!\n");
			/* We can only reach this for the RW CBFS -- an mcache overflow in the
			   RO CBFS would have been caught when building the mcache in
			   cbfs_get_boot_device(). (Note that TOCTOU_SAFETY implies
			   !NO_CBFS_MCACHE.) */
			assert(cbd == vboot_get_cbfs_boot_device());
			die("TODO: set metadata_hash to RW metadata hash here.\n");
		}
		err = cbfs_lookup(&cbd->rdev, name, mdata, &data_offset, metadata_hash);
	}

	if (CONFIG(VBOOT_ENABLE_CBFS_FALLBACK) && !force_ro && err == CB_CBFS_NOT_FOUND) {
		printk(BIOS_INFO, "CBFS: Fall back to RO region for %s\n", name);
		return _cbfs_boot_lookup(name, true, mdata, rdev);
	}
	if (err) {
		if (err == CB_CBFS_NOT_FOUND)
			printk(BIOS_WARNING, "CBFS: '%s' not found.\n", name);
		else if (err == CB_CBFS_HASH_MISMATCH)
			printk(BIOS_ERR, "CBFS ERROR: metadata hash mismatch!\n");
		else
			printk(BIOS_ERR, "CBFS ERROR: error %d when looking up '%s'\n",
			       err, name);
		return err;
	}

	if (rdev_chain(rdev, &cbd->rdev, data_offset, be32toh(mdata->h.len)))
		return CB_ERR;

	return CB_SUCCESS;
}

void cbfs_unmap(void *mapping)
{
	/*
	 * This is safe to call with mappings that weren't allocated in the cache (e.g. x86
	 * direct mappings) -- mem_pool_free() just does nothing for addresses it doesn't
	 * recognize. This hardcodes the assumption that if platforms implement an rdev_mmap()
	 * that requires a free() for the boot_device, they need to implement it via the
	 * cbfs_cache mem_pool.
	 */
	mem_pool_free(&cbfs_cache, mapping);
}

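/* Decompression support is linked per stage to save space. The helpers below encode
   which stages are expected to load compressed files (e.g. FSP blobs or later stages)
   and gate the LZ4/LZMA code paths accordingly. */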
static inline bool fsps_env(void)
{
	/* FSP-S is assumed to be loaded in ramstage. */
	if (ENV_RAMSTAGE)
		return true;
	return false;
}

static inline bool fspm_env(void)
{
	/* FSP-M is assumed to be loaded in romstage. */
	if (ENV_ROMSTAGE)
		return true;
	return false;
}

static inline bool cbfs_lz4_enabled(void)
{
	if (fsps_env() && CONFIG(FSP_COMPRESS_FSP_S_LZ4))
		return true;
	if (fspm_env() && CONFIG(FSP_COMPRESS_FSP_M_LZ4))
		return true;

	if ((ENV_BOOTBLOCK || ENV_SEPARATE_VERSTAGE) && !CONFIG(COMPRESS_PRERAM_STAGES))
		return false;

	if (ENV_SMM)
		return false;

	return true;
}

static inline bool cbfs_lzma_enabled(void)
{
	if (fsps_env() && CONFIG(FSP_COMPRESS_FSP_S_LZMA))
		return true;
	if (fspm_env() && CONFIG(FSP_COMPRESS_FSP_M_LZMA))
		return true;
	/* We assume here romstage and postcar are never compressed. */
	if (ENV_BOOTBLOCK || ENV_SEPARATE_VERSTAGE)
		return false;
	if (ENV_ROMSTAGE && CONFIG(POSTCAR_STAGE))
		return false;
	if ((ENV_ROMSTAGE || ENV_POSTCAR) && !CONFIG(COMPRESS_RAMSTAGE))
		return false;
	if (ENV_SMM)
		return false;
	return true;
}

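/* Returns true (i.e. "mismatch") if the file fails verification. When CBFS_VERIFICATION
   is enabled this checks the raw file data against the hash stored in its metadata; when
   TPM_MEASURED_BOOT is enabled it also measures the file into the TCPA log (measurement
   errors deliberately do not block boot). */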
static bool cbfs_file_hash_mismatch(const void *buffer, size_t size,
				    const union cbfs_mdata *mdata, bool skip_verification)
{
	/* Avoid linking hash functions when verification and measurement are disabled. */
	if (!CONFIG(CBFS_VERIFICATION) && !CONFIG(TPM_MEASURED_BOOT))
		return false;

	const struct vb2_hash *hash = NULL;

	if (CONFIG(CBFS_VERIFICATION) && !skip_verification) {
		hash = cbfs_file_hash(mdata);
		if (!hash) {
			ERROR("'%s' does not have a file hash!\n", mdata->h.filename);
			return true;
		}
		if (vb2_hash_verify(buffer, size, hash) != VB2_SUCCESS) {
			ERROR("'%s' file hash mismatch!\n", mdata->h.filename);
			return true;
		}
	}

	if (CONFIG(TPM_MEASURED_BOOT) && !ENV_SMM) {
		struct vb2_hash calculated_hash;

		/* No need to re-hash file if we already have it from verification. */
		if (!hash || hash->algo != TPM_MEASURE_ALGO) {
			vb2_hash_calculate(buffer, size, TPM_MEASURE_ALGO, &calculated_hash);
			hash = &calculated_hash;
		}

		if (tspi_cbfs_measurement(mdata->h.filename, be32toh(mdata->h.type), hash))
			ERROR("failed to measure '%s' into TCPA log\n", mdata->h.filename);
		/* We intentionally continue to boot on measurement errors. */
	}

	return false;
}

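/* Reads the file data in rdev into buffer, decompressing it according to the passed
   compression algorithm, and returns the number of decompressed bytes (0 on any error).
   Note that verification and measurement happen on the stored (compressed) data, before
   decompression. */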
static size_t cbfs_load_and_decompress(const struct region_device *rdev, void *buffer,
				       size_t buffer_size, uint32_t compression,
				       const union cbfs_mdata *mdata, bool skip_verification)
{
	size_t in_size = region_device_sz(rdev);
	size_t out_size = 0;
	void *map;

	DEBUG("Decompressing %zu bytes from '%s' to %p with algo %d\n",
	      in_size, mdata->h.filename, buffer, compression);

	switch (compression) {
	case CBFS_COMPRESS_NONE:
		if (buffer_size < in_size)
			return 0;
		if (rdev_readat(rdev, buffer, 0, in_size) != in_size)
			return 0;
		if (cbfs_file_hash_mismatch(buffer, in_size, mdata, skip_verification))
			return 0;
		return in_size;

	case CBFS_COMPRESS_LZ4:
		if (!cbfs_lz4_enabled())
			return 0;

		/* cbfs_prog_stage_load() takes care of in-place LZ4 decompression by
		   setting up the rdev to be in memory. */
		map = rdev_mmap_full(rdev);
		if (map == NULL)
			return 0;

		if (!cbfs_file_hash_mismatch(map, in_size, mdata, skip_verification)) {
			timestamp_add_now(TS_START_ULZ4F);
			out_size = ulz4fn(map, in_size, buffer, buffer_size);
			timestamp_add_now(TS_END_ULZ4F);
		}

		rdev_munmap(rdev, map);

		return out_size;

	case CBFS_COMPRESS_LZMA:
		if (!cbfs_lzma_enabled())
			return 0;
		map = rdev_mmap_full(rdev);
		if (map == NULL)
			return 0;

		if (!cbfs_file_hash_mismatch(map, in_size, mdata, skip_verification)) {
			/* Note: timestamp not useful for memory-mapped media (x86) */
			timestamp_add_now(TS_START_ULZMA);
			out_size = ulzman(map, in_size, buffer, buffer_size);
			timestamp_add_now(TS_END_ULZMA);
		}

		rdev_munmap(rdev, map);

		return out_size;

	default:
		return 0;
	}
}

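/* CBFS_PRELOAD support: cbfs_preload() kicks off a worker thread that reads a file into
   a cbfs_cache buffer while the main thread keeps booting. Each in-flight preload is
   tracked in a context on cbfs_preload_context_list, keyed by file name. */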
struct cbfs_preload_context {
	struct region_device rdev;
	struct thread_handle handle;
	struct list_node list_node;
	void *buffer;
	char name[];
};

static struct list_node cbfs_preload_context_list;

static struct cbfs_preload_context *alloc_cbfs_preload_context(size_t additional)
{
	struct cbfs_preload_context *context;
	size_t size = sizeof(*context) + additional;

	context = mem_pool_alloc(&cbfs_cache, size);

	if (!context)
		return NULL;

	memset(context, 0, size);

	return context;
}

static void append_cbfs_preload_context(struct cbfs_preload_context *context)
{
	list_append(&context->list_node, &cbfs_preload_context_list);
}

static void free_cbfs_preload_context(struct cbfs_preload_context *context)
{
	list_remove(&context->list_node);

	mem_pool_free(&cbfs_cache, context);
}

static enum cb_err cbfs_preload_thread_entry(void *arg)
{
	struct cbfs_preload_context *context = arg;

	if (rdev_read_full(&context->rdev, context->buffer) < 0) {
		ERROR("%s(name='%s') readat failed\n", __func__, context->name);
		return CB_ERR;
	}

	return CB_SUCCESS;
}

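/* Start asynchronously reading a file into the cbfs_cache. A later load of the same
   name picks up the preloaded copy via get_preload_rdev() instead of re-reading the
   boot device. Illustrative usage (hypothetical file name): call
   cbfs_preload("fallback/ramstage") early in a stage, then load the file as usual.
   Errors here are non-fatal -- the normal load path still works without the preload. */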
void cbfs_preload(const char *name)
{
	struct region_device rdev;
	union cbfs_mdata mdata;
	struct cbfs_preload_context *context;
	bool force_ro = false;
	size_t size;

	if (!CONFIG(CBFS_PRELOAD))
		dead_code();

	DEBUG("%s(name='%s')\n", __func__, name);

	if (_cbfs_boot_lookup(name, force_ro, &mdata, &rdev))
		return;

	size = region_device_sz(&rdev);

	context = alloc_cbfs_preload_context(strlen(name) + 1);
	if (!context) {
		ERROR("%s(name='%s') failed to allocate preload context\n", __func__, name);
		return;
	}

	context->buffer = mem_pool_alloc(&cbfs_cache, size);
	if (context->buffer == NULL) {
		ERROR("%s(name='%s') failed to allocate %zu bytes for preload buffer\n",
		      __func__, name, size);
		goto out;
	}

	context->rdev = rdev;
	strcpy(context->name, name);

	append_cbfs_preload_context(context);

	if (thread_run(&context->handle, cbfs_preload_thread_entry, context) == 0)
		return;

	ERROR("%s(name='%s') failed to start preload thread\n", __func__, name);
	mem_pool_free(&cbfs_cache, context->buffer);

out:
	free_cbfs_preload_context(context);
}

static struct cbfs_preload_context *find_cbfs_preload_context(const char *name)
{
	struct cbfs_preload_context *context;

	list_for_each(context, cbfs_preload_context_list, list_node) {
		if (strcmp(context->name, name) == 0)
			return context;
	}

	return NULL;
}

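/* If a preload was started for this name, wait for the worker thread to finish and
   chain *rdev to the in-memory copy. Returns CB_ERR_ARG when no preload exists (the
   caller then just reads from the boot device as usual). The context is always freed,
   so each preload can only be consumed once. */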
static enum cb_err get_preload_rdev(struct region_device *rdev, const char *name)
{
	enum cb_err err;
	struct cbfs_preload_context *context;

	if (!CONFIG(CBFS_PRELOAD) || !ENV_STAGE_SUPPORTS_COOP)
		return CB_ERR_ARG;

	context = find_cbfs_preload_context(name);
	if (!context)
		return CB_ERR_ARG;

	err = thread_join(&context->handle);
	if (err != CB_SUCCESS) {
		ERROR("%s(name='%s') Preload thread failed: %u\n", __func__, name, err);

		goto out;
	}

	if (rdev_chain_mem(rdev, context->buffer, region_device_sz(&context->rdev)) != 0) {
		ERROR("%s(name='%s') chaining failed\n", __func__, name);

		err = CB_ERR;
		goto out;
	}

	err = CB_SUCCESS;

	DEBUG("%s(name='%s') preload successful\n", __func__, name);

out:
	free_cbfs_preload_context(context);

	return err;
}

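/* Common allocation path for the load/map variants: determine the output size
   (decompressed size for compressed files), obtain a destination buffer -- from the
   caller's allocator, a direct mapping, or the cbfs_cache -- and then load, verify and
   decompress the file into it. */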
static void *do_alloc(union cbfs_mdata *mdata, struct region_device *rdev,
		      cbfs_allocator_t allocator, void *arg, size_t *size_out,
		      bool skip_verification)
{
	size_t size = region_device_sz(rdev);
	void *loc = NULL;

	uint32_t compression = CBFS_COMPRESS_NONE;
	const struct cbfs_file_attr_compression *cattr = cbfs_find_attr(mdata,
				CBFS_FILE_ATTR_TAG_COMPRESSION, sizeof(*cattr));
	if (cattr) {
		compression = be32toh(cattr->compression);
		size = be32toh(cattr->decompressed_size);
	}

	if (size_out)
		*size_out = size;

	/* allocator == NULL means do a cbfs_map() */
	if (allocator) {
		loc = allocator(arg, size, mdata);
	} else if (compression == CBFS_COMPRESS_NONE) {
		void *mapping = rdev_mmap_full(rdev);
		if (!mapping)
			return NULL;
		if (cbfs_file_hash_mismatch(mapping, size, mdata, skip_verification)) {
			rdev_munmap(rdev, mapping);
			return NULL;
		}
		return mapping;
	} else if (!cbfs_cache.size) {
		/* In order to use the cbfs_cache you need to add a CBFS_CACHE to your
		 * memlayout. For stages that don't have .data sections (x86 pre-RAM),
		 * it is not possible to add a CBFS_CACHE. */
		ERROR("Cannot map compressed file %s without cbfs_cache\n", mdata->h.filename);
		return NULL;
	} else {
		loc = mem_pool_alloc(&cbfs_cache, size);
	}

	if (!loc) {
		ERROR("'%s' allocation failure\n", mdata->h.filename);
		return NULL;
	}

	size = cbfs_load_and_decompress(rdev, loc, size, compression, mdata, skip_verification);
	if (!size)
		return NULL;

	return loc;
}

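/* Backend for the public cbfs_alloc()/cbfs_map()/cbfs_load() family: look up the file,
   optionally check or report its type, and hand the data to do_alloc(). If the file was
   preloaded, the preload buffer is consumed and freed here. */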
void *_cbfs_alloc(const char *name, cbfs_allocator_t allocator, void *arg,
		  size_t *size_out, bool force_ro, enum cbfs_type *type)
{
	struct region_device rdev;
	bool preload_successful = false;
	union cbfs_mdata mdata;

	DEBUG("%s(name='%s', alloc=%p(%p), force_ro=%s, type=%d)\n", __func__, name, allocator,
	      arg, force_ro ? "true" : "false", type ? *type : -1);

	if (_cbfs_boot_lookup(name, force_ro, &mdata, &rdev))
		return NULL;

	if (type) {
		const enum cbfs_type real_type = be32toh(mdata.h.type);
		if (*type == CBFS_TYPE_QUERY)
			*type = real_type;
		else if (*type != real_type) {
			ERROR("'%s' type mismatch (is %u, expected %u)\n",
			      mdata.h.filename, real_type, *type);
			return NULL;
		}
	}

	/* Update the rdev with the preload content */
	if (!force_ro && get_preload_rdev(&rdev, name) == CB_SUCCESS)
		preload_successful = true;

	void *ret = do_alloc(&mdata, &rdev, allocator, arg, size_out, false);

	/* When using cbfs_preload we need to free the preload buffer after populating the
	 * destination buffer. We know we must have a mem_rdev here, so extra mmap is fine. */
	if (preload_successful)
		cbfs_unmap(rdev_mmap_full(&rdev));

	return ret;
}

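/* Like _cbfs_alloc(), but for files in an arbitrary FMAP region rather than the verified
   boot path. File hashes are deliberately not checked here, so callers must treat the
   contents as untrusted or covered by some other integrity mechanism. */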
void *_cbfs_unverified_area_alloc(const char *area, const char *name,
				  cbfs_allocator_t allocator, void *arg, size_t *size_out)
{
	struct region_device area_rdev, file_rdev;
	union cbfs_mdata mdata;
	size_t data_offset;

	DEBUG("%s(area='%s', name='%s', alloc=%p(%p))\n", __func__, area, name, allocator, arg);

	if (fmap_locate_area_as_rdev(area, &area_rdev))
		return NULL;

	if (cbfs_lookup(&area_rdev, name, &mdata, &data_offset, NULL)) {
		ERROR("'%s' not found in '%s'\n", name, area);
		return NULL;
	}

	if (rdev_chain(&file_rdev, &area_rdev, data_offset, be32toh(mdata.h.len)))
		return NULL;

	return do_alloc(&mdata, &file_rdev, allocator, arg, size_out, true);
}

void *_cbfs_default_allocator(void *arg, size_t size, const union cbfs_mdata *unused)
{
	struct _cbfs_default_allocator_arg *darg = arg;
	if (size > darg->buf_size)
		return NULL;
	return darg->buf;
}

void *_cbfs_cbmem_allocator(void *arg, size_t size, const union cbfs_mdata *unused)
{
	return cbmem_add((uintptr_t)arg, size);
}

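/* Load a stage into the program struct from its CBFS stageheader attribute. Handles the
   XIP case (the stage executes directly from memory-mapped flash), in-place LZ4
   decompression, and zeroing the part of the load area not covered by the file. */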
cb_err_t cbfs_prog_stage_load(struct prog *pstage)
{
	union cbfs_mdata mdata;
	struct region_device rdev;
	cb_err_t err;

	prog_locate_hook(pstage);

	if ((err = _cbfs_boot_lookup(prog_name(pstage), false, &mdata, &rdev)))
		return err;

	assert(be32toh(mdata.h.type) == CBFS_TYPE_STAGE);
	pstage->cbfs_type = CBFS_TYPE_STAGE;

	enum cbfs_compression compression = CBFS_COMPRESS_NONE;
	const struct cbfs_file_attr_compression *cattr = cbfs_find_attr(&mdata,
				CBFS_FILE_ATTR_TAG_COMPRESSION, sizeof(*cattr));
	if (cattr)
		compression = be32toh(cattr->compression);

	const struct cbfs_file_attr_stageheader *sattr = cbfs_find_attr(&mdata,
				CBFS_FILE_ATTR_TAG_STAGEHEADER, sizeof(*sattr));
	if (!sattr)
		return CB_ERR;
	prog_set_area(pstage, (void *)(uintptr_t)be64toh(sattr->loadaddr),
		      be32toh(sattr->memlen));
	prog_set_entry(pstage, prog_start(pstage) +
		       be32toh(sattr->entry_offset), NULL);

	/* Hacky way to not load programs over read only media. The stages
	 * that would hit this path initialize themselves. */
	if ((ENV_BOOTBLOCK || ENV_SEPARATE_VERSTAGE) &&
	    !CONFIG(NO_XIP_EARLY_STAGES) && CONFIG(BOOT_DEVICE_MEMORY_MAPPED)) {
		void *mapping = rdev_mmap_full(&rdev);
		rdev_munmap(&rdev, mapping);
		if (cbfs_file_hash_mismatch(mapping, region_device_sz(&rdev), &mdata, false))
			return CB_CBFS_HASH_MISMATCH;
		if (mapping == prog_start(pstage))
			return CB_SUCCESS;
	}

	/* LZ4 stages can be decompressed in-place to save mapping scratch space. Load the
	   compressed data to the end of the buffer and point &rdev to that memory location. */
	if (cbfs_lz4_enabled() && compression == CBFS_COMPRESS_LZ4) {
		size_t in_size = region_device_sz(&rdev);
		void *compr_start = prog_start(pstage) + prog_size(pstage) - in_size;
		if (rdev_readat(&rdev, compr_start, 0, in_size) != in_size)
			return CB_ERR;
		rdev_chain_mem(&rdev, compr_start, in_size);
	}

	size_t fsize = cbfs_load_and_decompress(&rdev, prog_start(pstage), prog_size(pstage),
						compression, &mdata, false);
	if (!fsize)
		return CB_ERR;

	/* Clear area not covered by file. */
	memset(prog_start(pstage) + fsize, 0, prog_size(pstage) - fsize);

	prog_segment_loaded((uintptr_t)prog_start(pstage), prog_size(pstage),
			    SEG_FINAL);

	return CB_SUCCESS;
}

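/* Figure out where the mcache for the given CBFS (RO or RW) lives: after CBMEM init it
   is the copy migrated into CBMEM; before that, it is a slice of the static cbfs_mcache
   region, split between RO and RW according to CONFIG_CBFS_MCACHE_RW_PERCENTAGE. */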
void cbfs_boot_device_find_mcache(struct cbfs_boot_device *cbd, uint32_t id)
{
	if (CONFIG(NO_CBFS_MCACHE) || ENV_SMM)
		return;

	if (cbd->mcache_size)
		return;

	const struct cbmem_entry *entry;
	if (cbmem_possibly_online() &&
	    (entry = cbmem_entry_find(id))) {
		cbd->mcache = cbmem_entry_start(entry);
		cbd->mcache_size = cbmem_entry_size(entry);
	} else if (ENV_ROMSTAGE_OR_BEFORE) {
		u8 *boundary = _ecbfs_mcache - REGION_SIZE(cbfs_mcache) *
					       CONFIG_CBFS_MCACHE_RW_PERCENTAGE / 100;
		boundary = (u8 *)ALIGN_DOWN((uintptr_t)boundary, CBFS_MCACHE_ALIGNMENT);
		if (id == CBMEM_ID_CBFS_RO_MCACHE) {
			cbd->mcache = _cbfs_mcache;
			cbd->mcache_size = boundary - _cbfs_mcache;
		} else if (id == CBMEM_ID_CBFS_RW_MCACHE) {
			cbd->mcache = boundary;
			cbd->mcache_size = _ecbfs_mcache - boundary;
		}
	}
}

cb_err_t cbfs_init_boot_device(const struct cbfs_boot_device *cbd,
			       struct vb2_hash *mdata_hash)
{
	/* If we have an mcache, mcache_build() will also check mdata hash. */
	if (!CONFIG(NO_CBFS_MCACHE) && !ENV_SMM && cbd->mcache_size > 0)
		return cbfs_mcache_build(&cbd->rdev, cbd->mcache, cbd->mcache_size, mdata_hash);

	/* No mcache and no verification means we have nothing special to do. */
	if (!CONFIG(CBFS_VERIFICATION) || !mdata_hash)
		return CB_SUCCESS;

	/* Verification only: use cbfs_walk() without a walker() function to just run through
	   the CBFS once, will return NOT_FOUND by default. */
	cb_err_t err = cbfs_walk(&cbd->rdev, NULL, NULL, mdata_hash, 0);
	if (err == CB_CBFS_NOT_FOUND)
		err = CB_SUCCESS;
	return err;
}

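/* Returns the CBFS boot device to use: the RW CBFS picked by vboot when it is active,
   otherwise the RO "COREBOOT" FMAP region. The first call in the initial stage also
   builds the RO mcache and verifies the metadata hash, dying on any corruption. */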
const struct cbfs_boot_device *cbfs_get_boot_device(bool force_ro)
{
	static struct cbfs_boot_device ro;

	/* Ensure we always init RO mcache, even if the first file is from the RW CBFS.
	   Otherwise it may not be available when needed in later stages. */
	if (ENV_INITIAL_STAGE && !force_ro && !region_device_sz(&ro.rdev))
		cbfs_get_boot_device(true);

	if (!force_ro) {
		const struct cbfs_boot_device *rw = vboot_get_cbfs_boot_device();
		/* This will return NULL if vboot isn't enabled, didn't run yet or decided to
		   boot into recovery mode. */
		if (rw)
			return rw;
	}

	/* In rare cases post-RAM stages may run this before cbmem_initialize(), so we can't
	   lock in the result of find_mcache() on the first try and should keep trying every
	   time until an mcache is found. */
	cbfs_boot_device_find_mcache(&ro, CBMEM_ID_CBFS_RO_MCACHE);

	if (region_device_sz(&ro.rdev))
		return &ro;

	if (fmap_locate_area_as_rdev("COREBOOT", &ro.rdev))
		die("Cannot locate primary CBFS");

	if (ENV_INITIAL_STAGE) {
		cb_err_t err = cbfs_init_boot_device(&ro, metadata_hash_get());
		if (err == CB_CBFS_HASH_MISMATCH)
			die("RO CBFS metadata hash verification failure");
		else if (CONFIG(TOCTOU_SAFETY) && err == CB_CBFS_CACHE_FULL)
			die("RO mcache overflow breaks TOCTOU safety!\n");
		else if (err && err != CB_CBFS_CACHE_FULL)
			die("RO CBFS initialization error: %d", err);
	}

	return &ro;
}

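/* The pre-RAM mcaches live in memory that later stages cannot rely on, so this hook
   copies the used portion of each into a CBMEM entry that
   cbfs_boot_device_find_mcache() can find by id in post-RAM stages. */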
#if !CONFIG(NO_CBFS_MCACHE)
static void mcache_to_cbmem(const struct cbfs_boot_device *cbd, u32 cbmem_id)
{
	if (!cbd)
		return;

	size_t real_size = cbfs_mcache_real_size(cbd->mcache, cbd->mcache_size);
	void *cbmem_mcache = cbmem_add(cbmem_id, real_size);
	if (!cbmem_mcache) {
		printk(BIOS_ERR, "ERROR: Cannot allocate CBMEM mcache %#x (%#zx bytes)!\n",
		       cbmem_id, real_size);
		return;
	}
	memcpy(cbmem_mcache, cbd->mcache, real_size);
}

static void cbfs_mcache_migrate(int unused)
{
	mcache_to_cbmem(vboot_get_cbfs_boot_device(), CBMEM_ID_CBFS_RW_MCACHE);
	mcache_to_cbmem(cbfs_get_boot_device(true), CBMEM_ID_CBFS_RO_MCACHE);
}
ROMSTAGE_CBMEM_INIT_HOOK(cbfs_mcache_migrate)
#endif