blob: 4e25d27cfb0388e0361ed0bd7213a2f35a7c7d44 [file] [log] [blame]
Angel Pons118a9c72020-04-02 23:48:34 +02001/* SPDX-License-Identifier: GPL-2.0-only */
Peter Stuge483b7bb2009-04-14 07:40:01 +00002
Aaron Durbin899d13d2015-05-15 23:39:23 -05003#include <assert.h>
Aaron Durbin899d13d2015-05-15 23:39:23 -05004#include <boot_device.h>
5#include <cbfs.h>
Julius Werner1e37c9c2019-12-11 17:09:39 -08006#include <cbmem.h>
Julius Werner57d4bc62021-11-15 10:13:37 -08007#include <commonlib/bsd/cbfs_private.h>
Julius Werner98eeb962019-12-11 15:47:42 -08008#include <commonlib/bsd/compression.h>
Bill XIEc79e96b2019-08-22 20:28:36 +08009#include <console/console.h>
Bill XIEc79e96b2019-08-22 20:28:36 +080010#include <fmap.h>
Aaron Durbin899d13d2015-05-15 23:39:23 -050011#include <lib.h>
Raul E Rangel4cfb8622021-11-01 13:40:14 -060012#include <list.h>
Julius Wernerfdabf3f2020-05-06 17:06:35 -070013#include <metadata_hash.h>
Bill XIEc79e96b2019-08-22 20:28:36 +080014#include <security/tpm/tspi/crtm.h>
Jakub Czapiga967a76b2022-08-19 12:25:27 +020015#include <security/vboot/vboot_common.h>
Julius Wernerd96ca242022-08-08 18:08:35 -070016#include <security/vboot/misc.h>
Bill XIEc79e96b2019-08-22 20:28:36 +080017#include <stdlib.h>
18#include <string.h>
Aaron Durbin899d13d2015-05-15 23:39:23 -050019#include <symbols.h>
Raul E Rangel4cfb8622021-11-01 13:40:14 -060020#include <thread.h>
Julius Werner09f29212015-09-29 13:51:35 -070021#include <timestamp.h>
Patrick Georgi58a150a2016-05-02 17:22:29 +080022
/*
 * Backing pool for cbfs_map() of compressed files and other CBFS scratch
 * allocations. Only stages with a .data section get a real pool; elsewhere the
 * pool is zero-sized so mem_pool_alloc() on it always fails.
 */
#if ENV_HAS_DATA_SECTION
struct mem_pool cbfs_cache =
	MEM_POOL_INIT(_cbfs_cache, REGION_SIZE(cbfs_cache), CONFIG_CBFS_CACHE_ALIGN);
#else
struct mem_pool cbfs_cache = MEM_POOL_INIT(NULL, 0, 0);
#endif
Julius Werner9b1f3cc2020-12-30 17:30:12 -080029
/*
 * Re-seat the cbfs_cache onto the (post-RAM) cache region once CBMEM comes
 * online, unless the pre-RAM and post-RAM regions are the same symbol. Runs as
 * a CBMEM creation hook. Note this re-initializes the pool, so any pre-RAM
 * cache allocations are implicitly abandoned at this point.
 */
static void switch_to_postram_cache(int unused)
{
	if (_preram_cbfs_cache != _postram_cbfs_cache)
		mem_pool_init(&cbfs_cache, _postram_cbfs_cache, REGION_SIZE(postram_cbfs_cache),
			      CONFIG_CBFS_CACHE_ALIGN);
}
CBMEM_CREATION_HOOK(switch_to_postram_cache);
Julius Werner9b1f3cc2020-12-30 17:30:12 -080037
/*
 * Look up |name| in the active CBFS (RW unless |force_ro|), filling in the
 * file metadata and an rdev chained to the file data on success.
 *
 * Fast path is the metadata cache (mcache); on CB_CBFS_CACHE_FULL it falls
 * back to walking flash directly, passing a metadata hash for verification
 * when TOCTOU safety demands it. With VBOOT_ENABLE_CBFS_FALLBACK, an RW miss
 * retries the lookup in the RO region. Returns CB_SUCCESS, CB_CBFS_NOT_FOUND,
 * CB_CBFS_HASH_MISMATCH or another cb_err.
 */
enum cb_err _cbfs_boot_lookup(const char *name, bool force_ro,
			      union cbfs_mdata *mdata, struct region_device *rdev)
{
	const struct cbfs_boot_device *cbd = cbfs_get_boot_device(force_ro);
	if (!cbd)
		return CB_ERR;

	size_t data_offset;
	enum cb_err err = CB_CBFS_CACHE_FULL;
	if (!CONFIG(NO_CBFS_MCACHE) && !ENV_SMM && cbd->mcache_size)
		err = cbfs_mcache_lookup(cbd->mcache, cbd->mcache_size,
					 name, mdata, &data_offset);
	if (err == CB_CBFS_CACHE_FULL) {
		/* Slow path: walk the CBFS on flash. */
		struct vb2_hash *metadata_hash = NULL;
		if (CONFIG(TOCTOU_SAFETY)) {
			if (ENV_SMM)  /* Cannot provide TOCTOU safety for SMM */
				dead_code();
			if (!cbd->mcache_size)
				die("Cannot access CBFS TOCTOU-safely in " ENV_STRING " before CBMEM init!\n");
			/* We can only reach this for the RW CBFS -- an mcache overflow in the
			   RO CBFS would have been caught when building the mcache in cbfs_get
			   boot_device(). (Note that TOCTOU_SAFETY implies !NO_CBFS_MCACHE.) */
			assert(cbd == vboot_get_cbfs_boot_device());
			if (!CONFIG(VBOOT)
			    || vb2api_get_metadata_hash(vboot_get_context(), &metadata_hash)
				       != VB2_SUCCESS)
				die("Failed to get RW metadata hash");
		}
		err = cbfs_lookup(&cbd->rdev, name, mdata, &data_offset, metadata_hash);
	}

	if (CONFIG(VBOOT_ENABLE_CBFS_FALLBACK) && !force_ro && err == CB_CBFS_NOT_FOUND) {
		printk(BIOS_INFO, "CBFS: Fall back to RO region for %s\n", name);
		return _cbfs_boot_lookup(name, true, mdata, rdev);
	}
	if (err) {
		if (err == CB_CBFS_NOT_FOUND)
			printk(BIOS_WARNING, "CBFS: '%s' not found.\n", name);
		else if (err == CB_CBFS_HASH_MISMATCH)
			printk(BIOS_ERR, "CBFS ERROR: metadata hash mismatch!\n");
		else
			printk(BIOS_ERR, "CBFS ERROR: error %d when looking up '%s'\n",
			       err, name);
		return err;
	}

	/* Chain |rdev| to just the file data (length taken from the file header). */
	if (rdev_chain(rdev, &cbd->rdev, data_offset, be32toh(mdata->h.len)))
		return CB_ERR;

	return CB_SUCCESS;
}
89
/* Release a mapping previously returned by cbfs_map()/rdev_mmap(). */
void cbfs_unmap(void *mapping)
{
	/*
	 * This is safe to call with mappings that weren't allocated in the cache (e.g. x86
	 * direct mappings) -- mem_pool_free() just does nothing for addresses it doesn't
	 * recognize. This hardcodes the assumption that if platforms implement an rdev_mmap()
	 * that requires a free() for the boot_device, they need to implement it via the
	 * cbfs_cache mem_pool.
	 */
	mem_pool_free(&cbfs_cache, mapping);
}
101
Aaron Durbina85febc2020-05-15 15:09:10 -0600102static inline bool fsps_env(void)
103{
104 /* FSP-S is assumed to be loaded in ramstage. */
105 if (ENV_RAMSTAGE)
106 return true;
107 return false;
108}
109
Aaron Durbinecbfa992020-05-15 17:01:58 -0600110static inline bool fspm_env(void)
111{
112 /* FSP-M is assumed to be loaded in romstage. */
Kyösti Mälkki11cac782022-04-07 07:16:48 +0300113 if (ENV_RAMINIT)
Aaron Durbinecbfa992020-05-15 17:01:58 -0600114 return true;
115 return false;
116}
117
Aaron Durbina121f952020-05-26 15:48:10 -0600118static inline bool cbfs_lz4_enabled(void)
119{
Aaron Durbina85febc2020-05-15 15:09:10 -0600120 if (fsps_env() && CONFIG(FSP_COMPRESS_FSP_S_LZ4))
121 return true;
Aaron Durbinecbfa992020-05-15 17:01:58 -0600122 if (fspm_env() && CONFIG(FSP_COMPRESS_FSP_M_LZ4))
123 return true;
Aaron Durbina85febc2020-05-15 15:09:10 -0600124
Aaron Durbina121f952020-05-26 15:48:10 -0600125 if ((ENV_BOOTBLOCK || ENV_SEPARATE_VERSTAGE) && !CONFIG(COMPRESS_PRERAM_STAGES))
126 return false;
127
Julius Werner9b1f3cc2020-12-30 17:30:12 -0800128 if (ENV_SMM)
129 return false;
130
Aaron Durbina121f952020-05-26 15:48:10 -0600131 return true;
132}
133
134static inline bool cbfs_lzma_enabled(void)
135{
Aaron Durbina85febc2020-05-15 15:09:10 -0600136 if (fsps_env() && CONFIG(FSP_COMPRESS_FSP_S_LZMA))
137 return true;
Aaron Durbinecbfa992020-05-15 17:01:58 -0600138 if (fspm_env() && CONFIG(FSP_COMPRESS_FSP_M_LZMA))
139 return true;
Aaron Durbina121f952020-05-26 15:48:10 -0600140 /* We assume here romstage and postcar are never compressed. */
141 if (ENV_BOOTBLOCK || ENV_SEPARATE_VERSTAGE)
142 return false;
143 if (ENV_ROMSTAGE && CONFIG(POSTCAR_STAGE))
144 return false;
Martin Roth40729a52023-01-04 17:26:21 -0700145 if ((ENV_ROMSTAGE || ENV_POSTCAR) && !CONFIG(COMPRESS_RAMSTAGE_LZMA))
Aaron Durbina121f952020-05-26 15:48:10 -0600146 return false;
Julius Werner9b1f3cc2020-12-30 17:30:12 -0800147 if (ENV_SMM)
148 return false;
Aaron Durbina121f952020-05-26 15:48:10 -0600149 return true;
150}
151
/*
 * Verify (CBFS_VERIFICATION) and/or measure (TPM_MEASURED_BOOT) a fully loaded
 * file buffer against its metadata. Returns true only on a *verification*
 * failure (missing hash or mismatch) -- measurement errors are logged but do
 * not block boot. |skip_verification| skips only the verify step, not
 * measurement.
 */
static bool cbfs_file_hash_mismatch(const void *buffer, size_t size,
				    const union cbfs_mdata *mdata, bool skip_verification)
{
	/* Avoid linking hash functions when verification and measurement are disabled. */
	if (!CONFIG(CBFS_VERIFICATION) && !CONFIG(TPM_MEASURED_BOOT))
		return false;

	const struct vb2_hash *hash = NULL;

	if (CONFIG(CBFS_VERIFICATION) && !skip_verification) {
		hash = cbfs_file_hash(mdata);
		if (!hash) {
			ERROR("'%s' does not have a file hash!\n", mdata->h.filename);
			return true;
		}

		vb2_error_t rv = vb2_hash_verify(vboot_hwcrypto_allowed(), buffer, size, hash);
		if (rv != VB2_SUCCESS) {
			ERROR("'%s' file hash mismatch!\n", mdata->h.filename);
			/* After verified boot has run, a body hash failure triggers
			   recovery reboot rather than a silent failure return. */
			if (CONFIG(VBOOT_CBFS_INTEGRATION) && !vboot_recovery_mode_enabled()
			    && vboot_logic_executed())
				vboot_fail_and_reboot(vboot_get_context(), VB2_RECOVERY_FW_BODY,
						      rv);
			return true;
		}
	}

	if (CONFIG(TPM_MEASURED_BOOT) && !ENV_SMM) {
		struct vb2_hash calculated_hash;

		/* No need to re-hash file if we already have it from verification. */
		if (!hash || hash->algo != TPM_MEASURE_ALGO) {
			if (vb2_hash_calculate(vboot_hwcrypto_allowed(), buffer, size,
					       TPM_MEASURE_ALGO, &calculated_hash))
				hash = NULL;
			else
				hash = &calculated_hash;
		}

		if (!hash ||
		    tspi_cbfs_measurement(mdata->h.filename, be32toh(mdata->h.type), hash))
			ERROR("failed to measure '%s' into TPM log\n", mdata->h.filename);
		/* We intentionally continue to boot on measurement errors. */
	}

	return false;
}
199
/*
 * Read the file data in |rdev| into |buffer| (at most |buffer_size| bytes),
 * decompressing per |compression|, and verify/measure the *compressed* input
 * via cbfs_file_hash_mismatch(). Returns the number of bytes placed in
 * |buffer|, or 0 on any error (too small, read/map failure, hash mismatch,
 * decompressor not linked into this stage).
 */
static size_t cbfs_load_and_decompress(const struct region_device *rdev, void *buffer,
				       size_t buffer_size, uint32_t compression,
				       const union cbfs_mdata *mdata, bool skip_verification)
{
	size_t in_size = region_device_sz(rdev);
	size_t out_size = 0;
	void *map;

	DEBUG("Decompressing %zu bytes from '%s' to %p with algo %d\n",
	      in_size, mdata->h.filename, buffer, compression);

	switch (compression) {
	case CBFS_COMPRESS_NONE:
		if (buffer_size < in_size)
			return 0;
		if (rdev_readat(rdev, buffer, 0, in_size) != in_size)
			return 0;
		/* Uncompressed: verify in place in the destination buffer. */
		if (cbfs_file_hash_mismatch(buffer, in_size, mdata, skip_verification))
			return 0;
		return in_size;

	case CBFS_COMPRESS_LZ4:
		if (!cbfs_lz4_enabled())
			return 0;

		/* cbfs_prog_stage_load() takes care of in-place LZ4 decompression by
		   setting up the rdev to be in memory. */
		map = rdev_mmap_full(rdev);
		if (map == NULL)
			return 0;

		if (!cbfs_file_hash_mismatch(map, in_size, mdata, skip_verification)) {
			timestamp_add_now(TS_ULZ4F_START);
			out_size = ulz4fn(map, in_size, buffer, buffer_size);
			timestamp_add_now(TS_ULZ4F_END);
		}

		rdev_munmap(rdev, map);

		return out_size;

	case CBFS_COMPRESS_LZMA:
		if (!cbfs_lzma_enabled())
			return 0;
		map = rdev_mmap_full(rdev);
		if (map == NULL)
			return 0;

		if (!cbfs_file_hash_mismatch(map, in_size, mdata, skip_verification)) {
			/* Note: timestamp not useful for memory-mapped media (x86) */
			timestamp_add_now(TS_ULZMA_START);
			out_size = ulzman(map, in_size, buffer, buffer_size);
			timestamp_add_now(TS_ULZMA_END);
		}

		rdev_munmap(rdev, map);

		return out_size;

	default:
		return 0;
	}
}
263
/*
 * Bookkeeping for an in-flight cbfs_preload(): the source rdev, the worker
 * thread handle, the destination buffer (cbfs_cache allocation) and the file
 * name (flexible array member, sized at allocation time). Contexts live on
 * cbfs_preload_context_list until consumed by get_preload_rdev().
 */
struct cbfs_preload_context {
	struct region_device rdev;
	struct thread_handle handle;
	struct list_node list_node;
	void *buffer;
	char name[];
};

/* All currently outstanding preload contexts. */
static struct list_node cbfs_preload_context_list;
273
274static struct cbfs_preload_context *alloc_cbfs_preload_context(size_t additional)
275{
276 struct cbfs_preload_context *context;
277 size_t size = sizeof(*context) + additional;
278
279 context = mem_pool_alloc(&cbfs_cache, size);
280
281 if (!context)
282 return NULL;
283
284 memset(context, 0, size);
285
286 return context;
287}
288
/* Track a newly started preload on the global outstanding-context list. */
static void append_cbfs_preload_context(struct cbfs_preload_context *context)
{
	list_append(&context->list_node, &cbfs_preload_context_list);
}
293
/* Unlink a context from the list and return its memory to the cbfs_cache.
   Note: this frees only the context, not context->buffer. */
static void free_cbfs_preload_context(struct cbfs_preload_context *context)
{
	list_remove(&context->list_node);

	mem_pool_free(&cbfs_cache, context);
}
300
301static enum cb_err cbfs_preload_thread_entry(void *arg)
302{
303 struct cbfs_preload_context *context = arg;
304
Julius Wernerf364eb92021-12-01 14:48:59 -0800305 if (rdev_read_full(&context->rdev, context->buffer) < 0) {
Raul E Rangel4cfb8622021-11-01 13:40:14 -0600306 ERROR("%s(name='%s') readat failed\n", __func__, context->name);
307 return CB_ERR;
308 }
309
310 return CB_SUCCESS;
311}
312
/*
 * Kick off an asynchronous read of CBFS file |name| into a cbfs_cache buffer,
 * so a later _cbfs_alloc() of the same file can consume RAM data instead of
 * re-reading flash. Best-effort: every failure path just returns and leaves
 * the normal (synchronous) load path to do the work.
 */
void cbfs_preload(const char *name)
{
	struct region_device rdev;
	union cbfs_mdata mdata;
	struct cbfs_preload_context *context;
	bool force_ro = false;
	size_t size;

	if (!CONFIG(CBFS_PRELOAD))
		dead_code();

	/* We don't want to cross the vboot boundary */
	if (ENV_ROMSTAGE && CONFIG(VBOOT_STARTS_IN_ROMSTAGE))
		return;

	DEBUG("%s(name='%s')\n", __func__, name);

	if (_cbfs_boot_lookup(name, force_ro, &mdata, &rdev))
		return;

	size = region_device_sz(&rdev);

	/* Context carries the name inline (flexible array member). */
	context = alloc_cbfs_preload_context(strlen(name) + 1);
	if (!context) {
		ERROR("%s(name='%s') failed to allocate preload context\n", __func__, name);
		return;
	}

	context->buffer = mem_pool_alloc(&cbfs_cache, size);
	if (context->buffer == NULL) {
		ERROR("%s(name='%s') failed to allocate %zu bytes for preload buffer\n",
		      __func__, name, size);
		goto out;
	}

	context->rdev = rdev;
	strcpy(context->name, name);

	append_cbfs_preload_context(context);

	/* On success, ownership of context and buffer passes to the thread/
	   get_preload_rdev(); do not free them here. */
	if (thread_run(&context->handle, cbfs_preload_thread_entry, context) == 0)
		return;

	ERROR("%s(name='%s') failed to start preload thread\n", __func__, name);
	mem_pool_free(&cbfs_cache, context->buffer);

out:
	free_cbfs_preload_context(context);
}
362
/* Find the outstanding preload context for |name|, or NULL if none exists. */
static struct cbfs_preload_context *find_cbfs_preload_context(const char *name)
{
	struct cbfs_preload_context *context;

	list_for_each(context, cbfs_preload_context_list, list_node) {
		if (strcmp(context->name, name) == 0)
			return context;
	}

	return NULL;
}
374
/*
 * If |name| was preloaded, wait for the worker thread and re-point |rdev| at
 * the in-RAM copy. Returns CB_SUCCESS on success (caller now owns the buffer
 * and must cbfs_unmap() it), CB_ERR_ARG when no preload exists or preloading
 * is disabled, CB_ERR/thread error otherwise. The context itself is always
 * freed once a matching context was found.
 */
static enum cb_err get_preload_rdev(struct region_device *rdev, const char *name)
{
	enum cb_err err;
	struct cbfs_preload_context *context;

	if (!CONFIG(CBFS_PRELOAD) || !ENV_SUPPORTS_COOP)
		return CB_ERR_ARG;

	context = find_cbfs_preload_context(name);
	if (!context)
		return CB_ERR_ARG;

	err = thread_join(&context->handle);
	if (err != CB_SUCCESS) {
		ERROR("%s(name='%s') Preload thread failed: %u\n", __func__, name, err);

		goto out;
	}

	if (rdev_chain_mem(rdev, context->buffer, region_device_sz(&context->rdev)) != 0) {
		ERROR("%s(name='%s') chaining failed\n", __func__, name);

		err = CB_ERR;
		goto out;
	}

	err = CB_SUCCESS;

	DEBUG("%s(name='%s') preload successful\n", __func__, name);

out:
	free_cbfs_preload_context(context);

	return err;
}
410
/*
 * Common back end for _cbfs_alloc()/_cbfs_unverified_area_alloc(): pick a
 * destination (caller allocator, direct mapping for uncompressed cbfs_map(),
 * or the cbfs_cache for compressed cbfs_map()), then load/decompress/verify
 * the file into it. Returns the destination pointer or NULL on failure.
 * |size_out|, if non-NULL, receives the decompressed file size.
 */
static void *do_alloc(union cbfs_mdata *mdata, struct region_device *rdev,
		      cbfs_allocator_t allocator, void *arg, size_t *size_out,
		      bool skip_verification)
{
	size_t size = region_device_sz(rdev);
	void *loc = NULL;

	uint32_t compression = CBFS_COMPRESS_NONE;
	const struct cbfs_file_attr_compression *cattr = cbfs_find_attr(mdata,
				CBFS_FILE_ATTR_TAG_COMPRESSION, sizeof(*cattr));
	if (cattr) {
		compression = be32toh(cattr->compression);
		/* For compressed files, |size| is the decompressed size. */
		size = be32toh(cattr->decompressed_size);
	}

	if (size_out)
		*size_out = size;

	/* allocator == NULL means do a cbfs_map() */
	if (allocator) {
		loc = allocator(arg, size, mdata);
	} else if (compression == CBFS_COMPRESS_NONE) {
		/* Uncompressed map: hand back the raw mapping, verified in place. */
		void *mapping = rdev_mmap_full(rdev);
		if (!mapping)
			return NULL;
		if (cbfs_file_hash_mismatch(mapping, size, mdata, skip_verification)) {
			rdev_munmap(rdev, mapping);
			return NULL;
		}
		return mapping;
	} else if (!cbfs_cache.size) {
		/* In order to use the cbfs_cache you need to add a CBFS_CACHE to your
		 * memlayout. For stages that don't have .data sections (x86 pre-RAM),
		 * it is not possible to add a CBFS_CACHE. */
		ERROR("Cannot map compressed file %s without cbfs_cache\n", mdata->h.filename);
		return NULL;
	} else {
		loc = mem_pool_alloc(&cbfs_cache, size);
	}

	if (!loc) {
		ERROR("'%s' allocation failure\n", mdata->h.filename);
		return NULL;
	}

	size = cbfs_load_and_decompress(rdev, loc, size, compression, mdata, skip_verification);
	if (!size)
		return NULL;

	return loc;
}
462
/*
 * Core implementation behind the cbfs_load()/cbfs_map()/cbfs_alloc() macro
 * family: look up |name|, optionally check/return its CBFS |type|, consume a
 * preloaded copy if one exists, and hand off to do_alloc(). Returns the loaded
 * location or NULL on any failure.
 */
void *_cbfs_alloc(const char *name, cbfs_allocator_t allocator, void *arg,
		  size_t *size_out, bool force_ro, enum cbfs_type *type)
{
	struct region_device rdev;
	bool preload_successful = false;
	union cbfs_mdata mdata;

	DEBUG("%s(name='%s', alloc=%p(%p), force_ro=%s, type=%d)\n", __func__, name, allocator,
	      arg, force_ro ? "true" : "false", type ? *type : -1);

	if (_cbfs_boot_lookup(name, force_ro, &mdata, &rdev))
		return NULL;

	if (type) {
		const enum cbfs_type real_type = be32toh(mdata.h.type);
		/* CBFS_TYPE_QUERY means "report the type"; otherwise enforce it. */
		if (*type == CBFS_TYPE_QUERY)
			*type = real_type;
		else if (*type != real_type) {
			ERROR("'%s' type mismatch (is %u, expected %u)\n",
			      mdata.h.filename, real_type, *type);
			return NULL;
		}
	}

	/* Update the rdev with the preload content */
	if (!force_ro && get_preload_rdev(&rdev, name) == CB_SUCCESS)
		preload_successful = true;

	void *ret = do_alloc(&mdata, &rdev, allocator, arg, size_out, false);

	/* When using cbfs_preload we need to free the preload buffer after populating the
	 * destination buffer. We know we must have a mem_rdev here, so extra mmap is fine. */
	if (preload_successful)
		cbfs_unmap(rdev_mmap_full(&rdev));

	return ret;
}
500
/*
 * Like _cbfs_alloc(), but for a file in an arbitrary FMAP |area| rather than
 * the verified boot CBFS. File hash verification is skipped -- callers must
 * treat the contents as untrusted. Returns the loaded location or NULL.
 */
void *_cbfs_unverified_area_alloc(const char *area, const char *name,
				  cbfs_allocator_t allocator, void *arg, size_t *size_out)
{
	struct region_device area_rdev, file_rdev;
	union cbfs_mdata mdata;
	size_t data_offset;

	DEBUG("%s(area='%s', name='%s', alloc=%p(%p))\n", __func__, area, name, allocator, arg);

	if (fmap_locate_area_as_rdev(area, &area_rdev))
		return NULL;

	if (cbfs_lookup(&area_rdev, name, &mdata, &data_offset, NULL)) {
		ERROR("'%s' not found in '%s'\n", name, area);
		return NULL;
	}

	/* Narrow the rdev down to just this file's data. */
	if (rdev_chain(&file_rdev, &area_rdev, data_offset, be32toh(mdata.h.len)))
		return NULL;

	return do_alloc(&mdata, &file_rdev, allocator, arg, size_out, true);
}
523
Julius Werner4676ec52021-03-10 16:52:14 -0800524void *_cbfs_default_allocator(void *arg, size_t size, const union cbfs_mdata *unused)
Julius Werner7778cf22021-01-05 18:54:19 -0800525{
526 struct _cbfs_default_allocator_arg *darg = arg;
527 if (size > darg->buf_size)
528 return NULL;
529 return darg->buf;
530}
531
/* Allocator used by cbfs_cbmem_alloc(): |arg| is the CBMEM id (cast to a
   pointer) under which a buffer of |size| is created. */
void *_cbfs_cbmem_allocator(void *arg, size_t size, const union cbfs_mdata *unused)
{
	return cbmem_add((uintptr_t)arg, size);
}
536
/*
 * Load a stage (next coreboot stage or payload stage) described by |pstage|
 * from CBFS into its link-time location and set its entry point. Handles the
 * execute-in-place shortcut for memory-mapped early stages and in-place LZ4
 * decompression. Returns CB_SUCCESS or a cb_err on failure.
 */
enum cb_err cbfs_prog_stage_load(struct prog *pstage)
{
	union cbfs_mdata mdata;
	struct region_device rdev;
	enum cb_err err;

	prog_locate_hook(pstage);

	if ((err = _cbfs_boot_lookup(prog_name(pstage), false, &mdata, &rdev)))
		return err;

	assert(be32toh(mdata.h.type) == CBFS_TYPE_STAGE);
	pstage->cbfs_type = CBFS_TYPE_STAGE;

	enum cbfs_compression compression = CBFS_COMPRESS_NONE;
	const struct cbfs_file_attr_compression *cattr = cbfs_find_attr(&mdata,
				CBFS_FILE_ATTR_TAG_COMPRESSION, sizeof(*cattr));
	if (cattr)
		compression = be32toh(cattr->compression);

	/* The stageheader attribute carries load address, memory size and
	   entry offset -- without it the file is not a loadable stage. */
	const struct cbfs_file_attr_stageheader *sattr = cbfs_find_attr(&mdata,
				CBFS_FILE_ATTR_TAG_STAGEHEADER, sizeof(*sattr));
	if (!sattr)
		return CB_ERR;
	prog_set_area(pstage, (void *)(uintptr_t)be64toh(sattr->loadaddr),
		      be32toh(sattr->memlen));
	prog_set_entry(pstage, prog_start(pstage) +
			       be32toh(sattr->entry_offset), NULL);

	/* Hacky way to not load programs over read only media. The stages
	 * that would hit this path initialize themselves. */
	if ((ENV_BOOTBLOCK || ENV_SEPARATE_VERSTAGE) &&
	    !CONFIG(NO_XIP_EARLY_STAGES) && CONFIG(BOOT_DEVICE_MEMORY_MAPPED)) {
		void *mapping = rdev_mmap_full(&rdev);
		rdev_munmap(&rdev, mapping);
		if (cbfs_file_hash_mismatch(mapping, region_device_sz(&rdev), &mdata, false))
			return CB_CBFS_HASH_MISMATCH;
		/* If the mapping already is the load address, this is XIP: done. */
		if (mapping == prog_start(pstage))
			return CB_SUCCESS;
	}

	/* LZ4 stages can be decompressed in-place to save mapping scratch space. Load the
	   compressed data to the end of the buffer and point &rdev to that memory location. */
	if (cbfs_lz4_enabled() && compression == CBFS_COMPRESS_LZ4) {
		size_t in_size = region_device_sz(&rdev);
		void *compr_start = prog_start(pstage) + prog_size(pstage) - in_size;
		if (rdev_readat(&rdev, compr_start, 0, in_size) != in_size)
			return CB_ERR;
		rdev_chain_mem(&rdev, compr_start, in_size);
	}

	size_t fsize = cbfs_load_and_decompress(&rdev, prog_start(pstage), prog_size(pstage),
						compression, &mdata, false);
	if (!fsize)
		return CB_ERR;

	/* Clear area not covered by file. */
	memset(prog_start(pstage) + fsize, 0, prog_size(pstage) - fsize);

	prog_segment_loaded((uintptr_t)prog_start(pstage), prog_size(pstage),
			    SEG_FINAL);

	return CB_SUCCESS;
}
Aaron Durbin6d720f32015-12-08 17:00:23 -0600601
/*
 * Populate |cbd|'s mcache pointer/size for the CBFS identified by CBMEM |id|:
 * prefer a migrated copy in CBMEM once it is online; before RAM, carve the
 * static mcache region into an RO part and an RW part (RW share set by
 * CONFIG_CBFS_MCACHE_RW_PERCENTAGE). No-op if already set or mcache disabled.
 */
void cbfs_boot_device_find_mcache(struct cbfs_boot_device *cbd, uint32_t id)
{
	if (CONFIG(NO_CBFS_MCACHE) || ENV_SMM)
		return;

	if (cbd->mcache_size)
		return;

	const struct cbmem_entry *entry;
	if (cbmem_possibly_online() &&
	    (entry = cbmem_entry_find(id))) {
		cbd->mcache = cbmem_entry_start(entry);
		cbd->mcache_size = cbmem_entry_size(entry);
	} else if (ENV_ROMSTAGE_OR_BEFORE) {
		/* Split point between the RO (low) and RW (high) halves. */
		u8 *boundary = _ecbfs_mcache - REGION_SIZE(cbfs_mcache) *
			       CONFIG_CBFS_MCACHE_RW_PERCENTAGE / 100;
		boundary = (u8 *)ALIGN_DOWN((uintptr_t)boundary, CBFS_MCACHE_ALIGNMENT);
		if (id == CBMEM_ID_CBFS_RO_MCACHE) {
			cbd->mcache = _cbfs_mcache;
			cbd->mcache_size = boundary - _cbfs_mcache;
		} else if (id == CBMEM_ID_CBFS_RW_MCACHE) {
			cbd->mcache = boundary;
			cbd->mcache_size = _ecbfs_mcache - boundary;
		}
	}
}
Julius Werner1e37c9c2019-12-11 17:09:39 -0800628
/*
 * One-time initialization of a CBFS boot device: build the mcache (which also
 * checks |mdata_hash| as it walks), or -- with verification enabled but no
 * mcache -- walk the CBFS once purely to verify the metadata hash. Returns
 * CB_SUCCESS, CB_CBFS_CACHE_FULL, CB_CBFS_HASH_MISMATCH or another cb_err.
 */
enum cb_err cbfs_init_boot_device(const struct cbfs_boot_device *cbd,
				  struct vb2_hash *mdata_hash)
{
	/* If we have an mcache, mcache_build() will also check mdata hash. */
	if (!CONFIG(NO_CBFS_MCACHE) && !ENV_SMM && cbd->mcache_size > 0)
		return cbfs_mcache_build(&cbd->rdev, cbd->mcache, cbd->mcache_size, mdata_hash);

	/* No mcache and no verification means we have nothing special to do. */
	if (!CONFIG(CBFS_VERIFICATION) || !mdata_hash)
		return CB_SUCCESS;

	/* Verification only: use cbfs_walk() without a walker() function to just run through
	   the CBFS once, will return NOT_FOUND by default. */
	enum cb_err err = cbfs_walk(&cbd->rdev, NULL, NULL, mdata_hash, 0);
	if (err == CB_CBFS_NOT_FOUND)
		err = CB_SUCCESS;
	return err;
}
647
/*
 * Return the active CBFS boot device: the vboot-selected RW CBFS if available
 * (and not |force_ro|), otherwise the RO "COREBOOT" region. The RO device is
 * lazily located and, in the initial stage, fully initialized (mcache build /
 * metadata verification) -- fatal errors there die(). May return NULL only for
 * the RW case before vboot has run.
 */
const struct cbfs_boot_device *cbfs_get_boot_device(bool force_ro)
{
	static struct cbfs_boot_device ro;

	/* Ensure we always init RO mcache, even if the first file is from the RW CBFS.
	   Otherwise it may not be available when needed in later stages. */
	if (ENV_INITIAL_STAGE && !force_ro && !region_device_sz(&ro.rdev))
		cbfs_get_boot_device(true);

	if (!force_ro) {
		const struct cbfs_boot_device *rw = vboot_get_cbfs_boot_device();
		/* This will return NULL if vboot isn't enabled, didn't run yet or decided to
		   boot into recovery mode. */
		if (rw)
			return rw;
	}

	/* In rare cases post-RAM stages may run this before cbmem_initialize(), so we can't
	   lock in the result of find_mcache() on the first try and should keep trying every
	   time until an mcache is found. */
	cbfs_boot_device_find_mcache(&ro, CBMEM_ID_CBFS_RO_MCACHE);

	if (region_device_sz(&ro.rdev))
		return &ro;

	if (fmap_locate_area_as_rdev("COREBOOT", &ro.rdev))
		die("Cannot locate primary CBFS");

	if (ENV_INITIAL_STAGE) {
		enum cb_err err = cbfs_init_boot_device(&ro, metadata_hash_get());
		if (err == CB_CBFS_HASH_MISMATCH)
			die("RO CBFS metadata hash verification failure");
		else if (CONFIG(TOCTOU_SAFETY) && err == CB_CBFS_CACHE_FULL)
			die("RO mcache overflow breaks TOCTOU safety!\n");
		else if (err && err != CB_CBFS_CACHE_FULL)
			die("RO CBFS initialization error: %d", err);
	}

	return &ro;
}
688
#if !CONFIG(NO_CBFS_MCACHE)
/* Copy the in-use portion of an mcache into a fresh CBMEM entry so later
   stages can find it via cbfs_boot_device_find_mcache(). Best-effort. */
static void mcache_to_cbmem(const struct cbfs_boot_device *cbd, u32 cbmem_id)
{
	if (!cbd)
		return;

	size_t real_size = cbfs_mcache_real_size(cbd->mcache, cbd->mcache_size);
	void *cbmem_mcache = cbmem_add(cbmem_id, real_size);
	if (!cbmem_mcache) {
		printk(BIOS_ERR, "Cannot allocate CBMEM mcache %#x (%#zx bytes)!\n",
		       cbmem_id, real_size);
		return;
	}
	memcpy(cbmem_mcache, cbd->mcache, real_size);
}

/* CBMEM creation hook: migrate both RW and RO mcaches out of pre-RAM memory. */
static void cbfs_mcache_migrate(int unused)
{
	mcache_to_cbmem(vboot_get_cbfs_boot_device(), CBMEM_ID_CBFS_RW_MCACHE);
	mcache_to_cbmem(cbfs_get_boot_device(true), CBMEM_ID_CBFS_RO_MCACHE);
}
CBMEM_CREATION_HOOK(cbfs_mcache_migrate);
#endif