/* SPDX-License-Identifier: GPL-2.0-only */

#include <assert.h>
#include <boot/coreboot_tables.h>
#include <bootmem.h>
#include <console/console.h>
#include <cbmem.h>
#include <imd.h>
#include <lib.h>
#include <types.h>

/* The program loader passes on cbmem_top and the program entry point
   has to fill in the _cbmem_top_ptr symbol based on the calling arguments. */
uintptr_t _cbmem_top_ptr;

static struct imd imd;

void *cbmem_top(void)
{
	if (ENV_CREATES_CBMEM) {
		static uintptr_t top;
		if (top)
			return (void *)top;
		top = cbmem_top_chipset();
		return (void *)top;
	}
	if (ENV_POSTCAR || ENV_RAMSTAGE)
		return (void *)_cbmem_top_ptr;

	dead_code();
}

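/* Set to 1 once one of the initialization paths below has completed. */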
int cbmem_initialized;

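/* struct cbmem_entry is an opaque view of struct imd_entry, so the two can
   be converted with plain casts. */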
static inline const struct cbmem_entry *imd_to_cbmem(const struct imd_entry *e)
{
	return (const struct cbmem_entry *)e;
}

static inline const struct imd_entry *cbmem_to_imd(const struct cbmem_entry *e)
{
	return (const struct imd_entry *)e;
}

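/* Create an empty CBMEM area without preallocating an entry. */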
void cbmem_initialize_empty(void)
{
	cbmem_initialize_empty_id_size(0, 0);
}

static void cbmem_top_init_once(void)
{
	/* Call one-time hook on expected cbmem init during boot. */
	if (!ENV_CREATES_CBMEM)
		return;

	/* The test is only effective on X86 and when address hits UC memory. */
	if (ENV_X86)
		quick_ram_check_or_die((uintptr_t)cbmem_top() - sizeof(u32));
}

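/*
 * Create a fresh, tiered CBMEM area at cbmem_top(), discarding any previous
 * contents. If a non-zero size is given, an entry with the given id is added
 * right away. Init hooks then run in the "no recovery" case.
 */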
void cbmem_initialize_empty_id_size(u32 id, u64 size)
{
	const int no_recovery = 0;

	cbmem_top_init_once();

	imd_handle_init(&imd, cbmem_top());

	printk(BIOS_DEBUG, "CBMEM:\n");

	if (imd_create_tiered_empty(&imd, CBMEM_ROOT_MIN_SIZE, CBMEM_LG_ALIGN,
				    CBMEM_SM_ROOT_SIZE, CBMEM_SM_ALIGN)) {
		printk(BIOS_DEBUG, "failed.\n");
		return;
	}

	/* Add the specified range first */
	if (size)
		cbmem_add(id, size);

	/* Complete migration to CBMEM. */
	cbmem_run_init_hooks(no_recovery);

	cbmem_initialized = 1;
}

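/* Recover an existing CBMEM area without preallocating an entry. */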
int cbmem_initialize(void)
{
	return cbmem_initialize_id_size(0, 0);
}

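/*
 * Recover a previously created CBMEM area, e.g. across S3 resume. Returns 1
 * if recovery failed and 0 on success, in which case an optional entry of
 * the given id/size is added and init hooks run in recovery mode.
 */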
int cbmem_initialize_id_size(u32 id, u64 size)
{
	const int recovery = 1;

	cbmem_top_init_once();

	imd_handle_init(&imd, cbmem_top());

	if (imd_recover(&imd))
		return 1;

	/*
	 * Lock the imd in romstage on a recovery. The assumption is that
	 * if the imd area was recovered in romstage then S3 resume path
	 * is being taken.
	 */
	if (ENV_CREATES_CBMEM)
		imd_lockdown(&imd);

	/* Add the specified range first */
	if (size)
		cbmem_add(id, size);

	/* Complete migration to CBMEM. */
	cbmem_run_init_hooks(recovery);

	cbmem_initialized = 1;

	/* Recovery successful. */
	return 0;
}

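/* On a normal boot start CBMEM from scratch; on wakeup try to recover it. */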
int cbmem_recovery(int is_wakeup)
{
	int rv = 0;
	if (!is_wakeup)
		cbmem_initialize_empty();
	else
		rv = cbmem_initialize();
	return rv;
}

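/* Find an existing entry with the given id or add a new one of size64 bytes. */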
const struct cbmem_entry *cbmem_entry_add(u32 id, u64 size64)
{
	const struct imd_entry *e;

	e = imd_entry_find_or_add(&imd, id, size64);

	return imd_to_cbmem(e);
}

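/* Like cbmem_entry_add(), but return the entry's memory directly (or NULL). */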
void *cbmem_add(u32 id, u64 size)
{
	const struct imd_entry *e;

	e = imd_entry_find_or_add(&imd, id, size);

	if (e == NULL)
		return NULL;

	return imd_entry_at(&imd, e);
}

/* Retrieve a region provided a given id. */
const struct cbmem_entry *cbmem_entry_find(u32 id)
{
	const struct imd_entry *e;

	e = imd_entry_find(&imd, id);

	return imd_to_cbmem(e);
}

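/* Like cbmem_entry_find(), but return the entry's memory directly (or NULL). */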
void *cbmem_find(u32 id)
{
	const struct imd_entry *e;

	e = imd_entry_find(&imd, id);

	if (e == NULL)
		return NULL;

	return imd_entry_at(&imd, e);
}

/* Remove a reserved region. Returns 0 on success, < 0 on error. Note: A region
 * cannot be removed unless it was the last one added. */
int cbmem_entry_remove(const struct cbmem_entry *entry)
{
	return imd_entry_remove(&imd, cbmem_to_imd(entry));
}

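/* Return the size of a CBMEM entry. */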
u64 cbmem_entry_size(const struct cbmem_entry *entry)
{
	return imd_entry_size(cbmem_to_imd(entry));
}

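/* Return the start address of a CBMEM entry. */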
void *cbmem_entry_start(const struct cbmem_entry *entry)
{
	return imd_entry_at(&imd, cbmem_to_imd(entry));
}

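/* Report the memory used by CBMEM to the bootmem map as table memory. */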
void cbmem_add_bootmem(void)
{
	void *baseptr = NULL;
	size_t size = 0;

	cbmem_get_region(&baseptr, &size);
	bootmem_add_range((uintptr_t)baseptr, size, BM_MEM_TABLE);
}

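/* Return the base address and size of the memory currently used by CBMEM. */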
void cbmem_get_region(void **baseptr, size_t *size)
{
	imd_region_used(&imd, baseptr, size);
}

#if ENV_PAYLOAD_LOADER || (CONFIG(EARLY_CBMEM_LIST) && ENV_HAS_CBMEM)
/*
 * -fdata-sections doesn't work so well on read only strings. They all
 * get put in the same section even though those strings may never be
 * referenced in the final binary.
 */
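/* Print a listing of all CBMEM entries, with names resolved from the id table. */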
void cbmem_list(void)
{
	static const struct imd_lookup lookup[] = { CBMEM_ID_TO_NAME_TABLE };

	imd_print_entries(&imd, lookup, ARRAY_SIZE(lookup));
}
#endif

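/*
 * Walk all CBMEM entries and publish each one, except the IMD metadata
 * roots, as an LB_TAG_CBMEM_ENTRY record in the coreboot table.
 */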
void cbmem_add_records_to_cbtable(struct lb_header *header)
{
	struct imd_cursor cursor;

	if (imd_cursor_init(&imd, &cursor))
		return;

	while (1) {
		const struct imd_entry *e;
		struct lb_cbmem_entry *lbe;
		uint32_t id;

		e = imd_cursor_next(&cursor);

		if (e == NULL)
			break;

		id = imd_entry_id(e);
		/* Don't add these metadata entries. */
		if (id == CBMEM_ID_IMD_ROOT || id == CBMEM_ID_IMD_SMALL)
			continue;

		lbe = (struct lb_cbmem_entry *)lb_new_record(header);
		lbe->tag = LB_TAG_CBMEM_ENTRY;
		lbe->size = sizeof(*lbe);
		lbe->address = (uintptr_t)imd_entry_at(&imd, e);
		lbe->entry_size = imd_entry_size(e);
		lbe->id = id;
	}
}