/* SPDX-License-Identifier: GPL-2.0-only */

#include <assert.h>
#include <boot/coreboot_tables.h>
#include <bootmem.h>
#include <console/console.h>
#include <cbmem.h>
#include <imd.h>
#include <lib.h>
#include <stdlib.h>

/* The program loader passes on cbmem_top, and the program entry point
   has to fill in the _cbmem_top_ptr symbol based on its calling arguments. */
uintptr_t _cbmem_top_ptr;

static struct imd imd;

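/*
 * Resolve the CBMEM top address for the current stage: romstage asks the
 * chipset via cbmem_top_chipset() and caches the result in a static, while
 * postcar and ramstage read the _cbmem_top_ptr symbol filled in by the
 * program loader. Any other stage calling this is flagged by dead_code().
 */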
void *cbmem_top(void)
{
	if (ENV_ROMSTAGE) {
		static void *top;
		if (top)
			return top;
		top = cbmem_top_chipset();
		return top;
	}
	if (ENV_POSTCAR || ENV_RAMSTAGE)
		return (void *)_cbmem_top_ptr;

	dead_code();
}

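/* A struct cbmem_entry is an opaque handle backed directly by a struct
   imd_entry; these helpers only convert between the two pointer types. */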
static inline const struct cbmem_entry *imd_to_cbmem(const struct imd_entry *e)
{
	return (const struct cbmem_entry *)e;
}

static inline const struct imd_entry *cbmem_to_imd(const struct cbmem_entry *e)
{
	return (const struct imd_entry *)e;
}

void cbmem_initialize_empty(void)
{
	cbmem_initialize_empty_id_size(0, 0);
}

static void cbmem_top_init_once(void)
{
	/* Call the one-time hook on the expected cbmem init during boot. This
	   sequence assumes the first init call is made in romstage. */
	if (!ENV_ROMSTAGE)
		return;

	/* The test is only effective on x86, and only when the address hits
	   uncached (UC) memory. */
	if (ENV_X86)
		quick_ram_check_or_die((uintptr_t)cbmem_top() - sizeof(u32));
}

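/* Create a fresh, empty two-tiered imd at cbmem_top(), optionally add one
   initial entry of the given id/size, and run the init hooks without
   recovery. */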
void cbmem_initialize_empty_id_size(u32 id, u64 size)
{
	const int no_recovery = 0;

	cbmem_top_init_once();

	imd_handle_init(&imd, cbmem_top());

	printk(BIOS_DEBUG, "CBMEM:\n");

	if (imd_create_tiered_empty(&imd, CBMEM_ROOT_MIN_SIZE, CBMEM_LG_ALIGN,
				CBMEM_SM_ROOT_SIZE, CBMEM_SM_ALIGN)) {
		printk(BIOS_DEBUG, "failed.\n");
		return;
	}

	/* Add the specified range first */
	if (size)
		cbmem_add(id, size);

	/* Complete migration to CBMEM. */
	cbmem_run_init_hooks(no_recovery);
}

int cbmem_initialize(void)
{
	return cbmem_initialize_id_size(0, 0);
}

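/* Recover an already existing imd at cbmem_top() (e.g. on the S3 resume
   path) instead of creating an empty one. Returns 0 on successful recovery,
   1 otherwise. */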
int cbmem_initialize_id_size(u32 id, u64 size)
{
	const int recovery = 1;

	cbmem_top_init_once();

	imd_handle_init(&imd, cbmem_top());

	if (imd_recover(&imd))
		return 1;

	/*
	 * Lock the imd in romstage on a recovery. The assumption is that
	 * if the imd area was recovered in romstage then the S3 resume
	 * path is being taken.
	 */
	if (ENV_ROMSTAGE)
		imd_lockdown(&imd);

	/* Add the specified range first */
	if (size)
		cbmem_add(id, size);

	/* Complete migration to CBMEM. */
	cbmem_run_init_hooks(recovery);

	/* Recovery successful. */
	return 0;
}

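/* Boot-path convenience wrapper: on a wakeup (S3 resume) try to recover the
   existing CBMEM area, otherwise initialize it empty. Returns nonzero only
   when a recovery was attempted and failed. */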
int cbmem_recovery(int is_wakeup)
{
	int rv = 0;
	if (!is_wakeup)
		cbmem_initialize_empty();
	else
		rv = cbmem_initialize();
	return rv;
}

const struct cbmem_entry *cbmem_entry_add(u32 id, u64 size64)
{
	const struct imd_entry *e;

	e = imd_entry_find_or_add(&imd, id, size64);

	return imd_to_cbmem(e);
}

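/*
 * Find an existing entry with the given id, or allocate a new one of the
 * given size, and return a pointer to its storage. A typical (illustrative)
 * call looks like:
 *
 *	struct something *p = cbmem_add(CBMEM_ID_SOMETHING, sizeof(*p));
 *
 * where CBMEM_ID_SOMETHING and struct something stand in for a real id and
 * type. Returns NULL if the entry could not be found or created.
 */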
void *cbmem_add(u32 id, u64 size)
{
	const struct imd_entry *e;

	e = imd_entry_find_or_add(&imd, id, size);

	if (e == NULL)
		return NULL;

	return imd_entry_at(&imd, e);
}

/* Retrieve a region for a given id. */
const struct cbmem_entry *cbmem_entry_find(u32 id)
{
	const struct imd_entry *e;

	e = imd_entry_find(&imd, id);

	return imd_to_cbmem(e);
}

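/* Like cbmem_entry_find(), but return the entry's storage directly, or NULL
   when no entry with the given id exists. */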
void *cbmem_find(u32 id)
{
	const struct imd_entry *e;

	e = imd_entry_find(&imd, id);

	if (e == NULL)
		return NULL;

	return imd_entry_at(&imd, e);
}

/* Remove a reserved region. Returns 0 on success, < 0 on error. Note: A region
 * cannot be removed unless it was the last one added. */
int cbmem_entry_remove(const struct cbmem_entry *entry)
{
	return imd_entry_remove(&imd, cbmem_to_imd(entry));
}

u64 cbmem_entry_size(const struct cbmem_entry *entry)
{
	return imd_entry_size(cbmem_to_imd(entry));
}

void *cbmem_entry_start(const struct cbmem_entry *entry)
{
	return imd_entry_at(&imd, cbmem_to_imd(entry));
}

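/* Register the memory occupied by CBMEM with the bootmem allocator as
   BM_MEM_TABLE, so the region is reserved in the memory map handed to
   payloads. */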
void cbmem_add_bootmem(void)
{
	void *baseptr = NULL;
	size_t size = 0;

	cbmem_get_region(&baseptr, &size);
	bootmem_add_range((uintptr_t)baseptr, size, BM_MEM_TABLE);
}

void cbmem_get_region(void **baseptr, size_t *size)
{
	imd_region_used(&imd, baseptr, size);
}

#if ENV_PAYLOAD_LOADER || (CONFIG(EARLY_CBMEM_LIST) \
	&& (ENV_POSTCAR || ENV_ROMSTAGE))
/*
 * -fdata-sections doesn't work so well on read-only strings. They all
 * get put in the same section even though those strings may never be
 * referenced in the final binary.
 */
void cbmem_list(void)
{
	static const struct imd_lookup lookup[] = { CBMEM_ID_TO_NAME_TABLE };

	imd_print_entries(&imd, lookup, ARRAY_SIZE(lookup));
}
#endif

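/* Publish one LB_TAG_CBMEM_ENTRY record per CBMEM entry into the coreboot
   table, skipping the imd root/small metadata entries, so that payloads and
   the OS can locate the individual regions. */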
void cbmem_add_records_to_cbtable(struct lb_header *header)
{
	struct imd_cursor cursor;

	if (imd_cursor_init(&imd, &cursor))
		return;

	while (1) {
		const struct imd_entry *e;
		struct lb_cbmem_entry *lbe;
		uint32_t id;

		e = imd_cursor_next(&cursor);

		if (e == NULL)
			break;

		id = imd_entry_id(e);
		/* Don't add these metadata entries. */
		if (id == CBMEM_ID_IMD_ROOT || id == CBMEM_ID_IMD_SMALL)
			continue;

		lbe = (struct lb_cbmem_entry *)lb_new_record(header);
		lbe->tag = LB_TAG_CBMEM_ENTRY;
		lbe->size = sizeof(*lbe);
		lbe->address = (uintptr_t)imd_entry_at(&imd, e);
		lbe->entry_size = imd_entry_size(e);
		lbe->id = id;
	}
}