blob: d06a9e941609f7c2486e02cefa00c2796925bf7c [file] [log] [blame]
Angel Pons118a9c72020-04-02 23:48:34 +02001/* SPDX-License-Identifier: GPL-2.0-only */
Aaron Durbin0dff57d2015-03-05 21:18:33 -06002
Arthur Heymans340e4b82019-10-23 17:25:58 +02003#include <assert.h>
Elyes HAOUAS0edf6a52019-10-26 18:41:47 +02004#include <boot/coreboot_tables.h>
Aaron Durbin0dff57d2015-03-05 21:18:33 -06005#include <bootstate.h>
6#include <bootmem.h>
7#include <console/console.h>
8#include <cbmem.h>
9#include <imd.h>
Kyösti Mälkkif5cf60f2019-03-18 15:26:48 +020010#include <lib.h>
Aaron Durbin0dff57d2015-03-05 21:18:33 -060011#include <stdlib.h>
Julius Werner3c814b22016-08-19 16:20:40 -070012
/* The program loader passes on cbmem_top and the program entry point
   has to fill in the _cbmem_top_ptr symbol based on the calling arguments. */
uintptr_t _cbmem_top_ptr;

/* Handle for the in-memory directory (imd) backing all cbmem entries;
   (re)bound to cbmem_top() by the initialize paths below. */
static struct imd imd;
18
Arthur Heymans340e4b82019-10-23 17:25:58 +020019void *cbmem_top(void)
20{
Arthur Heymansc4c5d852019-10-29 07:32:48 +010021 if (ENV_ROMSTAGE) {
Kyösti Mälkkifcbbb912020-04-20 10:21:39 +030022 static void *top;
Arthur Heymans340e4b82019-10-23 17:25:58 +020023 if (top)
24 return top;
25 top = cbmem_top_chipset();
26 return top;
27 }
Arthur Heymansc4c5d852019-10-29 07:32:48 +010028 if (ENV_POSTCAR || ENV_RAMSTAGE)
Arthur Heymans340e4b82019-10-23 17:25:58 +020029 return (void *)_cbmem_top_ptr;
30
31 dead_code();
32}
33
/* cbmem entries are opaque views of imd entries; converting is a cast. */
static inline const struct cbmem_entry *imd_to_cbmem(const struct imd_entry *e)
{
	const struct cbmem_entry *entry = (const struct cbmem_entry *)e;

	return entry;
}
38
/* Inverse of imd_to_cbmem(): recover the underlying imd entry. */
static inline const struct imd_entry *cbmem_to_imd(const struct cbmem_entry *e)
{
	const struct imd_entry *entry = (const struct imd_entry *)e;

	return entry;
}
43
/* Create an empty cbmem without requesting any additional region
   (id and size of 0). */
void cbmem_initialize_empty(void)
{
	cbmem_initialize_empty_id_size(0, 0);
}
48
Aaron Durbindfdea2a2017-08-01 10:27:10 -060049static void cbmem_top_init_once(void)
50{
51 /* Call one-time hook on expected cbmem init during boot. This sequence
Kyösti Mälkki513a1a82018-06-03 12:29:50 +030052 assumes first init call is in romstage. */
53 if (!ENV_ROMSTAGE)
Aaron Durbindfdea2a2017-08-01 10:27:10 -060054 return;
55
Kyösti Mälkkif5cf60f2019-03-18 15:26:48 +020056 /* The test is only effective on X86 and when address hits UC memory. */
57 if (ENV_X86)
58 quick_ram_check_or_die((uintptr_t)cbmem_top() - sizeof(u32));
Aaron Durbindfdea2a2017-08-01 10:27:10 -060059}
60
Lee Leahy522149c2015-05-08 11:33:55 -070061void cbmem_initialize_empty_id_size(u32 id, u64 size)
62{
Aaron Durbin41607a42015-06-09 13:54:10 -050063 const int no_recovery = 0;
Aaron Durbin0dff57d2015-03-05 21:18:33 -060064
Aaron Durbindfdea2a2017-08-01 10:27:10 -060065 cbmem_top_init_once();
66
Patrick Georgib6161be2019-11-29 12:27:01 +010067 imd_handle_init(&imd, cbmem_top());
Aaron Durbin0dff57d2015-03-05 21:18:33 -060068
69 printk(BIOS_DEBUG, "CBMEM:\n");
70
Patrick Georgib6161be2019-11-29 12:27:01 +010071 if (imd_create_tiered_empty(&imd, CBMEM_ROOT_MIN_SIZE, CBMEM_LG_ALIGN,
Lee Leahy522149c2015-05-08 11:33:55 -070072 CBMEM_SM_ROOT_SIZE, CBMEM_SM_ALIGN)) {
Aaron Durbin0dff57d2015-03-05 21:18:33 -060073 printk(BIOS_DEBUG, "failed.\n");
74 return;
75 }
76
Lee Leahy522149c2015-05-08 11:33:55 -070077 /* Add the specified range first */
78 if (size)
79 cbmem_add(id, size);
80
Aaron Durbin0dff57d2015-03-05 21:18:33 -060081 /* Complete migration to CBMEM. */
Aaron Durbin41607a42015-06-09 13:54:10 -050082 cbmem_run_init_hooks(no_recovery);
Aaron Durbin0dff57d2015-03-05 21:18:33 -060083}
84
/* Recover an existing cbmem without requesting any additional region
   (id and size of 0). Returns non-zero when recovery fails. */
int cbmem_initialize(void)
{
	return cbmem_initialize_id_size(0, 0);
}
89
90int cbmem_initialize_id_size(u32 id, u64 size)
91{
Aaron Durbin41607a42015-06-09 13:54:10 -050092 const int recovery = 1;
Aaron Durbin0dff57d2015-03-05 21:18:33 -060093
Aaron Durbindfdea2a2017-08-01 10:27:10 -060094 cbmem_top_init_once();
95
Patrick Georgib6161be2019-11-29 12:27:01 +010096 imd_handle_init(&imd, cbmem_top());
Aaron Durbin0dff57d2015-03-05 21:18:33 -060097
Patrick Georgib6161be2019-11-29 12:27:01 +010098 if (imd_recover(&imd))
Aaron Durbin0dff57d2015-03-05 21:18:33 -060099 return 1;
100
Aaron Durbin0dff57d2015-03-05 21:18:33 -0600101 /*
102 * Lock the imd in romstage on a recovery. The assumption is that
103 * if the imd area was recovered in romstage then S3 resume path
104 * is being taken.
105 */
Kyösti Mälkkie3acc8f2019-09-13 10:49:20 +0300106 if (ENV_ROMSTAGE)
Patrick Georgib6161be2019-11-29 12:27:01 +0100107 imd_lockdown(&imd);
Aaron Durbin0dff57d2015-03-05 21:18:33 -0600108
Lee Leahy522149c2015-05-08 11:33:55 -0700109 /* Add the specified range first */
110 if (size)
111 cbmem_add(id, size);
112
Aaron Durbin0dff57d2015-03-05 21:18:33 -0600113 /* Complete migration to CBMEM. */
Aaron Durbin41607a42015-06-09 13:54:10 -0500114 cbmem_run_init_hooks(recovery);
Aaron Durbin0dff57d2015-03-05 21:18:33 -0600115
116 /* Recovery successful. */
117 return 0;
118}
119
/* On wakeup (e.g. S3 resume) try to recover the existing cbmem and
   propagate the result; otherwise start from an empty cbmem. */
int cbmem_recovery(int is_wakeup)
{
	if (is_wakeup)
		return cbmem_initialize();

	cbmem_initialize_empty();
	return 0;
}
129
130const struct cbmem_entry *cbmem_entry_add(u32 id, u64 size64)
131{
Aaron Durbin0dff57d2015-03-05 21:18:33 -0600132 const struct imd_entry *e;
133
Patrick Georgib6161be2019-11-29 12:27:01 +0100134 e = imd_entry_find_or_add(&imd, id, size64);
Aaron Durbin0dff57d2015-03-05 21:18:33 -0600135
136 return imd_to_cbmem(e);
137}
138
139void *cbmem_add(u32 id, u64 size)
140{
Aaron Durbin0dff57d2015-03-05 21:18:33 -0600141 const struct imd_entry *e;
142
Patrick Georgib6161be2019-11-29 12:27:01 +0100143 e = imd_entry_find_or_add(&imd, id, size);
Aaron Durbin0dff57d2015-03-05 21:18:33 -0600144
145 if (e == NULL)
146 return NULL;
147
Patrick Georgib6161be2019-11-29 12:27:01 +0100148 return imd_entry_at(&imd, e);
Aaron Durbin0dff57d2015-03-05 21:18:33 -0600149}
150
151/* Retrieve a region provided a given id. */
152const struct cbmem_entry *cbmem_entry_find(u32 id)
153{
Aaron Durbin0dff57d2015-03-05 21:18:33 -0600154 const struct imd_entry *e;
155
Patrick Georgib6161be2019-11-29 12:27:01 +0100156 e = imd_entry_find(&imd, id);
Aaron Durbin0dff57d2015-03-05 21:18:33 -0600157
158 return imd_to_cbmem(e);
159}
160
161void *cbmem_find(u32 id)
162{
Aaron Durbin0dff57d2015-03-05 21:18:33 -0600163 const struct imd_entry *e;
164
Patrick Georgib6161be2019-11-29 12:27:01 +0100165 e = imd_entry_find(&imd, id);
Aaron Durbin0dff57d2015-03-05 21:18:33 -0600166
167 if (e == NULL)
168 return NULL;
169
Patrick Georgib6161be2019-11-29 12:27:01 +0100170 return imd_entry_at(&imd, e);
Aaron Durbin0dff57d2015-03-05 21:18:33 -0600171}
172
/* Remove a reserved region. Returns 0 on success, < 0 on error. Note: A region
 * cannot be removed unless it was the last one added. */
int cbmem_entry_remove(const struct cbmem_entry *entry)
{
	/* Removal semantics are those of the underlying imd. */
	return imd_entry_remove(&imd, cbmem_to_imd(entry));
}
179
/* Return the size, in bytes, of the given cbmem entry. */
u64 cbmem_entry_size(const struct cbmem_entry *entry)
{
	return imd_entry_size(&imd, cbmem_to_imd(entry));
}
184
/* Return a pointer to the start of the given cbmem entry's storage. */
void *cbmem_entry_start(const struct cbmem_entry *entry)
{
	return imd_entry_at(&imd, cbmem_to_imd(entry));
}
189
Aaron Durbin0dff57d2015-03-05 21:18:33 -0600190void cbmem_add_bootmem(void)
191{
Aaron Durbinfb532422017-08-02 10:40:25 -0600192 void *baseptr = NULL;
Aaron Durbin0dff57d2015-03-05 21:18:33 -0600193 size_t size = 0;
194
Philipp Deppenwiese84258db2018-08-16 00:31:26 +0200195 cbmem_get_region(&baseptr, &size);
Patrick Rudolph9ab9db02018-04-05 09:14:51 +0200196 bootmem_add_range((uintptr_t)baseptr, size, BM_MEM_TABLE);
Aaron Durbin0dff57d2015-03-05 21:18:33 -0600197}
198
/* Report the base and size of the memory currently used by cbmem, as
   accounted by the underlying imd. */
void cbmem_get_region(void **baseptr, size_t *size)
{
	imd_region_used(&imd, baseptr, size);
}
203
#if ENV_PAYLOAD_LOADER || (CONFIG(EARLY_CBMEM_LIST) \
	&& (ENV_POSTCAR || ENV_ROMSTAGE))
/*
 * -fdata-sections doesn't work so well on read only strings. They all
 * get put in the same section even though those strings may never be
 * referenced in the final binary.
 */
/* Print all cbmem entries, labeling them via the id-to-name table. */
void cbmem_list(void)
{
	static const struct imd_lookup lookup[] = { CBMEM_ID_TO_NAME_TABLE };

	imd_print_entries(&imd, lookup, ARRAY_SIZE(lookup));
}
#endif
218
219void cbmem_add_records_to_cbtable(struct lb_header *header)
220{
221 struct imd_cursor cursor;
Aaron Durbin1ca2d862015-09-30 12:26:54 -0500222
Patrick Georgib6161be2019-11-29 12:27:01 +0100223 if (imd_cursor_init(&imd, &cursor))
Aaron Durbin1ca2d862015-09-30 12:26:54 -0500224 return;
225
226 while (1) {
227 const struct imd_entry *e;
228 struct lb_cbmem_entry *lbe;
229 uint32_t id;
230
231 e = imd_cursor_next(&cursor);
232
233 if (e == NULL)
234 break;
235
Patrick Georgib6161be2019-11-29 12:27:01 +0100236 id = imd_entry_id(&imd, e);
Aaron Durbin1ca2d862015-09-30 12:26:54 -0500237 /* Don't add these metadata entries. */
238 if (id == CBMEM_ID_IMD_ROOT || id == CBMEM_ID_IMD_SMALL)
239 continue;
240
241 lbe = (struct lb_cbmem_entry *)lb_new_record(header);
242 lbe->tag = LB_TAG_CBMEM_ENTRY;
243 lbe->size = sizeof(*lbe);
Patrick Georgib6161be2019-11-29 12:27:01 +0100244 lbe->address = (uintptr_t)imd_entry_at(&imd, e);
245 lbe->entry_size = imd_entry_size(&imd, e);
Aaron Durbin1ca2d862015-09-30 12:26:54 -0500246 lbe->id = id;
247 }
248}