blob: 13b5483c457f6f11aae4d10cb42268cffc2abb36 [file] [log] [blame]
Angel Pons118a9c72020-04-02 23:48:34 +02001/* SPDX-License-Identifier: GPL-2.0-only */
2/* This file is part of the coreboot project. */
Aaron Durbin0dff57d2015-03-05 21:18:33 -06003
Arthur Heymans340e4b82019-10-23 17:25:58 +02004#include <assert.h>
Elyes HAOUAS0edf6a52019-10-26 18:41:47 +02005#include <boot/coreboot_tables.h>
Aaron Durbin0dff57d2015-03-05 21:18:33 -06006#include <bootstate.h>
7#include <bootmem.h>
8#include <console/console.h>
9#include <cbmem.h>
10#include <imd.h>
Kyösti Mälkkif5cf60f2019-03-18 15:26:48 +020011#include <lib.h>
Aaron Durbin0dff57d2015-03-05 21:18:33 -060012#include <stdlib.h>
Julius Werner3c814b22016-08-19 16:20:40 -070013
/* The program loader passes on cbmem_top and the program entry point
   has to fill in the _cbmem_top_ptr symbol based on the calling arguments. */
uintptr_t _cbmem_top_ptr;

/* Backing store for the in-memory directory (imd) that implements CBMEM. */
static struct imd imd;

Arthur Heymans340e4b82019-10-23 17:25:58 +020020void *cbmem_top(void)
21{
Arthur Heymansc4c5d852019-10-29 07:32:48 +010022 if (ENV_ROMSTAGE) {
Arthur Heymans340e4b82019-10-23 17:25:58 +020023 MAYBE_STATIC_BSS void *top = NULL;
24 if (top)
25 return top;
26 top = cbmem_top_chipset();
27 return top;
28 }
Arthur Heymansc4c5d852019-10-29 07:32:48 +010029 if (ENV_POSTCAR || ENV_RAMSTAGE)
Arthur Heymans340e4b82019-10-23 17:25:58 +020030 return (void *)_cbmem_top_ptr;
31
32 dead_code();
33}
34
/* A cbmem_entry is an opaque alias of an imd_entry; convert the pointer.
   No data is copied — both names refer to the same underlying object. */
static inline const struct cbmem_entry *imd_to_cbmem(const struct imd_entry *e)
{
	return (const struct cbmem_entry *)e;
}
39
/* Inverse of imd_to_cbmem(): recover the imd view of a cbmem entry. */
static inline const struct imd_entry *cbmem_to_imd(const struct cbmem_entry *e)
{
	return (const struct imd_entry *)e;
}
44
/* Create a fresh, empty CBMEM area with no pre-added region; see
   cbmem_initialize_empty_id_size() for the full sequence. */
void cbmem_initialize_empty(void)
{
	cbmem_initialize_empty_id_size(0, 0);
}
49
Aaron Durbindfdea2a2017-08-01 10:27:10 -060050static void cbmem_top_init_once(void)
51{
52 /* Call one-time hook on expected cbmem init during boot. This sequence
Kyösti Mälkki513a1a82018-06-03 12:29:50 +030053 assumes first init call is in romstage. */
54 if (!ENV_ROMSTAGE)
Aaron Durbindfdea2a2017-08-01 10:27:10 -060055 return;
56
Kyösti Mälkkif5cf60f2019-03-18 15:26:48 +020057 /* The test is only effective on X86 and when address hits UC memory. */
58 if (ENV_X86)
59 quick_ram_check_or_die((uintptr_t)cbmem_top() - sizeof(u32));
Aaron Durbindfdea2a2017-08-01 10:27:10 -060060}
61
Lee Leahy522149c2015-05-08 11:33:55 -070062void cbmem_initialize_empty_id_size(u32 id, u64 size)
63{
Aaron Durbin41607a42015-06-09 13:54:10 -050064 const int no_recovery = 0;
Aaron Durbin0dff57d2015-03-05 21:18:33 -060065
Aaron Durbindfdea2a2017-08-01 10:27:10 -060066 cbmem_top_init_once();
67
Patrick Georgib6161be2019-11-29 12:27:01 +010068 imd_handle_init(&imd, cbmem_top());
Aaron Durbin0dff57d2015-03-05 21:18:33 -060069
70 printk(BIOS_DEBUG, "CBMEM:\n");
71
Patrick Georgib6161be2019-11-29 12:27:01 +010072 if (imd_create_tiered_empty(&imd, CBMEM_ROOT_MIN_SIZE, CBMEM_LG_ALIGN,
Lee Leahy522149c2015-05-08 11:33:55 -070073 CBMEM_SM_ROOT_SIZE, CBMEM_SM_ALIGN)) {
Aaron Durbin0dff57d2015-03-05 21:18:33 -060074 printk(BIOS_DEBUG, "failed.\n");
75 return;
76 }
77
Lee Leahy522149c2015-05-08 11:33:55 -070078 /* Add the specified range first */
79 if (size)
80 cbmem_add(id, size);
81
Aaron Durbin0dff57d2015-03-05 21:18:33 -060082 /* Complete migration to CBMEM. */
Aaron Durbin41607a42015-06-09 13:54:10 -050083 cbmem_run_init_hooks(no_recovery);
Aaron Durbin0dff57d2015-03-05 21:18:33 -060084}
85
/* Recover an existing CBMEM area with no extra region; see
   cbmem_initialize_id_size(). Returns 0 on successful recovery, 1 on
   failure. */
int cbmem_initialize(void)
{
	return cbmem_initialize_id_size(0, 0);
}
90
91int cbmem_initialize_id_size(u32 id, u64 size)
92{
Aaron Durbin41607a42015-06-09 13:54:10 -050093 const int recovery = 1;
Aaron Durbin0dff57d2015-03-05 21:18:33 -060094
Aaron Durbindfdea2a2017-08-01 10:27:10 -060095 cbmem_top_init_once();
96
Patrick Georgib6161be2019-11-29 12:27:01 +010097 imd_handle_init(&imd, cbmem_top());
Aaron Durbin0dff57d2015-03-05 21:18:33 -060098
Patrick Georgib6161be2019-11-29 12:27:01 +010099 if (imd_recover(&imd))
Aaron Durbin0dff57d2015-03-05 21:18:33 -0600100 return 1;
101
Aaron Durbin0dff57d2015-03-05 21:18:33 -0600102 /*
103 * Lock the imd in romstage on a recovery. The assumption is that
104 * if the imd area was recovered in romstage then S3 resume path
105 * is being taken.
106 */
Kyösti Mälkkie3acc8f2019-09-13 10:49:20 +0300107 if (ENV_ROMSTAGE)
Patrick Georgib6161be2019-11-29 12:27:01 +0100108 imd_lockdown(&imd);
Aaron Durbin0dff57d2015-03-05 21:18:33 -0600109
Lee Leahy522149c2015-05-08 11:33:55 -0700110 /* Add the specified range first */
111 if (size)
112 cbmem_add(id, size);
113
Aaron Durbin0dff57d2015-03-05 21:18:33 -0600114 /* Complete migration to CBMEM. */
Aaron Durbin41607a42015-06-09 13:54:10 -0500115 cbmem_run_init_hooks(recovery);
Aaron Durbin0dff57d2015-03-05 21:18:33 -0600116
117 /* Recovery successful. */
118 return 0;
119}
120
/* Entry point for stage code: on a normal boot start CBMEM fresh, on an
   S3 wakeup recover the existing area. Returns the recovery status
   (0 on success; fresh initialization always reports 0). */
int cbmem_recovery(int is_wakeup)
{
	if (!is_wakeup) {
		cbmem_initialize_empty();
		return 0;
	}

	return cbmem_initialize();
}
130
131const struct cbmem_entry *cbmem_entry_add(u32 id, u64 size64)
132{
Aaron Durbin0dff57d2015-03-05 21:18:33 -0600133 const struct imd_entry *e;
134
Patrick Georgib6161be2019-11-29 12:27:01 +0100135 e = imd_entry_find_or_add(&imd, id, size64);
Aaron Durbin0dff57d2015-03-05 21:18:33 -0600136
137 return imd_to_cbmem(e);
138}
139
140void *cbmem_add(u32 id, u64 size)
141{
Aaron Durbin0dff57d2015-03-05 21:18:33 -0600142 const struct imd_entry *e;
143
Patrick Georgib6161be2019-11-29 12:27:01 +0100144 e = imd_entry_find_or_add(&imd, id, size);
Aaron Durbin0dff57d2015-03-05 21:18:33 -0600145
146 if (e == NULL)
147 return NULL;
148
Patrick Georgib6161be2019-11-29 12:27:01 +0100149 return imd_entry_at(&imd, e);
Aaron Durbin0dff57d2015-03-05 21:18:33 -0600150}
151
152/* Retrieve a region provided a given id. */
153const struct cbmem_entry *cbmem_entry_find(u32 id)
154{
Aaron Durbin0dff57d2015-03-05 21:18:33 -0600155 const struct imd_entry *e;
156
Patrick Georgib6161be2019-11-29 12:27:01 +0100157 e = imd_entry_find(&imd, id);
Aaron Durbin0dff57d2015-03-05 21:18:33 -0600158
159 return imd_to_cbmem(e);
160}
161
162void *cbmem_find(u32 id)
163{
Aaron Durbin0dff57d2015-03-05 21:18:33 -0600164 const struct imd_entry *e;
165
Patrick Georgib6161be2019-11-29 12:27:01 +0100166 e = imd_entry_find(&imd, id);
Aaron Durbin0dff57d2015-03-05 21:18:33 -0600167
168 if (e == NULL)
169 return NULL;
170
Patrick Georgib6161be2019-11-29 12:27:01 +0100171 return imd_entry_at(&imd, e);
Aaron Durbin0dff57d2015-03-05 21:18:33 -0600172}
173
174/* Remove a reserved region. Returns 0 on success, < 0 on error. Note: A region
175 * cannot be removed unless it was the last one added. */
176int cbmem_entry_remove(const struct cbmem_entry *entry)
177{
Patrick Georgib6161be2019-11-29 12:27:01 +0100178 return imd_entry_remove(&imd, cbmem_to_imd(entry));
Aaron Durbin0dff57d2015-03-05 21:18:33 -0600179}
180
/* Return the size, in bytes, of a previously looked-up cbmem entry. */
u64 cbmem_entry_size(const struct cbmem_entry *entry)
{
	return imd_entry_size(&imd, cbmem_to_imd(entry));
}
185
/* Return a pointer to the start of the entry's backing storage. */
void *cbmem_entry_start(const struct cbmem_entry *entry)
{
	return imd_entry_at(&imd, cbmem_to_imd(entry));
}
190
Aaron Durbin0dff57d2015-03-05 21:18:33 -0600191void cbmem_add_bootmem(void)
192{
Aaron Durbinfb532422017-08-02 10:40:25 -0600193 void *baseptr = NULL;
Aaron Durbin0dff57d2015-03-05 21:18:33 -0600194 size_t size = 0;
195
Philipp Deppenwiese84258db2018-08-16 00:31:26 +0200196 cbmem_get_region(&baseptr, &size);
Patrick Rudolph9ab9db02018-04-05 09:14:51 +0200197 bootmem_add_range((uintptr_t)baseptr, size, BM_MEM_TABLE);
Aaron Durbin0dff57d2015-03-05 21:18:33 -0600198}
199
/* Return, via out-parameters, the base address and used size of the
   CBMEM area as tracked by the imd. */
void cbmem_get_region(void **baseptr, size_t *size)
{
	imd_region_used(&imd, baseptr, size);
}
204
#if ENV_PAYLOAD_LOADER || (CONFIG(EARLY_CBMEM_LIST) \
	&& (ENV_POSTCAR || ENV_ROMSTAGE))
/*
 * -fdata-sections doesn't work so well on read only strings. They all
 * get put in the same section even though those strings may never be
 * referenced in the final binary.
 */
/* Print every cbmem entry, mapping ids to human-readable names. */
void cbmem_list(void)
{
	static const struct imd_lookup lookup[] = { CBMEM_ID_TO_NAME_TABLE };

	imd_print_entries(&imd, lookup, ARRAY_SIZE(lookup));
}
#endif
219
220void cbmem_add_records_to_cbtable(struct lb_header *header)
221{
222 struct imd_cursor cursor;
Aaron Durbin1ca2d862015-09-30 12:26:54 -0500223
Patrick Georgib6161be2019-11-29 12:27:01 +0100224 if (imd_cursor_init(&imd, &cursor))
Aaron Durbin1ca2d862015-09-30 12:26:54 -0500225 return;
226
227 while (1) {
228 const struct imd_entry *e;
229 struct lb_cbmem_entry *lbe;
230 uint32_t id;
231
232 e = imd_cursor_next(&cursor);
233
234 if (e == NULL)
235 break;
236
Patrick Georgib6161be2019-11-29 12:27:01 +0100237 id = imd_entry_id(&imd, e);
Aaron Durbin1ca2d862015-09-30 12:26:54 -0500238 /* Don't add these metadata entries. */
239 if (id == CBMEM_ID_IMD_ROOT || id == CBMEM_ID_IMD_SMALL)
240 continue;
241
242 lbe = (struct lb_cbmem_entry *)lb_new_record(header);
243 lbe->tag = LB_TAG_CBMEM_ENTRY;
244 lbe->size = sizeof(*lbe);
Patrick Georgib6161be2019-11-29 12:27:01 +0100245 lbe->address = (uintptr_t)imd_entry_at(&imd, e);
246 lbe->entry_size = imd_entry_size(&imd, e);
Aaron Durbin1ca2d862015-09-30 12:26:54 -0500247 lbe->id = id;
248 }
249}