/*
 * This file is part of the coreboot project.
 *
 * Copyright (C) 2013 Google, Inc.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; version 2 of the License.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */

#include <assert.h>
#include <boot/coreboot_tables.h>
#include <bootstate.h>
#include <bootmem.h>
#include <console/console.h>
#include <cbmem.h>
#include <imd.h>
#include <lib.h>
#include <stdlib.h>
#include <arch/early_variables.h>

/*
 * We need special handling on x86, where CAR global migration is employed.
 * True globals cannot be used in that circumstance because CAR is where the
 * globals are backed -- creating a circular dependency. Globals are free to
 * be used on non-CAR platforms and in any stage that executes purely out of
 * RAM. On CAR platforms that don't migrate globals, the as-linked globals
 * can also be used, but they need the CAR_GLOBAL decoration, which ensures
 * proper object placement in conjunction with the linker.
 *
 * On the CAR global migration platforms we always have to attempt a partial
 * recovery of CBMEM from cbmem_top() whenever we access it. In other
 * environments we're not so constrained and simply keep the backing imd
 * struct in a global. This also means that we can easily tell whether CBMEM
 * has explicitly been initialized or recovered yet on those platforms, and
 * don't need to put the burden on board or chipset code to tell us by
 * returning NULL from cbmem_top() before that point.
 */
#define CAN_USE_GLOBALS \
	(!CONFIG(ARCH_X86) || ENV_RAMSTAGE || ENV_POSTCAR || \
	 !CONFIG(CAR_GLOBAL_MIGRATION))

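/*
 * Illustrative sketch only (not part of this file): on a CAR platform with
 * global migration, a cached pointer would typically be declared CAR_GLOBAL
 * and accessed through the car_*_var() helpers from <arch/early_variables.h>
 * instead of being dereferenced directly. The names cached_top and
 * get_cached_top() below are hypothetical and exist only to show the pattern:
 *
 *	static void *cached_top CAR_GLOBAL;
 *
 *	static void *get_cached_top(void)
 *	{
 *		void *top = car_get_var(cached_top);
 *		if (!top) {
 *			top = cbmem_top();
 *			car_set_var(cached_top, top);
 *		}
 *		return top;
 *	}
 */
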
/* The program loader passes on cbmem_top, and the program entry point has to
   fill in the _cbmem_top_ptr symbol based on the calling arguments. */
uintptr_t _cbmem_top_ptr;

void *cbmem_top(void)
{
	if (ENV_ROMSTAGE) {
		MAYBE_STATIC_BSS void *top = NULL;
		if (top)
			return top;
		top = cbmem_top_chipset();
		return top;
	}
	if (ENV_POSTCAR || ENV_RAMSTAGE)
		return (void *)_cbmem_top_ptr;

	dead_code();
}

static inline struct imd *cbmem_get_imd(void)
{
	if (CAN_USE_GLOBALS) {
		static struct imd imd_cbmem CAR_GLOBAL;
		return &imd_cbmem;
	}
	return NULL;
}

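/* A struct cbmem_entry is just an opaque handle for a struct imd_entry, so
   converting between the two is a straight cast. */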
static inline const struct cbmem_entry *imd_to_cbmem(const struct imd_entry *e)
{
	return (const struct cbmem_entry *)e;
}

static inline const struct imd_entry *cbmem_to_imd(const struct cbmem_entry *e)
{
	return (const struct imd_entry *)e;
}

/* These are the different situations to handle:
 *
 * In ramstage, cbmem_initialize() attempts a recovery of the
 * cbmem region set up by romstage. It uses cbmem_top() as the
 * starting point of recovery.
 *
 * In romstage, similar to ramstage, cbmem_initialize() needs to
 * attempt recovery of the cbmem area using cbmem_top() as the limit.
 * cbmem_initialize_empty() initializes an empty cbmem area from
 * cbmem_top().
 */
static struct imd *imd_init_backing(struct imd *backing)
{
	struct imd *imd;

	imd = cbmem_get_imd();

	if (imd != NULL)
		return imd;

	imd = backing;

	return imd;
}

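/* Like imd_init_backing(), but when globals can't be used the backing imd is
   also re-attached to cbmem_top() with a partial recovery on every call. */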
static struct imd *imd_init_backing_with_recover(struct imd *backing)
{
	struct imd *imd;

	imd = imd_init_backing(backing);
	if (!CAN_USE_GLOBALS) {
		/* Always partially recover if we can't keep track of whether
		 * we have already initialized CBMEM in this stage. */
		imd_handle_init(imd, cbmem_top());
		imd_handle_init_partial_recovery(imd);
	}

	return imd;
}

void cbmem_initialize_empty(void)
{
	cbmem_initialize_empty_id_size(0, 0);
}

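/* One-time hook invoked when CBMEM is first set up during boot (see
   cbmem_top_init_once() below). Platforms override this weak default when
   they need extra setup at that point. */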
void __weak cbmem_top_init(void)
{
}

static void cbmem_top_init_once(void)
{
	/* Call one-time hook on expected cbmem init during boot. This sequence
	   assumes first init call is in romstage. */
	if (!ENV_ROMSTAGE)
		return;

	cbmem_top_init();

	/* The test is only effective on X86 and when address hits UC memory. */
	if (ENV_X86)
		quick_ram_check_or_die((uintptr_t)cbmem_top() - sizeof(u32));
}

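/* Create a brand new CBMEM area below cbmem_top() and, if size is non-zero,
   immediately add an entry with the given id. */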
void cbmem_initialize_empty_id_size(u32 id, u64 size)
{
	struct imd *imd;
	struct imd imd_backing;
	const int no_recovery = 0;

	cbmem_top_init_once();

	imd = imd_init_backing(&imd_backing);
	imd_handle_init(imd, cbmem_top());

	printk(BIOS_DEBUG, "CBMEM:\n");

	if (imd_create_tiered_empty(imd, CBMEM_ROOT_MIN_SIZE, CBMEM_LG_ALIGN,
				    CBMEM_SM_ROOT_SIZE, CBMEM_SM_ALIGN)) {
		printk(BIOS_DEBUG, "failed.\n");
		return;
	}

	/* Add the specified range first */
	if (size)
		cbmem_add(id, size);

	/* Complete migration to CBMEM. */
	cbmem_run_init_hooks(no_recovery);
}

int cbmem_initialize(void)
{
	return cbmem_initialize_id_size(0, 0);
}

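/* Recover an existing CBMEM area from cbmem_top() and, if size is non-zero,
   add an entry with the given id. Returns 0 on successful recovery, 1 on
   failure. */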
int cbmem_initialize_id_size(u32 id, u64 size)
{
	struct imd *imd;
	struct imd imd_backing;
	const int recovery = 1;

	cbmem_top_init_once();

	imd = imd_init_backing(&imd_backing);
	imd_handle_init(imd, cbmem_top());

	if (imd_recover(imd))
		return 1;

	/*
	 * Lock the imd in romstage on a recovery. The assumption is that
	 * if the imd area was recovered in romstage then the S3 resume path
	 * is being taken.
	 */
	if (ENV_ROMSTAGE)
		imd_lockdown(imd);

	/* Add the specified range first */
	if (size)
		cbmem_add(id, size);

	/* Complete migration to CBMEM. */
	cbmem_run_init_hooks(recovery);

	/* Recovery successful. */
	return 0;
}

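/* Initialize CBMEM for this boot: recover the existing area on an S3 wakeup,
   start with an empty one otherwise. Returns non-zero if recovery fails. */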
int cbmem_recovery(int is_wakeup)
{
	int rv = 0;
	if (!is_wakeup)
		cbmem_initialize_empty();
	else
		rv = cbmem_initialize();
	return rv;
}

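/* Return the entry with the given id, adding it with the requested size if it
   does not exist yet. Returns NULL if the entry cannot be created. */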
const struct cbmem_entry *cbmem_entry_add(u32 id, u64 size64)
{
	struct imd *imd;
	struct imd imd_backing;
	const struct imd_entry *e;

	imd = imd_init_backing_with_recover(&imd_backing);

	e = imd_entry_find_or_add(imd, id, size64);

	return imd_to_cbmem(e);
}

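/* Like cbmem_entry_add(), but return a pointer to the entry's storage rather
   than the entry handle. Returns NULL if the entry cannot be created. */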
void *cbmem_add(u32 id, u64 size)
{
	struct imd *imd;
	struct imd imd_backing;
	const struct imd_entry *e;

	imd = imd_init_backing_with_recover(&imd_backing);

	e = imd_entry_find_or_add(imd, id, size);

	if (e == NULL)
		return NULL;

	return imd_entry_at(imd, e);
}

/* Retrieve a region provided a given id. */
const struct cbmem_entry *cbmem_entry_find(u32 id)
{
	struct imd *imd;
	struct imd imd_backing;
	const struct imd_entry *e;

	imd = imd_init_backing_with_recover(&imd_backing);

	e = imd_entry_find(imd, id);

	return imd_to_cbmem(e);
}

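/* Return a pointer to the storage of the entry with the given id, or NULL if
   no such entry exists. */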
void *cbmem_find(u32 id)
{
	struct imd *imd;
	struct imd imd_backing;
	const struct imd_entry *e;

	imd = imd_init_backing_with_recover(&imd_backing);

	e = imd_entry_find(imd, id);

	if (e == NULL)
		return NULL;

	return imd_entry_at(imd, e);
}

/* Remove a reserved region. Returns 0 on success, < 0 on error. Note: A region
 * cannot be removed unless it was the last one added. */
int cbmem_entry_remove(const struct cbmem_entry *entry)
{
	struct imd *imd;
	struct imd imd_backing;

	imd = imd_init_backing_with_recover(&imd_backing);

	return imd_entry_remove(imd, cbmem_to_imd(entry));
}

u64 cbmem_entry_size(const struct cbmem_entry *entry)
{
	struct imd *imd;
	struct imd imd_backing;

	imd = imd_init_backing_with_recover(&imd_backing);

	return imd_entry_size(imd, cbmem_to_imd(entry));
}

void *cbmem_entry_start(const struct cbmem_entry *entry)
{
	struct imd *imd;
	struct imd imd_backing;

	imd = imd_init_backing_with_recover(&imd_backing);

	return imd_entry_at(imd, cbmem_to_imd(entry));
}

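/* Report the memory occupied by CBMEM to the bootmem allocator so it is
   reserved as a table region. */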
void cbmem_add_bootmem(void)
{
	void *baseptr = NULL;
	size_t size = 0;

	cbmem_get_region(&baseptr, &size);
	bootmem_add_range((uintptr_t)baseptr, size, BM_MEM_TABLE);
}

void cbmem_get_region(void **baseptr, size_t *size)
{
	imd_region_used(cbmem_get_imd(), baseptr, size);
}

#if ENV_PAYLOAD_LOADER || (CONFIG(EARLY_CBMEM_LIST) \
	&& (ENV_POSTCAR || ENV_ROMSTAGE))
/*
 * -fdata-sections doesn't work so well on read only strings. They all
 * get put in the same section even though those strings may never be
 * referenced in the final binary.
 */
void cbmem_list(void)
{
	static const struct imd_lookup lookup[] = { CBMEM_ID_TO_NAME_TABLE };
	struct imd *imd;
	struct imd imd_backing;

	imd = imd_init_backing_with_recover(&imd_backing);
	imd_print_entries(imd, lookup, ARRAY_SIZE(lookup));
}
#endif

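/* Export every CBMEM entry, except the imd root/small metadata entries, as an
   LB_TAG_CBMEM_ENTRY record in the coreboot table. */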
void cbmem_add_records_to_cbtable(struct lb_header *header)
{
	struct imd_cursor cursor;
	struct imd *imd;

	imd = cbmem_get_imd();

	if (imd_cursor_init(imd, &cursor))
		return;

	while (1) {
		const struct imd_entry *e;
		struct lb_cbmem_entry *lbe;
		uint32_t id;

		e = imd_cursor_next(&cursor);

		if (e == NULL)
			break;

		id = imd_entry_id(imd, e);
		/* Don't add these metadata entries. */
		if (id == CBMEM_ID_IMD_ROOT || id == CBMEM_ID_IMD_SMALL)
			continue;

		lbe = (struct lb_cbmem_entry *)lb_new_record(header);
		lbe->tag = LB_TAG_CBMEM_ENTRY;
		lbe->size = sizeof(*lbe);
		lbe->address = (uintptr_t)imd_entry_at(imd, e);
		lbe->entry_size = imd_entry_size(imd, e);
		lbe->id = id;
	}
}