/*
 * This file is part of the coreboot project.
 *
 * Copyright (C) 2013 Google, Inc.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; version 2 of the License.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */

#include <bootstate.h>
#include <bootmem.h>
#include <console/console.h>
#include <cbmem.h>
#include <imd.h>
#include <string.h>
#include <stdlib.h>
#include <arch/early_variables.h>

/*
 * We need special handling on x86, where CAR global migration is employed.
 * True globals cannot be used in that circumstance because CAR is where the
 * globals are backed -- creating a circular dependency. On non-CAR platforms,
 * and in any stage that executes purely out of RAM, globals are free to be
 * used. On CAR platforms that don't migrate globals, the as-linked globals
 * can be used, but they need special decoration using CAR_GLOBAL. That
 * ensures proper object placement in conjunction with the linker.
 *
 * On the CAR global migration platforms we always have to attempt a partial
 * recovery of CBMEM from cbmem_top() whenever we try to access it. In other
 * environments we're not so constrained and just keep the backing imd struct
 * in a global. This also means that on those platforms we can easily tell
 * whether CBMEM has been explicitly initialized or recovered yet, and don't
 * need to put the burden on board or chipset code to tell us by returning
 * NULL from cbmem_top() before that point.
 */
#define CAN_USE_GLOBALS \
	(!IS_ENABLED(CONFIG_ARCH_X86) || ENV_RAMSTAGE || ENV_POSTCAR || \
	 IS_ENABLED(CONFIG_NO_CAR_GLOBAL_MIGRATION))

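/*
 * Return the statically allocated imd handle in stages where globals are
 * usable, or NULL when the caller must provide its own stack backing.
 */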
static inline struct imd *cbmem_get_imd(void)
{
	if (CAN_USE_GLOBALS) {
		static struct imd imd_cbmem CAR_GLOBAL;
		return &imd_cbmem;
	}
	return NULL;
}

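/*
 * struct cbmem_entry is an opaque view of struct imd_entry; these helpers
 * convert between the public and internal types.
 */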
static inline const struct cbmem_entry *imd_to_cbmem(const struct imd_entry *e)
{
	return (const struct cbmem_entry *)e;
}

static inline const struct imd_entry *cbmem_to_imd(const struct cbmem_entry *e)
{
	return (const struct imd_entry *)e;
}

/* These are the different situations to handle:
 *
 * In ramstage, cbmem_initialize() attempts a recovery of the
 * cbmem region set up by romstage. It uses cbmem_top() as the
 * starting point of recovery.
 *
 * In romstage, similar to ramstage, cbmem_initialize() needs to
 * attempt recovery of the cbmem area using cbmem_top() as the limit.
 * cbmem_initialize_empty() initializes an empty cbmem area from
 * cbmem_top().
 */
static struct imd *imd_init_backing(struct imd *backing)
{
	struct imd *imd;

	imd = cbmem_get_imd();

	if (imd != NULL)
		return imd;

	imd = backing;

	return imd;
}

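/*
 * Like imd_init_backing(), but when globals aren't usable also attempt a
 * partial recovery from cbmem_top() so the returned handle is valid even
 * if CBMEM was initialized earlier in this stage or in a previous one.
 */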
static struct imd *imd_init_backing_with_recover(struct imd *backing)
{
	struct imd *imd;

	imd = imd_init_backing(backing);
	if (!CAN_USE_GLOBALS) {
		/* Always partially recover if we can't keep track of whether
		 * we have already initialized CBMEM in this stage. */
		imd_handle_init(imd, cbmem_top());
		imd_handle_init_partial_recovery(imd);
	}

	return imd;
}

void cbmem_initialize_empty(void)
{
	cbmem_initialize_empty_id_size(0, 0);
}

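/*
 * Weak one-time setup hook; the default implementation does nothing. A
 * platform may provide a strong definition to perform setup the first time
 * CBMEM is initialized during boot, for example (hypothetical sketch):
 *
 *	void cbmem_top_init(void)
 *	{
 *		... program or reserve the memory backing cbmem_top() ...
 *	}
 */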
void __weak cbmem_top_init(void)
{
}

static void cbmem_top_init_once(void)
{
	/* Call the one-time hook on the expected cbmem init during boot.
	   This sequence assumes the first init call is in romstage. */
	if (!ENV_ROMSTAGE)
		return;

	cbmem_top_init();
}

void cbmem_initialize_empty_id_size(u32 id, u64 size)
{
	struct imd *imd;
	struct imd imd_backing;
	const int no_recovery = 0;

	cbmem_top_init_once();

	imd = imd_init_backing(&imd_backing);
	imd_handle_init(imd, cbmem_top());

	printk(BIOS_DEBUG, "CBMEM:\n");

	if (imd_create_tiered_empty(imd, CBMEM_ROOT_MIN_SIZE, CBMEM_LG_ALIGN,
				    CBMEM_SM_ROOT_SIZE, CBMEM_SM_ALIGN)) {
		printk(BIOS_DEBUG, "failed.\n");
		return;
	}

	/* Add the specified range first. */
	if (size)
		cbmem_add(id, size);

	/* Complete migration to CBMEM. */
	cbmem_run_init_hooks(no_recovery);
}

int cbmem_initialize(void)
{
	return cbmem_initialize_id_size(0, 0);
}

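/*
 * Recover a CBMEM area created earlier, optionally adding an entry with
 * the given id and size. Returns 0 on successful recovery, 1 if no valid
 * area could be recovered.
 */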
int cbmem_initialize_id_size(u32 id, u64 size)
{
	struct imd *imd;
	struct imd imd_backing;
	const int recovery = 1;

	cbmem_top_init_once();

	imd = imd_init_backing(&imd_backing);
	imd_handle_init(imd, cbmem_top());

	if (imd_recover(imd))
		return 1;

#if defined(__PRE_RAM__)
	/*
	 * Lock the imd in romstage on a recovery. The assumption is that
	 * if the imd area was recovered in romstage then the S3 resume
	 * path is being taken.
	 */
	imd_lockdown(imd);
#endif

	/* Add the specified range first. */
	if (size)
		cbmem_add(id, size);

	/* Complete migration to CBMEM. */
	cbmem_run_init_hooks(recovery);

	/* Recovery successful. */
	return 0;
}

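/*
 * Typical flow: on a normal boot, romstage creates a fresh area via
 * cbmem_initialize_empty(); on the S3 resume path it recovers the
 * existing one via cbmem_initialize(). Later stages then recover the
 * area romstage set up.
 */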
int cbmem_recovery(int is_wakeup)
{
	int rv = 0;

	if (!is_wakeup)
		cbmem_initialize_empty();
	else
		rv = cbmem_initialize();

	return rv;
}

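/*
 * Find an existing entry with the given id, or create a new one of the
 * given size. Returns NULL if the entry neither exists nor can be added.
 */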
const struct cbmem_entry *cbmem_entry_add(u32 id, u64 size64)
{
	struct imd *imd;
	struct imd imd_backing;
	const struct imd_entry *e;

	imd = imd_init_backing_with_recover(&imd_backing);

	e = imd_entry_find_or_add(imd, id, size64);

	return imd_to_cbmem(e);
}

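/*
 * Like cbmem_entry_add(), but return the entry's storage directly. A
 * caller typically checks for NULL before use, e.g. (illustrative only):
 *
 *	void *buf = cbmem_add(CBMEM_ID_CONSOLE, 4 * KiB);
 *	if (buf == NULL)
 *		return;
 */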
void *cbmem_add(u32 id, u64 size)
{
	struct imd *imd;
	struct imd imd_backing;
	const struct imd_entry *e;

	imd = imd_init_backing_with_recover(&imd_backing);

	e = imd_entry_find_or_add(imd, id, size);

	if (e == NULL)
		return NULL;

	return imd_entry_at(imd, e);
}

/* Retrieve a region provided a given id. */
const struct cbmem_entry *cbmem_entry_find(u32 id)
{
	struct imd *imd;
	struct imd imd_backing;
	const struct imd_entry *e;

	imd = imd_init_backing_with_recover(&imd_backing);

	e = imd_entry_find(imd, id);

	return imd_to_cbmem(e);
}

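/* Like cbmem_entry_find(), but return the entry's storage (or NULL). */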
void *cbmem_find(u32 id)
{
	struct imd *imd;
	struct imd imd_backing;
	const struct imd_entry *e;

	imd = imd_init_backing_with_recover(&imd_backing);

	e = imd_entry_find(imd, id);

	if (e == NULL)
		return NULL;

	return imd_entry_at(imd, e);
}

/* Remove a reserved region. Returns 0 on success, < 0 on error. Note: A
 * region cannot be removed unless it was the last one added. */
int cbmem_entry_remove(const struct cbmem_entry *entry)
{
	struct imd *imd;
	struct imd imd_backing;

	imd = imd_init_backing_with_recover(&imd_backing);

	return imd_entry_remove(imd, cbmem_to_imd(entry));
}

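/* Accessors for a recovered entry's size and starting address. */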
u64 cbmem_entry_size(const struct cbmem_entry *entry)
{
	struct imd *imd;
	struct imd imd_backing;

	imd = imd_init_backing_with_recover(&imd_backing);

	return imd_entry_size(imd, cbmem_to_imd(entry));
}

void *cbmem_entry_start(const struct cbmem_entry *entry)
{
	struct imd *imd;
	struct imd imd_backing;

	imd = imd_init_backing_with_recover(&imd_backing);

	return imd_entry_at(imd, cbmem_to_imd(entry));
}

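/*
 * Report the memory currently used by CBMEM to the bootmem allocator as
 * table memory so nothing else gets placed there.
 */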
void cbmem_add_bootmem(void)
{
	void *baseptr = NULL;
	size_t size = 0;

	cbmem_get_region(&baseptr, &size);
	bootmem_add_range((uintptr_t)baseptr, size, BM_MEM_TABLE);
}

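/* Return the base address and size of the region CBMEM currently occupies. */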
void cbmem_get_region(void **baseptr, size_t *size)
{
	imd_region_used(cbmem_get_imd(), baseptr, size);
}

#if ENV_RAMSTAGE || (IS_ENABLED(CONFIG_EARLY_CBMEM_LIST) \
	&& (ENV_POSTCAR || ENV_ROMSTAGE))
/*
 * -fdata-sections doesn't work so well on read only strings. They all
 * get put in the same section even though those strings may never be
 * referenced in the final binary.
 */
void cbmem_list(void)
{
	static const struct imd_lookup lookup[] = { CBMEM_ID_TO_NAME_TABLE };
	struct imd *imd;
	struct imd imd_backing;

	imd = imd_init_backing_with_recover(&imd_backing);
	imd_print_entries(imd, lookup, ARRAY_SIZE(lookup));
}
#endif

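/*
 * Publish each CBMEM entry into the coreboot table so payloads and the OS
 * can enumerate them; the imd root metadata entries are skipped.
 */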
void cbmem_add_records_to_cbtable(struct lb_header *header)
{
	struct imd_cursor cursor;
	struct imd *imd;

	imd = cbmem_get_imd();

	if (imd_cursor_init(imd, &cursor))
		return;

	while (1) {
		const struct imd_entry *e;
		struct lb_cbmem_entry *lbe;
		uint32_t id;

		e = imd_cursor_next(&cursor);

		if (e == NULL)
			break;

		id = imd_entry_id(imd, e);
		/* Don't add these metadata entries. */
		if (id == CBMEM_ID_IMD_ROOT || id == CBMEM_ID_IMD_SMALL)
			continue;

		lbe = (struct lb_cbmem_entry *)lb_new_record(header);
		lbe->tag = LB_TAG_CBMEM_ENTRY;
		lbe->size = sizeof(*lbe);
		lbe->address = (uintptr_t)imd_entry_at(imd, e);
		lbe->entry_size = imd_entry_size(imd, e);
		lbe->id = id;
	}
}