blob: d6eed28860dc236739f567b726f604589275c7fe [file] [log] [blame]
/*
 * This file is part of the coreboot project.
 *
 * Copyright (C) 2013 Google, Inc.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; version 2 of the License.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */
15
16#include <bootstate.h>
17#include <bootmem.h>
18#include <console/console.h>
19#include <cbmem.h>
20#include <imd.h>
21#include <rules.h>
22#include <string.h>
23#include <stdlib.h>
24#include <arch/early_variables.h>
Aaron Durbin0dff57d2015-03-05 21:18:33 -060025
/*
 * We need special handling on x86 where CAR global migration is employed. One
 * cannot use true globals in that circumstance because CAR is where the globals
 * are backed -- creating a circular dependency. For non CAR platforms globals
 * are free to be used as well as any stages that are purely executing out of
 * RAM. For CAR platforms that don't migrate globals the as-linked globals can
 * be used, but they need special decoration using CAR_GLOBAL. That ensures
 * proper object placement in conjunction with the linker.
 *
 * For the CAR global migration platforms we have to always try to partially
 * recover CBMEM from cbmem_top() whenever we try to access it. In other
 * environments we're not so constrained and just keep the backing imd struct
 * in a global. This also means that we can easily tell whether CBMEM has
 * explicitly been initialized or recovered yet on those platforms, and don't
 * need to put the burden on board or chipset code to tell us by returning
 * NULL from cbmem_top() before that point.
 */
/* True when a file-scope/static imd object is safe to use in this stage. */
#define CAN_USE_GLOBALS \
	(!IS_ENABLED(CONFIG_ARCH_X86) || ENV_RAMSTAGE || ENV_POSTCAR || \
	 IS_ENABLED(CONFIG_NO_CAR_GLOBAL_MIGRATION))
Julius Werner3c814b22016-08-19 16:20:40 -070046
Aaron Durbin0dff57d2015-03-05 21:18:33 -060047static inline struct imd *cbmem_get_imd(void)
48{
Julius Werner3c814b22016-08-19 16:20:40 -070049 if (CAN_USE_GLOBALS) {
Aaron Durbin403fdbc2017-08-02 10:57:23 -060050 static struct imd imd_cbmem CAR_GLOBAL;
Aaron Durbin0dff57d2015-03-05 21:18:33 -060051 return &imd_cbmem;
52 }
53 return NULL;
54}
55
/* Reinterpret an internal imd entry handle as the public cbmem handle.
 * The two are layout-compatible opaque types, so a cast suffices. */
static inline const struct cbmem_entry *imd_to_cbmem(const struct imd_entry *e)
{
	const struct cbmem_entry *entry = (const struct cbmem_entry *)e;

	return entry;
}
60
/* Reinterpret a public cbmem entry handle as the internal imd handle --
 * the inverse of imd_to_cbmem(). */
static inline const struct imd_entry *cbmem_to_imd(const struct cbmem_entry *e)
{
	const struct imd_entry *entry = (const struct imd_entry *)e;

	return entry;
}
65
66/* These are the different situations to handle:
Kyösti Mälkki513a1a82018-06-03 12:29:50 +030067 *
Lee Leahye20a3192017-03-09 16:21:34 -080068 * In ramstage cbmem_initialize() attempts a recovery of the
69 * cbmem region set up by romstage. It uses cbmem_top() as the
70 * starting point of recovery.
Aaron Durbin0dff57d2015-03-05 21:18:33 -060071 *
Lee Leahye20a3192017-03-09 16:21:34 -080072 * In romstage, similar to ramstage, cbmem_initialize() needs to
73 * attempt recovery of the cbmem area using cbmem_top() as the limit.
74 * cbmem_initialize_empty() initializes an empty cbmem area from
75 * cbmem_top();
Aaron Durbin0dff57d2015-03-05 21:18:33 -060076 *
77 */
78static struct imd *imd_init_backing(struct imd *backing)
79{
80 struct imd *imd;
81
82 imd = cbmem_get_imd();
83
84 if (imd != NULL)
85 return imd;
86
87 imd = backing;
88
89 return imd;
90}
91
92static struct imd *imd_init_backing_with_recover(struct imd *backing)
93{
94 struct imd *imd;
95
96 imd = imd_init_backing(backing);
Julius Werner3c814b22016-08-19 16:20:40 -070097 if (!CAN_USE_GLOBALS) {
98 /* Always partially recover if we can't keep track of whether
99 * we have already initialized CBMEM in this stage. */
Kyösti Mälkkie1fb0522015-05-26 00:30:10 +0300100 imd_handle_init(imd, cbmem_top());
Aaron Durbin0dff57d2015-03-05 21:18:33 -0600101 imd_handle_init_partial_recovery(imd);
102 }
103
104 return imd;
105}
106
/* Create a fresh, empty CBMEM area with no pre-reserved entry
 * (id/size of 0 means "no initial region"). */
void cbmem_initialize_empty(void)
{
	cbmem_initialize_empty_id_size(0, 0);
}
111
/* One-time hook invoked when CBMEM is first initialized during boot.
 * Weak no-op default; boards/chipsets that must prime cbmem_top()
 * provide a strong override. */
void __weak cbmem_top_init(void)
{
}
115
116static void cbmem_top_init_once(void)
117{
118 /* Call one-time hook on expected cbmem init during boot. This sequence
Kyösti Mälkki513a1a82018-06-03 12:29:50 +0300119 assumes first init call is in romstage. */
120 if (!ENV_ROMSTAGE)
Aaron Durbindfdea2a2017-08-01 10:27:10 -0600121 return;
122
123 cbmem_top_init();
124}
125
Lee Leahy522149c2015-05-08 11:33:55 -0700126void cbmem_initialize_empty_id_size(u32 id, u64 size)
127{
Aaron Durbin0dff57d2015-03-05 21:18:33 -0600128 struct imd *imd;
129 struct imd imd_backing;
Aaron Durbin41607a42015-06-09 13:54:10 -0500130 const int no_recovery = 0;
Aaron Durbin0dff57d2015-03-05 21:18:33 -0600131
Aaron Durbindfdea2a2017-08-01 10:27:10 -0600132 cbmem_top_init_once();
133
Aaron Durbin0dff57d2015-03-05 21:18:33 -0600134 imd = imd_init_backing(&imd_backing);
Kyösti Mälkkie1fb0522015-05-26 00:30:10 +0300135 imd_handle_init(imd, cbmem_top());
Aaron Durbin0dff57d2015-03-05 21:18:33 -0600136
137 printk(BIOS_DEBUG, "CBMEM:\n");
138
Lee Leahy522149c2015-05-08 11:33:55 -0700139 if (imd_create_tiered_empty(imd, CBMEM_ROOT_MIN_SIZE, CBMEM_LG_ALIGN,
140 CBMEM_SM_ROOT_SIZE, CBMEM_SM_ALIGN)) {
Aaron Durbin0dff57d2015-03-05 21:18:33 -0600141 printk(BIOS_DEBUG, "failed.\n");
142 return;
143 }
144
Lee Leahy522149c2015-05-08 11:33:55 -0700145 /* Add the specified range first */
146 if (size)
147 cbmem_add(id, size);
148
Aaron Durbin0dff57d2015-03-05 21:18:33 -0600149 /* Complete migration to CBMEM. */
Aaron Durbin41607a42015-06-09 13:54:10 -0500150 cbmem_run_init_hooks(no_recovery);
Aaron Durbin0dff57d2015-03-05 21:18:33 -0600151}
152
/* Attempt recovery of an existing CBMEM area with no extra entry
 * reservation. Returns 1 on recovery failure, 0 on success. */
int cbmem_initialize(void)
{
	return cbmem_initialize_id_size(0, 0);
}
157
158int cbmem_initialize_id_size(u32 id, u64 size)
159{
Aaron Durbin0dff57d2015-03-05 21:18:33 -0600160 struct imd *imd;
161 struct imd imd_backing;
Aaron Durbin41607a42015-06-09 13:54:10 -0500162 const int recovery = 1;
Aaron Durbin0dff57d2015-03-05 21:18:33 -0600163
Aaron Durbindfdea2a2017-08-01 10:27:10 -0600164 cbmem_top_init_once();
165
Aaron Durbin0dff57d2015-03-05 21:18:33 -0600166 imd = imd_init_backing(&imd_backing);
Kyösti Mälkkie1fb0522015-05-26 00:30:10 +0300167 imd_handle_init(imd, cbmem_top());
Aaron Durbin0dff57d2015-03-05 21:18:33 -0600168
169 if (imd_recover(imd))
170 return 1;
171
172#if defined(__PRE_RAM__)
173 /*
174 * Lock the imd in romstage on a recovery. The assumption is that
175 * if the imd area was recovered in romstage then S3 resume path
176 * is being taken.
177 */
178 imd_lockdown(imd);
179#endif
180
Lee Leahy522149c2015-05-08 11:33:55 -0700181 /* Add the specified range first */
182 if (size)
183 cbmem_add(id, size);
184
Aaron Durbin0dff57d2015-03-05 21:18:33 -0600185 /* Complete migration to CBMEM. */
Aaron Durbin41607a42015-06-09 13:54:10 -0500186 cbmem_run_init_hooks(recovery);
Aaron Durbin0dff57d2015-03-05 21:18:33 -0600187
188 /* Recovery successful. */
189 return 0;
190}
191
/* Boot-path entry point: a cold/normal boot starts with an empty CBMEM
 * (always succeeds, returns 0); an S3 wakeup must recover the previous
 * contents and propagates cbmem_initialize()'s result. */
int cbmem_recovery(int is_wakeup)
{
	if (!is_wakeup) {
		cbmem_initialize_empty();
		return 0;
	}

	return cbmem_initialize();
}
201
202const struct cbmem_entry *cbmem_entry_add(u32 id, u64 size64)
203{
204 struct imd *imd;
205 struct imd imd_backing;
206 const struct imd_entry *e;
207
208 imd = imd_init_backing_with_recover(&imd_backing);
209
210 e = imd_entry_find_or_add(imd, id, size64);
211
212 return imd_to_cbmem(e);
213}
214
215void *cbmem_add(u32 id, u64 size)
216{
217 struct imd *imd;
218 struct imd imd_backing;
219 const struct imd_entry *e;
220
221 imd = imd_init_backing_with_recover(&imd_backing);
222
223 e = imd_entry_find_or_add(imd, id, size);
224
225 if (e == NULL)
226 return NULL;
227
228 return imd_entry_at(imd, e);
229}
230
231/* Retrieve a region provided a given id. */
232const struct cbmem_entry *cbmem_entry_find(u32 id)
233{
234 struct imd *imd;
235 struct imd imd_backing;
236 const struct imd_entry *e;
237
238 imd = imd_init_backing_with_recover(&imd_backing);
239
240 e = imd_entry_find(imd, id);
241
242 return imd_to_cbmem(e);
243}
244
245void *cbmem_find(u32 id)
246{
247 struct imd *imd;
248 struct imd imd_backing;
249 const struct imd_entry *e;
250
251 imd = imd_init_backing_with_recover(&imd_backing);
252
253 e = imd_entry_find(imd, id);
254
255 if (e == NULL)
256 return NULL;
257
258 return imd_entry_at(imd, e);
259}
260
261/* Remove a reserved region. Returns 0 on success, < 0 on error. Note: A region
262 * cannot be removed unless it was the last one added. */
263int cbmem_entry_remove(const struct cbmem_entry *entry)
264{
265 struct imd *imd;
266 struct imd imd_backing;
267
268 imd = imd_init_backing_with_recover(&imd_backing);
269
270 return imd_entry_remove(imd, cbmem_to_imd(entry));
271}
272
273u64 cbmem_entry_size(const struct cbmem_entry *entry)
274{
275 struct imd *imd;
276 struct imd imd_backing;
277
278 imd = imd_init_backing_with_recover(&imd_backing);
279
280 return imd_entry_size(imd, cbmem_to_imd(entry));
281}
282
283void *cbmem_entry_start(const struct cbmem_entry *entry)
284{
285 struct imd *imd;
286 struct imd imd_backing;
287
288 imd = imd_init_backing_with_recover(&imd_backing);
289
290 return imd_entry_at(imd, cbmem_to_imd(entry));
291}
292
Aaron Durbin0dff57d2015-03-05 21:18:33 -0600293void cbmem_add_bootmem(void)
294{
Aaron Durbinfb532422017-08-02 10:40:25 -0600295 void *baseptr = NULL;
Aaron Durbin0dff57d2015-03-05 21:18:33 -0600296 size_t size = 0;
297
Philipp Deppenwiese84258db2018-08-16 00:31:26 +0200298 cbmem_get_region(&baseptr, &size);
Patrick Rudolph9ab9db02018-04-05 09:14:51 +0200299 bootmem_add_range((uintptr_t)baseptr, size, BM_MEM_TABLE);
Aaron Durbin0dff57d2015-03-05 21:18:33 -0600300}
301
/* Report the base address and used size of the CBMEM region via the
 * out-parameters, as tracked by the backing imd. */
void cbmem_get_region(void **baseptr, size_t *size)
{
	imd_region_used(cbmem_get_imd(), baseptr, size);
}
306
Lee Leahye2422e32016-07-24 19:52:15 -0700307#if ENV_RAMSTAGE || (IS_ENABLED(CONFIG_EARLY_CBMEM_LIST) \
308 && (ENV_POSTCAR || ENV_ROMSTAGE))
Aaron Durbin1ca2d862015-09-30 12:26:54 -0500309/*
310 * -fdata-sections doesn't work so well on read only strings. They all
311 * get put in the same section even though those strings may never be
312 * referenced in the final binary.
313 */
Aaron Durbin0dff57d2015-03-05 21:18:33 -0600314void cbmem_list(void)
315{
316 static const struct imd_lookup lookup[] = { CBMEM_ID_TO_NAME_TABLE };
Lee Leahye2422e32016-07-24 19:52:15 -0700317 struct imd *imd;
318 struct imd imd_backing;
Aaron Durbin0dff57d2015-03-05 21:18:33 -0600319
Lee Leahye2422e32016-07-24 19:52:15 -0700320 imd = imd_init_backing_with_recover(&imd_backing);
321 imd_print_entries(imd, lookup, ARRAY_SIZE(lookup));
Aaron Durbin0dff57d2015-03-05 21:18:33 -0600322}
Aaron Durbin1ca2d862015-09-30 12:26:54 -0500323#endif
324
325void cbmem_add_records_to_cbtable(struct lb_header *header)
326{
327 struct imd_cursor cursor;
328 struct imd *imd;
329
330 imd = cbmem_get_imd();
331
332 if (imd_cursor_init(imd, &cursor))
333 return;
334
335 while (1) {
336 const struct imd_entry *e;
337 struct lb_cbmem_entry *lbe;
338 uint32_t id;
339
340 e = imd_cursor_next(&cursor);
341
342 if (e == NULL)
343 break;
344
345 id = imd_entry_id(imd, e);
346 /* Don't add these metadata entries. */
347 if (id == CBMEM_ID_IMD_ROOT || id == CBMEM_ID_IMD_SMALL)
348 continue;
349
350 lbe = (struct lb_cbmem_entry *)lb_new_record(header);
351 lbe->tag = LB_TAG_CBMEM_ENTRY;
352 lbe->size = sizeof(*lbe);
353 lbe->address = (uintptr_t)imd_entry_at(imd, e);
354 lbe->entry_size = imd_entry_size(imd, e);
355 lbe->id = id;
356 }
357}