blob: c458e5e3b1a292eae32be9fba80c6ffa9dfd6bf0 [file] [log] [blame]
Aaron Durbin0dff57d2015-03-05 21:18:33 -06001/*
2 * This file is part of the coreboot project.
3 *
4 * Copyright (C) 2013 Google, Inc.
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; version 2 of the License.
9 *
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
Aaron Durbin0dff57d2015-03-05 21:18:33 -060014 */
15
16#include <bootstate.h>
17#include <bootmem.h>
18#include <console/console.h>
19#include <cbmem.h>
20#include <imd.h>
Aaron Durbin0dff57d2015-03-05 21:18:33 -060021#include <stdlib.h>
22#include <arch/early_variables.h>
Aaron Durbin0dff57d2015-03-05 21:18:33 -060023
/*
 * We need special handling on x86 where CAR global migration is employed. One
 * cannot use true globals in that circumstance because CAR is where the globals
 * are backed -- creating a circular dependency. For non CAR platforms globals
 * are free to be used as well as any stages that are purely executing out of
 * RAM. For CAR platforms that don't migrate globals the as-linked globals can
 * be used, but they need special decoration using CAR_GLOBAL. That ensures
 * proper object placement in conjunction with the linker.
 *
 * For the CAR global migration platforms we have to always try to partially
 * recover CBMEM from cbmem_top() whenever we try to access it. In other
 * environments we're not so constrained and just keep the backing imd struct
 * in a global. This also means that we can easily tell whether CBMEM has
 * explicitly been initialized or recovered yet on those platforms, and don't
 * need to put the burden on board or chipset code to tell us by returning
 * NULL from cbmem_top() before that point.
 */
/* True when a (CAR_GLOBAL-decorated) global may back the imd in this stage. */
#define CAN_USE_GLOBALS \
	(!CONFIG(ARCH_X86) || ENV_RAMSTAGE || ENV_POSTCAR || \
	 CONFIG(NO_CAR_GLOBAL_MIGRATION))
Julius Werner3c814b22016-08-19 16:20:40 -070044
/*
 * Return the stage-global imd backing CBMEM, or NULL when this stage
 * cannot keep state in a global (x86 CAR stages with global migration),
 * in which case callers must provide their own backing storage.
 */
static inline struct imd *cbmem_get_imd(void)
{
	if (CAN_USE_GLOBALS) {
		/* Declared inside the guard so the object only exists in
		   stages that can actually use it. */
		static struct imd imd_cbmem CAR_GLOBAL;
		return &imd_cbmem;
	}
	return NULL;
}
53
/* cbmem_entry is an opaque alias of imd_entry; convert by cast. */
static inline const struct cbmem_entry *imd_to_cbmem(const struct imd_entry *e)
{
	const struct cbmem_entry *entry = (const struct cbmem_entry *)e;

	return entry;
}
58
/* Inverse of imd_to_cbmem(): recover the underlying imd_entry pointer. */
static inline const struct imd_entry *cbmem_to_imd(const struct cbmem_entry *e)
{
	const struct imd_entry *entry = (const struct imd_entry *)e;

	return entry;
}
63
/* These are the different situations to handle:
 *
 * In ramstage cbmem_initialize() attempts a recovery of the
 * cbmem region set up by romstage. It uses cbmem_top() as the
 * starting point of recovery.
 *
 * In romstage, similar to ramstage, cbmem_initialize() needs to
 * attempt recovery of the cbmem area using cbmem_top() as the limit.
 * cbmem_initialize_empty() initializes an empty cbmem area from
 * cbmem_top().
 */
76static struct imd *imd_init_backing(struct imd *backing)
77{
78 struct imd *imd;
79
80 imd = cbmem_get_imd();
81
82 if (imd != NULL)
83 return imd;
84
85 imd = backing;
86
87 return imd;
88}
89
90static struct imd *imd_init_backing_with_recover(struct imd *backing)
91{
92 struct imd *imd;
93
94 imd = imd_init_backing(backing);
Julius Werner3c814b22016-08-19 16:20:40 -070095 if (!CAN_USE_GLOBALS) {
96 /* Always partially recover if we can't keep track of whether
97 * we have already initialized CBMEM in this stage. */
Kyösti Mälkkie1fb0522015-05-26 00:30:10 +030098 imd_handle_init(imd, cbmem_top());
Aaron Durbin0dff57d2015-03-05 21:18:33 -060099 imd_handle_init_partial_recovery(imd);
100 }
101
102 return imd;
103}
104
/* Create a brand-new, empty CBMEM area with no initial entry. */
void cbmem_initialize_empty(void)
{
	cbmem_initialize_empty_id_size(0, 0);
}
109
/* Weak default one-time hook; boards/chipsets may override it. */
void __weak cbmem_top_init(void)
{
}
113
114static void cbmem_top_init_once(void)
115{
116 /* Call one-time hook on expected cbmem init during boot. This sequence
Kyösti Mälkki513a1a82018-06-03 12:29:50 +0300117 assumes first init call is in romstage. */
118 if (!ENV_ROMSTAGE)
Aaron Durbindfdea2a2017-08-01 10:27:10 -0600119 return;
120
121 cbmem_top_init();
122}
123
/*
 * Create a new, empty CBMEM area below cbmem_top() and, when size is
 * nonzero, immediately add one entry with the given id/size. Finishes
 * by running the init hooks with recovery == 0.
 */
void cbmem_initialize_empty_id_size(u32 id, u64 size)
{
	struct imd *imd;
	struct imd imd_backing;
	const int no_recovery = 0;

	cbmem_top_init_once();

	imd = imd_init_backing(&imd_backing);
	imd_handle_init(imd, cbmem_top());

	printk(BIOS_DEBUG, "CBMEM:\n");

	/* Two-tiered imd: large-aligned root plus small-entry sub-region. */
	if (imd_create_tiered_empty(imd, CBMEM_ROOT_MIN_SIZE, CBMEM_LG_ALIGN,
				    CBMEM_SM_ROOT_SIZE, CBMEM_SM_ALIGN)) {
		printk(BIOS_DEBUG, "failed.\n");
		return;
	}

	/* Add the specified range first */
	if (size)
		cbmem_add(id, size);

	/* Complete migration to CBMEM. */
	cbmem_run_init_hooks(no_recovery);
}
150
/* Recover a previously created CBMEM area; no extra entry is added.
   Returns 0 on success, 1 on failure. */
int cbmem_initialize(void)
{
	return cbmem_initialize_id_size(0, 0);
}
155
/*
 * Recover the CBMEM area rooted at cbmem_top(). When size is nonzero,
 * add one entry with the given id/size after recovery. Runs the init
 * hooks with recovery == 1. Returns 0 on success, 1 on failure.
 */
int cbmem_initialize_id_size(u32 id, u64 size)
{
	struct imd *imd;
	struct imd imd_backing;
	const int recovery = 1;

	cbmem_top_init_once();

	imd = imd_init_backing(&imd_backing);
	imd_handle_init(imd, cbmem_top());

	if (imd_recover(imd))
		return 1;

#if defined(__PRE_RAM__)
	/*
	 * Lock the imd in romstage on a recovery. The assumption is that
	 * if the imd area was recovered in romstage then S3 resume path
	 * is being taken.
	 */
	imd_lockdown(imd);
#endif

	/* Add the specified range first */
	if (size)
		cbmem_add(id, size);

	/* Complete migration to CBMEM. */
	cbmem_run_init_hooks(recovery);

	/* Recovery successful. */
	return 0;
}
189
/*
 * Initialize CBMEM based on wake source: recover the existing area on a
 * wakeup (S3), otherwise start empty. Returns the recovery result on
 * wakeup and 0 otherwise.
 */
int cbmem_recovery(int is_wakeup)
{
	if (is_wakeup)
		return cbmem_initialize();

	cbmem_initialize_empty();
	return 0;
}
199
200const struct cbmem_entry *cbmem_entry_add(u32 id, u64 size64)
201{
202 struct imd *imd;
203 struct imd imd_backing;
204 const struct imd_entry *e;
205
206 imd = imd_init_backing_with_recover(&imd_backing);
207
208 e = imd_entry_find_or_add(imd, id, size64);
209
210 return imd_to_cbmem(e);
211}
212
213void *cbmem_add(u32 id, u64 size)
214{
215 struct imd *imd;
216 struct imd imd_backing;
217 const struct imd_entry *e;
218
219 imd = imd_init_backing_with_recover(&imd_backing);
220
221 e = imd_entry_find_or_add(imd, id, size);
222
223 if (e == NULL)
224 return NULL;
225
226 return imd_entry_at(imd, e);
227}
228
229/* Retrieve a region provided a given id. */
230const struct cbmem_entry *cbmem_entry_find(u32 id)
231{
232 struct imd *imd;
233 struct imd imd_backing;
234 const struct imd_entry *e;
235
236 imd = imd_init_backing_with_recover(&imd_backing);
237
238 e = imd_entry_find(imd, id);
239
240 return imd_to_cbmem(e);
241}
242
243void *cbmem_find(u32 id)
244{
245 struct imd *imd;
246 struct imd imd_backing;
247 const struct imd_entry *e;
248
249 imd = imd_init_backing_with_recover(&imd_backing);
250
251 e = imd_entry_find(imd, id);
252
253 if (e == NULL)
254 return NULL;
255
256 return imd_entry_at(imd, e);
257}
258
259/* Remove a reserved region. Returns 0 on success, < 0 on error. Note: A region
260 * cannot be removed unless it was the last one added. */
261int cbmem_entry_remove(const struct cbmem_entry *entry)
262{
263 struct imd *imd;
264 struct imd imd_backing;
265
266 imd = imd_init_backing_with_recover(&imd_backing);
267
268 return imd_entry_remove(imd, cbmem_to_imd(entry));
269}
270
271u64 cbmem_entry_size(const struct cbmem_entry *entry)
272{
273 struct imd *imd;
274 struct imd imd_backing;
275
276 imd = imd_init_backing_with_recover(&imd_backing);
277
278 return imd_entry_size(imd, cbmem_to_imd(entry));
279}
280
281void *cbmem_entry_start(const struct cbmem_entry *entry)
282{
283 struct imd *imd;
284 struct imd imd_backing;
285
286 imd = imd_init_backing_with_recover(&imd_backing);
287
288 return imd_entry_at(imd, cbmem_to_imd(entry));
289}
290
Aaron Durbin0dff57d2015-03-05 21:18:33 -0600291void cbmem_add_bootmem(void)
292{
Aaron Durbinfb532422017-08-02 10:40:25 -0600293 void *baseptr = NULL;
Aaron Durbin0dff57d2015-03-05 21:18:33 -0600294 size_t size = 0;
295
Philipp Deppenwiese84258db2018-08-16 00:31:26 +0200296 cbmem_get_region(&baseptr, &size);
Patrick Rudolph9ab9db02018-04-05 09:14:51 +0200297 bootmem_add_range((uintptr_t)baseptr, size, BM_MEM_TABLE);
Aaron Durbin0dff57d2015-03-05 21:18:33 -0600298}
299
/* Report the base address and currently used size of the CBMEM region. */
void cbmem_get_region(void **baseptr, size_t *size)
{
	imd_region_used(cbmem_get_imd(), baseptr, size);
}
304
#if ENV_RAMSTAGE || (CONFIG(EARLY_CBMEM_LIST) \
	&& (ENV_POSTCAR || ENV_ROMSTAGE))
/*
 * -fdata-sections doesn't work so well on read only strings. They all
 * get put in the same section even though those strings may never be
 * referenced in the final binary.
 */
/* Print every CBMEM entry (with its symbolic name) to the console. */
void cbmem_list(void)
{
	static const struct imd_lookup lookup[] = { CBMEM_ID_TO_NAME_TABLE };
	struct imd *imd;
	struct imd imd_backing;

	imd = imd_init_backing_with_recover(&imd_backing);
	imd_print_entries(imd, lookup, ARRAY_SIZE(lookup));
}
#endif
322
323void cbmem_add_records_to_cbtable(struct lb_header *header)
324{
325 struct imd_cursor cursor;
326 struct imd *imd;
327
328 imd = cbmem_get_imd();
329
330 if (imd_cursor_init(imd, &cursor))
331 return;
332
333 while (1) {
334 const struct imd_entry *e;
335 struct lb_cbmem_entry *lbe;
336 uint32_t id;
337
338 e = imd_cursor_next(&cursor);
339
340 if (e == NULL)
341 break;
342
343 id = imd_entry_id(imd, e);
344 /* Don't add these metadata entries. */
345 if (id == CBMEM_ID_IMD_ROOT || id == CBMEM_ID_IMD_SMALL)
346 continue;
347
348 lbe = (struct lb_cbmem_entry *)lb_new_record(header);
349 lbe->tag = LB_TAG_CBMEM_ENTRY;
350 lbe->size = sizeof(*lbe);
351 lbe->address = (uintptr_t)imd_entry_at(imd, e);
352 lbe->entry_size = imd_entry_size(imd, e);
353 lbe->id = id;
354 }
355}