/*
 * This file is part of the coreboot project.
 *
 * Copyright (C) 2013 Google, Inc.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; version 2 of the License.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */

#include <bootstate.h>
#include <bootmem.h>
#include <compiler.h>
#include <console/console.h>
#include <cbmem.h>
#include <imd.h>
#include <rules.h>
#include <string.h>
#include <stdlib.h>
#include <arch/early_variables.h>

/*
 * We need special handling on x86, where CAR global migration is employed.
 * True globals cannot be used in that circumstance because CAR is where the
 * globals are backed -- creating a circular dependency. On non-CAR platforms,
 * and in any stage that executes purely out of RAM, globals are free to be
 * used. On CAR platforms that don't migrate globals, the as-linked globals
 * can be used, but they need special decoration using CAR_GLOBAL. That
 * ensures proper object placement in conjunction with the linker.
 *
 * On platforms with CAR global migration we always have to attempt a partial
 * recovery of CBMEM from cbmem_top() whenever we try to access it. In other
 * environments we're not so constrained and just keep the backing imd struct
 * in a global. This also means that we can easily tell whether CBMEM has
 * explicitly been initialized or recovered yet on those platforms, and don't
 * need to put the burden on board or chipset code to tell us by returning
 * NULL from cbmem_top() before that point.
 */
#define CAN_USE_GLOBALS \
	(!IS_ENABLED(CONFIG_ARCH_X86) || ENV_RAMSTAGE || ENV_POSTCAR || \
	 IS_ENABLED(CONFIG_NO_CAR_GLOBAL_MIGRATION))

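/*
 * Illustrative sketch (not part of the build): on a CAR platform that
 * migrates globals, a stage-local object is declared with CAR_GLOBAL and
 * accessed through the car_get_var()/car_set_var() helpers from
 * <arch/early_variables.h>, so it is found both before and after migration.
 * The variable and function names below are hypothetical.
 */
#if 0
static int example_counter CAR_GLOBAL;

static void example_count_access(void)
{
	/* The helpers resolve the object's current home: cache-as-RAM
	 * before migration, CBMEM afterwards. */
	int count = car_get_var(example_counter);
	car_set_var(example_counter, count + 1);
}
#endif
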
static inline struct imd *cbmem_get_imd(void)
{
	if (CAN_USE_GLOBALS) {
		static struct imd imd_cbmem CAR_GLOBAL;
		return &imd_cbmem;
	}
	return NULL;
}

static inline const struct cbmem_entry *imd_to_cbmem(const struct imd_entry *e)
{
	return (const struct cbmem_entry *)e;
}

static inline const struct imd_entry *cbmem_to_imd(const struct cbmem_entry *e)
{
	return (const struct imd_entry *)e;
}

/* These are the different situations to handle:
 * CONFIG_EARLY_CBMEM_INIT:
 *	In ramstage, cbmem_initialize() attempts a recovery of the
 *	cbmem region set up by romstage. It uses cbmem_top() as the
 *	starting point of recovery.
 *
 *	In romstage, similar to ramstage, cbmem_initialize() needs to
 *	attempt recovery of the cbmem area using cbmem_top() as the limit.
 *	cbmem_initialize_empty() initializes an empty cbmem area from
 *	cbmem_top().
 */
static struct imd *imd_init_backing(struct imd *backing)
{
	struct imd *imd;

	imd = cbmem_get_imd();

	if (imd != NULL)
		return imd;

	imd = backing;

	return imd;
}

static struct imd *imd_init_backing_with_recover(struct imd *backing)
{
	struct imd *imd;

	imd = imd_init_backing(backing);
	if (!CAN_USE_GLOBALS) {
		/* Always partially recover if we can't keep track of whether
		 * we have already initialized CBMEM in this stage. */
		imd_handle_init(imd, cbmem_top());
		imd_handle_init_partial_recovery(imd);
	}

	return imd;
}

void cbmem_initialize_empty(void)
{
	cbmem_initialize_empty_id_size(0, 0);
}

void __weak cbmem_top_init(void)
{
}

static void cbmem_top_init_once(void)
{
	/* Call one-time hook on expected cbmem init during boot. This sequence
	   assumes the first init call is in romstage for early cbmem init and
	   in ramstage for late cbmem init. */
	if (IS_ENABLED(CONFIG_EARLY_CBMEM_INIT) && !ENV_ROMSTAGE)
		return;
	if (IS_ENABLED(CONFIG_LATE_CBMEM_INIT) && !ENV_RAMSTAGE)
		return;

	cbmem_top_init();
}

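/*
 * Illustrative sketch (not part of the build): because cbmem_top_init() is
 * declared __weak above, a board or chipset can supply a strong definition
 * to run one-time setup when CBMEM first comes up. The body below is a
 * hypothetical example, not a real chipset hook.
 */
#if 0
void cbmem_top_init(void)
{
	/* Hypothetical: program or cache whatever cbmem_top() depends on. */
	printk(BIOS_DEBUG, "CBMEM top at %p\n", cbmem_top());
}
#endif
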
void cbmem_initialize_empty_id_size(u32 id, u64 size)
{
	struct imd *imd;
	struct imd imd_backing;
	const int no_recovery = 0;

	cbmem_top_init_once();

	imd = imd_init_backing(&imd_backing);
	imd_handle_init(imd, cbmem_top());

	printk(BIOS_DEBUG, "CBMEM:\n");

	if (imd_create_tiered_empty(imd, CBMEM_ROOT_MIN_SIZE, CBMEM_LG_ALIGN,
					CBMEM_SM_ROOT_SIZE, CBMEM_SM_ALIGN)) {
		printk(BIOS_DEBUG, "failed.\n");
		return;
	}

	/* Add the specified range first. */
	if (size)
		cbmem_add(id, size);

	/* Complete migration to CBMEM. */
	cbmem_run_init_hooks(no_recovery);
}

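/*
 * Illustrative sketch (not part of the build): other modules register CBMEM
 * init hooks, which cbmem_run_init_hooks() invokes once the area has been
 * created or recovered; the is_recovery argument distinguishes the two
 * paths. The hook body is a hypothetical example; the registration macro
 * comes from <cbmem.h>.
 */
#if 0
static void example_migrate_data(int is_recovery)
{
	/* On a fresh (non-recovery) init, stash early data into CBMEM. */
	if (!is_recovery)
		cbmem_add(CBMEM_ID_NONE, 64);
}
ROMSTAGE_CBMEM_INIT_HOOK(example_migrate_data)
#endif
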
int cbmem_initialize(void)
{
	return cbmem_initialize_id_size(0, 0);
}

int cbmem_initialize_id_size(u32 id, u64 size)
{
	struct imd *imd;
	struct imd imd_backing;
	const int recovery = 1;

	cbmem_top_init_once();

	imd = imd_init_backing(&imd_backing);
	imd_handle_init(imd, cbmem_top());

	if (imd_recover(imd))
		return 1;

#if defined(__PRE_RAM__)
	/*
	 * Lock the imd in romstage on a recovery. The assumption is that
	 * if the imd area was recovered in romstage then the S3 resume
	 * path is being taken.
	 */
	imd_lockdown(imd);
#endif

	/* Add the specified range first. */
	if (size)
		cbmem_add(id, size);

	/* Complete migration to CBMEM. */
	cbmem_run_init_hooks(recovery);

	/* Recovery successful. */
	return 0;
}

int cbmem_recovery(int is_wakeup)
{
	int rv = 0;
	if (!is_wakeup)
		cbmem_initialize_empty();
	else
		rv = cbmem_initialize();
	return rv;
}

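/*
 * Illustrative sketch (not part of the build): a typical romstage caller
 * picks between a fresh CBMEM and S3 recovery in one step. The function
 * name and s3_resume flag are hypothetical placeholders.
 */
#if 0
static void example_romstage_main(int s3_resume)
{
	/* A fresh boot wipes CBMEM; S3 resume must recover it intact. */
	if (cbmem_recovery(s3_resume))
		printk(BIOS_ERR, "CBMEM recovery failed on S3 resume.\n");
}
#endif
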
const struct cbmem_entry *cbmem_entry_add(u32 id, u64 size64)
{
	struct imd *imd;
	struct imd imd_backing;
	const struct imd_entry *e;

	imd = imd_init_backing_with_recover(&imd_backing);

	e = imd_entry_find_or_add(imd, id, size64);

	return imd_to_cbmem(e);
}

void *cbmem_add(u32 id, u64 size)
{
	struct imd *imd;
	struct imd imd_backing;
	const struct imd_entry *e;

	imd = imd_init_backing_with_recover(&imd_backing);

	e = imd_entry_find_or_add(imd, id, size);

	if (e == NULL)
		return NULL;

	return imd_entry_at(imd, e);
}

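/*
 * Illustrative sketch (not part of the build): cbmem_add() is find-or-add,
 * so a later stage can look the same region up again with cbmem_find().
 * CBMEM_ID_VPD is one ID from the CBMEM ID table; the size is an arbitrary
 * example value and the function name is hypothetical.
 */
#if 0
static void example_stash(void)
{
	void *buf = cbmem_add(CBMEM_ID_VPD, 4 * KiB);

	if (buf == NULL)
		return;

	/* A subsequent stage retrieves the same buffer by ID. */
	assert(cbmem_find(CBMEM_ID_VPD) == buf);
}
#endif
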
/* Retrieve a region provided a given id. */
const struct cbmem_entry *cbmem_entry_find(u32 id)
{
	struct imd *imd;
	struct imd imd_backing;
	const struct imd_entry *e;

	imd = imd_init_backing_with_recover(&imd_backing);

	e = imd_entry_find(imd, id);

	return imd_to_cbmem(e);
}

void *cbmem_find(u32 id)
{
	struct imd *imd;
	struct imd imd_backing;
	const struct imd_entry *e;

	imd = imd_init_backing_with_recover(&imd_backing);

	e = imd_entry_find(imd, id);

	if (e == NULL)
		return NULL;

	return imd_entry_at(imd, e);
}

/* Remove a reserved region. Returns 0 on success, < 0 on error. Note: A
 * region cannot be removed unless it was the last one added. */
int cbmem_entry_remove(const struct cbmem_entry *entry)
{
	struct imd *imd;
	struct imd imd_backing;

	imd = imd_init_backing_with_recover(&imd_backing);

	return imd_entry_remove(imd, cbmem_to_imd(entry));
}

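/*
 * Illustrative sketch (not part of the build): since removal is restricted
 * to the most recently added entry, a scratch region can be added and then
 * removed only while it is still the newest one. CBMEM_ID_NONE is a real
 * ID; the usage and function name are hypothetical.
 */
#if 0
static void example_scratch(void)
{
	const struct cbmem_entry *scratch = cbmem_entry_add(CBMEM_ID_NONE, 256);

	if (scratch == NULL)
		return;

	/* ... use cbmem_entry_start(scratch) for temporary work ... */

	/* Succeeds only while no newer entry has been added. */
	cbmem_entry_remove(scratch);
}
#endif
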
u64 cbmem_entry_size(const struct cbmem_entry *entry)
{
	struct imd *imd;
	struct imd imd_backing;

	imd = imd_init_backing_with_recover(&imd_backing);

	return imd_entry_size(imd, cbmem_to_imd(entry));
}

void *cbmem_entry_start(const struct cbmem_entry *entry)
{
	struct imd *imd;
	struct imd imd_backing;

	imd = imd_init_backing_with_recover(&imd_backing);

	return imd_entry_at(imd, cbmem_to_imd(entry));
}

void cbmem_add_bootmem(void)
{
	void *baseptr = NULL;
	size_t size = 0;

	cbmem_get_region(&baseptr, &size);
	bootmem_add_range((uintptr_t)baseptr, size, BM_MEM_TABLE);
}

void cbmem_get_region(void **baseptr, size_t *size)
{
	imd_region_used(cbmem_get_imd(), baseptr, size);
}

#if ENV_RAMSTAGE || (IS_ENABLED(CONFIG_EARLY_CBMEM_LIST) \
	&& (ENV_POSTCAR || ENV_ROMSTAGE))
/*
 * -fdata-sections doesn't work so well on read-only strings. They all
 * get put in the same section even though those strings may never be
 * referenced in the final binary.
 */
void cbmem_list(void)
{
	static const struct imd_lookup lookup[] = { CBMEM_ID_TO_NAME_TABLE };
	struct imd *imd;
	struct imd imd_backing;

	imd = imd_init_backing_with_recover(&imd_backing);
	imd_print_entries(imd, lookup, ARRAY_SIZE(lookup));
}
#endif

void cbmem_add_records_to_cbtable(struct lb_header *header)
{
	struct imd_cursor cursor;
	struct imd *imd;

	imd = cbmem_get_imd();

	if (imd_cursor_init(imd, &cursor))
		return;

	while (1) {
		const struct imd_entry *e;
		struct lb_cbmem_entry *lbe;
		uint32_t id;

		e = imd_cursor_next(&cursor);

		if (e == NULL)
			break;

		id = imd_entry_id(imd, e);
		/* Don't add these metadata entries. */
		if (id == CBMEM_ID_IMD_ROOT || id == CBMEM_ID_IMD_SMALL)
			continue;

		lbe = (struct lb_cbmem_entry *)lb_new_record(header);
		lbe->tag = LB_TAG_CBMEM_ENTRY;
		lbe->size = sizeof(*lbe);
		lbe->address = (uintptr_t)imd_entry_at(imd, e);
		lbe->entry_size = imd_entry_size(imd, e);
		lbe->id = id;
	}
}
361}