/*
 * This file is part of the coreboot project.
 *
 * Copyright (C) 2013 Google, Inc.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; version 2 of the License.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */

#include <bootstate.h>
#include <bootmem.h>
#include <console/console.h>
#include <cbmem.h>
#include <imd.h>
#include <rules.h>
#include <string.h>
#include <stdlib.h>
#include <arch/early_variables.h>
#if IS_ENABLED(CONFIG_ARCH_X86) && !IS_ENABLED(CONFIG_EARLY_CBMEM_INIT)
#include <arch/acpi.h>
#endif

/*
 * We need special handling on x86 before ramstage because we cannot use global
 * variables (we're executing in-place from flash so we don't have a writable
 * data segment, and we cannot use CAR_GLOBAL here since that mechanism itself
 * is dependent on CBMEM). Therefore, we have to always try to partially recover
 * CBMEM from cbmem_top() whenever we try to access it. In other environments
 * we're not so constrained and just keep the backing imd struct in a global.
 * This also means that we can easily tell whether CBMEM has explicitly been
 * initialized or recovered yet on those platforms, and don't need to put the
 * burden on board or chipset code to tell us by returning NULL from cbmem_top()
 * before that point.
 */
#define CAN_USE_GLOBALS \
	(!IS_ENABLED(CONFIG_ARCH_X86) || ENV_RAMSTAGE || ENV_POSTCAR)

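/*
 * Return the static backing store for the imd when this stage can use
 * globals; return NULL otherwise so that callers fall back to a
 * caller-provided backing struct plus partial recovery from cbmem_top().
 */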
static inline struct imd *cbmem_get_imd(void)
{
	if (CAN_USE_GLOBALS) {
		static struct imd imd_cbmem;
		return &imd_cbmem;
	}
	return NULL;
}

static inline const struct cbmem_entry *imd_to_cbmem(const struct imd_entry *e)
{
	return (const struct cbmem_entry *)e;
}

static inline const struct imd_entry *cbmem_to_imd(const struct cbmem_entry *e)
{
	return (const struct imd_entry *)e;
}

/* These are the different situations to handle:
 * CONFIG_EARLY_CBMEM_INIT:
 *	In ramstage cbmem_initialize() attempts a recovery of the
 *	cbmem region set up by romstage. It uses cbmem_top() as the
 *	starting point of recovery.
 *
 *	In romstage, similar to ramstage, cbmem_initialize() needs to
 *	attempt recovery of the cbmem area using cbmem_top() as the limit.
 *	cbmem_initialize_empty() initializes an empty cbmem area from
 *	cbmem_top().
 */
static struct imd *imd_init_backing(struct imd *backing)
{
	struct imd *imd;

	imd = cbmem_get_imd();

	if (imd != NULL)
		return imd;

	imd = backing;

	return imd;
}

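/*
 * Like imd_init_backing(), but also perform the partial recovery from
 * cbmem_top() that stages without usable globals (x86 before ramstage)
 * need on every access, since they cannot remember whether CBMEM was
 * already initialized.
 */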
static struct imd *imd_init_backing_with_recover(struct imd *backing)
{
	struct imd *imd;

	imd = imd_init_backing(backing);
	if (!CAN_USE_GLOBALS) {
		/* Always partially recover if we can't keep track of whether
		 * we have already initialized CBMEM in this stage. */
		imd_handle_init(imd, cbmem_top());
		imd_handle_init_partial_recovery(imd);
	}

	return imd;
}

void cbmem_initialize_empty(void)
{
	cbmem_initialize_empty_id_size(0, 0);
}

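/*
 * Create a fresh CBMEM area. If size is nonzero, an entry with the given
 * id is added immediately so that it is the first allocation in the new
 * area.
 */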
void cbmem_initialize_empty_id_size(u32 id, u64 size)
{
	struct imd *imd;
	struct imd imd_backing;
	const int no_recovery = 0;

	imd = imd_init_backing(&imd_backing);
	imd_handle_init(imd, cbmem_top());

	printk(BIOS_DEBUG, "CBMEM:\n");

	if (imd_create_tiered_empty(imd, CBMEM_ROOT_MIN_SIZE, CBMEM_LG_ALIGN,
					CBMEM_SM_ROOT_SIZE, CBMEM_SM_ALIGN)) {
		printk(BIOS_DEBUG, "failed.\n");
		return;
	}

	/* Add the specified range first */
	if (size)
		cbmem_add(id, size);

	/* Complete migration to CBMEM. */
	cbmem_run_init_hooks(no_recovery);
}

static inline int cbmem_fail_recovery(void)
{
	cbmem_initialize_empty();
	cbmem_fail_resume();
	return 1;
}

int cbmem_initialize(void)
{
	return cbmem_initialize_id_size(0, 0);
}

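/*
 * Attempt to recover a previously created CBMEM area. Returns 0 on
 * successful recovery and 1 on failure; callers that cannot tolerate
 * failure are expected to fall back to cbmem_initialize_empty() (see
 * cbmem_fail_recovery() above).
 */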
int cbmem_initialize_id_size(u32 id, u64 size)
{
	struct imd *imd;
	struct imd imd_backing;
	const int recovery = 1;

	imd = imd_init_backing(&imd_backing);
	imd_handle_init(imd, cbmem_top());

	if (imd_recover(imd))
		return 1;

#if defined(__PRE_RAM__)
	/*
	 * Lock the imd in romstage on a recovery. The assumption is that
	 * if the imd area was recovered in romstage then the S3 resume
	 * path is being taken.
	 */
	imd_lockdown(imd);
#endif

	/* Add the specified range first */
	if (size)
		cbmem_add(id, size);

	/* Complete migration to CBMEM. */
	cbmem_run_init_hooks(recovery);

	/* Recovery successful. */
	return 0;
}

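/*
 * A typical call site (a sketch, not part of this file): romstage decides
 * between a fresh area and S3 recovery based on the wakeup source, e.g.
 *
 *	cbmem_recovery(acpi_is_wakeup_s3());
 */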
int cbmem_recovery(int is_wakeup)
{
	int rv = 0;
	if (!is_wakeup)
		cbmem_initialize_empty();
	else
		rv = cbmem_initialize();
	return rv;
}

const struct cbmem_entry *cbmem_entry_add(u32 id, u64 size64)
{
	struct imd *imd;
	struct imd imd_backing;
	const struct imd_entry *e;

	imd = imd_init_backing_with_recover(&imd_backing);

	e = imd_entry_find_or_add(imd, id, size64);

	return imd_to_cbmem(e);
}

void *cbmem_add(u32 id, u64 size)
{
	struct imd *imd;
	struct imd imd_backing;
	const struct imd_entry *e;

	imd = imd_init_backing_with_recover(&imd_backing);

	e = imd_entry_find_or_add(imd, id, size);

	if (e == NULL)
		return NULL;

	return imd_entry_at(imd, e);
}
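
/*
 * A minimal usage sketch (hypothetical caller, not part of this file),
 * assuming CBMEM_ID_CONSOLE from commonlib's cbmem_id.h: an entry is
 * created once with cbmem_add() and can be looked up again by id, here
 * or in a later stage, with cbmem_find():
 *
 *	void *buf = cbmem_add(CBMEM_ID_CONSOLE, 4096);
 *	...
 *	buf = cbmem_find(CBMEM_ID_CONSOLE);
 */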

/* Retrieve a region given its id. */
const struct cbmem_entry *cbmem_entry_find(u32 id)
{
	struct imd *imd;
	struct imd imd_backing;
	const struct imd_entry *e;

	imd = imd_init_backing_with_recover(&imd_backing);

	e = imd_entry_find(imd, id);

	return imd_to_cbmem(e);
}

void *cbmem_find(u32 id)
{
	struct imd *imd;
	struct imd imd_backing;
	const struct imd_entry *e;

	imd = imd_init_backing_with_recover(&imd_backing);

	e = imd_entry_find(imd, id);

	if (e == NULL)
		return NULL;

	return imd_entry_at(imd, e);
}

/* Remove a reserved region. Returns 0 on success, < 0 on error. Note: A region
 * cannot be removed unless it was the last one added. */
int cbmem_entry_remove(const struct cbmem_entry *entry)
{
	struct imd *imd;
	struct imd imd_backing;

	imd = imd_init_backing_with_recover(&imd_backing);

	return imd_entry_remove(imd, cbmem_to_imd(entry));
}

u64 cbmem_entry_size(const struct cbmem_entry *entry)
{
	struct imd *imd;
	struct imd imd_backing;

	imd = imd_init_backing_with_recover(&imd_backing);

	return imd_entry_size(imd, cbmem_to_imd(entry));
}

void *cbmem_entry_start(const struct cbmem_entry *entry)
{
	struct imd *imd;
	struct imd imd_backing;

	imd = imd_init_backing_with_recover(&imd_backing);

	return imd_entry_at(imd, cbmem_to_imd(entry));
}

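/*
 * Report the base address and size of the memory currently occupied by
 * CBMEM, as tracked by the imd, so that callers can reserve the range
 * (see cbmem_add_bootmem() below).
 */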
void cbmem_region_used(uintptr_t *base, size_t *size)
{
	void *baseptr;
	imd_region_used(cbmem_get_imd(), &baseptr, size);
	*base = (uintptr_t)baseptr;
}

void cbmem_add_bootmem(void)
{
	uintptr_t base = 0;
	size_t size = 0;

	cbmem_region_used(&base, &size);
	bootmem_add_range(base, size, LB_MEM_TABLE);
}

#if ENV_RAMSTAGE || (IS_ENABLED(CONFIG_EARLY_CBMEM_LIST) \
	&& (ENV_POSTCAR || ENV_ROMSTAGE))
/*
 * -fdata-sections doesn't work so well on read-only strings. They all
 * get put in the same section even though those strings may never be
 * referenced in the final binary.
 */
void cbmem_list(void)
{
	static const struct imd_lookup lookup[] = { CBMEM_ID_TO_NAME_TABLE };
	struct imd *imd;
	struct imd imd_backing;

	imd = imd_init_backing_with_recover(&imd_backing);
	imd_print_entries(imd, lookup, ARRAY_SIZE(lookup));
}
#endif

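/*
 * Mirror every CBMEM entry, except the imd metadata roots, into the
 * coreboot table as an LB_TAG_CBMEM_ENTRY record so payloads and tools
 * can enumerate CBMEM contents.
 */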
void cbmem_add_records_to_cbtable(struct lb_header *header)
{
	struct imd_cursor cursor;
	struct imd *imd;

	imd = cbmem_get_imd();

	if (imd_cursor_init(imd, &cursor))
		return;

	while (1) {
		const struct imd_entry *e;
		struct lb_cbmem_entry *lbe;
		uint32_t id;

		e = imd_cursor_next(&cursor);

		if (e == NULL)
			break;

		id = imd_entry_id(imd, e);
		/* Don't add these metadata entries. */
		if (id == CBMEM_ID_IMD_ROOT || id == CBMEM_ID_IMD_SMALL)
			continue;

		lbe = (struct lb_cbmem_entry *)lb_new_record(header);
		lbe->tag = LB_TAG_CBMEM_ENTRY;
		lbe->size = sizeof(*lbe);
		lbe->address = (uintptr_t)imd_entry_at(imd, e);
		lbe->entry_size = imd_entry_size(imd, e);
		lbe->id = id;
	}
}