/*
 * This file is part of the coreboot project.
 *
 * Copyright (C) 2013 Google, Inc.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; version 2 of the License.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */

#include <bootstate.h>
#include <bootmem.h>
#include <console/console.h>
#include <cbmem.h>
#include <imd.h>
#include <rules.h>
#include <string.h>
#include <stdlib.h>
#include <arch/early_variables.h>

/*
 * We need special handling on x86 before ramstage because we cannot use global
 * variables (we're executing in-place from flash so we don't have a writable
 * data segment, and we cannot use CAR_GLOBAL here since that mechanism itself
 * is dependent on CBMEM). Therefore, we have to always try to partially recover
 * CBMEM from cbmem_top() whenever we try to access it. In other environments
 * we're not so constrained and just keep the backing imd struct in a global.
 * This also means that we can easily tell whether CBMEM has explicitly been
 * initialized or recovered yet on those platforms, and don't need to put the
 * burden on board or chipset code to tell us by returning NULL from cbmem_top()
 * before that point.
 */
#define CAN_USE_GLOBALS \
	(!IS_ENABLED(CONFIG_ARCH_X86) || ENV_RAMSTAGE || ENV_POSTCAR)

static inline struct imd *cbmem_get_imd(void)
{
	if (CAN_USE_GLOBALS) {
		static struct imd imd_cbmem;
		return &imd_cbmem;
	}
	return NULL;
}
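/*
 * Note: a NULL return here (x86 before ramstage) is the signal that this
 * stage cannot track CBMEM state in a global; the callers below compensate
 * by supplying a stack-allocated struct imd through imd_init_backing().
 */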

static inline const struct cbmem_entry *imd_to_cbmem(const struct imd_entry *e)
{
	return (const struct cbmem_entry *)e;
}

static inline const struct imd_entry *cbmem_to_imd(const struct cbmem_entry *e)
{
	return (const struct imd_entry *)e;
}

/* These are the different situations to handle:
 * CONFIG_EARLY_CBMEM_INIT:
 *	In ramstage, cbmem_initialize() attempts a recovery of the
 *	cbmem region set up by romstage. It uses cbmem_top() as the
 *	starting point of recovery.
 *
 *	In romstage, similar to ramstage, cbmem_initialize() needs to
 *	attempt recovery of the cbmem area using cbmem_top() as the limit.
 *	cbmem_initialize_empty() initializes an empty cbmem area from
 *	cbmem_top().
 */
static struct imd *imd_init_backing(struct imd *backing)
{
	struct imd *imd;

	imd = cbmem_get_imd();

	if (imd != NULL)
		return imd;

	imd = backing;

	return imd;
}

static struct imd *imd_init_backing_with_recover(struct imd *backing)
{
	struct imd *imd;

	imd = imd_init_backing(backing);
	if (!CAN_USE_GLOBALS) {
		/* Always partially recover if we can't keep track of whether
		 * we have already initialized CBMEM in this stage. */
		imd_handle_init(imd, cbmem_top());
		imd_handle_init_partial_recovery(imd);
	}

	return imd;
}
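/*
 * Every public accessor below follows the same pattern. A minimal sketch of
 * a hypothetical accessor (cbmem_example_lookup() is illustrative only, not
 * part of the CBMEM API):
 *
 *	void *cbmem_example_lookup(u32 id)
 *	{
 *		struct imd imd_backing;
 *		struct imd *imd = imd_init_backing_with_recover(&imd_backing);
 *		const struct imd_entry *e = imd_entry_find(imd, id);
 *
 *		return e == NULL ? NULL : imd_entry_at(imd, e);
 *	}
 */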

void cbmem_initialize_empty(void)
{
	cbmem_initialize_empty_id_size(0, 0);
}

void cbmem_initialize_empty_id_size(u32 id, u64 size)
{
	struct imd *imd;
	struct imd imd_backing;
	const int no_recovery = 0;

	imd = imd_init_backing(&imd_backing);
	imd_handle_init(imd, cbmem_top());

	printk(BIOS_DEBUG, "CBMEM:\n");

	if (imd_create_tiered_empty(imd, CBMEM_ROOT_MIN_SIZE, CBMEM_LG_ALIGN,
				CBMEM_SM_ROOT_SIZE, CBMEM_SM_ALIGN)) {
		printk(BIOS_DEBUG, "failed.\n");
		return;
	}

	/* Add the specified range first. */
	if (size)
		cbmem_add(id, size);

	/* Complete migration to CBMEM. */
	cbmem_run_init_hooks(no_recovery);
}
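/*
 * A caller sketch (the size variable here is hypothetical): a platform that
 * must place one large allocation at the very start of CBMEM can reserve it
 * at creation time, e.g.
 *
 *	cbmem_initialize_empty_id_size(CBMEM_ID_FSP_RESERVED_MEMORY,
 *					fsp_reserved_bytes);
 */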

int cbmem_initialize(void)
{
	return cbmem_initialize_id_size(0, 0);
}

int cbmem_initialize_id_size(u32 id, u64 size)
{
	struct imd *imd;
	struct imd imd_backing;
	const int recovery = 1;

	imd = imd_init_backing(&imd_backing);
	imd_handle_init(imd, cbmem_top());

	if (imd_recover(imd))
		return 1;

#if defined(__PRE_RAM__)
	/*
	 * Lock the imd in romstage on a recovery. The assumption is that
	 * if the imd area was recovered in romstage then the S3 resume
	 * path is being taken.
	 */
	imd_lockdown(imd);
#endif

	/* Add the specified range first. */
	if (size)
		cbmem_add(id, size);

	/* Complete migration to CBMEM. */
	cbmem_run_init_hooks(recovery);

	/* Recovery successful. */
	return 0;
}

int cbmem_recovery(int is_wakeup)
{
	int rv = 0;
	if (!is_wakeup)
		cbmem_initialize_empty();
	else
		rv = cbmem_initialize();
	return rv;
}
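/*
 * Typical romstage call site (a sketch; how the wakeup state is determined
 * is platform-specific, and acpi_is_wakeup_s3() is just one example):
 *
 *	int s3resume = acpi_is_wakeup_s3();
 *	if (cbmem_recovery(s3resume))
 *		{ recovery failed; the S3 resume path cannot proceed }
 */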

const struct cbmem_entry *cbmem_entry_add(u32 id, u64 size64)
{
	struct imd *imd;
	struct imd imd_backing;
	const struct imd_entry *e;

	imd = imd_init_backing_with_recover(&imd_backing);

	e = imd_entry_find_or_add(imd, id, size64);

	return imd_to_cbmem(e);
}

void *cbmem_add(u32 id, u64 size)
{
	struct imd *imd;
	struct imd imd_backing;
	const struct imd_entry *e;

	imd = imd_init_backing_with_recover(&imd_backing);

	e = imd_entry_find_or_add(imd, id, size);

	if (e == NULL)
		return NULL;

	return imd_entry_at(imd, e);
}

/* Retrieve a region provided a given id. */
const struct cbmem_entry *cbmem_entry_find(u32 id)
{
	struct imd *imd;
	struct imd imd_backing;
	const struct imd_entry *e;

	imd = imd_init_backing_with_recover(&imd_backing);

	e = imd_entry_find(imd, id);

	return imd_to_cbmem(e);
}

void *cbmem_find(u32 id)
{
	struct imd *imd;
	struct imd imd_backing;
	const struct imd_entry *e;

	imd = imd_init_backing_with_recover(&imd_backing);

	e = imd_entry_find(imd, id);

	if (e == NULL)
		return NULL;

	return imd_entry_at(imd, e);
}
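/*
 * Add/find round trip across stages (a sketch; CBMEM_ID_EXAMPLE and
 * struct example_data are hypothetical):
 *
 *	struct example_data *d = cbmem_add(CBMEM_ID_EXAMPLE, sizeof(*d));
 *
 * and later, in the same or a subsequent stage:
 *
 *	struct example_data *d = cbmem_find(CBMEM_ID_EXAMPLE);
 *
 * Both return NULL on failure, so callers must check before dereferencing.
 */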

/* Remove a reserved region. Returns 0 on success, < 0 on error. Note: A region
 * cannot be removed unless it was the last one added. */
int cbmem_entry_remove(const struct cbmem_entry *entry)
{
	struct imd *imd;
	struct imd imd_backing;

	imd = imd_init_backing_with_recover(&imd_backing);

	return imd_entry_remove(imd, cbmem_to_imd(entry));
}

u64 cbmem_entry_size(const struct cbmem_entry *entry)
{
	struct imd *imd;
	struct imd imd_backing;

	imd = imd_init_backing_with_recover(&imd_backing);

	return imd_entry_size(imd, cbmem_to_imd(entry));
}

void *cbmem_entry_start(const struct cbmem_entry *entry)
{
	struct imd *imd;
	struct imd imd_backing;

	imd = imd_init_backing_with_recover(&imd_backing);

	return imd_entry_at(imd, cbmem_to_imd(entry));
}
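/*
 * Entry-based variant of the same round trip (a sketch; CBMEM_ID_EXAMPLE is
 * hypothetical). Because removal is last-in-first-out, the remove below is
 * only valid while e is still the most recently added entry:
 *
 *	const struct cbmem_entry *e = cbmem_entry_add(CBMEM_ID_EXAMPLE, 64);
 *	void *p = cbmem_entry_start(e);
 *	u64 sz = cbmem_entry_size(e);
 *	cbmem_entry_remove(e);
 */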

void cbmem_add_bootmem(void)
{
	void *baseptr = NULL;
	size_t size = 0;

	imd_region_used(cbmem_get_imd(), &baseptr, &size);
	bootmem_add_range((uintptr_t)baseptr, size, LB_MEM_TABLE);
}

#if ENV_RAMSTAGE || (IS_ENABLED(CONFIG_EARLY_CBMEM_LIST) \
		&& (ENV_POSTCAR || ENV_ROMSTAGE))
/*
 * -fdata-sections doesn't work so well on read-only strings. They all
 * get put in the same section even though those strings may never be
 * referenced in the final binary.
 */
void cbmem_list(void)
{
	static const struct imd_lookup lookup[] = { CBMEM_ID_TO_NAME_TABLE };
	struct imd *imd;
	struct imd imd_backing;

	imd = imd_init_backing_with_recover(&imd_backing);
	imd_print_entries(imd, lookup, ARRAY_SIZE(lookup));
}
#endif

void cbmem_add_records_to_cbtable(struct lb_header *header)
{
	struct imd_cursor cursor;
	struct imd *imd;

	imd = cbmem_get_imd();

	if (imd_cursor_init(imd, &cursor))
		return;

	while (1) {
		const struct imd_entry *e;
		struct lb_cbmem_entry *lbe;
		uint32_t id;

		e = imd_cursor_next(&cursor);

		if (e == NULL)
			break;

		id = imd_entry_id(imd, e);
		/* Don't add these metadata entries. */
		if (id == CBMEM_ID_IMD_ROOT || id == CBMEM_ID_IMD_SMALL)
			continue;

		lbe = (struct lb_cbmem_entry *)lb_new_record(header);
		lbe->tag = LB_TAG_CBMEM_ENTRY;
		lbe->size = sizeof(*lbe);
		lbe->address = (uintptr_t)imd_entry_at(imd, e);
		lbe->entry_size = imd_entry_size(imd, e);
		lbe->id = id;
	}
}