/*
 * This file is part of the coreboot project.
 *
 * Copyright (C) 2013 Google, Inc.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; version 2 of the License.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc.
 */

#include <bootstate.h>
#include <bootmem.h>
#include <console/console.h>
#include <cbmem.h>
#include <imd.h>
#include <rules.h>
#include <string.h>
#include <stdlib.h>
#include <arch/early_variables.h>
#if IS_ENABLED(CONFIG_ARCH_X86) && !IS_ENABLED(CONFIG_EARLY_CBMEM_INIT)
#include <arch/acpi.h>
#endif

static inline struct imd *cbmem_get_imd(void)
{
	/* Only supply a backing store for imd in ramstage. */
	if (ENV_RAMSTAGE) {
		static struct imd imd_cbmem;
		return &imd_cbmem;
	}
	return NULL;
}

static inline const struct cbmem_entry *imd_to_cbmem(const struct imd_entry *e)
{
	return (const struct cbmem_entry *)e;
}

static inline const struct imd_entry *cbmem_to_imd(const struct cbmem_entry *e)
{
	return (const struct imd_entry *)e;
}

/* These are the different situations to handle:
 *
 * CONFIG_EARLY_CBMEM_INIT:
 *	In ramstage, cbmem_initialize() attempts to recover the cbmem
 *	region set up by romstage, using cbmem_top() as the starting
 *	point of recovery.
 *
 *	In romstage, similarly, cbmem_initialize() attempts recovery of
 *	the cbmem area using cbmem_top() as the limit, while
 *	cbmem_initialize_empty() creates an empty cbmem area growing
 *	down from cbmem_top().
 */
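
/*
 * Illustrative sketch (not compiled): with CONFIG_EARLY_CBMEM_INIT, a
 * romstage typically chooses between recovery and fresh creation based
 * on the wakeup state. The use of acpi_is_wakeup_s3() below is an
 * assumption for illustration; actual call sites vary by platform.
 *
 *	// S3 resume: recover the area created on the previous boot.
 *	// Normal boot: create an empty cbmem area below cbmem_top().
 *	cbmem_recovery(acpi_is_wakeup_s3());
 */
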
static struct imd *imd_init_backing(struct imd *backing)
{
	struct imd *imd;

	imd = cbmem_get_imd();

	if (imd != NULL)
		return imd;

	imd = backing;

	return imd;
}

static struct imd *imd_init_backing_with_recover(struct imd *backing)
{
	struct imd *imd;

	imd = imd_init_backing(backing);
	if (!ENV_RAMSTAGE) {
		imd_handle_init(imd, cbmem_top());

		/* Need to partially recover all the time outside of ramstage
		 * because there's object storage outside of the stack. */
		imd_handle_init_partial_recovery(imd);
	}

	return imd;
}

void cbmem_initialize_empty(void)
{
	cbmem_initialize_empty_id_size(0, 0);
}

void cbmem_initialize_empty_id_size(u32 id, u64 size)
{
	struct imd *imd;
	struct imd imd_backing;

	imd = imd_init_backing(&imd_backing);
	imd_handle_init(imd, cbmem_top());

	printk(BIOS_DEBUG, "CBMEM:\n");

	if (imd_create_tiered_empty(imd, CBMEM_ROOT_MIN_SIZE, CBMEM_LG_ALIGN,
					CBMEM_SM_ROOT_SIZE, CBMEM_SM_ALIGN)) {
		printk(BIOS_DEBUG, "failed.\n");
		return;
	}

	/* Add the specified range first */
	if (size)
		cbmem_add(id, size);

	/* Complete migration to CBMEM. */
	cbmem_run_init_hooks();
}
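
/*
 * Illustrative sketch (not compiled): passing a non-zero id/size pair
 * reserves that entry immediately after the empty area is created and
 * before the init hooks run. The id and size below are hypothetical
 * stand-ins.
 *
 *	cbmem_initialize_empty_id_size(CBMEM_ID_FSP_RESERVED_MEMORY,
 *					fsp_reserved_bytes);
 */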

static inline int cbmem_fail_recovery(void)
{
	cbmem_initialize_empty();
	cbmem_fail_resume();
	return 1;
}

int cbmem_initialize(void)
{
	return cbmem_initialize_id_size(0, 0);
}

int cbmem_initialize_id_size(u32 id, u64 size)
{
	struct imd *imd;
	struct imd imd_backing;

	imd = imd_init_backing(&imd_backing);
	imd_handle_init(imd, cbmem_top());

	if (imd_recover(imd))
		return 1;

#if defined(__PRE_RAM__)
	/*
	 * Lock the imd in romstage on a recovery. The assumption is that
	 * if the imd area was recovered in romstage then the S3 resume
	 * path is being taken.
	 */
	imd_lockdown(imd);
#endif

	/* Add the specified range first */
	if (size)
		cbmem_add(id, size);

	/* Complete migration to CBMEM. */
	cbmem_run_init_hooks();

	/* Recovery successful. */
	return 0;
}

int cbmem_recovery(int is_wakeup)
{
	int rv = 0;
	if (!is_wakeup)
		cbmem_initialize_empty();
	else
		rv = cbmem_initialize();
	return rv;
}

const struct cbmem_entry *cbmem_entry_add(u32 id, u64 size64)
{
	struct imd *imd;
	struct imd imd_backing;
	const struct imd_entry *e;

	imd = imd_init_backing_with_recover(&imd_backing);

	e = imd_entry_find_or_add(imd, id, size64);

	return imd_to_cbmem(e);
}

void *cbmem_add(u32 id, u64 size)
{
	struct imd *imd;
	struct imd imd_backing;
	const struct imd_entry *e;

	imd = imd_init_backing_with_recover(&imd_backing);

	e = imd_entry_find_or_add(imd, id, size);

	if (e == NULL)
		return NULL;

	return imd_entry_at(imd, e);
}

/* Retrieve a region given its id. */
const struct cbmem_entry *cbmem_entry_find(u32 id)
{
	struct imd *imd;
	struct imd imd_backing;
	const struct imd_entry *e;

	imd = imd_init_backing_with_recover(&imd_backing);

	e = imd_entry_find(imd, id);

	return imd_to_cbmem(e);
}

void *cbmem_find(u32 id)
{
	struct imd *imd;
	struct imd imd_backing;
	const struct imd_entry *e;

	imd = imd_init_backing_with_recover(&imd_backing);

	e = imd_entry_find(imd, id);

	if (e == NULL)
		return NULL;

	return imd_entry_at(imd, e);
}
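
/*
 * Illustrative sketch (not compiled): a producer reserves an entry once
 * with cbmem_add() and a consumer retrieves it later with cbmem_find(),
 * possibly in a later stage. CBMEM_ID_CONSOLE and struct console_state
 * are stand-ins for illustration.
 *
 *	struct console_state *s = cbmem_add(CBMEM_ID_CONSOLE, sizeof(*s));
 *	if (s != NULL)
 *		memset(s, 0, sizeof(*s));
 *
 *	// Later, possibly in another stage:
 *	s = cbmem_find(CBMEM_ID_CONSOLE);
 *	if (s == NULL)
 *		return;		// entry was never added
 */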

/* Remove a reserved region. Returns 0 on success, < 0 on error. Note: A region
 * cannot be removed unless it was the last one added. */
int cbmem_entry_remove(const struct cbmem_entry *entry)
{
	struct imd *imd;
	struct imd imd_backing;

	imd = imd_init_backing_with_recover(&imd_backing);

	return imd_entry_remove(imd, cbmem_to_imd(entry));
}
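
/*
 * Illustrative sketch (not compiled): the underlying imd allocates
 * entries in a stack-like fashion, so only the most recently added
 * entry can be removed. The ids and sizes below are hypothetical.
 *
 *	const struct cbmem_entry *a = cbmem_entry_add(ID_A, sz_a);
 *	const struct cbmem_entry *b = cbmem_entry_add(ID_B, sz_b);
 *	cbmem_entry_remove(a);	// < 0: a is not the last entry
 *	cbmem_entry_remove(b);	// 0: success
 *	cbmem_entry_remove(a);	// 0: a is now the last entry
 */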

u64 cbmem_entry_size(const struct cbmem_entry *entry)
{
	struct imd *imd;
	struct imd imd_backing;

	imd = imd_init_backing_with_recover(&imd_backing);

	return imd_entry_size(imd, cbmem_to_imd(entry));
}

void *cbmem_entry_start(const struct cbmem_entry *entry)
{
	struct imd *imd;
	struct imd imd_backing;

	imd = imd_init_backing_with_recover(&imd_backing);

	return imd_entry_at(imd, cbmem_to_imd(entry));
}

#if ENV_RAMSTAGE
void cbmem_add_bootmem(void)
{
	void *base = NULL;
	size_t size = 0;

	imd_region_used(cbmem_get_imd(), &base, &size);
	bootmem_add_range((uintptr_t)base, size, LB_MEM_TABLE);
}

void cbmem_list(void)
{
	static const struct imd_lookup lookup[] = { CBMEM_ID_TO_NAME_TABLE };

	imd_print_entries(cbmem_get_imd(), lookup, ARRAY_SIZE(lookup));
}
#endif /* ENV_RAMSTAGE */