blob: fa6533484fb83dee6996329caeef8f23a2fb480e [file] [log] [blame]
Aaron Durbin0dff57d2015-03-05 21:18:33 -06001/*
2 * This file is part of the coreboot project.
3 *
4 * Copyright (C) 2013 Google, Inc.
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; version 2 of the License.
9 *
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
14 *
15 * You should have received a copy of the GNU General Public License
16 * along with this program; if not, write to the Free Software
17 * Foundation, Inc.
18 */
19
20#include <bootstate.h>
21#include <bootmem.h>
22#include <console/console.h>
23#include <cbmem.h>
24#include <imd.h>
25#include <rules.h>
26#include <string.h>
27#include <stdlib.h>
28#include <arch/early_variables.h>
29#if IS_ENABLED(CONFIG_ARCH_X86) && !IS_ENABLED(CONFIG_EARLY_CBMEM_INIT)
30#include <arch/acpi.h>
31#endif
32
Aaron Durbin0dff57d2015-03-05 21:18:33 -060033static inline struct imd *cbmem_get_imd(void)
34{
35 /* Only supply a backing store for imd in ramstage. */
36 if (ENV_RAMSTAGE) {
37 static struct imd imd_cbmem;
38 return &imd_cbmem;
39 }
40 return NULL;
41}
42
/* A cbmem_entry is just an imd_entry viewed through the public type. */
static inline const struct cbmem_entry *imd_to_cbmem(const struct imd_entry *e)
{
	const struct cbmem_entry *entry = (const struct cbmem_entry *)e;

	return entry;
}
47
/* Inverse of imd_to_cbmem(): view a public cbmem_entry as an imd_entry. */
static inline const struct imd_entry *cbmem_to_imd(const struct cbmem_entry *e)
{
	const struct imd_entry *entry = (const struct imd_entry *)e;

	return entry;
}
52
/* These are the different situations to handle:
 *
 * CONFIG_EARLY_CBMEM_INIT:
 *	In ramstage, cbmem_initialize() attempts a recovery of the
 *	cbmem region set up by romstage. It uses cbmem_top() as the
 *	starting point of recovery.
 *
 *	In romstage, similar to ramstage, cbmem_initialize() needs to
 *	attempt recovery of the cbmem area using cbmem_top() as the limit.
 *	cbmem_initialize_empty() initializes an empty cbmem area from
 *	cbmem_top().
 */
65static struct imd *imd_init_backing(struct imd *backing)
66{
67 struct imd *imd;
68
69 imd = cbmem_get_imd();
70
71 if (imd != NULL)
72 return imd;
73
74 imd = backing;
75
76 return imd;
77}
78
79static struct imd *imd_init_backing_with_recover(struct imd *backing)
80{
81 struct imd *imd;
82
83 imd = imd_init_backing(backing);
84 if (!ENV_RAMSTAGE) {
Kyösti Mälkkie1fb0522015-05-26 00:30:10 +030085 imd_handle_init(imd, cbmem_top());
86
Aaron Durbin0dff57d2015-03-05 21:18:33 -060087 /* Need to partially recover all the time outside of ramstage
88 * because there's object storage outside of the stack. */
89 imd_handle_init_partial_recovery(imd);
90 }
91
92 return imd;
93}
94
/* Create a fresh, empty CBMEM area with no preallocated entry. */
void cbmem_initialize_empty(void)
{
	cbmem_initialize_empty_id_size(0, 0);
}
99
100void cbmem_initialize_empty_id_size(u32 id, u64 size)
101{
Aaron Durbin0dff57d2015-03-05 21:18:33 -0600102 struct imd *imd;
103 struct imd imd_backing;
Aaron Durbin41607a42015-06-09 13:54:10 -0500104 const int no_recovery = 0;
Aaron Durbin0dff57d2015-03-05 21:18:33 -0600105
106 imd = imd_init_backing(&imd_backing);
Kyösti Mälkkie1fb0522015-05-26 00:30:10 +0300107 imd_handle_init(imd, cbmem_top());
Aaron Durbin0dff57d2015-03-05 21:18:33 -0600108
109 printk(BIOS_DEBUG, "CBMEM:\n");
110
Lee Leahy522149c2015-05-08 11:33:55 -0700111 if (imd_create_tiered_empty(imd, CBMEM_ROOT_MIN_SIZE, CBMEM_LG_ALIGN,
112 CBMEM_SM_ROOT_SIZE, CBMEM_SM_ALIGN)) {
Aaron Durbin0dff57d2015-03-05 21:18:33 -0600113 printk(BIOS_DEBUG, "failed.\n");
114 return;
115 }
116
Lee Leahy522149c2015-05-08 11:33:55 -0700117 /* Add the specified range first */
118 if (size)
119 cbmem_add(id, size);
120
Aaron Durbin0dff57d2015-03-05 21:18:33 -0600121 /* Complete migration to CBMEM. */
Aaron Durbin41607a42015-06-09 13:54:10 -0500122 cbmem_run_init_hooks(no_recovery);
Aaron Durbin0dff57d2015-03-05 21:18:33 -0600123}
124
/* Recovery fallback: start over with an empty CBMEM area and record
 * that the S3 resume attempt failed. Always returns 1 (error). */
static inline int cbmem_fail_recovery(void)
{
	cbmem_initialize_empty();
	cbmem_fail_resume();
	return 1;
}
131
/* Recover an existing CBMEM area without preallocating an entry.
 * Returns 0 on successful recovery, 1 on failure. */
int cbmem_initialize(void)
{
	return cbmem_initialize_id_size(0, 0);
}
136
137int cbmem_initialize_id_size(u32 id, u64 size)
138{
Aaron Durbin0dff57d2015-03-05 21:18:33 -0600139 struct imd *imd;
140 struct imd imd_backing;
Aaron Durbin41607a42015-06-09 13:54:10 -0500141 const int recovery = 1;
Aaron Durbin0dff57d2015-03-05 21:18:33 -0600142
143 imd = imd_init_backing(&imd_backing);
Kyösti Mälkkie1fb0522015-05-26 00:30:10 +0300144 imd_handle_init(imd, cbmem_top());
Aaron Durbin0dff57d2015-03-05 21:18:33 -0600145
146 if (imd_recover(imd))
147 return 1;
148
149#if defined(__PRE_RAM__)
150 /*
151 * Lock the imd in romstage on a recovery. The assumption is that
152 * if the imd area was recovered in romstage then S3 resume path
153 * is being taken.
154 */
155 imd_lockdown(imd);
156#endif
157
Lee Leahy522149c2015-05-08 11:33:55 -0700158 /* Add the specified range first */
159 if (size)
160 cbmem_add(id, size);
161
Aaron Durbin0dff57d2015-03-05 21:18:33 -0600162 /* Complete migration to CBMEM. */
Aaron Durbin41607a42015-06-09 13:54:10 -0500163 cbmem_run_init_hooks(recovery);
Aaron Durbin0dff57d2015-03-05 21:18:33 -0600164
165 /* Recovery successful. */
166 return 0;
167}
168
/* On S3 wakeup attempt to recover the existing CBMEM area; on any other
 * boot start with an empty one. Returns nonzero only if a wakeup
 * recovery failed. */
int cbmem_recovery(int is_wakeup)
{
	if (is_wakeup)
		return cbmem_initialize();

	cbmem_initialize_empty();
	return 0;
}
178
179const struct cbmem_entry *cbmem_entry_add(u32 id, u64 size64)
180{
181 struct imd *imd;
182 struct imd imd_backing;
183 const struct imd_entry *e;
184
185 imd = imd_init_backing_with_recover(&imd_backing);
186
187 e = imd_entry_find_or_add(imd, id, size64);
188
189 return imd_to_cbmem(e);
190}
191
192void *cbmem_add(u32 id, u64 size)
193{
194 struct imd *imd;
195 struct imd imd_backing;
196 const struct imd_entry *e;
197
198 imd = imd_init_backing_with_recover(&imd_backing);
199
200 e = imd_entry_find_or_add(imd, id, size);
201
202 if (e == NULL)
203 return NULL;
204
205 return imd_entry_at(imd, e);
206}
207
208/* Retrieve a region provided a given id. */
209const struct cbmem_entry *cbmem_entry_find(u32 id)
210{
211 struct imd *imd;
212 struct imd imd_backing;
213 const struct imd_entry *e;
214
215 imd = imd_init_backing_with_recover(&imd_backing);
216
217 e = imd_entry_find(imd, id);
218
219 return imd_to_cbmem(e);
220}
221
222void *cbmem_find(u32 id)
223{
224 struct imd *imd;
225 struct imd imd_backing;
226 const struct imd_entry *e;
227
228 imd = imd_init_backing_with_recover(&imd_backing);
229
230 e = imd_entry_find(imd, id);
231
232 if (e == NULL)
233 return NULL;
234
235 return imd_entry_at(imd, e);
236}
237
238/* Remove a reserved region. Returns 0 on success, < 0 on error. Note: A region
239 * cannot be removed unless it was the last one added. */
240int cbmem_entry_remove(const struct cbmem_entry *entry)
241{
242 struct imd *imd;
243 struct imd imd_backing;
244
245 imd = imd_init_backing_with_recover(&imd_backing);
246
247 return imd_entry_remove(imd, cbmem_to_imd(entry));
248}
249
250u64 cbmem_entry_size(const struct cbmem_entry *entry)
251{
252 struct imd *imd;
253 struct imd imd_backing;
254
255 imd = imd_init_backing_with_recover(&imd_backing);
256
257 return imd_entry_size(imd, cbmem_to_imd(entry));
258}
259
260void *cbmem_entry_start(const struct cbmem_entry *entry)
261{
262 struct imd *imd;
263 struct imd imd_backing;
264
265 imd = imd_init_backing_with_recover(&imd_backing);
266
267 return imd_entry_at(imd, cbmem_to_imd(entry));
268}
269
#if ENV_RAMSTAGE
/* Reserve the memory occupied by CBMEM in the bootmem memory map so the
 * payload/OS tables mark it as LB_MEM_TABLE. */
void cbmem_add_bootmem(void)
{
	void *base = NULL;
	size_t size = 0;

	imd_region_used(cbmem_get_imd(), &base, &size);
	bootmem_add_range((uintptr_t)base, size, LB_MEM_TABLE);
}

/* Print all CBMEM entries, with symbolic names where known, to the console. */
void cbmem_list(void)
{
	static const struct imd_lookup lookup[] = { CBMEM_ID_TO_NAME_TABLE };

	imd_print_entries(cbmem_get_imd(), lookup, ARRAY_SIZE(lookup));
}
#endif /* ENV_RAMSTAGE */