/*
 * This file is part of the coreboot project.
 *
 * Copyright (C) 2013 Google, Inc.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; version 2 of the License.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc.
 */

#include <bootstate.h>
#include <bootmem.h>
#include <console/console.h>
#include <cbmem.h>
#include <imd.h>
#include <rules.h>
#include <string.h>
#include <stdlib.h>
#include <arch/early_variables.h>
#if IS_ENABLED(CONFIG_ARCH_X86) && !IS_ENABLED(CONFIG_EARLY_CBMEM_INIT)
#include <arch/acpi.h>
#endif
static inline struct imd *cbmem_get_imd(void)
{
	/* Only supply a backing store for imd in ramstage. */
	if (ENV_RAMSTAGE) {
		static struct imd imd_cbmem;
		return &imd_cbmem;
	}
	return NULL;
}

/*
 * x86 !CONFIG_EARLY_CBMEM_INIT platforms need to do the following in ramstage:
 * 1. Call set_top_of_ram() which in turn calls cbmem_set_top().
 * 2. Provide a get_top_of_ram() implementation.
 *
 * CONFIG_EARLY_CBMEM_INIT platforms just need to provide cbmem_top().
 */
void cbmem_set_top(void *ramtop)
{
	struct imd *imd = cbmem_get_imd();

	imd_handle_init(imd, ramtop);
}
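
/*
 * A minimal sketch of the contract above (not part of this file): the
 * ramtop_storage variable and the helper bodies are hypothetical,
 * illustrating how set_top_of_ram() would forward into cbmem_set_top().
 *
 *	static uintptr_t ramtop_storage;
 *
 *	void set_top_of_ram(uint64_t ramtop)
 *	{
 *		ramtop_storage = ramtop;
 *		cbmem_set_top((void *)(uintptr_t)ramtop);
 *	}
 *
 *	unsigned long get_top_of_ram(void)
 *	{
 *		return ramtop_storage;
 *	}
 */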

static inline const struct cbmem_entry *imd_to_cbmem(const struct imd_entry *e)
{
	return (const struct cbmem_entry *)e;
}

static inline const struct imd_entry *cbmem_to_imd(const struct cbmem_entry *e)
{
	return (const struct imd_entry *)e;
}

/* These are the different situations to handle:
 * CONFIG_EARLY_CBMEM_INIT:
 *	In ramstage, cbmem_initialize() attempts a recovery of the
 *	cbmem region set up by romstage. It uses cbmem_top() as the
 *	starting point of recovery.
 *
 *	In romstage, similar to ramstage, cbmem_initialize() needs to
 *	attempt recovery of the cbmem area using cbmem_top() as the limit.
 *	cbmem_initialize_empty() initializes an empty cbmem area from
 *	cbmem_top().
 */
static struct imd *imd_init_backing(struct imd *backing)
{
	struct imd *imd;

	imd = cbmem_get_imd();

	if (imd != NULL)
		return imd;

	imd = backing;

	return imd;
}

static struct imd *imd_init_backing_with_recover(struct imd *backing)
{
	struct imd *imd;

	imd = imd_init_backing(backing);
	if (!ENV_RAMSTAGE) {
		imd_handle_init(imd, cbmem_top());

		/* Need to partially recover all the time outside of ramstage
		 * because there's object storage outside of the stack. */
		imd_handle_init_partial_recovery(imd);
	}

	return imd;
}

void cbmem_initialize_empty(void)
{
	cbmem_initialize_empty_id_size(0, 0);
}

void cbmem_initialize_empty_id_size(u32 id, u64 size)
{
	struct imd *imd;
	struct imd imd_backing;

	imd = imd_init_backing(&imd_backing);
	imd_handle_init(imd, cbmem_top());

	printk(BIOS_DEBUG, "CBMEM:\n");

	if (imd_create_tiered_empty(imd, CBMEM_ROOT_MIN_SIZE, CBMEM_LG_ALIGN,
				CBMEM_SM_ROOT_SIZE, CBMEM_SM_ALIGN)) {
		printk(BIOS_DEBUG, "failed.\n");
		return;
	}

	/* Add the specified range first. */
	if (size)
		cbmem_add(id, size);

	/* Complete migration to CBMEM. */
	cbmem_run_init_hooks();
}
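
/*
 * Illustrative call sites (the size and the decision logic are
 * assumptions, not taken from this file): a CONFIG_EARLY_CBMEM_INIT
 * romstage on a normal boot would create a fresh area with
 *
 *	cbmem_initialize_empty();
 *
 * or reserve one region while creating it, e.g.
 *
 *	cbmem_initialize_empty_id_size(CBMEM_ID_ROMSTAGE_RAM_STACK, 0x2000);
 */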

static inline int cbmem_fail_recovery(void)
{
	cbmem_initialize_empty();
	cbmem_fail_resume();
	return 1;
}

int cbmem_initialize(void)
{
	return cbmem_initialize_id_size(0, 0);
}

int cbmem_initialize_id_size(u32 id, u64 size)
{
	struct imd *imd;
	struct imd imd_backing;

	imd = imd_init_backing(&imd_backing);
	imd_handle_init(imd, cbmem_top());

	if (imd_recover(imd))
		return 1;

#if defined(__PRE_RAM__)
	/*
	 * Lock the imd in romstage on a recovery. The assumption is that
	 * if the imd area was recovered in romstage then the S3 resume
	 * path is being taken.
	 */
	imd_lockdown(imd);
#endif

	/* Add the specified range first. */
	if (size)
		cbmem_add(id, size);

	/* Complete migration to CBMEM. */
	cbmem_run_init_hooks();

	/* Recovery successful. */
	return 0;
}

int cbmem_recovery(int is_wakeup)
{
	int rv = 0;

	if (!is_wakeup)
		cbmem_initialize_empty();
	else
		rv = cbmem_initialize();
	return rv;
}
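
/*
 * Sketch of a typical romstage call site; s3_wake and
 * platform_detected_s3_resume() are hypothetical placeholders for the
 * platform's own wake-source detection:
 *
 *	int s3_wake = platform_detected_s3_resume();
 *	if (cbmem_recovery(s3_wake))
 *		printk(BIOS_ERR, "CBMEM recovery failed.\n");
 *
 * A nonzero return means the area set up on the previous boot could not
 * be recovered.
 */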

const struct cbmem_entry *cbmem_entry_add(u32 id, u64 size64)
{
	struct imd *imd;
	struct imd imd_backing;
	const struct imd_entry *e;

	imd = imd_init_backing_with_recover(&imd_backing);

	e = imd_entry_find_or_add(imd, id, size64);

	return imd_to_cbmem(e);
}

void *cbmem_add(u32 id, u64 size)
{
	struct imd *imd;
	struct imd imd_backing;
	const struct imd_entry *e;

	imd = imd_init_backing_with_recover(&imd_backing);

	e = imd_entry_find_or_add(imd, id, size);

	if (e == NULL)
		return NULL;

	return imd_entry_at(imd, e);
}
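
/*
 * Allocation sketch: CBMEM_ID_CONSOLE is a real cbmem id, but the size
 * and the error handling here are illustrative only:
 *
 *	void *console_buf = cbmem_add(CBMEM_ID_CONSOLE, 4096);
 *	if (console_buf == NULL)
 *		printk(BIOS_ERR, "No space for CBMEM console.\n");
 *
 * Because the underlying call is imd_entry_find_or_add(), an existing
 * entry with the same id is returned instead of a new allocation.
 */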

/* Retrieve a region for a given id. */
const struct cbmem_entry *cbmem_entry_find(u32 id)
{
	struct imd *imd;
	struct imd imd_backing;
	const struct imd_entry *e;

	imd = imd_init_backing_with_recover(&imd_backing);

	e = imd_entry_find(imd, id);

	return imd_to_cbmem(e);
}

void *cbmem_find(u32 id)
{
	struct imd *imd;
	struct imd imd_backing;
	const struct imd_entry *e;

	imd = imd_init_backing_with_recover(&imd_backing);

	e = imd_entry_find(imd, id);

	if (e == NULL)
		return NULL;

	return imd_entry_at(imd, e);
}
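
/*
 * Lookup sketch for a later stage (same illustrative id as above):
 *
 *	void *console_buf = cbmem_find(CBMEM_ID_CONSOLE);
 *	if (console_buf == NULL)
 *		printk(BIOS_DEBUG, "Console buffer was never created.\n");
 *
 * Unlike cbmem_add(), cbmem_find() never allocates.
 */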

/* Remove a reserved region. Returns 0 on success, < 0 on error. Note: a region
 * cannot be removed unless it was the last one added. */
int cbmem_entry_remove(const struct cbmem_entry *entry)
{
	struct imd *imd;
	struct imd imd_backing;

	imd = imd_init_backing_with_recover(&imd_backing);

	return imd_entry_remove(imd, cbmem_to_imd(entry));
}

u64 cbmem_entry_size(const struct cbmem_entry *entry)
{
	struct imd *imd;
	struct imd imd_backing;

	imd = imd_init_backing_with_recover(&imd_backing);

	return imd_entry_size(imd, cbmem_to_imd(entry));
}

void *cbmem_entry_start(const struct cbmem_entry *entry)
{
	struct imd *imd;
	struct imd imd_backing;

	imd = imd_init_backing_with_recover(&imd_backing);

	return imd_entry_at(imd, cbmem_to_imd(entry));
}
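
/*
 * The entry-based API end to end; the id, size, and memset() use are
 * illustrative. Note the constraint documented above: only the most
 * recently added entry can be removed.
 *
 *	const struct cbmem_entry *e = cbmem_entry_add(0x12345678, 256);
 *	if (e != NULL) {
 *		memset(cbmem_entry_start(e), 0, cbmem_entry_size(e));
 *		if (cbmem_entry_remove(e) < 0)
 *			printk(BIOS_ERR, "Entry was not the last added.\n");
 *	}
 */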

#if ENV_RAMSTAGE
void cbmem_add_bootmem(void)
{
	void *base = NULL;
	size_t size = 0;

	imd_region_used(cbmem_get_imd(), &base, &size);
	bootmem_add_range((uintptr_t)base, size, LB_MEM_TABLE);
}

void cbmem_list(void)
{
	static const struct imd_lookup lookup[] = { CBMEM_ID_TO_NAME_TABLE };

	imd_print_entries(cbmem_get_imd(), lookup, ARRAY_SIZE(lookup));
}
#endif /* ENV_RAMSTAGE */