/*
 * This file is part of the coreboot project.
 *
 * Copyright (C) 2013 Google, Inc.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; version 2 of the License.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc.
 */

#include <bootstate.h>
#include <bootmem.h>
#include <console/console.h>
#include <cbmem.h>
#include <imd.h>
#include <rules.h>
#include <string.h>
#include <stdlib.h>
#include <arch/early_variables.h>
#if IS_ENABLED(CONFIG_ARCH_X86) && !IS_ENABLED(CONFIG_EARLY_CBMEM_INIT)
#include <arch/acpi.h>
#endif

/* The root region is at least DYN_CBMEM_ALIGN_SIZE. */
#define ROOT_MIN_SIZE DYN_CBMEM_ALIGN_SIZE
#define LG_ALIGN ROOT_MIN_SIZE
/* Small allocation parameters. */
#define SM_ROOT_SIZE 1024
#define SM_ALIGN 32

static inline struct imd *cbmem_get_imd(void)
{
	/* Only supply a backing store for imd in ramstage. */
	if (ENV_RAMSTAGE) {
		static struct imd imd_cbmem;
		return &imd_cbmem;
	}
	return NULL;
}

/*
 * x86 !CONFIG_EARLY_CBMEM_INIT platforms need to do the following in ramstage:
 * 1. Call set_top_of_ram() which in turn calls cbmem_set_top().
 * 2. Provide a get_top_of_ram() implementation.
 *
 * CONFIG_EARLY_CBMEM_INIT platforms just need to provide cbmem_top().
 */
void cbmem_set_top(void *ramtop)
{
	struct imd *imd = cbmem_get_imd();

	imd_handle_init(imd, ramtop);
}
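
/*
 * Illustration only (hypothetical chipset code; the tolud variable,
 * chipset_read_tolud() and the exact signatures are assumptions, not part
 * of this file's contract). Step 1 happens in the chipset's ramstage
 * memory init; step 2 is the lookup used afterwards:
 *
 *	static uintptr_t tolud;
 *
 *	static void chipset_ram_init(void)
 *	{
 *		tolud = chipset_read_tolud();
 *		set_top_of_ram(tolud);
 *	}
 *
 *	void *get_top_of_ram(void)
 *	{
 *		return (void *)tolud;
 *	}
 */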

static inline const struct cbmem_entry *imd_to_cbmem(const struct imd_entry *e)
{
	return (const struct cbmem_entry *)e;
}

static inline const struct imd_entry *cbmem_to_imd(const struct cbmem_entry *e)
{
	return (const struct imd_entry *)e;
}

/* These are the different situations to handle:
 *
 * CONFIG_EARLY_CBMEM_INIT:
 *	In ramstage, cbmem_initialize() attempts a recovery of the
 *	cbmem region set up by romstage, using cbmem_top() as the
 *	starting point of recovery.
 *
 *	In romstage, similar to ramstage, cbmem_initialize() needs to
 *	attempt recovery of the cbmem area using cbmem_top() as the limit.
 *	cbmem_initialize_empty() initializes an empty cbmem area from
 *	cbmem_top().
 */
static struct imd *imd_init_backing(struct imd *backing)
{
	struct imd *imd;

	imd = cbmem_get_imd();

	/* Use the ramstage-static imd object when one is supplied. */
	if (imd != NULL)
		return imd;

	/* Otherwise fall back on the caller-provided backing store. */
	return backing;
}

static struct imd *imd_init_backing_with_recover(struct imd *backing)
{
	struct imd *imd;

	imd = imd_init_backing(backing);
	if (!ENV_RAMSTAGE) {
		/* Early cbmem init platforms need to always use cbmem_top(). */
		if (IS_ENABLED(CONFIG_EARLY_CBMEM_INIT))
			imd_handle_init(imd, cbmem_top());
		/* Need to partially recover all the time outside of ramstage
		 * because there's object storage outside of the stack. */
		imd_handle_init_partial_recovery(imd);
	}

	return imd;
}

void cbmem_initialize_empty(void)
{
	struct imd *imd;
	struct imd imd_backing;

	imd = imd_init_backing(&imd_backing);

	/* Early cbmem init platforms need to always use cbmem_top(). */
	if (IS_ENABLED(CONFIG_EARLY_CBMEM_INIT))
		imd_handle_init(imd, cbmem_top());

	printk(BIOS_DEBUG, "CBMEM:\n");

	if (imd_create_tiered_empty(imd, ROOT_MIN_SIZE, LG_ALIGN,
					SM_ROOT_SIZE, SM_ALIGN)) {
		printk(BIOS_DEBUG, "failed.\n");
		return;
	}

	/* Complete migration to CBMEM. */
	cbmem_run_init_hooks();
}
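
/*
 * Rough picture of what imd_create_tiered_empty() sets up with the
 * parameters above (an illustration, not a layout contract; imd owns the
 * details): entries grow downward from cbmem_top(), large allocations are
 * carved at LG_ALIGN granularity from a root of at least ROOT_MIN_SIZE,
 * and allocations small enough to fit are packed into a nested
 * SM_ROOT_SIZE region at SM_ALIGN granularity:
 *
 *	cbmem_top() ->	+--------------------------+
 *			| root / large entries     |  LG_ALIGN-aligned
 *			| ...                      |
 *			| small-object region      |  SM_ROOT_SIZE total,
 *			|   small entries ...      |  SM_ALIGN-aligned
 *			+--------------------------+
 */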

static inline int cbmem_fail_recovery(void)
{
	cbmem_initialize_empty();
	cbmem_fail_resume();
	return 1;
}
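
/*
 * Intended use of the helper above (sketch of a hypothetical caller): if
 * recovery fails, fall back to a fresh cbmem area and flag the resume as
 * failed so the platform does not resume into stale state:
 *
 *	if (cbmem_initialize())
 *		return cbmem_fail_recovery();
 */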

int cbmem_initialize(void)
{
	struct imd *imd;
	struct imd imd_backing;

	imd = imd_init_backing(&imd_backing);

	/* Early cbmem init platforms need to always use cbmem_top(). */
	if (IS_ENABLED(CONFIG_EARLY_CBMEM_INIT))
		imd_handle_init(imd, cbmem_top());

	if (imd_recover(imd))
		return 1;

#if defined(__PRE_RAM__)
	/*
	 * Lock the imd in romstage on a recovery. The assumption is that
	 * if the imd area was recovered in romstage then the S3 resume
	 * path is being taken.
	 */
	imd_lockdown(imd);
#endif

	/* Complete migration to CBMEM. */
	cbmem_run_init_hooks();

	/* Recovery successful. */
	return 0;
}

int cbmem_recovery(int is_wakeup)
{
	int rv = 0;
	if (!is_wakeup)
		cbmem_initialize_empty();
	else
		rv = cbmem_initialize();
	return rv;
}
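
/*
 * Example call site (hypothetical; the S3 predicate is an assumption, with
 * acpi_is_wakeup_s3() being one such source on x86):
 *
 *	void romstage_cbmem_setup(void)
 *	{
 *		cbmem_recovery(acpi_is_wakeup_s3());
 *	}
 */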

const struct cbmem_entry *cbmem_entry_add(u32 id, u64 size64)
{
	struct imd *imd;
	struct imd imd_backing;
	const struct imd_entry *e;

	imd = imd_init_backing_with_recover(&imd_backing);

	e = imd_entry_find_or_add(imd, id, size64);

	return imd_to_cbmem(e);
}

void *cbmem_add(u32 id, u64 size)
{
	struct imd *imd;
	struct imd imd_backing;
	const struct imd_entry *e;

	imd = imd_init_backing_with_recover(&imd_backing);

	e = imd_entry_find_or_add(imd, id, size);

	if (e == NULL)
		return NULL;

	return imd_entry_at(imd, e);
}
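
/*
 * Example: adding (or re-finding) a region for a console buffer.
 * CBMEM_ID_CONSOLE comes from the cbmem id table; the size is illustrative:
 *
 *	void *cons = cbmem_add(CBMEM_ID_CONSOLE, 4096);
 *	if (cons == NULL)
 *		printk(BIOS_ERR, "CBMEM console allocation failed.\n");
 */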

/* Retrieve a region for the given id. */
const struct cbmem_entry *cbmem_entry_find(u32 id)
{
	struct imd *imd;
	struct imd imd_backing;
	const struct imd_entry *e;

	imd = imd_init_backing_with_recover(&imd_backing);

	e = imd_entry_find(imd, id);

	return imd_to_cbmem(e);
}

void *cbmem_find(u32 id)
{
	struct imd *imd;
	struct imd imd_backing;
	const struct imd_entry *e;

	imd = imd_init_backing_with_recover(&imd_backing);

	e = imd_entry_find(imd, id);

	if (e == NULL)
		return NULL;

	return imd_entry_at(imd, e);
}
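
/*
 * Example: a later stage looking the same entry back up. cbmem_find() does
 * not allocate; a NULL return means the entry was never added:
 *
 *	void *cons = cbmem_find(CBMEM_ID_CONSOLE);
 *	if (cons == NULL)
 *		return;
 */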

/* Remove a reserved region. Returns 0 on success, < 0 on error. Note: A region
 * cannot be removed unless it was the last one added. */
int cbmem_entry_remove(const struct cbmem_entry *entry)
{
	struct imd *imd;
	struct imd imd_backing;

	imd = imd_init_backing_with_recover(&imd_backing);

	return imd_entry_remove(imd, cbmem_to_imd(entry));
}
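
/*
 * Example of the last-in-first-out constraint (CBMEM_ID_SCRATCH is a
 * hypothetical id used only for illustration): removal only succeeds while
 * the entry is still the most recently added one:
 *
 *	const struct cbmem_entry *e = cbmem_entry_add(CBMEM_ID_SCRATCH, 256);
 *
 *	memset(cbmem_entry_start(e), 0, cbmem_entry_size(e));
 *
 *	if (cbmem_entry_remove(e) < 0)
 *		printk(BIOS_ERR, "scratch entry was not the last added\n");
 */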

u64 cbmem_entry_size(const struct cbmem_entry *entry)
{
	struct imd *imd;
	struct imd imd_backing;

	imd = imd_init_backing_with_recover(&imd_backing);

	return imd_entry_size(imd, cbmem_to_imd(entry));
}

void *cbmem_entry_start(const struct cbmem_entry *entry)
{
	struct imd *imd;
	struct imd imd_backing;

	imd = imd_init_backing_with_recover(&imd_backing);

	return imd_entry_at(imd, cbmem_to_imd(entry));
}

#if ENV_RAMSTAGE
void cbmem_add_bootmem(void)
{
	void *base = NULL;
	size_t size = 0;

	imd_region_used(cbmem_get_imd(), &base, &size);
	bootmem_add_range((uintptr_t)base, size, LB_MEM_TABLE);
}

void cbmem_list(void)
{
	static const struct imd_lookup lookup[] = { CBMEM_ID_TO_NAME_TABLE };

	imd_print_entries(cbmem_get_imd(), lookup, ARRAY_SIZE(lookup));
}
#endif /* ENV_RAMSTAGE */