/*
 * This file is part of the coreboot project.
 *
 * Copyright (C) 2013 Google, Inc.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; version 2 of the License.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */

#include <bootstate.h>
#include <bootmem.h>
#include <console/console.h>
#include <cbmem.h>
#include <imd.h>
#include <rules.h>
#include <string.h>
#include <stdlib.h>
#include <arch/early_variables.h>
#if IS_ENABLED(CONFIG_ARCH_X86) && !IS_ENABLED(CONFIG_EARLY_CBMEM_INIT)
#include <arch/acpi.h>
#endif

/*
 * We need special handling on x86 before ramstage because we cannot use global
 * variables (we're executing in-place from flash so we don't have a writable
 * data segment, and we cannot use CAR_GLOBAL here since that mechanism itself
 * is dependent on CBMEM). Therefore, we have to always try to partially recover
 * CBMEM from cbmem_top() whenever we try to access it. In other environments
 * we're not so constrained and just keep the backing imd struct in a global.
 * This also means that we can easily tell whether CBMEM has explicitly been
 * initialized or recovered yet on those platforms, and don't need to put the
 * burden on board or chipset code to tell us by returning NULL from cbmem_top()
 * before that point.
 */
#define CAN_USE_GLOBALS (!IS_ENABLED(CONFIG_ARCH_X86) || ENV_RAMSTAGE)

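/*
 * Return the stage-persistent imd instance where globals are usable, or
 * NULL to signal that the caller must recover state via cbmem_top().
 */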
static inline struct imd *cbmem_get_imd(void)
{
	if (CAN_USE_GLOBALS) {
		static struct imd imd_cbmem;
		return &imd_cbmem;
	}
	return NULL;
}

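/*
 * struct cbmem_entry is the public, opaque handle for the internal
 * struct imd_entry; these helpers just convert between the two views.
 */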
static inline const struct cbmem_entry *imd_to_cbmem(const struct imd_entry *e)
{
	return (const struct cbmem_entry *)e;
}

static inline const struct imd_entry *cbmem_to_imd(const struct cbmem_entry *e)
{
	return (const struct imd_entry *)e;
}

/* These are the different situations to handle:
 *
 * CONFIG_EARLY_CBMEM_INIT:
 *	In ramstage, cbmem_initialize() attempts a recovery of the
 *	cbmem region set up by romstage. It uses cbmem_top() as the
 *	starting point of recovery.
 *
 *	In romstage, similar to ramstage, cbmem_initialize() needs to
 *	attempt recovery of the cbmem area using cbmem_top() as the
 *	limit. cbmem_initialize_empty() initializes an empty cbmem
 *	area from cbmem_top().
 */
static struct imd *imd_init_backing(struct imd *backing)
{
	struct imd *imd;

	imd = cbmem_get_imd();

	if (imd != NULL)
		return imd;

	imd = backing;

	return imd;
}

static struct imd *imd_init_backing_with_recover(struct imd *backing)
{
	struct imd *imd;

	imd = imd_init_backing(backing);
	if (!CAN_USE_GLOBALS) {
		/* Always partially recover if we can't keep track of whether
		 * we have already initialized CBMEM in this stage. */
		imd_handle_init(imd, cbmem_top());
		imd_handle_init_partial_recovery(imd);
	}

	return imd;
}

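/* Create a brand-new, empty CBMEM with no initial entry reserved. */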
void cbmem_initialize_empty(void)
{
	cbmem_initialize_empty_id_size(0, 0);
}

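/*
 * Create an empty CBMEM and, if size is non-zero, reserve an initial
 * entry of the given id inside it. A minimal usage sketch (the id and
 * size here are hypothetical, purely for illustration):
 *
 *	cbmem_initialize_empty_id_size(CBMEM_ID_NONE, 8 * 1024);
 */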
void cbmem_initialize_empty_id_size(u32 id, u64 size)
{
	struct imd *imd;
	struct imd imd_backing;
	const int no_recovery = 0;

	imd = imd_init_backing(&imd_backing);
	imd_handle_init(imd, cbmem_top());

	printk(BIOS_DEBUG, "CBMEM:\n");

	if (imd_create_tiered_empty(imd, CBMEM_ROOT_MIN_SIZE, CBMEM_LG_ALIGN,
					CBMEM_SM_ROOT_SIZE, CBMEM_SM_ALIGN)) {
		printk(BIOS_DEBUG, "failed.\n");
		return;
	}

	/* Add the specified range first */
	if (size)
		cbmem_add(id, size);

	/* Complete migration to CBMEM. */
	cbmem_run_init_hooks(no_recovery);
}

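/*
 * Fall back to a fresh CBMEM when recovery fails, and flag the failed
 * resume so the boot continues down the non-S3 path.
 */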
static inline int cbmem_fail_recovery(void)
{
	cbmem_initialize_empty();
	cbmem_fail_resume();
	return 1;
}

int cbmem_initialize(void)
{
	return cbmem_initialize_id_size(0, 0);
}

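/*
 * Recover an existing CBMEM, optionally reserving an initial entry of
 * the given id/size. Returns 0 on successful recovery, 1 otherwise.
 */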
int cbmem_initialize_id_size(u32 id, u64 size)
{
	struct imd *imd;
	struct imd imd_backing;
	const int recovery = 1;

	imd = imd_init_backing(&imd_backing);
	imd_handle_init(imd, cbmem_top());

	if (imd_recover(imd))
		return 1;

#if defined(__PRE_RAM__)
	/*
	 * Lock the imd in romstage on a recovery. The assumption is that
	 * if the imd area was recovered in romstage then the S3 resume
	 * path is being taken.
	 */
	imd_lockdown(imd);
#endif

	/* Add the specified range first */
	if (size)
		cbmem_add(id, size);

	/* Complete migration to CBMEM. */
	cbmem_run_init_hooks(recovery);

	/* Recovery successful. */
	return 0;
}

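/*
 * Single entry point for S3-aware setup: recover CBMEM on wakeup, start
 * empty otherwise. Returns non-zero if a wakeup recovery failed.
 */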
int cbmem_recovery(int is_wakeup)
{
	int rv = 0;
	if (!is_wakeup)
		cbmem_initialize_empty();
	else
		rv = cbmem_initialize();
	return rv;
}

const struct cbmem_entry *cbmem_entry_add(u32 id, u64 size64)
{
	struct imd *imd;
	struct imd imd_backing;
	const struct imd_entry *e;

	imd = imd_init_backing_with_recover(&imd_backing);

	e = imd_entry_find_or_add(imd, id, size64);

	return imd_to_cbmem(e);
}

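/*
 * Find or allocate an entry and return a pointer to its memory, or NULL
 * on failure. A minimal usage sketch (hypothetical id and size, purely
 * for illustration):
 *
 *	void *buf = cbmem_add(CBMEM_ID_NONE, 4096);
 *	if (buf != NULL)
 *		memset(buf, 0, 4096);
 */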
void *cbmem_add(u32 id, u64 size)
{
	struct imd *imd;
	struct imd imd_backing;
	const struct imd_entry *e;

	imd = imd_init_backing_with_recover(&imd_backing);

	e = imd_entry_find_or_add(imd, id, size);

	if (e == NULL)
		return NULL;

	return imd_entry_at(imd, e);
}

/* Retrieve a region for a given id. */
const struct cbmem_entry *cbmem_entry_find(u32 id)
{
	struct imd *imd;
	struct imd imd_backing;
	const struct imd_entry *e;

	imd = imd_init_backing_with_recover(&imd_backing);

	e = imd_entry_find(imd, id);

	return imd_to_cbmem(e);
}

void *cbmem_find(u32 id)
{
	struct imd *imd;
	struct imd imd_backing;
	const struct imd_entry *e;

	imd = imd_init_backing_with_recover(&imd_backing);

	e = imd_entry_find(imd, id);

	if (e == NULL)
		return NULL;

	return imd_entry_at(imd, e);
}

/* Remove a reserved region. Returns 0 on success, < 0 on error. Note: A region
 * cannot be removed unless it was the last one added. */
int cbmem_entry_remove(const struct cbmem_entry *entry)
{
	struct imd *imd;
	struct imd imd_backing;

	imd = imd_init_backing_with_recover(&imd_backing);

	return imd_entry_remove(imd, cbmem_to_imd(entry));
}

u64 cbmem_entry_size(const struct cbmem_entry *entry)
{
	struct imd *imd;
	struct imd imd_backing;

	imd = imd_init_backing_with_recover(&imd_backing);

	return imd_entry_size(imd, cbmem_to_imd(entry));
}

void *cbmem_entry_start(const struct cbmem_entry *entry)
{
	struct imd *imd;
	struct imd imd_backing;

	imd = imd_init_backing_with_recover(&imd_backing);

	return imd_entry_at(imd, cbmem_to_imd(entry));
}

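/* Report the base address and size of the memory CBMEM currently occupies. */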
void cbmem_region_used(uintptr_t *base, size_t *size)
{
	void *baseptr;
	imd_region_used(cbmem_get_imd(), &baseptr, size);
	*base = (uintptr_t)baseptr;
}

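/* Reserve the CBMEM region as table memory in the bootmem memory map. */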
void cbmem_add_bootmem(void)
{
	uintptr_t base = 0;
	size_t size = 0;

	cbmem_region_used(&base, &size);
	bootmem_add_range(base, size, LB_MEM_TABLE);
}

#if ENV_RAMSTAGE || (IS_ENABLED(CONFIG_EARLY_CBMEM_LIST) \
	&& (ENV_POSTCAR || ENV_ROMSTAGE))
/*
 * -fdata-sections doesn't work so well on read-only strings. They all
 * get put in the same section even though those strings may never be
 * referenced in the final binary.
 */
void cbmem_list(void)
{
	static const struct imd_lookup lookup[] = { CBMEM_ID_TO_NAME_TABLE };
	struct imd *imd;
	struct imd imd_backing;

	imd = imd_init_backing_with_recover(&imd_backing);
	imd_print_entries(imd, lookup, ARRAY_SIZE(lookup));
}
#endif

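/*
 * Publish each CBMEM entry (skipping the imd root/small metadata areas)
 * as an LB_TAG_CBMEM_ENTRY record in the coreboot table.
 */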
void cbmem_add_records_to_cbtable(struct lb_header *header)
{
	struct imd_cursor cursor;
	struct imd *imd;

	imd = cbmem_get_imd();

	if (imd_cursor_init(imd, &cursor))
		return;

	while (1) {
		const struct imd_entry *e;
		struct lb_cbmem_entry *lbe;
		uint32_t id;

		e = imd_cursor_next(&cursor);

		if (e == NULL)
			break;

		id = imd_entry_id(imd, e);
		/* Don't add these metadata entries. */
		if (id == CBMEM_ID_IMD_ROOT || id == CBMEM_ID_IMD_SMALL)
			continue;

		lbe = (struct lb_cbmem_entry *)lb_new_record(header);
		lbe->tag = LB_TAG_CBMEM_ENTRY;
		lbe->size = sizeof(*lbe);
		lbe->address = (uintptr_t)imd_entry_at(imd, e);
		lbe->entry_size = imd_entry_size(imd, e);
		lbe->id = id;
	}
}