/*
 * This file is part of the coreboot project.
 *
 * Copyright (C) 2013 Google, Inc.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; version 2 of the License.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */

#include <assert.h>
#include <boot/coreboot_tables.h>
#include <bootstate.h>
#include <bootmem.h>
#include <console/console.h>
#include <cbmem.h>
#include <imd.h>
#include <lib.h>
#include <stdlib.h>
#include <arch/early_variables.h>

/*
 * We need special handling on x86 where CAR global migration is employed.
 * True globals cannot be used in that circumstance because CAR is where
 * the globals are backed -- creating a circular dependency. On non-CAR
 * platforms, and in any stage that executes purely out of RAM, globals
 * are free to be used. On CAR platforms that don't migrate globals the
 * as-linked globals can be used, but they need special decoration using
 * CAR_GLOBAL. That ensures proper object placement in conjunction with
 * the linker.
 *
 * On the CAR global migration platforms we always have to try to
 * partially recover CBMEM from cbmem_top() whenever we try to access it.
 * In other environments we're not so constrained and just keep the
 * backing imd struct in a global. This also means that we can easily
 * tell whether CBMEM has been explicitly initialized or recovered yet on
 * those platforms, and don't need to put the burden on board or chipset
 * code to tell us by returning NULL from cbmem_top() before that point.
 */
#define CAN_USE_GLOBALS \
	(!CONFIG(ARCH_X86) || ENV_RAMSTAGE || ENV_POSTCAR || \
	 !CONFIG(CAR_GLOBAL_MIGRATION))

/* The program loader passes on cbmem_top, and the program entry point
   has to fill in the _cbmem_top_ptr symbol based on the calling
   arguments. */
uintptr_t _cbmem_top_ptr;

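/*
 * Return the exclusive upper boundary of the CBMEM area. In romstage, and
 * in postcar/ramstage when RAMSTAGE_CBMEM_TOP_ARG is not used, the value
 * is asked of the chipset once via cbmem_top_chipset() and then cached;
 * otherwise it was handed over by the previous stage through
 * _cbmem_top_ptr.
 */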
void *cbmem_top(void)
{
	if (ENV_ROMSTAGE
	    || ((ENV_POSTCAR || ENV_RAMSTAGE)
		&& !CONFIG(RAMSTAGE_CBMEM_TOP_ARG))) {
		MAYBE_STATIC_BSS void *top = NULL;
		if (top)
			return top;
		top = cbmem_top_chipset();
		return top;
	}
	if ((ENV_POSTCAR || ENV_RAMSTAGE) && CONFIG(RAMSTAGE_CBMEM_TOP_ARG))
		return (void *)_cbmem_top_ptr;

	dead_code();
}
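
/*
 * Illustrative layout (addresses are made up for the sketch):
 *
 *	0x7f000000	<- cbmem_top()
 *	   ...		imd root block and large-alignment entries,
 *			allocated downward from the top
 *	   ...		small-alignment entries in their own sub-region
 *
 * Entries grow toward lower addresses, which is why only the most
 * recently added entry can be removed (see cbmem_entry_remove()).
 */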
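
/* Return the per-stage global imd, or NULL when this stage cannot use
   globals and the caller has to fall back to stack backing plus
   recovery. */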
static inline struct imd *cbmem_get_imd(void)
{
	if (CAN_USE_GLOBALS) {
		static struct imd imd_cbmem CAR_GLOBAL;
		return &imd_cbmem;
	}
	return NULL;
}

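/* struct cbmem_entry is just an opaque view of the internal struct
   imd_entry; these helpers convert between the public and internal
   types. */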
static inline const struct cbmem_entry *imd_to_cbmem(const struct imd_entry *e)
{
	return (const struct cbmem_entry *)e;
}

static inline const struct imd_entry *cbmem_to_imd(const struct cbmem_entry *e)
{
	return (const struct imd_entry *)e;
}

/* These are the different situations to handle:
 *
 * In ramstage cbmem_initialize() attempts a recovery of the
 * cbmem region set up by romstage. It uses cbmem_top() as the
 * starting point of recovery.
 *
 * In romstage, similar to ramstage, cbmem_initialize() needs to
 * attempt recovery of the cbmem area using cbmem_top() as the limit.
 * cbmem_initialize_empty() initializes an empty cbmem area from
 * cbmem_top().
 */
static struct imd *imd_init_backing(struct imd *backing)
{
	struct imd *imd;

	imd = cbmem_get_imd();

	if (imd != NULL)
		return imd;

	imd = backing;

	return imd;
}

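/* Like imd_init_backing(), but when globals are unavailable also
   re-attach the imd to cbmem_top() and attempt a partial recovery,
   since there is no record of whether this stage has already
   initialized CBMEM. */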
static struct imd *imd_init_backing_with_recover(struct imd *backing)
{
	struct imd *imd;

	imd = imd_init_backing(backing);
	if (!CAN_USE_GLOBALS) {
		/* Always partially recover if we can't keep track of whether
		 * we have already initialized CBMEM in this stage. */
		imd_handle_init(imd, cbmem_top());
		imd_handle_init_partial_recovery(imd);
	}

	return imd;
}

void cbmem_initialize_empty(void)
{
	cbmem_initialize_empty_id_size(0, 0);
}

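/* One-time hook called when CBMEM first comes up; platforms override it
   to set up whatever backs cbmem_top(). The default is a no-op. */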
void __weak cbmem_top_init(void)
{
}

static void cbmem_top_init_once(void)
{
	/* Call the one-time hook on the expected cbmem init during boot.
	   This sequence assumes the first init call is in romstage. */
	if (!ENV_ROMSTAGE)
		return;

	cbmem_top_init();

	/* The test is only effective on x86 and when the address hits
	   UC memory. */
	if (ENV_X86)
		quick_ram_check_or_die((uintptr_t)cbmem_top() - sizeof(u32));
}

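/* Create a brand new CBMEM area below cbmem_top(), optionally
   pre-allocating one entry of the given id and size, and then run the
   init hooks with is_recovery = 0. */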
void cbmem_initialize_empty_id_size(u32 id, u64 size)
{
	struct imd *imd;
	struct imd imd_backing;
	const int no_recovery = 0;

	cbmem_top_init_once();

	imd = imd_init_backing(&imd_backing);
	imd_handle_init(imd, cbmem_top());

	printk(BIOS_DEBUG, "CBMEM:\n");

	if (imd_create_tiered_empty(imd, CBMEM_ROOT_MIN_SIZE, CBMEM_LG_ALIGN,
				    CBMEM_SM_ROOT_SIZE, CBMEM_SM_ALIGN)) {
		printk(BIOS_DEBUG, "failed.\n");
		return;
	}

	/* Add the specified range first */
	if (size)
		cbmem_add(id, size);

	/* Complete migration to CBMEM. */
	cbmem_run_init_hooks(no_recovery);
}

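/* Attempt to recover an existing CBMEM area below cbmem_top().
   Returns 0 if recovery succeeded, 1 if no valid area was found. */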
int cbmem_initialize(void)
{
	return cbmem_initialize_id_size(0, 0);
}

int cbmem_initialize_id_size(u32 id, u64 size)
{
	struct imd *imd;
	struct imd imd_backing;
	const int recovery = 1;

	cbmem_top_init_once();

	imd = imd_init_backing(&imd_backing);
	imd_handle_init(imd, cbmem_top());

	if (imd_recover(imd))
		return 1;

	/*
	 * Lock the imd in romstage on a recovery. The assumption is that
	 * if the imd area was recovered in romstage then the S3 resume
	 * path is being taken.
	 */
	if (ENV_ROMSTAGE)
		imd_lockdown(imd);

	/* Add the specified range first */
	if (size)
		cbmem_add(id, size);

	/* Complete migration to CBMEM. */
	cbmem_run_init_hooks(recovery);

	/* Recovery successful. */
	return 0;
}

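/* Boot-path helper: on S3 wakeup recover the existing area (its
   contents must survive the suspend), otherwise start from an empty
   one. Returns nonzero if a wakeup recovery failed, which callers
   typically treat as a failed resume. */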
int cbmem_recovery(int is_wakeup)
{
	int rv = 0;
	if (!is_wakeup)
		cbmem_initialize_empty();
	else
		rv = cbmem_initialize();
	return rv;
}

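/* Return the entry with the given id, creating it with the given size
   if it does not exist yet. Returns NULL on failure. */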
const struct cbmem_entry *cbmem_entry_add(u32 id, u64 size64)
{
	struct imd *imd;
	struct imd imd_backing;
	const struct imd_entry *e;

	imd = imd_init_backing_with_recover(&imd_backing);

	e = imd_entry_find_or_add(imd, id, size64);

	return imd_to_cbmem(e);
}

void *cbmem_add(u32 id, u64 size)
{
	struct imd *imd;
	struct imd imd_backing;
	const struct imd_entry *e;

	imd = imd_init_backing_with_recover(&imd_backing);

	e = imd_entry_find_or_add(imd, id, size);

	if (e == NULL)
		return NULL;

	return imd_entry_at(imd, e);
}

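/*
 * Typical usage sketch (CBMEM_ID_EXAMPLE and struct my_data are made up
 * for the example; real ids live in <commonlib/cbmem_id.h>):
 *
 *	struct my_data *d = cbmem_add(CBMEM_ID_EXAMPLE, sizeof(*d));
 *	if (d == NULL)
 *		...	allocation failed, handle it
 *
 * A later stage can look the same region up again with
 * cbmem_find(CBMEM_ID_EXAMPLE).
 */
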
/* Retrieve a region provided a given id. */
const struct cbmem_entry *cbmem_entry_find(u32 id)
{
	struct imd *imd;
	struct imd imd_backing;
	const struct imd_entry *e;

	imd = imd_init_backing_with_recover(&imd_backing);

	e = imd_entry_find(imd, id);

	return imd_to_cbmem(e);
}

void *cbmem_find(u32 id)
{
	struct imd *imd;
	struct imd imd_backing;
	const struct imd_entry *e;

	imd = imd_init_backing_with_recover(&imd_backing);

	e = imd_entry_find(imd, id);

	if (e == NULL)
		return NULL;

	return imd_entry_at(imd, e);
}

/* Remove a reserved region. Returns 0 on success, < 0 on error. Note: A
 * region cannot be removed unless it was the last one added. */
int cbmem_entry_remove(const struct cbmem_entry *entry)
{
	struct imd *imd;
	struct imd imd_backing;

	imd = imd_init_backing_with_recover(&imd_backing);

	return imd_entry_remove(imd, cbmem_to_imd(entry));
}

u64 cbmem_entry_size(const struct cbmem_entry *entry)
{
	struct imd *imd;
	struct imd imd_backing;

	imd = imd_init_backing_with_recover(&imd_backing);

	return imd_entry_size(imd, cbmem_to_imd(entry));
}

void *cbmem_entry_start(const struct cbmem_entry *entry)
{
	struct imd *imd;
	struct imd imd_backing;

	imd = imd_init_backing_with_recover(&imd_backing);

	return imd_entry_at(imd, cbmem_to_imd(entry));
}

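/* Mark the in-use portion of CBMEM as table memory in the bootmem map
   so that it stays reserved from the OS. */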
void cbmem_add_bootmem(void)
{
	void *baseptr = NULL;
	size_t size = 0;

	cbmem_get_region(&baseptr, &size);
	bootmem_add_range((uintptr_t)baseptr, size, BM_MEM_TABLE);
}

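/* Report the extent of the CBMEM area currently in use: *baseptr gets
   the lowest used address and *size the number of bytes from there up
   to cbmem_top(). */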
void cbmem_get_region(void **baseptr, size_t *size)
{
	imd_region_used(cbmem_get_imd(), baseptr, size);
}

#if ENV_PAYLOAD_LOADER || (CONFIG(EARLY_CBMEM_LIST) \
	&& (ENV_POSTCAR || ENV_ROMSTAGE))
/*
 * -fdata-sections doesn't work so well on read only strings. They all
 * get put in the same section even though those strings may never be
 * referenced in the final binary.
 */
void cbmem_list(void)
{
	static const struct imd_lookup lookup[] = { CBMEM_ID_TO_NAME_TABLE };
	struct imd *imd;
	struct imd imd_backing;

	imd = imd_init_backing_with_recover(&imd_backing);
	imd_print_entries(imd, lookup, ARRAY_SIZE(lookup));
}
#endif

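/* Publish each CBMEM entry (minus the imd bookkeeping entries) as an
   LB_TAG_CBMEM_ENTRY record in the coreboot table so that payloads and
   OS-level tools such as the cbmem utility can locate the regions. */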
void cbmem_add_records_to_cbtable(struct lb_header *header)
{
	struct imd_cursor cursor;
	struct imd *imd;

	imd = cbmem_get_imd();

	if (imd_cursor_init(imd, &cursor))
		return;

	while (1) {
		const struct imd_entry *e;
		struct lb_cbmem_entry *lbe;
		uint32_t id;

		e = imd_cursor_next(&cursor);

		if (e == NULL)
			break;

		id = imd_entry_id(imd, e);
		/* Don't add these metadata entries. */
		if (id == CBMEM_ID_IMD_ROOT || id == CBMEM_ID_IMD_SMALL)
			continue;

		lbe = (struct lb_cbmem_entry *)lb_new_record(header);
		lbe->tag = LB_TAG_CBMEM_ENTRY;
		lbe->size = sizeof(*lbe);
		lbe->address = (uintptr_t)imd_entry_at(imd, e);
		lbe->entry_size = imd_entry_size(imd, e);
		lbe->id = id;
	}
}