/*
 * This file is part of the coreboot project.
 *
 * Copyright (C) 2013 Google, Inc.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; version 2 of the License.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */

#include <bootstate.h>
#include <bootmem.h>
#include <console/console.h>
#include <cbmem.h>
#include <imd.h>
#include <rules.h>
#include <string.h>
#include <stdlib.h>
#include <arch/early_variables.h>
#if IS_ENABLED(CONFIG_ARCH_X86) && !IS_ENABLED(CONFIG_EARLY_CBMEM_INIT)
#include <arch/acpi.h>
#endif

static inline struct imd *cbmem_get_imd(void)
{
	/* Only supply a backing store for imd in ramstage. */
	if (ENV_RAMSTAGE) {
		static struct imd imd_cbmem;
		return &imd_cbmem;
	}
	return NULL;
}

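/*
 * A cbmem_entry is an opaque view of an imd_entry; the helpers below
 * simply convert between the two types for the same underlying object.
 */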
static inline const struct cbmem_entry *imd_to_cbmem(const struct imd_entry *e)
{
	return (const struct cbmem_entry *)e;
}

static inline const struct imd_entry *cbmem_to_imd(const struct cbmem_entry *e)
{
	return (const struct imd_entry *)e;
}

/* These are the different situations to handle:
 *
 * CONFIG_EARLY_CBMEM_INIT:
 *	In ramstage, cbmem_initialize() attempts to recover the cbmem
 *	region set up by romstage, using cbmem_top() as the starting
 *	point of recovery.
 *
 *	In romstage, similarly, cbmem_initialize() attempts recovery of
 *	the cbmem area using cbmem_top() as the limit, while
 *	cbmem_initialize_empty() initializes an empty cbmem area growing
 *	down from cbmem_top().
 */
static struct imd *imd_init_backing(struct imd *backing)
{
	struct imd *imd;

	imd = cbmem_get_imd();

	if (imd != NULL)
		return imd;

	imd = backing;

	return imd;
}

static struct imd *imd_init_backing_with_recover(struct imd *backing)
{
	struct imd *imd;

	imd = imd_init_backing(backing);
	if (!ENV_RAMSTAGE) {
		imd_handle_init(imd, cbmem_top());

		/* Outside of ramstage a partial recovery is always needed
		 * because the imd state lives in cbmem itself, not in the
		 * stack-backed handle. */
		imd_handle_init_partial_recovery(imd);
	}

	return imd;
}

void cbmem_initialize_empty(void)
{
	cbmem_initialize_empty_id_size(0, 0);
}

void cbmem_initialize_empty_id_size(u32 id, u64 size)
{
	struct imd *imd;
	struct imd imd_backing;
	const int no_recovery = 0;

	imd = imd_init_backing(&imd_backing);
	imd_handle_init(imd, cbmem_top());

	printk(BIOS_DEBUG, "CBMEM:\n");

	if (imd_create_tiered_empty(imd, CBMEM_ROOT_MIN_SIZE, CBMEM_LG_ALIGN,
					CBMEM_SM_ROOT_SIZE, CBMEM_SM_ALIGN)) {
		printk(BIOS_DEBUG, "failed.\n");
		return;
	}

	/* Add the specified range first */
	if (size)
		cbmem_add(id, size);

	/* Complete migration to CBMEM. */
	cbmem_run_init_hooks(no_recovery);
}
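
/*
 * Illustrative sketch, not part of the original file: consumers migrate
 * pre-RAM state once cbmem comes online by registering init hooks (see
 * cbmem.h for the exact macro set; migrate_foo and CBMEM_ID_FOO are
 * hypothetical):
 *
 *	static void migrate_foo(int is_recovery)
 *	{
 *		struct foo *f = cbmem_add(CBMEM_ID_FOO, sizeof(*f));
 *		...
 *	}
 *	ROMSTAGE_CBMEM_INIT_HOOK(migrate_foo)
 */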

static inline int cbmem_fail_recovery(void)
{
	cbmem_initialize_empty();
	cbmem_fail_resume();
	return 1;
}

int cbmem_initialize(void)
{
	return cbmem_initialize_id_size(0, 0);
}

int cbmem_initialize_id_size(u32 id, u64 size)
{
	struct imd *imd;
	struct imd imd_backing;
	const int recovery = 1;

	imd = imd_init_backing(&imd_backing);
	imd_handle_init(imd, cbmem_top());

	if (imd_recover(imd))
		return 1;

#if defined(__PRE_RAM__)
	/*
	 * Lock the imd in romstage on a recovery. The assumption is that
	 * if the imd area was recovered in romstage then the S3 resume
	 * path is being taken.
	 */
	imd_lockdown(imd);
#endif

	/* Add the specified range first */
	if (size)
		cbmem_add(id, size);

	/* Complete migration to CBMEM. */
	cbmem_run_init_hooks(recovery);

	/* Recovery successful. */
	return 0;
}

int cbmem_recovery(int is_wakeup)
{
	int rv = 0;
	if (!is_wakeup)
		cbmem_initialize_empty();
	else
		rv = cbmem_initialize();
	return rv;
}
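
/*
 * Illustrative sketch, not part of the original file: a romstage caller
 * typically feeds its S3 wakeup determination straight in, e.g. on ACPI
 * platforms:
 *
 *	cbmem_recovery(acpi_is_wakeup_s3());
 */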

const struct cbmem_entry *cbmem_entry_add(u32 id, u64 size64)
{
	struct imd *imd;
	struct imd imd_backing;
	const struct imd_entry *e;

	imd = imd_init_backing_with_recover(&imd_backing);

	e = imd_entry_find_or_add(imd, id, size64);

	return imd_to_cbmem(e);
}

void *cbmem_add(u32 id, u64 size)
{
	struct imd *imd;
	struct imd imd_backing;
	const struct imd_entry *e;

	imd = imd_init_backing_with_recover(&imd_backing);

	e = imd_entry_find_or_add(imd, id, size);

	if (e == NULL)
		return NULL;

	return imd_entry_at(imd, e);
}
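
/*
 * Illustrative sketch, not part of the original file (CBMEM_ID_FOO and
 * struct foo are hypothetical):
 *
 *	struct foo *f = cbmem_add(CBMEM_ID_FOO, sizeof(struct foo));
 *	if (f == NULL)
 *		die("out of cbmem\n");
 */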

/* Retrieve a region given an id. */
const struct cbmem_entry *cbmem_entry_find(u32 id)
{
	struct imd *imd;
	struct imd imd_backing;
	const struct imd_entry *e;

	imd = imd_init_backing_with_recover(&imd_backing);

	e = imd_entry_find(imd, id);

	return imd_to_cbmem(e);
}

void *cbmem_find(u32 id)
{
	struct imd *imd;
	struct imd imd_backing;
	const struct imd_entry *e;

	imd = imd_init_backing_with_recover(&imd_backing);

	e = imd_entry_find(imd, id);

	if (e == NULL)
		return NULL;

	return imd_entry_at(imd, e);
}
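
/*
 * Illustrative sketch, not part of the original file: a common
 * find-or-add pattern (CBMEM_ID_FOO is hypothetical):
 *
 *	struct foo *f = cbmem_find(CBMEM_ID_FOO);
 *	if (f == NULL)
 *		f = cbmem_add(CBMEM_ID_FOO, sizeof(*f));
 */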

/* Remove a reserved region. Returns 0 on success, < 0 on error. Note: A region
 * cannot be removed unless it was the last one added. */
int cbmem_entry_remove(const struct cbmem_entry *entry)
{
	struct imd *imd;
	struct imd imd_backing;

	imd = imd_init_backing_with_recover(&imd_backing);

	return imd_entry_remove(imd, cbmem_to_imd(entry));
}
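
/*
 * Illustrative sketch of the last-in-first-out constraint noted above
 * (CBMEM_ID_FOO and CBMEM_ID_BAR are hypothetical):
 *
 *	const struct cbmem_entry *a = cbmem_entry_add(CBMEM_ID_FOO, 64);
 *	const struct cbmem_entry *b = cbmem_entry_add(CBMEM_ID_BAR, 64);
 *	cbmem_entry_remove(a);	<- fails, a is not the last entry added
 *	cbmem_entry_remove(b);	<- succeeds
 *	cbmem_entry_remove(a);	<- now succeeds
 */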

u64 cbmem_entry_size(const struct cbmem_entry *entry)
{
	struct imd *imd;
	struct imd imd_backing;

	imd = imd_init_backing_with_recover(&imd_backing);

	return imd_entry_size(imd, cbmem_to_imd(entry));
}

void *cbmem_entry_start(const struct cbmem_entry *entry)
{
	struct imd *imd;
	struct imd imd_backing;

	imd = imd_init_backing_with_recover(&imd_backing);

	return imd_entry_at(imd, cbmem_to_imd(entry));
}

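/*
 * Report the memory used by the imd-backed cbmem area to bootmem as
 * table memory so it stays reserved in the memory map handed to
 * payloads.
 */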
void cbmem_add_bootmem(void)
{
	void *base = NULL;
	size_t size = 0;

	imd_region_used(cbmem_get_imd(), &base, &size);
	bootmem_add_range((uintptr_t)base, size, LB_MEM_TABLE);
}

#if ENV_RAMSTAGE
/*
 * -fdata-sections doesn't work so well on read-only strings. They all
 * get put in the same section even though those strings may never be
 * referenced in the final binary.
 */
void cbmem_list(void)
{
	static const struct imd_lookup lookup[] = { CBMEM_ID_TO_NAME_TABLE };

	imd_print_entries(cbmem_get_imd(), lookup, ARRAY_SIZE(lookup));
}
#endif

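/*
 * Publish each cbmem region into the coreboot table as an
 * LB_TAG_CBMEM_ENTRY record so tools such as util/cbmem can locate the
 * regions without having to walk the imd structures themselves.
 */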
void cbmem_add_records_to_cbtable(struct lb_header *header)
{
	struct imd_cursor cursor;
	struct imd *imd;

	imd = cbmem_get_imd();

	if (imd_cursor_init(imd, &cursor))
		return;

	while (1) {
		const struct imd_entry *e;
		struct lb_cbmem_entry *lbe;
		uint32_t id;

		e = imd_cursor_next(&cursor);

		if (e == NULL)
			break;

		id = imd_entry_id(imd, e);
		/* Don't add these metadata entries. */
		if (id == CBMEM_ID_IMD_ROOT || id == CBMEM_ID_IMD_SMALL)
			continue;

		lbe = (struct lb_cbmem_entry *)lb_new_record(header);
		lbe->tag = LB_TAG_CBMEM_ENTRY;
		lbe->size = sizeof(*lbe);
		lbe->address = (uintptr_t)imd_entry_at(imd, e);
		lbe->entry_size = imd_entry_size(imd, e);
		lbe->id = id;
	}
}