/*
 * This file is part of the coreboot project.
 *
 * Copyright (C) 2013 Google, Inc.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; version 2 of the License.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc.
 */

#include <bootstate.h>
#include <bootmem.h>
#include <console/console.h>
#include <cbmem.h>
#include <imd.h>
#include <rules.h>
#include <string.h>
#include <stdlib.h>
#include <arch/early_variables.h>
#if IS_ENABLED(CONFIG_ARCH_X86) && !IS_ENABLED(CONFIG_EARLY_CBMEM_INIT)
#include <arch/acpi.h>
#endif

static inline struct imd *cbmem_get_imd(void)
{
	/* Only supply a backing store for imd in ramstage. */
	if (ENV_RAMSTAGE) {
		static struct imd imd_cbmem;
		return &imd_cbmem;
	}
	return NULL;
}

/*
 * x86 !CONFIG_EARLY_CBMEM_INIT platforms need to do the following in ramstage:
 * 1. Call set_top_of_ram() which in turn calls cbmem_set_top().
 * 2. Provide a get_top_of_ram() implementation.
 *
 * CONFIG_EARLY_CBMEM_INIT platforms just need to provide cbmem_top().
 */
void cbmem_set_top(void *ramtop)
{
	struct imd *imd = cbmem_get_imd();

	imd_handle_init(imd, ramtop);
}

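/*
 * For illustration only (a sketch, not taken from any particular chipset;
 * the signature and cast below are assumptions), a !CONFIG_EARLY_CBMEM_INIT
 * platform hook would wire this up roughly as:
 *
 *	void set_top_of_ram(uint64_t ramtop)
 *	{
 *		cbmem_set_top((void *)(uintptr_t)ramtop);
 *	}
 */
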
static inline const struct cbmem_entry *imd_to_cbmem(const struct imd_entry *e)
{
	return (const struct cbmem_entry *)e;
}

static inline const struct imd_entry *cbmem_to_imd(const struct cbmem_entry *e)
{
	return (const struct imd_entry *)e;
}

/* These are the different situations to handle:
 *
 * CONFIG_EARLY_CBMEM_INIT:
 *	In ramstage cbmem_initialize() attempts a recovery of the
 *	cbmem region set up by romstage. It uses cbmem_top() as the
 *	starting point of recovery.
 *
 *	In romstage, similar to ramstage, cbmem_initialize() needs to
 *	attempt recovery of the cbmem area using cbmem_top() as the limit.
 *	cbmem_initialize_empty() initializes an empty cbmem area from
 *	cbmem_top().
 *
 * !CONFIG_EARLY_CBMEM_INIT:
 *	In ramstage the platform must call set_top_of_ram(), which calls
 *	cbmem_set_top(), before the cbmem area can be created or recovered
 *	(see the comment above cbmem_set_top()).
 */
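/* Use the ramstage-static imd if one is available; otherwise fall back to
 * the caller-provided backing store. */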
static struct imd *imd_init_backing(struct imd *backing)
{
	struct imd *imd;

	imd = cbmem_get_imd();

	if (imd != NULL)
		return imd;

	imd = backing;

	return imd;
}

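/* As imd_init_backing(), but outside of ramstage also re-attach the handle
 * to the existing cbmem area so it is usable immediately. */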
static struct imd *imd_init_backing_with_recover(struct imd *backing)
{
	struct imd *imd;

	imd = imd_init_backing(backing);
	if (!ENV_RAMSTAGE) {
		/* Early cbmem init platforms need to always use cbmem_top(). */
		if (IS_ENABLED(CONFIG_EARLY_CBMEM_INIT))
			imd_handle_init(imd, cbmem_top());
		/* Outside of ramstage the handle is stack-backed while the
		 * imd data itself lives in the cbmem area, so a partial
		 * recovery is needed on every call. */
		imd_handle_init_partial_recovery(imd);
	}

	return imd;
}

void cbmem_initialize_empty(void)
{
	cbmem_initialize_empty_id_size(0, 0);
}

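/* Create a brand-new, empty cbmem area and, if size is non-zero, add an
 * initial entry with the given id. */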
void cbmem_initialize_empty_id_size(u32 id, u64 size)
{
	struct imd *imd;
	struct imd imd_backing;

	imd = imd_init_backing(&imd_backing);

	/* Early cbmem init platforms need to always use cbmem_top(). */
	if (IS_ENABLED(CONFIG_EARLY_CBMEM_INIT))
		imd_handle_init(imd, cbmem_top());

	printk(BIOS_DEBUG, "CBMEM:\n");

	if (imd_create_tiered_empty(imd, CBMEM_ROOT_MIN_SIZE, CBMEM_LG_ALIGN,
					CBMEM_SM_ROOT_SIZE, CBMEM_SM_ALIGN)) {
		printk(BIOS_DEBUG, "failed.\n");
		return;
	}

	/* Add the specified range first. */
	if (size)
		cbmem_add(id, size);

	/* Complete migration to CBMEM. */
	cbmem_run_init_hooks();
}

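/* Recovery failed: fall back to a fresh, empty cbmem area and flag the
 * failed resume. Always returns 1. */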
static inline int cbmem_fail_recovery(void)
{
	cbmem_initialize_empty();
	cbmem_fail_resume();
	return 1;
}

int cbmem_initialize(void)
{
	return cbmem_initialize_id_size(0, 0);
}

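/* Attempt to recover a previously created cbmem area and, if size is
 * non-zero, add an entry with the given id. Returns 0 on successful
 * recovery and 1 if the area could not be recovered. */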
int cbmem_initialize_id_size(u32 id, u64 size)
{
	struct imd *imd;
	struct imd imd_backing;

	imd = imd_init_backing(&imd_backing);

	/* Early cbmem init platforms need to always use cbmem_top(). */
	if (IS_ENABLED(CONFIG_EARLY_CBMEM_INIT))
		imd_handle_init(imd, cbmem_top());

	if (imd_recover(imd))
		return 1;

#if defined(__PRE_RAM__)
	/*
	 * Lock the imd in romstage on a recovery. The assumption is that
	 * if the imd area was recovered in romstage then the S3 resume
	 * path is being taken.
	 */
	imd_lockdown(imd);
#endif

	/* Add the specified range first. */
	if (size)
		cbmem_add(id, size);

	/* Complete migration to CBMEM. */
	cbmem_run_init_hooks();

	/* Recovery successful. */
	return 0;
}

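/* On a normal boot start with an empty cbmem area; on a wakeup (e.g. S3
 * resume) attempt to recover the existing one. Returns the result of the
 * recovery attempt, 0 otherwise. */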
int cbmem_recovery(int is_wakeup)
{
	int rv = 0;
	if (!is_wakeup)
		cbmem_initialize_empty();
	else
		rv = cbmem_initialize();
	return rv;
}

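/* Find an existing entry with the given id or add a new one of the given
 * size, returning its handle (NULL on failure). */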
const struct cbmem_entry *cbmem_entry_add(u32 id, u64 size64)
{
	struct imd *imd;
	struct imd imd_backing;
	const struct imd_entry *e;

	imd = imd_init_backing_with_recover(&imd_backing);

	e = imd_entry_find_or_add(imd, id, size64);

	return imd_to_cbmem(e);
}

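/* As cbmem_entry_add(), but return a pointer to the entry's storage rather
 * than its handle. */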
void *cbmem_add(u32 id, u64 size)
{
	struct imd *imd;
	struct imd imd_backing;
	const struct imd_entry *e;

	imd = imd_init_backing_with_recover(&imd_backing);

	e = imd_entry_find_or_add(imd, id, size);

	if (e == NULL)
		return NULL;

	return imd_entry_at(imd, e);
}

/* Retrieve the entry handle for a given id, or NULL if no such entry
 * exists. */
const struct cbmem_entry *cbmem_entry_find(u32 id)
{
	struct imd *imd;
	struct imd imd_backing;
	const struct imd_entry *e;

	imd = imd_init_backing_with_recover(&imd_backing);

	e = imd_entry_find(imd, id);

	return imd_to_cbmem(e);
}

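/* Return a pointer to the storage of the entry with the given id, or NULL
 * if the entry is not present. */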
void *cbmem_find(u32 id)
{
	struct imd *imd;
	struct imd imd_backing;
	const struct imd_entry *e;

	imd = imd_init_backing_with_recover(&imd_backing);

	e = imd_entry_find(imd, id);

	if (e == NULL)
		return NULL;

	return imd_entry_at(imd, e);
}

/* Remove a reserved region. Returns 0 on success, < 0 on error. Note: A region
 * cannot be removed unless it was the last one added. */
int cbmem_entry_remove(const struct cbmem_entry *entry)
{
	struct imd *imd;
	struct imd imd_backing;

	imd = imd_init_backing_with_recover(&imd_backing);

	return imd_entry_remove(imd, cbmem_to_imd(entry));
}

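/* Return the size, in bytes, of the given entry. */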
u64 cbmem_entry_size(const struct cbmem_entry *entry)
{
	struct imd *imd;
	struct imd imd_backing;

	imd = imd_init_backing_with_recover(&imd_backing);

	return imd_entry_size(imd, cbmem_to_imd(entry));
}

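/* Return a pointer to the start of the given entry's storage. */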
void *cbmem_entry_start(const struct cbmem_entry *entry)
{
	struct imd *imd;
	struct imd imd_backing;

	imd = imd_init_backing_with_recover(&imd_backing);

	return imd_entry_at(imd, cbmem_to_imd(entry));
}

#if ENV_RAMSTAGE
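/* Report the memory used by cbmem to the bootmem map as a table region. */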
void cbmem_add_bootmem(void)
{
	void *base = NULL;
	size_t size = 0;

	imd_region_used(cbmem_get_imd(), &base, &size);
	bootmem_add_range((uintptr_t)base, size, LB_MEM_TABLE);
}

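/* Print the cbmem entries to the console, resolving ids to names using
 * CBMEM_ID_TO_NAME_TABLE. */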
void cbmem_list(void)
{
	static const struct imd_lookup lookup[] = { CBMEM_ID_TO_NAME_TABLE };

	imd_print_entries(cbmem_get_imd(), lookup, ARRAY_SIZE(lookup));
}
#endif /* ENV_RAMSTAGE */