Aaron Durbin | df3a109 | 2013-03-13 12:41:44 -0500 | [diff] [blame] | 1 | /* |
| 2 | * This file is part of the coreboot project. |
| 3 | * |
| 4 | * Copyright (C) 2013 Google, Inc. |
| 5 | * |
| 6 | * This program is free software; you can redistribute it and/or modify |
| 7 | * it under the terms of the GNU General Public License as published by |
| 8 | * the Free Software Foundation; version 2 of the License. |
| 9 | * |
| 10 | * This program is distributed in the hope that it will be useful, |
Aaron Durbin | 2c4aab3 | 2015-03-06 23:26:06 -0600 | [diff] [blame] | 11 | * but WITHOUT ANY WARRANTY; without even the implied warranty of |
Aaron Durbin | df3a109 | 2013-03-13 12:41:44 -0500 | [diff] [blame] | 12 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the |
| 13 | * GNU General Public License for more details. |
| 14 | * |
| 15 | * You should have received a copy of the GNU General Public License |
| 16 | * along with this program; if not, write to the Free Software |
| 17 | * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA |
| 18 | */ |
| 19 | |
Aaron Durbin | 40131cf | 2013-04-24 16:39:08 -0500 | [diff] [blame] | 20 | #include <bootstate.h> |
Aaron Durbin | 4904802 | 2014-02-18 21:55:02 -0600 | [diff] [blame] | 21 | #include <bootmem.h> |
Aaron Durbin | df3a109 | 2013-03-13 12:41:44 -0500 | [diff] [blame] | 22 | #include <console/console.h> |
| 23 | #include <cbmem.h> |
| 24 | #include <string.h> |
| 25 | #include <stdlib.h> |
Stefan Reinauer | fd4f413 | 2013-06-19 12:25:44 -0700 | [diff] [blame] | 26 | #include <arch/early_variables.h> |
Kyösti Mälkki | 2fb6b40 | 2014-12-19 08:20:45 +0200 | [diff] [blame] | 27 | #if IS_ENABLED(CONFIG_ARCH_X86) && !IS_ENABLED(CONFIG_EARLY_CBMEM_INIT) |
Aaron Durbin | df3a109 | 2013-03-13 12:41:44 -0500 | [diff] [blame] | 28 | #include <arch/acpi.h> |
| 29 | #endif |
Aaron Durbin | df3a109 | 2013-03-13 12:41:44 -0500 | [diff] [blame] | 30 | #ifndef UINT_MAX |
| 31 | #define UINT_MAX 4294967295U |
| 32 | #endif |
| 33 | |
Aaron Durbin | df3a109 | 2013-03-13 12:41:44 -0500 | [diff] [blame] | 34 | /* |
| 35 | * The dynamic cbmem code uses a root region. The root region boundary |
| 36 | * addresses are determined by cbmem_top() and ROOT_MIN_SIZE. Just below |
| 37 | * the address returned by cbmem_top() is a pointer that points to the |
| 38 | * root data structure. The root data structure provides the book keeping |
| 39 | * for each large entry. |
| 40 | */ |
| 41 | |
| 42 | /* The root region is at least DYN_CBMEM_ALIGN_SIZE . */ |
| 43 | #define ROOT_MIN_SIZE DYN_CBMEM_ALIGN_SIZE |
| 44 | #define CBMEM_POINTER_MAGIC 0xc0389479 |
| 45 | #define CBMEM_ENTRY_MAGIC ~(CBMEM_POINTER_MAGIC) |
| 46 | |
| 47 | /* The cbmem_root_pointer structure lives just below address returned |
| 48 | * from cbmem_top(). It points to the root data structure that |
| 49 | * maintains the entries. */ |
| 50 | struct cbmem_root_pointer { |
| 51 | u32 magic; |
| 52 | u32 root; |
| 53 | } __attribute__((packed)); |
| 54 | |
| 55 | struct cbmem_entry { |
| 56 | u32 magic; |
| 57 | u32 start; |
| 58 | u32 size; |
| 59 | u32 id; |
| 60 | } __attribute__((packed)); |
| 61 | |
| 62 | struct cbmem_root { |
| 63 | u32 max_entries; |
| 64 | u32 num_entries; |
| 65 | u32 locked; |
| 66 | u32 size; |
| 67 | struct cbmem_entry entries[0]; |
| 68 | } __attribute__((packed)); |
| 69 | |
| 70 | |
#if !defined(__PRE_RAM__)
/* Ramstage-only cache of the cbmem top boundary so cbmem_top() does
 * not have to be re-evaluated on every lookup. */
static void *cached_cbmem_top;

/* Seed the cached cbmem top boundary (e.g. with a value handed over
 * from an earlier stage) before any cbmem lookups take place. */
void cbmem_set_top(void * ramtop)
{
	cached_cbmem_top = ramtop;
}
#endif
| 79 | |
/* Return the cbmem top boundary. Before RAM is up (__PRE_RAM__) there
 * is no writable global state, so cbmem_top() is queried every time;
 * in later stages the value is cached on first use. */
static inline void *cbmem_top_cached(void)
{
#if !defined(__PRE_RAM__)
	if (cached_cbmem_top == NULL)
		cached_cbmem_top = cbmem_top();

	return cached_cbmem_top;
#else
	return cbmem_top();
#endif
}
| 91 | |
Kyösti Mälkki | 02aebb6 | 2014-12-21 08:55:47 +0200 | [diff] [blame] | 92 | static inline uintptr_t get_top_aligned(void) |
Aaron Durbin | df3a109 | 2013-03-13 12:41:44 -0500 | [diff] [blame] | 93 | { |
Kyösti Mälkki | 02aebb6 | 2014-12-21 08:55:47 +0200 | [diff] [blame] | 94 | uintptr_t top; |
Aaron Durbin | df3a109 | 2013-03-13 12:41:44 -0500 | [diff] [blame] | 95 | |
| 96 | /* Align down what is returned from cbmem_top(). */ |
Kyösti Mälkki | 02aebb6 | 2014-12-21 08:55:47 +0200 | [diff] [blame] | 97 | top = (uintptr_t)cbmem_top_cached(); |
Aaron Durbin | df3a109 | 2013-03-13 12:41:44 -0500 | [diff] [blame] | 98 | top &= ~(DYN_CBMEM_ALIGN_SIZE - 1); |
| 99 | |
Kyösti Mälkki | 02aebb6 | 2014-12-21 08:55:47 +0200 | [diff] [blame] | 100 | return top; |
Aaron Durbin | df3a109 | 2013-03-13 12:41:44 -0500 | [diff] [blame] | 101 | } |
| 102 | |
| 103 | static inline void *get_root(void) |
| 104 | { |
Ronald G. Minnich | f33d270 | 2014-10-16 10:58:09 +0000 | [diff] [blame] | 105 | uintptr_t pointer_addr; |
Aaron Durbin | df3a109 | 2013-03-13 12:41:44 -0500 | [diff] [blame] | 106 | struct cbmem_root_pointer *pointer; |
| 107 | |
Kyösti Mälkki | 02aebb6 | 2014-12-21 08:55:47 +0200 | [diff] [blame] | 108 | pointer_addr = get_top_aligned(); |
Kyösti Mälkki | 2fb6b40 | 2014-12-19 08:20:45 +0200 | [diff] [blame] | 109 | if (pointer_addr == 0) |
| 110 | return NULL; |
| 111 | |
Aaron Durbin | df3a109 | 2013-03-13 12:41:44 -0500 | [diff] [blame] | 112 | pointer_addr -= sizeof(struct cbmem_root_pointer); |
| 113 | |
| 114 | pointer = (void *)pointer_addr; |
| 115 | if (pointer->magic != CBMEM_POINTER_MAGIC) |
| 116 | return NULL; |
| 117 | |
Ronald G. Minnich | f33d270 | 2014-10-16 10:58:09 +0000 | [diff] [blame] | 118 | pointer_addr = pointer->root; |
| 119 | return (void *)pointer_addr; |
Aaron Durbin | df3a109 | 2013-03-13 12:41:44 -0500 | [diff] [blame] | 120 | } |
| 121 | |
| 122 | static inline void cbmem_entry_assign(struct cbmem_entry *entry, |
| 123 | u32 id, u32 start, u32 size) |
| 124 | { |
| 125 | entry->magic = CBMEM_ENTRY_MAGIC; |
| 126 | entry->start = start; |
| 127 | entry->size = size; |
| 128 | entry->id = id; |
| 129 | } |
| 130 | |
| 131 | static inline const struct cbmem_entry * |
| 132 | cbmem_entry_append(struct cbmem_root *root, u32 id, u32 start, u32 size) |
| 133 | { |
| 134 | struct cbmem_entry *cbmem_entry; |
| 135 | |
| 136 | cbmem_entry = &root->entries[root->num_entries]; |
| 137 | root->num_entries++; |
| 138 | |
| 139 | cbmem_entry_assign(cbmem_entry, id, start, size); |
| 140 | |
| 141 | return cbmem_entry; |
| 142 | } |
| 143 | |
/* Create a brand new, empty cbmem area below cbmem_top(): the root
 * pointer goes just under the aligned top, the root directory lands
 * ROOT_MIN_SIZE below, and the root region itself is recorded as the
 * first entry. Silently does nothing if no top boundary is known. */
void cbmem_initialize_empty(void)
{
	uintptr_t pointer_addr;
	uintptr_t root_addr;
	unsigned long max_entries;
	struct cbmem_root *root;
	struct cbmem_root_pointer *pointer;

	/* Place the root pointer and the root. The number of entries is
	 * dictated by difference between the root address and the pointer
	 * where the root address is aligned down to
	 * DYN_CBMEM_ALIGN_SIZE. The pointer falls just below the
	 * address returned by get_top_aligned(). */
	pointer_addr = get_top_aligned();
	if (pointer_addr == 0)
		return;

	root_addr = pointer_addr - ROOT_MIN_SIZE;
	root_addr &= ~(DYN_CBMEM_ALIGN_SIZE - 1);
	pointer_addr -= sizeof(struct cbmem_root_pointer);

	/* All bytes between the root header and the root pointer are
	 * available as entry slots. */
	max_entries = (pointer_addr - (root_addr + sizeof(*root))) /
		sizeof(struct cbmem_entry);

	pointer = (void *)pointer_addr;
	pointer->magic = CBMEM_POINTER_MAGIC;
	pointer->root = root_addr;

	root = (void *)root_addr;
	root->max_entries = max_entries;
	root->num_entries = 0;
	root->locked = 0;
	/* size spans from root_addr up to and including the root pointer. */
	root->size = pointer_addr - root_addr +
		sizeof(struct cbmem_root_pointer);

	/* Add an entry covering the root region. */
	cbmem_entry_append(root, CBMEM_ID_ROOT, root_addr, root->size);

	printk(BIOS_DEBUG, "CBMEM: root @ %p %d entries.\n",
		root, root->max_entries);

	/* Complete migration to CBMEM. */
	cbmem_run_init_hooks();
}
| 188 | |
/* Recovery of an existing cbmem area failed: wipe it, start empty and
 * flag the resume failure. Always returns 1 so callers can propagate
 * the error directly. */
static inline int cbmem_fail_recovery(void)
{
	cbmem_initialize_empty();
	cbmem_fail_resume();
	return 1;
}
| 195 | |
| 196 | static int validate_entries(struct cbmem_root *root) |
| 197 | { |
| 198 | unsigned int i; |
Marcelo Povoa | 4b90b79 | 2014-02-24 10:00:26 -0800 | [diff] [blame] | 199 | uintptr_t current_end; |
Aaron Durbin | df3a109 | 2013-03-13 12:41:44 -0500 | [diff] [blame] | 200 | |
Kyösti Mälkki | 02aebb6 | 2014-12-21 08:55:47 +0200 | [diff] [blame] | 201 | current_end = get_top_aligned(); |
Aaron Durbin | df3a109 | 2013-03-13 12:41:44 -0500 | [diff] [blame] | 202 | |
| 203 | printk(BIOS_DEBUG, "CBMEM: recovering %d/%d entries from root @ %p\n", |
| 204 | root->num_entries, root->max_entries, root); |
| 205 | |
| 206 | /* Check that all regions are properly aligned and are just below |
| 207 | * the previous entry */ |
| 208 | for (i = 0; i < root->num_entries; i++) { |
| 209 | struct cbmem_entry *entry = &root->entries[i]; |
| 210 | |
| 211 | if (entry->magic != CBMEM_ENTRY_MAGIC) |
| 212 | return -1; |
| 213 | |
| 214 | if (entry->start & (DYN_CBMEM_ALIGN_SIZE - 1)) |
| 215 | return -1; |
| 216 | |
| 217 | if (entry->start + entry->size != current_end) |
| 218 | return -1; |
| 219 | |
| 220 | current_end = entry->start; |
| 221 | } |
| 222 | |
| 223 | return 0; |
| 224 | } |
| 225 | |
| 226 | int cbmem_initialize(void) |
| 227 | { |
| 228 | struct cbmem_root *root; |
Kyösti Mälkki | 02aebb6 | 2014-12-21 08:55:47 +0200 | [diff] [blame] | 229 | uintptr_t top_according_to_root; |
Aaron Durbin | df3a109 | 2013-03-13 12:41:44 -0500 | [diff] [blame] | 230 | |
| 231 | root = get_root(); |
| 232 | |
| 233 | /* No recovery possible since root couldn't be recovered. */ |
| 234 | if (root == NULL) |
| 235 | return cbmem_fail_recovery(); |
| 236 | |
| 237 | /* Sanity check the root. */ |
Kyösti Mälkki | 02aebb6 | 2014-12-21 08:55:47 +0200 | [diff] [blame] | 238 | top_according_to_root = (root->size + (uintptr_t)root); |
Aaron Durbin | df3a109 | 2013-03-13 12:41:44 -0500 | [diff] [blame] | 239 | if (get_top_aligned() != top_according_to_root) |
| 240 | return cbmem_fail_recovery(); |
| 241 | |
| 242 | if (root->num_entries > root->max_entries) |
| 243 | return cbmem_fail_recovery(); |
| 244 | |
| 245 | if ((root->max_entries * sizeof(struct cbmem_entry)) > |
| 246 | (root->size - sizeof(struct cbmem_root_pointer) - sizeof(*root))) |
| 247 | return cbmem_fail_recovery(); |
| 248 | |
| 249 | /* Validate current entries. */ |
| 250 | if (validate_entries(root)) |
| 251 | return cbmem_fail_recovery(); |
| 252 | |
| 253 | #if defined(__PRE_RAM__) |
| 254 | /* Lock the root in the romstage on a recovery. The assumption is that |
| 255 | * recovery is called during romstage on the S3 resume path. */ |
| 256 | root->locked = 1; |
| 257 | #endif |
| 258 | |
Kyösti Mälkki | 91fac61 | 2014-12-31 20:55:19 +0200 | [diff] [blame] | 259 | /* Complete migration to CBMEM. */ |
Kyösti Mälkki | 823edda | 2014-12-18 18:30:29 +0200 | [diff] [blame] | 260 | cbmem_run_init_hooks(); |
Aaron Durbin | df3a109 | 2013-03-13 12:41:44 -0500 | [diff] [blame] | 261 | |
| 262 | /* Recovery successful. */ |
| 263 | return 0; |
| 264 | } |
| 265 | |
/* Entry point for stages that know whether this is an S3 wakeup:
 * attempt recovery on wakeup, otherwise start with an empty area.
 * Returns non-zero only when recovery was attempted and failed. */
int cbmem_recovery(int is_wakeup)
{
	if (is_wakeup)
		return cbmem_initialize();

	cbmem_initialize_empty();
	return 0;
}
| 275 | |
/* Return the lowest address currently claimed by cbmem, i.e. where the
 * next downward-growing allocation would end. Returns 0 when no root
 * exists. */
static uintptr_t cbmem_base(void)
{
	struct cbmem_root *root;
	uintptr_t low_addr;

	root = get_root();

	if (root == NULL)
		return 0;

	low_addr = (uintptr_t)root;
	/* a low address is low. */
	/* NOTE(review): masks to 32 bits, matching the u32 entry->start
	 * fields — presumably cbmem always lives below 4GiB; confirm. */
	low_addr &= 0xffffffff;

	/* Assume the lowest address is the last one added. */
	if (root->num_entries > 0) {
		low_addr = root->entries[root->num_entries - 1].start;
	}

	return low_addr;
}
| 297 | |
| 298 | |
| 299 | const struct cbmem_entry *cbmem_entry_add(u32 id, u64 size64) |
| 300 | { |
| 301 | struct cbmem_root *root; |
| 302 | const struct cbmem_entry *entry; |
Kyösti Mälkki | 02aebb6 | 2014-12-21 08:55:47 +0200 | [diff] [blame] | 303 | uintptr_t base; |
Aaron Durbin | df3a109 | 2013-03-13 12:41:44 -0500 | [diff] [blame] | 304 | u32 size; |
| 305 | u32 aligned_size; |
| 306 | |
| 307 | entry = cbmem_entry_find(id); |
| 308 | |
| 309 | if (entry != NULL) |
| 310 | return entry; |
| 311 | |
| 312 | /* Only handle sizes <= UINT_MAX internally. */ |
| 313 | if (size64 > (u64)UINT_MAX) |
| 314 | return NULL; |
| 315 | |
| 316 | size = size64; |
| 317 | |
| 318 | root = get_root(); |
| 319 | |
| 320 | if (root == NULL) |
| 321 | return NULL; |
| 322 | |
| 323 | /* Nothing can be added once it is locked down. */ |
| 324 | if (root->locked) |
| 325 | return NULL; |
| 326 | |
| 327 | if (root->max_entries == root->num_entries) |
| 328 | return NULL; |
| 329 | |
| 330 | aligned_size = ALIGN(size, DYN_CBMEM_ALIGN_SIZE); |
Kyösti Mälkki | 02aebb6 | 2014-12-21 08:55:47 +0200 | [diff] [blame] | 331 | base = cbmem_base(); |
Aaron Durbin | df3a109 | 2013-03-13 12:41:44 -0500 | [diff] [blame] | 332 | base -= aligned_size; |
| 333 | |
| 334 | return cbmem_entry_append(root, id, base, aligned_size); |
| 335 | } |
| 336 | |
| 337 | void *cbmem_add(u32 id, u64 size) |
| 338 | { |
| 339 | const struct cbmem_entry *entry; |
| 340 | |
| 341 | entry = cbmem_entry_add(id, size); |
| 342 | |
| 343 | if (entry == NULL) |
| 344 | return NULL; |
| 345 | |
| 346 | return cbmem_entry_start(entry); |
| 347 | } |
| 348 | |
| 349 | /* Retrieve a region provided a given id. */ |
| 350 | const struct cbmem_entry *cbmem_entry_find(u32 id) |
| 351 | { |
| 352 | struct cbmem_root *root; |
| 353 | const struct cbmem_entry *entry; |
| 354 | unsigned int i; |
| 355 | |
| 356 | root = get_root(); |
| 357 | |
| 358 | if (root == NULL) |
| 359 | return NULL; |
| 360 | |
| 361 | entry = NULL; |
| 362 | |
| 363 | for (i = 0; i < root->num_entries; i++) { |
| 364 | if (root->entries[i].id == id) { |
| 365 | entry = &root->entries[i]; |
| 366 | break; |
| 367 | } |
| 368 | } |
| 369 | |
| 370 | return entry; |
| 371 | } |
| 372 | |
| 373 | void *cbmem_find(u32 id) |
| 374 | { |
| 375 | const struct cbmem_entry *entry; |
| 376 | |
| 377 | entry = cbmem_entry_find(id); |
| 378 | |
| 379 | if (entry == NULL) |
| 380 | return NULL; |
| 381 | |
| 382 | return cbmem_entry_start(entry); |
| 383 | } |
| 384 | |
| 385 | /* Remove a reserved region. Returns 0 on success, < 0 on error. Note: A region |
| 386 | * cannot be removed unless it was the last one added. */ |
| 387 | int cbmem_entry_remove(const struct cbmem_entry *entry) |
| 388 | { |
| 389 | unsigned long entry_num; |
| 390 | struct cbmem_root *root; |
| 391 | |
| 392 | root = get_root(); |
| 393 | |
| 394 | if (root == NULL) |
| 395 | return -1; |
| 396 | |
| 397 | if (root->num_entries == 0) |
| 398 | return -1; |
| 399 | |
| 400 | /* Nothing can be removed. */ |
| 401 | if (root->locked) |
| 402 | return -1; |
| 403 | |
| 404 | entry_num = entry - &root->entries[0]; |
| 405 | |
| 406 | /* If the entry is the last one in the root it can be removed. */ |
| 407 | if (entry_num == (root->num_entries - 1)) { |
| 408 | root->num_entries--; |
| 409 | return 0; |
| 410 | } |
| 411 | |
| 412 | return -1; |
| 413 | } |
| 414 | |
/* Return the size in bytes of the region described by entry. */
u64 cbmem_entry_size(const struct cbmem_entry *entry)
{
	return entry->size;
}
| 419 | |
| 420 | void *cbmem_entry_start(const struct cbmem_entry *entry) |
| 421 | { |
Ronald G. Minnich | f33d270 | 2014-10-16 10:58:09 +0000 | [diff] [blame] | 422 | uintptr_t addr = entry->start; |
| 423 | return (void *)addr; |
Aaron Durbin | df3a109 | 2013-03-13 12:41:44 -0500 | [diff] [blame] | 424 | } |
| 425 | |
| 426 | |
| 427 | #if !defined(__PRE_RAM__) |
Kyösti Mälkki | 2fb6b40 | 2014-12-19 08:20:45 +0200 | [diff] [blame] | 428 | |
Aaron Durbin | b0d8f5e | 2015-04-06 16:12:58 -0500 | [diff] [blame] | 429 | #if !IS_ENABLED(CONFIG_EARLY_CBMEM_INIT) |
Kyösti Mälkki | 2fb6b40 | 2014-12-19 08:20:45 +0200 | [diff] [blame] | 430 | static void init_cbmem_post_device(void *unused) |
| 431 | { |
| 432 | if (acpi_is_wakeup()) |
| 433 | cbmem_initialize(); |
| 434 | else |
| 435 | cbmem_initialize_empty(); |
| 436 | } |
| 437 | |
Aaron Durbin | 9ef9d85 | 2015-03-16 17:30:09 -0500 | [diff] [blame] | 438 | BOOT_STATE_INIT_ENTRY(BS_POST_DEVICE, BS_ON_ENTRY, |
| 439 | init_cbmem_post_device, NULL); |
Kyösti Mälkki | 2fb6b40 | 2014-12-19 08:20:45 +0200 | [diff] [blame] | 440 | #endif |
| 441 | |
Aaron Durbin | 4904802 | 2014-02-18 21:55:02 -0600 | [diff] [blame] | 442 | void cbmem_add_bootmem(void) |
Aaron Durbin | df3a109 | 2013-03-13 12:41:44 -0500 | [diff] [blame] | 443 | { |
Kyösti Mälkki | 02aebb6 | 2014-12-21 08:55:47 +0200 | [diff] [blame] | 444 | uintptr_t base; |
| 445 | uintptr_t top; |
Aaron Durbin | df3a109 | 2013-03-13 12:41:44 -0500 | [diff] [blame] | 446 | |
Kyösti Mälkki | 02aebb6 | 2014-12-21 08:55:47 +0200 | [diff] [blame] | 447 | base = cbmem_base(); |
| 448 | top = get_top_aligned(); |
Aaron Durbin | 4904802 | 2014-02-18 21:55:02 -0600 | [diff] [blame] | 449 | bootmem_add_range(base, top - base, LB_MEM_TABLE); |
Aaron Durbin | df3a109 | 2013-03-13 12:41:44 -0500 | [diff] [blame] | 450 | } |
| 451 | |
| 452 | void cbmem_list(void) |
| 453 | { |
| 454 | unsigned int i; |
| 455 | struct cbmem_root *root; |
| 456 | |
| 457 | root = get_root(); |
| 458 | |
| 459 | if (root == NULL) |
| 460 | return; |
| 461 | |
| 462 | for (i = 0; i < root->num_entries; i++) { |
| 463 | struct cbmem_entry *entry; |
| 464 | |
| 465 | entry = &root->entries[i]; |
| 466 | |
| 467 | cbmem_print_entry(i, entry->id, entry->start, entry->size); |
| 468 | } |
| 469 | } |
| 470 | #endif /* __PRE_RAM__ */ |