Aaron Durbin | 4904802 | 2014-02-18 21:55:02 -0600 | [diff] [blame] | 1 | /* |
| 2 | * This file is part of the coreboot project. |
| 3 | * |
| 4 | * Copyright (C) 2003-2004 Eric Biederman |
| 5 | * Copyright (C) 2005-2010 coresystems GmbH |
| 6 | * Copyright (C) 2014 Google Inc. |
| 7 | * |
| 8 | * This program is free software; you can redistribute it and/or modify |
| 9 | * it under the terms of the GNU General Public License as published by |
| 10 | * the Free Software Foundation; version 2 of the License. |
| 11 | * |
| 12 | * This program is distributed in the hope that it will be useful, |
| 13 | * but WITHOUT ANY WARRANTY; without even the implied warranty of |
| 14 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the |
| 15 | * GNU General Public License for more details. |
Aaron Durbin | 4904802 | 2014-02-18 21:55:02 -0600 | [diff] [blame] | 16 | */ |
| 17 | |
| 18 | #include <console/console.h> |
| 19 | #include <bootmem.h> |
| 20 | #include <cbmem.h> |
| 21 | #include <device/resource.h> |
| 22 | #include <stdlib.h> |
| 23 | |
| 24 | static struct memranges bootmem; |
| 25 | |
| 26 | void bootmem_init(void) |
| 27 | { |
| 28 | const unsigned long cacheable = IORESOURCE_CACHEABLE; |
| 29 | const unsigned long reserved = IORESOURCE_RESERVE; |
| 30 | struct memranges *bm = &bootmem; |
| 31 | |
| 32 | /* |
| 33 | * Fill the memory map out. The order of operations is important in |
| 34 | * that each overlapping range will take over the next. Therefore, |
| 35 | * add cacheable resources as RAM then add the reserved resources. |
| 36 | */ |
| 37 | memranges_init(bm, cacheable, cacheable, LB_MEM_RAM); |
| 38 | memranges_add_resources(bm, reserved, reserved, LB_MEM_RESERVED); |
| 39 | |
| 40 | /* Add memory used by CBMEM. */ |
| 41 | cbmem_add_bootmem(); |
| 42 | } |
| 43 | |
/* Insert a region of the given LB_MEM_* type into the boot memory map,
 * taking precedence over any overlapping portion of existing entries. */
void bootmem_add_range(uint64_t start, uint64_t size, uint32_t type)
{
	memranges_insert(&bootmem, start, size, type);
}
| 48 | |
| 49 | void bootmem_write_memory_table(struct lb_memory *mem) |
| 50 | { |
| 51 | const struct range_entry *r; |
| 52 | struct lb_memory_range *lb_r; |
| 53 | |
| 54 | lb_r = &mem->map[0]; |
| 55 | |
| 56 | bootmem_dump_ranges(); |
| 57 | |
| 58 | memranges_each_entry(r, &bootmem) { |
| 59 | lb_r->start = pack_lb64(range_entry_base(r)); |
| 60 | lb_r->size = pack_lb64(range_entry_size(r)); |
| 61 | lb_r->type = range_entry_tag(r); |
| 62 | |
| 63 | lb_r++; |
| 64 | mem->size += sizeof(struct lb_memory_range); |
| 65 | } |
| 66 | } |
| 67 | |
/* Pairing of an LB_MEM_* tag value with its human-readable name. */
struct range_strings {
	unsigned long tag;
	const char *str;
};

/* Names for each known boot memory range type, used when dumping the map. */
static const struct range_strings type_strings[] = {
	{ LB_MEM_RAM, "RAM" },
	{ LB_MEM_RESERVED, "RESERVED" },
	{ LB_MEM_ACPI, "ACPI" },
	{ LB_MEM_NVS, "NVS" },
	{ LB_MEM_UNUSABLE, "UNUSABLE" },
	{ LB_MEM_VENDOR_RSVD, "VENDOR RESERVED" },
	{ LB_MEM_TABLE, "CONFIGURATION TABLES" },
};
| 82 | |
| 83 | static const char *bootmem_range_string(unsigned long tag) |
| 84 | { |
| 85 | int i; |
| 86 | |
| 87 | for (i = 0; i < ARRAY_SIZE(type_strings); i++) { |
| 88 | if (type_strings[i].tag == tag) |
| 89 | return type_strings[i].str; |
| 90 | } |
| 91 | |
| 92 | return "UNKNOWN!"; |
| 93 | } |
| 94 | |
| 95 | void bootmem_dump_ranges(void) |
| 96 | { |
| 97 | int i; |
| 98 | const struct range_entry *r; |
| 99 | |
| 100 | i = 0; |
| 101 | memranges_each_entry(r, &bootmem) { |
| 102 | printk(BIOS_DEBUG, "%2d. %016llx-%016llx: %s\n", |
| 103 | i, range_entry_base(r), range_entry_end(r) - 1, |
| 104 | bootmem_range_string(range_entry_tag(r))); |
| 105 | i++; |
| 106 | } |
| 107 | } |
| 108 | |
| 109 | int bootmem_region_targets_usable_ram(uint64_t start, uint64_t size) |
| 110 | { |
| 111 | const struct range_entry *r; |
| 112 | uint64_t end = start + size; |
| 113 | |
| 114 | memranges_each_entry(r, &bootmem) { |
| 115 | /* All further bootmem entries are beyond this range. */ |
| 116 | if (end <= range_entry_base(r)) |
| 117 | break; |
| 118 | |
| 119 | if (start >= range_entry_base(r) && end <= range_entry_end(r)) { |
| 120 | if (range_entry_tag(r) == LB_MEM_RAM) |
| 121 | return 1; |
| 122 | } |
| 123 | } |
| 124 | return 0; |
| 125 | } |
| 126 | |
/*
 * Carve a buffer of the given size out of usable RAM below 4GiB and
 * reserve it in the boot memory map. The highest suitable RAM region is
 * chosen and the buffer is placed at its top. Returns a pointer to the
 * buffer, or NULL if no region can hold it.
 */
void *bootmem_allocate_buffer(size_t size)
{
	const struct range_entry *r;
	const struct range_entry *region;
	/* All allocated buffers fall below the 32-bit boundary. */
	const resource_t max_addr = 1ULL << 32;
	resource_t begin;
	resource_t end;

	/* 4KiB alignment. */
	size = ALIGN(size, 4096);
	region = NULL;
	/* Entries are walked in address order and each match overwrites
	 * the previous one, so the last match is the highest region. */
	memranges_each_entry(r, &bootmem) {
		if (range_entry_size(r) < size)
			continue;

		if (range_entry_tag(r) != LB_MEM_RAM)
			continue;

		/* Skip regions that start at or above the 4GiB limit. */
		if (range_entry_base(r) >= max_addr)
			continue;

		/* Clamp the usable part of the region to below 4GiB. */
		end = range_entry_end(r);
		if (end > max_addr)
			end = max_addr;

		/* The clamped portion must still fit the buffer. */
		if ((end - range_entry_base(r)) < size)
			continue;

		region = r;
	}

	if (region == NULL)
		return NULL;

	/* region now points to the highest usable region for the given size. */
	begin = range_entry_base(region);
	end = range_entry_end(region);
	if (end > max_addr)
		end = max_addr;
	/* Place the buffer at the very top of the clamped region. */
	begin = end - size;

	/* Mark buffer as unusable for future buffer use. */
	bootmem_add_range(begin, size, LB_MEM_UNUSABLE);

	return (void *)(uintptr_t)begin;
}