/*
 * This file is part of the coreboot project.
 *
 * Copyright (C) 2013 Google, Inc.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; version 2 of the License.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 */
#include <stdlib.h>
#include <console/console.h>
#include <memrange.h>

/* coreboot doesn't have a free() function, so keep a cache of freed
 * entries for reuse. */
static struct range_entry *free_list;
static inline void range_entry_link(struct range_entry **prev_ptr,
				    struct range_entry *r)
{
	r->next = *prev_ptr;
	*prev_ptr = r;
}

static inline void range_entry_unlink(struct range_entry **prev_ptr,
				      struct range_entry *r)
{
	*prev_ptr = r->next;
	r->next = NULL;
}

static inline void range_entry_unlink_and_free(struct range_entry **prev_ptr,
					       struct range_entry *r)
{
	range_entry_unlink(prev_ptr, r);
	range_entry_link(&free_list, r);
}

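/* Return a range_entry, reusing one from the free list when possible and
 * falling back to malloc(). Returns NULL when allocation fails. */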
static struct range_entry *alloc_range(void)
{
	if (free_list != NULL) {
		struct range_entry *r;

		r = free_list;
		range_entry_unlink(&free_list, r);
		return r;
	}
	return malloc(sizeof(struct range_entry));
}

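/* Allocate a new entry covering [begin, end] with the given tag and link it
 * in front of *prev_ptr. Returns the new entry, or NULL on allocation
 * failure. */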
static inline struct range_entry *
range_list_add(struct range_entry **prev_ptr, resource_t begin, resource_t end,
	       unsigned long tag)
{
	struct range_entry *new_entry;

	new_entry = alloc_range();
	if (new_entry == NULL) {
		printk(BIOS_ERR, "Could not allocate range_entry!\n");
		return NULL;
	}
	new_entry->begin = begin;
	new_entry->end = end;
	new_entry->tag = tag;
	range_entry_link(prev_ptr, new_entry);

	return new_entry;
}

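/* Collapse adjacent or overlapping entries that share the same tag into a
 * single entry. Assumes the list is sorted by address. */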
static void merge_neighbor_entries(struct memranges *ranges)
{
	struct range_entry *cur;
	struct range_entry *prev;

	prev = NULL;
	/* Merge all neighbors and delete/free the leftover entry. */
	for (cur = ranges->entries; cur != NULL; cur = cur->next) {
		/* First entry. Just set prev. */
		if (prev == NULL) {
			prev = cur;
			continue;
		}

		/* If the previous entry merges with the current one, update
		 * the previous entry to cover the full range and delete the
		 * current entry from the list. */
		if (prev->end + 1 >= cur->begin && prev->tag == cur->tag) {
			prev->end = cur->end;
			range_entry_unlink_and_free(&prev->next, cur);
			/* Set cur to prev so cur->next is valid, since cur
			 * was just unlinked and freed. */
			cur = prev;
			continue;
		}

		prev = cur;
	}
}

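/* Remove [begin, end] from every entry it intersects: fully covered entries
 * are unlinked, partial overlaps are clipped, and an interior overlap splits
 * the entry in two. The last parameter is unused; it only exists so the
 * function matches range_action_t. */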
static void remove_memranges(struct memranges *ranges,
			     resource_t begin, resource_t end,
			     unsigned long unused)
{
	struct range_entry *cur;
	struct range_entry *next;
	struct range_entry **prev_ptr;

	prev_ptr = &ranges->entries;
	for (cur = ranges->entries; cur != NULL; cur = next) {
		resource_t tmp_end;

		/* Cache the next value to handle unlinks. */
		next = cur->next;

		/* No other ranges are affected. */
		if (end < cur->begin)
			break;

		/* The removal range starts after this one. */
		if (begin > cur->end) {
			prev_ptr = &cur->next;
			continue;
		}

		/* The removal range overlaps with the current entry either
		 * partially or fully. However, we need to adjust the removal
		 * range for any holes. */
		if (begin <= cur->begin) {
			begin = cur->begin;

			/* Full removal. */
			if (end >= cur->end) {
				begin = cur->end + 1;
				range_entry_unlink_and_free(prev_ptr, cur);
				continue;
			}
		}

		/* prev_ptr can be set now that the unlink path wasn't taken. */
		prev_ptr = &cur->next;

		/* Clip the end fragment to do proper splitting. */
		tmp_end = end;
		if (end > cur->end)
			tmp_end = cur->end;

		/* Hole punched in middle of entry. */
		if (begin > cur->begin && tmp_end < cur->end) {
			range_list_add(&cur->next, end + 1, cur->end, cur->tag);
			cur->end = begin - 1;
			break;
		}

		/* Removal at beginning. */
		if (begin == cur->begin)
			cur->begin = tmp_end + 1;

		/* Removal at end. */
		if (tmp_end == cur->end)
			cur->end = begin - 1;
	}
}

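/* Insert [begin, end] with the given tag. Existing entries covered by the
 * range are removed first, so the insertion spot is guaranteed to exist;
 * neighbors with the same tag are then merged. */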
static void merge_add_memranges(struct memranges *ranges,
				resource_t begin, resource_t end,
				unsigned long tag)
{
	struct range_entry *cur;
	struct range_entry **prev_ptr;

	prev_ptr = &ranges->entries;

	/* Remove all existing entries covered by the range. */
	remove_memranges(ranges, begin, end, -1);

	/* Find the entry to place the new entry after. Since
	 * remove_memranges() was called above there is a guaranteed
	 * spot for this new entry. */
	for (cur = ranges->entries; cur != NULL; cur = cur->next) {
		/* Found insertion spot before current entry. */
		if (end < cur->begin)
			break;

		/* Keep track of previous entry to insert new entry after it. */
		prev_ptr = &cur->next;
	}

	/* Add new entry and merge with neighbors. */
	range_list_add(prev_ptr, begin, end, tag);
	merge_neighbor_entries(ranges);
}

typedef void (*range_action_t)(struct memranges *ranges,
			       resource_t begin, resource_t end,
			       unsigned long tag);

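/* Align [base, base + size) to 4KiB boundaries and apply action to the
 * resulting inclusive range. */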
static void do_action(struct memranges *ranges,
		      resource_t base, resource_t size, unsigned long tag,
		      range_action_t action)
{
	resource_t end;
	resource_t begin;

	/* The addresses are aligned to 4096 bytes: the begin address is
	 * aligned down while the end address is aligned up to be conservative
	 * about the full range covered. */
	begin = ALIGN_DOWN(base, 4096);
	end = ALIGN_UP(base + size, 4096) - 1;
	action(ranges, begin, end, tag);
}

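/* Remove [base, base + size) from the tracked ranges, expanded outward to
 * 4KiB alignment. */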
void memranges_create_hole(struct memranges *ranges,
			   resource_t base, resource_t size)
{
	do_action(ranges, base, size, -1, remove_memranges);
}

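/* Insert [base, base + size), expanded outward to 4KiB alignment, with the
 * given tag, merging with same-tagged neighbors. */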
void memranges_insert(struct memranges *ranges,
		      resource_t base, resource_t size, unsigned long tag)
{
	do_action(ranges, base, size, tag, merge_add_memranges);
}

struct collect_context {
	struct memranges *ranges;
	unsigned long tag;
};

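/* Callback for search_global_resources(): insert each matching resource
 * into the context's range list. */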
static void collect_ranges(void *gp, struct device *dev, struct resource *res)
{
	struct collect_context *ctx = gp;

	memranges_insert(ctx->ranges, res->base, res->size, ctx->tag);
}

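/* Add all memory resources matching (mask, match) to the range list,
 * tagging each with the given tag. */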
void memranges_add_resources(struct memranges *ranges,
			     unsigned long mask, unsigned long match,
			     unsigned long tag)
{
	struct collect_context context;

	/* Only deal with MEM resources. */
	mask |= IORESOURCE_MEM;
	match |= IORESOURCE_MEM;

	context.ranges = ranges;
	context.tag = tag;
	search_global_resources(mask, match, collect_ranges, &context);
}

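/* Initialize an empty range list and seed it with all memory resources
 * matching (mask, match), tagged with tag. */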
void memranges_init(struct memranges *ranges,
		    unsigned long mask, unsigned long match,
		    unsigned long tag)
{
	ranges->entries = NULL;
	memranges_add_resources(ranges, mask, match, tag);
}

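/* Release all entries by moving them to the free list for reuse. */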
void memranges_teardown(struct memranges *ranges)
{
	while (ranges->entries != NULL) {
		range_entry_unlink_and_free(&ranges->entries, ranges->entries);
	}
}

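/* Fill every hole between existing entries, up to limit, with new entries
 * of the given tag; existing entries keep their tags. */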
void memranges_fill_holes_up_to(struct memranges *ranges,
				resource_t limit, unsigned long tag)
{
	struct range_entry *cur;
	struct range_entry *prev;

	prev = NULL;
	for (cur = ranges->entries; cur != NULL; cur = cur->next) {
		/* First entry. Just set prev. */
		if (prev == NULL) {
			prev = cur;
			continue;
		}

		/* If the previous entry does not directly precede the current
		 * entry then add a new entry just after the previous one. */
		if (range_entry_end(prev) != cur->begin) {
			resource_t end;

			end = cur->begin - 1;
			if (end >= limit)
				end = limit - 1;
			range_list_add(&prev->next, range_entry_end(prev),
				       end, tag);
		}

		prev = cur;

		/* Hit the requested range limit. No other entries after this
		 * are affected. */
		if (cur->begin >= limit)
			break;
	}

	/* Handle the case where the limit was never reached. A new entry needs
	 * to be added to cover the range up to the limit. */
	if (prev != NULL && range_entry_end(prev) < limit)
		range_list_add(&prev->next, range_entry_end(prev),
			       limit - 1, tag);

	/* Merge all entries that were newly added. */
	merge_neighbor_entries(ranges);
}

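/* Return the entry following r, or NULL at the end of the list. The ranges
 * parameter is currently unused. */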
struct range_entry *memranges_next_entry(struct memranges *ranges,
					 const struct range_entry *r)
{
	return r->next;
}
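
/*
 * Example usage (a sketch, not taken from any particular board; the flag and
 * tag values below are illustrative):
 *
 *	struct memranges ranges;
 *	struct range_entry *r;
 *
 *	// Track all cacheable memory resources, tagging them with 0.
 *	memranges_init(&ranges, IORESOURCE_CACHEABLE, IORESOURCE_CACHEABLE, 0);
 *	// Punch a hole for the legacy VGA window at 0xa0000.
 *	memranges_create_hole(&ranges, 0xa0000, 0x20000);
 *	for (r = ranges.entries; r != NULL; r = memranges_next_entry(&ranges, r))
 *		printk(BIOS_DEBUG, "%llx-%llx tag %lu\n",
 *		       (unsigned long long)r->begin,
 *		       (unsigned long long)r->end, r->tag);
 *	memranges_teardown(&ranges);
 */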