/*
 * This file is part of the coreboot project.
 *
 * Copyright (C) 2013 Google, Inc.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; version 2 of the License.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */
#include <stdlib.h>
#include <console/console.h>
#include <memrange.h>
static inline void range_entry_link(struct range_entry **prev_ptr,
				    struct range_entry *r)
{
	r->next = *prev_ptr;
	*prev_ptr = r;
}

static inline void range_entry_unlink(struct range_entry **prev_ptr,
				      struct range_entry *r)
{
	*prev_ptr = r->next;
	r->next = NULL;
}
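
/*
 * Illustrative sketch (not part of the list code itself): prev_ptr is a
 * pointer to the previous element's next field, or to the list head, so
 * the same helpers can splice at any position with no front-of-list
 * special case. Assuming two caller-owned entries a and b:
 *
 *	struct range_entry *head = NULL;
 *
 *	range_entry_link(&head, &a);	// head: a
 *	range_entry_link(&head, &b);	// head: b -> a
 *	range_entry_unlink(&head, &b);	// head: a
 */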

static inline void range_entry_unlink_and_free(struct memranges *ranges,
					       struct range_entry **prev_ptr,
					       struct range_entry *r)
{
	range_entry_unlink(prev_ptr, r);
	range_entry_link(&ranges->free_list, r);
}

/* Return an entry from the free list when one is available. Fall back to
 * malloc() only in ramstage, where a heap exists; in other stages the
 * caller must have seeded the free list via memranges_init_empty(). */
static struct range_entry *alloc_range(struct memranges *ranges)
{
	if (ranges->free_list != NULL) {
		struct range_entry *r;

		r = ranges->free_list;
		range_entry_unlink(&ranges->free_list, r);
		return r;
	}
	if (ENV_RAMSTAGE)
		return malloc(sizeof(struct range_entry));
	return NULL;
}

static inline struct range_entry *
range_list_add(struct memranges *ranges, struct range_entry **prev_ptr,
	       resource_t begin, resource_t end, unsigned long tag)
{
	struct range_entry *new_entry;

	new_entry = alloc_range(ranges);
	if (new_entry == NULL) {
		printk(BIOS_ERR, "Could not allocate range_entry!\n");
		return NULL;
	}
	new_entry->begin = begin;
	new_entry->end = end;
	new_entry->tag = tag;
	range_entry_link(prev_ptr, new_entry);

	return new_entry;
}

static void merge_neighbor_entries(struct memranges *ranges)
{
	struct range_entry *cur;
	struct range_entry *prev;

	prev = NULL;
	/* Merge all neighbors and delete/free the leftover entry. */
	for (cur = ranges->entries; cur != NULL; cur = cur->next) {
		/* First entry. Just set prev. */
		if (prev == NULL) {
			prev = cur;
			continue;
		}

		/* If the previous entry merges with the current, update the
		 * previous entry to cover the full range and delete the
		 * current entry from the list. */
		if (prev->end + 1 >= cur->begin && prev->tag == cur->tag) {
			prev->end = cur->end;
			range_entry_unlink_and_free(ranges, &prev->next, cur);
			/* Set cur to prev so cur->next is valid since cur
			 * was just unlinked and freed. */
			cur = prev;
			continue;
		}

		prev = cur;
	}
}
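
/*
 * Illustrative example: adjacent entries [0x0, 0xfff] and [0x1000, 0x1fff]
 * carrying the same tag satisfy prev->end + 1 >= cur->begin and collapse
 * into a single [0x0, 0x1fff] entry; entries with differing tags are left
 * untouched even when they abut.
 */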

static void remove_memranges(struct memranges *ranges,
			     resource_t begin, resource_t end,
			     unsigned long unused)
{
	struct range_entry *cur;
	struct range_entry *next;
	struct range_entry **prev_ptr;

	prev_ptr = &ranges->entries;
	for (cur = ranges->entries; cur != NULL; cur = next) {
		resource_t tmp_end;

		/* Cache the next value to handle unlinks. */
		next = cur->next;

		/* No other ranges are affected. */
		if (end < cur->begin)
			break;

		/* The removal range starts after this one. */
		if (begin > cur->end) {
			prev_ptr = &cur->next;
			continue;
		}

		/* The removal range overlaps with the current entry either
		 * partially or fully. However, we need to adjust the removal
		 * range for any holes. */
		if (begin <= cur->begin) {
			begin = cur->begin;

			/* Full removal. */
			if (end >= cur->end) {
				begin = cur->end + 1;
				range_entry_unlink_and_free(ranges, prev_ptr,
							    cur);
				continue;
			}
		}

		/* prev_ptr can be set now that the unlink path wasn't taken. */
		prev_ptr = &cur->next;

		/* Clip the end fragment to do proper splitting. */
		tmp_end = end;
		if (end > cur->end)
			tmp_end = cur->end;

		/* Hole punched in middle of entry. */
		if (begin > cur->begin && tmp_end < cur->end) {
			range_list_add(ranges, &cur->next, end + 1, cur->end,
				       cur->tag);
			cur->end = begin - 1;
			break;
		}

		/* Removal at beginning. */
		if (begin == cur->begin)
			cur->begin = tmp_end + 1;

		/* Removal at end. */
		if (tmp_end == cur->end)
			cur->end = begin - 1;
	}
}
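
/*
 * Worked example (illustrative): starting from a single entry covering
 * [0x0, 0xffff], removing [0x4000, 0x7fff] takes the hole-punch path
 * above and leaves two entries:
 *
 *	[0x0,    0x3fff]
 *	[0x8000, 0xffff]
 *
 * A subsequent removal of [0x8000, 0xffff] matches the full-removal path
 * and returns the second entry to the free list.
 */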

static void merge_add_memranges(struct memranges *ranges,
				resource_t begin, resource_t end,
				unsigned long tag)
{
	struct range_entry *cur;
	struct range_entry **prev_ptr;

	prev_ptr = &ranges->entries;

	/* Remove all existing entries covered by the range. */
	remove_memranges(ranges, begin, end, -1);

	/* Find the entry to place the new entry after. Since
	 * remove_memranges() was called above, there is a guaranteed
	 * spot for this new entry. */
	for (cur = ranges->entries; cur != NULL; cur = cur->next) {
		/* Found insertion spot before current entry. */
		if (end < cur->begin)
			break;

		/* Keep track of previous entry to insert new entry after it. */
		prev_ptr = &cur->next;

		/* The new entry starts after this one. */
		if (begin > cur->end)
			continue;
	}

	/* Add new entry and merge with neighbors. */
	range_list_add(ranges, prev_ptr, begin, end, tag);
	merge_neighbor_entries(ranges);
}

void memranges_update_tag(struct memranges *ranges, unsigned long old_tag,
			  unsigned long new_tag)
{
	struct range_entry *r;

	memranges_each_entry(r, ranges) {
		if (range_entry_tag(r) == old_tag)
			range_entry_update_tag(r, new_tag);
	}

	merge_neighbor_entries(ranges);
}

typedef void (*range_action_t)(struct memranges *ranges,
			       resource_t begin, resource_t end,
			       unsigned long tag);

static void do_action(struct memranges *ranges,
		      resource_t base, resource_t size, unsigned long tag,
		      range_action_t action)
{
	resource_t end;
	resource_t begin;

	if (size == 0)
		return;

	/* The addresses are aligned to 4096 bytes: the begin address is
	 * aligned down while the end address is aligned up to be conservative
	 * about the full range covered. */
	begin = ALIGN_DOWN(base, 4096);
	end = begin + size + (base - begin);
	end = ALIGN_UP(end, 4096) - 1;
	action(ranges, begin, end, tag);
}
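
/*
 * Worked example (illustrative): for base = 0x1234 and size = 0x100 the
 * range is widened to whole 4KiB pages before the action runs:
 *
 *	begin = ALIGN_DOWN(0x1234, 4096)           = 0x1000
 *	end   = 0x1000 + 0x100 + (0x1234 - 0x1000) = 0x1334
 *	end   = ALIGN_UP(0x1334, 4096) - 1         = 0x1fff
 *
 * so the action is applied to [0x1000, 0x1fff].
 */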

void memranges_create_hole(struct memranges *ranges,
			   resource_t base, resource_t size)
{
	do_action(ranges, base, size, -1, remove_memranges);
}

void memranges_insert(struct memranges *ranges,
		      resource_t base, resource_t size, unsigned long tag)
{
	do_action(ranges, base, size, tag, merge_add_memranges);
}

struct collect_context {
	struct memranges *ranges;
	unsigned long tag;
	memrange_filter_t filter;
};

static void collect_ranges(void *gp, struct device *dev, struct resource *res)
{
	struct collect_context *ctx = gp;

	if (res->size == 0)
		return;

	if (ctx->filter == NULL || ctx->filter(dev, res))
		memranges_insert(ctx->ranges, res->base, res->size, ctx->tag);
}

void memranges_add_resources_filter(struct memranges *ranges,
				    unsigned long mask, unsigned long match,
				    unsigned long tag,
				    memrange_filter_t filter)
{
	struct collect_context context;

	/* Only deal with MEM resources. */
	mask |= IORESOURCE_MEM;
	match |= IORESOURCE_MEM;

	context.ranges = ranges;
	context.tag = tag;
	context.filter = filter;
	search_global_resources(mask, match, collect_ranges, &context);
}

void memranges_add_resources(struct memranges *ranges,
			     unsigned long mask, unsigned long match,
			     unsigned long tag)
{
	memranges_add_resources_filter(ranges, mask, match, tag, NULL);
}

void memranges_init_empty(struct memranges *ranges, struct range_entry *to_free,
			  size_t num_free)
{
	size_t i;

	ranges->entries = NULL;
	ranges->free_list = NULL;

	for (i = 0; i < num_free; i++)
		range_entry_link(&ranges->free_list, &to_free[i]);
}
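
/*
 * Usage sketch (illustrative, for a stage without malloc()): seed the free
 * list with caller-owned storage so alloc_range() never touches the heap.
 * The array size and the base/size/tag values below are arbitrary:
 *
 *	static struct range_entry entries[8];
 *	struct memranges mem;
 *
 *	memranges_init_empty(&mem, entries, ARRAY_SIZE(entries));
 *	memranges_insert(&mem, 0x100000, 0x1000, 0);
 */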

void memranges_init(struct memranges *ranges,
		    unsigned long mask, unsigned long match,
		    unsigned long tag)
{
	memranges_init_empty(ranges, NULL, 0);
	memranges_add_resources(ranges, mask, match, tag);
}
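
/*
 * Usage sketch (illustrative): collect every cacheable memory resource and
 * walk the resulting map. range_entry_base()/range_entry_end() come from
 * <memrange.h>; the mask/match pair follows the IORESOURCE_* flag
 * convention used by the resource allocator:
 *
 *	struct memranges mem;
 *	struct range_entry *r;
 *
 *	memranges_init(&mem, IORESOURCE_CACHEABLE, IORESOURCE_CACHEABLE, 0);
 *	memranges_each_entry(r, &mem)
 *		printk(BIOS_DEBUG, "%llx-%llx\n", range_entry_base(r),
 *		       range_entry_end(r));
 *	memranges_teardown(&mem);
 */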

void memranges_teardown(struct memranges *ranges)
{
	while (ranges->entries != NULL) {
		range_entry_unlink_and_free(ranges, &ranges->entries,
					    ranges->entries);
	}
}

void memranges_fill_holes_up_to(struct memranges *ranges,
				resource_t limit, unsigned long tag)
{
	struct range_entry *cur;
	struct range_entry *prev;

	prev = NULL;
	for (cur = ranges->entries; cur != NULL; cur = cur->next) {
		/* First entry. Just set prev. */
		if (prev == NULL) {
			prev = cur;
			continue;
		}

		/* If the previous entry does not directly precede the current
		 * entry, then add a new entry just after the previous one. */
		if (range_entry_end(prev) != cur->begin) {
			resource_t end;

			end = cur->begin - 1;
			if (end >= limit)
				end = limit - 1;
			range_list_add(ranges, &prev->next,
				       range_entry_end(prev), end, tag);
		}

		prev = cur;

		/* Hit the requested range limit. No other entries after this
		 * are affected. */
		if (cur->begin >= limit)
			break;
	}

	/* Handle the case where the limit was never reached. A new entry needs
	 * to be added to cover the range up to the limit. */
	if (prev != NULL && range_entry_end(prev) < limit)
		range_list_add(ranges, &prev->next, range_entry_end(prev),
			       limit - 1, tag);

	/* Merge all entries that were newly added. */
	merge_neighbor_entries(ranges);
}
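
/*
 * Illustrative example: given entries [0x0, 0xfff] and [0x2000, 0x2fff]
 * and limit = 0x4000, the hole [0x1000, 0x1fff] and the tail
 * [0x3000, 0x3fff] are both filled with new entries carrying the given
 * tag (range_entry_end() returns the exclusive end, i.e. end + 1).
 */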

struct range_entry *memranges_next_entry(struct memranges *ranges,
					 const struct range_entry *r)
{
	return r->next;
}