/*
 * This file is part of the coreboot project.
 *
 * Copyright (C) 2013 Google, Inc.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; version 2 of the License.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */

#include <assert.h>
#include <stdlib.h>
#include <commonlib/helpers.h>
#include <console/console.h>
#include <memrange.h>

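/* Link r into the list at the position pointed to by prev_ptr. */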
static inline void range_entry_link(struct range_entry **prev_ptr,
				    struct range_entry *r)
{
	r->next = *prev_ptr;
	*prev_ptr = r;
}

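/* Unlink r from the list at *prev_ptr, clearing its next pointer. */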
static inline void range_entry_unlink(struct range_entry **prev_ptr,
				      struct range_entry *r)
{
	*prev_ptr = r->next;
	r->next = NULL;
}

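/* Unlink r from its list and place it on the free list for reuse. */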
static inline void range_entry_unlink_and_free(struct memranges *ranges,
					       struct range_entry **prev_ptr,
					       struct range_entry *r)
{
	range_entry_unlink(prev_ptr, r);
	range_entry_link(&ranges->free_list, r);
}

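/* Get a range_entry, preferring the free list. Only the payload-loading
 * stage may fall back to malloc(); other stages are limited to the entries
 * seeded into the free list at init time. */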
static struct range_entry *alloc_range(struct memranges *ranges)
{
	if (ranges->free_list != NULL) {
		struct range_entry *r;

		r = ranges->free_list;
		range_entry_unlink(&ranges->free_list, r);
		return r;
	}
	if (ENV_PAYLOAD_LOADER)
		return malloc(sizeof(struct range_entry));
	return NULL;
}

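/* Allocate an entry covering [begin, end] with the given tag and link it
 * into the list at *prev_ptr. Returns NULL when allocation fails. */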
static inline struct range_entry *
range_list_add(struct memranges *ranges, struct range_entry **prev_ptr,
	       resource_t begin, resource_t end, unsigned long tag)
{
	struct range_entry *new_entry;

	new_entry = alloc_range(ranges);
	if (new_entry == NULL) {
		printk(BIOS_ERR, "Could not allocate range_entry!\n");
		return NULL;
	}
	new_entry->begin = begin;
	new_entry->end = end;
	new_entry->tag = tag;
	range_entry_link(prev_ptr, new_entry);

	return new_entry;
}

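/* Collapse contiguous or overlapping entries that share a tag into one
 * entry, returning the leftovers to the free list. */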
static void merge_neighbor_entries(struct memranges *ranges)
{
	struct range_entry *cur;
	struct range_entry *prev;

	prev = NULL;
	/* Merge all neighbors and delete/free the leftover entry. */
	for (cur = ranges->entries; cur != NULL; cur = cur->next) {
		/* First entry. Just set prev. */
		if (prev == NULL) {
			prev = cur;
			continue;
		}

		/* If the previous entry merges with the current one, update
		 * the previous entry to cover the full range and delete the
		 * current one from the list. */
		if (prev->end + 1 >= cur->begin && prev->tag == cur->tag) {
			prev->end = cur->end;
			range_entry_unlink_and_free(ranges, &prev->next, cur);
			/* Set cur to prev so cur->next is valid since cur
			 * was just unlinked and freed. */
			cur = prev;
			continue;
		}

		prev = cur;
	}
}

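/* Remove [begin, end] from ranges: fully covered entries are unlinked and
 * freed while partially covered ones are trimmed or split. The unused
 * argument only exists to match the range_action_t signature. */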
static void remove_memranges(struct memranges *ranges,
			     resource_t begin, resource_t end,
			     unsigned long unused)
{
	struct range_entry *cur;
	struct range_entry *next;
	struct range_entry **prev_ptr;

	prev_ptr = &ranges->entries;
	for (cur = ranges->entries; cur != NULL; cur = next) {
		resource_t tmp_end;

		/* Cache the next value to handle unlinks. */
		next = cur->next;

		/* No other ranges are affected. */
		if (end < cur->begin)
			break;

		/* The removal range starts after this one. */
		if (begin > cur->end) {
			prev_ptr = &cur->next;
			continue;
		}

		/* The removal range overlaps with the current entry either
		 * partially or fully. However, we need to adjust the removal
		 * range for any holes. */
		if (begin <= cur->begin) {
			begin = cur->begin;

			/* Full removal. */
			if (end >= cur->end) {
				begin = cur->end + 1;
				range_entry_unlink_and_free(ranges, prev_ptr,
							    cur);
				continue;
			}
		}

		/* prev_ptr can be set now that the unlink path wasn't taken. */
		prev_ptr = &cur->next;

		/* Clip the end fragment to do proper splitting. */
		tmp_end = end;
		if (end > cur->end)
			tmp_end = cur->end;

		/* Hole punched in middle of entry. */
		if (begin > cur->begin && tmp_end < cur->end) {
			range_list_add(ranges, &cur->next, end + 1, cur->end,
				       cur->tag);
			cur->end = begin - 1;
			break;
		}

		/* Removal at beginning. */
		if (begin == cur->begin)
			cur->begin = tmp_end + 1;

		/* Removal at end. */
		if (tmp_end == cur->end)
			cur->end = begin - 1;
	}
}

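/* Insert [begin, end] with the given tag, removing whatever the range used
 * to cover before merging with compatible neighbors. */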
static void merge_add_memranges(struct memranges *ranges,
				resource_t begin, resource_t end,
				unsigned long tag)
{
	struct range_entry *cur;
	struct range_entry **prev_ptr;

	prev_ptr = &ranges->entries;

	/* Remove all existing entries covered by the range. */
	remove_memranges(ranges, begin, end, -1);

	/* Find the entry to place the new entry after. Since
	 * remove_memranges() was called above, there is a guaranteed
	 * spot for this new entry. */
	for (cur = ranges->entries; cur != NULL; cur = cur->next) {
		/* Found insertion spot before current entry. */
		if (end < cur->begin)
			break;

		/* Keep track of previous entry to insert new entry after it. */
		prev_ptr = &cur->next;

		/* The new entry starts after this one. */
		if (begin > cur->end)
			continue;
	}

	/* Add new entry and merge with neighbors. */
	range_list_add(ranges, prev_ptr, begin, end, tag);
	merge_neighbor_entries(ranges);
}

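/* Give all entries tagged with old_tag the tag new_tag and merge any
 * entries made adjacent by the change. */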
void memranges_update_tag(struct memranges *ranges, unsigned long old_tag,
			  unsigned long new_tag)
{
	struct range_entry *r;

	memranges_each_entry(r, ranges) {
		if (range_entry_tag(r) == old_tag)
			range_entry_update_tag(r, new_tag);
	}

	merge_neighbor_entries(ranges);
}

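/* An action takes an inclusive [begin, end] range that has already been
 * expanded to the alignment of the memranges. */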
typedef void (*range_action_t)(struct memranges *ranges,
			       resource_t begin, resource_t end,
			       unsigned long tag);

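/* Convert (base, size) to an aligned, inclusive [begin, end] range and
 * apply the action to it. */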
static void do_action(struct memranges *ranges,
		      resource_t base, resource_t size, unsigned long tag,
		      range_action_t action)
{
	resource_t end;
	resource_t begin;

	if (size == 0)
		return;

	/* The addresses are aligned to ranges->align: the begin address is
	 * aligned down while the end address is aligned up to be conservative
	 * about the full range covered. */
	begin = ALIGN_DOWN(base, ranges->align);
	end = begin + size + (base - begin);
	end = ALIGN_UP(end, ranges->align) - 1;
	action(ranges, begin, end, tag);
}

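/* Remove [base, base + size) from all entries, expanding the hole to the
 * alignment of the memranges. */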
void memranges_create_hole(struct memranges *ranges,
			   resource_t base, resource_t size)
{
	do_action(ranges, base, size, -1, remove_memranges);
}

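/* Add [base, base + size) with the given tag, expanded to the alignment of
 * the memranges, overwriting any overlapping entries. */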
void memranges_insert(struct memranges *ranges,
		      resource_t base, resource_t size, unsigned long tag)
{
	do_action(ranges, base, size, tag, merge_add_memranges);
}

struct collect_context {
	struct memranges *ranges;
	unsigned long tag;
	memrange_filter_t filter;
};

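/* search_global_resources() callback: add each non-empty resource that
 * passes the optional filter. */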
static void collect_ranges(void *gp, struct device *dev, struct resource *res)
{
	struct collect_context *ctx = gp;

	if (res->size == 0)
		return;

	if (ctx->filter == NULL || ctx->filter(dev, res))
		memranges_insert(ctx->ranges, res->base, res->size, ctx->tag);
}

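/* Add all memory resources matching mask/match under the given tag. A
 * non-NULL filter can reject individual device resources. */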
void memranges_add_resources_filter(struct memranges *ranges,
				    unsigned long mask, unsigned long match,
				    unsigned long tag,
				    memrange_filter_t filter)
{
	struct collect_context context;

	/* Only deal with MEM resources. */
	mask |= IORESOURCE_MEM;
	match |= IORESOURCE_MEM;

	context.ranges = ranges;
	context.tag = tag;
	context.filter = filter;
	search_global_resources(mask, match, collect_ranges, &context);
}

void memranges_add_resources(struct memranges *ranges,
			     unsigned long mask, unsigned long match,
			     unsigned long tag)
{
	memranges_add_resources_filter(ranges, mask, match, tag, NULL);
}

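/* Set up an empty memranges structure with the given alignment. The to_free
 * array seeds the free list so that stages without malloc() can still
 * allocate entries. */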
void memranges_init_empty_with_alignment(struct memranges *ranges,
					 struct range_entry *to_free,
					 size_t num_free, size_t align)
{
	size_t i;

	/* Alignment must be a power of 2. */
	assert(IS_POWER_OF_2(align));

	ranges->entries = NULL;
	ranges->free_list = NULL;
	ranges->align = align;

	for (i = 0; i < num_free; i++)
		range_entry_link(&ranges->free_list, &to_free[i]);
}

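/* Initialize the memranges structure with the given alignment and populate
 * it from the global resources matching mask/match. */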
void memranges_init_with_alignment(struct memranges *ranges,
				   unsigned long mask, unsigned long match,
				   unsigned long tag, size_t align)
{
	memranges_init_empty_with_alignment(ranges, NULL, 0, align);
	memranges_add_resources(ranges, mask, match, tag);
}

/* Clone a memrange. The new memrange has the same entries as the old one. */
void memranges_clone(struct memranges *newranges, struct memranges *oldranges)
{
	struct range_entry *r, *cur;
	struct range_entry **prev_ptr;

	memranges_init_empty_with_alignment(newranges, NULL, 0, oldranges->align);

	prev_ptr = &newranges->entries;
	memranges_each_entry(r, oldranges) {
		cur = range_list_add(newranges, prev_ptr, r->begin, r->end,
				     r->tag);
		prev_ptr = &cur->next;
	}
}

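/* Return all entries to the free list, leaving ranges empty. */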
void memranges_teardown(struct memranges *ranges)
{
	while (ranges->entries != NULL) {
		range_entry_unlink_and_free(ranges, &ranges->entries,
					    ranges->entries);
	}
}

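/* Fill each gap between entries below limit with a new entry carrying the
 * given tag, then merge the result. */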
void memranges_fill_holes_up_to(struct memranges *ranges,
				resource_t limit, unsigned long tag)
{
	struct range_entry *cur;
	struct range_entry *prev;

	prev = NULL;
	for (cur = ranges->entries; cur != NULL; cur = cur->next) {
		/* First entry. Just set prev. */
		if (prev == NULL) {
			prev = cur;
			continue;
		}

		/* If the previous entry does not directly precede the current
		 * entry then add a new entry just after the previous one. */
		if (range_entry_end(prev) != cur->begin) {
			resource_t end;

			end = cur->begin - 1;
			if (end >= limit)
				end = limit - 1;
			range_list_add(ranges, &prev->next,
				       range_entry_end(prev), end, tag);
		}

		prev = cur;

		/* Hit the requested range limit. No other entries after this
		 * are affected. */
		if (cur->begin >= limit)
			break;
	}

	/* Handle the case where the limit was never reached. A new entry needs
	 * to be added to cover the range up to the limit. */
	if (prev != NULL && range_entry_end(prev) < limit)
		range_list_add(ranges, &prev->next, range_entry_end(prev),
			       limit - 1, tag);

	/* Merge all entries that were newly added. */
	merge_neighbor_entries(ranges);
}

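/* Return the entry following r, or NULL at the end of the list. */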
struct range_entry *memranges_next_entry(struct memranges *ranges,
					 const struct range_entry *r)
{
	return r->next;
}

/* Find a range entry with a matching tag that can fit a hole which satisfies
 * the required alignment, is big enough and does not exceed the limit. */
static const struct range_entry *memranges_find_entry(struct memranges *ranges,
						      resource_t limit, resource_t size,
						      size_t align, unsigned long tag)
{
	const struct range_entry *r;
	resource_t base, end;

	if (size == 0)
		return NULL;

	if (!IS_POWER_OF_2(align))
		return NULL;

	if (!IS_ALIGNED(align, ranges->align))
		return NULL;

	memranges_each_entry(r, ranges) {
		if (r->tag != tag)
			continue;

		base = ALIGN_UP(r->begin, align);
		end = base + size - 1;

		if (end > r->end)
			continue;

		if (end > limit)
			continue;

		return r;
	}

	return NULL;
}

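/* Steal an aligned region of the requested size below limit from an entry
 * tagged with tag. On success the region is removed from ranges, its base
 * is stored in *stolen_base and true is returned. */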
bool memranges_steal(struct memranges *ranges, resource_t limit, resource_t size, size_t align,
		     unsigned long tag, resource_t *stolen_base)
{
	resource_t base;
	const struct range_entry *r = memranges_find_entry(ranges, limit, size, align, tag);

	if (r == NULL)
		return false;

	base = ALIGN_UP(r->begin, align);

	memranges_create_hole(ranges, base, size);
	*stolen_base = base;

	return true;
}