/* SPDX-License-Identifier: GPL-2.0-only */

#include <assert.h>
#include <stdlib.h>
#include <commonlib/helpers.h>
#include <console/console.h>
#include <memrange.h>

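/*
 * A memranges object is kept as a singly linked list of range_entry
 * structures, maintained in increasing address order and non-overlapping.
 * Each entry covers the inclusive range [begin, end] and carries a tag. The
 * helpers below manipulate the list through a pointer to the previous link so
 * that the list head and interior links can be updated the same way.
 */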
static inline void range_entry_link(struct range_entry **prev_ptr,
				    struct range_entry *r)
{
	r->next = *prev_ptr;
	*prev_ptr = r;
}

static inline void range_entry_unlink(struct range_entry **prev_ptr,
				      struct range_entry *r)
{
	*prev_ptr = r->next;
	r->next = NULL;
}

static inline void range_entry_unlink_and_free(struct memranges *ranges,
						struct range_entry **prev_ptr,
						struct range_entry *r)
{
	range_entry_unlink(prev_ptr, r);
	range_entry_link(&ranges->free_list, r);
}

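/*
 * Take an entry from the free list if one is available. Otherwise fall back
 * to malloc() when ENV_PAYLOAD_LOADER is set, and return NULL in all other
 * stages.
 */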
static struct range_entry *alloc_range(struct memranges *ranges)
{
	if (ranges->free_list != NULL) {
		struct range_entry *r;

		r = ranges->free_list;
		range_entry_unlink(&ranges->free_list, r);
		return r;
	}
	if (ENV_PAYLOAD_LOADER)
		return malloc(sizeof(struct range_entry));
	return NULL;
}

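/*
 * Allocate a new entry covering [begin, end] with the given tag and link it
 * into the list at *prev_ptr. Returns NULL if no entry could be allocated.
 */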
static inline struct range_entry *
range_list_add(struct memranges *ranges, struct range_entry **prev_ptr,
	       resource_t begin, resource_t end, unsigned long tag)
{
	struct range_entry *new_entry;

	new_entry = alloc_range(ranges);
	if (new_entry == NULL) {
		printk(BIOS_ERR, "Could not allocate range_entry!\n");
		return NULL;
	}
	new_entry->begin = begin;
	new_entry->end = end;
	new_entry->tag = tag;
	range_entry_link(prev_ptr, new_entry);

	return new_entry;
}

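/*
 * Collapse adjacent or overlapping entries that carry the same tag into a
 * single entry so the list stays minimal.
 */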
static void merge_neighbor_entries(struct memranges *ranges)
{
	struct range_entry *cur;
	struct range_entry *prev;

	prev = NULL;
	/* Merge all neighbors and delete/free the leftover entry. */
	for (cur = ranges->entries; cur != NULL; cur = cur->next) {
		/* First entry. Just set prev. */
		if (prev == NULL) {
			prev = cur;
			continue;
		}

		/* If the previous entry merges with the current one, update
		 * the previous entry to cover the full range and delete the
		 * current one from the list. */
		if (prev->end + 1 >= cur->begin && prev->tag == cur->tag) {
			prev->end = cur->end;
			range_entry_unlink_and_free(ranges, &prev->next, cur);
			/* Set cur to prev so cur->next is valid since cur
			 * was just unlinked and freed. */
			cur = prev;
			continue;
		}

		prev = cur;
	}
}

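/*
 * Remove [begin, end] from every entry it touches, unlinking fully covered
 * entries and splitting an entry in two when the removal punches a hole in
 * its middle. The last parameter is unused; it only exists so the function
 * matches the range_action_t signature used by do_action().
 */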
static void remove_memranges(struct memranges *ranges,
			     resource_t begin, resource_t end,
			     unsigned long unused)
{
	struct range_entry *cur;
	struct range_entry *next;
	struct range_entry **prev_ptr;

	prev_ptr = &ranges->entries;
	for (cur = ranges->entries; cur != NULL; cur = next) {
		resource_t tmp_end;

		/* Cache the next value to handle unlinks. */
		next = cur->next;

		/* No other ranges are affected. */
		if (end < cur->begin)
			break;

		/* The removal range starts after this one. */
		if (begin > cur->end) {
			prev_ptr = &cur->next;
			continue;
		}

		/* The removal range overlaps with the current entry either
		 * partially or fully. However, we need to adjust the removal
		 * range for any holes. */
		if (begin <= cur->begin) {
			begin = cur->begin;

			/* Full removal. */
			if (end >= cur->end) {
				begin = cur->end + 1;
				range_entry_unlink_and_free(ranges, prev_ptr,
							    cur);
				continue;
			}
		}

		/* prev_ptr can be set now that the unlink path wasn't taken. */
		prev_ptr = &cur->next;

		/* Clip the end fragment to do proper splitting. */
		tmp_end = end;
		if (end > cur->end)
			tmp_end = cur->end;

		/* Hole punched in middle of entry. */
		if (begin > cur->begin && tmp_end < cur->end) {
			range_list_add(ranges, &cur->next, end + 1, cur->end,
				       cur->tag);
			cur->end = begin - 1;
			break;
		}

		/* Removal at beginning. */
		if (begin == cur->begin)
			cur->begin = tmp_end + 1;

		/* Removal at end. */
		if (tmp_end == cur->end)
			cur->end = begin - 1;
	}
}

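/*
 * Add [begin, end] with the given tag. Any portion of existing entries that
 * the new range covers is removed first, so the new entry simply replaces
 * whatever was there before, and equal-tagged neighbors are merged afterward.
 */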
static void merge_add_memranges(struct memranges *ranges,
				resource_t begin, resource_t end,
				unsigned long tag)
{
	struct range_entry *cur;
	struct range_entry **prev_ptr;

	prev_ptr = &ranges->entries;

	/* Remove all existing entries covered by the range. */
	remove_memranges(ranges, begin, end, -1);

	/* Find the entry to place the new entry after. Since
	 * remove_memranges() was called above, there is a guaranteed
	 * spot for this new entry. */
	for (cur = ranges->entries; cur != NULL; cur = cur->next) {
		/* Found insertion spot before current entry. */
		if (end < cur->begin)
			break;

		/* Keep track of previous entry to insert new entry after it. */
		prev_ptr = &cur->next;

		/* The new entry starts after this one. */
		if (begin > cur->end)
			continue;
	}

	/* Add new entry and merge with neighbors. */
	range_list_add(ranges, prev_ptr, begin, end, tag);
	merge_neighbor_entries(ranges);
}

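/*
 * Retag every entry currently carrying old_tag with new_tag, then merge any
 * neighbors that become mergeable as a result.
 */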
void memranges_update_tag(struct memranges *ranges, unsigned long old_tag,
			  unsigned long new_tag)
{
	struct range_entry *r;

	memranges_each_entry(r, ranges) {
		if (range_entry_tag(r) == old_tag)
			range_entry_update_tag(r, new_tag);
	}

	merge_neighbor_entries(ranges);
}

typedef void (*range_action_t)(struct memranges *ranges,
			       resource_t begin, resource_t end,
			       unsigned long tag);

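/*
 * Translate a base/size pair into an inclusive [begin, end] pair aligned to
 * the memranges' granularity and apply the given action to it. For
 * illustration, with align == 12 (4KiB granularity), base == 0x1100 and
 * size == 0x100 expand to begin == 0x1000 and end == 0x1fff.
 */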
static void do_action(struct memranges *ranges,
		      resource_t base, resource_t size, unsigned long tag,
		      range_action_t action)
{
	resource_t end;
	resource_t begin;

	if (size == 0)
		return;

	/* The addresses are aligned to (1ULL << ranges->align): the begin address is
	 * aligned down while the end address is aligned up to be conservative
	 * about the full range covered. */
	begin = ALIGN_DOWN(base, POWER_OF_2(ranges->align));
	end = begin + size + (base - begin);
	end = ALIGN_UP(end, POWER_OF_2(ranges->align)) - 1;
	action(ranges, begin, end, tag);
}

void memranges_create_hole(struct memranges *ranges,
			   resource_t base, resource_t size)
{
	do_action(ranges, base, size, -1, remove_memranges);
}

void memranges_insert(struct memranges *ranges,
		      resource_t base, resource_t size, unsigned long tag)
{
	do_action(ranges, base, size, tag, merge_add_memranges);
}

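/*
 * Callback context for search_global_resources(): every matching device
 * resource is inserted into the memranges with the requested tag, unless a
 * filter was provided and rejects it.
 */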
struct collect_context {
	struct memranges *ranges;
	unsigned long tag;
	memrange_filter_t filter;
};

static void collect_ranges(void *gp, struct device *dev, struct resource *res)
{
	struct collect_context *ctx = gp;

	if (res->size == 0)
		return;

	if (ctx->filter == NULL || ctx->filter(dev, res))
		memranges_insert(ctx->ranges, res->base, res->size, ctx->tag);
}

void memranges_add_resources_filter(struct memranges *ranges,
				    unsigned long mask, unsigned long match,
				    unsigned long tag,
				    memrange_filter_t filter)
{
	struct collect_context context;

	/* Only deal with MEM resources. */
	mask |= IORESOURCE_MEM;
	match |= IORESOURCE_MEM;

	context.ranges = ranges;
	context.tag = tag;
	context.filter = filter;
	search_global_resources(mask, match, collect_ranges, &context);
}

void memranges_add_resources(struct memranges *ranges,
			     unsigned long mask, unsigned long match,
			     unsigned long tag)
{
	memranges_add_resources_filter(ranges, mask, match, tag, NULL);
}

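/*
 * Initialize an empty memranges object whose entries are drawn from the
 * caller-provided to_free array instead of the heap. A minimal usage sketch
 * (illustrative only; the tag value 0 and the 4KiB alignment are arbitrary
 * examples, not requirements):
 *
 *	static struct range_entry entries[8];
 *	struct memranges ranges;
 *	struct range_entry *r;
 *
 *	memranges_init_empty_with_alignment(&ranges, entries,
 *					    ARRAY_SIZE(entries), 12);
 *	memranges_add_resources(&ranges, IORESOURCE_MEM, IORESOURCE_MEM, 0);
 *	memranges_each_entry(r, &ranges) {
 *		... use r->begin, r->end and range_entry_tag(r) ...
 *	}
 *	memranges_teardown(&ranges);
 */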
void memranges_init_empty_with_alignment(struct memranges *ranges,
					 struct range_entry *to_free,
					 size_t num_free, unsigned char align)
{
	size_t i;

	ranges->entries = NULL;
	ranges->free_list = NULL;
	ranges->align = align;

	for (i = 0; i < num_free; i++)
		range_entry_link(&ranges->free_list, &to_free[i]);
}

void memranges_init_with_alignment(struct memranges *ranges,
				   unsigned long mask, unsigned long match,
				   unsigned long tag, unsigned char align)
{
	memranges_init_empty_with_alignment(ranges, NULL, 0, align);
	memranges_add_resources(ranges, mask, match, tag);
}

/* Clone a memranges object. The new object has the same entries as the old one. */
void memranges_clone(struct memranges *newranges, struct memranges *oldranges)
{
	struct range_entry *r, *cur;
	struct range_entry **prev_ptr;

	memranges_init_empty_with_alignment(newranges, NULL, 0, oldranges->align);

	prev_ptr = &newranges->entries;
	memranges_each_entry(r, oldranges) {
		cur = range_list_add(newranges, prev_ptr, r->begin, r->end,
				     r->tag);
		prev_ptr = &cur->next;
	}
}

void memranges_teardown(struct memranges *ranges)
{
	while (ranges->entries != NULL) {
		range_entry_unlink_and_free(ranges, &ranges->entries,
					    ranges->entries);
	}
}

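/*
 * Fill every gap between existing entries with a new entry carrying the
 * given tag, up to (and not beyond) the provided limit.
 */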
void memranges_fill_holes_up_to(struct memranges *ranges,
				resource_t limit, unsigned long tag)
{
	struct range_entry *cur;
	struct range_entry *prev;

	prev = NULL;
	for (cur = ranges->entries; cur != NULL; cur = cur->next) {
		/* First entry. Just set prev. */
		if (prev == NULL) {
			prev = cur;
			continue;
		}

		/* If the previous entry does not directly precede the current
		 * entry, then add a new entry just after the previous one. */
		if (range_entry_end(prev) != cur->begin) {
			resource_t end;

			end = cur->begin - 1;
			if (end >= limit)
				end = limit - 1;
			range_list_add(ranges, &prev->next,
				       range_entry_end(prev), end, tag);
		}

		prev = cur;

		/* Hit the requested range limit. No other entries after this
		 * are affected. */
		if (cur->begin >= limit)
			break;
	}

	/* Handle the case where the limit was never reached. A new entry needs
	 * to be added to cover the range up to the limit. */
	if (prev != NULL && range_entry_end(prev) < limit)
		range_list_add(ranges, &prev->next, range_entry_end(prev),
			       limit - 1, tag);

	/* Merge all entries that were newly added. */
	merge_neighbor_entries(ranges);
}

struct range_entry *memranges_next_entry(struct memranges *ranges,
					 const struct range_entry *r)
{
	return r->next;
}

/* Find a range entry that can fit a hole satisfying the given constraints: the hole must
 * match the required alignment, be big enough, not exceed the limit, and carry the matching
 * tag. When 'last' is set, the last (highest) matching entry is returned instead of the
 * first one. */
static const struct range_entry *
memranges_find_entry(struct memranges *ranges, resource_t limit, resource_t size,
		     unsigned char align, unsigned long tag, bool last)
{
	const struct range_entry *r, *last_entry = NULL;
	resource_t base, end;

	if (size == 0)
		return NULL;

	memranges_each_entry(r, ranges) {

		if (r->tag != tag)
			continue;

		base = ALIGN_UP(r->begin, POWER_OF_2(align));
		end = base + size - 1;

		if (end > r->end)
			continue;

		/*
		 * If end for the hole in the current range entry goes beyond the requested
		 * limit, then none of the following ranges can satisfy this request because all
		 * range entries are maintained in increasing order.
		 */
		if (end > limit)
			break;

		if (!last)
			return r;

		last_entry = r;
	}

	return last_entry;
}

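/*
 * Steal a 'size'-byte, 'align'-aligned region that does not exceed 'limit'
 * from an entry tagged 'tag' and punch it out of 'ranges'. The base of the
 * stolen region is returned through 'stolen_base'. With 'from_top' set, the
 * region is carved from the top of the highest matching entry; otherwise it
 * is taken from the bottom of the lowest one. Returns false if no entry can
 * satisfy the request.
 */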
bool memranges_steal(struct memranges *ranges, resource_t limit, resource_t size,
		     unsigned char align, unsigned long tag, resource_t *stolen_base,
		     bool from_top)
{
	const struct range_entry *r;

	r = memranges_find_entry(ranges, limit, size, align, tag, from_top);
	if (r == NULL)
		return false;

	if (from_top) {
		limit = MIN(limit, r->end);
		/* Ensure we're within the range, even aligned down.
		   Proof is simple: if ALIGN_UP(r->begin) were higher,
		   the stolen range wouldn't fit. */
		assert(r->begin <= ALIGN_DOWN(limit - size + 1, POWER_OF_2(align)));
		*stolen_base = ALIGN_DOWN(limit - size + 1, POWER_OF_2(align));
	} else {
		*stolen_base = ALIGN_UP(r->begin, POWER_OF_2(align));
	}
	memranges_create_hole(ranges, *stolen_base, size);

	return true;
}