/* SPDX-License-Identifier: GPL-2.0-only */
/* This file is part of the coreboot project. */

#include <assert.h>
#include <stdlib.h>
#include <commonlib/helpers.h>
#include <console/console.h>
#include <memrange.h>

static inline void range_entry_link(struct range_entry **prev_ptr,
                                    struct range_entry *r)
{
        r->next = *prev_ptr;
        *prev_ptr = r;
}

static inline void range_entry_unlink(struct range_entry **prev_ptr,
                                      struct range_entry *r)
{
        *prev_ptr = r->next;
        r->next = NULL;
}

static inline void range_entry_unlink_and_free(struct memranges *ranges,
                                               struct range_entry **prev_ptr,
                                               struct range_entry *r)
{
        range_entry_unlink(prev_ptr, r);
        range_entry_link(&ranges->free_list, r);
}

static struct range_entry *alloc_range(struct memranges *ranges)
{
        if (ranges->free_list != NULL) {
                struct range_entry *r;

                r = ranges->free_list;
                range_entry_unlink(&ranges->free_list, r);
                return r;
        }
        if (ENV_PAYLOAD_LOADER)
                return malloc(sizeof(struct range_entry));
        return NULL;
}

static inline struct range_entry *
range_list_add(struct memranges *ranges, struct range_entry **prev_ptr,
               resource_t begin, resource_t end, unsigned long tag)
{
        struct range_entry *new_entry;

        new_entry = alloc_range(ranges);
        if (new_entry == NULL) {
                printk(BIOS_ERR, "Could not allocate range_entry!\n");
                return NULL;
        }
        new_entry->begin = begin;
        new_entry->end = end;
        new_entry->tag = tag;
        range_entry_link(prev_ptr, new_entry);

        return new_entry;
}

static void merge_neighbor_entries(struct memranges *ranges)
{
        struct range_entry *cur;
        struct range_entry *prev;

        prev = NULL;
        /* Merge all neighbors and delete/free the leftover entry. */
        for (cur = ranges->entries; cur != NULL; cur = cur->next) {
                /* First entry. Just set prev. */
                if (prev == NULL) {
                        prev = cur;
                        continue;
                }

                /* If the previous entry merges with the current one, update
                 * the previous entry to cover the full range and delete the
                 * current entry from the list. */
                if (prev->end + 1 >= cur->begin && prev->tag == cur->tag) {
                        prev->end = cur->end;
                        range_entry_unlink_and_free(ranges, &prev->next, cur);
                        /* Set cur to prev so cur->next is valid since cur
                         * was just unlinked and freed. */
                        cur = prev;
                        continue;
                }

                prev = cur;
        }
}

static void remove_memranges(struct memranges *ranges,
                             resource_t begin, resource_t end,
                             unsigned long unused)
{
        struct range_entry *cur;
        struct range_entry *next;
        struct range_entry **prev_ptr;

        prev_ptr = &ranges->entries;
        for (cur = ranges->entries; cur != NULL; cur = next) {
                resource_t tmp_end;

                /* Cache the next value to handle unlinks. */
                next = cur->next;

                /* No other ranges are affected. */
                if (end < cur->begin)
                        break;

                /* The removal range starts after this one. */
                if (begin > cur->end) {
                        prev_ptr = &cur->next;
                        continue;
                }

                /* The removal range overlaps with the current entry either
                 * partially or fully. However, we need to adjust the removal
                 * range for any holes. */
                if (begin <= cur->begin) {
                        begin = cur->begin;

                        /* Full removal. */
                        if (end >= cur->end) {
                                begin = cur->end + 1;
                                range_entry_unlink_and_free(ranges, prev_ptr,
                                                            cur);
                                continue;
                        }
                }

                /* prev_ptr can be set now that the unlink path wasn't taken. */
                prev_ptr = &cur->next;

                /* Clip the end fragment to do proper splitting. */
                tmp_end = end;
                if (end > cur->end)
                        tmp_end = cur->end;

                /* Hole punched in middle of entry. */
                if (begin > cur->begin && tmp_end < cur->end) {
                        range_list_add(ranges, &cur->next, end + 1, cur->end,
                                       cur->tag);
                        cur->end = begin - 1;
                        break;
                }

                /* Removal at beginning. */
                if (begin == cur->begin)
                        cur->begin = tmp_end + 1;

                /* Removal at end. */
                if (tmp_end == cur->end)
                        cur->end = begin - 1;
        }
}
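
/*
 * Worked example for the splitting logic above (illustrative only, derived
 * from the code rather than from an actual caller): removing [0x2000, 0x2fff]
 * from a single entry covering [0x1000, 0x3fff] takes the "hole punched in
 * middle of entry" path. A new entry [0x3000, 0x3fff] carrying the same tag
 * is linked right after the current one, and the current entry is clipped to
 * [0x1000, 0x1fff].
 */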

static void merge_add_memranges(struct memranges *ranges,
                                resource_t begin, resource_t end,
                                unsigned long tag)
{
        struct range_entry *cur;
        struct range_entry **prev_ptr;

        prev_ptr = &ranges->entries;

        /* Remove all existing entries covered by the range. */
        remove_memranges(ranges, begin, end, -1);

        /* Find the entry to place the new entry after. Since
         * remove_memranges() was called above there is a guaranteed
         * spot for this new entry. */
        for (cur = ranges->entries; cur != NULL; cur = cur->next) {
                /* Found insertion spot before current entry. */
                if (end < cur->begin)
                        break;

                /* Keep track of previous entry to insert new entry after it. */
                prev_ptr = &cur->next;

                /* The new entry starts after this one. */
                if (begin > cur->end)
                        continue;
        }

        /* Add new entry and merge with neighbors. */
        range_list_add(ranges, prev_ptr, begin, end, tag);
        merge_neighbor_entries(ranges);
}

void memranges_update_tag(struct memranges *ranges, unsigned long old_tag,
                          unsigned long new_tag)
{
        struct range_entry *r;

        memranges_each_entry(r, ranges) {
                if (range_entry_tag(r) == old_tag)
                        range_entry_update_tag(r, new_tag);
        }

        merge_neighbor_entries(ranges);
}

typedef void (*range_action_t)(struct memranges *ranges,
                               resource_t begin, resource_t end,
                               unsigned long tag);

static void do_action(struct memranges *ranges,
                      resource_t base, resource_t size, unsigned long tag,
                      range_action_t action)
{
        resource_t end;
        resource_t begin;

        if (size == 0)
                return;

        /* The addresses are aligned to (1ULL << ranges->align): the begin
         * address is aligned down while the end address is aligned up to be
         * conservative about the full range covered. */
        begin = ALIGN_DOWN(base, POWER_OF_2(ranges->align));
        end = begin + size + (base - begin);
        end = ALIGN_UP(end, POWER_OF_2(ranges->align)) - 1;
        action(ranges, begin, end, tag);
}
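
/*
 * Concrete example of the alignment above (illustrative only): with
 * ranges->align == 12 (4 KiB granularity), base == 0x1234 and size == 0x100,
 * begin is aligned down to 0x1000, the raw end becomes 0x1334, and the aligned
 * end becomes 0x1fff. The action therefore covers the whole 4 KiB page that
 * contains the requested region.
 */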

void memranges_create_hole(struct memranges *ranges,
                           resource_t base, resource_t size)
{
        do_action(ranges, base, size, -1, remove_memranges);
}

void memranges_insert(struct memranges *ranges,
                      resource_t base, resource_t size, unsigned long tag)
{
        do_action(ranges, base, size, tag, merge_add_memranges);
}

struct collect_context {
        struct memranges *ranges;
        unsigned long tag;
        memrange_filter_t filter;
};

static void collect_ranges(void *gp, struct device *dev, struct resource *res)
{
        struct collect_context *ctx = gp;

        if (res->size == 0)
                return;

        if (ctx->filter == NULL || ctx->filter(dev, res))
                memranges_insert(ctx->ranges, res->base, res->size, ctx->tag);
}

void memranges_add_resources_filter(struct memranges *ranges,
                                    unsigned long mask, unsigned long match,
                                    unsigned long tag,
                                    memrange_filter_t filter)
{
        struct collect_context context;

        /* Only deal with MEM resources. */
        mask |= IORESOURCE_MEM;
        match |= IORESOURCE_MEM;

        context.ranges = ranges;
        context.tag = tag;
        context.filter = filter;
        search_global_resources(mask, match, collect_ranges, &context);
}

void memranges_add_resources(struct memranges *ranges,
                             unsigned long mask, unsigned long match,
                             unsigned long tag)
{
        memranges_add_resources_filter(ranges, mask, match, tag, NULL);
}

void memranges_init_empty_with_alignment(struct memranges *ranges,
                                         struct range_entry *to_free,
                                         size_t num_free, unsigned char align)
{
        size_t i;

        ranges->entries = NULL;
        ranges->free_list = NULL;
        ranges->align = align;

        for (i = 0; i < num_free; i++)
                range_entry_link(&ranges->free_list, &to_free[i]);
}

void memranges_init_with_alignment(struct memranges *ranges,
                                   unsigned long mask, unsigned long match,
                                   unsigned long tag, unsigned char align)
{
        memranges_init_empty_with_alignment(ranges, NULL, 0, align);
        memranges_add_resources(ranges, mask, match, tag);
}

/* Clone a memrange. The new memrange has the same entries as the old one. */
void memranges_clone(struct memranges *newranges, struct memranges *oldranges)
{
        struct range_entry *r, *cur;
        struct range_entry **prev_ptr;

        memranges_init_empty_with_alignment(newranges, NULL, 0, oldranges->align);

        prev_ptr = &newranges->entries;
        memranges_each_entry(r, oldranges) {
                cur = range_list_add(newranges, prev_ptr, r->begin, r->end,
                                     r->tag);
                prev_ptr = &cur->next;
        }
}

void memranges_teardown(struct memranges *ranges)
{
        while (ranges->entries != NULL) {
                range_entry_unlink_and_free(ranges, &ranges->entries,
                                            ranges->entries);
        }
}

void memranges_fill_holes_up_to(struct memranges *ranges,
                                resource_t limit, unsigned long tag)
{
        struct range_entry *cur;
        struct range_entry *prev;

        prev = NULL;
        for (cur = ranges->entries; cur != NULL; cur = cur->next) {
                /* First entry. Just set prev. */
                if (prev == NULL) {
                        prev = cur;
                        continue;
                }

                /* If the previous entry does not directly precede the current
                 * entry then add a new entry just after the previous one. */
                if (range_entry_end(prev) != cur->begin) {
                        resource_t end;

                        end = cur->begin - 1;
                        if (end >= limit)
                                end = limit - 1;
                        range_list_add(ranges, &prev->next,
                                       range_entry_end(prev), end, tag);
                }

                prev = cur;

                /* Hit the requested range limit. No other entries after this
                 * are affected. */
                if (cur->begin >= limit)
                        break;
        }

        /* Handle the case where the limit was never reached. A new entry needs
         * to be added to cover the range up to the limit. */
        if (prev != NULL && range_entry_end(prev) < limit)
                range_list_add(ranges, &prev->next, range_entry_end(prev),
                               limit - 1, tag);

        /* Merge all entries that were newly added. */
        merge_neighbor_entries(ranges);
}
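
/*
 * Worked example (illustrative only): given the entries [0x1000, 0x1fff] and
 * [0x3000, 0x3fff] and a call with limit == 0x10000, the loop above adds
 * [0x2000, 0x2fff] between the two entries and the final range_list_add()
 * adds [0x4000, 0xffff], both carrying the requested tag. Neighbors that end
 * up contiguous with matching tags are then merged.
 */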

struct range_entry *memranges_next_entry(struct memranges *ranges,
                                         const struct range_entry *r)
{
        return r->next;
}

/* Find a range entry that satisfies the given constraints to fit a hole that matches the
 * required alignment, is big enough, does not exceed the limit and has a matching tag. */
static const struct range_entry *memranges_find_entry(struct memranges *ranges,
                                                      resource_t limit, resource_t size,
                                                      unsigned char align, unsigned long tag)
{
        const struct range_entry *r;
        resource_t base, end;

        if (size == 0)
                return NULL;

        memranges_each_entry(r, ranges) {
                if (r->tag != tag)
                        continue;

                base = ALIGN_UP(r->begin, POWER_OF_2(align));
                end = base + size - 1;

                if (end > r->end)
                        continue;

                /*
                 * If the end of the hole in the current range entry goes beyond the requested
                 * limit, then none of the following ranges can satisfy this request because all
                 * range entries are maintained in increasing order.
                 */
                if (end > limit)
                        break;

                return r;
        }

        return NULL;
}

bool memranges_steal(struct memranges *ranges, resource_t limit, resource_t size,
                     unsigned char align, unsigned long tag, resource_t *stolen_base)
{
        resource_t base;
        const struct range_entry *r = memranges_find_entry(ranges, limit, size, align, tag);

        if (r == NULL)
                return false;

        base = ALIGN_UP(r->begin, POWER_OF_2(align));

        memranges_create_hole(ranges, base, size);
        *stolen_base = base;

        return true;
}
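
/*
 * Minimal usage sketch of the public API above (illustrative only; the tag
 * value, addresses and sizes are made up, and GiB/MiB come from
 * <commonlib/helpers.h>):
 *
 *        struct memranges ranges;
 *        resource_t stolen;
 *
 *        // Empty map, no preallocated free-list entries, 4 KiB granularity.
 *        memranges_init_empty_with_alignment(&ranges, NULL, 0, 12);
 *        // Describe 2 GiB of memory starting at 0 with an arbitrary tag of 1.
 *        memranges_insert(&ranges, 0, 2ULL * GiB, 1);
 *        // Carve a 1 MiB, 4 KiB aligned block below 4 GiB out of that range.
 *        if (memranges_steal(&ranges, 4ULL * GiB, 1 * MiB, 12, 1, &stolen))
 *                printk(BIOS_DEBUG, "stolen block at 0x%llx\n",
 *                       (unsigned long long)stolen);
 *        memranges_teardown(&ranges);
 */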