/*
 * This file is part of the coreboot project.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; version 2 of the License.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */

#include <assert.h>
#include <stdlib.h>
#include <commonlib/helpers.h>
#include <console/console.h>
#include <memrange.h>

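/*
 * A memranges structure maintains a sorted, non-overlapping, singly linked
 * list of [begin, end] entries, each carrying an unsigned long tag. Freed
 * entries are recycled through a per-structure free list so that heap-less
 * stages can run from a fixed pool of entries.
 *
 * A minimal usage sketch (illustrative only; the tag value, the sizes, and
 * the ARRAY_SIZE/GiB helpers from <commonlib/helpers.h> are assumptions,
 * not part of this file):
 *
 *	struct memranges ranges;
 *	struct range_entry pool[8];
 *	struct range_entry *r;
 *
 *	// Align ranges to 1 << 12 = 4KiB granularity; entries come from pool.
 *	memranges_init_empty_with_alignment(&ranges, pool, ARRAY_SIZE(pool), 12);
 *	memranges_insert(&ranges, 0, 1ULL * GiB, 0);	// tag 0 is hypothetical
 *	memranges_create_hole(&ranges, 0xa0000, 0x20000);
 *	memranges_each_entry(r, &ranges) {
 *		// Inspect r via range_entry_tag(r) / range_entry_end(r).
 *	}
 *	memranges_teardown(&ranges);
 */
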
static inline void range_entry_link(struct range_entry **prev_ptr,
				    struct range_entry *r)
{
	r->next = *prev_ptr;
	*prev_ptr = r;
}

static inline void range_entry_unlink(struct range_entry **prev_ptr,
				      struct range_entry *r)
{
	*prev_ptr = r->next;
	r->next = NULL;
}

static inline void range_entry_unlink_and_free(struct memranges *ranges,
					       struct range_entry **prev_ptr,
					       struct range_entry *r)
{
	range_entry_unlink(prev_ptr, r);
	range_entry_link(&ranges->free_list, r);
}

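/* Get a range_entry, preferring the structure's free list and falling back
 * to malloc() only in the payload-loading stage (ENV_PAYLOAD_LOADER), where
 * heap allocation is available. Returns NULL if no entry can be provided. */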
static struct range_entry *alloc_range(struct memranges *ranges)
{
	if (ranges->free_list != NULL) {
		struct range_entry *r;

		r = ranges->free_list;
		range_entry_unlink(&ranges->free_list, r);
		return r;
	}
	if (ENV_PAYLOAD_LOADER)
		return malloc(sizeof(struct range_entry));
	return NULL;
}

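/* Allocate a new entry covering [begin, end] with the given tag and link it
 * into the list at the position referenced by prev_ptr. Returns the new
 * entry, or NULL if allocation failed. */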
static inline struct range_entry *
range_list_add(struct memranges *ranges, struct range_entry **prev_ptr,
	       resource_t begin, resource_t end, unsigned long tag)
{
	struct range_entry *new_entry;

	new_entry = alloc_range(ranges);
	if (new_entry == NULL) {
		printk(BIOS_ERR, "Could not allocate range_entry!\n");
		return NULL;
	}
	new_entry->begin = begin;
	new_entry->end = end;
	new_entry->tag = tag;
	range_entry_link(prev_ptr, new_entry);

	return new_entry;
}

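/* Walk the sorted list and coalesce adjacent or overlapping entries that
 * share the same tag into a single entry. */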
static void merge_neighbor_entries(struct memranges *ranges)
{
	struct range_entry *cur;
	struct range_entry *prev;

	prev = NULL;
	/* Merge all neighbors and delete/free the leftover entry. */
	for (cur = ranges->entries; cur != NULL; cur = cur->next) {
		/* First entry. Just set prev. */
		if (prev == NULL) {
			prev = cur;
			continue;
		}

		/* If the previous entry merges with the current one, update
		 * the previous entry to cover the full range and delete the
		 * current entry from the list. */
		if (prev->end + 1 >= cur->begin && prev->tag == cur->tag) {
			prev->end = cur->end;
			range_entry_unlink_and_free(ranges, &prev->next, cur);
			/* Set cur to prev so cur->next is valid since cur
			 * was just unlinked and freed. */
			cur = prev;
			continue;
		}

		prev = cur;
	}
}

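/* Remove [begin, end] from the list. Entries fully covered are freed;
 * partially covered entries are trimmed, and an entry that extends past the
 * removal range on both sides is split in two. The last argument is unused;
 * it exists only so the signature matches range_action_t. */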
static void remove_memranges(struct memranges *ranges,
			     resource_t begin, resource_t end,
			     unsigned long unused)
{
	struct range_entry *cur;
	struct range_entry *next;
	struct range_entry **prev_ptr;

	prev_ptr = &ranges->entries;
	for (cur = ranges->entries; cur != NULL; cur = next) {
		resource_t tmp_end;

		/* Cache the next value to handle unlinks. */
		next = cur->next;

		/* No other ranges are affected. */
		if (end < cur->begin)
			break;

		/* The removal range starts after this one. */
		if (begin > cur->end) {
			prev_ptr = &cur->next;
			continue;
		}

		/* The removal range overlaps with the current entry either
		 * partially or fully. However, we need to adjust the removal
		 * range for any holes. */
		if (begin <= cur->begin) {
			begin = cur->begin;

			/* Full removal. */
			if (end >= cur->end) {
				begin = cur->end + 1;
				range_entry_unlink_and_free(ranges, prev_ptr,
							    cur);
				continue;
			}
		}

		/* prev_ptr can be set now that the unlink path wasn't taken. */
		prev_ptr = &cur->next;

		/* Clip the end fragment to do proper splitting. */
		tmp_end = end;
		if (end > cur->end)
			tmp_end = cur->end;

		/* Hole punched in middle of entry. */
		if (begin > cur->begin && tmp_end < cur->end) {
			range_list_add(ranges, &cur->next, end + 1, cur->end,
				       cur->tag);
			cur->end = begin - 1;
			break;
		}

		/* Removal at beginning. */
		if (begin == cur->begin)
			cur->begin = tmp_end + 1;

		/* Removal at end. */
		if (tmp_end == cur->end)
			cur->end = begin - 1;
	}
}

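/* Add [begin, end] with the given tag, overriding whatever the range
 * previously contained: existing overlapping entries are removed first,
 * then the new entry is inserted in sorted order and merged with any
 * same-tag neighbors. */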
static void merge_add_memranges(struct memranges *ranges,
				resource_t begin, resource_t end,
				unsigned long tag)
{
	struct range_entry *cur;
	struct range_entry **prev_ptr;

	prev_ptr = &ranges->entries;

	/* Remove all existing entries covered by the range. */
	remove_memranges(ranges, begin, end, -1);

	/* Find the entry to place the new entry after. Since
	 * remove_memranges() was called above there is a guaranteed
	 * spot for this new entry. */
	for (cur = ranges->entries; cur != NULL; cur = cur->next) {
		/* Found insertion spot before current entry. */
		if (end < cur->begin)
			break;

		/* Keep track of previous entry to insert new entry after it. */
		prev_ptr = &cur->next;

		/* The new entry starts after this one. */
		if (begin > cur->end)
			continue;
	}

	/* Add new entry and merge with neighbors. */
	range_list_add(ranges, prev_ptr, begin, end, tag);
	merge_neighbor_entries(ranges);
}

void memranges_update_tag(struct memranges *ranges, unsigned long old_tag,
			  unsigned long new_tag)
{
	struct range_entry *r;

	memranges_each_entry(r, ranges) {
		if (range_entry_tag(r) == old_tag)
			range_entry_update_tag(r, new_tag);
	}

	merge_neighbor_entries(ranges);
}

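/* Action applied to an aligned range; see do_action(), which expands
 * [base, base + size) to the structure's alignment before invoking the
 * action. */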
typedef void (*range_action_t)(struct memranges *ranges,
			       resource_t begin, resource_t end,
			       unsigned long tag);

static void do_action(struct memranges *ranges,
		      resource_t base, resource_t size, unsigned long tag,
		      range_action_t action)
{
	resource_t end;
	resource_t begin;

	if (size == 0)
		return;

	/* The addresses are aligned to (1ULL << ranges->align): the begin
	 * address is aligned down while the end address is aligned up to be
	 * conservative about the full range covered. */
	begin = ALIGN_DOWN(base, POWER_OF_2(ranges->align));
	end = begin + size + (base - begin);
	end = ALIGN_UP(end, POWER_OF_2(ranges->align)) - 1;
	action(ranges, begin, end, tag);
}

void memranges_create_hole(struct memranges *ranges,
			   resource_t base, resource_t size)
{
	do_action(ranges, base, size, -1, remove_memranges);
}

void memranges_insert(struct memranges *ranges,
		      resource_t base, resource_t size, unsigned long tag)
{
	do_action(ranges, base, size, tag, merge_add_memranges);
}

struct collect_context {
	struct memranges *ranges;
	unsigned long tag;
	memrange_filter_t filter;
};

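/* search_global_resources() callback: insert a device's memory resource into
 * the memranges unless the optional filter rejects it. */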
static void collect_ranges(void *gp, struct device *dev, struct resource *res)
{
	struct collect_context *ctx = gp;

	if (res->size == 0)
		return;

	if (ctx->filter == NULL || ctx->filter(dev, res))
		memranges_insert(ctx->ranges, res->base, res->size, ctx->tag);
}

void memranges_add_resources_filter(struct memranges *ranges,
				    unsigned long mask, unsigned long match,
				    unsigned long tag,
				    memrange_filter_t filter)
{
	struct collect_context context;

	/* Only deal with MEM resources. */
	mask |= IORESOURCE_MEM;
	match |= IORESOURCE_MEM;

	context.ranges = ranges;
	context.tag = tag;
	context.filter = filter;
	search_global_resources(mask, match, collect_ranges, &context);
}

void memranges_add_resources(struct memranges *ranges,
			     unsigned long mask, unsigned long match,
			     unsigned long tag)
{
	memranges_add_resources_filter(ranges, mask, match, tag, NULL);
}

void memranges_init_empty_with_alignment(struct memranges *ranges,
					 struct range_entry *to_free,
					 size_t num_free, unsigned char align)
{
	size_t i;

	ranges->entries = NULL;
	ranges->free_list = NULL;
	ranges->align = align;

	for (i = 0; i < num_free; i++)
		range_entry_link(&ranges->free_list, &to_free[i]);
}

void memranges_init_with_alignment(struct memranges *ranges,
				   unsigned long mask, unsigned long match,
				   unsigned long tag, unsigned char align)
{
	memranges_init_empty_with_alignment(ranges, NULL, 0, align);
	memranges_add_resources(ranges, mask, match, tag);
}

/* Clone a memrange. The new memrange has the same entries as the old one. */
void memranges_clone(struct memranges *newranges, struct memranges *oldranges)
{
	struct range_entry *r, *cur;
	struct range_entry **prev_ptr;

	memranges_init_empty_with_alignment(newranges, NULL, 0,
					    oldranges->align);

	prev_ptr = &newranges->entries;
	memranges_each_entry(r, oldranges) {
		cur = range_list_add(newranges, prev_ptr, r->begin, r->end,
				     r->tag);
		prev_ptr = &cur->next;
	}
}

void memranges_teardown(struct memranges *ranges)
{
	while (ranges->entries != NULL) {
		range_entry_unlink_and_free(ranges, &ranges->entries,
					    ranges->entries);
	}
}

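/* Fill every gap between existing entries, up to the given limit, with new
 * entries carrying the supplied tag, then merge neighbors. Note that limit
 * is exclusive: the filled region never extends past limit - 1. */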
void memranges_fill_holes_up_to(struct memranges *ranges,
				resource_t limit, unsigned long tag)
{
	struct range_entry *cur;
	struct range_entry *prev;

	prev = NULL;
	for (cur = ranges->entries; cur != NULL; cur = cur->next) {
		/* First entry. Just set prev. */
		if (prev == NULL) {
			prev = cur;
			continue;
		}

		/* If the previous entry does not directly precede the current
		 * entry then add a new entry just after the previous one. */
		if (range_entry_end(prev) != cur->begin) {
			resource_t end;

			end = cur->begin - 1;
			if (end >= limit)
				end = limit - 1;
			range_list_add(ranges, &prev->next,
				       range_entry_end(prev), end, tag);
		}

		prev = cur;

		/* Hit the requested range limit. No other entries after this
		 * are affected. */
		if (cur->begin >= limit)
			break;
	}

	/* Handle the case where the limit was never reached. A new entry needs
	 * to be added to cover the range up to the limit. */
	if (prev != NULL && range_entry_end(prev) < limit)
		range_list_add(ranges, &prev->next, range_entry_end(prev),
			       limit - 1, tag);

	/* Merge all entries that were newly added. */
	merge_neighbor_entries(ranges);
}

struct range_entry *memranges_next_entry(struct memranges *ranges,
					 const struct range_entry *r)
{
	return r->next;
}

/* Find a range entry that can fit a hole satisfying the given constraints:
 * the hole meets the required alignment, is big enough, does not exceed the
 * limit, and the entry carries a matching tag. */
static const struct range_entry *memranges_find_entry(struct memranges *ranges,
						      resource_t limit, resource_t size,
						      unsigned char align, unsigned long tag)
{
	const struct range_entry *r;
	resource_t base, end;

	if (size == 0)
		return NULL;

	memranges_each_entry(r, ranges) {
		if (r->tag != tag)
			continue;

		base = ALIGN_UP(r->begin, POWER_OF_2(align));
		end = base + size - 1;

		if (end > r->end)
			continue;

		if (end > limit)
			continue;

		return r;
	}

	return NULL;
}

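/* Steal memory from the tracked ranges: carve an aligned hole of the
 * requested size out of the first entry with a matching tag that can fit it
 * at or below limit, returning its base through stolen_base. Returns true
 * on success.
 *
 * Illustrative sketch (values hypothetical; KiB is assumed to come from
 * <commonlib/helpers.h>): stealing a 64KiB buffer below 4GiB, aligned to
 * 1 << 16, from ranges tagged 0:
 *
 *	resource_t base;
 *	if (memranges_steal(&ranges, 0xffffffff, 64 * KiB, 16, 0, &base))
 *		printk(BIOS_DEBUG, "stolen @ %llx\n", base);
 */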
bool memranges_steal(struct memranges *ranges, resource_t limit, resource_t size,
		     unsigned char align, unsigned long tag, resource_t *stolen_base)
{
	resource_t base;
	const struct range_entry *r = memranges_find_entry(ranges, limit, size, align, tag);

	if (r == NULL)
		return false;

	base = ALIGN_UP(r->begin, POWER_OF_2(align));

	memranges_create_hole(ranges, base, size);
	*stolen_base = base;

	return true;
}