/*
 * This file is part of the coreboot project.
 *
 * Copyright (C) 2013 Google, Inc.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; version 2 of the License.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 */
#include <stdlib.h>
#include <console/console.h>
#include <memrange.h>

/* Coreboot doesn't have a free() function. Therefore, keep a cache of
 * freed entries. */
static struct range_entry *free_list;

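/*
 * The list helpers below take a pointer to the link that points at the
 * entry being operated on (either &ranges->entries or &prev->next), so
 * insertion and removal work identically at the head of the list and in
 * the middle of it.
 */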
static inline void range_entry_link(struct range_entry **prev_ptr,
                                    struct range_entry *r)
{
	r->next = *prev_ptr;
	*prev_ptr = r;
}

static inline void range_entry_unlink(struct range_entry **prev_ptr,
                                      struct range_entry *r)
{
	*prev_ptr = r->next;
	r->next = NULL;
}

static inline void range_entry_unlink_and_free(struct range_entry **prev_ptr,
                                               struct range_entry *r)
{
	range_entry_unlink(prev_ptr, r);
	range_entry_link(&free_list, r);
}

static struct range_entry *alloc_range(void)
{
	if (free_list != NULL) {
		struct range_entry *r;

		r = free_list;
		range_entry_unlink(&free_list, r);
		return r;
	}
	return malloc(sizeof(struct range_entry));
}

static inline struct range_entry *
range_list_add(struct range_entry **prev_ptr, resource_t begin, resource_t end,
               unsigned long tag)
{
	struct range_entry *new_entry;

	new_entry = alloc_range();
	if (new_entry == NULL) {
		printk(BIOS_ERR, "Could not allocate range_entry!\n");
		return NULL;
	}
	new_entry->begin = begin;
	new_entry->end = end;
	new_entry->tag = tag;
	range_entry_link(prev_ptr, new_entry);

	return new_entry;
}

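/*
 * Example of the merge below: adjacent entries [0x0, 0xfff] and
 * [0x1000, 0x1fff] that carry the same tag collapse into a single
 * [0x0, 0x1fff] entry; neighbors with different tags are left alone.
 */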
static void merge_neighbor_entries(struct memranges *ranges)
{
	struct range_entry *cur;
	struct range_entry *prev;

	prev = NULL;
	/* Merge all neighbors and delete/free the leftover entry. */
	for (cur = ranges->entries; cur != NULL; cur = cur->next) {
		/* First entry. Just set prev. */
		if (prev == NULL) {
			prev = cur;
			continue;
		}

		/* If the previous entry merges with the current one, update
		 * the previous entry to cover the full range and delete the
		 * current entry from the list. */
		if (prev->end + 1 >= cur->begin && prev->tag == cur->tag) {
			prev->end = cur->end;
			range_entry_unlink_and_free(&prev->next, cur);
			/* Set cur to prev so cur->next is valid since cur
			 * was just unlinked and freed. */
			cur = prev;
			continue;
		}

		prev = cur;
	}
}

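/*
 * Example of the removal below: deleting [0x1000, 0x1fff] from a single
 * entry covering [0x0, 0x3fff] splits it into [0x0, 0xfff] and
 * [0x2000, 0x3fff]; a removal that covers an entry completely unlinks it.
 */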
static void remove_memranges(struct memranges *ranges,
                             resource_t begin, resource_t end,
                             unsigned long unused)
{
	struct range_entry *cur;
	struct range_entry *next;
	struct range_entry **prev_ptr;

	prev_ptr = &ranges->entries;
	for (cur = ranges->entries; cur != NULL; cur = next) {
		resource_t tmp_end;

		/* Cache the next value to handle unlinks. */
		next = cur->next;

		/* No other ranges are affected. */
		if (end < cur->begin)
			break;

		/* The removal range starts after this one. */
		if (begin > cur->end) {
			prev_ptr = &cur->next;
			continue;
		}

		/* The removal range overlaps with the current entry either
		 * partially or fully. However, we need to adjust the removal
		 * range for any holes. */
		if (begin <= cur->begin) {
			begin = cur->begin;

			/* Full removal. */
			if (end >= cur->end) {
				begin = cur->end + 1;
				range_entry_unlink_and_free(prev_ptr, cur);
				continue;
			}
		}

		/* prev_ptr can be set now that the unlink path wasn't taken. */
		prev_ptr = &cur->next;

		/* Clip the end fragment to do proper splitting. */
		tmp_end = end;
		if (end > cur->end)
			tmp_end = cur->end;

		/* Hole punched in middle of entry. */
		if (begin > cur->begin && tmp_end < cur->end) {
			range_list_add(&cur->next, end + 1, cur->end, cur->tag);
			cur->end = begin - 1;
			break;
		}

		/* Removal at beginning. */
		if (begin == cur->begin)
			cur->begin = tmp_end + 1;

		/* Removal at end. */
		if (tmp_end == cur->end)
			cur->end = begin - 1;
	}
}

static void merge_add_memranges(struct memranges *ranges,
                                resource_t begin, resource_t end,
                                unsigned long tag)
{
	struct range_entry *cur;
	struct range_entry **prev_ptr;

	prev_ptr = &ranges->entries;

	/* Remove all existing entries covered by the range. */
	remove_memranges(ranges, begin, end, -1);

	/* Find the entry to place the new entry after. Since
	 * remove_memranges() was called above, there is a guaranteed
	 * spot for this new entry. */
	for (cur = ranges->entries; cur != NULL; cur = cur->next) {
		/* Found insertion spot before current entry. */
		if (end < cur->begin)
			break;

		/* Keep track of previous entry to insert new entry after it. */
		prev_ptr = &cur->next;

		/* The new entry starts after this one. */
		if (begin > cur->end)
			continue;
	}

	/* Add new entry and merge with neighbors. */
	range_list_add(prev_ptr, begin, end, tag);
	merge_neighbor_entries(ranges);
}

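/* Retag every entry carrying old_tag with new_tag, then re-merge any
 * neighbors that have become adjacent with the same tag. */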
void memranges_update_tag(struct memranges *ranges, unsigned long old_tag,
                          unsigned long new_tag)
{
	struct range_entry *r;

	memranges_each_entry(r, ranges) {
		if (range_entry_tag(r) == old_tag)
			range_entry_update_tag(r, new_tag);
	}

	merge_neighbor_entries(ranges);
}

typedef void (*range_action_t)(struct memranges *ranges,
                               resource_t begin, resource_t end,
                               unsigned long tag);

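/*
 * Example of the alignment below: base = 0x1234 and size = 0x100 yield
 * begin = 0x1000 and end = 0x1fff, i.e. the smallest 4KiB-aligned window
 * that covers the requested [0x1234, 0x1333] range.
 */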
static void do_action(struct memranges *ranges,
                      resource_t base, resource_t size, unsigned long tag,
                      range_action_t action)
{
	resource_t end;
	resource_t begin;

	if (size == 0)
		return;

	/* The addresses are aligned to 4096 bytes: the begin address is
	 * aligned down while the end address is aligned up to be conservative
	 * about the full range covered. */
	begin = ALIGN_DOWN(base, 4096);
	end = begin + size + (base - begin);
	end = ALIGN_UP(end, 4096) - 1;
	action(ranges, begin, end, tag);
}

void memranges_create_hole(struct memranges *ranges,
                           resource_t base, resource_t size)
{
	do_action(ranges, base, size, -1, remove_memranges);
}

void memranges_insert(struct memranges *ranges,
                      resource_t base, resource_t size, unsigned long tag)
{
	do_action(ranges, base, size, tag, merge_add_memranges);
}

struct collect_context {
	struct memranges *ranges;
	unsigned long tag;
	memrange_filter_t filter;
};

static void collect_ranges(void *gp, struct device *dev, struct resource *res)
{
	struct collect_context *ctx = gp;

	if (res->size == 0)
		return;

	if (ctx->filter == NULL || ctx->filter(dev, res))
		memranges_insert(ctx->ranges, res->base, res->size, ctx->tag);
}

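/* Walk every device resource via search_global_resources() and add the
 * ones whose flags, under 'mask', equal 'match'. IORESOURCE_MEM is OR'd
 * into both so that only memory resources are ever considered; an optional
 * filter callback can reject individual resources. */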
void memranges_add_resources_filter(struct memranges *ranges,
                                    unsigned long mask, unsigned long match,
                                    unsigned long tag,
                                    memrange_filter_t filter)
{
	struct collect_context context;

	/* Only deal with MEM resources. */
	mask |= IORESOURCE_MEM;
	match |= IORESOURCE_MEM;

	context.ranges = ranges;
	context.tag = tag;
	context.filter = filter;
	search_global_resources(mask, match, collect_ranges, &context);
}

void memranges_add_resources(struct memranges *ranges,
                             unsigned long mask, unsigned long match,
                             unsigned long tag)
{
	memranges_add_resources_filter(ranges, mask, match, tag, NULL);
}

void memranges_init_empty(struct memranges *ranges)
{
	ranges->entries = NULL;
}

void memranges_init(struct memranges *ranges,
                    unsigned long mask, unsigned long match,
                    unsigned long tag)
{
	memranges_init_empty(ranges);
	memranges_add_resources(ranges, mask, match, tag);
}

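/*
 * Illustrative usage sketch, kept out of the build with "#if 0". The tag
 * value, the 0xa0000/0x60000 hole and the use of IORESOURCE_CACHEABLE are
 * assumptions for the sake of the example; range_entry_base() is assumed
 * to come from <memrange.h> alongside range_entry_end() and
 * range_entry_tag().
 */
#if 0
static void example_memranges_usage(void)
{
	struct memranges mem;
	struct range_entry *r;
	const unsigned long tag_ram = 1; /* Tags are caller-defined. */

	/* Collect all cacheable memory resources from the device tree. */
	memranges_init(&mem, IORESOURCE_CACHEABLE, IORESOURCE_CACHEABLE,
	               tag_ram);

	/* Punch a hole for a region that must not be treated as RAM. */
	memranges_create_hole(&mem, 0xa0000, 0x60000);

	/* Walk the merged, sorted ranges. */
	memranges_each_entry(r, &mem) {
		printk(BIOS_DEBUG, "[0x%llx, 0x%llx] tag 0x%lx\n",
		       (unsigned long long)range_entry_base(r),
		       (unsigned long long)range_entry_end(r) - 1,
		       range_entry_tag(r));
	}

	/* Release all entries back to the free list. */
	memranges_teardown(&mem);
}
#endif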
void memranges_teardown(struct memranges *ranges)
{
	while (ranges->entries != NULL) {
		range_entry_unlink_and_free(&ranges->entries, ranges->entries);
	}
}

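/*
 * Example of the hole filling below: given entries [0x0, 0xfff] and
 * [0x2000, 0x2fff] with limit = 0x10000, the gaps [0x1000, 0x1fff] and
 * [0x3000, 0xffff] are added with the supplied tag.
 */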
void memranges_fill_holes_up_to(struct memranges *ranges,
                                resource_t limit, unsigned long tag)
{
	struct range_entry *cur;
	struct range_entry *prev;

	prev = NULL;
	for (cur = ranges->entries; cur != NULL; cur = cur->next) {
		/* First entry. Just set prev. */
		if (prev == NULL) {
			prev = cur;
			continue;
		}

		/* If the previous entry does not directly precede the current
		 * entry, then add a new entry just after the previous one. */
		if (range_entry_end(prev) != cur->begin) {
			resource_t end;

			end = cur->begin - 1;
			if (end >= limit)
				end = limit - 1;
			range_list_add(&prev->next, range_entry_end(prev),
			               end, tag);
		}

		prev = cur;

		/* Hit the requested range limit. No other entries after this
		 * are affected. */
		if (cur->begin >= limit)
			break;
	}

	/* Handle the case where the limit was never reached. A new entry needs
	 * to be added to cover the range up to the limit. */
	if (prev != NULL && range_entry_end(prev) < limit)
		range_list_add(&prev->next, range_entry_end(prev),
		               limit - 1, tag);

	/* Merge all entries that were newly added. */
	merge_neighbor_entries(ranges);
}

struct range_entry *memranges_next_entry(struct memranges *ranges,
                                         const struct range_entry *r)
{
	return r->next;
}