blob: b2839d09dc1aab1b2edffb44b133bea2966df301 [file] [log] [blame]
Aaron Durbina05a8522013-03-22 20:44:46 -05001/*
2 * This file is part of the coreboot project.
3 *
4 * Copyright (C) 2013 Google, Inc.
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; version 2 of the License.
9 *
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
Aaron Durbina05a8522013-03-22 20:44:46 -050014 */
15#include <stdlib.h>
16#include <console/console.h>
17#include <memrange.h>
18
19/* Coreboot doesn't have a free() function. Therefore, keep a cache of
20 * free'd entries. */
21static struct range_entry *free_list;
22
/* Insert r at the position referenced by *prev_ptr, which is either the
 * list head or some existing entry's next pointer. r becomes the entry
 * that *prev_ptr points to; the old target becomes r's successor. */
static inline void range_entry_link(struct range_entry **prev_ptr,
				    struct range_entry *r)
{
	r->next = *prev_ptr;
	*prev_ptr = r;
}
29
/* Remove r from its list. *prev_ptr must be the link that currently
 * points at r (the head pointer or the previous entry's next field).
 * r's next pointer is cleared so it carries no stale linkage. */
static inline void range_entry_unlink(struct range_entry **prev_ptr,
				      struct range_entry *r)
{
	*prev_ptr = r->next;
	r->next = NULL;
}
36
/* Unlink r from its list and push it onto the free_list cache for reuse
 * by alloc_range(); see the free_list comment above -- entries are
 * recycled rather than released because there is no usable free(). */
static inline void range_entry_unlink_and_free(struct range_entry **prev_ptr,
					       struct range_entry *r)
{
	range_entry_unlink(prev_ptr, r);
	range_entry_link(&free_list, r);
}
43
44static struct range_entry *alloc_range(void)
45{
46 if (free_list != NULL) {
47 struct range_entry *r;
48
49 r = free_list;
50 range_entry_unlink(&free_list, r);
51 return r;
52 }
53 return malloc(sizeof(struct range_entry));
54}
55
56static inline struct range_entry *
57range_list_add(struct range_entry **prev_ptr, resource_t begin, resource_t end,
58 unsigned long tag)
59{
60 struct range_entry *new_entry;
61
62 new_entry = alloc_range();
63 if (new_entry == NULL) {
64 printk(BIOS_ERR, "Could not allocate range_entry!\n");
65 return NULL;
66 }
67 new_entry->begin = begin;
68 new_entry->end = end;
69 new_entry->tag = tag;
70 range_entry_link(prev_ptr, new_entry);
71
72 return new_entry;
73}
74
75static void merge_neighbor_entries(struct memranges *ranges)
76{
77 struct range_entry *cur;
78 struct range_entry *prev;
79
80 prev = NULL;
81 /* Merge all neighbors and delete/free the leftover entry. */
82 for (cur = ranges->entries; cur != NULL; cur = cur->next) {
83 /* First entry. Just set prev. */
84 if (prev == NULL) {
85 prev = cur;
86 continue;
87 }
88
89 /* If the previous entry merges with the current update the
90 * previous entry to cover full range and delete current from
91 * the list. */
92 if (prev->end + 1 >= cur->begin && prev->tag == cur->tag) {
93 prev->end = cur->end;
94 range_entry_unlink_and_free(&prev->next, cur);
95 /* Set cur to prev so cur->next is valid since cur
96 * was just unlinked and free. */
97 cur = prev;
98 continue;
99 }
100
101 prev = cur;
102 }
103}
104
/* Remove the inclusive address range [begin, end] from ranges. Entries
 * fully covered are deleted, partially covered entries are trimmed, and
 * an entry strictly containing the range is split in two. The unused
 * parameter exists only so this function matches the range_action_t
 * signature. */
static void remove_memranges(struct memranges *ranges,
			     resource_t begin, resource_t end,
			     unsigned long unused)
{
	struct range_entry *cur;
	struct range_entry *next;
	struct range_entry **prev_ptr;

	prev_ptr = &ranges->entries;
	for (cur = ranges->entries; cur != NULL; cur = next) {
		resource_t tmp_end;

		/* Cache the next value to handle unlinks. */
		next = cur->next;

		/* No other ranges are affected. */
		if (end < cur->begin)
			break;

		/* The removal range starts after this one. */
		if (begin > cur->end) {
			prev_ptr = &cur->next;
			continue;
		}

		/* The removal range overlaps with the current entry either
		 * partially or fully. However, we need to adjust the removal
		 * range for any holes. */
		if (begin <= cur->begin) {
			begin = cur->begin;

			/* Full removal. Advance begin past this entry so
			 * gaps between entries are skipped over on the
			 * following iterations. */
			if (end >= cur->end) {
				begin = cur->end + 1;
				range_entry_unlink_and_free(prev_ptr, cur);
				continue;
			}
		}

		/* prev_ptr can be set now that the unlink path wasn't taken. */
		prev_ptr = &cur->next;

		/* Clip the end fragment to do proper splitting. */
		tmp_end = end;
		if (end > cur->end)
			tmp_end = cur->end;

		/* Hole punched in middle of entry: cur keeps the leading
		 * fragment and a new entry is added for the trailing one.
		 * No later entries can be affected, so stop here. */
		if (begin > cur->begin && tmp_end < cur->end) {
			range_list_add(&cur->next, end + 1, cur->end, cur->tag);
			cur->end = begin - 1;
			break;
		}

		/* Removal at beginning. */
		if (begin == cur->begin)
			cur->begin = tmp_end + 1;

		/* Removal at end. */
		if (tmp_end == cur->end)
			cur->end = begin - 1;
	}
}
168
169static void merge_add_memranges(struct memranges *ranges,
170 resource_t begin, resource_t end,
171 unsigned long tag)
172{
173 struct range_entry *cur;
174 struct range_entry **prev_ptr;
175
176 prev_ptr = &ranges->entries;
177
178 /* Remove all existing entries covered by the range. */
179 remove_memranges(ranges, begin, end, -1);
180
181 /* Find the entry to place the new entry after. Since
Martin Rothcbf2bd72013-07-09 21:51:14 -0600182 * remove_memranges() was called above there is a guaranteed
Aaron Durbina05a8522013-03-22 20:44:46 -0500183 * spot for this new entry. */
184 for (cur = ranges->entries; cur != NULL; cur = cur->next) {
185 /* Found insertion spot before current entry. */
186 if (end < cur->begin)
187 break;
188
189 /* Keep track of previous entry to insert new entry after it. */
190 prev_ptr = &cur->next;
191
192 /* The new entry starts after this one. */
193 if (begin > cur->end)
194 continue;
195
196 }
197
198 /* Add new entry and merge with neighbors. */
199 range_list_add(prev_ptr, begin, end, tag);
200 merge_neighbor_entries(ranges);
201}
202
Aaron Durbined9307d2014-02-05 15:44:30 -0600203void memranges_update_tag(struct memranges *ranges, unsigned long old_tag,
204 unsigned long new_tag)
205{
206 struct range_entry *r;
207
208 memranges_each_entry(r, ranges) {
209 if (range_entry_tag(r) == old_tag)
210 range_entry_update_tag(r, new_tag);
211 }
212
213 merge_neighbor_entries(ranges);
214}
215
Aaron Durbina05a8522013-03-22 20:44:46 -0500216typedef void (*range_action_t)(struct memranges *ranges,
217 resource_t begin, resource_t end,
218 unsigned long tag);
219
220static void do_action(struct memranges *ranges,
221 resource_t base, resource_t size, unsigned long tag,
222 range_action_t action)
223{
224 resource_t end;
225 resource_t begin;
226
Furquan Shaikh196ee2b2014-07-18 10:25:54 -0700227 if (size == 0)
228 return;
229
Aaron Durbina05a8522013-03-22 20:44:46 -0500230 /* The addresses are aligned to 4096 bytes: the begin address is
231 * aligned down while the end address is aligned up to be conservative
232 * about the full range covered. */
233 begin = ALIGN_DOWN(base, 4096);
234 end = begin + size + (base - begin);
235 end = ALIGN_UP(end, 4096) - 1;
236 action(ranges, begin, end, tag);
237}
238
/* Punch a hole covering [base, base + size), 4KiB-aligned outward, by
 * removing/trimming any entries that intersect it. */
void memranges_create_hole(struct memranges *ranges,
			   resource_t base, resource_t size)
{
	do_action(ranges, base, size, -1, remove_memranges);
}
244
/* Insert [base, base + size), 4KiB-aligned outward, with the given tag,
 * replacing whatever the range previously covered and merging with
 * same-tag neighbors. */
void memranges_insert(struct memranges *ranges,
		      resource_t base, resource_t size, unsigned long tag)
{
	do_action(ranges, base, size, tag, merge_add_memranges);
}
250
/* Closure threaded through search_global_resources() while gathering
 * device resources into a memranges object. */
struct collect_context {
	/* Destination range list for accepted resources. */
	struct memranges *ranges;
	/* Tag applied to every inserted range. */
	unsigned long tag;
	/* Optional per-resource predicate; NULL accepts every resource. */
	memrange_filter_t filter;
};
256
257static void collect_ranges(void *gp, struct device *dev, struct resource *res)
258{
259 struct collect_context *ctx = gp;
260
Vladimir Serbinenko7a4fa0a2014-02-05 23:38:29 +0100261 if (res->size == 0)
262 return;
263
Aaron Durbinca4f4b82014-02-08 15:41:52 -0600264 if (ctx->filter == NULL || ctx->filter(dev, res))
265 memranges_insert(ctx->ranges, res->base, res->size, ctx->tag);
Aaron Durbina05a8522013-03-22 20:44:46 -0500266}
267
Aaron Durbinca4f4b82014-02-08 15:41:52 -0600268void memranges_add_resources_filter(struct memranges *ranges,
269 unsigned long mask, unsigned long match,
270 unsigned long tag,
271 memrange_filter_t filter)
Aaron Durbina05a8522013-03-22 20:44:46 -0500272{
273 struct collect_context context;
274
275 /* Only deal with MEM resources. */
276 mask |= IORESOURCE_MEM;
277 match |= IORESOURCE_MEM;
278
279 context.ranges = ranges;
280 context.tag = tag;
Aaron Durbinca4f4b82014-02-08 15:41:52 -0600281 context.filter = filter;
Aaron Durbina05a8522013-03-22 20:44:46 -0500282 search_global_resources(mask, match, collect_ranges, &context);
283}
284
Aaron Durbinca4f4b82014-02-08 15:41:52 -0600285void memranges_add_resources(struct memranges *ranges,
286 unsigned long mask, unsigned long match,
287 unsigned long tag)
288{
289 memranges_add_resources_filter(ranges, mask, match, tag, NULL);
290}
291
Furquan Shaikh196ee2b2014-07-18 10:25:54 -0700292void memranges_init_empty(struct memranges *ranges)
293{
294 ranges->entries = NULL;
295}
296
Aaron Durbina05a8522013-03-22 20:44:46 -0500297void memranges_init(struct memranges *ranges,
298 unsigned long mask, unsigned long match,
299 unsigned long tag)
300{
Furquan Shaikh196ee2b2014-07-18 10:25:54 -0700301 memranges_init_empty(ranges);
Aaron Durbina05a8522013-03-22 20:44:46 -0500302 memranges_add_resources(ranges, mask, match, tag);
303}
304
305void memranges_teardown(struct memranges *ranges)
306{
307 while (ranges->entries != NULL) {
308 range_entry_unlink_and_free(&ranges->entries, ranges->entries);
309 }
310}
311
/* Fill every address gap between existing entries, up to limit, with
 * new entries carrying tag, then merge the result with same-tag
 * neighbors.
 *
 * NOTE(review): range_entry_end() is taken to return the exclusive end
 * (last address + 1) of an entry -- consistent with its use below both
 * as an adjacency test against cur->begin and as the begin of newly
 * added filler entries; confirm against memrange.h. */
void memranges_fill_holes_up_to(struct memranges *ranges,
				resource_t limit, unsigned long tag)
{
	struct range_entry *cur;
	struct range_entry *prev;

	prev = NULL;
	for (cur = ranges->entries; cur != NULL; cur = cur->next) {
		/* First entry. Just set prev. */
		if (prev == NULL) {
			prev = cur;
			continue;
		}

		/* If the previous entry does not directly precede the current
		 * entry then add a new entry just after the previous one. */
		if (range_entry_end(prev) != cur->begin) {
			resource_t end;

			end = cur->begin - 1;
			/* Clamp the filler so it never extends past limit. */
			if (end >= limit)
				end = limit - 1;
			range_list_add(&prev->next, range_entry_end(prev),
				       end, tag);
		}

		prev = cur;

		/* Hit the requested range limit. No other entries after this
		 * are affected. */
		if (cur->begin >= limit)
			break;
	}

	/* Handle the case where the limit was never reached. A new entry needs
	 * to be added to cover the range up to the limit. */
	if (prev != NULL && range_entry_end(prev) < limit)
		range_list_add(&prev->next, range_entry_end(prev),
			       limit - 1, tag);

	/* Merge all entries that were newly added. */
	merge_neighbor_entries(ranges);
}
Aaron Durbinf6f6e132013-03-26 21:22:42 -0500355
/* Return the entry following r, or NULL at the end of the list. The
 * ranges parameter is unused here; presumably kept for API symmetry
 * with other accessors. */
struct range_entry *memranges_next_entry(struct memranges *ranges,
					 const struct range_entry *r)
{
	return r->next;
}