/* SPDX-License-Identifier: GPL-2.0-only */

#include <console/console.h>
#include <device/device.h>
#include <memrange.h>
#include <post.h>

/**
 * Round a number up to an alignment.
 *
 * @param val The starting value.
 * @param pow Alignment as a power of two (i.e. the log2 of the alignment).
 * @return Rounded up number.
 */
static resource_t round(resource_t val, unsigned long pow)
{
	return ALIGN_UP(val, POWER_OF_2(pow));
}
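
/*
 * Illustrative example (hypothetical numbers): with val = 0x1234 and
 * pow = 12, POWER_OF_2(12) is 0x1000 (4 KiB), so round() returns
 * 0x2000, the next 4 KiB boundary at or above val.
 */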

static const char *resource2str(const struct resource *res)
{
	if (res->flags & IORESOURCE_IO)
		return "io";
	if (res->flags & IORESOURCE_PREFETCH)
		return "prefmem";
	if (res->flags & IORESOURCE_MEM)
		return "mem";
	return "undefined";
}

static bool dev_has_children(const struct device *dev)
{
	const struct bus *bus = dev->link_list;
	return bus && bus->children;
}

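/*
 * Print a BIOS_DEBUG message indented by `depth` spaces so that the log
 * mirrors the nesting of the device tree being walked.
 */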
#define res_printk(depth, str, ...) printk(BIOS_DEBUG, "%*c"str, depth, ' ', __VA_ARGS__)

/*
 * During pass 1, once all the requirements for downstream devices of a
 * bridge are gathered, this function calculates the overall resource
 * requirement for the bridge. It starts by picking the largest resource
 * requirement downstream for the given resource type and adds the
 * remaining requirements in descending order.
 *
 * Additionally, it takes the alignment and limits of the downstream
 * devices into consideration and ensures that they get propagated to
 * the bridge resource. This is required to guarantee that the upstream
 * bridge/domain honors the limit and alignment requirements for this
 * bridge based on the tightest constraints downstream.
 */
static void update_bridge_resource(const struct device *bridge, struct resource *bridge_res,
				   unsigned long type_match, int print_depth)
{
	const struct device *child;
	struct resource *child_res;
	resource_t base;
	bool first_child_res = true;
	const unsigned long type_mask = IORESOURCE_TYPE_MASK | IORESOURCE_PREFETCH;
	struct bus *bus = bridge->link_list;

	child_res = NULL;

	/*
	 * `base` keeps track of where the next allocation for child resources
	 * can take place from within the bridge resource window. Since the
	 * bridge resource window allocation is not performed yet, it can start
	 * at 0. `base` gets updated every time a resource requirement is
	 * accounted for in the loop below. After scanning all these resources,
	 * `base` will indicate the total size requirement for the current
	 * bridge resource window.
	 */
	base = 0;

	res_printk(print_depth, "%s %s: size: %llx align: %d gran: %d limit: %llx\n",
		   dev_path(bridge), resource2str(bridge_res), bridge_res->size,
		   bridge_res->align, bridge_res->gran, bridge_res->limit);

	while ((child = largest_resource(bus, &child_res, type_mask, type_match))) {

		/* Size 0 resources can be skipped. */
		if (!child_res->size)
			continue;

		/*
		 * Propagate the resource alignment to the bridge resource if this is
		 * the first child resource with non-zero size being considered. For
		 * all other child resources, alignment is taken care of by rounding
		 * the base up as per the child resource alignment. It is guaranteed
		 * that pass 2 follows the exact same method of picking the resource
		 * for allocation using largest_resource(). Thus, as long as the
		 * alignment for the first child resource is propagated up to the
		 * bridge resource, it can be guaranteed that the alignment for all
		 * resources is appropriately met.
		 */
		if (first_child_res && (child_res->align > bridge_res->align))
			bridge_res->align = child_res->align;

		first_child_res = false;

		/*
		 * Propagate the resource limit to the bridge resource only if the
		 * child resource limit is non-zero. If a downstream device has
		 * stricter requirements w.r.t. limits for any resource, that
		 * constraint needs to be propagated back up through the bridges below
		 * the domain. This guarantees that the resource allocation which
		 * starts at the domain level takes all these constraints into
		 * account, thus working on a global view.
		 */
		if (child_res->limit && (child_res->limit < bridge_res->limit))
			bridge_res->limit = child_res->limit;

		/*
		 * Propagate a downstream request to allocate above the 4G boundary
		 * up to the bridge resource. This ensures that during pass 2, the
		 * resource allocator at the domain level has a global view of all the
		 * downstream device requirements and thus address space is allocated
		 * as per the updated flags in the bridge resource.
		 *
		 * Since the bridge resource is a single window, all the downstream
		 * resources of this bridge resource will be allocated in space above
		 * the 4G boundary.
		 */
		if (child_res->flags & IORESOURCE_ABOVE_4G)
			bridge_res->flags |= IORESOURCE_ABOVE_4G;

		/*
		 * An alignment value of 0 means that the child resource has no
		 * alignment requirements and so the base value remains unchanged here.
		 */
		base = round(base, child_res->align);

		res_printk(print_depth + 1, "%s %02lx * [0x%llx - 0x%llx] %s\n",
			   dev_path(child), child_res->index, base, base + child_res->size - 1,
			   resource2str(child_res));

		base += child_res->size;
	}

	/*
	 * After all downstream device resources are scanned, `base` represents
	 * the total size requirement for the current bridge resource window.
	 * This size needs to be rounded up to the granularity requirement of
	 * the bridge to ensure that the upstream bridge/domain allocates a big
	 * enough window.
	 */
	bridge_res->size = round(base, bridge_res->gran);

	res_printk(print_depth, "%s %s: size: %llx align: %d gran: %d limit: %llx done\n",
		   dev_path(bridge), resource2str(bridge_res), bridge_res->size,
		   bridge_res->align, bridge_res->gran, bridge_res->limit);
}
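
/*
 * Illustrative example (hypothetical numbers): assume a bridge with
 * three downstream mem resources of sizes 0x800000 (align 23), 0x100000
 * (align 20) and 0x1000 (align 12). largest_resource() visits them in
 * descending order, so base grows 0x0 -> 0x800000 -> 0x900000 ->
 * 0x901000. The bridge resource inherits align 23 from the first child,
 * and with the typical PCI bridge mem granularity of 20 (1 MiB) the
 * final window size is round(0x901000, 20) = 0xa00000.
 */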

/*
 * During pass 1, at the bridge level, the resource allocator gathers
 * requirements from downstream devices and updates its own resource
 * windows for the provided resource type.
 */
static void compute_bridge_resources(const struct device *bridge, unsigned long type_match,
				     int print_depth)
{
	const struct device *child;
	struct resource *res;
	struct bus *bus = bridge->link_list;
	const unsigned long type_mask = IORESOURCE_TYPE_MASK | IORESOURCE_PREFETCH;

	for (res = bridge->resource_list; res; res = res->next) {
		if (!(res->flags & IORESOURCE_BRIDGE))
			continue;

		if ((res->flags & type_mask) != type_match)
			continue;

		/*
		 * Ensure that the resource requirements for all downstream bridges are
		 * gathered before updating the window for the current bridge resource.
		 */
		for (child = bus->children; child; child = child->sibling) {
			if (!dev_has_children(child))
				continue;
			compute_bridge_resources(child, type_match, print_depth + 1);
		}

		/*
		 * Update the window for the current bridge resource now that all
		 * downstream requirements are gathered.
		 */
		update_bridge_resource(bridge, res, type_match, print_depth);
	}
}

/*
 * During pass 1, the resource allocator walks down the entire sub-tree
 * of a domain. It gathers resource requirements for every downstream
 * bridge by looking at the resource requests of its children. Thus, the
 * requirement gathering begins at the leaf devices and is propagated
 * back up to the downstream bridges of the domain.
 *
 * At the domain level, it identifies every downstream bridge and walks
 * down that bridge to gather requirements for each resource type, i.e.
 * i/o, mem and prefmem. Since bridges have separate windows for mem and
 * prefmem, requirements for each need to be collected separately.
 *
 * Domain resource windows are fixed ranges and hence requirement
 * gathering does not result in any changes to these fixed ranges.
 */
static void compute_domain_resources(const struct device *domain)
{
	const struct device *child;
	const int print_depth = 1;

	if (domain->link_list == NULL)
		return;

	for (child = domain->link_list->children; child; child = child->sibling) {

		/* Skip if this is not a bridge or has no children under it. */
		if (!dev_has_children(child))
			continue;

		compute_bridge_resources(child, IORESOURCE_IO, print_depth);
		compute_bridge_resources(child, IORESOURCE_MEM, print_depth);
		compute_bridge_resources(child, IORESOURCE_MEM | IORESOURCE_PREFETCH,
					 print_depth);
	}
}

static unsigned char get_alignment_by_resource_type(const struct resource *res)
{
	if (res->flags & IORESOURCE_MEM)
		return 12;	/* Page-aligned --> log2(4KiB) */
	else if (res->flags & IORESOURCE_IO)
		return 0;	/* No special alignment required --> log2(1) */

	die("Unexpected resource type: flags(%lx)!\n", res->flags);
}

/*
 * If the resource is NULL or if the resource is not assigned, then it
 * cannot be used for allocation for downstream devices.
 */
static bool is_resource_invalid(const struct resource *res)
{
	return (res == NULL) || !(res->flags & IORESOURCE_ASSIGNED);
}

static void initialize_domain_io_resource_memranges(struct memranges *ranges,
						    const struct resource *res,
						    unsigned long memrange_type)
{
	memranges_insert(ranges, res->base, res->limit - res->base + 1, memrange_type);
}

static void initialize_domain_mem_resource_memranges(struct memranges *ranges,
						     const struct resource *res,
						     unsigned long memrange_type)
{
	resource_t res_base;
	resource_t res_limit;

	const resource_t limit_4g = 0xffffffff;

	res_base = res->base;
	res_limit = res->limit;

	/*
	 * Split the resource into two separate ranges if it crosses the 4G
	 * boundary. The memrange type is set differently for the two ranges to
	 * ensure that memrange does not merge them. For the range above the 4G
	 * boundary, the given memrange type is ORed with IORESOURCE_ABOVE_4G.
	 */
	if (res_base <= limit_4g) {

		resource_t range_limit;

		/* Clip the resource limit at the 4G boundary if necessary. */
		range_limit = MIN(res_limit, limit_4g);
		memranges_insert(ranges, res_base, range_limit - res_base + 1, memrange_type);

		/*
		 * If the resource lies completely below the 4G boundary, nothing more
		 * needs to be done.
		 */
		if (res_limit <= limit_4g)
			return;

		/*
		 * If the resource window crosses the 4G boundary, then update res_base
		 * to add another entry for the range above the boundary.
		 */
		res_base = limit_4g + 1;
	}

	if (res_base > res_limit)
		return;

	/*
	 * If the resource lies completely above the 4G boundary or if it was
	 * clipped to add two separate ranges, the range above the 4G boundary
	 * has the resource flag IORESOURCE_ABOVE_4G set. This allows the
	 * domain to handle any downstream requests for resource allocation
	 * above 4G differently.
	 */
	memranges_insert(ranges, res_base, res_limit - res_base + 1,
			 memrange_type | IORESOURCE_ABOVE_4G);
}
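
/*
 * Illustrative example (hypothetical window): a domain mem resource
 * covering [0xc0000000 - 0x17fffffff] crosses the 4G boundary, so it is
 * split into [0xc0000000 - 0xffffffff] tagged `memrange_type` and
 * [0x100000000 - 0x17fffffff] tagged
 * `memrange_type | IORESOURCE_ABOVE_4G`.
 */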

/*
 * This function initializes memranges for a domain device. If the
 * resource crosses the 4G boundary, then this function splits it into
 * two ranges -- one for the window below 4G and the other for the
 * window above 4G. The latter range has the IORESOURCE_ABOVE_4G flag
 * set to satisfy resource requests from downstream devices for
 * allocations above 4G.
 */
static void initialize_domain_memranges(struct memranges *ranges, const struct resource *res,
					unsigned long memrange_type)
{
	unsigned char align = get_alignment_by_resource_type(res);

	memranges_init_empty_with_alignment(ranges, NULL, 0, align);

	if (is_resource_invalid(res))
		return;

	if (res->flags & IORESOURCE_IO)
		initialize_domain_io_resource_memranges(ranges, res, memrange_type);
	else
		initialize_domain_mem_resource_memranges(ranges, res, memrange_type);
}

/*
 * This function initializes memranges for a bridge device. Unlike a
 * domain, a bridge does not need to care about a resource window
 * crossing the 4G boundary. This is handled by the resource allocator
 * at the domain level to ensure that all downstream bridges are
 * allocated space either above or below the 4G boundary as per the
 * state of IORESOURCE_ABOVE_4G for the respective bridge resource.
 *
 * So, this function creates a single range of the entire resource
 * window available for the bridge resource. Thus all downstream
 * resources of the bridge for the given resource type get allocated
 * space from the same window. If there is any downstream resource of
 * the bridge which requests allocation above 4G, then all other
 * downstream resources of the same type under the bridge get allocated
 * above 4G.
 */
static void initialize_bridge_memranges(struct memranges *ranges, const struct resource *res,
					unsigned long memrange_type)
{
	unsigned char align = get_alignment_by_resource_type(res);

	memranges_init_empty_with_alignment(ranges, NULL, 0, align);

	if (is_resource_invalid(res))
		return;

	memranges_insert(ranges, res->base, res->limit - res->base + 1, memrange_type);
}

static void print_resource_ranges(const struct device *dev, const struct memranges *ranges)
{
	const struct range_entry *r;

	printk(BIOS_INFO, " %s: Resource ranges:\n", dev_path(dev));

	if (memranges_is_empty(ranges))
		printk(BIOS_INFO, " * EMPTY!!\n");

	memranges_each_entry(r, ranges) {
		printk(BIOS_INFO, " * Base: %llx, Size: %llx, Tag: %lx\n",
		       range_entry_base(r), range_entry_size(r), range_entry_tag(r));
	}
}

/*
 * This is where the actual allocation of resources happens during
 * pass 2. Given the list of memory ranges corresponding to the
 * resource of a given type, it finds the biggest unallocated resource
 * using the type mask on the downstream bus. This continues in
 * descending order until all resources of the given type are allocated
 * address space within the current resource window.
 */
static void allocate_child_resources(struct bus *bus, struct memranges *ranges,
				     unsigned long type_mask, unsigned long type_match)
{
	struct resource *resource = NULL;
	const struct device *dev;

	while ((dev = largest_resource(bus, &resource, type_mask, type_match))) {

		if (!resource->size)
			continue;

		if (memranges_steal(ranges, resource->limit, resource->size, resource->align,
				    type_match, &resource->base) == false) {
			printk(BIOS_ERR, " ERROR: Resource didn't fit!!! ");
			printk(BIOS_DEBUG, " %s %02lx * size: 0x%llx limit: %llx %s\n",
			       dev_path(dev), resource->index,
			       resource->size, resource->limit, resource2str(resource));
			continue;
		}

		resource->limit = resource->base + resource->size - 1;
		resource->flags |= IORESOURCE_ASSIGNED;

		printk(BIOS_DEBUG, " %s %02lx * [0x%llx - 0x%llx] limit: %llx %s\n",
		       dev_path(dev), resource->index, resource->base,
		       resource->size ? resource->base + resource->size - 1 :
		       resource->base, resource->limit, resource2str(resource));
	}
}
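
/*
 * Illustrative example (hypothetical numbers): given a 1 MiB window and
 * pending mem requests of sizes 0x80000, 0x40000 and 0x20000,
 * largest_resource() returns them in that order and memranges_steal()
 * carves each out of the window, so smaller requests can never fragment
 * the space needed by larger ones.
 */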

static void update_constraints(struct memranges *ranges, const struct device *dev,
			       const struct resource *res)
{
	if (!res->size)
		return;

	printk(BIOS_DEBUG, " %s: %s %02lx base %08llx limit %08llx %s (fixed)\n",
	       __func__, dev_path(dev), res->index, res->base,
	       res->base + res->size - 1, resource2str(res));

	memranges_create_hole(ranges, res->base, res->size);
}

/*
 * Scan the entire tree to identify any fixed resources allocated by
 * any device to ensure that the address map for domain resources is
 * appropriately updated.
 *
 * Domains can typically provide a memrange for the entire address
 * space. So, this function punches holes in the address space for all
 * fixed resources that are already defined. Both I/O and normal memory
 * resources are added as fixed. Both need to be removed from the
 * address space where dynamic resource allocations are sourced.
 */
static void avoid_fixed_resources(struct memranges *ranges, const struct device *dev,
				  unsigned long mask_match)
{
	const struct resource *res;
	const struct device *child;
	const struct bus *bus;

	for (res = dev->resource_list; res != NULL; res = res->next) {
		if ((res->flags & mask_match) != mask_match)
			continue;
		update_constraints(ranges, dev, res);
	}

	bus = dev->link_list;
	if (bus == NULL)
		return;

	for (child = bus->children; child != NULL; child = child->sibling)
		avoid_fixed_resources(ranges, child, mask_match);
}

static void constrain_domain_resources(const struct device *domain, struct memranges *ranges,
				       unsigned long type)
{
	unsigned long mask_match = type | IORESOURCE_FIXED;

	if (type == IORESOURCE_IO) {
		/*
		 * Don't allow allocations in the VGA I/O range. PCI has special
		 * cases for that.
		 */
		memranges_create_hole(ranges, 0x3b0, 0x3df - 0x3b0 + 1);

		/*
		 * The resource allocator no longer supports the legacy behavior where
		 * I/O resource allocation is guaranteed to avoid aliases over legacy
		 * PCI expansion card addresses.
		 */
	}

	avoid_fixed_resources(ranges, domain, mask_match);
}

/*
 * This function creates a list of memranges of a given type using the
 * resource that is provided. If the given resource is NULL or if the
 * resource window size is 0, then it creates an empty list. This
 * results in resource allocation for that resource type failing for
 * all downstream devices since there is nothing to allocate from.
 *
 * In the case of a domain, it applies additional constraints to ensure
 * that the memranges do not overlap any of the fixed resources under
 * that domain. A domain typically provides a memrange for the entire
 * address space. Thus, it is up to the chipset to add DRAM and all
 * other windows which cannot be used for resource allocation as fixed
 * resources.
 */
static void setup_resource_ranges(const struct device *dev, const struct resource *res,
				  unsigned long type, struct memranges *ranges)
{
	printk(BIOS_DEBUG, "%s %s: base: %llx size: %llx align: %d gran: %d limit: %llx\n",
	       dev_path(dev), resource2str(res), res->base, res->size, res->align,
	       res->gran, res->limit);

	if (dev->path.type == DEVICE_PATH_DOMAIN) {
		initialize_domain_memranges(ranges, res, type);
		constrain_domain_resources(dev, ranges, type);
	} else {
		initialize_bridge_memranges(ranges, res, type);
	}

	print_resource_ranges(dev, ranges);
}

static void cleanup_resource_ranges(const struct device *dev, struct memranges *ranges,
				    const struct resource *res)
{
	memranges_teardown(ranges);
	printk(BIOS_DEBUG, "%s %s: base: %llx size: %llx align: %d gran: %d limit: %llx done\n",
	       dev_path(dev), resource2str(res), res->base, res->size, res->align,
	       res->gran, res->limit);
}

/*
 * Pass 2 of the resource allocator at the bridge level loops through
 * all the resources for the bridge and generates a list of memory
 * ranges similar to that at the domain level. However, there is no need
 * to apply any additional constraints since the window allocated to the
 * bridge is guaranteed to be non-overlapping by the allocator at the
 * domain level.
 *
 * Allocation at the bridge level works the same as at the domain level
 * (starts with the biggest resource requirement from downstream devices
 * and continues in descending order). One major difference at the
 * bridge level is that it considers prefmem resources separately from
 * mem resources.
 *
 * Once allocation at the current bridge is complete, the resource
 * allocator continues walking down the downstream bridges until it hits
 * the leaf devices.
 */
static void allocate_bridge_resources(const struct device *bridge)
{
	struct memranges ranges;
	const struct resource *res;
	struct bus *bus = bridge->link_list;
	unsigned long type_match;
	struct device *child;
	const unsigned long type_mask = IORESOURCE_TYPE_MASK | IORESOURCE_PREFETCH;

	for (res = bridge->resource_list; res; res = res->next) {
		if (!res->size)
			continue;

		if (!(res->flags & IORESOURCE_BRIDGE))
			continue;

		type_match = res->flags & type_mask;

		setup_resource_ranges(bridge, res, type_match, &ranges);
		allocate_child_resources(bus, &ranges, type_mask, type_match);
		cleanup_resource_ranges(bridge, &ranges, res);
	}

	for (child = bus->children; child; child = child->sibling) {
		if (!dev_has_children(child))
			continue;

		allocate_bridge_resources(child);
	}
}

static const struct resource *find_domain_resource(const struct device *domain,
						   unsigned long type)
{
	const struct resource *res;

	for (res = domain->resource_list; res; res = res->next) {
		if (res->flags & IORESOURCE_FIXED)
			continue;

		if ((res->flags & IORESOURCE_TYPE_MASK) == type)
			return res;
	}

	return NULL;
}

/*
 * Pass 2 of the resource allocator begins at the domain level. Every
 * domain has two types of resources: io and mem. For each of these
 * resources, this function creates a list of memory ranges that can be
 * used for downstream resource allocation. This list is constrained to
 * remove any fixed resources in the domain sub-tree of the given
 * resource type. It then uses the memory ranges to apply best fit on
 * the resource requirements of the downstream devices.
 *
 * Once resources are allocated to all downstream devices of the domain,
 * it walks down each downstream bridge to continue the same process
 * until resources are allocated to all devices under the domain.
 */
static void allocate_domain_resources(const struct device *domain)
{
	struct memranges ranges;
	struct device *child;
	const struct resource *res;

	/* Resource type I/O */
	res = find_domain_resource(domain, IORESOURCE_IO);
	if (res) {
		setup_resource_ranges(domain, res, IORESOURCE_IO, &ranges);
		allocate_child_resources(domain->link_list, &ranges, IORESOURCE_TYPE_MASK,
					 IORESOURCE_IO);
		cleanup_resource_ranges(domain, &ranges, res);
	}

	/*
	 * Resource type Mem:
	 * The domain does not distinguish between mem and prefmem resources.
	 * Thus, the resource allocation at the domain level considers mem and
	 * prefmem together when finding the best fit based on the biggest
	 * resource requirement.
	 *
	 * However, resource requests for allocation above the 4G boundary need
	 * to be handled separately if the domain resource window crosses this
	 * boundary. There is a single window for resources of type
	 * IORESOURCE_MEM. When creating memranges, this resource is split into
	 * two separate ranges -- one for the window below the 4G boundary and
	 * the other for the window above the 4G boundary (with the
	 * IORESOURCE_ABOVE_4G flag set). Thus, when allocating child
	 * resources, requests for below and above the 4G boundary are handled
	 * separately by setting the type_mask and type_match arguments to
	 * allocate_child_resources() accordingly.
	 */
	res = find_domain_resource(domain, IORESOURCE_MEM);
	if (res) {
		setup_resource_ranges(domain, res, IORESOURCE_MEM, &ranges);
		allocate_child_resources(domain->link_list, &ranges,
					 IORESOURCE_TYPE_MASK | IORESOURCE_ABOVE_4G,
					 IORESOURCE_MEM);
		allocate_child_resources(domain->link_list, &ranges,
					 IORESOURCE_TYPE_MASK | IORESOURCE_ABOVE_4G,
					 IORESOURCE_MEM | IORESOURCE_ABOVE_4G);
		cleanup_resource_ranges(domain, &ranges, res);
	}

	for (child = domain->link_list->children; child; child = child->sibling) {
		if (!dev_has_children(child))
			continue;

		/* Continue allocation for all downstream bridges. */
		allocate_bridge_resources(child);
	}
}
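
/*
 * Illustrative example (hypothetical window): if the domain mem window
 * is [0xc0000000 - 0x17fffffff], setup_resource_ranges() splits it at
 * the 4G boundary. The first allocate_child_resources() call above then
 * places all requests without IORESOURCE_ABOVE_4G in
 * [0xc0000000 - 0xffffffff], and the second call places the
 * IORESOURCE_ABOVE_4G requests in [0x100000000 - 0x17fffffff].
 */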

/*
 * This function forms the guts of the resource allocator. It walks
 * through the entire device tree for each domain two times.
 *
 * Every domain has a fixed set of ranges. These ranges cannot be
 * relaxed based on the requirements of the downstream devices. They
 * represent the available windows from which resources can be allocated
 * to the different devices under the domain.
 *
 * In order to identify the requirements of downstream devices, the
 * resource allocator walks in a DFS fashion. It gathers the
 * requirements from leaf devices and propagates those back up to their
 * upstream bridges until the requirements for all the downstream
 * devices of the domain are gathered. This is referred to as pass 1 of
 * the resource allocator.
 *
 * Once the requirements for all the devices under the domain are
 * gathered, the resource allocator walks a second time to allocate
 * resources to downstream devices as per the requirements. It always
 * picks the biggest resource request as per the type (i/o and mem) to
 * allocate space from its fixed window to the immediate downstream
 * device of the domain. In order to accomplish best fit for the
 * resources, a list of ranges is maintained for each resource type (i/o
 * and mem). At the domain level we don't differentiate between mem and
 * prefmem. Since they are allocated space from the same window, the
 * resource allocator at the domain level ensures that the biggest
 * requirement is selected independent of the prefetch type.
 *
 * Once the resource allocation for all immediate downstream devices is
 * complete at the domain level, the resource allocator walks down the
 * subtree for each downstream bridge to continue the allocation process
 * at the bridge level. Since bridges have separate windows for i/o, mem
 * and prefmem, the best fit algorithm at the bridge level looks for the
 * biggest requirement considering prefmem resources separately from
 * non-prefmem resources. This continues until resource allocation is
 * performed for all downstream bridges in the domain sub-tree. This is
 * referred to as pass 2 of the resource allocator.
 *
 * Some rules that are followed by the resource allocator:
 *  - Allocate resource locations for every device as long as
 *    the requirements can be satisfied.
 *  - Don't overlap with resources in fixed locations.
 *  - Don't overlap and follow the rules of bridges -- downstream
 *    devices of bridges should use parts of the address space
 *    allocated to the bridge.
 */
void allocate_resources(const struct device *root)
{
	const struct device *child;

	if ((root == NULL) || (root->link_list == NULL))
		return;

	for (child = root->link_list->children; child; child = child->sibling) {

		if (child->path.type != DEVICE_PATH_DOMAIN)
			continue;

		post_log_path(child);

		/* Pass 1 - Gather requirements. */
		printk(BIOS_INFO, "=== Resource allocator: %s - Pass 1 (gathering requirements) ===\n",
		       dev_path(child));
		compute_domain_resources(child);

		/* Pass 2 - Allocate resources as per gathered requirements. */
		printk(BIOS_INFO, "=== Resource allocator: %s - Pass 2 (allocating resources) ===\n",
		       dev_path(child));
		allocate_domain_resources(child);

		printk(BIOS_INFO, "=== Resource allocator: %s - resource allocation complete ===\n",
		       dev_path(child));
	}
}