/* SPDX-License-Identifier: GPL-2.0-only */

#include <console/console.h>
#include <device/device.h>
#include <memrange.h>
#include <post.h>

/**
 * Round a number up to an alignment.
 *
 * @param val The starting value.
 * @param pow Alignment as a power of two.
 * @return Rounded up number.
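 *
 * For example, round(0x2400, 12) == 0x3000, i.e. 0x2400 aligned up to a 4KiB (2^12)
 * boundary.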
 */
static resource_t round(resource_t val, unsigned long pow)
{
	return ALIGN_UP(val, POWER_OF_2(pow));
}

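/*
 * Stringify a resource type for log messages. Note that prefetchable memory is checked
 * before regular memory, since prefmem resources typically have IORESOURCE_MEM set as well.
 */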
static const char *resource2str(const struct resource *res)
{
	if (res->flags & IORESOURCE_IO)
		return "io";
	if (res->flags & IORESOURCE_PREFETCH)
		return "prefmem";
	if (res->flags & IORESOURCE_MEM)
		return "mem";
	return "undefined";
}

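/* Returns true if the device has at least one child on its first downstream bus. */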
static bool dev_has_children(const struct device *dev)
{
	const struct bus *bus = dev->link_list;
	return bus && bus->children;
}

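/* printk() helper that indents the message by `depth` spaces at BIOS_DEBUG level. */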
#define res_printk(depth, str, ...) printk(BIOS_DEBUG, "%*c"str, depth, ' ', __VA_ARGS__)

/*
 * During pass 1, once all the requirements for downstream devices of a bridge are gathered,
 * this function calculates the overall resource requirement for the bridge. It starts by
 * picking the largest resource requirement downstream for the given resource type and then
 * adds the remaining requirements in descending order of size.
 *
 * Additionally, it takes the alignment and limits of the downstream devices into
 * consideration and ensures that they get propagated to the bridge resource. This is required
 * to guarantee that the upstream bridge/domain honors the limit and alignment requirements
 * for this bridge based on the tightest constraints downstream.
 */
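/*
 * Worked example (hypothetical sizes): given two downstream requirements of 0x2000 bytes
 * (align 13) and 0x800 bytes (align 11), the loop below visits them largest-first:
 * base = 0 -> 0x2000 -> 0x2800. The first child also raises the bridge alignment to 13
 * (assuming it was smaller). With a bridge granularity of 12, the final window size is
 * rounded up to 0x3000.
 */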
static void update_bridge_resource(const struct device *bridge, struct resource *bridge_res,
				   unsigned long type_match, int print_depth)
{
	const struct device *child;
	struct resource *child_res;
	resource_t base;
	bool first_child_res = true;
	const unsigned long type_mask = IORESOURCE_TYPE_MASK | IORESOURCE_PREFETCH;
	struct bus *bus = bridge->link_list;

	child_res = NULL;

	/*
	 * `base` keeps track of where the next allocation for a child resource can take place
	 * within the bridge resource window. Since the bridge resource window allocation is
	 * not performed yet, it can start at 0. `base` gets updated every time a resource
	 * requirement is accounted for in the loop below. After scanning all these resources,
	 * `base` indicates the total size requirement for the current bridge resource window.
	 */
	base = 0;

	res_printk(print_depth, "%s %s: size: %llx align: %d gran: %d limit: %llx\n",
		   dev_path(bridge), resource2str(bridge_res), bridge_res->size,
		   bridge_res->align, bridge_res->gran, bridge_res->limit);

	while ((child = largest_resource(bus, &child_res, type_mask, type_match))) {

		/* Size 0 resources can be skipped. */
		if (!child_res->size)
			continue;

		/*
		 * Propagate the resource alignment to the bridge resource if this is the first
		 * child resource with non-zero size being considered. For all other child
		 * resources, alignment is taken care of by updating the base to round up as
		 * per the child resource alignment. It is guaranteed that pass 2 follows the
		 * exact same method of picking the resource for allocation using
		 * largest_resource(). Thus, as long as the alignment for the first child
		 * resource is propagated up to the bridge resource, it can be guaranteed that
		 * the alignment for all resources is appropriately met.
		 */
		if (first_child_res && (child_res->align > bridge_res->align))
			bridge_res->align = child_res->align;

		first_child_res = false;

		/*
		 * Propagate the resource limit to the bridge resource only if the child
		 * resource limit is non-zero. If a downstream device has stricter requirements
		 * w.r.t. limits for any resource, that constraint needs to be propagated back
		 * up the chain of bridges to the domain. This guarantees that the resource
		 * allocation which starts at the domain level takes all these constraints into
		 * account and thus works on a global view.
		 */
		if (child_res->limit && (child_res->limit < bridge_res->limit))
			bridge_res->limit = child_res->limit;

		/*
		 * Propagate a downstream request to allocate above the 4G boundary to the
		 * upstream bridge resource. This ensures that during pass 2, the resource
		 * allocator at the domain level has a global view of all the downstream device
		 * requirements and thus address space is allocated as per updated flags in the
		 * bridge resource.
		 *
		 * Since the bridge resource is a single window, all the downstream resources
		 * of this bridge resource will be allocated space above the 4G boundary.
		 */
		if (child_res->flags & IORESOURCE_ABOVE_4G)
			bridge_res->flags |= IORESOURCE_ABOVE_4G;

		/*
		 * An alignment value of 0 means that the child resource has no alignment
		 * requirements and so the base value remains unchanged here.
		 */
		base = round(base, child_res->align);

		res_printk(print_depth + 1, "%s %02lx * [0x%llx - 0x%llx] %s\n",
			   dev_path(child), child_res->index, base,
			   base + child_res->size - 1, resource2str(child_res));

		base += child_res->size;
	}

	/*
	 * After all downstream device resources are scanned, `base` represents the total size
	 * requirement for the current bridge resource window. This size needs to be rounded up
	 * to the granularity requirement of the bridge to ensure that the upstream
	 * bridge/domain allocates a big enough window.
	 */
	bridge_res->size = round(base, bridge_res->gran);

	res_printk(print_depth, "%s %s: size: %llx align: %d gran: %d limit: %llx done\n",
		   dev_path(bridge), resource2str(bridge_res), bridge_res->size,
		   bridge_res->align, bridge_res->gran, bridge_res->limit);
}

/*
 * During pass 1, the resource allocator at the bridge level gathers requirements from
 * downstream devices and updates its own resource windows for the provided resource type.
 */
static void compute_bridge_resources(const struct device *bridge, unsigned long type_match,
				     int print_depth)
{
	const struct device *child;
	struct resource *res;
	struct bus *bus = bridge->link_list;
	const unsigned long type_mask = IORESOURCE_TYPE_MASK | IORESOURCE_PREFETCH;

	for (res = bridge->resource_list; res; res = res->next) {
		if (!(res->flags & IORESOURCE_BRIDGE))
			continue;

		if ((res->flags & type_mask) != type_match)
			continue;

		/*
		 * Ensure that the resource requirements for all downstream bridges are
		 * gathered before updating the window for the current bridge resource.
		 */
		for (child = bus->children; child; child = child->sibling) {
			if (!dev_has_children(child))
				continue;
			compute_bridge_resources(child, type_match, print_depth + 1);
		}

		/*
		 * Update the window for the current bridge resource now that all downstream
		 * requirements are gathered.
		 */
		update_bridge_resource(bridge, res, type_match, print_depth);
	}
}

/*
 * During pass 1, the resource allocator walks down the entire sub-tree of a domain. It
 * gathers resource requirements for every downstream bridge by looking at the resource
 * requests of its children. Thus, the requirement gathering begins at the leaf devices and is
 * propagated back up to the downstream bridges of the domain.
 *
 * At the domain level, it identifies every downstream bridge and walks down that bridge to
 * gather requirements for each resource type, i.e., io, mem and prefmem. Since bridges have
 * separate windows for mem and prefmem, requirements for each need to be collected
 * separately.
 *
 * Domain resource windows are fixed ranges and hence requirement gathering does not result in
 * any changes to these fixed ranges.
 */
static void compute_domain_resources(const struct device *domain)
{
	const struct device *child;
	const int print_depth = 1;

	if (domain->link_list == NULL)
		return;

	for (child = domain->link_list->children; child; child = child->sibling) {

		/* Skip if this is not a bridge or has no children under it. */
		if (!dev_has_children(child))
			continue;

		compute_bridge_resources(child, IORESOURCE_IO, print_depth);
		compute_bridge_resources(child, IORESOURCE_MEM, print_depth);
		compute_bridge_resources(child, IORESOURCE_MEM | IORESOURCE_PREFETCH,
					 print_depth);
	}
}

static unsigned char get_alignment_by_resource_type(const struct resource *res)
{
	if (res->flags & IORESOURCE_MEM)
		return 12;	/* Page-aligned --> log2(4KiB) */
	else if (res->flags & IORESOURCE_IO)
		return 0;	/* No special alignment required --> log2(1) */

	die("Unexpected resource type: flags(%lu)!\n", res->flags);
}

/*
 * If the resource is NULL or if the resource is not assigned, then it cannot be used for
 * allocation for downstream devices.
 */
static bool is_resource_invalid(const struct resource *res)
{
	return (res == NULL) || !(res->flags & IORESOURCE_ASSIGNED);
}

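/*
 * I/O resource windows never span the 4G boundary, so a single memrange covering the whole
 * window is sufficient.
 */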
static void initialize_domain_io_resource_memranges(struct memranges *ranges,
						    const struct resource *res,
						    unsigned long memrange_type)
{
	memranges_insert(ranges, res->base, res->limit - res->base + 1, memrange_type);
}

static void initialize_domain_mem_resource_memranges(struct memranges *ranges,
						     const struct resource *res,
						     unsigned long memrange_type)
{
	resource_t res_base;
	resource_t res_limit;

	const resource_t limit_4g = 0xffffffff;

	res_base = res->base;
	res_limit = res->limit;

	/*
	 * Split the resource into two separate ranges if it crosses the 4G boundary. The
	 * memrange type is set differently to ensure that memrange does not merge these two
	 * ranges. For the range above the 4G boundary, the given memrange type is ORed with
	 * IORESOURCE_ABOVE_4G.
	 */
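	/*
	 * Hypothetical example: a window [0xc0000000 - 0x17fffffff] is split into
	 * [0xc0000000 - 0xffffffff] tagged `memrange_type` and [0x100000000 - 0x17fffffff]
	 * tagged `memrange_type | IORESOURCE_ABOVE_4G`.
	 */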
	if (res_base <= limit_4g) {

		resource_t range_limit;

		/* Clip the resource limit at the 4G boundary if necessary. */
		range_limit = MIN(res_limit, limit_4g);
		memranges_insert(ranges, res_base, range_limit - res_base + 1, memrange_type);

		/*
		 * If the resource lies completely below the 4G boundary, nothing more needs to
		 * be done.
		 */
		if (res_limit <= limit_4g)
			return;

		/*
		 * If the resource window crosses the 4G boundary, then update res_base to add
		 * another entry for the range above the boundary.
		 */
		res_base = limit_4g + 1;
	}

	if (res_base > res_limit)
		return;

	/*
	 * If the resource lies completely above the 4G boundary or if the resource was
	 * clipped into two separate ranges, the range above the 4G boundary has the resource
	 * flag IORESOURCE_ABOVE_4G set. This allows the domain to handle any downstream
	 * requests for resource allocation above 4G differently.
	 */
	memranges_insert(ranges, res_base, res_limit - res_base + 1,
			 memrange_type | IORESOURCE_ABOVE_4G);
}

/*
 * This function initializes memranges for a domain device. If the resource crosses the 4G
 * boundary, then this function splits it into two ranges -- one for the window below 4G and
 * the other for the window above 4G. The latter range has the IORESOURCE_ABOVE_4G flag set to
 * satisfy resource requests from downstream devices for allocations above 4G.
 */
static void initialize_domain_memranges(struct memranges *ranges, const struct resource *res,
					unsigned long memrange_type)
{
	unsigned char align = get_alignment_by_resource_type(res);

	memranges_init_empty_with_alignment(ranges, NULL, 0, align);

	if (is_resource_invalid(res))
		return;

	if (res->flags & IORESOURCE_IO)
		initialize_domain_io_resource_memranges(ranges, res, memrange_type);
	else
		initialize_domain_mem_resource_memranges(ranges, res, memrange_type);
}

/*
 * This function initializes memranges for a bridge device. Unlike a domain, a bridge does not
 * need to care about a resource window crossing the 4G boundary. This is handled by the
 * resource allocator at the domain level to ensure that all downstream bridges are allocated
 * space either above or below the 4G boundary as per the state of IORESOURCE_ABOVE_4G for the
 * respective bridge resource.
 *
 * So, this function creates a single range of the entire resource window available for the
 * bridge resource. Thus all downstream resources of the bridge for the given resource type
 * get allocated space from the same window. If there is any downstream resource of the bridge
 * which requests allocation above 4G, then all other downstream resources of the same type
 * under the bridge get allocated above 4G.
 */
static void initialize_bridge_memranges(struct memranges *ranges, const struct resource *res,
					unsigned long memrange_type)
{
	unsigned char align = get_alignment_by_resource_type(res);

	memranges_init_empty_with_alignment(ranges, NULL, 0, align);

	if (is_resource_invalid(res))
		return;

	memranges_insert(ranges, res->base, res->limit - res->base + 1, memrange_type);
}

static void print_resource_ranges(const struct device *dev, const struct memranges *ranges)
{
	const struct range_entry *r;

	printk(BIOS_INFO, " %s: Resource ranges:\n", dev_path(dev));

	if (memranges_is_empty(ranges))
		printk(BIOS_INFO, " * EMPTY!!\n");

	memranges_each_entry(r, ranges) {
		printk(BIOS_INFO, " * Base: %llx, Size: %llx, Tag: %lx\n",
		       range_entry_base(r), range_entry_size(r), range_entry_tag(r));
	}
}

/*
 * This is where the actual allocation of resources happens during pass 2. Given the list of
 * memory ranges corresponding to the resource of a given type, it finds the biggest
 * unallocated resource using the type mask on the downstream bus. This continues in
 * descending order until all resources of the given type are allocated address space within
 * the current resource window.
 */
static void allocate_child_resources(struct bus *bus, struct memranges *ranges,
				     unsigned long type_mask, unsigned long type_match)
{
	struct resource *resource = NULL;
	const struct device *dev;

	while ((dev = largest_resource(bus, &resource, type_mask, type_match))) {

		if (!resource->size)
			continue;

		if (memranges_steal(ranges, resource->limit, resource->size, resource->align,
				    type_match, &resource->base) == false) {
			printk(BIOS_ERR, " ERROR: Resource didn't fit!!! ");
			printk(BIOS_DEBUG, " %s %02lx * size: 0x%llx limit: %llx %s\n",
			       dev_path(dev), resource->index,
			       resource->size, resource->limit, resource2str(resource));
			continue;
		}

		resource->limit = resource->base + resource->size - 1;
		resource->flags |= IORESOURCE_ASSIGNED;

		printk(BIOS_DEBUG, " %s %02lx * [0x%llx - 0x%llx] limit: %llx %s\n",
		       dev_path(dev), resource->index, resource->base,
		       resource->size ? resource->base + resource->size - 1 :
		       resource->base, resource->limit, resource2str(resource));
	}
}

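/* Punch a hole in the available ranges for a fixed resource so allocations cannot overlap it. */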
static void update_constraints(struct memranges *ranges, const struct device *dev,
			       const struct resource *res)
{
	if (!res->size)
		return;

	printk(BIOS_DEBUG, " %s: %s %02lx base %08llx limit %08llx %s (fixed)\n",
	       __func__, dev_path(dev), res->index, res->base,
	       res->base + res->size - 1, resource2str(res));

	memranges_create_hole(ranges, res->base, res->size);
}

/*
 * Scan the entire tree to identify any fixed resources allocated by any device to
 * ensure that the address map for domain resources is appropriately updated.
 *
 * Domains can typically provide a memrange for the entire address space. So, this function
 * punches holes in the address space for all fixed resources that are already
 * defined. Both I/O and normal memory resources are added as fixed. Both need to be
 * removed from the address space where dynamic resource allocations are sourced.
 */
static void avoid_fixed_resources(struct memranges *ranges, const struct device *dev,
				  unsigned long mask_match)
{
	const struct resource *res;
	const struct device *child;
	const struct bus *bus;

	for (res = dev->resource_list; res != NULL; res = res->next) {
		if ((res->flags & mask_match) != mask_match)
			continue;
		update_constraints(ranges, dev, res);
	}

	bus = dev->link_list;
	if (bus == NULL)
		return;

	for (child = bus->children; child != NULL; child = child->sibling)
		avoid_fixed_resources(ranges, child, mask_match);
}

static void constrain_domain_resources(const struct device *domain, struct memranges *ranges,
				       unsigned long type)
{
	unsigned long mask_match = type | IORESOURCE_FIXED;

	if (type == IORESOURCE_IO) {
		/*
		 * Don't allow allocations in the VGA I/O range. PCI has special cases for
		 * that.
		 */
		memranges_create_hole(ranges, 0x3b0, 0x3df - 0x3b0 + 1);

		/*
		 * The resource allocator no longer supports the legacy behavior where I/O
		 * resource allocation is guaranteed to avoid aliases over legacy PCI expansion
		 * card addresses.
		 */
	}

	avoid_fixed_resources(ranges, domain, mask_match);
}

/*
 * This function creates a list of memranges of a given type using the resource that is
 * provided. If the given resource is NULL or if the resource window size is 0, then it
 * creates an empty list. This results in resource allocation for that resource type failing
 * for all downstream devices since there is nothing to allocate from.
 *
 * In case of a domain, it applies additional constraints to ensure that the memranges do not
 * overlap any of the fixed resources under that domain. A domain typically provides a
 * memrange for the entire address space. Thus, it is up to the chipset to add DRAM and all
 * other windows which cannot be used for resource allocation as fixed resources.
 */
static void setup_resource_ranges(const struct device *dev, const struct resource *res,
				  unsigned long type, struct memranges *ranges)
{
	printk(BIOS_DEBUG, "%s %s: base: %llx size: %llx align: %d gran: %d limit: %llx\n",
	       dev_path(dev), resource2str(res), res->base, res->size, res->align,
	       res->gran, res->limit);

	if (dev->path.type == DEVICE_PATH_DOMAIN) {
		initialize_domain_memranges(ranges, res, type);
		constrain_domain_resources(dev, ranges, type);
	} else {
		initialize_bridge_memranges(ranges, res, type);
	}

	print_resource_ranges(dev, ranges);
}

static void cleanup_resource_ranges(const struct device *dev, struct memranges *ranges,
				    const struct resource *res)
{
	memranges_teardown(ranges);
	printk(BIOS_DEBUG, "%s %s: base: %llx size: %llx align: %d gran: %d limit: %llx done\n",
	       dev_path(dev), resource2str(res), res->base, res->size, res->align,
	       res->gran, res->limit);
}

/*
 * Pass 2 of the resource allocator at the bridge level loops through all the resources for
 * the bridge and generates a list of memory ranges similar to that at the domain level.
 * However, there is no need to apply any additional constraints since the window allocated to
 * the bridge is guaranteed to be non-overlapping by the allocator at the domain level.
 *
 * Allocation at the bridge level works the same as at the domain level (starts with the
 * biggest resource requirement from downstream devices and continues in descending order).
 * One major difference at the bridge level is that it considers prefmem resources separately
 * from mem resources.
 *
 * Once allocation at the current bridge is complete, the resource allocator continues walking
 * down the downstream bridges until it hits the leaf devices.
 */
static void allocate_bridge_resources(const struct device *bridge)
{
	struct memranges ranges;
	const struct resource *res;
	struct bus *bus = bridge->link_list;
	unsigned long type_match;
	struct device *child;
	const unsigned long type_mask = IORESOURCE_TYPE_MASK | IORESOURCE_PREFETCH;

	for (res = bridge->resource_list; res; res = res->next) {
		if (!res->size)
			continue;

		if (!(res->flags & IORESOURCE_BRIDGE))
			continue;

		type_match = res->flags & type_mask;

		setup_resource_ranges(bridge, res, type_match, &ranges);
		allocate_child_resources(bus, &ranges, type_mask, type_match);
		cleanup_resource_ranges(bridge, &ranges, res);
	}

	for (child = bus->children; child; child = child->sibling) {
		if (!dev_has_children(child))
			continue;

		allocate_bridge_resources(child);
	}
}

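/* Return the first non-fixed domain resource of the given type, or NULL if there is none. */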
static const struct resource *find_domain_resource(const struct device *domain,
						   unsigned long type)
{
	const struct resource *res;

	for (res = domain->resource_list; res; res = res->next) {
		if (res->flags & IORESOURCE_FIXED)
			continue;

		if ((res->flags & IORESOURCE_TYPE_MASK) == type)
			return res;
	}

	return NULL;
}

/*
 * Pass 2 of the resource allocator begins at the domain level. Every domain has two types of
 * resources - io and mem. For each of these resources, this function creates a list of memory
 * ranges that can be used for downstream resource allocation. This list is constrained to
 * remove any fixed resources in the domain sub-tree of the given resource type. It then uses
 * the memory ranges to apply best fit on the resource requirements of the downstream devices.
 *
 * Once resources are allocated to all downstream devices of the domain, it walks down each
 * downstream bridge to continue the same process until resources are allocated to all devices
 * under the domain.
 */
static void allocate_domain_resources(const struct device *domain)
{
	struct memranges ranges;
	struct device *child;
	const struct resource *res;

	/* Resource type I/O */
	res = find_domain_resource(domain, IORESOURCE_IO);
	if (res) {
		setup_resource_ranges(domain, res, IORESOURCE_IO, &ranges);
		allocate_child_resources(domain->link_list, &ranges, IORESOURCE_TYPE_MASK,
					 IORESOURCE_IO);
		cleanup_resource_ranges(domain, &ranges, res);
	}

	/*
	 * Resource type Mem:
	 * The domain does not distinguish between mem and prefmem resources. Thus, the
	 * resource allocation at the domain level considers mem and prefmem together when
	 * finding the best fit based on the biggest resource requirement.
	 *
	 * However, resource requests for allocation above the 4G boundary need to be handled
	 * separately if the domain resource window crosses this boundary. There is a single
	 * window for resources of type IORESOURCE_MEM. When creating memranges, this resource
	 * is split into two separate ranges -- one for the window below the 4G boundary and
	 * the other for the window above the 4G boundary (with the IORESOURCE_ABOVE_4G flag
	 * set). Thus, when allocating child resources, requests below and above the 4G
	 * boundary are handled separately by setting the type_mask and type_match arguments
	 * of allocate_child_resources() accordingly.
	 */
	res = find_domain_resource(domain, IORESOURCE_MEM);
	if (res) {
		setup_resource_ranges(domain, res, IORESOURCE_MEM, &ranges);
		allocate_child_resources(domain->link_list, &ranges,
					 IORESOURCE_TYPE_MASK | IORESOURCE_ABOVE_4G,
					 IORESOURCE_MEM);
		allocate_child_resources(domain->link_list, &ranges,
					 IORESOURCE_TYPE_MASK | IORESOURCE_ABOVE_4G,
					 IORESOURCE_MEM | IORESOURCE_ABOVE_4G);
		cleanup_resource_ranges(domain, &ranges, res);
	}

	for (child = domain->link_list->children; child; child = child->sibling) {
		if (!dev_has_children(child))
			continue;

		/* Continue allocation for all downstream bridges. */
		allocate_bridge_resources(child);
	}
}

/*
 * This function forms the guts of the resource allocator. It walks through the entire device
 * tree for each domain two times.
 *
 * Every domain has a fixed set of ranges. These ranges cannot be relaxed based on the
 * requirements of the downstream devices. They represent the available windows from which
 * resources can be allocated to the different devices under the domain.
 *
 * In order to identify the requirements of downstream devices, the resource allocator walks
 * in a DFS fashion. It gathers the requirements from leaf devices and propagates those back
 * up to their upstream bridges until the requirements for all the downstream devices of the
 * domain are gathered. This is referred to as pass 1 of the resource allocator.
 *
 * Once the requirements for all the devices under the domain are gathered, the resource
 * allocator walks a second time to allocate resources to downstream devices as per the
 * requirements. It always picks the biggest resource request as per the type (i/o and mem) to
 * allocate space from its fixed window to the immediate downstream device of the domain. In
 * order to accomplish best fit for the resources, a list of ranges is maintained by each
 * resource type (i/o and mem). The domain does not differentiate between mem and prefmem.
 * Since they are allocated space from the same window, the resource allocator at the domain
 * level ensures that the biggest requirement is selected independent of the prefetch type.
 * Once the resource allocation for all immediate downstream devices is complete at the domain
 * level, the resource allocator walks down the subtree for each downstream bridge to continue
 * the allocation process at the bridge level. Since bridges have separate windows for i/o,
 * mem and prefmem, the best fit algorithm at the bridge level looks for the biggest
 * requirement considering prefmem resources separately from non-prefmem resources. This
 * continues until resource allocation is performed for all downstream bridges in the domain
 * sub-tree. This is referred to as pass 2 of the resource allocator.
 *
 * Some rules that are followed by the resource allocator:
 *  - Allocate resource locations for every device as long as the requirements can be
 *    satisfied.
 *  - If a resource cannot be allocated any address space, then that resource needs to be
 *    properly updated to ensure that it does not incorrectly overlap some address space
 *    reserved for a different purpose.
 *  - Don't overlap with resources in fixed locations.
 *  - Don't overlap and follow the rules of bridges -- downstream devices of bridges should
 *    use parts of the address space allocated to the bridge.
 */
void allocate_resources(const struct device *root)
{
	const struct device *child;

	if ((root == NULL) || (root->link_list == NULL))
		return;

	for (child = root->link_list->children; child; child = child->sibling) {

		if (child->path.type != DEVICE_PATH_DOMAIN)
			continue;

		post_log_path(child);

		/* Pass 1 - Gather requirements. */
		printk(BIOS_INFO, "=== Resource allocator: %s - Pass 1 (gathering requirements) ===\n",
		       dev_path(child));
		compute_domain_resources(child);

		/* Pass 2 - Allocate resources as per gathered requirements. */
		printk(BIOS_INFO, "=== Resource allocator: %s - Pass 2 (allocating resources) ===\n",
		       dev_path(child));
		allocate_domain_resources(child);

		printk(BIOS_INFO, "=== Resource allocator: %s - resource allocation complete ===\n",
		       dev_path(child));
	}
}