blob: 75e8392a9add747b3e7e1e29676beceb33d31f94 [file] [log] [blame]
Furquan Shaikhf4bc9eb2020-05-15 16:04:28 -07001/* SPDX-License-Identifier: GPL-2.0-only */
2
3#include <console/console.h>
4#include <device/device.h>
5#include <memrange.h>
6#include <post.h>
7
8/**
9 * Round a number up to an alignment.
10 *
11 * @param val The starting value.
12 * @param pow Alignment as a power of two.
13 * @return Rounded up number.
14 */
15static resource_t round(resource_t val, unsigned long pow)
16{
17 return ALIGN_UP(val, POWER_OF_2(pow));
18}
19
20static const char *resource2str(const struct resource *res)
21{
22 if (res->flags & IORESOURCE_IO)
23 return "io";
24 if (res->flags & IORESOURCE_PREFETCH)
25 return "prefmem";
26 if (res->flags & IORESOURCE_MEM)
27 return "mem";
28 return "undefined";
29}
30
31static bool dev_has_children(const struct device *dev)
32{
33 const struct bus *bus = dev->link_list;
34 return bus && bus->children;
35}
36
/*
 * During pass 1, once all the requirements for downstream devices of a bridge are gathered,
 * this function calculates the overall resource requirement for the bridge. It starts by
 * picking the largest resource requirement downstream for the given resource type and works by
 * adding requirements in descending order.
 *
 * Additionally, it takes alignment and limits of the downstream devices into consideration and
 * ensures that they get propagated to the bridge resource. This is required to guarantee that
 * the upstream bridge/domain honors the limit and alignment requirements for this bridge based
 * on the tightest constraints downstream.
 */
static void update_bridge_resource(const struct device *bridge, struct resource *bridge_res,
				   unsigned long type_match)
{
	const struct device *child;
	struct resource *child_res;
	resource_t base;
	bool first_child_res = true;
	const unsigned long type_mask = IORESOURCE_TYPE_MASK | IORESOURCE_PREFETCH;
	struct bus *bus = bridge->link_list;

	child_res = NULL;

	/*
	 * `base` keeps track of where the next allocation for child resource can take place
	 * from within the bridge resource window. Since the bridge resource window allocation
	 * is not performed yet, it can start at 0. Base gets updated every time a resource
	 * requirement is accounted for in the loop below. After scanning all these resources,
	 * base will indicate the total size requirement for the current bridge resource
	 * window.
	 */
	base = 0;

	printk(BIOS_DEBUG, "%s %s: size: %llx align: %d gran: %d limit: %llx\n",
	       dev_path(bridge), resource2str(bridge_res), bridge_res->size,
	       bridge_res->align, bridge_res->gran, bridge_res->limit);

	/* Visit child resources in descending order of size (largest_resource iterator). */
	while ((child = largest_resource(bus, &child_res, type_mask, type_match))) {

		/* Size 0 resources can be skipped. */
		if (!child_res->size)
			continue;

		/*
		 * Propagate the resource alignment to the bridge resource if this is the first
		 * child resource with non-zero size being considered. For all other children
		 * resources, alignment is taken care of by updating the base to round up as per
		 * the child resource alignment. It is guaranteed that pass 2 follows the exact
		 * same method of picking the resource for allocation using
		 * largest_resource(). Thus, as long as the alignment for first child resource
		 * is propagated up to the bridge resource, it can be guaranteed that the
		 * alignment for all resources is appropriately met.
		 */
		if (first_child_res && (child_res->align > bridge_res->align))
			bridge_res->align = child_res->align;

		first_child_res = false;

		/*
		 * Propagate the resource limit to the bridge resource only if child resource
		 * limit is non-zero. If a downstream device has stricter requirements
		 * w.r.t. limits for any resource, that constraint needs to be propagated back
		 * up to the downstream bridges of the domain. This guarantees that the resource
		 * allocation which starts at the domain level takes into account all these
		 * constraints thus working on a global view.
		 */
		if (child_res->limit && (child_res->limit < bridge_res->limit))
			bridge_res->limit = child_res->limit;

		/*
		 * Propagate the downstream resource request to allocate above 4G boundary to
		 * upstream bridge resource. This ensures that during pass 2, the resource
		 * allocator at domain level has a global view of all the downstream device
		 * requirements and thus address space is allocated as per updated flags in the
		 * bridge resource.
		 *
		 * Since the bridge resource is a single window, all the downstream resources of
		 * this bridge resource will be allocated space above 4G boundary.
		 */
		if (child_res->flags & IORESOURCE_ABOVE_4G)
			bridge_res->flags |= IORESOURCE_ABOVE_4G;

		/*
		 * Alignment value of 0 means that the child resource has no alignment
		 * requirements and so the base value remains unchanged here.
		 */
		base = round(base, child_res->align);

		printk(BIOS_DEBUG, "%s %02lx * [0x%llx - 0x%llx] %s\n",
		       dev_path(child), child_res->index, base, base + child_res->size - 1,
		       resource2str(child_res));

		base += child_res->size;
	}

	/*
	 * After all downstream device resources are scanned, `base` represents the total size
	 * requirement for the current bridge resource window. This size needs to be rounded up
	 * to the granularity requirement of the bridge to ensure that the upstream
	 * bridge/domain allocates big enough window.
	 */
	bridge_res->size = round(base, bridge_res->gran);

	printk(BIOS_DEBUG, "%s %s: size: %llx align: %d gran: %d limit: %llx done\n",
	       dev_path(bridge), resource2str(bridge_res), bridge_res->size,
	       bridge_res->align, bridge_res->gran, bridge_res->limit);
}
144
145/*
146 * During pass 1, resource allocator at bridge level gathers requirements from downstream
147 * devices and updates its own resource windows for the provided resource type.
148 */
149static void compute_bridge_resources(const struct device *bridge, unsigned long type_match)
150{
151 const struct device *child;
152 struct resource *res;
153 struct bus *bus = bridge->link_list;
154 const unsigned long type_mask = IORESOURCE_TYPE_MASK | IORESOURCE_PREFETCH;
155
156 for (res = bridge->resource_list; res; res = res->next) {
157 if (!(res->flags & IORESOURCE_BRIDGE))
158 continue;
159
160 if ((res->flags & type_mask) != type_match)
161 continue;
162
163 /*
164 * Ensure that the resource requirements for all downstream bridges are
165 * gathered before updating the window for current bridge resource.
166 */
167 for (child = bus->children; child; child = child->sibling) {
168 if (!dev_has_children(child))
169 continue;
170 compute_bridge_resources(child, type_match);
171 }
172
173 /*
174 * Update the window for current bridge resource now that all downstream
175 * requirements are gathered.
176 */
177 update_bridge_resource(bridge, res, type_match);
178 }
179}
180
181/*
182 * During pass 1, resource allocator walks down the entire sub-tree of a domain. It gathers
183 * resource requirements for every downstream bridge by looking at the resource requests of its
184 * children. Thus, the requirement gathering begins at the leaf devices and is propagated back
185 * up to the downstream bridges of the domain.
186 *
187 * At domain level, it identifies every downstream bridge and walks down that bridge to gather
188 * requirements for each resource type i.e. i/o, mem and prefmem. Since bridges have separate
189 * windows for mem and prefmem, requirements for each need to be collected separately.
190 *
191 * Domain resource windows are fixed ranges and hence requirement gathering does not result in
192 * any changes to these fixed ranges.
193 */
194static void compute_domain_resources(const struct device *domain)
195{
196 const struct device *child;
197
198 if (domain->link_list == NULL)
199 return;
200
201 for (child = domain->link_list->children; child; child = child->sibling) {
202
203 /* Skip if this is not a bridge or has no children under it. */
204 if (!dev_has_children(child))
205 continue;
206
207 compute_bridge_resources(child, IORESOURCE_IO);
208 compute_bridge_resources(child, IORESOURCE_MEM);
209 compute_bridge_resources(child, IORESOURCE_MEM | IORESOURCE_PREFETCH);
210 }
211}
212
213static unsigned char get_alignment_by_resource_type(const struct resource *res)
214{
215 if (res->flags & IORESOURCE_MEM)
216 return 12; /* Page-aligned --> log2(4KiB) */
217 else if (res->flags & IORESOURCE_IO)
218 return 0; /* No special alignment required --> log2(1) */
219
220 die("Unexpected resource type: flags(%d)!\n", res->flags);
221}
222
Furquan Shaikh1bb05ef302020-05-15 17:33:52 -0700223/*
224 * If the resource is NULL or if the resource is not assigned, then it cannot be used for
225 * allocation for downstream devices.
226 */
227static bool is_resource_invalid(const struct resource *res)
228{
229 return (res == NULL) || !(res->flags & IORESOURCE_ASSIGNED);
230}
231
232static void initialize_domain_io_resource_memranges(struct memranges *ranges,
233 const struct resource *res,
234 unsigned long memrange_type)
235{
236 memranges_insert(ranges, res->base, res->limit - res->base + 1, memrange_type);
237}
238
239static void initialize_domain_mem_resource_memranges(struct memranges *ranges,
240 const struct resource *res,
241 unsigned long memrange_type)
Furquan Shaikhf4bc9eb2020-05-15 16:04:28 -0700242{
243 resource_t res_base;
244 resource_t res_limit;
Furquan Shaikhf4bc9eb2020-05-15 16:04:28 -0700245
Furquan Shaikh1bb05ef302020-05-15 17:33:52 -0700246 const resource_t limit_4g = 0xffffffff;
Furquan Shaikhf4bc9eb2020-05-15 16:04:28 -0700247
248 res_base = res->base;
249 res_limit = res->limit;
250
Furquan Shaikh1bb05ef302020-05-15 17:33:52 -0700251 /*
252 * Split the resource into two separate ranges if it crosses the 4G boundary. Memrange
253 * type is set differently to ensure that memrange does not merge these two ranges. For
254 * the range above 4G boundary, given memrange type is ORed with IORESOURCE_ABOVE_4G.
255 */
256 if (res_base <= limit_4g) {
257
258 resource_t range_limit;
259
260 /* Clip the resource limit at 4G boundary if necessary. */
261 range_limit = MIN(res_limit, limit_4g);
262 memranges_insert(ranges, res_base, range_limit - res_base + 1, memrange_type);
263
264 /*
265 * If the resource lies completely below the 4G boundary, nothing more needs to
266 * be done.
267 */
268 if (res_limit <= limit_4g)
269 return;
270
271 /*
272 * If the resource window crosses the 4G boundary, then update res_base to add
273 * another entry for the range above the boundary.
274 */
275 res_base = limit_4g + 1;
276 }
277
278 if (res_base > res_limit)
279 return;
280
281 /*
282 * If resource lies completely above the 4G boundary or if the resource was clipped to
283 * add two separate ranges, the range above 4G boundary has the resource flag
284 * IORESOURCE_ABOVE_4G set. This allows domain to handle any downstream requests for
285 * resource allocation above 4G differently.
286 */
287 memranges_insert(ranges, res_base, res_limit - res_base + 1,
288 memrange_type | IORESOURCE_ABOVE_4G);
289}
290
291/*
292 * This function initializes memranges for domain device. If the resource crosses 4G boundary,
293 * then this function splits it into two ranges -- one for the window below 4G and the other for
294 * the window above 4G. The latter range has IORESOURCE_ABOVE_4G flag set to satisfy resource
295 * requests from downstream devices for allocations above 4G.
296 */
297static void initialize_domain_memranges(struct memranges *ranges, const struct resource *res,
298 unsigned long memrange_type)
299{
300 unsigned char align = get_alignment_by_resource_type(res);
301
302 memranges_init_empty_with_alignment(ranges, NULL, 0, align);
303
304 if (is_resource_invalid(res))
305 return;
306
307 if (res->flags & IORESOURCE_IO)
308 initialize_domain_io_resource_memranges(ranges, res, memrange_type);
309 else
310 initialize_domain_mem_resource_memranges(ranges, res, memrange_type);
311}
312
313/*
314 * This function initializes memranges for bridge device. Unlike domain, bridge does not need to
315 * care about resource window crossing 4G boundary. This is handled by the resource allocator at
316 * domain level to ensure that all downstream bridges are allocated space either above or below
317 * 4G boundary as per the state of IORESOURCE_ABOVE_4G for the respective bridge resource.
318 *
319 * So, this function creates a single range of the entire resource window available for the
320 * bridge resource. Thus all downstream resources of the bridge for the given resource type get
321 * allocated space from the same window. If there is any downstream resource of the bridge which
322 * requests allocation above 4G, then all other downstream resources of the same type under the
323 * bridge get allocated above 4G.
324 */
325static void initialize_bridge_memranges(struct memranges *ranges, const struct resource *res,
326 unsigned long memrange_type)
327{
328 unsigned char align = get_alignment_by_resource_type(res);
329
330 memranges_init_empty_with_alignment(ranges, NULL, 0, align);
331
332 if (is_resource_invalid(res))
333 return;
334
335 memranges_insert(ranges, res->base, res->limit - res->base + 1, memrange_type);
Furquan Shaikhf4bc9eb2020-05-15 16:04:28 -0700336}
337
338static void print_resource_ranges(const struct memranges *ranges)
339{
340 const struct range_entry *r;
341
342 printk(BIOS_INFO, "Resource ranges:\n");
343
344 if (memranges_is_empty(ranges))
345 printk(BIOS_INFO, "EMPTY!!\n");
346
347 memranges_each_entry(r, ranges) {
348 printk(BIOS_INFO, "Base: %llx, Size: %llx, Tag: %lx\n",
349 range_entry_base(r), range_entry_size(r), range_entry_tag(r));
350 }
351}
352
/*
 * This is where the actual allocation of resources happens during pass 2. Given the list of
 * memory ranges corresponding to the resource of given type, it finds the biggest unallocated
 * resource using the type mask on the downstream bus. This continues in a descending
 * order until all resources of given type are allocated address space within the current
 * resource window.
 */
static void allocate_child_resources(struct bus *bus, struct memranges *ranges,
				     unsigned long type_mask, unsigned long type_match)
{
	struct resource *resource = NULL;
	const struct device *dev;

	while ((dev = largest_resource(bus, &resource, type_mask, type_match))) {

		/* Nothing to allocate for zero-sized resources. */
		if (!resource->size)
			continue;

		/*
		 * Carve the requested window out of the available ranges, honoring the
		 * resource's limit and alignment. On failure the resource is left without
		 * IORESOURCE_ASSIGNED and allocation continues with the remaining
		 * resources.
		 */
		if (memranges_steal(ranges, resource->limit, resource->size, resource->align,
				    type_match, &resource->base) == false) {
			printk(BIOS_ERR, "ERROR: Resource didn't fit!!! ");
			printk(BIOS_DEBUG, "%s %02lx * size: 0x%llx limit: %llx %s\n",
			       dev_path(dev), resource->index,
			       resource->size, resource->limit, resource2str(resource));
			continue;
		}

		/* Tighten the limit to the allocated window and mark the resource assigned. */
		resource->limit = resource->base + resource->size - 1;
		resource->flags |= IORESOURCE_ASSIGNED;

		printk(BIOS_DEBUG, "%s %02lx * [0x%llx - 0x%llx] limit: %llx %s\n",
		       dev_path(dev), resource->index, resource->base,
		       resource->size ? resource->base + resource->size - 1 :
		       resource->base, resource->limit, resource2str(resource));
	}
}
389
390static void update_constraints(struct memranges *ranges, const struct device *dev,
391 const struct resource *res)
392{
393 if (!res->size)
394 return;
395
Furquan Shaikhc0dc1e12020-05-16 13:54:37 -0700396 printk(BIOS_DEBUG, "%s: %s %02lx base %08llx limit %08llx %s (fixed)\n",
Furquan Shaikhf4bc9eb2020-05-15 16:04:28 -0700397 __func__, dev_path(dev), res->index, res->base,
398 res->base + res->size - 1, resource2str(res));
399
400 memranges_create_hole(ranges, res->base, res->size);
401}
402
403/*
404 * Scan the entire tree to identify any fixed resources allocated by any device to
405 * ensure that the address map for domain resources are appropriately updated.
406 *
407 * Domains can typically provide memrange for entire address space. So, this function
408 * punches holes in the address space for all fixed resources that are already
409 * defined. Both IO and normal memory resources are added as fixed. Both need to be
410 * removed from address space where dynamic resource allocations are sourced.
411 */
412static void avoid_fixed_resources(struct memranges *ranges, const struct device *dev,
413 unsigned long mask_match)
414{
415 const struct resource *res;
416 const struct device *child;
417 const struct bus *bus;
418
419 for (res = dev->resource_list; res != NULL; res = res->next) {
420 if ((res->flags & mask_match) != mask_match)
421 continue;
422 update_constraints(ranges, dev, res);
423 }
424
425 bus = dev->link_list;
426 if (bus == NULL)
427 return;
428
429 for (child = bus->children; child != NULL; child = child->sibling)
430 avoid_fixed_resources(ranges, child, mask_match);
431}
432
433static void constrain_domain_resources(const struct device *domain, struct memranges *ranges,
434 unsigned long type)
435{
436 unsigned long mask_match = type | IORESOURCE_FIXED;
437
438 if (type == IORESOURCE_IO) {
439 /*
440 * Don't allow allocations in the VGA I/O range. PCI has special cases for
441 * that.
442 */
Furquan Shaikh563e6142020-05-26 12:04:35 -0700443 memranges_create_hole(ranges, 0x3b0, 0x3df - 0x3b0 + 1);
Furquan Shaikhf4bc9eb2020-05-15 16:04:28 -0700444
445 /*
446 * Resource allocator no longer supports the legacy behavior where I/O resource
447 * allocation is guaranteed to avoid aliases over legacy PCI expansion card
448 * addresses.
449 */
450 }
451
452 avoid_fixed_resources(ranges, domain, mask_match);
453}
454
455/*
456 * This function creates a list of memranges of given type using the resource that is
457 * provided. If the given resource is NULL or if the resource window size is 0, then it creates
458 * an empty list. This results in resource allocation for that resource type failing for all
459 * downstream devices since there is nothing to allocate from.
460 *
461 * In case of domain, it applies additional constraints to ensure that the memranges do not
462 * overlap any of the fixed resources under that domain. Domain typically seems to provide
463 * memrange for entire address space. Thus, it is up to the chipset to add DRAM and all other
464 * windows which cannot be used for resource allocation as fixed resources.
465 */
466static void setup_resource_ranges(const struct device *dev, const struct resource *res,
467 unsigned long type, struct memranges *ranges)
468{
Furquan Shaikhc0dc1e12020-05-16 13:54:37 -0700469 printk(BIOS_DEBUG, "%s %s: base: %llx size: %llx align: %d gran: %d limit: %llx\n",
Furquan Shaikhf4bc9eb2020-05-15 16:04:28 -0700470 dev_path(dev), resource2str(res), res->base, res->size, res->align,
471 res->gran, res->limit);
472
Furquan Shaikh1bb05ef302020-05-15 17:33:52 -0700473 if (dev->path.type == DEVICE_PATH_DOMAIN) {
474 initialize_domain_memranges(ranges, res, type);
Furquan Shaikhf4bc9eb2020-05-15 16:04:28 -0700475 constrain_domain_resources(dev, ranges, type);
Furquan Shaikh1bb05ef302020-05-15 17:33:52 -0700476 } else {
477 initialize_bridge_memranges(ranges, res, type);
478 }
Furquan Shaikhf4bc9eb2020-05-15 16:04:28 -0700479
480 print_resource_ranges(ranges);
481}
482
483static void cleanup_resource_ranges(const struct device *dev, struct memranges *ranges,
484 const struct resource *res)
485{
486 memranges_teardown(ranges);
Furquan Shaikhc0dc1e12020-05-16 13:54:37 -0700487 printk(BIOS_DEBUG, "%s %s: base: %llx size: %llx align: %d gran: %d limit: %llx done\n",
Furquan Shaikhf4bc9eb2020-05-15 16:04:28 -0700488 dev_path(dev), resource2str(res), res->base, res->size, res->align,
489 res->gran, res->limit);
490}
491
492/*
493 * Pass 2 of resource allocator at the bridge level loops through all the resources for the
494 * bridge and generates a list of memory ranges similar to that at the domain level. However,
495 * there is no need to apply any additional constraints since the window allocated to the bridge
496 * is guaranteed to be non-overlapping by the allocator at domain level.
497 *
498 * Allocation at the bridge level works the same as at domain level (starts with the biggest
499 * resource requirement from downstream devices and continues in descending order). One major
500 * difference at the bridge level is that it considers prefmem resources separately from mem
501 * resources.
502 *
503 * Once allocation at the current bridge is complete, resource allocator continues walking down
504 * the downstream bridges until it hits the leaf devices.
505 */
506static void allocate_bridge_resources(const struct device *bridge)
507{
508 struct memranges ranges;
509 const struct resource *res;
510 struct bus *bus = bridge->link_list;
511 unsigned long type_match;
512 struct device *child;
513 const unsigned long type_mask = IORESOURCE_TYPE_MASK | IORESOURCE_PREFETCH;
514
515 for (res = bridge->resource_list; res; res = res->next) {
516 if (!res->size)
517 continue;
518
519 if (!(res->flags & IORESOURCE_BRIDGE))
520 continue;
521
522 type_match = res->flags & type_mask;
523
524 setup_resource_ranges(bridge, res, type_match, &ranges);
525 allocate_child_resources(bus, &ranges, type_mask, type_match);
526 cleanup_resource_ranges(bridge, &ranges, res);
527 }
528
529 for (child = bus->children; child; child = child->sibling) {
530 if (!dev_has_children(child))
531 continue;
532
533 allocate_bridge_resources(child);
534 }
535}
536
537static const struct resource *find_domain_resource(const struct device *domain,
538 unsigned long type)
539{
540 const struct resource *res;
541
542 for (res = domain->resource_list; res; res = res->next) {
543 if (res->flags & IORESOURCE_FIXED)
544 continue;
545
546 if ((res->flags & IORESOURCE_TYPE_MASK) == type)
547 return res;
548 }
549
550 return NULL;
551}
552
/*
 * Pass 2 of resource allocator begins at the domain level. Every domain has two types of
 * resources - io and mem. For each of these resources, this function creates a list of memory
 * ranges that can be used for downstream resource allocation. This list is constrained to
 * remove any fixed resources in the domain sub-tree of the given resource type. It then uses
 * the memory ranges to apply best fit on the resource requirements of the downstream devices.
 *
 * Once resources are allocated to all downstream devices of the domain, it walks down each
 * downstream bridge to continue the same process until resources are allocated to all devices
 * under the domain.
 */
static void allocate_domain_resources(const struct device *domain)
{
	struct memranges ranges;
	struct device *child;
	const struct resource *res;

	/* Resource type I/O */
	res = find_domain_resource(domain, IORESOURCE_IO);
	if (res) {
		setup_resource_ranges(domain, res, IORESOURCE_IO, &ranges);
		allocate_child_resources(domain->link_list, &ranges, IORESOURCE_TYPE_MASK,
					 IORESOURCE_IO);
		cleanup_resource_ranges(domain, &ranges, res);
	}

	/*
	 * Resource type Mem:
	 * Domain does not distinguish between mem and prefmem resources. Thus, the resource
	 * allocation at domain level considers mem and prefmem together when finding the best
	 * fit based on the biggest resource requirement.
	 *
	 * However, resource requests for allocation above 4G boundary need to be handled
	 * separately if the domain resource window crosses this boundary. There is a single
	 * window for resource of type IORESOURCE_MEM. When creating memranges, this resource
	 * is split into two separate ranges -- one for the window below 4G boundary and other
	 * for the window above 4G boundary (with IORESOURCE_ABOVE_4G flag set). Thus, when
	 * allocating child resources, requests for below and above the 4G boundary are handled
	 * separately by setting the type_mask and type_match to allocate_child_resources()
	 * accordingly: first the below-4G pass, then the above-4G pass.
	 */
	res = find_domain_resource(domain, IORESOURCE_MEM);
	if (res) {
		setup_resource_ranges(domain, res, IORESOURCE_MEM, &ranges);
		allocate_child_resources(domain->link_list, &ranges,
					 IORESOURCE_TYPE_MASK | IORESOURCE_ABOVE_4G,
					 IORESOURCE_MEM);
		allocate_child_resources(domain->link_list, &ranges,
					 IORESOURCE_TYPE_MASK | IORESOURCE_ABOVE_4G,
					 IORESOURCE_MEM | IORESOURCE_ABOVE_4G);
		cleanup_resource_ranges(domain, &ranges, res);
	}

	for (child = domain->link_list->children; child; child = child->sibling) {
		if (!dev_has_children(child))
			continue;

		/* Continue allocation for all downstream bridges. */
		allocate_bridge_resources(child);
	}
}
614
615/*
616 * This function forms the guts of the resource allocator. It walks through the entire device
617 * tree for each domain two times.
618 *
619 * Every domain has a fixed set of ranges. These ranges cannot be relaxed based on the
620 * requirements of the downstream devices. They represent the available windows from which
621 * resources can be allocated to the different devices under the domain.
622 *
623 * In order to identify the requirements of downstream devices, resource allocator walks in a
624 * DFS fashion. It gathers the requirements from leaf devices and propagates those back up
625 * to their upstream bridges until the requirements for all the downstream devices of the domain
626 * are gathered. This is referred to as pass 1 of resource allocator.
627 *
628 * Once the requirements for all the devices under the domain are gathered, resource allocator
629 * walks a second time to allocate resources to downstream devices as per the
630 * requirements. It always picks the biggest resource request as per the type (i/o and mem) to
631 * allocate space from its fixed window to the immediate downstream device of the domain. In
632 * order to accomplish best fit for the resources, a list of ranges is maintained by each
633 * resource type (i/o and mem). Domain does not differentiate between mem and prefmem. Since
634 * they are allocated space from the same window, the resource allocator at the domain level
635 * ensures that the biggest requirement is selected indepedent of the prefetch type. Once the
636 * resource allocation for all immediate downstream devices is complete at the domain level,
637 * resource allocator walks down the subtree for each downstream bridge to continue the
638 * allocation process at the bridge level. Since bridges have separate windows for i/o, mem and
639 * prefmem, best fit algorithm at bridge level looks for the biggest requirement considering
640 * prefmem resources separately from non-prefmem resources. This continues until resource
641 * allocation is performed for all downstream bridges in the domain sub-tree. This is referred
642 * to as pass 2 of resource allocator.
643 *
644 * Some rules that are followed by the resource allocator:
645 * - Allocate resource locations for every device as long as the requirements can be satisfied.
646 * - If a resource cannot be allocated any address space, then that resource needs to be
647 * properly updated to ensure that it does not incorrectly overlap some address space reserved
648 * for a different purpose.
649 * - Don't overlap with resources in fixed locations.
650 * - Don't overlap and follow the rules of bridges -- downstream devices of bridges should use
651 * parts of the address space allocated to the bridge.
652 */
653void allocate_resources(const struct device *root)
654{
655 const struct device *child;
656
657 if ((root == NULL) || (root->link_list == NULL))
658 return;
659
660 for (child = root->link_list->children; child; child = child->sibling) {
661
662 if (child->path.type != DEVICE_PATH_DOMAIN)
663 continue;
664
665 post_log_path(child);
666
667 /* Pass 1 - Gather requirements. */
668 printk(BIOS_INFO, "Resource allocator: %s - Pass 1 (gathering requirements)\n",
669 dev_path(child));
670 compute_domain_resources(child);
671
672 /* Pass 2 - Allocate resources as per gathered requirements. */
673 printk(BIOS_INFO, "Resource allocator: %s - Pass 2 (allocating resources)\n",
674 dev_path(child));
675 allocate_domain_resources(child);
676 }
677}