/* SPDX-License-Identifier: GPL-2.0-only */

#include <commonlib/bsd/helpers.h>
#include <console/console.h>
#include <device/device.h>
#include <memrange.h>
#include <post.h>
#include <types.h>

static const char *resource2str(const struct resource *res)
{
	if (res->flags & IORESOURCE_IO)
		return "io";
	if (res->flags & IORESOURCE_PREFETCH)
		return "prefmem";
	if (res->flags & IORESOURCE_MEM)
		return "mem";
	return "undefined";
}

static bool dev_has_children(const struct device *dev)
{
	const struct bus *bus = dev->link_list;
	return bus && bus->children;
}

static resource_t effective_limit(const struct resource *const res)
{
	/* Always allow bridge resources above 4G. */
	if (res->flags & IORESOURCE_BRIDGE)
		return res->limit;

	const resource_t quirk_4g_limit =
		res->flags & IORESOURCE_ABOVE_4G ? UINT64_MAX : UINT32_MAX;
	return MIN(res->limit, quirk_4g_limit);
}
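
/*
 * Example of the clamping in effective_limit() above (hypothetical values):
 * a device resource with a hardware limit of UINT64_MAX but without
 * IORESOURCE_ABOVE_4G is clamped to UINT32_MAX, i.e. it must be placed
 * below 4G; with the flag set, the device's own limit is honored.
 */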

#define res_printk(depth, str, ...)	printk(BIOS_DEBUG, "%*c"str, depth, ' ', __VA_ARGS__)

/*
 * During pass 1, once all the requirements for downstream devices of a
 * bridge are gathered, this function calculates the overall resource
 * requirement for the bridge. It starts by picking the largest resource
 * requirement downstream for the given resource type and works by
 * adding requirements in descending order.
 *
 * Additionally, it takes alignment and limits of the downstream devices
 * into consideration and ensures that they get propagated to the bridge
 * resource. This is required to guarantee that the upstream bridge/
 * domain honors the limit and alignment requirements for this bridge
 * based on the tightest constraints downstream.
 */
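/*
 * A hypothetical example of the computation below: with two downstream
 * prefmem requests of 8 MiB (align 23) and 1 MiB (align 20), the loop
 * places them at offsets 0x0 and 0x800000 respectively, so `base` ends
 * up at 0x900000 (9 MiB) and the bridge window inherits align 23 from
 * the largest request.
 */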
static void update_bridge_resource(const struct device *bridge, struct resource *bridge_res,
				   unsigned long type_match, int print_depth)
{
	const struct device *child;
	struct resource *child_res;
	resource_t base;
	const unsigned long type_mask = IORESOURCE_TYPE_MASK | IORESOURCE_PREFETCH;
	struct bus *bus = bridge->link_list;

	child_res = NULL;

	/*
	 * `base` keeps track of where the next allocation for child resources
	 * can take place from within the bridge resource window. Since the
	 * bridge resource window allocation is not performed yet, it can start
	 * at 0. Base gets updated every time a resource requirement is
	 * accounted for in the loop below. After scanning all these resources,
	 * base will indicate the total size requirement for the current bridge
	 * resource window.
	 */
	base = 0;

	res_printk(print_depth, "%s %s: size: %llx align: %u gran: %u limit: %llx\n",
		   dev_path(bridge), resource2str(bridge_res), bridge_res->size,
		   bridge_res->align, bridge_res->gran, bridge_res->limit);

	while ((child = largest_resource(bus, &child_res, type_mask, type_match))) {

		/* Size 0 resources can be skipped. */
		if (!child_res->size)
			continue;

		/* Resources with 0 limit can't be assigned anything. */
		if (!child_res->limit)
			continue;

		/*
		 * Propagate the resource alignment to the bridge resource. The
		 * condition can only be true for the first (largest) resource. For all
		 * other child resources, alignment is taken care of by updating the
		 * base to round up as per the child resource alignment. It is
		 * guaranteed that pass 2 follows the exact same method of picking the
		 * resource for allocation using largest_resource(). Thus, as long as
		 * the alignment for the largest child resource is propagated up to the
		 * bridge resource, it is guaranteed that the alignment for all
		 * resources is appropriately met.
		 */
		if (child_res->align > bridge_res->align)
			bridge_res->align = child_res->align;

		/*
		 * Propagate the resource limit to the bridge resource. If a downstream
		 * device has stricter requirements w.r.t. limits for any resource, that
		 * constraint needs to be propagated back up to the downstream bridges
		 * of the domain. This guarantees that the resource allocation, which
		 * starts at the domain level, takes all these constraints into account
		 * and thus works on a global view.
		 */
		if (effective_limit(child_res) < bridge_res->limit)
			bridge_res->limit = effective_limit(child_res);

		/*
		 * Alignment value of 0 means that the child resource has no alignment
		 * requirements and so the base value remains unchanged here.
		 */
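		/*
		 * E.g. (hypothetical): with a child alignment of 12 (4 KiB), a base of
		 * 0x500 rounds up to 0x1000 here.
		 */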
		base = ALIGN_UP(base, POWER_OF_2(child_res->align));

		res_printk(print_depth + 1, "%s %02lx * [0x%llx - 0x%llx] %s\n",
			   dev_path(child), child_res->index, base, base + child_res->size - 1,
			   resource2str(child_res));

		base += child_res->size;
	}

	/*
	 * After all downstream device resources are scanned, `base` represents
	 * the total size requirement for the current bridge resource window.
	 * This size needs to be rounded up to the granularity requirement of
	 * the bridge to ensure that the upstream bridge/domain allocates a big
	 * enough window.
	 */
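	/*
	 * E.g. (hypothetical): a total requirement of 0x180000 with a bridge
	 * granularity of 20 (1 MiB) is rounded up to a 0x200000 window.
	 */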
	bridge_res->size = ALIGN_UP(base, POWER_OF_2(bridge_res->gran));

	res_printk(print_depth, "%s %s: size: %llx align: %u gran: %u limit: %llx done\n",
		   dev_path(bridge), resource2str(bridge_res), bridge_res->size,
		   bridge_res->align, bridge_res->gran, bridge_res->limit);
}

/*
 * During pass 1, at the bridge level, the resource allocator gathers
 * requirements from downstream devices and updates its own resource
 * windows for the provided resource type.
 */
static void compute_bridge_resources(const struct device *bridge, unsigned long type_match,
				     int print_depth)
{
	const struct device *child;
	struct resource *res;
	struct bus *bus = bridge->link_list;
	const unsigned long type_mask = IORESOURCE_TYPE_MASK | IORESOURCE_PREFETCH;

	for (res = bridge->resource_list; res; res = res->next) {
		if (!(res->flags & IORESOURCE_BRIDGE))
			continue;

		if ((res->flags & type_mask) != type_match)
			continue;

		/*
		 * Ensure that the resource requirements for all downstream bridges are
		 * gathered before updating the window for the current bridge resource.
		 */
		for (child = bus->children; child; child = child->sibling) {
			if (!dev_has_children(child))
				continue;
			compute_bridge_resources(child, type_match, print_depth + 1);
		}

		/*
		 * Update the window for the current bridge resource now that all
		 * downstream requirements are gathered.
		 */
		update_bridge_resource(bridge, res, type_match, print_depth);
	}
}

/*
 * During pass 1, the resource allocator walks down the entire sub-tree
 * of a domain. It gathers resource requirements for every downstream
 * bridge by looking at the resource requests of its children. Thus, the
 * requirement gathering begins at the leaf devices and is propagated
 * back up to the downstream bridges of the domain.
 *
 * At the domain level, it identifies every downstream bridge and walks
 * down that bridge to gather requirements for each resource type i.e.
 * i/o, mem and prefmem. Since bridges have separate windows for mem and
 * prefmem, requirements for each need to be collected separately.
 *
 * Domain resource windows are fixed ranges and hence requirement
 * gathering does not result in any changes to these fixed ranges.
 */
static void compute_domain_resources(const struct device *domain)
{
	const struct device *child;
	const int print_depth = 1;

	if (domain->link_list == NULL)
		return;

	for (child = domain->link_list->children; child; child = child->sibling) {

		/* Skip if this is not a bridge or has no children under it. */
		if (!dev_has_children(child))
			continue;

		compute_bridge_resources(child, IORESOURCE_IO, print_depth);
		compute_bridge_resources(child, IORESOURCE_MEM, print_depth);
		compute_bridge_resources(child, IORESOURCE_MEM | IORESOURCE_PREFETCH,
					 print_depth);
	}
}

static unsigned char get_alignment_by_resource_type(const unsigned long type)
{
	if (type & IORESOURCE_MEM)
		return 12;	/* Page-aligned --> log2(4KiB) */
	else if (type & IORESOURCE_IO)
		return 0;	/* No special alignment required --> log2(1) */

	die("Unexpected resource type: flags(%lu)!\n", type);
}

static void print_resource_ranges(const struct device *dev, const struct memranges *ranges)
{
	const struct range_entry *r;

	printk(BIOS_INFO, " %s: Resource ranges:\n", dev_path(dev));

	if (memranges_is_empty(ranges))
		printk(BIOS_INFO, " * EMPTY!!\n");

	memranges_each_entry(r, ranges) {
		printk(BIOS_INFO, " * Base: %llx, Size: %llx, Tag: %lx\n",
		       range_entry_base(r), range_entry_size(r), range_entry_tag(r));
	}
}

/*
 * This is where the actual allocation of resources happens during
 * pass 2. Given the list of memory ranges corresponding to the
 * resource of a given type, it finds the biggest unallocated resource
 * using the type mask on the downstream bus. This continues in
 * descending order until all resources of the given type are allocated
 * address space within the current resource window.
 */
static void allocate_child_resources(struct bus *bus, struct memranges *ranges,
				     unsigned long type_mask, unsigned long type_match)
{
	const bool allocate_top_down =
		bus->dev->path.type == DEVICE_PATH_DOMAIN &&
		CONFIG(RESOURCE_ALLOCATION_TOP_DOWN);
	struct resource *resource = NULL;
	const struct device *dev;

	while ((dev = largest_resource(bus, &resource, type_mask, type_match))) {

		if (!resource->size)
			continue;

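		/*
		 * memranges_steal() carves a chunk of resource->size bytes,
		 * honoring the resource's alignment and effective limit, out of
		 * the available window and returns its base in resource->base.
		 * Failure means no suitable space is left in the window.
		 */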
		if (memranges_steal(ranges, effective_limit(resource), resource->size,
				    resource->align, type_match, &resource->base,
				    allocate_top_down) == false) {
			printk(BIOS_ERR, "Resource didn't fit!!! ");
			printk(BIOS_DEBUG, " %s %02lx * size: 0x%llx limit: %llx %s\n",
			       dev_path(dev), resource->index, resource->size,
			       effective_limit(resource), resource2str(resource));
			continue;
		}

		resource->limit = resource->base + resource->size - 1;
		resource->flags |= IORESOURCE_ASSIGNED;

		printk(BIOS_DEBUG, " %s %02lx * [0x%llx - 0x%llx] limit: %llx %s\n",
		       dev_path(dev), resource->index, resource->base,
		       resource->size ? resource->base + resource->size - 1 :
		       resource->base, resource->limit, resource2str(resource));
	}
}

static void update_constraints(struct memranges *ranges, const struct device *dev,
			       const struct resource *res)
{
	if (!res->size)
		return;

	printk(BIOS_DEBUG, " %s: %s %02lx base %08llx limit %08llx %s (fixed)\n",
	       __func__, dev_path(dev), res->index, res->base,
	       res->base + res->size - 1, resource2str(res));

	memranges_create_hole(ranges, res->base, res->size);
}

/*
 * Scan the entire tree to identify any fixed resources allocated by
 * any device to ensure that the address map for domain resources is
 * appropriately updated.
 *
 * Domains can typically provide a memrange for the entire address
 * space. So, this function punches holes in the address space for all
 * fixed resources that are already defined. Both I/O and normal memory
 * resources are added as fixed. Both need to be removed from the
 * address space where dynamic resource allocations are sourced.
 */
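/*
 * Typical fixed resources here are, for example, DRAM ranges and chipset
 * MMIO windows reported with IORESOURCE_FIXED, which must never be handed
 * out to devices during dynamic allocation.
 */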
306static void avoid_fixed_resources(struct memranges *ranges, const struct device *dev,
307 unsigned long mask_match)
308{
309 const struct resource *res;
310 const struct device *child;
311 const struct bus *bus;
312
313 for (res = dev->resource_list; res != NULL; res = res->next) {
314 if ((res->flags & mask_match) != mask_match)
315 continue;
316 update_constraints(ranges, dev, res);
317 }
318
319 bus = dev->link_list;
320 if (bus == NULL)
321 return;
322
323 for (child = bus->children; child != NULL; child = child->sibling)
324 avoid_fixed_resources(ranges, child, mask_match);
325}
326
327static void constrain_domain_resources(const struct device *domain, struct memranges *ranges,
328 unsigned long type)
329{
330 unsigned long mask_match = type | IORESOURCE_FIXED;
331
332 if (type == IORESOURCE_IO) {
333 /*
Nico Huber9d7728a2020-05-23 18:00:10 +0200334 * Don't allow allocations in the VGA I/O range. PCI has special
335 * cases for that.
Furquan Shaikhf4bc9eb2020-05-15 16:04:28 -0700336 */
Furquan Shaikh563e6142020-05-26 12:04:35 -0700337 memranges_create_hole(ranges, 0x3b0, 0x3df - 0x3b0 + 1);
Furquan Shaikhf4bc9eb2020-05-15 16:04:28 -0700338
339 /*
Nico Huber9d7728a2020-05-23 18:00:10 +0200340 * Resource allocator no longer supports the legacy behavior where
341 * I/O resource allocation is guaranteed to avoid aliases over legacy
342 * PCI expansion card addresses.
Furquan Shaikhf4bc9eb2020-05-15 16:04:28 -0700343 */
344 }
345
346 avoid_fixed_resources(ranges, domain, mask_match);
347}
348
349/*
Nico Huber9d7728a2020-05-23 18:00:10 +0200350 * This function creates a list of memranges of given type using the
Nico Huber52263012020-05-23 19:15:36 +0200351 * resource that is provided. If the given resource is unassigned or if
352 * the resource window size is 0, then it creates an empty list. This
Nico Huber9d7728a2020-05-23 18:00:10 +0200353 * results in resource allocation for that resource type failing for
354 * all downstream devices since there is nothing to allocate from.
Furquan Shaikhf4bc9eb2020-05-15 16:04:28 -0700355 *
Nico Huber9d7728a2020-05-23 18:00:10 +0200356 * In case of domain, it applies additional constraints to ensure that
357 * the memranges do not overlap any of the fixed resources under that
358 * domain. Domain typically seems to provide memrange for entire address
359 * space. Thus, it is up to the chipset to add DRAM and all other
360 * windows which cannot be used for resource allocation as fixed
361 * resources.
Furquan Shaikhf4bc9eb2020-05-15 16:04:28 -0700362 */
Arthur Heymans68b2b8f2022-12-19 15:04:50 +0100363static void setup_resource_ranges(const struct device *dev, unsigned long type,
364 struct memranges *ranges)
Furquan Shaikhf4bc9eb2020-05-15 16:04:28 -0700365{
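	/*
	 * For a bridge, only consider the window that was already assigned by
	 * the upstream allocator and that matches the prefetch attribute
	 * exactly; for a domain, use its (non-fixed) windows of the given type
	 * regardless of assignment.
	 */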
	const unsigned long type_mask = IORESOURCE_TYPE_MASK | IORESOURCE_FIXED |
					(dev->path.type != DEVICE_PATH_DOMAIN
					 ? IORESOURCE_PREFETCH | IORESOURCE_ASSIGNED
					 : 0);
	const unsigned long type_match = type |
			(dev->path.type != DEVICE_PATH_DOMAIN ? IORESOURCE_ASSIGNED : 0);
	const unsigned char alignment = get_alignment_by_resource_type(type);

	memranges_init_empty_with_alignment(ranges, NULL, 0, alignment);

	for (struct resource *res = dev->resource_list; res != NULL; res = res->next) {
		if ((res->flags & type_mask) != type_match)
			continue;

		printk(BIOS_DEBUG, "%s %s: base: %llx size: %llx align: %u gran: %u limit: %llx\n",
		       dev_path(dev), resource2str(res), res->base, res->size, res->align,
		       res->gran, res->limit);

		memranges_insert(ranges, res->base, res->limit - res->base + 1, type);

		if (dev->path.type != DEVICE_PATH_DOMAIN)
			break;	/* only one resource per type for bridges */
	}

	if (dev->path.type == DEVICE_PATH_DOMAIN)
		constrain_domain_resources(dev, ranges, type);

	print_resource_ranges(dev, ranges);
}

static void print_resource_done(const struct device *dev, const struct resource *res)
{
	printk(BIOS_DEBUG, "%s %s: base: %llx size: %llx align: %u gran: %u limit: %llx done\n",
	       dev_path(dev), resource2str(res), res->base, res->size, res->align,
	       res->gran, res->limit);
}

static void cleanup_domain_resource_ranges(const struct device *dev, struct memranges *ranges,
					   unsigned long type)
{
	memranges_teardown(ranges);
	for (struct resource *res = dev->resource_list; res != NULL; res = res->next) {
		if (res->flags & IORESOURCE_FIXED)
			continue;
		if ((res->flags & IORESOURCE_TYPE_MASK) != type)
			continue;
		print_resource_done(dev, res);
	}
}

/*
 * Pass 2 of the resource allocator at the bridge level loops through
 * all the resources for the bridge and generates a list of memory
 * ranges similar to that at the domain level. However, there is no need
 * to apply any additional constraints since the window allocated to the
 * bridge is guaranteed to be non-overlapping by the allocator at the
 * domain level.
 *
 * Allocation at the bridge level works the same as at the domain level
 * (starts with the biggest resource requirement from downstream devices
 * and continues in descending order). One major difference at the
 * bridge level is that it considers prefmem resources separately from
 * mem resources.
 *
 * Once allocation at the current bridge is complete, the resource
 * allocator continues walking down the downstream bridges until it hits
 * the leaf devices.
 */
static void allocate_bridge_resources(const struct device *bridge)
{
	struct memranges ranges;
	const struct resource *res;
	struct bus *bus = bridge->link_list;
	unsigned long type_match;
	struct device *child;
	const unsigned long type_mask = IORESOURCE_TYPE_MASK | IORESOURCE_PREFETCH;

	for (res = bridge->resource_list; res; res = res->next) {
		if (!res->size)
			continue;

		if (!(res->flags & IORESOURCE_BRIDGE))
			continue;

		type_match = res->flags & type_mask;

		setup_resource_ranges(bridge, type_match, &ranges);
		allocate_child_resources(bus, &ranges, type_mask, type_match);
		print_resource_done(bridge, res);
		memranges_teardown(&ranges);
	}

	for (child = bus->children; child; child = child->sibling) {
		if (!dev_has_children(child))
			continue;

		allocate_bridge_resources(child);
	}
}

/*
 * Pass 2 of the resource allocator begins at the domain level. Every
 * domain has two types of resources - io and mem. For each of these
 * resources, this function creates a list of memory ranges that can be
 * used for downstream resource allocation. This list is constrained to
 * remove any fixed resources in the domain sub-tree of the given
 * resource type. It then uses the memory ranges to apply best fit on
 * the resource requirements of the downstream devices.
 *
 * Once resources are allocated to all downstream devices of the domain,
 * it walks down each downstream bridge to continue the same process
 * until resources are allocated to all devices under the domain.
 */
static void allocate_domain_resources(const struct device *domain)
{
	struct memranges ranges;
	struct device *child;

	/* Resource type I/O */
	setup_resource_ranges(domain, IORESOURCE_IO, &ranges);
	allocate_child_resources(domain->link_list, &ranges, IORESOURCE_TYPE_MASK,
				 IORESOURCE_IO);
	cleanup_domain_resource_ranges(domain, &ranges, IORESOURCE_IO);

	/*
	 * Resource type Mem:
	 * Domain does not distinguish between mem and prefmem resources. Thus,
	 * the resource allocation at the domain level considers mem and prefmem
	 * together when finding the best fit based on the biggest resource
	 * requirement.
	 */
	setup_resource_ranges(domain, IORESOURCE_MEM, &ranges);
	allocate_child_resources(domain->link_list, &ranges,
				 IORESOURCE_TYPE_MASK, IORESOURCE_MEM);
	cleanup_domain_resource_ranges(domain, &ranges, IORESOURCE_MEM);

	for (child = domain->link_list->children; child; child = child->sibling) {
		if (!dev_has_children(child))
			continue;

		/* Continue allocation for all downstream bridges. */
		allocate_bridge_resources(child);
	}
}

/*
 * This function forms the guts of the resource allocator. It walks
 * through the entire device tree for each domain two times.
 *
 * Every domain has a fixed set of ranges. These ranges cannot be
 * relaxed based on the requirements of the downstream devices. They
 * represent the available windows from which resources can be allocated
 * to the different devices under the domain.
 *
 * In order to identify the requirements of downstream devices, the
 * resource allocator walks in a DFS fashion. It gathers the requirements
 * from leaf devices and propagates those back up to their upstream
 * bridges until the requirements for all the downstream devices of the
 * domain are gathered. This is referred to as pass 1 of the resource
 * allocator.
 *
 * Once the requirements for all the devices under the domain are
 * gathered, the resource allocator walks a second time to allocate
 * resources to downstream devices as per the requirements. It always
 * picks the biggest resource request as per the type (i/o and mem) to
 * allocate space from its fixed window to the immediate downstream
 * device of the domain. In order to accomplish best fit for the
 * resources, a list of ranges is maintained per resource type (i/o
 * and mem). At the domain level we don't differentiate between mem and
 * prefmem. Since they are allocated space from the same window, the
 * resource allocator at the domain level ensures that the biggest
 * requirement is selected independent of the prefetch type. Once the
 * resource allocation for all immediate downstream devices is complete
 * at the domain level, the resource allocator walks down the subtree
 * for each downstream bridge to continue the allocation process at the
 * bridge level. Since bridges have separate windows for i/o, mem and
 * prefmem, the best fit algorithm at the bridge level looks for the
 * biggest requirement considering prefmem resources separately from
 * non-prefmem resources. This continues until resource allocation is
 * performed for all downstream bridges in the domain sub-tree. This is
 * referred to as pass 2 of the resource allocator.
 *
 * Some rules that are followed by the resource allocator:
 *	- Allocate resource locations for every device as long as
 *	  the requirements can be satisfied.
 *	- Don't overlap with resources in fixed locations.
 *	- Don't overlap and follow the rules of bridges -- downstream
 *	  devices of bridges should use parts of the address space
 *	  allocated to the bridge.
 */
void allocate_resources(const struct device *root)
{
	const struct device *child;

	if ((root == NULL) || (root->link_list == NULL))
		return;

	for (child = root->link_list->children; child; child = child->sibling) {

		if (child->path.type != DEVICE_PATH_DOMAIN)
			continue;

		post_log_path(child);

		/* Pass 1 - Gather requirements. */
		printk(BIOS_INFO, "=== Resource allocator: %s - Pass 1 (gathering requirements) ===\n",
		       dev_path(child));
		compute_domain_resources(child);

		/* Pass 2 - Allocate resources as per gathered requirements. */
		printk(BIOS_INFO, "=== Resource allocator: %s - Pass 2 (allocating resources) ===\n",
		       dev_path(child));
		allocate_domain_resources(child);

		printk(BIOS_INFO, "=== Resource allocator: %s - resource allocation complete ===\n",
		       dev_path(child));
	}
}