/* SPDX-License-Identifier: GPL-2.0-only */

#include <commonlib/bsd/helpers.h>
#include <console/console.h>
#include <device/device.h>
#include <memrange.h>
#include <post.h>
#include <types.h>

static const char *resource2str(const struct resource *res)
{
	if (res->flags & IORESOURCE_IO)
		return "io";
	if (res->flags & IORESOURCE_PREFETCH)
		return "prefmem";
	if (res->flags & IORESOURCE_MEM)
		return "mem";
	return "undefined";
}

static void print_domain_res(const struct device *dev,
			     const struct resource *res, const char *suffix)
{
	printk(BIOS_DEBUG, "%s %s: base: %llx size: %llx align: %u gran: %u limit: %llx%s\n",
	       dev_path(dev), resource2str(res), res->base, res->size,
	       res->align, res->gran, res->limit, suffix);
}

#define res_printk(depth, str, ...)	printk(BIOS_DEBUG, "%*c"str, depth, ' ', __VA_ARGS__)

static void print_bridge_res(const struct device *dev, const struct resource *res,
			     int depth, const char *suffix)
{
	res_printk(depth, "%s %s: size: %llx align: %u gran: %u limit: %llx%s\n", dev_path(dev),
		   resource2str(res), res->size, res->align, res->gran, res->limit, suffix);
}

static void print_child_res(const struct device *dev, const struct resource *res, int depth)
{
	res_printk(depth + 1, "%s %02lx * [0x%llx - 0x%llx] %s\n", dev_path(dev),
		   res->index, res->base, res->base + res->size - 1, resource2str(res));
}

static void print_fixed_res(const struct device *dev,
			    const struct resource *res, const char *prefix)
{
	printk(BIOS_DEBUG, " %s: %s %02lx base %08llx limit %08llx %s (fixed)\n",
	       prefix, dev_path(dev), res->index, res->base, res->base + res->size - 1,
	       resource2str(res));
}

static void print_assigned_res(const struct device *dev, const struct resource *res)
{
	printk(BIOS_DEBUG, " %s %02lx * [0x%llx - 0x%llx] limit: %llx %s\n",
	       dev_path(dev), res->index, res->base, res->limit, res->limit, resource2str(res));
}

static void print_failed_res(const struct device *dev, const struct resource *res)
{
	printk(BIOS_DEBUG, " %s %02lx * size: 0x%llx limit: %llx %s\n",
	       dev_path(dev), res->index, res->size, res->limit, resource2str(res));
}

static void print_resource_ranges(const struct device *dev, const struct memranges *ranges)
{
	const struct range_entry *r;

	printk(BIOS_INFO, " %s: Resource ranges:\n", dev_path(dev));

	if (memranges_is_empty(ranges))
		printk(BIOS_INFO, " * EMPTY!!\n");

	memranges_each_entry(r, ranges) {
		printk(BIOS_INFO, " * Base: %llx, Size: %llx, Tag: %lx\n",
		       range_entry_base(r), range_entry_size(r), range_entry_tag(r));
	}
}

static bool dev_has_children(const struct device *dev)
{
	const struct bus *bus = dev->link_list;
	return bus && bus->children;
}

static resource_t effective_limit(const struct resource *const res)
{
	/* Always allow bridge resources above 4G. */
	if (res->flags & IORESOURCE_BRIDGE)
		return res->limit;

	const resource_t quirk_4g_limit =
		res->flags & IORESOURCE_ABOVE_4G ? UINT64_MAX : UINT32_MAX;
	return MIN(res->limit, quirk_4g_limit);
}
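
/*
 * Illustrative example (numbers assumed, not from the original source):
 * a leaf mem resource with limit 0xFFFFFFFFF and IORESOURCE_ABOVE_4G
 * unset gets clamped to UINT32_MAX (0xFFFFFFFF); with the flag set, its
 * full limit is kept. Bridge resources are never clamped here.
 */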
95
Furquan Shaikhf4bc9eb2020-05-15 16:04:28 -070096/*
Nico Huber9d7728a2020-05-23 18:00:10 +020097 * During pass 1, once all the requirements for downstream devices of a
98 * bridge are gathered, this function calculates the overall resource
99 * requirement for the bridge. It starts by picking the largest resource
100 * requirement downstream for the given resource type and works by
Furquan Shaikhf4bc9eb2020-05-15 16:04:28 -0700101 * adding requirements in descending order.
102 *
Nico Huber9d7728a2020-05-23 18:00:10 +0200103 * Additionally, it takes alignment and limits of the downstream devices
104 * into consideration and ensures that they get propagated to the bridge
105 * resource. This is required to guarantee that the upstream bridge/
106 * domain honors the limit and alignment requirements for this bridge
107 * based on the tightest constraints downstream.
Nico Huber9260ea62020-05-23 23:20:13 +0200108 *
109 * Last but not least, it stores the offset inside the bridge resource
110 * for each child resource in its base field. This simplifies pass 2
111 * for resources behind a bridge, as we only have to add offsets to the
112 * allocated base of the bridge resource.
Furquan Shaikhf4bc9eb2020-05-15 16:04:28 -0700113 */
114static void update_bridge_resource(const struct device *bridge, struct resource *bridge_res,
Furquan Shaikhc3568612020-05-16 15:18:23 -0700115 unsigned long type_match, int print_depth)
Furquan Shaikhf4bc9eb2020-05-15 16:04:28 -0700116{
117 const struct device *child;
118 struct resource *child_res;
119 resource_t base;
Furquan Shaikhf4bc9eb2020-05-15 16:04:28 -0700120 const unsigned long type_mask = IORESOURCE_TYPE_MASK | IORESOURCE_PREFETCH;
121 struct bus *bus = bridge->link_list;
122
123 child_res = NULL;
124
125 /*
Nico Huber9d7728a2020-05-23 18:00:10 +0200126 * `base` keeps track of where the next allocation for child resources
127 * can take place from within the bridge resource window. Since the
128 * bridge resource window allocation is not performed yet, it can start
129 * at 0. Base gets updated every time a resource requirement is
130 * accounted for in the loop below. After scanning all these resources,
131 * base will indicate the total size requirement for the current bridge
132 * resource window.
Furquan Shaikhf4bc9eb2020-05-15 16:04:28 -0700133 */
134 base = 0;
135
Nico Huberee570652020-05-24 17:56:51 +0200136 print_bridge_res(bridge, bridge_res, print_depth, "");
Furquan Shaikhf4bc9eb2020-05-15 16:04:28 -0700137
138 while ((child = largest_resource(bus, &child_res, type_mask, type_match))) {
139
140 /* Size 0 resources can be skipped. */
141 if (!child_res->size)
142 continue;
143
Nico Huberec7b3132020-05-23 18:20:47 +0200144 /* Resources with 0 limit can't be assigned anything. */
145 if (!child_res->limit)
146 continue;
147
Furquan Shaikhf4bc9eb2020-05-15 16:04:28 -0700148 /*
Nico Huber74169c12020-05-23 18:15:34 +0200149 * Propagate the resource alignment to the bridge resource. The
150 * condition can only be true for the first (largest) resource. For all
Nico Huber9260ea62020-05-23 23:20:13 +0200151 * other child resources, alignment is taken care of by rounding their
152 * base up.
Furquan Shaikhf4bc9eb2020-05-15 16:04:28 -0700153 */
Nico Huber74169c12020-05-23 18:15:34 +0200154 if (child_res->align > bridge_res->align)
Furquan Shaikhf4bc9eb2020-05-15 16:04:28 -0700155 bridge_res->align = child_res->align;
156
Furquan Shaikhf4bc9eb2020-05-15 16:04:28 -0700157 /*
Nico Huberec7b3132020-05-23 18:20:47 +0200158 * Propagate the resource limit to the bridge resource. If a downstream
159 * device has stricter requirements w.r.t. limits for any resource, that
Nico Huber9260ea62020-05-23 23:20:13 +0200160 * constraint needs to be propagated back up to the bridges downstream
161 * of the domain. This way, the whole bridge resource fulfills the limit.
Furquan Shaikhf4bc9eb2020-05-15 16:04:28 -0700162 */
Nico Huber52263012020-05-23 19:15:36 +0200163 if (effective_limit(child_res) < bridge_res->limit)
164 bridge_res->limit = effective_limit(child_res);
Furquan Shaikh1bb05ef302020-05-15 17:33:52 -0700165
166 /*
Furquan Shaikhf4bc9eb2020-05-15 16:04:28 -0700167 * Alignment value of 0 means that the child resource has no alignment
168 * requirements and so the base value remains unchanged here.
169 */
Nico Huberb3277042020-05-23 18:08:50 +0200170 base = ALIGN_UP(base, POWER_OF_2(child_res->align));
Furquan Shaikhf4bc9eb2020-05-15 16:04:28 -0700171
Nico Huber9260ea62020-05-23 23:20:13 +0200172 /*
173 * Store the relative offset inside the bridge resource for later
174 * consumption in allocate_bridge_resources(), and invalidate flags
175 * related to the base.
176 */
177 child_res->base = base;
178 child_res->flags &= ~(IORESOURCE_ASSIGNED | IORESOURCE_STORED);
179
Nico Huberee570652020-05-24 17:56:51 +0200180 print_child_res(child, child_res, print_depth);
Furquan Shaikhf4bc9eb2020-05-15 16:04:28 -0700181
182 base += child_res->size;
183 }
184
185 /*
Nico Huber9d7728a2020-05-23 18:00:10 +0200186 * After all downstream device resources are scanned, `base` represents
187 * the total size requirement for the current bridge resource window.
188 * This size needs to be rounded up to the granularity requirement of
189 * the bridge to ensure that the upstream bridge/domain allocates big
190 * enough window.
Furquan Shaikhf4bc9eb2020-05-15 16:04:28 -0700191 */
Nico Huberb3277042020-05-23 18:08:50 +0200192 bridge_res->size = ALIGN_UP(base, POWER_OF_2(bridge_res->gran));
Furquan Shaikhf4bc9eb2020-05-15 16:04:28 -0700193
Nico Huberee570652020-05-24 17:56:51 +0200194 print_bridge_res(bridge, bridge_res, print_depth, " done");
Furquan Shaikhf4bc9eb2020-05-15 16:04:28 -0700195}
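
/*
 * Worked example for the computation above (illustrative numbers, not
 * from the original source). Three children request mem resources:
 *
 *	A: size 0x4000, align 14; B: size 0x1000, align 12; C: size 0x20, align 5
 *
 * largest_resource() yields them in descending size order, so with
 * `base` starting at 0:
 *
 *	A: ALIGN_UP(0x0, 0x4000) = 0x0, base advances to 0x4000
 *	B: ALIGN_UP(0x4000, 0x1000) = 0x4000, base advances to 0x5000
 *	C: ALIGN_UP(0x5000, 0x20) = 0x5000, base advances to 0x5020
 *
 * The bridge resource inherits align 14 from A. Assuming gran 20 (1MiB,
 * as for PCI bridge mem windows), the bridge size becomes
 * ALIGN_UP(0x5020, 0x100000) = 0x100000. The offsets 0x0, 0x4000 and
 * 0x5000 stay in the children's base fields for pass 2.
 */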

/*
 * During pass 1, at the bridge level, the resource allocator gathers
 * requirements from downstream devices and updates its own resource
 * windows for the provided resource type.
 */
static void compute_bridge_resources(const struct device *bridge, unsigned long type_match,
				     int print_depth)
{
	const struct device *child;
	struct resource *res;
	struct bus *bus = bridge->link_list;
	const unsigned long type_mask = IORESOURCE_TYPE_MASK | IORESOURCE_PREFETCH;

	for (res = bridge->resource_list; res; res = res->next) {
		if (!(res->flags & IORESOURCE_BRIDGE))
			continue;

		if ((res->flags & type_mask) != type_match)
			continue;

		/*
		 * Ensure that the resource requirements for all downstream bridges are
		 * gathered before updating the window for the current bridge resource.
		 */
		for (child = bus->children; child; child = child->sibling) {
			if (!dev_has_children(child))
				continue;
			compute_bridge_resources(child, type_match, print_depth + 1);
		}

		/*
		 * Update the window for the current bridge resource now that all
		 * downstream requirements are gathered.
		 */
		update_bridge_resource(bridge, res, type_match, print_depth);
	}
}
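
/*
 * Illustrative call order (assumed topology, not from the original
 * source): for a domain above bridge B1 above bridge B2 above leaf
 * devices, compute_bridge_resources(B1) first recurses into B2, so
 * update_bridge_resource() runs for B2 before B1. Requirements thus
 * bubble up leaf-first, as pass 1 expects.
 */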

/*
 * During pass 1, the resource allocator walks down the entire sub-tree
 * of a domain. It gathers resource requirements for every downstream
 * bridge by looking at the resource requests of its children. Thus, the
 * requirement gathering begins at the leaf devices and is propagated
 * back up to the downstream bridges of the domain.
 *
 * At the domain level, it identifies every downstream bridge and walks
 * down that bridge to gather requirements for each resource type, i.e.
 * i/o, mem and prefmem. Since bridges have separate windows for mem and
 * prefmem, requirements for each need to be collected separately.
 *
 * Domain resource windows are fixed ranges and hence requirement
 * gathering does not result in any changes to these fixed ranges.
 */
static void compute_domain_resources(const struct device *domain)
{
	const struct device *child;
	const int print_depth = 1;

	if (domain->link_list == NULL)
		return;

	for (child = domain->link_list->children; child; child = child->sibling) {

		/* Skip if this is not a bridge or has no children under it. */
		if (!dev_has_children(child))
			continue;

		compute_bridge_resources(child, IORESOURCE_IO, print_depth);
		compute_bridge_resources(child, IORESOURCE_MEM, print_depth);
		compute_bridge_resources(child, IORESOURCE_MEM | IORESOURCE_PREFETCH,
					 print_depth);
	}
}

static unsigned char get_alignment_by_resource_type(const unsigned long type)
{
	if (type & IORESOURCE_MEM)
		return 12;	/* Page-aligned --> log2(4KiB) */
	else if (type & IORESOURCE_IO)
		return 0;	/* No special alignment required --> log2(1) */

	die("Unexpected resource type: flags(%lu)!\n", type);
}

static void update_constraints(struct memranges *ranges, const struct device *dev,
			       const struct resource *res)
{
	if (!res->size)
		return;

	print_fixed_res(dev, res, __func__);

	memranges_create_hole(ranges, res->base, res->size);
}

/*
 * Scan the entire tree to identify any fixed resources allocated by
 * any device to ensure that the address map for domain resources is
 * appropriately updated.
 *
 * Domains can typically provide a memrange for the entire address
 * space. So, this function punches holes in the address space for all
 * fixed resources that are already defined. Both I/O and normal memory
 * resources are added as fixed. Both need to be removed from the
 * address space where dynamic resource allocations are sourced.
 */
static void avoid_fixed_resources(struct memranges *ranges, const struct device *dev,
				  unsigned long mask_match)
{
	const struct resource *res;
	const struct device *child;
	const struct bus *bus;

	for (res = dev->resource_list; res != NULL; res = res->next) {
		if ((res->flags & mask_match) != mask_match)
			continue;
		update_constraints(ranges, dev, res);
	}

	bus = dev->link_list;
	if (bus == NULL)
		return;

	for (child = bus->children; child != NULL; child = child->sibling)
		avoid_fixed_resources(ranges, child, mask_match);
}

static void constrain_domain_resources(const struct device *domain, struct memranges *ranges,
				       unsigned long type)
{
	unsigned long mask_match = type | IORESOURCE_FIXED;

	if (type == IORESOURCE_IO) {
		/*
		 * Don't allow allocations in the VGA I/O range. PCI has special
		 * cases for that.
		 */
		memranges_create_hole(ranges, 0x3b0, 0x3df - 0x3b0 + 1);

		/*
		 * The resource allocator no longer supports the legacy behavior where
		 * I/O resource allocation is guaranteed to avoid aliases over legacy
		 * PCI expansion card addresses.
		 */
	}

	avoid_fixed_resources(ranges, domain, mask_match);
}

/*
 * This function creates a list of memranges of a given type using the
 * resources that are provided. It applies additional constraints to
 * ensure that the memranges do not overlap any of the fixed resources
 * under the domain. The domain typically provides a memrange for the
 * entire address space. Thus, it is up to the chipset to add DRAM and
 * all other windows which cannot be used for resource allocation as
 * fixed resources.
 */
static void setup_resource_ranges(const struct device *const domain,
				  const unsigned long type,
				  struct memranges *const ranges)
{
	const unsigned long type_mask = IORESOURCE_TYPE_MASK | IORESOURCE_FIXED;
	const unsigned char alignment = get_alignment_by_resource_type(type);

	memranges_init_empty_with_alignment(ranges, NULL, 0, alignment);

	for (struct resource *res = domain->resource_list; res != NULL; res = res->next) {
		if ((res->flags & type_mask) != type)
			continue;
		print_domain_res(domain, res, "");
		memranges_insert(ranges, res->base, res->limit - res->base + 1, type);
	}

	constrain_domain_resources(domain, ranges, type);

	print_resource_ranges(domain, ranges);
}
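
/*
 * Illustrative sketch (assumed numbers, not from the original source):
 * if the domain reports a mem window [0x80000000 - 0xDFFFFFFF] and a
 * fixed resource occupies [0xA0000000 - 0xA0000FFF] inside it,
 * setup_resource_ranges() first inserts the full window, then
 * avoid_fixed_resources() punches a hole, leaving two usable ranges:
 *
 *	[0x80000000 - 0x9FFFFFFF] and [0xA0001000 - 0xDFFFFFFF]
 */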

static void cleanup_domain_resource_ranges(const struct device *dev, struct memranges *ranges,
					   unsigned long type)
{
	memranges_teardown(ranges);
	for (struct resource *res = dev->resource_list; res != NULL; res = res->next) {
		if (res->flags & IORESOURCE_FIXED)
			continue;
		if ((res->flags & IORESOURCE_TYPE_MASK) != type)
			continue;
		print_domain_res(dev, res, " done");
	}
}

static void assign_resource(struct resource *const res, const resource_t base,
			    const struct device *const dev)
{
	res->base = base;
	res->limit = res->base + res->size - 1;
	res->flags |= IORESOURCE_ASSIGNED;
	res->flags &= ~IORESOURCE_STORED;

	print_assigned_res(dev, res);
}

/*
 * This is where the actual allocation of resources happens during
 * pass 2. We construct a list of memory ranges corresponding to the
 * domain's resources of a given type, then look for the biggest
 * unallocated resource on the downstream bus. This continues in
 * descending order until all resources of the given type have space
 * allocated within the domain's resource window.
 */
static void allocate_toplevel_resources(const struct device *const domain,
					const unsigned long type)
{
	const unsigned long type_mask = IORESOURCE_TYPE_MASK;
	struct resource *res = NULL;
	const struct device *dev;
	struct memranges ranges;
	resource_t base;

	if (!dev_has_children(domain))
		return;

	setup_resource_ranges(domain, type, &ranges);

	while ((dev = largest_resource(domain->link_list, &res, type_mask, type))) {

		if (!res->size)
			continue;

		if (!memranges_steal(&ranges, res->limit, res->size, res->align, type, &base,
				     CONFIG(RESOURCE_ALLOCATION_TOP_DOWN))) {
			printk(BIOS_ERR, "Resource didn't fit!!!\n");
			print_failed_res(dev, res);
			continue;
		}

		assign_resource(res, base, dev);
	}

	cleanup_domain_resource_ranges(domain, &ranges, type);
}
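
/*
 * Illustrative pass-2 walk (assumed numbers): with the usable mem ranges
 * [0x80000000 - 0x9FFFFFFF] and [0xA0001000 - 0xDFFFFFFF] from above, a
 * bridge window of size 0x100000/align 20 and a leaf BAR of size
 * 0x4000/align 14 get placed in descending size order. Bottom-up,
 * memranges_steal() would return base 0x80000000 for the window and
 * 0x80100000 for the BAR; with RESOURCE_ALLOCATION_TOP_DOWN, 0xDFF00000
 * and 0xDFEFC000 respectively.
 */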

/*
 * Pass 2 of the resource allocator at the bridge level loops through
 * all the resources for the bridge and assigns the base addresses of
 * its children's resources of the same type. update_bridge_resource()
 * of pass 1 pre-calculated the offsets of these bases inside the bridge
 * resource. Now that the bridge resource is allocated, all we have to
 * do is to add its final base to these offsets.
 *
 * Once allocation at the current bridge is complete, the resource
 * allocator continues walking down the downstream bridges until it
 * hits the leaf devices.
 */
static void assign_resource_cb(void *param, struct device *dev, struct resource *res)
{
	/* We have to filter the same resources as update_bridge_resource(). */
	if (!res->size || !res->limit)
		return;

	assign_resource(res, *(const resource_t *)param + res->base, dev);
}
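
/*
 * Example (illustrative): if pass 1 stored offsets 0x0 and 0x4000 in two
 * children of a bridge mem window, and pass 2 assigned the window itself
 * a base of 0xDFF00000, assign_resource_cb() sets the children's bases
 * to 0xDFF00000 and 0xDFF04000 respectively.
 */
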
static void allocate_bridge_resources(const struct device *bridge)
{
	const unsigned long type_mask =
		IORESOURCE_TYPE_MASK | IORESOURCE_PREFETCH | IORESOURCE_FIXED;
	struct bus *const bus = bridge->link_list;
	struct resource *res;
	struct device *child;

	for (res = bridge->resource_list; res != NULL; res = res->next) {
		if (!res->size)
			continue;

		if (!(res->flags & IORESOURCE_BRIDGE))
			continue;

		if (!(res->flags & IORESOURCE_ASSIGNED))
			continue;

		/* Run assign_resource_cb() for all downstream resources of the same type. */
		search_bus_resources(bus, type_mask, res->flags & type_mask,
				     assign_resource_cb, &res->base);
	}

	for (child = bus->children; child != NULL; child = child->sibling) {
		if (!dev_has_children(child))
			continue;

		allocate_bridge_resources(child);
	}
}

/*
 * Pass 2 of the resource allocator begins at the domain level. Every
 * domain has two types of resources: i/o and mem. For each of these
 * resources, this function creates a list of memory ranges that can be
 * used for downstream resource allocation. This list is constrained to
 * remove any fixed resources in the domain sub-tree of the given
 * resource type. It then uses the memory ranges to apply best fit on
 * the resource requirements of the downstream devices.
 *
 * Once resources are allocated to all downstream devices of the domain,
 * it walks down each downstream bridge to finish resource assignment
 * of its children's resources within its own window.
 */
static void allocate_domain_resources(const struct device *domain)
{
	/* Resource type I/O */
	allocate_toplevel_resources(domain, IORESOURCE_IO);

	/*
	 * Resource type Mem:
	 * The domain does not distinguish between mem and prefmem resources.
	 * Thus, the resource allocation at the domain level considers mem and
	 * prefmem together when finding the best fit based on the biggest
	 * resource requirement.
	 */
	allocate_toplevel_resources(domain, IORESOURCE_MEM);

	struct device *child;
	for (child = domain->link_list->children; child; child = child->sibling) {
		if (!dev_has_children(child))
			continue;

		/* Continue allocation for all downstream bridges. */
		allocate_bridge_resources(child);
	}
}

/*
 * This function forms the guts of the resource allocator. It walks
 * through the entire device tree for each domain two times.
 *
 * Every domain has a fixed set of ranges. These ranges cannot be
 * relaxed based on the requirements of the downstream devices. They
 * represent the available windows from which resources can be allocated
 * to the different devices under the domain.
 *
 * In order to identify the requirements of downstream devices, the
 * resource allocator walks in a DFS fashion. It gathers the requirements
 * from leaf devices and propagates those back up to their upstream
 * bridges until the requirements for all the downstream devices of the
 * domain are gathered. This is referred to as pass 1 of the resource
 * allocator.
 *
 * Once the requirements for all the devices under the domain are
 * gathered, the resource allocator walks a second time to allocate
 * resources to downstream devices as per the requirements. It always
 * picks the biggest resource request as per the type (i/o and mem) to
 * allocate space from its fixed window to the immediate downstream
 * device of the domain. In order to accomplish best fit for the
 * resources, a list of ranges is maintained by each resource type (i/o
 * and mem). At the domain level we don't differentiate between mem and
 * prefmem. Since they are allocated space from the same window, the
 * resource allocator at the domain level ensures that the biggest
 * requirement is selected independent of the prefetch type. Once the
 * resource allocation for all immediate downstream devices is complete
 * at the domain level, the resource allocator walks down the subtree
 * for each downstream bridge to continue the allocation process at the
 * bridge level. Since bridges have either their whole window allocated
 * or nothing, we only need to place downstream resources inside these
 * windows by re-using offsets that were pre-calculated in pass 1. This
 * continues until resource allocation is realized for all downstream
 * bridges in the domain sub-tree. This is referred to as pass 2 of the
 * resource allocator.
 *
 * Some rules that are followed by the resource allocator:
 *	- Allocate resource locations for every device as long as
 *	  the requirements can be satisfied.
 *	- Don't overlap with resources in fixed locations.
 *	- Don't overlap and follow the rules of bridges -- downstream
 *	  devices of bridges should use parts of the address space
 *	  allocated to the bridge.
 */
void allocate_resources(const struct device *root)
{
	const struct device *child;

	if ((root == NULL) || (root->link_list == NULL))
		return;

	for (child = root->link_list->children; child; child = child->sibling) {

		if (child->path.type != DEVICE_PATH_DOMAIN)
			continue;

		post_log_path(child);

		/* Pass 1 - Relative placement. */
		printk(BIOS_INFO, "=== Resource allocator: %s - Pass 1 (relative placement) ===\n",
		       dev_path(child));
		compute_domain_resources(child);

		/* Pass 2 - Allocate resources as per gathered requirements. */
		printk(BIOS_INFO, "=== Resource allocator: %s - Pass 2 (allocating resources) ===\n",
		       dev_path(child));
		allocate_domain_resources(child);

		printk(BIOS_INFO, "=== Resource allocator: %s - resource allocation complete ===\n",
		       dev_path(child));
	}
}