/* SPDX-License-Identifier: GPL-2.0-only */

#include <console/console.h>
#include <device/device.h>
#include <memrange.h>
#include <post.h>

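/*
 * Note: prefetchable resources carry IORESOURCE_MEM as well, so
 * IORESOURCE_PREFETCH must be checked before IORESOURCE_MEM below.
 */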
static const char *resource2str(const struct resource *res)
{
	if (res->flags & IORESOURCE_IO)
		return "io";
	if (res->flags & IORESOURCE_PREFETCH)
		return "prefmem";
	if (res->flags & IORESOURCE_MEM)
		return "mem";
	return "undefined";
}

static bool dev_has_children(const struct device *dev)
{
	const struct bus *bus = dev->link_list;
	return bus && bus->children;
}

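/*
 * Indented debug print: "%*c" pads a single space character to a field
 * of width `depth`, indenting the message by the current tree depth.
 */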
#define res_printk(depth, str, ...)	printk(BIOS_DEBUG, "%*c"str, depth, ' ', __VA_ARGS__)

/*
 * During pass 1, once all the requirements for downstream devices of a
 * bridge are gathered, this function calculates the overall resource
 * requirement for the bridge. It starts by picking the largest resource
 * requirement downstream for the given resource type and works by
 * adding requirements in descending order.
 *
 * Additionally, it takes alignment and limits of the downstream devices
 * into consideration and ensures that they get propagated to the bridge
 * resource. This is required to guarantee that the upstream bridge/
 * domain honors the limit and alignment requirements for this bridge
 * based on the tightest constraints downstream.
 */
static void update_bridge_resource(const struct device *bridge, struct resource *bridge_res,
				   unsigned long type_match, int print_depth)
{
	const struct device *child;
	struct resource *child_res;
	resource_t base;
	const unsigned long type_mask = IORESOURCE_TYPE_MASK | IORESOURCE_PREFETCH;
	struct bus *bus = bridge->link_list;

	child_res = NULL;

	/*
	 * `base` keeps track of where the next allocation for child resources
	 * can take place from within the bridge resource window. Since the
	 * bridge resource window allocation is not performed yet, it can start
	 * at 0. Base gets updated every time a resource requirement is
	 * accounted for in the loop below. After scanning all these resources,
	 * base will indicate the total size requirement for the current bridge
	 * resource window.
	 */
	base = 0;

	res_printk(print_depth, "%s %s: size: %llx align: %d gran: %d limit: %llx\n",
		   dev_path(bridge), resource2str(bridge_res), bridge_res->size,
		   bridge_res->align, bridge_res->gran, bridge_res->limit);

	while ((child = largest_resource(bus, &child_res, type_mask, type_match))) {

		/* Size 0 resources can be skipped. */
		if (!child_res->size)
			continue;

		/*
		 * Propagate the resource alignment to the bridge resource. The
		 * condition can only be true for the first (largest) resource. For all
		 * other child resources, alignment is taken care of by updating the
		 * base to round up as per the child resource alignment. It is
		 * guaranteed that pass 2 follows the exact same method of picking the
		 * resource for allocation using largest_resource(). Thus, as long as
		 * the alignment for the largest child resource is propagated up to the
		 * bridge resource, it can be guaranteed that the alignment for all
		 * resources is appropriately met.
		 */
		if (child_res->align > bridge_res->align)
			bridge_res->align = child_res->align;

		/*
		 * Propagate the resource limit to the bridge resource only if the
		 * child resource limit is non-zero. If a downstream device has
		 * stricter requirements w.r.t. limits for any resource, that
		 * constraint needs to be propagated back up to the downstream
		 * bridges of the domain. This guarantees that the resource
		 * allocation which starts at the domain level takes into account
		 * all these constraints, thus working on a global view.
		 */
		if (child_res->limit && (child_res->limit < bridge_res->limit))
			bridge_res->limit = child_res->limit;

		/*
		 * Propagate the downstream resource request to allocate above the
		 * 4G boundary to the upstream bridge resource. This ensures that
		 * during pass 2, the resource allocator at the domain level has a
		 * global view of all the downstream device requirements and thus
		 * address space is allocated as per the updated flags in the
		 * bridge resource.
		 *
		 * Since the bridge resource is a single window, all the downstream
		 * resources of this bridge will then be allocated in space above
		 * the 4G boundary.
		 */
		if (child_res->flags & IORESOURCE_ABOVE_4G)
			bridge_res->flags |= IORESOURCE_ABOVE_4G;

		/*
		 * An alignment value of 0 means that the child resource has no
		 * alignment requirements, and so the base value remains unchanged
		 * here.
		 */
		base = ALIGN_UP(base, POWER_OF_2(child_res->align));

		res_printk(print_depth + 1, "%s %02lx * [0x%llx - 0x%llx] %s\n",
			   dev_path(child), child_res->index, base, base + child_res->size - 1,
			   resource2str(child_res));

		base += child_res->size;
	}

	/*
	 * After all downstream device resources are scanned, `base` represents
	 * the total size requirement for the current bridge resource window.
	 * This size needs to be rounded up to the granularity requirement of
	 * the bridge to ensure that the upstream bridge/domain allocates a big
	 * enough window.
	 */
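	/*
	 * A hypothetical example: downstream requirements of 0x2000 bytes
	 * (align 13) and 0x1000 bytes (align 12) are accumulated largest
	 * first, growing base from 0 to 0x2000 to 0x3000. With a bridge
	 * granularity of 2^20, the window size below then rounds up to
	 * 0x100000.
	 */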
	bridge_res->size = ALIGN_UP(base, POWER_OF_2(bridge_res->gran));

	res_printk(print_depth, "%s %s: size: %llx align: %d gran: %d limit: %llx done\n",
		   dev_path(bridge), resource2str(bridge_res), bridge_res->size,
		   bridge_res->align, bridge_res->gran, bridge_res->limit);
}

/*
 * During pass 1, at the bridge level, the resource allocator gathers
 * requirements from downstream devices and updates its own resource
 * windows for the provided resource type.
 */
static void compute_bridge_resources(const struct device *bridge, unsigned long type_match,
				     int print_depth)
{
	const struct device *child;
	struct resource *res;
	struct bus *bus = bridge->link_list;
	const unsigned long type_mask = IORESOURCE_TYPE_MASK | IORESOURCE_PREFETCH;

	for (res = bridge->resource_list; res; res = res->next) {
		if (!(res->flags & IORESOURCE_BRIDGE))
			continue;

		if ((res->flags & type_mask) != type_match)
			continue;

		/*
		 * Ensure that the resource requirements for all downstream bridges
		 * are gathered before updating the window for the current bridge
		 * resource.
		 */
		for (child = bus->children; child; child = child->sibling) {
			if (!dev_has_children(child))
				continue;
			compute_bridge_resources(child, type_match, print_depth + 1);
		}

		/*
		 * Update the window for the current bridge resource now that all
		 * downstream requirements are gathered.
		 */
		update_bridge_resource(bridge, res, type_match, print_depth);
	}
}

/*
 * During pass 1, the resource allocator walks down the entire sub-tree
 * of a domain. It gathers resource requirements for every downstream
 * bridge by looking at the resource requests of its children. Thus, the
 * requirement gathering begins at the leaf devices and is propagated
 * back up to the downstream bridges of the domain.
 *
 * At the domain level, it identifies every downstream bridge and walks
 * down that bridge to gather requirements for each resource type, i.e.
 * i/o, mem and prefmem. Since bridges have separate windows for mem and
 * prefmem, requirements for each need to be collected separately.
 *
 * Domain resource windows are fixed ranges and hence requirement
 * gathering does not result in any changes to these fixed ranges.
 */
static void compute_domain_resources(const struct device *domain)
{
	const struct device *child;
	const int print_depth = 1;

	if (domain->link_list == NULL)
		return;

	for (child = domain->link_list->children; child; child = child->sibling) {

		/* Skip if this is not a bridge or has no children under it. */
		if (!dev_has_children(child))
			continue;

		compute_bridge_resources(child, IORESOURCE_IO, print_depth);
		compute_bridge_resources(child, IORESOURCE_MEM, print_depth);
		compute_bridge_resources(child, IORESOURCE_MEM | IORESOURCE_PREFETCH,
					 print_depth);
	}
}

static unsigned char get_alignment_by_resource_type(const struct resource *res)
{
	if (res->flags & IORESOURCE_MEM)
		return 12;	/* Page-aligned --> log2(4KiB) */
	else if (res->flags & IORESOURCE_IO)
		return 0;	/* No special alignment required --> log2(1) */

	die("Unexpected resource type: flags(%lx)!\n", res->flags);
}

/*
 * If the resource is NULL or if the resource is not assigned, then it
 * cannot be used for allocation for downstream devices.
 */
static bool is_resource_invalid(const struct resource *res)
{
	return (res == NULL) || !(res->flags & IORESOURCE_ASSIGNED);
}

static void initialize_domain_io_resource_memranges(struct memranges *ranges,
						    const struct resource *res,
						    unsigned long memrange_type)
{
	memranges_insert(ranges, res->base, res->limit - res->base + 1, memrange_type);
}

static void initialize_domain_mem_resource_memranges(struct memranges *ranges,
						     const struct resource *res,
						     unsigned long memrange_type)
{
	resource_t res_base;
	resource_t res_limit;

	const resource_t limit_4g = 0xffffffff;

	res_base = res->base;
	res_limit = res->limit;

	/*
	 * Split the resource into two separate ranges if it crosses the 4G
	 * boundary. The memrange type is set differently to ensure that memrange
	 * does not merge these two ranges. For the range above the 4G boundary,
	 * the given memrange type is ORed with IORESOURCE_ABOVE_4G.
	 */
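	/*
	 * For example, a hypothetical resource spanning
	 * [0xc0000000 - 0x17fffffff] is inserted as
	 * [0xc0000000 - 0xffffffff] tagged `memrange_type` and
	 * [0x100000000 - 0x17fffffff] tagged
	 * `memrange_type | IORESOURCE_ABOVE_4G`.
	 */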
	if (res_base <= limit_4g) {

		resource_t range_limit;

		/* Clip the resource limit at the 4G boundary if necessary. */
		range_limit = MIN(res_limit, limit_4g);
		memranges_insert(ranges, res_base, range_limit - res_base + 1, memrange_type);

		/*
		 * If the resource lies completely below the 4G boundary, nothing more
		 * needs to be done.
		 */
		if (res_limit <= limit_4g)
			return;

		/*
		 * If the resource window crosses the 4G boundary, then update res_base
		 * to add another entry for the range above the boundary.
		 */
		res_base = limit_4g + 1;
	}

	if (res_base > res_limit)
		return;

	/*
	 * If the resource lies completely above the 4G boundary or if the resource
	 * was clipped to add two separate ranges, the range above the 4G boundary
	 * has the resource flag IORESOURCE_ABOVE_4G set. This allows the domain to
	 * handle any downstream requests for resource allocation above 4G
	 * differently.
	 */
	memranges_insert(ranges, res_base, res_limit - res_base + 1,
			 memrange_type | IORESOURCE_ABOVE_4G);
}

/*
 * This function initializes memranges for a domain device. If the
 * resource crosses the 4G boundary, then this function splits it into
 * two ranges -- one for the window below 4G and the other for the
 * window above 4G. The latter range has the IORESOURCE_ABOVE_4G flag
 * set to satisfy resource requests from downstream devices for
 * allocations above 4G.
 */
static void initialize_domain_memranges(struct memranges *ranges, const struct resource *res,
					unsigned long memrange_type)
{
	unsigned char align = get_alignment_by_resource_type(res);

	memranges_init_empty_with_alignment(ranges, NULL, 0, align);

	if (is_resource_invalid(res))
		return;

	if (res->flags & IORESOURCE_IO)
		initialize_domain_io_resource_memranges(ranges, res, memrange_type);
	else
		initialize_domain_mem_resource_memranges(ranges, res, memrange_type);
}

/*
 * This function initializes memranges for a bridge device. Unlike a
 * domain, a bridge does not need to care about a resource window
 * crossing the 4G boundary. This is handled by the resource allocator
 * at the domain level to ensure that all downstream bridges are
 * allocated space either above or below the 4G boundary as per the
 * state of IORESOURCE_ABOVE_4G for the respective bridge resource.
 *
 * So, this function creates a single range of the entire resource
 * window available for the bridge resource. Thus all downstream
 * resources of the bridge for the given resource type get allocated
 * space from the same window. If there is any downstream resource of
 * the bridge which requests allocation above 4G, then all other
 * downstream resources of the same type under the bridge get allocated
 * above 4G.
 */
static void initialize_bridge_memranges(struct memranges *ranges, const struct resource *res,
					unsigned long memrange_type)
{
	unsigned char align = get_alignment_by_resource_type(res);

	memranges_init_empty_with_alignment(ranges, NULL, 0, align);

	if (is_resource_invalid(res))
		return;

	memranges_insert(ranges, res->base, res->limit - res->base + 1, memrange_type);
}

static void print_resource_ranges(const struct device *dev, const struct memranges *ranges)
{
	const struct range_entry *r;

	printk(BIOS_INFO, " %s: Resource ranges:\n", dev_path(dev));

	if (memranges_is_empty(ranges))
		printk(BIOS_INFO, " * EMPTY!!\n");

	memranges_each_entry(r, ranges) {
		printk(BIOS_INFO, " * Base: %llx, Size: %llx, Tag: %lx\n",
		       range_entry_base(r), range_entry_size(r), range_entry_tag(r));
	}
}

/*
 * This is where the actual allocation of resources happens during
 * pass 2. Given the list of memory ranges corresponding to the
 * resource of a given type, it finds the biggest unallocated resource
 * using the type mask on the downstream bus. This continues in
 * descending order until all resources of the given type are allocated
 * address space within the current resource window.
 */
static void allocate_child_resources(struct bus *bus, struct memranges *ranges,
				     unsigned long type_mask, unsigned long type_match)
{
	struct resource *resource = NULL;
	const struct device *dev;

	while ((dev = largest_resource(bus, &resource, type_mask, type_match))) {

		if (!resource->size)
			continue;

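		/*
		 * memranges_steal() carves `resource->size` bytes out of the
		 * available ranges, honoring the resource's alignment and
		 * limit, and returns the chosen base via `resource->base`.
		 */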
		if (memranges_steal(ranges, resource->limit, resource->size, resource->align,
				    type_match, &resource->base) == false) {
			printk(BIOS_ERR, " ERROR: Resource didn't fit!!! ");
			printk(BIOS_DEBUG, " %s %02lx * size: 0x%llx limit: %llx %s\n",
			       dev_path(dev), resource->index,
			       resource->size, resource->limit, resource2str(resource));
			continue;
		}

		resource->limit = resource->base + resource->size - 1;
		resource->flags |= IORESOURCE_ASSIGNED;

		printk(BIOS_DEBUG, " %s %02lx * [0x%llx - 0x%llx] limit: %llx %s\n",
		       dev_path(dev), resource->index, resource->base,
		       resource->size ? resource->base + resource->size - 1 :
		       resource->base, resource->limit, resource2str(resource));
	}
}

static void update_constraints(struct memranges *ranges, const struct device *dev,
			       const struct resource *res)
{
	if (!res->size)
		return;

	printk(BIOS_DEBUG, " %s: %s %02lx base %08llx limit %08llx %s (fixed)\n",
	       __func__, dev_path(dev), res->index, res->base,
	       res->base + res->size - 1, resource2str(res));

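	/* Punch a hole in the ranges so this fixed window is never handed out. */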
	memranges_create_hole(ranges, res->base, res->size);
}

/*
 * Scan the entire tree to identify any fixed resources allocated by
 * any device to ensure that the address map for domain resources is
 * appropriately updated.
 *
 * Domains can typically provide a memrange for the entire address space.
 * So, this function punches holes in the address space for all fixed
 * resources that are already defined. Both I/O and normal memory
 * resources are added as fixed. Both need to be removed from the address
 * space where dynamic resource allocations are sourced.
 */
static void avoid_fixed_resources(struct memranges *ranges, const struct device *dev,
				  unsigned long mask_match)
{
	const struct resource *res;
	const struct device *child;
	const struct bus *bus;

	for (res = dev->resource_list; res != NULL; res = res->next) {
		if ((res->flags & mask_match) != mask_match)
			continue;
		update_constraints(ranges, dev, res);
	}

	bus = dev->link_list;
	if (bus == NULL)
		return;

	for (child = bus->children; child != NULL; child = child->sibling)
		avoid_fixed_resources(ranges, child, mask_match);
}

static void constrain_domain_resources(const struct device *domain, struct memranges *ranges,
				       unsigned long type)
{
	unsigned long mask_match = type | IORESOURCE_FIXED;

	if (type == IORESOURCE_IO) {
		/*
		 * Don't allow allocations in the VGA I/O range. PCI has special
		 * cases for that.
		 */
		memranges_create_hole(ranges, 0x3b0, 0x3df - 0x3b0 + 1);

		/*
		 * The resource allocator no longer supports the legacy behavior where
		 * I/O resource allocation is guaranteed to avoid aliases over legacy
		 * PCI expansion card addresses.
		 */
	}

	avoid_fixed_resources(ranges, domain, mask_match);
}

/*
 * This function creates a list of memranges of a given type using the
 * resource that is provided. If the given resource is NULL or if the
 * resource window size is 0, then it creates an empty list. This
 * results in resource allocation for that resource type failing for
 * all downstream devices since there is nothing to allocate from.
 *
 * In the case of a domain, it applies additional constraints to ensure
 * that the memranges do not overlap any of the fixed resources under
 * the domain. A domain typically provides a memrange for the entire
 * address space. Thus, it is up to the chipset to add DRAM and all
 * other windows which cannot be used for resource allocation as fixed
 * resources.
 */
static void setup_resource_ranges(const struct device *dev, const struct resource *res,
				  unsigned long type, struct memranges *ranges)
{
	printk(BIOS_DEBUG, "%s %s: base: %llx size: %llx align: %d gran: %d limit: %llx\n",
	       dev_path(dev), resource2str(res), res->base, res->size, res->align,
	       res->gran, res->limit);

	if (dev->path.type == DEVICE_PATH_DOMAIN) {
		initialize_domain_memranges(ranges, res, type);
		constrain_domain_resources(dev, ranges, type);
	} else {
		initialize_bridge_memranges(ranges, res, type);
	}

	print_resource_ranges(dev, ranges);
}

static void cleanup_resource_ranges(const struct device *dev, struct memranges *ranges,
				    const struct resource *res)
{
	memranges_teardown(ranges);
	printk(BIOS_DEBUG, "%s %s: base: %llx size: %llx align: %d gran: %d limit: %llx done\n",
	       dev_path(dev), resource2str(res), res->base, res->size, res->align,
	       res->gran, res->limit);
}

/*
 * Pass 2 of the resource allocator at the bridge level loops through
 * all the resources for the bridge and generates a list of memory
 * ranges similar to that at the domain level. However, there is no need
 * to apply any additional constraints since the window allocated to the
 * bridge is guaranteed to be non-overlapping by the allocator at the
 * domain level.
 *
 * Allocation at the bridge level works the same as at the domain level
 * (starts with the biggest resource requirement from downstream devices
 * and continues in descending order). One major difference at the
 * bridge level is that it considers prefmem resources separately from
 * mem resources.
 *
 * Once allocation at the current bridge is complete, the resource
 * allocator continues walking down the downstream bridges until it hits
 * the leaf devices.
 */
static void allocate_bridge_resources(const struct device *bridge)
{
	struct memranges ranges;
	const struct resource *res;
	struct bus *bus = bridge->link_list;
	unsigned long type_match;
	struct device *child;
	const unsigned long type_mask = IORESOURCE_TYPE_MASK | IORESOURCE_PREFETCH;

	for (res = bridge->resource_list; res; res = res->next) {
		if (!res->size)
			continue;

		if (!(res->flags & IORESOURCE_BRIDGE))
			continue;

		type_match = res->flags & type_mask;

		setup_resource_ranges(bridge, res, type_match, &ranges);
		allocate_child_resources(bus, &ranges, type_mask, type_match);
		cleanup_resource_ranges(bridge, &ranges, res);
	}

	for (child = bus->children; child; child = child->sibling) {
		if (!dev_has_children(child))
			continue;

		allocate_bridge_resources(child);
	}
}

static const struct resource *find_domain_resource(const struct device *domain,
						   unsigned long type)
{
	const struct resource *res;

	for (res = domain->resource_list; res; res = res->next) {
		if (res->flags & IORESOURCE_FIXED)
			continue;

		if ((res->flags & IORESOURCE_TYPE_MASK) == type)
			return res;
	}

	return NULL;
}

/*
 * Pass 2 of the resource allocator begins at the domain level. Every
 * domain has two types of resources - io and mem. For each of these
 * resources, this function creates a list of memory ranges that can be
 * used for downstream resource allocation. This list is constrained to
 * remove any fixed resources in the domain sub-tree of the given
 * resource type. It then uses the memory ranges to apply best fit on
 * the resource requirements of the downstream devices.
 *
 * Once resources are allocated to all downstream devices of the domain,
 * it walks down each downstream bridge to continue the same process
 * until resources are allocated to all devices under the domain.
 */
static void allocate_domain_resources(const struct device *domain)
{
	struct memranges ranges;
	struct device *child;
	const struct resource *res;

	/* Resource type I/O */
	res = find_domain_resource(domain, IORESOURCE_IO);
	if (res) {
		setup_resource_ranges(domain, res, IORESOURCE_IO, &ranges);
		allocate_child_resources(domain->link_list, &ranges, IORESOURCE_TYPE_MASK,
					 IORESOURCE_IO);
		cleanup_resource_ranges(domain, &ranges, res);
	}

	/*
	 * Resource type Mem:
	 * The domain does not distinguish between mem and prefmem resources. Thus,
	 * the resource allocation at the domain level considers mem and prefmem
	 * together when finding the best fit based on the biggest resource
	 * requirement.
	 *
	 * However, resource requests for allocation above the 4G boundary need to
	 * be handled separately if the domain resource window crosses this
	 * boundary. There is a single window for resources of type
	 * IORESOURCE_MEM. When creating memranges, this resource is split into
	 * two separate ranges -- one for the window below the 4G boundary and the
	 * other for the window above the 4G boundary (with the IORESOURCE_ABOVE_4G
	 * flag set). Thus, when allocating child resources, requests for below and
	 * above the 4G boundary are handled separately by setting the type_mask
	 * and type_match arguments of allocate_child_resources() accordingly.
	 */
	res = find_domain_resource(domain, IORESOURCE_MEM);
	if (res) {
		setup_resource_ranges(domain, res, IORESOURCE_MEM, &ranges);
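		/*
		 * Allocate space for requests below the 4G boundary first,
		 * then for those flagged IORESOURCE_ABOVE_4G.
		 */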
		allocate_child_resources(domain->link_list, &ranges,
					 IORESOURCE_TYPE_MASK | IORESOURCE_ABOVE_4G,
					 IORESOURCE_MEM);
		allocate_child_resources(domain->link_list, &ranges,
					 IORESOURCE_TYPE_MASK | IORESOURCE_ABOVE_4G,
					 IORESOURCE_MEM | IORESOURCE_ABOVE_4G);
		cleanup_resource_ranges(domain, &ranges, res);
	}

	for (child = domain->link_list->children; child; child = child->sibling) {
		if (!dev_has_children(child))
			continue;

		/* Continue allocation for all downstream bridges. */
		allocate_bridge_resources(child);
	}
}

/*
 * This function forms the guts of the resource allocator. It walks
 * through the entire device tree for each domain two times.
 *
 * Every domain has a fixed set of ranges. These ranges cannot be
 * relaxed based on the requirements of the downstream devices. They
 * represent the available windows from which resources can be allocated
 * to the different devices under the domain.
 *
 * In order to identify the requirements of downstream devices, the
 * resource allocator walks in a DFS fashion. It gathers the requirements
 * from leaf devices and propagates those back up to their upstream
 * bridges until the requirements for all the downstream devices of the
 * domain are gathered. This is referred to as pass 1 of the resource
 * allocator.
 *
 * Once the requirements for all the devices under the domain are
 * gathered, the resource allocator walks a second time to allocate
 * resources to downstream devices as per the requirements. It always
 * picks the biggest resource request as per the type (i/o and mem) to
 * allocate space from its fixed window to the immediate downstream
 * device of the domain. In order to accomplish best fit for the
 * resources, a list of ranges is maintained for each resource type (i/o
 * and mem). At the domain level we don't differentiate between mem and
 * prefmem. Since they are allocated space from the same window, the
 * resource allocator at the domain level ensures that the biggest
 * requirement is selected independent of the prefetch type. Once the
 * resource allocation for all immediate downstream devices is complete
 * at the domain level, the resource allocator walks down the subtree
 * for each downstream bridge to continue the allocation process at the
 * bridge level. Since bridges have separate windows for i/o, mem and
 * prefmem, the best fit algorithm at the bridge level looks for the
 * biggest requirement considering prefmem resources separately from
 * non-prefmem resources. This continues until resource allocation is
 * performed for all downstream bridges in the domain sub-tree. This is
 * referred to as pass 2 of the resource allocator.
 *
 * Some rules that are followed by the resource allocator:
 *  - Allocate resource locations for every device as long as
 *    the requirements can be satisfied.
 *  - Don't overlap with resources in fixed locations.
 *  - Don't overlap and follow the rules of bridges -- downstream
 *    devices of bridges should use parts of the address space
 *    allocated to the bridge.
 */
void allocate_resources(const struct device *root)
{
	const struct device *child;

	if ((root == NULL) || (root->link_list == NULL))
		return;

	for (child = root->link_list->children; child; child = child->sibling) {

		if (child->path.type != DEVICE_PATH_DOMAIN)
			continue;

		post_log_path(child);

		/* Pass 1 - Gather requirements. */
		printk(BIOS_INFO, "=== Resource allocator: %s - Pass 1 (gathering requirements) ===\n",
		       dev_path(child));
		compute_domain_resources(child);

		/* Pass 2 - Allocate resources as per gathered requirements. */
		printk(BIOS_INFO, "=== Resource allocator: %s - Pass 2 (allocating resources) ===\n",
		       dev_path(child));
		allocate_domain_resources(child);

		printk(BIOS_INFO, "=== Resource allocator: %s - resource allocation complete ===\n",
		       dev_path(child));
	}
}