/* SPDX-License-Identifier: GPL-2.0-only */

#include <commonlib/bsd/helpers.h>
#include <console/console.h>
#include <device/device.h>
#include <memrange.h>
#include <post.h>
#include <types.h>

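/*
 * Return a short name for the resource type, for debug output. Note that
 * prefmem is checked before mem, since prefetchable resources also have
 * IORESOURCE_MEM set.
 */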
static const char *resource2str(const struct resource *res)
{
	if (res->flags & IORESOURCE_IO)
		return "io";
	if (res->flags & IORESOURCE_PREFETCH)
		return "prefmem";
	if (res->flags & IORESOURCE_MEM)
		return "mem";
	return "undefined";
}

static bool dev_has_children(const struct device *dev)
{
	const struct bus *bus = dev->link_list;
	return bus && bus->children;
}

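/* Print a debug message indented by `depth` characters. */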
#define res_printk(depth, str, ...)	printk(BIOS_DEBUG, "%*c"str, depth, ' ', __VA_ARGS__)

/*
 * During pass 1, once all the requirements for downstream devices of a
 * bridge are gathered, this function calculates the overall resource
 * requirement for the bridge. It starts by picking the largest resource
 * requirement downstream for the given resource type and then adds the
 * remaining requirements in descending order.
 *
 * Additionally, it takes the alignment and limits of the downstream
 * devices into consideration and ensures that they get propagated to the
 * bridge resource. This is required to guarantee that the upstream
 * bridge/domain honors the limit and alignment requirements for this
 * bridge based on the tightest constraints downstream.
 */
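/*
 * Worked example with hypothetical sizes: if a bridge has two children
 * requesting 8 KiB and 4 KiB of mem, the 8 KiB request is accounted at
 * offset 0 and the 4 KiB request at offset 0x2000, so the bridge window
 * size becomes 0x3000 before being rounded up to the bridge granularity
 * below.
 */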
static void update_bridge_resource(const struct device *bridge, struct resource *bridge_res,
				   unsigned long type_match, int print_depth)
{
	const struct device *child;
	struct resource *child_res;
	resource_t base;
	const unsigned long type_mask = IORESOURCE_TYPE_MASK | IORESOURCE_PREFETCH;
	struct bus *bus = bridge->link_list;

	child_res = NULL;

	/*
	 * `base` keeps track of where the next allocation for child resources
	 * can take place from within the bridge resource window. Since the
	 * bridge resource window allocation is not performed yet, it can start
	 * at 0. Base gets updated every time a resource requirement is
	 * accounted for in the loop below. After scanning all these resources,
	 * base will indicate the total size requirement for the current bridge
	 * resource window.
	 */
	base = 0;

	res_printk(print_depth, "%s %s: size: %llx align: %u gran: %u limit: %llx\n",
		   dev_path(bridge), resource2str(bridge_res), bridge_res->size,
		   bridge_res->align, bridge_res->gran, bridge_res->limit);

	while ((child = largest_resource(bus, &child_res, type_mask, type_match))) {

		/* Size 0 resources can be skipped. */
		if (!child_res->size)
			continue;

		/* Resources with 0 limit can't be assigned anything. */
		if (!child_res->limit)
			continue;

		/*
		 * Propagate the resource alignment to the bridge resource. The
		 * condition can only be true for the first (largest) resource. For all
		 * other child resources, alignment is taken care of by updating the
		 * base to round up as per the child resource alignment. It is
		 * guaranteed that pass 2 follows the exact same method of picking the
		 * resource for allocation using largest_resource(). Thus, as long as
		 * the alignment for the largest child resource is propagated up to the
		 * bridge resource, it can be guaranteed that the alignment for all
		 * resources is appropriately met.
		 */
		if (child_res->align > bridge_res->align)
			bridge_res->align = child_res->align;

		/*
		 * Propagate the resource limit to the bridge resource. If a downstream
		 * device has stricter requirements w.r.t. limits for any resource, that
		 * constraint needs to be propagated back up to the downstream bridges
		 * of the domain. This guarantees that the resource allocation which
		 * starts at the domain level takes into account all these constraints,
		 * thus working on a global view.
		 */
		if (child_res->limit < bridge_res->limit)
			bridge_res->limit = child_res->limit;

		/*
		 * Propagate a downstream request for allocation above the 4G
		 * boundary to the upstream bridge resource. This ensures that during
		 * pass 2, the resource allocator at the domain level has a global view
		 * of all the downstream device requirements and thus address space
		 * is allocated as per the updated flags in the bridge resource.
		 *
		 * Since the bridge resource is a single window, all the downstream
		 * resources of this bridge will be allocated in space above the
		 * 4G boundary.
		 */
		if (child_res->flags & IORESOURCE_ABOVE_4G)
			bridge_res->flags |= IORESOURCE_ABOVE_4G;

		/*
		 * Alignment value of 0 means that the child resource has no alignment
		 * requirements and so the base value remains unchanged here.
		 */
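		/*
		 * For example (hypothetical values): with base == 0x500 and
		 * child_res->align == 12, POWER_OF_2(12) == 0x1000, so base
		 * rounds up to 0x1000 before this child is accounted for.
		 */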
		base = ALIGN_UP(base, POWER_OF_2(child_res->align));

		res_printk(print_depth + 1, "%s %02lx * [0x%llx - 0x%llx] %s\n",
			   dev_path(child), child_res->index, base, base + child_res->size - 1,
			   resource2str(child_res));

		base += child_res->size;
	}

	/*
	 * After all downstream device resources are scanned, `base` represents
	 * the total size requirement for the current bridge resource window.
	 * This size needs to be rounded up to the granularity requirement of
	 * the bridge to ensure that the upstream bridge/domain allocates a big
	 * enough window.
	 */
	bridge_res->size = ALIGN_UP(base, POWER_OF_2(bridge_res->gran));

	res_printk(print_depth, "%s %s: size: %llx align: %u gran: %u limit: %llx done\n",
		   dev_path(bridge), resource2str(bridge_res), bridge_res->size,
		   bridge_res->align, bridge_res->gran, bridge_res->limit);
}

/*
 * During pass 1, at the bridge level, the resource allocator gathers
 * requirements from downstream devices and updates its own resource
 * windows for the provided resource type.
 */
static void compute_bridge_resources(const struct device *bridge, unsigned long type_match,
				     int print_depth)
{
	const struct device *child;
	struct resource *res;
	struct bus *bus = bridge->link_list;
	const unsigned long type_mask = IORESOURCE_TYPE_MASK | IORESOURCE_PREFETCH;

	for (res = bridge->resource_list; res; res = res->next) {
		if (!(res->flags & IORESOURCE_BRIDGE))
			continue;

		if ((res->flags & type_mask) != type_match)
			continue;

		/*
		 * Ensure that the resource requirements for all downstream bridges are
		 * gathered before updating the window for the current bridge resource.
		 */
		for (child = bus->children; child; child = child->sibling) {
			if (!dev_has_children(child))
				continue;
			compute_bridge_resources(child, type_match, print_depth + 1);
		}

		/*
		 * Update the window for the current bridge resource now that all
		 * downstream requirements are gathered.
		 */
		update_bridge_resource(bridge, res, type_match, print_depth);
	}
}

/*
 * During pass 1, the resource allocator walks down the entire sub-tree
 * of a domain. It gathers resource requirements for every downstream
 * bridge by looking at the resource requests of its children. Thus, the
 * requirement gathering begins at the leaf devices and is propagated
 * back up to the downstream bridges of the domain.
 *
 * At the domain level, it identifies every downstream bridge and walks
 * down that bridge to gather requirements for each resource type, i.e.
 * i/o, mem and prefmem. Since bridges have separate windows for mem and
 * prefmem, requirements for each need to be collected separately.
 *
 * Domain resource windows are fixed ranges and hence requirement
 * gathering does not result in any changes to these fixed ranges.
 */
static void compute_domain_resources(const struct device *domain)
{
	const struct device *child;
	const int print_depth = 1;

	if (domain->link_list == NULL)
		return;

	for (child = domain->link_list->children; child; child = child->sibling) {

		/* Skip if this is not a bridge or it has no children under it. */
		if (!dev_has_children(child))
			continue;

		compute_bridge_resources(child, IORESOURCE_IO, print_depth);
		compute_bridge_resources(child, IORESOURCE_MEM, print_depth);
		compute_bridge_resources(child, IORESOURCE_MEM | IORESOURCE_PREFETCH,
					 print_depth);
	}
}

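/*
 * Returns the default log2 alignment used when initializing an empty
 * list of memranges for the given resource type.
 */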
static unsigned char get_alignment_by_resource_type(const unsigned long type)
{
	if (type & IORESOURCE_MEM)
		return 12;	/* Page-aligned --> log2(4KiB) */
	else if (type & IORESOURCE_IO)
		return 0;	/* No special alignment required --> log2(1) */

	die("Unexpected resource type: flags(%lu)!\n", type);
}

/*
 * If the resource is NULL or if the resource is not assigned, then it
 * cannot be used for allocation to downstream devices.
 */
static bool is_resource_invalid(const struct resource *res)
{
	return (res == NULL) || !(res->flags & IORESOURCE_ASSIGNED);
}

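/* An i/o window never crosses the 4G boundary, so insert it as a single range. */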
static void initialize_domain_io_resource_memranges(struct memranges *ranges,
						    const struct resource *res,
						    unsigned long memrange_type)
{
	memranges_insert(ranges, res->base, res->limit - res->base + 1, memrange_type);
}

static void initialize_domain_mem_resource_memranges(struct memranges *ranges,
						     const struct resource *res,
						     unsigned long memrange_type)
{
	resource_t res_base;
	resource_t res_limit;

	const resource_t limit_4g = 0xffffffff;

	res_base = res->base;
	res_limit = res->limit;

	/*
	 * Split the resource into two separate ranges if it crosses the 4G
	 * boundary. The memrange type is set differently to ensure that
	 * memranges does not merge these two ranges. For the range above the
	 * 4G boundary, the given memrange type is ORed with IORESOURCE_ABOVE_4G.
	 */
	if (res_base <= limit_4g) {

		resource_t range_limit;

		/* Clip the resource limit at the 4G boundary if necessary. */
		range_limit = MIN(res_limit, limit_4g);
		memranges_insert(ranges, res_base, range_limit - res_base + 1, memrange_type);

		/*
		 * If the resource lies completely below the 4G boundary, nothing more
		 * needs to be done.
		 */
		if (res_limit <= limit_4g)
			return;

		/*
		 * If the resource window crosses the 4G boundary, then update res_base
		 * to add another entry for the range above the boundary.
		 */
		res_base = limit_4g + 1;
	}

	if (res_base > res_limit)
		return;

	/*
	 * If the resource lies completely above the 4G boundary or if the
	 * resource was clipped to add two separate ranges, the range above the
	 * 4G boundary has the resource flag IORESOURCE_ABOVE_4G set. This
	 * allows the domain to handle any downstream requests for resource
	 * allocation above 4G differently.
	 */
	memranges_insert(ranges, res_base, res_limit - res_base + 1,
			 memrange_type | IORESOURCE_ABOVE_4G);
}

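/*
 * Example with a hypothetical window: a domain mem resource covering
 * 0xc0000000 - 0x17fffffff is split into 0xc0000000 - 0xffffffff
 * (tagged memrange_type) and 0x100000000 - 0x17fffffff (tagged
 * memrange_type | IORESOURCE_ABOVE_4G).
 */
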
/*
 * This function initializes memranges for the domain device. If a
 * resource crosses the 4G boundary, then this function splits it into
 * two ranges -- one for the window below 4G and the other for the
 * window above 4G. The latter range has the IORESOURCE_ABOVE_4G flag
 * set to satisfy resource requests from downstream devices for
 * allocations above 4G.
 */
static void initialize_domain_memranges(const struct device *dev, struct memranges *ranges,
					unsigned long memrange_type)
{
	unsigned char align = get_alignment_by_resource_type(memrange_type);

	memranges_init_empty_with_alignment(ranges, NULL, 0, align);

	struct resource *res;
	for (res = dev->resource_list; res != NULL; res = res->next) {
		if (is_resource_invalid(res))
			continue;
		if (res->flags & IORESOURCE_FIXED)
			continue;
		if ((res->flags & IORESOURCE_TYPE_MASK) != memrange_type)
			continue;

		printk(BIOS_DEBUG, "%s %s: base: %llx size: %llx align: %u gran: %u limit: %llx\n",
		       dev_path(dev), resource2str(res), res->base, res->size, res->align,
		       res->gran, res->limit);

		if (res->flags & IORESOURCE_IO)
			initialize_domain_io_resource_memranges(ranges, res, memrange_type);
		else
			initialize_domain_mem_resource_memranges(ranges, res, memrange_type);
	}
}

/*
 * This function initializes memranges for a bridge device. Unlike the
 * domain, a bridge does not need to care about its resource window
 * crossing the 4G boundary. This is handled by the resource allocator
 * at the domain level to ensure that all downstream bridges are
 * allocated space either above or below the 4G boundary as per the
 * state of IORESOURCE_ABOVE_4G for the respective bridge resource.
 *
 * So, this function creates a single range of the entire resource
 * window available for the bridge resource. Thus all downstream
 * resources of the bridge for the given resource type get allocated
 * space from the same window. If there is any downstream resource of
 * the bridge which requests allocation above 4G, then all other
 * downstream resources of the same type under the bridge get allocated
 * above 4G.
 */
static void initialize_bridge_memranges(const struct device *dev, struct memranges *ranges,
					unsigned long memrange_type)
{
	unsigned char align = get_alignment_by_resource_type(memrange_type);

	memranges_init_empty_with_alignment(ranges, NULL, 0, align);

	struct resource *res;
	for (res = dev->resource_list; res != NULL; res = res->next) {
		if (is_resource_invalid(res))
			continue;
		if (res->flags & IORESOURCE_FIXED)
			continue;
		if ((res->flags & (IORESOURCE_TYPE_MASK | IORESOURCE_PREFETCH)) == memrange_type)
			break;
	}

	/* Leave the list empty if the bridge has no matching resource. */
	if (res == NULL)
		return;

	memranges_insert(ranges, res->base, res->limit - res->base + 1, memrange_type);
}

static void print_resource_ranges(const struct device *dev, const struct memranges *ranges)
{
	const struct range_entry *r;

	printk(BIOS_INFO, " %s: Resource ranges:\n", dev_path(dev));

	if (memranges_is_empty(ranges))
		printk(BIOS_INFO, " * EMPTY!!\n");

	memranges_each_entry(r, ranges) {
		printk(BIOS_INFO, " * Base: %llx, Size: %llx, Tag: %lx\n",
		       range_entry_base(r), range_entry_size(r), range_entry_tag(r));
	}
}

/*
 * This is where the actual allocation of resources happens during
 * pass 2. Given the list of memory ranges corresponding to the
 * resource of a given type, it finds the biggest unallocated resource
 * using the type mask on the downstream bus. This continues in
 * descending order until all resources of the given type are allocated
 * address space within the current resource window.
 */
static void allocate_child_resources(struct bus *bus, struct memranges *ranges,
				     unsigned long type_mask, unsigned long type_match)
{
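	/*
	 * Top-down placement is only done at the domain level, and only when
	 * RESOURCE_ALLOCATION_TOP_DOWN is enabled; below the domain, children
	 * are always placed bottom-up within the bridge window.
	 */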
	const bool allocate_top_down =
		bus->dev->path.type == DEVICE_PATH_DOMAIN &&
		CONFIG(RESOURCE_ALLOCATION_TOP_DOWN);
	struct resource *resource = NULL;
	const struct device *dev;

	while ((dev = largest_resource(bus, &resource, type_mask, type_match))) {

		if (!resource->size)
			continue;

		if (memranges_steal(ranges, resource->limit, resource->size, resource->align,
				    type_match, &resource->base, allocate_top_down) == false) {
			printk(BIOS_ERR, " ERROR: Resource didn't fit!!! ");
			printk(BIOS_DEBUG, " %s %02lx * size: 0x%llx limit: %llx %s\n",
			       dev_path(dev), resource->index,
			       resource->size, resource->limit, resource2str(resource));
			continue;
		}

		resource->limit = resource->base + resource->size - 1;
		resource->flags |= IORESOURCE_ASSIGNED;

		printk(BIOS_DEBUG, " %s %02lx * [0x%llx - 0x%llx] limit: %llx %s\n",
		       dev_path(dev), resource->index, resource->base,
		       resource->size ? resource->base + resource->size - 1 :
		       resource->base, resource->limit, resource2str(resource));
	}
}

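/*
 * Punch a hole in the address ranges for a fixed resource so that
 * dynamic allocations cannot overlap it.
 */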
static void update_constraints(struct memranges *ranges, const struct device *dev,
			       const struct resource *res)
{
	if (!res->size)
		return;

	printk(BIOS_DEBUG, " %s: %s %02lx base %08llx limit %08llx %s (fixed)\n",
	       __func__, dev_path(dev), res->index, res->base,
	       res->base + res->size - 1, resource2str(res));

	memranges_create_hole(ranges, res->base, res->size);
}

/*
 * Scan the entire tree to identify any fixed resources allocated by
 * any device to ensure that the address map for domain resources is
 * appropriately updated.
 *
 * Domains can typically provide a memrange for the entire address
 * space. So, this function punches holes in the address space for all
 * fixed resources that are already defined. Both I/O and normal memory
 * resources are added as fixed. Both need to be removed from the
 * address space where dynamic resource allocations are sourced.
 */
static void avoid_fixed_resources(struct memranges *ranges, const struct device *dev,
				  unsigned long mask_match)
{
	const struct resource *res;
	const struct device *child;
	const struct bus *bus;

	for (res = dev->resource_list; res != NULL; res = res->next) {
		if ((res->flags & mask_match) != mask_match)
			continue;
		update_constraints(ranges, dev, res);
	}

	bus = dev->link_list;
	if (bus == NULL)
		return;

	for (child = bus->children; child != NULL; child = child->sibling)
		avoid_fixed_resources(ranges, child, mask_match);
}

static void constrain_domain_resources(const struct device *domain, struct memranges *ranges,
				       unsigned long type)
{
	unsigned long mask_match = type | IORESOURCE_FIXED;

	if (type == IORESOURCE_IO) {
		/*
		 * Don't allow allocations in the VGA I/O range. PCI has special
		 * cases for that.
		 */
		memranges_create_hole(ranges, 0x3b0, 0x3df - 0x3b0 + 1);

		/*
		 * The resource allocator no longer supports the legacy behavior where
		 * I/O resource allocation is guaranteed to avoid aliases over legacy
		 * PCI expansion card addresses.
		 */
	}

	avoid_fixed_resources(ranges, domain, mask_match);
}

/*
 * This function creates a list of memranges of the given type using
 * the resource that is provided. If the given resource is NULL or if
 * the resource window size is 0, then it creates an empty list. This
 * results in resource allocation for that resource type failing for
 * all downstream devices since there is nothing to allocate from.
 *
 * In the case of a domain, it applies additional constraints to ensure
 * that the memranges do not overlap any of the fixed resources under
 * the domain. A domain typically provides a memrange for the entire
 * address space. Thus, it is up to the chipset to add DRAM and all
 * other windows which cannot be used for resource allocation as fixed
 * resources.
 */
static void setup_resource_ranges(const struct device *dev, unsigned long type,
				  struct memranges *ranges)
{
	if (dev->path.type == DEVICE_PATH_DOMAIN) {
		initialize_domain_memranges(dev, ranges, type);
		constrain_domain_resources(dev, ranges, type);
	} else {
		initialize_bridge_memranges(dev, ranges, type);
	}

	print_resource_ranges(dev, ranges);
}

static void print_resource_done(const struct device *dev, const struct resource *res)
{
	printk(BIOS_DEBUG, "%s %s: base: %llx size: %llx align: %u gran: %u limit: %llx done\n",
	       dev_path(dev), resource2str(res), res->base, res->size, res->align,
	       res->gran, res->limit);
}

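/*
 * Tear down the given memranges and log the final allocation of every
 * assigned, non-fixed domain resource of the given type.
 */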
static void cleanup_domain_resource_ranges(const struct device *dev, struct memranges *ranges,
					   unsigned long type)
{
	memranges_teardown(ranges);
	for (struct resource *res = dev->resource_list; res != NULL; res = res->next) {
		if (is_resource_invalid(res))
			continue;
		if (res->flags & IORESOURCE_FIXED)
			continue;
		if ((res->flags & IORESOURCE_TYPE_MASK) != type)
			continue;
		print_resource_done(dev, res);
	}
}

/*
 * Pass 2 of the resource allocator at the bridge level loops through
 * all the resources for the bridge and generates a list of memory
 * ranges similar to that at the domain level. However, there is no need
 * to apply any additional constraints since the window allocated to the
 * bridge is guaranteed to be non-overlapping by the allocator at the
 * domain level.
 *
 * Allocation at the bridge level works the same as at the domain level
 * (starts with the biggest resource requirement from downstream devices
 * and continues in descending order). One major difference at the
 * bridge level is that it considers prefmem resources separately from
 * mem resources.
 *
 * Once allocation at the current bridge is complete, the resource
 * allocator continues walking down the downstream bridges until it
 * hits the leaf devices.
 */
static void allocate_bridge_resources(const struct device *bridge)
{
	struct memranges ranges;
	const struct resource *res;
	struct bus *bus = bridge->link_list;
	unsigned long type_match;
	struct device *child;
	const unsigned long type_mask = IORESOURCE_TYPE_MASK | IORESOURCE_PREFETCH;

	for (res = bridge->resource_list; res; res = res->next) {
		if (!res->size)
			continue;

		if (!(res->flags & IORESOURCE_BRIDGE))
			continue;

		type_match = res->flags & type_mask;

		setup_resource_ranges(bridge, type_match, &ranges);
		allocate_child_resources(bus, &ranges, type_mask, type_match);
		print_resource_done(bridge, res);
		memranges_teardown(&ranges);
	}

	for (child = bus->children; child; child = child->sibling) {
		if (!dev_has_children(child))
			continue;

		allocate_bridge_resources(child);
	}
}

/*
 * Pass 2 of the resource allocator begins at the domain level. Every
 * domain has two types of resources - io and mem. For each of these
 * resources, this function creates a list of memory ranges that can be
 * used for downstream resource allocation. This list is constrained to
 * remove any fixed resources in the domain sub-tree of the given
 * resource type. It then uses the memory ranges to apply best fit on
 * the resource requirements of the downstream devices.
 *
 * Once resources are allocated to all downstream devices of the
 * domain, it walks down each downstream bridge to continue the same
 * process until resources are allocated to all devices under the
 * domain.
 */
static void allocate_domain_resources(const struct device *domain)
{
	struct memranges ranges;
	struct device *child;

	/* Resource type I/O */
	setup_resource_ranges(domain, IORESOURCE_IO, &ranges);
	allocate_child_resources(domain->link_list, &ranges, IORESOURCE_TYPE_MASK,
				 IORESOURCE_IO);
	cleanup_domain_resource_ranges(domain, &ranges, IORESOURCE_IO);

	/*
	 * Resource type Mem:
	 * The domain does not distinguish between mem and prefmem resources. Thus,
	 * the resource allocation at the domain level considers mem and prefmem
	 * together when finding the best fit based on the biggest resource
	 * requirement.
	 *
	 * However, resource requests for allocation above the 4G boundary need to
	 * be handled separately if the domain resource window crosses this
	 * boundary. There is a single window for resources of type
	 * IORESOURCE_MEM. When creating memranges, this resource is split into
	 * two separate ranges -- one for the window below the 4G boundary and the
	 * other for the window above the 4G boundary (with the IORESOURCE_ABOVE_4G
	 * flag set). Thus, when allocating child resources, requests for below and
	 * above the 4G boundary are handled separately by setting the type_mask
	 * and type_match arguments of allocate_child_resources() accordingly.
	 */
	setup_resource_ranges(domain, IORESOURCE_MEM, &ranges);
	allocate_child_resources(domain->link_list, &ranges,
				 IORESOURCE_TYPE_MASK | IORESOURCE_ABOVE_4G,
				 IORESOURCE_MEM);
	allocate_child_resources(domain->link_list, &ranges,
				 IORESOURCE_TYPE_MASK | IORESOURCE_ABOVE_4G,
				 IORESOURCE_MEM | IORESOURCE_ABOVE_4G);
	cleanup_domain_resource_ranges(domain, &ranges, IORESOURCE_MEM);

	for (child = domain->link_list->children; child; child = child->sibling) {
		if (!dev_has_children(child))
			continue;

		/* Continue allocation for all downstream bridges. */
		allocate_bridge_resources(child);
	}
}

/*
 * This function forms the guts of the resource allocator. It walks
 * through the entire device tree for each domain two times.
 *
 * Every domain has a fixed set of ranges. These ranges cannot be
 * relaxed based on the requirements of the downstream devices. They
 * represent the available windows from which resources can be
 * allocated to the different devices under the domain.
 *
 * In order to identify the requirements of downstream devices, the
 * resource allocator walks in a DFS fashion. It gathers the
 * requirements from leaf devices and propagates those back up to their
 * upstream bridges until the requirements for all the downstream
 * devices of the domain are gathered. This is referred to as pass 1 of
 * the resource allocator.
 *
 * Once the requirements for all the devices under the domain are
 * gathered, the resource allocator walks a second time to allocate
 * resources to downstream devices as per the requirements. It always
 * picks the biggest resource request as per the type (i/o and mem) to
 * allocate space from its fixed window to the immediate downstream
 * device of the domain. In order to accomplish best fit for the
 * resources, a list of ranges is maintained for each resource type
 * (i/o and mem). At the domain level we don't differentiate between
 * mem and prefmem. Since they are allocated space from the same
 * window, the resource allocator at the domain level ensures that the
 * biggest requirement is selected independent of the prefetch type.
 * Once the resource allocation for all immediate downstream devices is
 * complete at the domain level, the resource allocator walks down the
 * subtree for each downstream bridge to continue the allocation
 * process at the bridge level. Since bridges have separate windows for
 * i/o, mem and prefmem, the best-fit algorithm at the bridge level
 * looks for the biggest requirement considering prefmem resources
 * separately from non-prefmem resources. This continues until resource
 * allocation is performed for all downstream bridges in the domain
 * sub-tree. This is referred to as pass 2 of the resource allocator.
 *
 * Some rules that are followed by the resource allocator:
 *	- Allocate resource locations for every device as long as
 *	  the requirements can be satisfied.
 *	- Don't overlap with resources in fixed locations.
 *	- Don't overlap and follow the rules of bridges -- downstream
 *	  devices of bridges should use parts of the address space
 *	  allocated to the bridge.
 */
void allocate_resources(const struct device *root)
{
	const struct device *child;

	if ((root == NULL) || (root->link_list == NULL))
		return;

	for (child = root->link_list->children; child; child = child->sibling) {

		if (child->path.type != DEVICE_PATH_DOMAIN)
			continue;

		post_log_path(child);

		/* Pass 1 - Gather requirements. */
		printk(BIOS_INFO, "=== Resource allocator: %s - Pass 1 (gathering requirements) ===\n",
		       dev_path(child));
		compute_domain_resources(child);

		/* Pass 2 - Allocate resources as per gathered requirements. */
		printk(BIOS_INFO, "=== Resource allocator: %s - Pass 2 (allocating resources) ===\n",
		       dev_path(child));
		allocate_domain_resources(child);

		printk(BIOS_INFO, "=== Resource allocator: %s - resource allocation complete ===\n",
		       dev_path(child));
	}
}