/* SPDX-License-Identifier: GPL-2.0-only */

#include <commonlib/bsd/helpers.h>
#include <console/console.h>
#include <device/device.h>
#include <memrange.h>
#include <post.h>
#include <types.h>

static const char *resource2str(const struct resource *res)
{
	if (res->flags & IORESOURCE_IO)
		return "io";
	if (res->flags & IORESOURCE_PREFETCH)
		return "prefmem";
	if (res->flags & IORESOURCE_MEM)
		return "mem";
	return "undefined";
}

static void print_domain_res(const struct device *dev,
			     const struct resource *res, const char *suffix)
{
	printk(BIOS_DEBUG, "%s %s: base: %llx size: %llx align: %u gran: %u limit: %llx%s\n",
	       dev_path(dev), resource2str(res), res->base, res->size,
	       res->align, res->gran, res->limit, suffix);
}

#define res_printk(depth, str, ...) printk(BIOS_DEBUG, "%*c"str, depth, ' ', __VA_ARGS__)

static void print_bridge_res(const struct device *dev, const struct resource *res,
			     int depth, const char *suffix)
{
	res_printk(depth, "%s %s: size: %llx align: %u gran: %u limit: %llx%s\n", dev_path(dev),
		   resource2str(res), res->size, res->align, res->gran, res->limit, suffix);
}

static void print_child_res(const struct device *dev, const struct resource *res, int depth)
{
	res_printk(depth + 1, "%s %02lx * [0x%llx - 0x%llx] %s\n", dev_path(dev),
		   res->index, res->base, res->base + res->size - 1, resource2str(res));
}

static void print_fixed_res(const struct device *dev,
			    const struct resource *res, const char *prefix)
{
	printk(BIOS_DEBUG, " %s: %s %02lx base %08llx limit %08llx %s (fixed)\n",
	       prefix, dev_path(dev), res->index, res->base, res->base + res->size - 1,
	       resource2str(res));
}

static void print_assigned_res(const struct device *dev, const struct resource *res)
{
	printk(BIOS_DEBUG, "  %s %02lx * [0x%llx - 0x%llx] limit: %llx %s\n",
	       dev_path(dev), res->index, res->base, res->limit, res->limit, resource2str(res));
}

static void print_failed_res(const struct device *dev, const struct resource *res)
{
	printk(BIOS_DEBUG, "  %s %02lx * size: 0x%llx limit: %llx %s\n",
	       dev_path(dev), res->index, res->size, res->limit, resource2str(res));
}

static void print_resource_ranges(const struct device *dev, const struct memranges *ranges)
{
	const struct range_entry *r;

	printk(BIOS_INFO, " %s: Resource ranges:\n", dev_path(dev));

	if (memranges_is_empty(ranges))
		printk(BIOS_INFO, " * EMPTY!!\n");

	memranges_each_entry(r, ranges) {
		printk(BIOS_INFO, " * Base: %llx, Size: %llx, Tag: %lx\n",
		       range_entry_base(r), range_entry_size(r), range_entry_tag(r));
	}
}

static bool dev_has_children(const struct device *dev)
{
	const struct bus *bus = dev->link_list;
	return bus && bus->children;
}

static resource_t effective_limit(const struct resource *const res)
{
	/* Always allow bridge resources above 4G. */
	if (res->flags & IORESOURCE_BRIDGE)
		return res->limit;

	const resource_t quirk_4g_limit =
		res->flags & IORESOURCE_ABOVE_4G ? UINT64_MAX : UINT32_MAX;
	return MIN(res->limit, quirk_4g_limit);
}
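
/*
 * For illustration: a child resource with limit UINT64_MAX but without
 * IORESOURCE_ABOVE_4G set is treated as limited to UINT32_MAX by
 * effective_limit(), while a resource flagged IORESOURCE_ABOVE_4G keeps
 * its full 64-bit limit.
 */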

/*
 * During pass 1, once all the requirements for downstream devices of a
 * bridge are gathered, this function calculates the overall resource
 * requirement for the bridge. It starts by picking the largest resource
 * requirement downstream for the given resource type and works by
 * adding requirements in descending order.
 *
 * Additionally, it takes alignment and limits of the downstream devices
 * into consideration and ensures that they get propagated to the bridge
 * resource. This is required to guarantee that the upstream bridge/
 * domain honors the limit and alignment requirements for this bridge
 * based on the tightest constraints downstream.
 *
 * Last but not least, it stores the offset inside the bridge resource
 * for each child resource in its base field. This simplifies pass 2
 * for resources behind a bridge, as we only have to add offsets to the
 * allocated base of the bridge resource.
 */
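/*
 * For illustration (hypothetical numbers): three children requesting
 * 0x800 bytes @ align 11, 0x400 bytes @ align 10 and 0x100 bytes @ align 8
 * would be visited largest first by the loop below, recording these
 * offsets in their base fields:
 *
 *	base = ALIGN_UP(0x000, POWER_OF_2(11)) = 0x000  =>  offset 0x000
 *	base = ALIGN_UP(0x800, POWER_OF_2(10)) = 0x800  =>  offset 0x800
 *	base = ALIGN_UP(0xc00, POWER_OF_2(8))  = 0xc00  =>  offset 0xc00
 *
 * The running total 0xd00 is then rounded up to the bridge granularity,
 * e.g. with gran 12: ALIGN_UP(0xd00, POWER_OF_2(12)) = 0x1000, which is
 * the window size requested from the upstream bridge/domain. The largest
 * child alignment (11) is propagated to the bridge resource if it exceeds
 * the bridge's own alignment.
 */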
static void update_bridge_resource(const struct device *bridge, struct resource *bridge_res,
				   int print_depth)
{
	const struct device *child;
	struct resource *child_res;
	resource_t base;
	const unsigned long type_mask = IORESOURCE_TYPE_MASK | IORESOURCE_PREFETCH;
	const unsigned long type_match = bridge_res->flags & type_mask;
	struct bus *bus = bridge->link_list;

	child_res = NULL;

	/*
	 * `base` keeps track of where the next allocation for child resources
	 * can take place from within the bridge resource window. Since the
	 * bridge resource window allocation is not performed yet, it can start
	 * at 0. Base gets updated every time a resource requirement is
	 * accounted for in the loop below. After scanning all these resources,
	 * base will indicate the total size requirement for the current bridge
	 * resource window.
	 */
	base = 0;

	print_bridge_res(bridge, bridge_res, print_depth, "");

	while ((child = largest_resource(bus, &child_res, type_mask, type_match))) {

		/* Size 0 resources can be skipped. */
		if (!child_res->size)
			continue;

		/* Resources with 0 limit can't be assigned anything. */
		if (!child_res->limit)
			continue;

		/*
		 * Propagate the resource alignment to the bridge resource. The
		 * condition can only be true for the first (largest) resource. For all
		 * other child resources, alignment is taken care of by rounding their
		 * base up.
		 */
		if (child_res->align > bridge_res->align)
			bridge_res->align = child_res->align;

		/*
		 * Propagate the resource limit to the bridge resource. If a downstream
		 * device has stricter requirements w.r.t. limits for any resource, that
		 * constraint needs to be propagated back up to the bridges downstream
		 * of the domain. This way, the whole bridge resource fulfills the limit.
		 */
		if (effective_limit(child_res) < bridge_res->limit)
			bridge_res->limit = effective_limit(child_res);

		/*
		 * Alignment value of 0 means that the child resource has no alignment
		 * requirements and so the base value remains unchanged here.
		 */
		base = ALIGN_UP(base, POWER_OF_2(child_res->align));

		/*
		 * Store the relative offset inside the bridge resource for later
		 * consumption in allocate_bridge_resources(), and invalidate flags
		 * related to the base.
		 */
		child_res->base = base;
		child_res->flags &= ~(IORESOURCE_ASSIGNED | IORESOURCE_STORED);

		print_child_res(child, child_res, print_depth);

		base += child_res->size;
	}

	/*
	 * After all downstream device resources are scanned, `base` represents
	 * the total size requirement for the current bridge resource window.
	 * This size needs to be rounded up to the granularity requirement of
	 * the bridge to ensure that the upstream bridge/domain allocates a big
	 * enough window.
	 */
	bridge_res->size = ALIGN_UP(base, POWER_OF_2(bridge_res->gran));

	print_bridge_res(bridge, bridge_res, print_depth, " done");
}

/*
 * During pass 1, at the bridge level, the resource allocator gathers
 * requirements from downstream devices and updates its own resource
 * windows for the provided resource type.
 */
static void compute_bridge_resources(const struct device *bridge, unsigned long type_match,
				     int print_depth)
{
	const struct device *child;
	struct resource *res;
	struct bus *bus = bridge->link_list;
	const unsigned long type_mask = IORESOURCE_TYPE_MASK | IORESOURCE_PREFETCH;

	for (res = bridge->resource_list; res; res = res->next) {
		if (!(res->flags & IORESOURCE_BRIDGE))
			continue;

		if ((res->flags & type_mask) != type_match)
			continue;

		/*
		 * Ensure that the resource requirements for all downstream bridges are
		 * gathered before updating the window for the current bridge resource.
		 */
		for (child = bus->children; child; child = child->sibling) {
			if (!dev_has_children(child))
				continue;
			compute_bridge_resources(child, type_match, print_depth + 1);
		}

		/*
		 * Update the window for the current bridge resource now that all
		 * downstream requirements are gathered.
		 */
		update_bridge_resource(bridge, res, print_depth);
	}
}

/*
 * During pass 1, the resource allocator walks down the entire sub-tree
 * of a domain. It gathers resource requirements for every downstream
 * bridge by looking at the resource requests of its children. Thus, the
 * requirement gathering begins at the leaf devices and is propagated
 * back up to the downstream bridges of the domain.
 *
 * At the domain level, it identifies every downstream bridge and walks
 * down that bridge to gather requirements for each resource type i.e.
 * i/o, mem and prefmem. Since bridges have separate windows for mem and
 * prefmem, requirements for each need to be collected separately.
 *
 * Domain resource windows are fixed ranges and hence requirement
 * gathering does not result in any changes to these fixed ranges.
 */
static void compute_domain_resources(const struct device *domain)
{
	const struct device *child;
	const int print_depth = 1;

	if (domain->link_list == NULL)
		return;

	for (child = domain->link_list->children; child; child = child->sibling) {

		/* Skip if this is not a bridge or has no children under it. */
		if (!dev_has_children(child))
			continue;

		compute_bridge_resources(child, IORESOURCE_IO, print_depth);
		compute_bridge_resources(child, IORESOURCE_MEM, print_depth);
		compute_bridge_resources(child, IORESOURCE_MEM | IORESOURCE_PREFETCH,
					 print_depth);
	}
}

/*
 * Scan the entire tree to identify any fixed resources allocated by
 * any device to ensure that the address map for domain resources is
 * appropriately updated.
 *
 * Domains can typically provide a memrange for the entire address space.
 * So, this function punches holes in the address space for all fixed
 * resources that are already defined. Both I/O and normal memory
 * resources are added as fixed. Both need to be removed from the address
 * space where dynamic resource allocations are sourced.
 */
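/*
 * For example (hypothetical addresses): if the domain provides a mem window
 * of [0, 0xffffffff] and a device reports a fixed resource at
 * [0xfed00000, 0xfed00fff], the hole punched below splits the usable ranges
 * into [0, 0xfecfffff] and [0xfed01000, 0xffffffff] before any dynamic
 * allocation takes place.
 */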
static void avoid_fixed_resources(struct memranges *ranges, const struct device *dev,
				  unsigned long mask_match)
{
	const struct resource *res;
	const struct device *child;
	const struct bus *bus;

	for (res = dev->resource_list; res != NULL; res = res->next) {
		if ((res->flags & mask_match) != mask_match)
			continue;
		if (!res->size)
			continue;
		print_fixed_res(dev, res, __func__);
		memranges_create_hole(ranges, res->base, res->size);
	}

	bus = dev->link_list;
	if (bus == NULL)
		return;

	for (child = bus->children; child != NULL; child = child->sibling)
		avoid_fixed_resources(ranges, child, mask_match);
}

/*
 * This function creates a list of memranges of a given type using the
 * resource that is provided. It applies additional constraints to
 * ensure that the memranges do not overlap any of the fixed resources
 * under the domain. The domain typically provides a memrange for the
 * entire address space. Thus, it is up to the chipset to add DRAM and
 * all other windows which cannot be used for resource allocation as
 * fixed resources.
 */
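/*
 * As a sketch (hypothetical setup): a domain may report a single mem window
 * covering the whole address space and mark DRAM, e.g. [0, 0x7fffffff], plus
 * any MMIO carve-outs as fixed resources; avoid_fixed_resources() then
 * removes those from the ranges built here, so only the remaining gaps are
 * handed out to devices.
 */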
static void setup_resource_ranges(const struct device *const domain,
				  const unsigned long type,
				  struct memranges *const ranges)
{
	/* Align mem resources to 2^12 (4KiB pages) at a minimum, so they
	   can be memory-mapped individually (e.g. for virtualization guests). */
	const unsigned char alignment = type == IORESOURCE_MEM ? 12 : 0;
	const unsigned long type_mask = IORESOURCE_TYPE_MASK | IORESOURCE_FIXED;

	memranges_init_empty_with_alignment(ranges, NULL, 0, alignment);

	for (struct resource *res = domain->resource_list; res != NULL; res = res->next) {
		if ((res->flags & type_mask) != type)
			continue;
		print_domain_res(domain, res, "");
		memranges_insert(ranges, res->base, res->limit - res->base + 1, type);
	}

	if (type == IORESOURCE_IO) {
		/*
		 * Don't allow allocations in the VGA I/O range. PCI has special
		 * cases for that.
		 */
		memranges_create_hole(ranges, 0x3b0, 0x3df - 0x3b0 + 1);

		/*
		 * Resource allocator no longer supports the legacy behavior where
		 * I/O resource allocation is guaranteed to avoid aliases over legacy
		 * PCI expansion card addresses.
		 */
	}

	avoid_fixed_resources(ranges, domain, type | IORESOURCE_FIXED);

	print_resource_ranges(domain, ranges);
}

static void cleanup_domain_resource_ranges(const struct device *dev, struct memranges *ranges,
					   unsigned long type)
{
	memranges_teardown(ranges);
	for (struct resource *res = dev->resource_list; res != NULL; res = res->next) {
		if (res->flags & IORESOURCE_FIXED)
			continue;
		if ((res->flags & IORESOURCE_TYPE_MASK) != type)
			continue;
		print_domain_res(dev, res, " done");
	}
}

static void assign_resource(struct resource *const res, const resource_t base,
			    const struct device *const dev)
{
	res->base = base;
	res->limit = res->base + res->size - 1;
	res->flags |= IORESOURCE_ASSIGNED;
	res->flags &= ~IORESOURCE_STORED;

	print_assigned_res(dev, res);
}

/*
 * This is where the actual allocation of resources happens during
 * pass 2. We construct a list of memory ranges corresponding to the
 * resource of a given type, then look for the biggest unallocated
 * resource on the downstream bus. This continues in descending order
 * until all resources of a given type have space allocated within the
 * domain's resource window.
 */
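/*
 * For example (hypothetical requests): with downstream mem requirements of
 * 0x2000, 0x1000 and 0x1000 bytes, the loop below steals a 0x2000 window
 * from the domain ranges first, then each 0x1000 window, each time honoring
 * the resource's alignment and effective limit via memranges_steal().
 */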
static void allocate_toplevel_resources(const struct device *const domain,
					const unsigned long type)
{
	const unsigned long type_mask = IORESOURCE_TYPE_MASK;
	struct resource *res = NULL;
	const struct device *dev;
	struct memranges ranges;
	resource_t base;

	if (!dev_has_children(domain))
		return;

	setup_resource_ranges(domain, type, &ranges);

	while ((dev = largest_resource(domain->link_list, &res, type_mask, type))) {

		if (!res->size)
			continue;

		if (!memranges_steal(&ranges, effective_limit(res), res->size, res->align,
				     type, &base, CONFIG(RESOURCE_ALLOCATION_TOP_DOWN))) {
			printk(BIOS_ERR, "Resource didn't fit!!!\n");
			print_failed_res(dev, res);
			continue;
		}

		assign_resource(res, base, dev);
	}

	cleanup_domain_resource_ranges(domain, &ranges, type);
}

/*
 * Pass 2 of the resource allocator at the bridge level loops through
 * all the resources for the bridge and assigns all the base addresses
 * of its children's resources of the same type. update_bridge_resource()
 * of pass 1 pre-calculated the offsets of these bases inside the bridge
 * resource. Now that the bridge resource is allocated, all we have to
 * do is to add its final base to these offsets.
 *
 * Once allocation at the current bridge is complete, the resource
 * allocator continues walking down the downstream bridges until it hits
 * the leaf devices.
 */
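/*
 * Continuing the hypothetical example from update_bridge_resource(): if the
 * bridge window ends up allocated at 0xd0000000, the children whose offsets
 * were stored as 0x000, 0x800 and 0xc00 are assigned 0xd0000000, 0xd0000800
 * and 0xd0000c00 respectively; assign_resource_cb() merely adds the
 * allocated bridge base to each stored offset.
 */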
static void assign_resource_cb(void *param, struct device *dev, struct resource *res)
{
	/* We have to filter the same resources as update_bridge_resource(). */
	if (!res->size || !res->limit)
		return;

	assign_resource(res, *(const resource_t *)param + res->base, dev);
}

static void allocate_bridge_resources(const struct device *bridge)
{
	const unsigned long type_mask =
		IORESOURCE_TYPE_MASK | IORESOURCE_PREFETCH | IORESOURCE_FIXED;
	struct bus *const bus = bridge->link_list;
	struct resource *res;
	struct device *child;

	for (res = bridge->resource_list; res != NULL; res = res->next) {
		if (!res->size)
			continue;

		if (!(res->flags & IORESOURCE_BRIDGE))
			continue;

		if (!(res->flags & IORESOURCE_ASSIGNED))
			continue;

		/* Run assign_resource_cb() for all downstream resources of the same type. */
		search_bus_resources(bus, type_mask, res->flags & type_mask,
				     assign_resource_cb, &res->base);
	}

	for (child = bus->children; child != NULL; child = child->sibling) {
		if (!dev_has_children(child))
			continue;

		allocate_bridge_resources(child);
	}
}

/*
 * Pass 2 of the resource allocator begins at the domain level. Every
 * domain has two types of resources - io and mem. For each of these
 * resources, this function creates a list of memory ranges that can be
 * used for downstream resource allocation. This list is constrained to
 * remove any fixed resources in the domain sub-tree of the given
 * resource type. It then uses the memory ranges to apply best fit on
 * the resource requirements of the downstream devices.
 *
 * Once resources are allocated to all downstream devices of the domain,
 * it walks down each downstream bridge to finish resource assignment
 * of its children resources within its own window.
 */
static void allocate_domain_resources(const struct device *domain)
{
	/* Resource type I/O */
	allocate_toplevel_resources(domain, IORESOURCE_IO);

	/*
	 * Resource type Mem:
	 * Domain does not distinguish between mem and prefmem resources. Thus,
	 * the resource allocation at domain level considers mem and prefmem
	 * together when finding the best fit based on the biggest resource
	 * requirement.
	 */
	allocate_toplevel_resources(domain, IORESOURCE_MEM);

	struct device *child;
	for (child = domain->link_list->children; child; child = child->sibling) {
		if (!dev_has_children(child))
			continue;

		/* Continue allocation for all downstream bridges. */
		allocate_bridge_resources(child);
	}
}

/*
 * This function forms the guts of the resource allocator. It walks
 * through the entire device tree for each domain two times.
 *
 * Every domain has a fixed set of ranges. These ranges cannot be
 * relaxed based on the requirements of the downstream devices. They
 * represent the available windows from which resources can be allocated
 * to the different devices under the domain.
 *
 * In order to identify the requirements of downstream devices, the
 * resource allocator walks in a DFS fashion. It gathers the requirements
 * from leaf devices and propagates those back up to their upstream
 * bridges until the requirements for all the downstream devices of the
 * domain are gathered. This is referred to as pass 1 of the resource
 * allocator.
 *
 * Once the requirements for all the devices under the domain are
 * gathered, the resource allocator walks a second time to allocate
 * resources to downstream devices as per the requirements. It always
 * picks the biggest resource request as per the type (i/o and mem) to
 * allocate space from its fixed window to the immediate downstream
 * device of the domain. In order to accomplish best fit for the
 * resources, a list of ranges is maintained for each resource type (i/o
 * and mem). At the domain level we don't differentiate between mem and
 * prefmem. Since they are allocated space from the same window, the
 * resource allocator at the domain level ensures that the biggest
 * requirement is selected independent of the prefetch type. Once the
 * resource allocation for all immediate downstream devices is complete
 * at the domain level, the resource allocator walks down the subtree
 * for each downstream bridge to continue the allocation process at the
 * bridge level. Since bridges have either their whole window allocated
 * or nothing, we only need to place downstream resources inside these
 * windows by re-using offsets that were pre-calculated in pass 1. This
 * continues until resource allocation is realized for all downstream
 * bridges in the domain sub-tree. This is referred to as pass 2 of the
 * resource allocator.
 *
 * Some rules that are followed by the resource allocator:
 *	- Allocate resource locations for every device as long as
 *	  the requirements can be satisfied.
 *	- Don't overlap with resources in fixed locations.
 *	- Don't overlap and follow the rules of bridges -- downstream
 *	  devices of bridges should use parts of the address space
 *	  allocated to the bridge.
 */
void allocate_resources(const struct device *root)
{
	const struct device *child;

	if ((root == NULL) || (root->link_list == NULL))
		return;

	for (child = root->link_list->children; child; child = child->sibling) {

		if (child->path.type != DEVICE_PATH_DOMAIN)
			continue;

		post_log_path(child);

		/* Pass 1 - Relative placement. */
		printk(BIOS_INFO, "=== Resource allocator: %s - Pass 1 (relative placement) ===\n",
		       dev_path(child));
		compute_domain_resources(child);

		/* Pass 2 - Allocate resources as per gathered requirements. */
		printk(BIOS_INFO, "=== Resource allocator: %s - Pass 2 (allocating resources) ===\n",
		       dev_path(child));
		allocate_domain_resources(child);

		printk(BIOS_INFO, "=== Resource allocator: %s - resource allocation complete ===\n",
		       dev_path(child));
	}
}