blob: a5062fcf8087df6b4bee40961ee058ccca4036fb [file] [log] [blame]
Marc Jones1f500842020-10-15 14:32:51 -06001/* SPDX-License-Identifier: GPL-2.0-or-later */
2
3#include <assert.h>
4#include <console/console.h>
5#include <post.h>
6#include <device/pci.h>
7#include <soc/chip_common.h>
8#include <soc/soc_util.h>
9#include <soc/util.h>
10#include <stdlib.h>
11
/* Pairing of a device with one of its resources; queued (singly linked)
   while resources wait to be placed inside an IIO stack window. */
struct pci_resource {
	struct device *dev;		/* owner of @res */
	struct resource *res;		/* resource awaiting assignment */
	struct pci_resource *next;	/* next queued resource */
};
17
/* Group of resources that share one alignment requirement.  Groups form
   a list sorted by decreasing alignment (built by add_res_to_stack()). */
struct stack_dev_resource {
	uint8_t align;			/* log2 alignment of all children */
	struct pci_resource *children;	/* resources with this alignment */
	struct stack_dev_resource *next;	/* next (smaller) alignment group */
};
23
/* Resource window types provided by each IIO stack. */
typedef enum {
	RES_TYPE_IO = 0,	/* IO port window */
	RES_TYPE_NONPREF_MEM,	/* non-prefetchable MMIO, kept below 4GiB */
	RES_TYPE_PREF_MEM,	/* prefetchable MMIO window */
	MAX_RES_TYPES
} RES_TYPE;
30
31static RES_TYPE get_res_type(uint64_t flags)
32{
33 if (flags & IORESOURCE_IO)
34 return RES_TYPE_IO;
35 if (flags & IORESOURCE_MEM) {
36 if (flags & IORESOURCE_PREFETCH) {
37 printk(BIOS_DEBUG, "%s:%d flags: 0x%llx\n", __func__, __LINE__, flags);
38 return RES_TYPE_PREF_MEM;
39 }
40 /* both 64-bit and 32-bit use below 4GB address space */
41 return RES_TYPE_NONPREF_MEM;
42 }
43 printk(BIOS_ERR, "Invalid resource type 0x%llx\n", flags);
44 die("");
45}
46
47static bool need_assignment(uint64_t flags)
48{
49 if (flags & (IORESOURCE_STORED | IORESOURCE_RESERVE | IORESOURCE_FIXED |
50 IORESOURCE_ASSIGNED))
51 return false;
52 else
53 return true;
54}
55
56static uint64_t get_resource_base(STACK_RES *stack, RES_TYPE res_type)
57{
58 if (res_type == RES_TYPE_IO) {
59 assert(stack->PciResourceIoBase <= stack->PciResourceIoLimit);
60 return stack->PciResourceIoBase;
61 }
62 if (res_type == RES_TYPE_NONPREF_MEM) {
63 assert(stack->PciResourceMem32Base <= stack->PciResourceMem32Limit);
64 return stack->PciResourceMem32Base;
65 }
66 assert(stack->PciResourceMem64Base <= stack->PciResourceMem64Limit);
67 return stack->PciResourceMem64Base;
68}
69
70static void set_resource_base(STACK_RES *stack, RES_TYPE res_type, uint64_t base)
71{
72 if (res_type == RES_TYPE_IO) {
73 assert(base <= (stack->PciResourceIoLimit + 1));
74 stack->PciResourceIoBase = base;
75 } else if (res_type == RES_TYPE_NONPREF_MEM) {
76 assert(base <= (stack->PciResourceMem32Limit + 1));
77 stack->PciResourceMem32Base = base;
78 } else {
79 assert(base <= (stack->PciResourceMem64Limit + 1));
80 stack->PciResourceMem64Base = base;
81 }
82}
83
84static void assign_stack_resources(struct iiostack_resource *stack_list,
85 struct device *dev, struct resource *bridge);
86
87void xeonsp_pci_domain_scan_bus(struct device *dev)
88{
89 DEV_FUNC_ENTER(dev);
90 struct bus *link = dev->link_list;
91
92 printk(BIOS_SPEW, "%s:%s scanning buses under device %s\n",
93 __FILE__, __func__, dev_path(dev));
Elyes Haouasf1ba7d62022-09-13 10:03:44 +020094 while (link) {
Marc Jones1f500842020-10-15 14:32:51 -060095 if (link->secondary == 0) { // scan only PSTACK buses
96 struct device *d;
97 for (d = link->children; d; d = d->sibling)
98 pci_probe_dev(d, link, d->path.pci.devfn);
99 scan_bridges(link);
100 } else {
101 pci_scan_bus(link, PCI_DEVFN(0, 0), 0xff);
102 }
103 link = link->next;
104 }
105 DEV_FUNC_EXIT(dev);
106}
107
108static void xeonsp_pci_dev_iterator(struct bus *bus,
109 void (*dev_iterator)(struct device *, void *),
110 void (*res_iterator)(struct device *, struct resource *, void *),
111 void *data)
112{
113 struct device *curdev;
114 struct resource *res;
115
116 /* Walk through all devices and find which resources they need. */
117 for (curdev = bus->children; curdev; curdev = curdev->sibling) {
118 struct bus *link;
119
120 if (!curdev->enabled)
121 continue;
122
123 if (!curdev->ops || !curdev->ops->read_resources) {
124 if (curdev->path.type != DEVICE_PATH_APIC)
125 printk(BIOS_ERR, "%s missing read_resources\n",
126 dev_path(curdev));
127 continue;
128 }
129
130 if (dev_iterator)
131 dev_iterator(curdev, data);
132
133 if (res_iterator) {
134 for (res = curdev->resource_list; res; res = res->next)
135 res_iterator(curdev, res, data);
136 }
137
138 /* Read in the resources behind the current device's links. */
139 for (link = curdev->link_list; link; link = link->next)
140 xeonsp_pci_dev_iterator(link, dev_iterator, res_iterator, data);
141 }
142}
143
/* Device-iterator callback: log the device to POST and invoke its
   read_resources() operation (caller guarantees the op exists). */
static void xeonsp_pci_dev_read_resources(struct device *dev, void *data)
{
	post_log_path(dev);
	dev->ops->read_resources(dev);
}
149
/* No-op read_resources() stand-in, installed by xeonsp_reset_pci_op()
   so the generic allocator does not re-read resources and warn. */
static void xeonsp_pci_dev_dummy_func(struct device *dev)
{
}
153
154static void xeonsp_reset_pci_op(struct device *dev, void *data)
155{
156 if (dev->ops)
157 dev->ops->read_resources = xeonsp_pci_dev_dummy_func;
158}
159
160static STACK_RES *find_stack_for_bus(struct iiostack_resource *info, uint8_t bus)
161{
162 for (int i = 0; i < info->no_of_stacks; ++i) {
163 if (bus >= info->res[i].BusBase && bus <= info->res[i].BusLimit)
164 return &info->res[i];
165 }
166 return NULL;
167}
168
/*
 * Queue a device resource for placement, keeping @root sorted by
 * decreasing alignment.  Resources with equal alignment share one
 * stack_dev_resource group and are appended in arrival order; a new
 * group is allocated when no group with a matching alignment exists.
 */
static void add_res_to_stack(struct stack_dev_resource **root,
	struct device *dev, struct resource *res)
{
	/* Locate the matching group, or the group to insert after. */
	struct stack_dev_resource *cur = *root;
	while (cur) {
		if (cur->align == res->align || !cur->next) /* equal or last record */
			break;
		else if (cur->align > res->align) {
			if (cur->next->align < res->align) /* need to insert new record here */
				break;
			cur = cur->next;
		} else {
			break;
		}
	}

	struct stack_dev_resource *nr;
	if (!cur || cur->align != res->align) { /* need to add new record */
		nr = malloc(sizeof(struct stack_dev_resource));
		if (nr == 0)
			die("assign_resource_to_stack(): out of memory.\n");
		memset(nr, 0, sizeof(struct stack_dev_resource));
		nr->align = res->align;
		if (!cur) {
			*root = nr; /* head node */
		} else if (cur->align > nr->align) {
			/* link the new group after @cur */
			if (!cur->next) {
				cur->next = nr;
			} else {
				nr->next = cur->next;
				cur->next = nr;
			}
		} else { /* insert in the beginning */
			nr->next = cur;
			*root = nr;
		}
	} else {
		nr = cur;
	}

	assert(nr && nr->align == res->align);

	/* Append the (dev, res) pair to the group's child list. */
	struct pci_resource *npr = malloc(sizeof(struct pci_resource));
	if (!npr)
		die("%s: out of memory.\n", __func__);
	npr->res = res;
	npr->dev = dev;
	npr->next = NULL;

	if (!nr->children) {
		nr->children = npr;
	} else {
		struct pci_resource *pr = nr->children;
		while (pr->next)
			pr = pr->next;
		pr->next = npr;
	}
}
227
/*
 * Place every resource queued in @res_root (groups sorted largest
 * alignment first) into the stack's @res_type window, assigning
 * base/limit and marking each IORESOURCE_ASSIGNED.  When @bridge is
 * given, its window is grown to cover all assigned children — or set
 * to a minimal default window (granularity-sized) when there are no
 * children.  Finally the stack's next free base is advanced.
 */
static void reserve_dev_resources(STACK_RES *stack, RES_TYPE res_type,
	struct stack_dev_resource *res_root, struct resource *bridge)
{
	uint64_t orig_base, base;

	orig_base = get_resource_base(stack, res_type);

	base = orig_base;
	int first = 1;
	while (res_root) { /* loop through all devices grouped by alignment requirements */
		struct pci_resource *pr = res_root->children;
		while (pr) {
			if (first) {
				if (bridge) { /* takes highest alignment */
					if (bridge->align < pr->res->align)
						bridge->align = pr->res->align;
					orig_base = ALIGN_UP(orig_base, 1 << bridge->align);
				} else {
					orig_base = ALIGN_UP(orig_base, 1 << pr->res->align);
				}
				base = orig_base;

				if (bridge)
					bridge->base = base;
				pr->res->base = base;
				first = 0;
			} else {
				/* subsequent resources pack after the previous one */
				pr->res->base = ALIGN_UP(base, 1 << pr->res->align);
			}
			pr->res->limit = pr->res->base + pr->res->size - 1;
			base = pr->res->limit + 1;
			pr->res->flags |= (IORESOURCE_ASSIGNED);
			pr = pr->next;
		}
		res_root = res_root->next;
	}

	if (bridge) {
		/* this bridge doesn't have any resources, will set it to default window */
		if (first) {
			orig_base = ALIGN_UP(orig_base, 1 << bridge->align);
			bridge->base = orig_base;
			base = orig_base + (1ULL << bridge->gran);
		}

		bridge->size = ALIGN_UP(base, 1 << bridge->align) - bridge->base;

		bridge->limit = bridge->base + bridge->size - 1;
		bridge->flags |= (IORESOURCE_ASSIGNED);
		base = bridge->limit + 1;
	}

	/* Record the next free address back into the stack window. */
	set_resource_base(stack, res_type, base);
}
282
283static void reclaim_resource_mem(struct stack_dev_resource *res_root)
284{
285 while (res_root) { /* loop through all devices grouped by alignment requirements */
286 /* free pci_resource */
287 struct pci_resource *pr = res_root->children;
288 while (pr) {
289 struct pci_resource *dpr = pr;
290 pr = pr->next;
291 free(dpr);
292 }
293
294 /* free stack_dev_resource */
295 struct stack_dev_resource *ddr = res_root;
296 res_root = res_root->next;
297 free(ddr);
298 }
299}
300
/*
 * Assign the bridge windows of @dev (recursing through the hierarchy
 * below via assign_stack_resources()) and fold the resulting child
 * ranges back into the parent window @bridge, keeping its
 * base/limit/size consistent.  With @bridge NULL all bridge window
 * types are processed; otherwise only windows matching @bridge's type.
 */
static void assign_bridge_resources(struct iiostack_resource *stack_list,
	struct device *dev, struct resource *bridge)
{
	struct resource *res;
	if (!dev->enabled)
		return;

	for (res = dev->resource_list; res; res = res->next) {
		/* only bridge windows, and only of the parent's type */
		if (!(res->flags & IORESOURCE_BRIDGE) ||
			(bridge && (get_res_type(bridge->flags) != get_res_type(res->flags))))
			continue;

		assign_stack_resources(stack_list, dev, res);

		if (!bridge)
			continue;

		/* for 1st time update, overloading IORESOURCE_ASSIGNED */
		if (!(bridge->flags & IORESOURCE_ASSIGNED)) {
			bridge->base = res->base;
			bridge->limit = res->limit;
			bridge->flags |= (IORESOURCE_ASSIGNED);
		} else {
			/* update bridge range from child bridge range */
			if (res->base < bridge->base)
				bridge->base = res->base;
			if (res->limit > bridge->limit)
				bridge->limit = res->limit;
		}
		bridge->size = (bridge->limit - bridge->base + 1);
	}
}
333
/*
 * Allocate IIO stack window space to everything below @dev.
 *
 * For each bus under @dev: first recurse through child bridges (via
 * assign_bridge_resources(), which calls back into this function) so
 * bridge windows are sized bottom-up, then collect all unassigned
 * non-bridge resources per resource type and place them in the
 * matching stack window.  When @bridge is non-NULL only the bridge's
 * own resource type is processed and its window is updated.
 */
static void assign_stack_resources(struct iiostack_resource *stack_list,
	struct device *dev, struct resource *bridge)
{
	struct bus *bus;

	/* Read in the resources behind the current device's links. */
	for (bus = dev->link_list; bus; bus = bus->next) {
		struct device *curdev;
		STACK_RES *stack;

		/* get IIO stack for this bus */
		stack = find_stack_for_bus(stack_list, bus->secondary);
		assert(stack);

		/* Assign resources to bridge */
		for (curdev = bus->children; curdev; curdev = curdev->sibling)
			assign_bridge_resources(stack_list, curdev, bridge);

		/* Pick non-bridged resources for resource allocation for each resource type */
		RES_TYPE res_types[MAX_RES_TYPES] = {
			RES_TYPE_IO,
			RES_TYPE_NONPREF_MEM,
			RES_TYPE_PREF_MEM
		};

		uint8_t no_res_types = MAX_RES_TYPES;

		/* if it is a bridge, only process matching bridge resource type */
		if (bridge) {
			res_types[0] = get_res_type(bridge->flags);
			no_res_types = 1;
		}

		printk(BIOS_DEBUG, "%s:%d no_res_types: %d\n", __func__, __LINE__,
			no_res_types);

		/* Process each resource type */
		for (int rt = 0; rt < no_res_types; ++rt) {
			struct stack_dev_resource *res_root = NULL;
			printk(BIOS_DEBUG, "%s:%d rt: %d\n", __func__, __LINE__, rt);
			for (curdev = bus->children; curdev; curdev = curdev->sibling) {
				struct resource *res;
				printk(BIOS_DEBUG, "%s:%d dev: %s\n",
					__func__, __LINE__, dev_path(curdev));
				if (!curdev->enabled)
					continue;

				for (res = curdev->resource_list; res; res = res->next) {
					printk(BIOS_DEBUG, "%s:%d dev: %s, flags: 0x%lx\n",
						__func__, __LINE__,
						dev_path(curdev), res->flags);
					/* skip empty, wrong-type, bridge, or
					   already-handled resources */
					if (res->size == 0 ||
						get_res_type(res->flags) != res_types[rt] ||
						(res->flags & IORESOURCE_BRIDGE) ||
						!need_assignment(res->flags))
						continue;
					else
						add_res_to_stack(&res_root, curdev, res);
				}
			}

			/* Allocate resources and update bridge range */
			if (res_root || (bridge && !(bridge->flags & IORESOURCE_ASSIGNED))) {
				reserve_dev_resources(stack, res_types[rt], res_root, bridge);
				reclaim_resource_mem(res_root);
			}
		}
	}
}
403
Arthur Heymans165893b2020-11-06 12:15:41 +0100404static uint8_t is_pci64bit_alloc(void)
405{
Jonathan Zhanga63ea892023-01-25 11:16:58 -0800406/*
407 * For SPR-SP FSP which supports SOC_INTEL_PCIE_64BITS_ALLOC,
408 * Pci64BitResourceAllocation field does not exist in IIO_UDS HOB.
409 */
410#if CONFIG(SOC_INTEL_PCIE_64BIT_ALLOC)
411 return 1;
412#else
Arthur Heymans165893b2020-11-06 12:15:41 +0100413 const IIO_UDS *hob = get_iio_uds();
Arthur Heymans165893b2020-11-06 12:15:41 +0100414 return hob->PlatformData.Pci64BitResourceAllocation;
Jonathan Zhanga63ea892023-01-25 11:16:58 -0800415#endif
Arthur Heymans165893b2020-11-06 12:15:41 +0100416}
417
Marc Jones1f500842020-10-15 14:32:51 -0600418static void xeonsp_pci_domain_read_resources(struct device *dev)
419{
420 struct bus *link;
421
422 DEV_FUNC_ENTER(dev);
423
424 pci_domain_read_resources(dev);
425
426 /*
427 * Walk through all devices in this domain and read resources.
428 * Since there is no callback when read resource operation is
429 * complete for all devices, domain read resource function initiates
430 * read resources for all devices and swaps read resource operation
431 * with dummy function to avoid warning.
432 */
433 for (link = dev->link_list; link; link = link->next)
434 xeonsp_pci_dev_iterator(link, xeonsp_pci_dev_read_resources, NULL, NULL);
435
436 for (link = dev->link_list; link; link = link->next)
437 xeonsp_pci_dev_iterator(link, xeonsp_reset_pci_op, NULL, NULL);
438
439 struct iiostack_resource stack_info = {0};
Arthur Heymans165893b2020-11-06 12:15:41 +0100440 get_iiostack_info(&stack_info);
441 if (!is_pci64bit_alloc()) {
Marc Jones1f500842020-10-15 14:32:51 -0600442 /*
443 * Split 32 bit address space between prefetchable and
444 * non-prefetchable windows
445 */
446 for (int s = 0; s < stack_info.no_of_stacks; ++s) {
447 STACK_RES *res = &stack_info.res[s];
448 uint64_t length = (res->PciResourceMem32Limit -
449 res->PciResourceMem32Base + 1)/2;
450 res->PciResourceMem64Limit = res->PciResourceMem32Limit;
451 res->PciResourceMem32Limit = (res->PciResourceMem32Base + length - 1);
452 res->PciResourceMem64Base = res->PciResourceMem32Limit + 1;
453 }
454 }
455
456 /* assign resources */
457 assign_stack_resources(&stack_info, dev, NULL);
458
459 DEV_FUNC_EXIT(dev);
460}
461
462static void reset_resource_to_unassigned(struct device *dev, struct resource *res, void *data)
463{
464 if ((res->flags & (IORESOURCE_IO | IORESOURCE_MEM)) &&
465 !(res->flags & (IORESOURCE_FIXED | IORESOURCE_RESERVE))) {
466 res->flags &= ~IORESOURCE_ASSIGNED;
467 }
468}
469
470void xeonsp_pci_domain_set_resources(struct device *dev)
471{
472 DEV_FUNC_ENTER(dev);
473
474 print_resource_tree(dev, BIOS_SPEW, "Before xeonsp pci domain set resource");
475
476 /* reset bus 0 dev resource assignment - need to change them to FSP IIOStack window */
477 xeonsp_pci_dev_iterator(dev->link_list, NULL, reset_resource_to_unassigned, NULL);
478
479 /* update dev resources based on IIOStack IO/Mem32/Mem64 windows */
480 xeonsp_pci_domain_read_resources(dev);
481
482 struct bus *link = dev->link_list;
Elyes Haouasf1ba7d62022-09-13 10:03:44 +0200483 while (link) {
Marc Jones1f500842020-10-15 14:32:51 -0600484 assign_resources(link);
485 link = link->next;
486 }
487
488 print_resource_tree(dev, BIOS_SPEW, "After xeonsp pci domain set resource");
489
490 DEV_FUNC_EXIT(dev);
491}
492
493/* Attach IIO stack bus numbers with dummy device to PCI DOMAIN 0000 device */
494void attach_iio_stacks(struct device *dev)
495{
496 struct bus *iiostack_bus;
497 struct device dummy;
498 struct iiostack_resource stack_info = {0};
499
500 DEV_FUNC_ENTER(dev);
501
502 get_iiostack_info(&stack_info);
503 for (int s = 0; s < stack_info.no_of_stacks; ++s) {
504 /* only non zero bus no. needs to be enumerated */
505 if (stack_info.res[s].BusBase == 0)
506 continue;
507
508 iiostack_bus = malloc(sizeof(struct bus));
Elyes Haouasf1ba7d62022-09-13 10:03:44 +0200509 if (!iiostack_bus)
Marc Jones1f500842020-10-15 14:32:51 -0600510 die("%s: out of memory.\n", __func__);
511 memset(iiostack_bus, 0, sizeof(*iiostack_bus));
512 memcpy(iiostack_bus, dev->bus, sizeof(*iiostack_bus));
513 iiostack_bus->secondary = stack_info.res[s].BusBase;
514 iiostack_bus->subordinate = stack_info.res[s].BusBase;
515 iiostack_bus->dev = NULL;
516 iiostack_bus->children = NULL;
517 iiostack_bus->next = NULL;
518 iiostack_bus->link_num = 1;
519
520 dummy.bus = iiostack_bus;
521 dummy.path.type = DEVICE_PATH_PCI;
522 dummy.path.pci.devfn = 0;
523 uint32_t id = pci_read_config32(&dummy, PCI_VENDOR_ID);
524 if (id == 0xffffffff)
525 printk(BIOS_WARNING, "IIO Stack device %s not visible\n",
526 dev_path(&dummy));
527
Elyes Haouasf1ba7d62022-09-13 10:03:44 +0200528 if (!dev->link_list) {
Marc Jones1f500842020-10-15 14:32:51 -0600529 dev->link_list = iiostack_bus;
530 } else {
531 struct bus *nlink = dev->link_list;
Elyes Haouasf1ba7d62022-09-13 10:03:44 +0200532 while (nlink->next)
Marc Jones1f500842020-10-15 14:32:51 -0600533 nlink = nlink->next;
534 nlink->next = iiostack_bus;
535 }
536 }
537
538 DEV_FUNC_EXIT(dev);
539}