blob: a9b1260646aa0182d5845619bafb652191493648 [file] [log] [blame]
Marc Jones1f500842020-10-15 14:32:51 -06001/* SPDX-License-Identifier: GPL-2.0-or-later */
2
3#include <assert.h>
4#include <console/console.h>
5#include <post.h>
6#include <device/pci.h>
7#include <soc/chip_common.h>
8#include <soc/soc_util.h>
9#include <soc/util.h>
10#include <stdlib.h>
11
/* Singly-linked list node pairing a device with one of its resources
   that still needs to be placed into an IIO stack window. */
struct pci_resource {
	struct device *dev;		/* device owning 'res' */
	struct resource *res;		/* resource to allocate */
	struct pci_resource *next;	/* next entry in the same alignment group */
};
17
/* Group of resources sharing the same alignment requirement.
   Groups are linked in a list kept sorted by descending 'align'
   (see add_res_to_stack()). */
struct stack_dev_resource {
	uint8_t align;			/* log2 alignment shared by all children */
	struct pci_resource *children;	/* resources collected for this alignment */
	struct stack_dev_resource *next;	/* next (smaller-alignment) group */
};
23
/* Resource window types provided by each IIO stack. */
typedef enum {
	RES_TYPE_IO = 0,	/* I/O port window */
	RES_TYPE_NONPREF_MEM,	/* MMIO below 4GB (both 64-bit and 32-bit BARs) */
	RES_TYPE_PREF_MEM,	/* prefetchable MMIO, backed by the Mem64 window */
	MAX_RES_TYPES
} RES_TYPE;
30
31static RES_TYPE get_res_type(uint64_t flags)
32{
33 if (flags & IORESOURCE_IO)
34 return RES_TYPE_IO;
35 if (flags & IORESOURCE_MEM) {
36 if (flags & IORESOURCE_PREFETCH) {
37 printk(BIOS_DEBUG, "%s:%d flags: 0x%llx\n", __func__, __LINE__, flags);
38 return RES_TYPE_PREF_MEM;
39 }
40 /* both 64-bit and 32-bit use below 4GB address space */
41 return RES_TYPE_NONPREF_MEM;
42 }
Nico Huber6a07db22023-05-12 15:46:24 +020043 die("Invalid resource type 0x%llx\n", flags);
Marc Jones1f500842020-10-15 14:32:51 -060044}
45
46static bool need_assignment(uint64_t flags)
47{
48 if (flags & (IORESOURCE_STORED | IORESOURCE_RESERVE | IORESOURCE_FIXED |
49 IORESOURCE_ASSIGNED))
50 return false;
51 else
52 return true;
53}
54
55static uint64_t get_resource_base(STACK_RES *stack, RES_TYPE res_type)
56{
57 if (res_type == RES_TYPE_IO) {
58 assert(stack->PciResourceIoBase <= stack->PciResourceIoLimit);
59 return stack->PciResourceIoBase;
60 }
61 if (res_type == RES_TYPE_NONPREF_MEM) {
62 assert(stack->PciResourceMem32Base <= stack->PciResourceMem32Limit);
63 return stack->PciResourceMem32Base;
64 }
65 assert(stack->PciResourceMem64Base <= stack->PciResourceMem64Limit);
66 return stack->PciResourceMem64Base;
67}
68
69static void set_resource_base(STACK_RES *stack, RES_TYPE res_type, uint64_t base)
70{
71 if (res_type == RES_TYPE_IO) {
72 assert(base <= (stack->PciResourceIoLimit + 1));
73 stack->PciResourceIoBase = base;
74 } else if (res_type == RES_TYPE_NONPREF_MEM) {
75 assert(base <= (stack->PciResourceMem32Limit + 1));
76 stack->PciResourceMem32Base = base;
77 } else {
78 assert(base <= (stack->PciResourceMem64Limit + 1));
79 stack->PciResourceMem64Base = base;
80 }
81}
82
83static void assign_stack_resources(struct iiostack_resource *stack_list,
84 struct device *dev, struct resource *bridge);
85
86void xeonsp_pci_domain_scan_bus(struct device *dev)
87{
88 DEV_FUNC_ENTER(dev);
89 struct bus *link = dev->link_list;
90
91 printk(BIOS_SPEW, "%s:%s scanning buses under device %s\n",
92 __FILE__, __func__, dev_path(dev));
Elyes Haouasf1ba7d62022-09-13 10:03:44 +020093 while (link) {
Marc Jones1f500842020-10-15 14:32:51 -060094 if (link->secondary == 0) { // scan only PSTACK buses
95 struct device *d;
96 for (d = link->children; d; d = d->sibling)
97 pci_probe_dev(d, link, d->path.pci.devfn);
98 scan_bridges(link);
99 } else {
100 pci_scan_bus(link, PCI_DEVFN(0, 0), 0xff);
101 }
102 link = link->next;
103 }
104 DEV_FUNC_EXIT(dev);
105}
106
107static void xeonsp_pci_dev_iterator(struct bus *bus,
108 void (*dev_iterator)(struct device *, void *),
109 void (*res_iterator)(struct device *, struct resource *, void *),
110 void *data)
111{
112 struct device *curdev;
113 struct resource *res;
114
115 /* Walk through all devices and find which resources they need. */
116 for (curdev = bus->children; curdev; curdev = curdev->sibling) {
117 struct bus *link;
118
119 if (!curdev->enabled)
120 continue;
121
122 if (!curdev->ops || !curdev->ops->read_resources) {
123 if (curdev->path.type != DEVICE_PATH_APIC)
124 printk(BIOS_ERR, "%s missing read_resources\n",
125 dev_path(curdev));
126 continue;
127 }
128
129 if (dev_iterator)
130 dev_iterator(curdev, data);
131
132 if (res_iterator) {
133 for (res = curdev->resource_list; res; res = res->next)
134 res_iterator(curdev, res, data);
135 }
136
137 /* Read in the resources behind the current device's links. */
138 for (link = curdev->link_list; link; link = link->next)
139 xeonsp_pci_dev_iterator(link, dev_iterator, res_iterator, data);
140 }
141}
142
143static void xeonsp_pci_dev_read_resources(struct device *dev, void *data)
144{
145 post_log_path(dev);
146 dev->ops->read_resources(dev);
147}
148
/* Intentionally empty read_resources() replacement; installed by
   xeonsp_reset_pci_op() so the allocator does not re-read (and warn
   about) devices whose resources were already collected. */
static void xeonsp_pci_dev_dummy_func(struct device *dev)
{
}
152
153static void xeonsp_reset_pci_op(struct device *dev, void *data)
154{
155 if (dev->ops)
156 dev->ops->read_resources = xeonsp_pci_dev_dummy_func;
157}
158
159static STACK_RES *find_stack_for_bus(struct iiostack_resource *info, uint8_t bus)
160{
161 for (int i = 0; i < info->no_of_stacks; ++i) {
162 if (bus >= info->res[i].BusBase && bus <= info->res[i].BusLimit)
163 return &info->res[i];
164 }
165 return NULL;
166}
167
/*
 * File @res (owned by @dev) under the per-stack list @root, grouped by
 * alignment. Groups are kept sorted by descending alignment; within a
 * group, resources are appended in insertion order. Both the group
 * records and the pci_resource entries are heap-allocated and freed
 * later by reclaim_resource_mem().
 */
static void add_res_to_stack(struct stack_dev_resource **root,
	struct device *dev, struct resource *res)
{
	/* Walk the sorted list looking for an equal-alignment group or
	   the point where a new group must be inserted. */
	struct stack_dev_resource *cur = *root;
	while (cur) {
		if (cur->align == res->align || !cur->next) /* equal or last record */
			break;
		else if (cur->align > res->align) {
			if (cur->next->align < res->align) /* need to insert new record here */
				break;
			cur = cur->next;
		} else {
			/* cur->align < res->align: new group goes before
			   'cur' (handled below as head insertion).
			   NOTE(review): this can only trigger at the list
			   head, since larger alignments were skipped above
			   — verify against list-ordering expectations. */
			break;
		}
	}

	struct stack_dev_resource *nr;
	if (!cur || cur->align != res->align) { /* need to add new record */
		nr = malloc(sizeof(struct stack_dev_resource));
		if (nr == 0)
			die("assign_resource_to_stack(): out of memory.\n");
		memset(nr, 0, sizeof(struct stack_dev_resource));
		nr->align = res->align;
		if (!cur) {
			*root = nr; /* head node */
		} else if (cur->align > nr->align) {
			/* insert after 'cur' */
			if (!cur->next) {
				cur->next = nr;
			} else {
				nr->next = cur->next;
				cur->next = nr;
			}
		} else { /* insert in the beginning */
			nr->next = cur;
			*root = nr;
		}
	} else {
		nr = cur;
	}

	assert(nr && nr->align == res->align);

	/* Append the (dev, res) pair at the tail of the group's children. */
	struct pci_resource *npr = malloc(sizeof(struct pci_resource));
	if (!npr)
		die("%s: out of memory.\n", __func__);
	npr->res = res;
	npr->dev = dev;
	npr->next = NULL;

	if (!nr->children) {
		nr->children = npr;
	} else {
		struct pci_resource *pr = nr->children;
		while (pr->next)
			pr = pr->next;
		pr->next = npr;
	}
}
226
/*
 * Lay out every resource collected in @res_root inside the @res_type
 * window of @stack, then advance the window base past the placed range.
 *
 * Resources are placed in list order (groups sorted by descending
 * alignment), each aligned up to its own requirement. When @bridge is
 * non-NULL, the bridge window is anchored at the first placement, its
 * alignment raised to the largest child alignment, and its size/limit
 * grown to cover everything placed here. An empty bridge still gets a
 * minimal window of granularity 'gran'.
 */
static void reserve_dev_resources(STACK_RES *stack, RES_TYPE res_type,
	struct stack_dev_resource *res_root, struct resource *bridge)
{
	uint64_t orig_base, base;

	orig_base = get_resource_base(stack, res_type);

	base = orig_base;
	int first = 1;
	while (res_root) { /* loop through all devices grouped by alignment requirements */
		struct pci_resource *pr = res_root->children;
		while (pr) {
			if (first) {
				if (bridge) { /* takes highest alignment */
					if (bridge->align < pr->res->align)
						bridge->align = pr->res->align;
					orig_base = ALIGN_UP(orig_base, 1 << bridge->align);
				} else {
					orig_base = ALIGN_UP(orig_base, 1 << pr->res->align);
				}
				base = orig_base;

				if (bridge)
					bridge->base = base;
				pr->res->base = base;
				first = 0;
			} else {
				/* subsequent resources pack behind the previous one */
				pr->res->base = ALIGN_UP(base, 1 << pr->res->align);
			}
			pr->res->limit = pr->res->base + pr->res->size - 1;
			base = pr->res->limit + 1;
			pr->res->flags |= (IORESOURCE_ASSIGNED);
			pr = pr->next;
		}
		res_root = res_root->next;
	}

	if (bridge) {
		/* this bridge doesn't have any resources, will set it to default window */
		if (first) {
			orig_base = ALIGN_UP(orig_base, 1 << bridge->align);
			bridge->base = orig_base;
			base = orig_base + (1ULL << bridge->gran);
		}

		/* round the bridge window up to its own alignment */
		bridge->size = ALIGN_UP(base, 1 << bridge->align) - bridge->base;

		bridge->limit = bridge->base + bridge->size - 1;
		bridge->flags |= (IORESOURCE_ASSIGNED);
		base = bridge->limit + 1;
	}

	/* Later allocations from this window continue behind us. */
	set_resource_base(stack, res_type, base);
}
281
282static void reclaim_resource_mem(struct stack_dev_resource *res_root)
283{
284 while (res_root) { /* loop through all devices grouped by alignment requirements */
285 /* free pci_resource */
286 struct pci_resource *pr = res_root->children;
287 while (pr) {
288 struct pci_resource *dpr = pr;
289 pr = pr->next;
290 free(dpr);
291 }
292
293 /* free stack_dev_resource */
294 struct stack_dev_resource *ddr = res_root;
295 res_root = res_root->next;
296 free(ddr);
297 }
298}
299
/*
 * Recursively allocate the windows behind each bridge resource of @dev
 * and fold the resulting child ranges into @bridge (the parent bridge
 * window). When @bridge is non-NULL, only child bridge resources of
 * the same resource type are processed.
 */
static void assign_bridge_resources(struct iiostack_resource *stack_list,
	struct device *dev, struct resource *bridge)
{
	struct resource *res;
	if (!dev->enabled)
		return;

	for (res = dev->resource_list; res; res = res->next) {
		if (!(res->flags & IORESOURCE_BRIDGE) ||
		    (bridge && (get_res_type(bridge->flags) != get_res_type(res->flags))))
			continue;

		/* Allocate everything behind this child bridge first. */
		assign_stack_resources(stack_list, dev, res);

		if (!bridge)
			continue;

		/* First update: IORESOURCE_ASSIGNED doubles as the
		   "parent window initialized" marker. */
		if (!(bridge->flags & IORESOURCE_ASSIGNED)) {
			bridge->base = res->base;
			bridge->limit = res->limit;
			bridge->flags |= (IORESOURCE_ASSIGNED);
		} else {
			/* update bridge range from child bridge range */
			if (res->base < bridge->base)
				bridge->base = res->base;
			if (res->limit > bridge->limit)
				bridge->limit = res->limit;
		}
		bridge->size = (bridge->limit - bridge->base + 1);
	}
}
332
/*
 * Allocate IIO stack window space for everything on the buses below
 * @dev.
 *
 * Per link: look up the owning stack by bus number, recurse through
 * child bridges first (assign_bridge_resources()), then group the
 * remaining non-bridge resources by alignment and place them into the
 * stack windows. When @bridge is non-NULL, only the bridge's resource
 * type is processed and the bridge window is updated to cover the
 * allocations.
 */
static void assign_stack_resources(struct iiostack_resource *stack_list,
	struct device *dev, struct resource *bridge)
{
	struct bus *bus;

	/* Read in the resources behind the current device's links. */
	for (bus = dev->link_list; bus; bus = bus->next) {
		struct device *curdev;
		STACK_RES *stack;

		/* get IIO stack for this bus */
		stack = find_stack_for_bus(stack_list, bus->secondary);
		assert(stack);

		/* Assign resources to bridge */
		for (curdev = bus->children; curdev; curdev = curdev->sibling)
			assign_bridge_resources(stack_list, curdev, bridge);

		/* Pick non-bridged resources for resource allocation for each resource type */
		RES_TYPE res_types[MAX_RES_TYPES] = {
			RES_TYPE_IO,
			RES_TYPE_NONPREF_MEM,
			RES_TYPE_PREF_MEM
		};

		uint8_t no_res_types = MAX_RES_TYPES;

		/* if it is a bridge, only process matching bridge resource type */
		if (bridge) {
			res_types[0] = get_res_type(bridge->flags);
			no_res_types = 1;
		}

		printk(BIOS_DEBUG, "%s:%d no_res_types: %d\n", __func__, __LINE__,
			no_res_types);

		/* Process each resource type */
		for (int rt = 0; rt < no_res_types; ++rt) {
			struct stack_dev_resource *res_root = NULL;
			printk(BIOS_DEBUG, "%s:%d rt: %d\n", __func__, __LINE__, rt);
			for (curdev = bus->children; curdev; curdev = curdev->sibling) {
				struct resource *res;
				printk(BIOS_DEBUG, "%s:%d dev: %s\n",
					__func__, __LINE__, dev_path(curdev));
				if (!curdev->enabled)
					continue;

				for (res = curdev->resource_list; res; res = res->next) {
					printk(BIOS_DEBUG, "%s:%d dev: %s, flags: 0x%lx\n",
						__func__, __LINE__,
						dev_path(curdev), res->flags);
					/* skip empty, wrong-type, bridge and
					   non-assignable resources */
					if (res->size == 0 ||
					    get_res_type(res->flags) != res_types[rt] ||
					    (res->flags & IORESOURCE_BRIDGE) ||
					    !need_assignment(res->flags))
						continue;
					else
						add_res_to_stack(&res_root, curdev, res);
				}
			}

			/* Allocate resources and update bridge range */
			if (res_root || (bridge && !(bridge->flags & IORESOURCE_ASSIGNED))) {
				reserve_dev_resources(stack, res_types[rt], res_root, bridge);
				reclaim_resource_mem(res_root);
			}
		}
	}
}
402
/*
 * Return non-zero when PCI resources may be allocated above 4GiB.
 * With SOC_INTEL_PCIE_64BIT_ALLOC the answer is compile-time true;
 * otherwise it is read from the FSP IIO_UDS HOB.
 */
static uint8_t is_pci64bit_alloc(void)
{
/*
 * For SPR-SP FSP which supports SOC_INTEL_PCIE_64BITS_ALLOC,
 * Pci64BitResourceAllocation field does not exist in IIO_UDS HOB.
 */
#if CONFIG(SOC_INTEL_PCIE_64BIT_ALLOC)
	return 1;
#else
	const IIO_UDS *hob = get_iio_uds();
	return hob->PlatformData.Pci64BitResourceAllocation;
#endif
}
416
Marc Jones1f500842020-10-15 14:32:51 -0600417static void xeonsp_pci_domain_read_resources(struct device *dev)
418{
419 struct bus *link;
420
421 DEV_FUNC_ENTER(dev);
422
423 pci_domain_read_resources(dev);
424
425 /*
426 * Walk through all devices in this domain and read resources.
427 * Since there is no callback when read resource operation is
428 * complete for all devices, domain read resource function initiates
429 * read resources for all devices and swaps read resource operation
430 * with dummy function to avoid warning.
431 */
432 for (link = dev->link_list; link; link = link->next)
433 xeonsp_pci_dev_iterator(link, xeonsp_pci_dev_read_resources, NULL, NULL);
434
435 for (link = dev->link_list; link; link = link->next)
436 xeonsp_pci_dev_iterator(link, xeonsp_reset_pci_op, NULL, NULL);
437
438 struct iiostack_resource stack_info = {0};
Arthur Heymans165893b2020-11-06 12:15:41 +0100439 get_iiostack_info(&stack_info);
440 if (!is_pci64bit_alloc()) {
Marc Jones1f500842020-10-15 14:32:51 -0600441 /*
442 * Split 32 bit address space between prefetchable and
443 * non-prefetchable windows
444 */
445 for (int s = 0; s < stack_info.no_of_stacks; ++s) {
446 STACK_RES *res = &stack_info.res[s];
447 uint64_t length = (res->PciResourceMem32Limit -
448 res->PciResourceMem32Base + 1)/2;
449 res->PciResourceMem64Limit = res->PciResourceMem32Limit;
450 res->PciResourceMem32Limit = (res->PciResourceMem32Base + length - 1);
451 res->PciResourceMem64Base = res->PciResourceMem32Limit + 1;
452 }
453 }
454
455 /* assign resources */
456 assign_stack_resources(&stack_info, dev, NULL);
457
458 DEV_FUNC_EXIT(dev);
459}
460
461static void reset_resource_to_unassigned(struct device *dev, struct resource *res, void *data)
462{
463 if ((res->flags & (IORESOURCE_IO | IORESOURCE_MEM)) &&
464 !(res->flags & (IORESOURCE_FIXED | IORESOURCE_RESERVE))) {
465 res->flags &= ~IORESOURCE_ASSIGNED;
466 }
467}
468
469void xeonsp_pci_domain_set_resources(struct device *dev)
470{
471 DEV_FUNC_ENTER(dev);
472
473 print_resource_tree(dev, BIOS_SPEW, "Before xeonsp pci domain set resource");
474
475 /* reset bus 0 dev resource assignment - need to change them to FSP IIOStack window */
476 xeonsp_pci_dev_iterator(dev->link_list, NULL, reset_resource_to_unassigned, NULL);
477
478 /* update dev resources based on IIOStack IO/Mem32/Mem64 windows */
479 xeonsp_pci_domain_read_resources(dev);
480
481 struct bus *link = dev->link_list;
Elyes Haouasf1ba7d62022-09-13 10:03:44 +0200482 while (link) {
Marc Jones1f500842020-10-15 14:32:51 -0600483 assign_resources(link);
484 link = link->next;
485 }
486
487 print_resource_tree(dev, BIOS_SPEW, "After xeonsp pci domain set resource");
488
489 DEV_FUNC_EXIT(dev);
490}
491
/* Attach IIO stack bus numbers with dummy device to PCI DOMAIN 0000 device */
void attach_iio_stacks(struct device *dev)
{
	struct device dummy;	/* scratch device used only for config-space reads */
	struct iiostack_resource stack_info = {0};

	DEV_FUNC_ENTER(dev);

	get_iiostack_info(&stack_info);

	for (int s = 0; s < stack_info.no_of_stacks; ++s) {
		STACK_RES *sr = &stack_info.res[s];
		/* only non zero bus no. needs to be enumerated */
		if (sr->BusBase == 0) {
			/* Update BUS 0 BusLimit */
			dev->link_list->max_subordinate = sr->BusLimit;
			continue;
		}

		for (int b = sr->BusBase; b <= sr->BusLimit; ++b) {
			/* Stack-local bus so the dummy device can address
			   config space on bus 'b'; cloned from the domain's
			   bus, then re-targeted. */
			struct bus tmp_bus;
			memset(&tmp_bus, 0, sizeof(tmp_bus));
			memcpy(&tmp_bus, dev->bus, sizeof(tmp_bus));
			tmp_bus.secondary = b;
			tmp_bus.subordinate = b;
			tmp_bus.max_subordinate = sr->BusLimit;
			tmp_bus.dev = NULL;
			tmp_bus.children = NULL;
			tmp_bus.next = NULL;
			tmp_bus.link_num = 1;

			/* NOTE(review): 'dummy' is only partially initialized;
			   this relies on pci_read_config32()/dev_path() reading
			   just the bus and path fields — confirm. */
			dummy.bus = &tmp_bus;
			dummy.path.type = DEVICE_PATH_PCI;
			dummy.path.pci.devfn = 0;
			uint32_t id = pci_read_config32(&dummy, PCI_VENDOR_ID);
			if (id == 0xffffffff) {
				printk(BIOS_DEBUG, "IIO Stack device %s not visible\n",
					dev_path(&dummy));
				continue;
			}

			printk(BIOS_DEBUG, "%s Attaching IIO Bus %s\n", __func__,
				dev_path(&dummy));
			printk(BIOS_DEBUG, "	%s attach secondary: 0x%x, subordinate: 0x%x, dev: %s\n",
				__func__, tmp_bus.secondary,
				tmp_bus.subordinate, dev_path(&dummy));

			/* Persist the bus on the heap and append it to the
			   domain's link list. */
			struct bus *iiostack_bus = malloc(sizeof(struct bus));
			if (iiostack_bus == NULL)
				die("%s: out of memory.\n", __func__);
			memcpy(iiostack_bus, &tmp_bus, sizeof(*iiostack_bus));

			if (dev->link_list == NULL) {
				dev->link_list = iiostack_bus;
			} else {
				struct bus *nlink = dev->link_list;
				while (nlink->next != NULL)
					nlink = nlink->next;
				nlink->next = iiostack_bus;
			}
		}
	}

	DEV_FUNC_EXIT(dev);
}