blob: 0c1d52bc86f3448bcc21826ec26d80e300c3d3e3 [file] [log] [blame]
Marc Jones1f500842020-10-15 14:32:51 -06001/* SPDX-License-Identifier: GPL-2.0-or-later */
2
#include <assert.h>
#include <console/console.h>
#include <device/pci.h>
#include <post.h>
#include <soc/chip_common.h>
#include <soc/soc_util.h>
#include <soc/util.h>
#include <stdint.h>
#include <stdlib.h>
11
Arthur Heymans550f55e2022-08-24 14:44:26 +020012static const STACK_RES *domain_to_stack_res(const struct device *dev)
Marc Jones1f500842020-10-15 14:32:51 -060013{
Arthur Heymans550f55e2022-08-24 14:44:26 +020014 assert(dev->path.type == DEVICE_PATH_DOMAIN);
Patrick Rudolph8c99ebc2024-01-19 17:28:47 +010015 const union xeon_domain_path dn = {
16 .domain_path = dev->path.domain.domain
17 };
Arthur Heymans550f55e2022-08-24 14:44:26 +020018
19 const IIO_UDS *hob = get_iio_uds();
20 assert(hob != NULL);
21
Patrick Rudolph8c99ebc2024-01-19 17:28:47 +010022 return &hob->PlatformData.IIO_resource[dn.socket].StackRes[dn.stack];
Marc Jones1f500842020-10-15 14:32:51 -060023}
24
Patrick Rudolph9fa40482024-01-18 08:55:08 +010025/**
26 * Find a device of a given vendor and type for the specified socket.
27 * The function iterates over all PCI domains of the specified socket
28 * and matches the PCI vendor and device ID.
29 *
30 * @param socket The socket where to search for the device.
31 * @param vendor A PCI vendor ID (e.g. 0x8086 for Intel).
32 * @param device A PCI device ID.
33 * @return Pointer to the device struct.
34 */
35struct device *dev_find_device_on_socket(uint8_t socket, u16 vendor, u16 device)
36{
37 struct device *domain, *dev = NULL;
38 union xeon_domain_path dn;
39
40 while ((dev = dev_find_device(vendor, device, dev))) {
41 domain = dev_get_pci_domain(dev);
42 if (!domain)
43 continue;
44 dn.domain_path = domain->path.domain.domain;
45 if (dn.socket != socket)
46 continue;
47 return dev;
48 }
49
50 return NULL;
51}
52
53/**
54 * Returns the socket ID where the specified device is connected to.
55 * This is an integer in the range [0, CONFIG_MAX_SOCKET).
56 *
57 * @param dev The device to look up
58 *
59 * @return Socket ID the device is attached to, negative number on error.
60 */
61int iio_pci_domain_socket_from_dev(struct device *dev)
62{
63 struct device *domain;
64 union xeon_domain_path dn;
65
66 if (dev->path.type == DEVICE_PATH_DOMAIN)
67 domain = dev;
68 else
69 domain = dev_get_pci_domain(dev);
70
71 if (!domain)
72 return -1;
73
74 dn.domain_path = domain->path.domain.domain;
75
76 return dn.socket;
77}
78
79/**
80 * Returns the stack ID where the specified device is connected to.
81 * This is an integer in the range [0, MAX_IIO_STACK).
82 *
83 * @param dev The device to look up
84 *
85 * @return Stack ID the device is attached to, negative number on error.
86 */
87int iio_pci_domain_stack_from_dev(struct device *dev)
88{
89 struct device *domain;
90 union xeon_domain_path dn;
91
92 if (dev->path.type == DEVICE_PATH_DOMAIN)
93 domain = dev;
94 else
95 domain = dev_get_pci_domain(dev);
96
97 if (!domain)
98 return -1;
99
100 dn.domain_path = domain->path.domain.domain;
101
102 return dn.stack;
103}
104
Arthur Heymans550f55e2022-08-24 14:44:26 +0200105void iio_pci_domain_read_resources(struct device *dev)
Marc Jones1f500842020-10-15 14:32:51 -0600106{
107 struct resource *res;
Arthur Heymans550f55e2022-08-24 14:44:26 +0200108 const STACK_RES *sr = domain_to_stack_res(dev);
109
110 if (!sr)
Marc Jones1f500842020-10-15 14:32:51 -0600111 return;
112
Arthur Heymans550f55e2022-08-24 14:44:26 +0200113 int index = 0;
Marc Jones1f500842020-10-15 14:32:51 -0600114
Arthur Heymans550f55e2022-08-24 14:44:26 +0200115 if (dev->path.domain.domain == 0) {
116 /* The 0 - 0xfff IO range is not reported by the HOB but still gets decoded */
117 res = new_resource(dev, index++);
118 res->base = 0;
119 res->size = 0x1000;
120 res->limit = 0xfff;
121 res->flags = IORESOURCE_IO | IORESOURCE_SUBTRACTIVE | IORESOURCE_ASSIGNED;
122 }
Marc Jones1f500842020-10-15 14:32:51 -0600123
Arthur Heymans550f55e2022-08-24 14:44:26 +0200124 if (sr->PciResourceIoBase < sr->PciResourceIoLimit) {
125 res = new_resource(dev, index++);
126 res->base = sr->PciResourceIoBase;
127 res->limit = sr->PciResourceIoLimit;
128 res->size = res->limit - res->base + 1;
129 res->flags = IORESOURCE_IO | IORESOURCE_ASSIGNED;
130 }
Marc Jones1f500842020-10-15 14:32:51 -0600131
Arthur Heymans550f55e2022-08-24 14:44:26 +0200132 if (sr->PciResourceMem32Base < sr->PciResourceMem32Limit) {
133 res = new_resource(dev, index++);
134 res->base = sr->PciResourceMem32Base;
135 res->limit = sr->PciResourceMem32Limit;
136 res->size = res->limit - res->base + 1;
137 res->flags = IORESOURCE_MEM | IORESOURCE_ASSIGNED;
138 }
139
140 if (sr->PciResourceMem64Base < sr->PciResourceMem64Limit) {
141 res = new_resource(dev, index++);
142 res->base = sr->PciResourceMem64Base;
143 res->limit = sr->PciResourceMem64Limit;
144 res->size = res->limit - res->base + 1;
145 res->flags = IORESOURCE_MEM | IORESOURCE_ASSIGNED;
Marc Jones1f500842020-10-15 14:32:51 -0600146 }
147}
148
Arthur Heymans550f55e2022-08-24 14:44:26 +0200149void iio_pci_domain_scan_bus(struct device *dev)
Marc Jones1f500842020-10-15 14:32:51 -0600150{
Arthur Heymans550f55e2022-08-24 14:44:26 +0200151 const STACK_RES *sr = domain_to_stack_res(dev);
152 if (!sr)
153 return;
Marc Jones1f500842020-10-15 14:32:51 -0600154
Arthur Heymans3e99ba02024-01-25 22:26:07 +0100155 struct bus *bus = alloc_bus(dev);
Arthur Heymans550f55e2022-08-24 14:44:26 +0200156 bus->secondary = sr->BusBase;
157 bus->subordinate = sr->BusBase;
158 bus->max_subordinate = sr->BusLimit;
159
160 printk(BIOS_SPEW, "Scanning IIO stack %d: busses %x-%x\n", dev->path.domain.domain,
Arthur Heymans7fcd4d52023-08-24 15:12:19 +0200161 dev->downstream->secondary, dev->downstream->max_subordinate);
Arthur Heymans550f55e2022-08-24 14:44:26 +0200162 pci_host_bridge_scan_bus(dev);
Marc Jones1f500842020-10-15 14:32:51 -0600163}
164
/*
 * Used by IIO stacks for PCIe bridges. Those contain one PCI host bridge;
 * all the bus numbers on the IIO stack can be used for this bridge.
 */
static struct device_operations iio_pcie_domain_ops = {
	.read_resources = iio_pci_domain_read_resources,
	.set_resources = pci_domain_set_resources,
	.scan_bus = iio_pci_domain_scan_bus,
};
Arthur Heymans165893b2020-11-06 12:15:41 +0100174
/*
 * Used by UBOX stacks. Those contain multiple PCI host bridges, each having
 * only one bus with UBOX devices. UBOX devices have no resources, hence the
 * no-op resource handlers.
 */
static struct device_operations ubox_pcie_domain_ops = {
	.read_resources = noop_read_resources,
	.set_resources = noop_set_resources,
	.scan_bus = pci_host_bridge_scan_bus,
};
184
185/*
186 * On the first Xeon-SP generations there are no separate UBOX stacks,
187 * and the UBOX devices reside on the first and second IIO. Starting
188 * with 3rd gen Xeon-SP the UBOX devices are located on their own IIO.
189 */
190static void soc_create_ubox_domains(const union xeon_domain_path dp, struct bus *upstream,
191 const unsigned int bus_base, const unsigned int bus_limit)
192{
193 union xeon_domain_path new_path = {
194 .domain_path = dp.domain_path
195 };
196
197 for (int i = bus_base; i <= bus_limit; i++) {
198 new_path.bus = i;
199
200 struct device_path path = {
201 .type = DEVICE_PATH_DOMAIN,
202 .domain = {
203 .domain = new_path.domain_path,
204 },
205 };
206 struct device *const domain = alloc_dev(upstream, &path);
207 if (!domain)
208 die("%s: out of memory.\n", __func__);
209
210 domain->ops = &ubox_pcie_domain_ops;
211
212 struct bus *const bus = alloc_bus(domain);
213 bus->secondary = i;
214 bus->subordinate = bus->secondary;
215 bus->max_subordinate = bus->secondary;
216 }
217}
218
/*
 * Attach stacks as domains.
 *
 * Walks every stack of every IIO reported by the IIO_UDS HOB and creates
 * a PCI domain device (or several, for UBOX stacks) under dev->upstream
 * for each stack that decodes a valid bus range.
 */
void attach_iio_stacks(struct device *dev)
{
	const IIO_UDS *hob = get_iio_uds();
	union xeon_domain_path dn = { .domain_path = 0 };
	if (!hob)
		return;

	for (int s = 0; s < hob->PlatformData.numofIIO; ++s) {
		for (int x = 0; x < MAX_LOGIC_IIO_STACK; ++x) {
			/*
			 * Skip socket 0 / stack 0 — presumably it is handled
			 * elsewhere as the boot domain; confirm against the
			 * chipset's devicetree setup.
			 */
			if (s == 0 && x == 0)
				continue;

			const STACK_RES *ri = &hob->PlatformData.IIO_resource[s].StackRes[x];
			/* An inverted bus range marks an unused/absent stack. */
			if (ri->BusBase > ri->BusLimit)
				continue;

			/* Prepare domain path */
			dn.socket = s;
			dn.stack = x;
			dn.bus = ri->BusBase;

			if (is_ubox_stack_res(ri)) {
				/* One domain per bus number of the UBOX stack. */
				soc_create_ubox_domains(dn, dev->upstream, ri->BusBase, ri->BusLimit);
			} else if (is_pcie_iio_stack_res(ri)) {
				/* One domain spanning the whole stack bus range. */
				struct device_path path;
				path.type = DEVICE_PATH_DOMAIN;
				path.domain.domain = dn.domain_path;
				struct device *iio_domain = alloc_dev(dev->upstream, &path);
				if (iio_domain == NULL)
					die("%s: out of memory.\n", __func__);

				iio_domain->ops = &iio_pcie_domain_ops;
			} else if (CONFIG(HAVE_IOAT_DOMAINS))
				soc_create_ioat_domains(dn, dev->upstream, ri);
		}
	}
}
Marc Jones1f500842020-10-15 14:32:51 -0600256}