blob: 143cc89095cbf23a70f88e66fdb150cab73b7591 [file] [log] [blame]
Shuo Liuec58beb2024-03-11 07:14:07 +08001/* SPDX-License-Identifier: GPL-2.0-or-later */
2
3#include <acpi/acpigen_pci.h>
4#include <assert.h>
5#include <console/console.h>
6#include <device/pci.h>
Shuo Liu49437a62024-04-02 03:28:34 +08007#include <device/pci_ids.h>
8#include <device/pci_def.h>
9#include <soc/pci_devs.h>
Shuo Liuec58beb2024-03-11 07:14:07 +080010#include <intelblocks/acpi.h>
Shuo Liuec58beb2024-03-11 07:14:07 +080011#include <soc/acpi.h>
12#include <soc/chip_common.h>
13#include <soc/soc_util.h>
14#include <soc/util.h>
Shuo Liuec58beb2024-03-11 07:14:07 +080015
16static const STACK_RES *domain_to_stack_res(const struct device *dev)
17{
18 assert(dev->path.type == DEVICE_PATH_DOMAIN);
19 const union xeon_domain_path dn = {
20 .domain_path = dev->path.domain.domain
21 };
22
23 const IIO_UDS *hob = get_iio_uds();
24 assert(hob != NULL);
25
26 return &hob->PlatformData.IIO_resource[dn.socket].StackRes[dn.stack];
27}
28
29static void iio_pci_domain_read_resources(struct device *dev)
30{
31 struct resource *res;
32 const STACK_RES *sr = domain_to_stack_res(dev);
33
34 if (!sr)
35 return;
36
37 int index = 0;
38
39 if (is_domain0(dev)) {
40 /* The 0 - 0xfff IO range is not reported by the HOB but still gets decoded */
41 res = new_resource(dev, index++);
42 res->base = 0;
43 res->size = 0x1000;
44 res->limit = 0xfff;
45 res->flags = IORESOURCE_IO | IORESOURCE_SUBTRACTIVE | IORESOURCE_ASSIGNED;
46 }
47
48 if (sr->PciResourceIoBase < sr->PciResourceIoLimit) {
49 res = new_resource(dev, index++);
50 res->base = sr->PciResourceIoBase;
51 res->limit = sr->PciResourceIoLimit;
52 res->size = res->limit - res->base + 1;
53 res->flags = IORESOURCE_IO | IORESOURCE_ASSIGNED;
54 }
55
56 if (sr->PciResourceMem32Base < sr->PciResourceMem32Limit) {
57 res = new_resource(dev, index++);
58 res->base = sr->PciResourceMem32Base;
59 res->limit = sr->PciResourceMem32Limit;
60 res->size = res->limit - res->base + 1;
61 res->flags = IORESOURCE_MEM | IORESOURCE_ASSIGNED;
62 }
63
64 if (sr->PciResourceMem64Base < sr->PciResourceMem64Limit) {
65 res = new_resource(dev, index++);
66 res->base = sr->PciResourceMem64Base;
67 res->limit = sr->PciResourceMem64Limit;
68 res->size = res->limit - res->base + 1;
69 res->flags = IORESOURCE_MEM | IORESOURCE_ASSIGNED;
70 }
71}
72
/*
 * Used by IIO stacks for PCIe bridges. Each such stack contains a single
 * PCI host bridge, and all the bus numbers on the IIO stack can be used
 * for this bridge.
 */
static struct device_operations iio_pcie_domain_ops = {
	.read_resources = iio_pci_domain_read_resources,
	.set_resources = pci_domain_set_resources,
	.scan_bus = pci_host_bridge_scan_bus,
#if CONFIG(HAVE_ACPI_TABLES)
	.acpi_name = soc_acpi_name,
	.write_acpi_tables = northbridge_write_acpi_tables,
	.acpi_fill_ssdt = pci_domain_fill_ssdt,
#endif
};
87
/*
 * Used by UBOX stacks. Those contain multiple PCI host bridges, each having
 * only one bus with UBOX devices. UBOX devices have no resources, hence the
 * no-op read/set resource handlers.
 */
static struct device_operations ubox_pcie_domain_ops = {
	.read_resources = noop_read_resources,
	.set_resources = noop_set_resources,
	.scan_bus = pci_host_bridge_scan_bus,
#if CONFIG(HAVE_ACPI_TABLES)
	.acpi_name = soc_acpi_name,
	.write_acpi_tables = northbridge_write_acpi_tables,
	.acpi_fill_ssdt = pci_domain_fill_ssdt,
#endif
};
102
/* Create a single PCIe domain spanning the stack's entire bus range. */
static void create_pcie_domains(const union xeon_domain_path dp, struct bus *upstream,
				const STACK_RES *sr, const size_t pci_segment_group)
{
	create_domain(dp, upstream, sr->BusBase, sr->BusLimit, DOMAIN_TYPE_PCIE,
		      &iio_pcie_domain_ops, pci_segment_group);
}
109
110/*
111 * On the first Xeon-SP generations there are no separate UBOX stacks,
112 * and the UBOX devices reside on the first and second IIO. Starting
113 * with 3rd gen Xeon-SP the UBOX devices are located on their own IIO.
114 */
115static void create_ubox_domains(const union xeon_domain_path dp, struct bus *upstream,
116 const STACK_RES *sr, const size_t pci_segment_group)
117{
118 /* Only expect 2 UBOX buses here */
119 assert(sr->BusBase + 1 == sr->BusLimit);
120
121 create_domain(dp, upstream, sr->BusBase, sr->BusBase, DOMAIN_TYPE_UBX0,
122 &ubox_pcie_domain_ops, pci_segment_group);
123 create_domain(dp, upstream, sr->BusLimit, sr->BusLimit, DOMAIN_TYPE_UBX1,
124 &ubox_pcie_domain_ops, pci_segment_group);
125}
126
127void create_cxl_domains(const union xeon_domain_path dp, struct bus *bus,
128 const STACK_RES *sr, const size_t pci_segment_group);
129
130#if CONFIG(SOC_INTEL_HAS_CXL)
131static void iio_cxl_domain_read_resources(struct device *dev)
132{
133 struct resource *res;
134 const STACK_RES *sr = domain_to_stack_res(dev);
135
136 if (!sr)
137 return;
138
139 int index = 0;
140
141 if (sr->IoBase < sr->PciResourceIoBase) {
142 res = new_resource(dev, index++);
143 res->base = sr->IoBase;
144 res->limit = sr->PciResourceIoBase - 1;
145 res->size = res->limit - res->base + 1;
146 res->flags = IORESOURCE_IO | IORESOURCE_ASSIGNED;
147 }
148
149 if (sr->Mmio32Base < sr->PciResourceMem32Base) {
150 res = new_resource(dev, index++);
151 res->base = sr->Mmio32Base;
152 res->limit = sr->PciResourceMem32Base - 1;
153 res->size = res->limit - res->base + 1;
154 res->flags = IORESOURCE_MEM | IORESOURCE_ASSIGNED;
155 }
156
157 if (sr->Mmio64Base < sr->PciResourceMem64Base) {
158 res = new_resource(dev, index++);
159 res->base = sr->Mmio64Base;
160 res->limit = sr->PciResourceMem64Base - 1;
161 res->size = res->limit - res->base + 1;
162 res->flags = IORESOURCE_MEM | IORESOURCE_ASSIGNED;
163 }
164}
165
/* Operations for the CXL 1.1 domains created in create_cxl_domains(). */
static struct device_operations iio_cxl_domain_ops = {
	.read_resources = iio_cxl_domain_read_resources,
	.set_resources = pci_domain_set_resources,
	.scan_bus = pci_host_bridge_scan_bus,
#if CONFIG(HAVE_ACPI_TABLES)
	.acpi_name = soc_acpi_name,
	.write_acpi_tables = northbridge_write_acpi_tables,
	.acpi_fill_ssdt = pci_domain_fill_ssdt,
#endif
};
176
/*
 * Split a CXL-capable IIO stack into two domains: the first bus carries
 * the PCIe RCiEPs, the remaining buses carry the CXL 1.1 end-points.
 */
void create_cxl_domains(const union xeon_domain_path dp, struct bus *bus,
			const STACK_RES *sr, const size_t pci_segment_group)
{
	/* The stack must span at least two buses for the split below */
	assert(sr->BusBase + 1 <= sr->BusLimit);

	/* 1st domain contains PCIe RCiEPs */
	create_domain(dp, bus, sr->BusBase, sr->BusBase, DOMAIN_TYPE_PCIE,
		      &iio_pcie_domain_ops, pci_segment_group);
	/* 2nd domain contains CXL 1.1 end-points */
	create_domain(dp, bus, sr->BusBase + 1, sr->BusLimit, DOMAIN_TYPE_CXL,
		      &iio_cxl_domain_ops, pci_segment_group);
}
189#endif //CONFIG(SOC_INTEL_HAS_CXL)
190
/*
 * Create the appropriate domain(s) for one IIO stack, dispatching on the
 * kind of resources the stack reports (UBOX, CXL, plain PCIe, or IOAT).
 * The order of the checks matters: CXL stacks also look like PCIe IIO
 * stacks, so they must be matched first.
 */
void create_xeonsp_domains(const union xeon_domain_path dp, struct bus *bus,
			const STACK_RES *sr, const size_t pci_segment_group)
{
	if (is_ubox_stack_res(sr))
		create_ubox_domains(dp, bus, sr, pci_segment_group);
	else if (CONFIG(SOC_INTEL_HAS_CXL) && is_iio_cxl_stack_res(sr))
		create_cxl_domains(dp, bus, sr, pci_segment_group);
	else if (is_pcie_iio_stack_res(sr))
		create_pcie_domains(dp, bus, sr, pci_segment_group);
	else if (CONFIG(HAVE_IOAT_DOMAINS) && is_ioat_iio_stack_res(sr))
		create_ioat_domains(dp, bus, sr, pci_segment_group);
}
Shuo Liu49437a62024-04-02 03:28:34 +0800203
204/*
205 * Route PAM segment access to DRAM
206 * Only call this code from socket0!
207 */
208void unlock_pam_regions(void)
209{
210 uint32_t pam0123_unlock_dram = 0x33333330;
211 uint32_t pam456_unlock_dram = 0x00333333;
212 /* Get UBOX(1) for socket0 */
213 uint32_t bus1 = socket0_get_ubox_busno(PCU_IIO_STACK);
214
215 /* Assume socket0 owns PCI segment 0 */
216 pci_io_write_config32(PCI_DEV(bus1, SAD_ALL_DEV, SAD_ALL_FUNC),
217 SAD_ALL_PAM0123_CSR, pam0123_unlock_dram);
218 pci_io_write_config32(PCI_DEV(bus1, SAD_ALL_DEV, SAD_ALL_FUNC),
219 SAD_ALL_PAM456_CSR, pam456_unlock_dram);
220
221 uint32_t reg1 = pci_io_read_config32(PCI_DEV(bus1, SAD_ALL_DEV,
222 SAD_ALL_FUNC), SAD_ALL_PAM0123_CSR);
223 uint32_t reg2 = pci_io_read_config32(PCI_DEV(bus1, SAD_ALL_DEV,
224 SAD_ALL_FUNC), SAD_ALL_PAM456_CSR);
225 printk(BIOS_DEBUG, "%s:%s pam0123_csr: 0x%x, pam456_csr: 0x%x\n",
226 __FILE__, __func__, reg1, reg2);
227}