/* SPDX-License-Identifier: GPL-2.0-or-later */

#include <acpi/acpigen_pci.h>
#include <assert.h>
#include <console/console.h>
#include <device/pci.h>
#include <device/pci_ids.h>
#include <device/pci_def.h>
#include <soc/pci_devs.h>
#include <intelblocks/acpi.h>
#include <post.h>
#include <soc/acpi.h>
#include <soc/chip_common.h>
#include <soc/soc_util.h>
#include <soc/util.h>
#include <stdlib.h>

18static const STACK_RES *domain_to_stack_res(const struct device *dev)
19{
20 assert(dev->path.type == DEVICE_PATH_DOMAIN);
21 const union xeon_domain_path dn = {
22 .domain_path = dev->path.domain.domain
23 };
24
25 const IIO_UDS *hob = get_iio_uds();
26 assert(hob != NULL);
27
28 return &hob->PlatformData.IIO_resource[dn.socket].StackRes[dn.stack];
29}
30
31static void iio_pci_domain_read_resources(struct device *dev)
32{
33 struct resource *res;
34 const STACK_RES *sr = domain_to_stack_res(dev);
35
36 if (!sr)
37 return;
38
39 int index = 0;
40
41 if (is_domain0(dev)) {
42 /* The 0 - 0xfff IO range is not reported by the HOB but still gets decoded */
43 res = new_resource(dev, index++);
44 res->base = 0;
45 res->size = 0x1000;
46 res->limit = 0xfff;
47 res->flags = IORESOURCE_IO | IORESOURCE_SUBTRACTIVE | IORESOURCE_ASSIGNED;
48 }
49
50 if (sr->PciResourceIoBase < sr->PciResourceIoLimit) {
51 res = new_resource(dev, index++);
52 res->base = sr->PciResourceIoBase;
53 res->limit = sr->PciResourceIoLimit;
54 res->size = res->limit - res->base + 1;
55 res->flags = IORESOURCE_IO | IORESOURCE_ASSIGNED;
56 }
57
58 if (sr->PciResourceMem32Base < sr->PciResourceMem32Limit) {
59 res = new_resource(dev, index++);
60 res->base = sr->PciResourceMem32Base;
61 res->limit = sr->PciResourceMem32Limit;
62 res->size = res->limit - res->base + 1;
63 res->flags = IORESOURCE_MEM | IORESOURCE_ASSIGNED;
64 }
65
66 if (sr->PciResourceMem64Base < sr->PciResourceMem64Limit) {
67 res = new_resource(dev, index++);
68 res->base = sr->PciResourceMem64Base;
69 res->limit = sr->PciResourceMem64Limit;
70 res->size = res->limit - res->base + 1;
71 res->flags = IORESOURCE_MEM | IORESOURCE_ASSIGNED;
72 }
73}
74
/*
 * Used by IIO stacks for PCIe bridges. Each such stack contains exactly
 * one PCI host bridge; all the bus numbers assigned to the IIO stack
 * are available behind it.
 */
static struct device_operations iio_pcie_domain_ops = {
	.read_resources = iio_pci_domain_read_resources,
	.set_resources = pci_domain_set_resources,
	.scan_bus = pci_host_bridge_scan_bus,
#if CONFIG(HAVE_ACPI_TABLES)
	.acpi_name = soc_acpi_name,
	.write_acpi_tables = northbridge_write_acpi_tables,
	.acpi_fill_ssdt = pci_domain_fill_ssdt,
#endif
};
89
/*
 * Used by UBOX stacks. Those contain multiple PCI host bridges, each
 * having only one bus with UBOX devices. UBOX devices have no
 * resources, hence the no-op resource handlers.
 */
static struct device_operations ubox_pcie_domain_ops = {
	.read_resources = noop_read_resources,
	.set_resources = noop_set_resources,
	.scan_bus = pci_host_bridge_scan_bus,
#if CONFIG(HAVE_ACPI_TABLES)
	.acpi_name = soc_acpi_name,
	.write_acpi_tables = northbridge_write_acpi_tables,
	.acpi_fill_ssdt = pci_domain_fill_ssdt,
#endif
};
104
/* Create a single PCIe domain spanning every bus number of the IIO stack. */
static void create_pcie_domains(const union xeon_domain_path dp, struct bus *upstream,
		const STACK_RES *sr, const size_t pci_segment_group)
{
	create_domain(dp, upstream, sr->BusBase, sr->BusLimit, DOMAIN_TYPE_PCIE,
			&iio_pcie_domain_ops, pci_segment_group);
}
111
112/*
113 * On the first Xeon-SP generations there are no separate UBOX stacks,
114 * and the UBOX devices reside on the first and second IIO. Starting
115 * with 3rd gen Xeon-SP the UBOX devices are located on their own IIO.
116 */
117static void create_ubox_domains(const union xeon_domain_path dp, struct bus *upstream,
118 const STACK_RES *sr, const size_t pci_segment_group)
119{
120 /* Only expect 2 UBOX buses here */
121 assert(sr->BusBase + 1 == sr->BusLimit);
122
123 create_domain(dp, upstream, sr->BusBase, sr->BusBase, DOMAIN_TYPE_UBX0,
124 &ubox_pcie_domain_ops, pci_segment_group);
125 create_domain(dp, upstream, sr->BusLimit, sr->BusLimit, DOMAIN_TYPE_UBX1,
126 &ubox_pcie_domain_ops, pci_segment_group);
127}
128
/*
 * Prototype is declared unconditionally: the call site below is guarded
 * by if (CONFIG(SOC_INTEL_HAS_CXL)) and relies on dead-code elimination,
 * so the name must be visible even when the definition is compiled out.
 */
void create_cxl_domains(const union xeon_domain_path dp, struct bus *bus,
		const STACK_RES *sr, const size_t pci_segment_group);
131
132#if CONFIG(SOC_INTEL_HAS_CXL)
133static void iio_cxl_domain_read_resources(struct device *dev)
134{
135 struct resource *res;
136 const STACK_RES *sr = domain_to_stack_res(dev);
137
138 if (!sr)
139 return;
140
141 int index = 0;
142
143 if (sr->IoBase < sr->PciResourceIoBase) {
144 res = new_resource(dev, index++);
145 res->base = sr->IoBase;
146 res->limit = sr->PciResourceIoBase - 1;
147 res->size = res->limit - res->base + 1;
148 res->flags = IORESOURCE_IO | IORESOURCE_ASSIGNED;
149 }
150
151 if (sr->Mmio32Base < sr->PciResourceMem32Base) {
152 res = new_resource(dev, index++);
153 res->base = sr->Mmio32Base;
154 res->limit = sr->PciResourceMem32Base - 1;
155 res->size = res->limit - res->base + 1;
156 res->flags = IORESOURCE_MEM | IORESOURCE_ASSIGNED;
157 }
158
159 if (sr->Mmio64Base < sr->PciResourceMem64Base) {
160 res = new_resource(dev, index++);
161 res->base = sr->Mmio64Base;
162 res->limit = sr->PciResourceMem64Base - 1;
163 res->size = res->limit - res->base + 1;
164 res->flags = IORESOURCE_MEM | IORESOURCE_ASSIGNED;
165 }
166}
167
/* Operations for the CXL part of an IIO stack (resources below the PCI window). */
static struct device_operations iio_cxl_domain_ops = {
	.read_resources = iio_cxl_domain_read_resources,
	.set_resources = pci_domain_set_resources,
	.scan_bus = pci_host_bridge_scan_bus,
#if CONFIG(HAVE_ACPI_TABLES)
	.acpi_name = soc_acpi_name,
	.write_acpi_tables = northbridge_write_acpi_tables,
	.acpi_fill_ssdt = pci_domain_fill_ssdt,
#endif
};
178
179void create_cxl_domains(const union xeon_domain_path dp, struct bus *bus,
180 const STACK_RES *sr, const size_t pci_segment_group)
181{
182 assert(sr->BusBase + 1 <= sr->BusLimit);
183
184 /* 1st domain contains PCIe RCiEPs */
185 create_domain(dp, bus, sr->BusBase, sr->BusBase, DOMAIN_TYPE_PCIE,
186 &iio_pcie_domain_ops, pci_segment_group);
187 /* 2nd domain contains CXL 1.1 end-points */
188 create_domain(dp, bus, sr->BusBase + 1, sr->BusLimit, DOMAIN_TYPE_CXL,
189 &iio_cxl_domain_ops, pci_segment_group);
190}
191#endif //CONFIG(SOC_INTEL_HAS_CXL)
192
193void create_xeonsp_domains(const union xeon_domain_path dp, struct bus *bus,
194 const STACK_RES *sr, const size_t pci_segment_group)
195{
196 if (is_ubox_stack_res(sr))
197 create_ubox_domains(dp, bus, sr, pci_segment_group);
198 else if (CONFIG(SOC_INTEL_HAS_CXL) && is_iio_cxl_stack_res(sr))
199 create_cxl_domains(dp, bus, sr, pci_segment_group);
200 else if (is_pcie_iio_stack_res(sr))
201 create_pcie_domains(dp, bus, sr, pci_segment_group);
202 else if (CONFIG(HAVE_IOAT_DOMAINS) && is_ioat_iio_stack_res(sr))
203 create_ioat_domains(dp, bus, sr, pci_segment_group);
204}
Shuo Liu49437a62024-04-02 03:28:34 +0800205
206/*
207 * Route PAM segment access to DRAM
208 * Only call this code from socket0!
209 */
210void unlock_pam_regions(void)
211{
212 uint32_t pam0123_unlock_dram = 0x33333330;
213 uint32_t pam456_unlock_dram = 0x00333333;
214 /* Get UBOX(1) for socket0 */
215 uint32_t bus1 = socket0_get_ubox_busno(PCU_IIO_STACK);
216
217 /* Assume socket0 owns PCI segment 0 */
218 pci_io_write_config32(PCI_DEV(bus1, SAD_ALL_DEV, SAD_ALL_FUNC),
219 SAD_ALL_PAM0123_CSR, pam0123_unlock_dram);
220 pci_io_write_config32(PCI_DEV(bus1, SAD_ALL_DEV, SAD_ALL_FUNC),
221 SAD_ALL_PAM456_CSR, pam456_unlock_dram);
222
223 uint32_t reg1 = pci_io_read_config32(PCI_DEV(bus1, SAD_ALL_DEV,
224 SAD_ALL_FUNC), SAD_ALL_PAM0123_CSR);
225 uint32_t reg2 = pci_io_read_config32(PCI_DEV(bus1, SAD_ALL_DEV,
226 SAD_ALL_FUNC), SAD_ALL_PAM456_CSR);
227 printk(BIOS_DEBUG, "%s:%s pam0123_csr: 0x%x, pam456_csr: 0x%x\n",
228 __FILE__, __func__, reg1, reg2);
229}