blob: b17b77347e5a5af09591b11c5c9f2bdb84b9bea2 [file] [log] [blame]
Shuo Liuec58beb2024-03-11 07:14:07 +08001/* SPDX-License-Identifier: GPL-2.0-or-later */
2
3#include <acpi/acpigen_pci.h>
4#include <assert.h>
5#include <console/console.h>
6#include <device/pci.h>
Shuo Liu49437a62024-04-02 03:28:34 +08007#include <device/pci_ids.h>
Shuo Liu49437a62024-04-02 03:28:34 +08008#include <soc/pci_devs.h>
Shuo Liuec58beb2024-03-11 07:14:07 +08009#include <intelblocks/acpi.h>
Shuo Liuec58beb2024-03-11 07:14:07 +080010#include <soc/acpi.h>
11#include <soc/chip_common.h>
12#include <soc/soc_util.h>
13#include <soc/util.h>
Shuo Liuec58beb2024-03-11 07:14:07 +080014
15static const STACK_RES *domain_to_stack_res(const struct device *dev)
16{
17 assert(dev->path.type == DEVICE_PATH_DOMAIN);
18 const union xeon_domain_path dn = {
19 .domain_path = dev->path.domain.domain
20 };
21
22 const IIO_UDS *hob = get_iio_uds();
23 assert(hob != NULL);
24
25 return &hob->PlatformData.IIO_resource[dn.socket].StackRes[dn.stack];
26}
27
28static void iio_pci_domain_read_resources(struct device *dev)
29{
30 struct resource *res;
31 const STACK_RES *sr = domain_to_stack_res(dev);
32
33 if (!sr)
34 return;
35
36 int index = 0;
37
38 if (is_domain0(dev)) {
39 /* The 0 - 0xfff IO range is not reported by the HOB but still gets decoded */
40 res = new_resource(dev, index++);
41 res->base = 0;
42 res->size = 0x1000;
43 res->limit = 0xfff;
44 res->flags = IORESOURCE_IO | IORESOURCE_SUBTRACTIVE | IORESOURCE_ASSIGNED;
45 }
46
47 if (sr->PciResourceIoBase < sr->PciResourceIoLimit) {
48 res = new_resource(dev, index++);
49 res->base = sr->PciResourceIoBase;
50 res->limit = sr->PciResourceIoLimit;
51 res->size = res->limit - res->base + 1;
52 res->flags = IORESOURCE_IO | IORESOURCE_ASSIGNED;
53 }
54
55 if (sr->PciResourceMem32Base < sr->PciResourceMem32Limit) {
56 res = new_resource(dev, index++);
57 res->base = sr->PciResourceMem32Base;
58 res->limit = sr->PciResourceMem32Limit;
59 res->size = res->limit - res->base + 1;
60 res->flags = IORESOURCE_MEM | IORESOURCE_ASSIGNED;
61 }
62
63 if (sr->PciResourceMem64Base < sr->PciResourceMem64Limit) {
64 res = new_resource(dev, index++);
65 res->base = sr->PciResourceMem64Base;
66 res->limit = sr->PciResourceMem64Limit;
67 res->size = res->limit - res->base + 1;
68 res->flags = IORESOURCE_MEM | IORESOURCE_ASSIGNED;
69 }
70}
71
/*
 * Used by IIO stacks for PCIe bridges. Those contain one PCI host bridge;
 * all the bus numbers on the IIO stack can be used for this bridge.
 */
static struct device_operations iio_pcie_domain_ops = {
	.read_resources = iio_pci_domain_read_resources,
	.set_resources = pci_domain_set_resources,
	.scan_bus = pci_host_bridge_scan_bus,
#if CONFIG(HAVE_ACPI_TABLES)
	.acpi_name = soc_acpi_name,
	.write_acpi_tables = northbridge_write_acpi_tables,
	.acpi_fill_ssdt = pci_domain_fill_ssdt,
#endif
};
86
/*
 * Used by UBOX stacks. Those contain multiple PCI host bridges, each having
 * only one bus with UBOX devices. UBOX devices have no resources, hence the
 * no-op resource handlers.
 */
static struct device_operations ubox_pcie_domain_ops = {
	.read_resources = noop_read_resources,
	.set_resources = noop_set_resources,
	.scan_bus = pci_host_bridge_scan_bus,
#if CONFIG(HAVE_ACPI_TABLES)
	.acpi_name = soc_acpi_name,
	.write_acpi_tables = northbridge_write_acpi_tables,
	.acpi_fill_ssdt = pci_domain_fill_ssdt,
#endif
};
101
102static void create_pcie_domains(const union xeon_domain_path dp, struct bus *upstream,
103 const STACK_RES *sr, const size_t pci_segment_group)
104{
105 create_domain(dp, upstream, sr->BusBase, sr->BusLimit, DOMAIN_TYPE_PCIE,
106 &iio_pcie_domain_ops, pci_segment_group);
107}
108
109/*
110 * On the first Xeon-SP generations there are no separate UBOX stacks,
111 * and the UBOX devices reside on the first and second IIO. Starting
112 * with 3rd gen Xeon-SP the UBOX devices are located on their own IIO.
113 */
114static void create_ubox_domains(const union xeon_domain_path dp, struct bus *upstream,
115 const STACK_RES *sr, const size_t pci_segment_group)
116{
117 /* Only expect 2 UBOX buses here */
118 assert(sr->BusBase + 1 == sr->BusLimit);
119
120 create_domain(dp, upstream, sr->BusBase, sr->BusBase, DOMAIN_TYPE_UBX0,
121 &ubox_pcie_domain_ops, pci_segment_group);
122 create_domain(dp, upstream, sr->BusLimit, sr->BusLimit, DOMAIN_TYPE_UBX1,
123 &ubox_pcie_domain_ops, pci_segment_group);
124}
125
/*
 * Forward declaration kept outside the SOC_INTEL_HAS_CXL guard below so the
 * non-static definition there always has a visible prototype.
 */
void create_cxl_domains(const union xeon_domain_path dp, struct bus *bus,
	const STACK_RES *sr, const size_t pci_segment_group);
128
129#if CONFIG(SOC_INTEL_HAS_CXL)
130static void iio_cxl_domain_read_resources(struct device *dev)
131{
132 struct resource *res;
133 const STACK_RES *sr = domain_to_stack_res(dev);
134
135 if (!sr)
136 return;
137
138 int index = 0;
139
140 if (sr->IoBase < sr->PciResourceIoBase) {
141 res = new_resource(dev, index++);
142 res->base = sr->IoBase;
143 res->limit = sr->PciResourceIoBase - 1;
144 res->size = res->limit - res->base + 1;
145 res->flags = IORESOURCE_IO | IORESOURCE_ASSIGNED;
146 }
147
148 if (sr->Mmio32Base < sr->PciResourceMem32Base) {
149 res = new_resource(dev, index++);
150 res->base = sr->Mmio32Base;
151 res->limit = sr->PciResourceMem32Base - 1;
152 res->size = res->limit - res->base + 1;
153 res->flags = IORESOURCE_MEM | IORESOURCE_ASSIGNED;
154 }
155
156 if (sr->Mmio64Base < sr->PciResourceMem64Base) {
157 res = new_resource(dev, index++);
158 res->base = sr->Mmio64Base;
159 res->limit = sr->PciResourceMem64Base - 1;
160 res->size = res->limit - res->base + 1;
161 res->flags = IORESOURCE_MEM | IORESOURCE_ASSIGNED;
162 }
163}
164
/* Used by the CXL part of a stack; resources come from the CXL reader above. */
static struct device_operations iio_cxl_domain_ops = {
	.read_resources = iio_cxl_domain_read_resources,
	.set_resources = pci_domain_set_resources,
	.scan_bus = pci_host_bridge_scan_bus,
#if CONFIG(HAVE_ACPI_TABLES)
	.acpi_name = soc_acpi_name,
	.write_acpi_tables = northbridge_write_acpi_tables,
	.acpi_fill_ssdt = pci_domain_fill_ssdt,
#endif
};
175
176void create_cxl_domains(const union xeon_domain_path dp, struct bus *bus,
177 const STACK_RES *sr, const size_t pci_segment_group)
178{
179 assert(sr->BusBase + 1 <= sr->BusLimit);
180
181 /* 1st domain contains PCIe RCiEPs */
182 create_domain(dp, bus, sr->BusBase, sr->BusBase, DOMAIN_TYPE_PCIE,
183 &iio_pcie_domain_ops, pci_segment_group);
184 /* 2nd domain contains CXL 1.1 end-points */
185 create_domain(dp, bus, sr->BusBase + 1, sr->BusLimit, DOMAIN_TYPE_CXL,
186 &iio_cxl_domain_ops, pci_segment_group);
187}
188#endif //CONFIG(SOC_INTEL_HAS_CXL)
189
/*
 * Create the coreboot domain(s) matching the type of the given IIO stack.
 * The order of the checks matters: UBOX and CXL stacks would also match
 * the generic PCIe test, so they are classified first.  Stacks matching
 * none of the predicates get no domain.
 */
void create_xeonsp_domains(const union xeon_domain_path dp, struct bus *bus,
			   const STACK_RES *sr, const size_t pci_segment_group)
{
	if (is_ubox_stack_res(sr))
		create_ubox_domains(dp, bus, sr, pci_segment_group);
	else if (CONFIG(SOC_INTEL_HAS_CXL) && is_iio_cxl_stack_res(sr))
		create_cxl_domains(dp, bus, sr, pci_segment_group);
	else if (is_pcie_iio_stack_res(sr))
		create_pcie_domains(dp, bus, sr, pci_segment_group);
	else if (CONFIG(HAVE_IOAT_DOMAINS) && is_ioat_iio_stack_res(sr))
		create_ioat_domains(dp, bus, sr, pci_segment_group);
}
Shuo Liu49437a62024-04-02 03:28:34 +0800202
203/*
204 * Route PAM segment access to DRAM
205 * Only call this code from socket0!
206 */
207void unlock_pam_regions(void)
208{
209 uint32_t pam0123_unlock_dram = 0x33333330;
210 uint32_t pam456_unlock_dram = 0x00333333;
211 /* Get UBOX(1) for socket0 */
212 uint32_t bus1 = socket0_get_ubox_busno(PCU_IIO_STACK);
213
214 /* Assume socket0 owns PCI segment 0 */
215 pci_io_write_config32(PCI_DEV(bus1, SAD_ALL_DEV, SAD_ALL_FUNC),
216 SAD_ALL_PAM0123_CSR, pam0123_unlock_dram);
217 pci_io_write_config32(PCI_DEV(bus1, SAD_ALL_DEV, SAD_ALL_FUNC),
218 SAD_ALL_PAM456_CSR, pam456_unlock_dram);
219
220 uint32_t reg1 = pci_io_read_config32(PCI_DEV(bus1, SAD_ALL_DEV,
221 SAD_ALL_FUNC), SAD_ALL_PAM0123_CSR);
222 uint32_t reg2 = pci_io_read_config32(PCI_DEV(bus1, SAD_ALL_DEV,
223 SAD_ALL_FUNC), SAD_ALL_PAM456_CSR);
224 printk(BIOS_DEBUG, "%s:%s pam0123_csr: 0x%x, pam456_csr: 0x%x\n",
225 __FILE__, __func__, reg1, reg2);
226}