blob: 117de9ca004b631dd33d531ca789ac0c04ca845d [file] [log] [blame]
Shuo Liuec58beb2024-03-11 07:14:07 +08001/* SPDX-License-Identifier: GPL-2.0-or-later */
2
3#include <acpi/acpigen_pci.h>
4#include <assert.h>
5#include <console/console.h>
6#include <device/pci.h>
Shuo Liu49437a62024-04-02 03:28:34 +08007#include <device/pci_ids.h>
Shuo Liu49437a62024-04-02 03:28:34 +08008#include <soc/pci_devs.h>
Shuo Liuec58beb2024-03-11 07:14:07 +08009#include <intelblocks/acpi.h>
Shuo Liuec58beb2024-03-11 07:14:07 +080010#include <soc/acpi.h>
11#include <soc/chip_common.h>
12#include <soc/soc_util.h>
13#include <soc/util.h>
Shuo Liuec58beb2024-03-11 07:14:07 +080014
15static const STACK_RES *domain_to_stack_res(const struct device *dev)
16{
17 assert(dev->path.type == DEVICE_PATH_DOMAIN);
18 const union xeon_domain_path dn = {
19 .domain_path = dev->path.domain.domain
20 };
21
22 const IIO_UDS *hob = get_iio_uds();
23 assert(hob != NULL);
24
25 return &hob->PlatformData.IIO_resource[dn.socket].StackRes[dn.stack];
26}
27
28static void iio_pci_domain_read_resources(struct device *dev)
29{
Shuo Liuec58beb2024-03-11 07:14:07 +080030 const STACK_RES *sr = domain_to_stack_res(dev);
31
32 if (!sr)
33 return;
34
35 int index = 0;
36
37 if (is_domain0(dev)) {
38 /* The 0 - 0xfff IO range is not reported by the HOB but still gets decoded */
Shuo Liu6c708d82024-04-29 18:16:30 +080039 struct resource *res = new_resource(dev, index++);
Shuo Liuec58beb2024-03-11 07:14:07 +080040 res->base = 0;
41 res->size = 0x1000;
42 res->limit = 0xfff;
43 res->flags = IORESOURCE_IO | IORESOURCE_SUBTRACTIVE | IORESOURCE_ASSIGNED;
44 }
45
Shuo Liu6c708d82024-04-29 18:16:30 +080046 if (sr->PciResourceIoBase < sr->PciResourceIoLimit)
47 domain_io_window_from_to(dev, index++,
48 sr->PciResourceIoBase, sr->PciResourceIoLimit + 1);
Shuo Liuec58beb2024-03-11 07:14:07 +080049
Shuo Liu6c708d82024-04-29 18:16:30 +080050 if (sr->PciResourceMem32Base < sr->PciResourceMem32Limit)
51 domain_mem_window_from_to(dev, index++,
52 sr->PciResourceMem32Base, sr->PciResourceMem32Limit + 1);
Shuo Liuec58beb2024-03-11 07:14:07 +080053
Shuo Liu6c708d82024-04-29 18:16:30 +080054 if (sr->PciResourceMem64Base < sr->PciResourceMem64Limit)
55 domain_mem_window_from_to(dev, index++,
56 sr->PciResourceMem64Base, sr->PciResourceMem64Limit + 1);
Shuo Liuec58beb2024-03-11 07:14:07 +080057}
58
/*
 * Used by IIO stacks for PCIe bridges. Each such stack contains a single
 * PCI host bridge, so all the bus numbers on the IIO stack can be used
 * for this bridge.
 */
static struct device_operations iio_pcie_domain_ops = {
	.read_resources = iio_pci_domain_read_resources,
	.set_resources = pci_domain_set_resources,
	.scan_bus = pci_host_bridge_scan_bus,
#if CONFIG(HAVE_ACPI_TABLES)
	.acpi_name = soc_acpi_name,
	.write_acpi_tables = northbridge_write_acpi_tables,
	.acpi_fill_ssdt = pci_domain_fill_ssdt,
#endif
};
73
/*
 * Used by UBOX stacks. Those contain multiple PCI host bridges, each having
 * only one bus with UBOX devices. UBOX devices have no resources, hence the
 * no-op resource handlers.
 */
static struct device_operations ubox_pcie_domain_ops = {
	.read_resources = noop_read_resources,
	.set_resources = noop_set_resources,
	.scan_bus = pci_host_bridge_scan_bus,
#if CONFIG(HAVE_ACPI_TABLES)
	.acpi_name = soc_acpi_name,
	.write_acpi_tables = northbridge_write_acpi_tables,
	.acpi_fill_ssdt = pci_domain_fill_ssdt,
#endif
};
88
/*
 * Create a single PCIe domain covering the whole bus range of the IIO
 * stack described by @sr.
 */
static void create_pcie_domains(const union xeon_domain_path dp, struct bus *upstream,
				const STACK_RES *sr, const size_t pci_segment_group)
{
	create_domain(dp, upstream, sr->BusBase, sr->BusLimit, DOMAIN_TYPE_PCIE,
		      &iio_pcie_domain_ops, pci_segment_group);
}
95
96/*
97 * On the first Xeon-SP generations there are no separate UBOX stacks,
98 * and the UBOX devices reside on the first and second IIO. Starting
99 * with 3rd gen Xeon-SP the UBOX devices are located on their own IIO.
100 */
101static void create_ubox_domains(const union xeon_domain_path dp, struct bus *upstream,
102 const STACK_RES *sr, const size_t pci_segment_group)
103{
104 /* Only expect 2 UBOX buses here */
105 assert(sr->BusBase + 1 == sr->BusLimit);
106
107 create_domain(dp, upstream, sr->BusBase, sr->BusBase, DOMAIN_TYPE_UBX0,
108 &ubox_pcie_domain_ops, pci_segment_group);
109 create_domain(dp, upstream, sr->BusLimit, sr->BusLimit, DOMAIN_TYPE_UBX1,
110 &ubox_pcie_domain_ops, pci_segment_group);
111}
112
113void create_cxl_domains(const union xeon_domain_path dp, struct bus *bus,
114 const STACK_RES *sr, const size_t pci_segment_group);
115
116#if CONFIG(SOC_INTEL_HAS_CXL)
117static void iio_cxl_domain_read_resources(struct device *dev)
118{
Shuo Liuec58beb2024-03-11 07:14:07 +0800119 const STACK_RES *sr = domain_to_stack_res(dev);
120
121 if (!sr)
122 return;
123
124 int index = 0;
125
Shuo Liu6c708d82024-04-29 18:16:30 +0800126 if (sr->IoBase < sr->PciResourceIoBase)
127 domain_io_window_from_to(dev, index++,
128 sr->IoBase, sr->PciResourceIoBase);
Shuo Liuec58beb2024-03-11 07:14:07 +0800129
Shuo Liu6c708d82024-04-29 18:16:30 +0800130 if (sr->Mmio32Base < sr->PciResourceMem32Base)
131 domain_mem_window_from_to(dev, index++,
132 sr->Mmio32Base, sr->PciResourceMem32Base);
Shuo Liuec58beb2024-03-11 07:14:07 +0800133
Shuo Liu6c708d82024-04-29 18:16:30 +0800134 if (sr->Mmio64Base < sr->PciResourceMem64Base)
135 domain_mem_window_from_to(dev, index++,
136 sr->Mmio64Base, sr->PciResourceMem64Base);
Shuo Liuec58beb2024-03-11 07:14:07 +0800137}
138
/* Used by the CXL 1.1 end-point domain carved out of an IIO stack */
static struct device_operations iio_cxl_domain_ops = {
	.read_resources = iio_cxl_domain_read_resources,
	.set_resources = pci_domain_set_resources,
	.scan_bus = pci_host_bridge_scan_bus,
#if CONFIG(HAVE_ACPI_TABLES)
	.acpi_name = soc_acpi_name,
	.write_acpi_tables = northbridge_write_acpi_tables,
	.acpi_fill_ssdt = pci_domain_fill_ssdt,
#endif
};
149
/*
 * Split a CXL-capable IIO stack into two domains: the first bus for the
 * PCIe RCiEPs and the remaining buses for the CXL 1.1 end-points.
 */
void create_cxl_domains(const union xeon_domain_path dp, struct bus *bus,
			const STACK_RES *sr, const size_t pci_segment_group)
{
	/* Need at least two buses: one for RCiEPs, the rest for CXL */
	assert(sr->BusBase + 1 <= sr->BusLimit);

	/* 1st domain contains PCIe RCiEPs */
	create_domain(dp, bus, sr->BusBase, sr->BusBase, DOMAIN_TYPE_PCIE,
		      &iio_pcie_domain_ops, pci_segment_group);
	/* 2nd domain contains CXL 1.1 end-points */
	create_domain(dp, bus, sr->BusBase + 1, sr->BusLimit, DOMAIN_TYPE_CXL,
		      &iio_cxl_domain_ops, pci_segment_group);
}
162#endif //CONFIG(SOC_INTEL_HAS_CXL)
163
164void create_xeonsp_domains(const union xeon_domain_path dp, struct bus *bus,
165 const STACK_RES *sr, const size_t pci_segment_group)
166{
167 if (is_ubox_stack_res(sr))
168 create_ubox_domains(dp, bus, sr, pci_segment_group);
169 else if (CONFIG(SOC_INTEL_HAS_CXL) && is_iio_cxl_stack_res(sr))
170 create_cxl_domains(dp, bus, sr, pci_segment_group);
171 else if (is_pcie_iio_stack_res(sr))
172 create_pcie_domains(dp, bus, sr, pci_segment_group);
173 else if (CONFIG(HAVE_IOAT_DOMAINS) && is_ioat_iio_stack_res(sr))
174 create_ioat_domains(dp, bus, sr, pci_segment_group);
175}
Shuo Liu49437a62024-04-02 03:28:34 +0800176
177/*
178 * Route PAM segment access to DRAM
179 * Only call this code from socket0!
180 */
181void unlock_pam_regions(void)
182{
183 uint32_t pam0123_unlock_dram = 0x33333330;
184 uint32_t pam456_unlock_dram = 0x00333333;
185 /* Get UBOX(1) for socket0 */
186 uint32_t bus1 = socket0_get_ubox_busno(PCU_IIO_STACK);
187
188 /* Assume socket0 owns PCI segment 0 */
189 pci_io_write_config32(PCI_DEV(bus1, SAD_ALL_DEV, SAD_ALL_FUNC),
190 SAD_ALL_PAM0123_CSR, pam0123_unlock_dram);
191 pci_io_write_config32(PCI_DEV(bus1, SAD_ALL_DEV, SAD_ALL_FUNC),
192 SAD_ALL_PAM456_CSR, pam456_unlock_dram);
193
194 uint32_t reg1 = pci_io_read_config32(PCI_DEV(bus1, SAD_ALL_DEV,
195 SAD_ALL_FUNC), SAD_ALL_PAM0123_CSR);
196 uint32_t reg2 = pci_io_read_config32(PCI_DEV(bus1, SAD_ALL_DEV,
197 SAD_ALL_FUNC), SAD_ALL_PAM456_CSR);
198 printk(BIOS_DEBUG, "%s:%s pam0123_csr: 0x%x, pam456_csr: 0x%x\n",
199 __FILE__, __func__, reg1, reg2);
200}