/* SPDX-License-Identifier: GPL-2.0-only */

#include <acpi/acpigen.h>
#include <amdblocks/data_fabric.h>
#include <amdblocks/root_complex.h>
#include <arch/ioapic.h>
#include <arch/vga.h>
#include <console/console.h>
#include <cpu/amd/mtrr.h>
#include <cpu/cpu.h>
#include <device/device.h>
#include <device/pci.h>
#include <device/pci_ops.h>
#include <types.h>

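/*
 * Look up which PCI segment group and bus number range the data fabric decodes to this PCI
 * root and hand those bus numbers to the generic PCI host bridge bus scan.
 */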
void amd_pci_domain_scan_bus(struct device *domain)
{
	uint8_t segment_group, bus, limit;

	if (data_fabric_get_pci_bus_numbers(domain, &segment_group, &bus, &limit) != CB_SUCCESS) {
		printk(BIOS_ERR, "No PCI bus numbers decoded to PCI root.\n");
		return;
	}

	if (segment_group >= PCI_SEGMENT_GROUP_COUNT) {
		printk(BIOS_ERR, "Skipping domain %u due to too large segment group %u.\n",
		       domain->path.domain.domain, segment_group);
		return;
	}

	/* TODO: Check if bus >= PCI_BUSES_PER_SEGMENT_GROUP and return in that case */

	/* Make sure to not report more than PCI_BUSES_PER_SEGMENT_GROUP PCI buses */
	limit = MIN(limit, PCI_BUSES_PER_SEGMENT_GROUP - 1);

	/* Set the first PCI bus number of the PCI root */
	domain->link_list->secondary = bus;
	/* subordinate must equal secondary before the pci_host_bridge_scan_bus call. */
	domain->link_list->subordinate = bus;
	/* Tell allocator about maximum PCI bus number in domain */
	domain->link_list->max_subordinate = limit;
	domain->link_list->segment_group = segment_group;

	pci_host_bridge_scan_bus(domain);
}

static void print_df_mmio_outside_of_cpu_mmio_error(unsigned int reg)
{
	printk(BIOS_WARNING, "DF MMIO register %u outside of CPU MMIO region.\n", reg);
}

static bool is_mmio_region_valid(unsigned int reg, resource_t mmio_base, resource_t mmio_limit)
{
	if (mmio_base > mmio_limit) {
		printk(BIOS_WARNING, "DF MMIO register %u's base is above its limit.\n", reg);
		return false;
	}
	if (mmio_base >= 4ULL * GiB) {
		/* MMIO region above 4GB needs to be above TOP_MEM2 MSR value */
		if (mmio_base < get_top_of_mem_above_4gb()) {
			print_df_mmio_outside_of_cpu_mmio_error(reg);
			return false;
		}
	} else {
		/* MMIO region below 4GB needs to be above TOP_MEM MSR value */
		if (mmio_base < get_top_of_mem_below_4gb()) {
			print_df_mmio_outside_of_cpu_mmio_error(reg);
			return false;
		}
		/* MMIO region below 4GB mustn't cross the 4GB boundary. */
		if (mmio_limit >= 4ULL * GiB) {
			printk(BIOS_WARNING, "DF MMIO register %u crosses 4GB boundary.\n",
			       reg);
			return false;
		}
	}

	return true;
}

static void report_data_fabric_mmio(struct device *domain, unsigned int idx,
				    resource_t mmio_base, resource_t mmio_limit)
{
	struct resource *res;
	res = new_resource(domain, idx);
	res->base = mmio_base;
	res->limit = mmio_limit;
	res->flags = IORESOURCE_MEM | IORESOURCE_ASSIGNED;
}

/* Tell the resource allocator about the usable MMIO ranges configured in the data fabric */
static void add_data_fabric_mmio_regions(struct device *domain, unsigned int *idx)
{
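	/* Only data fabric regions whose destination fabric ID matches this domain's IOHC
	   instance are decoded to this PCI root. */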
	const signed int iohc_dest_fabric_id = get_iohc_fabric_id(domain);
	union df_mmio_control ctrl;
	resource_t mmio_base;
	resource_t mmio_limit;

	/* The last 12GB of the usable address space are reserved and can't be used for MMIO */
	const resource_t reserved_upper_mmio_base =
		(1ULL << cpu_phys_address_size()) - DF_RESERVED_TOP_12GB_MMIO_SIZE;

	for (unsigned int i = 0; i < DF_MMIO_REG_SET_COUNT; i++) {
		ctrl.raw = data_fabric_broadcast_read32(DF_MMIO_CONTROL(i));

		/* Relevant MMIO regions need to have both reads and writes enabled */
		if (!ctrl.we || !ctrl.re)
			continue;

		/* Non-posted region contains fixed FCH MMIO devices */
		if (ctrl.np)
			continue;

		/* Only look at MMIO regions that are decoded to the right PCI root */
		if (ctrl.dst_fabric_id != iohc_dest_fabric_id)
			continue;

		data_fabric_get_mmio_base_size(i, &mmio_base, &mmio_limit);

		if (!is_mmio_region_valid(i, mmio_base, mmio_limit))
			continue;

		/* Make sure to not report a region overlapping with the fixed MMIO resources
		   below 4GB or the reserved MMIO range in the last 12GB of the addressable
		   address range. The code assumes that the fixed MMIO resources below 4GB
		   are between IO_APIC_ADDR and the 4GB boundary. */
		if (mmio_base < 4ULL * GiB) {
			if (mmio_base >= IO_APIC_ADDR)
				continue;
			if (mmio_limit >= IO_APIC_ADDR)
				mmio_limit = IO_APIC_ADDR - 1;
		} else {
			if (mmio_base >= reserved_upper_mmio_base)
				continue;
			if (mmio_limit >= reserved_upper_mmio_base)
				mmio_limit = reserved_upper_mmio_base - 1;
		}

		report_data_fabric_mmio(domain, (*idx)++, mmio_base, mmio_limit);
	}
}

static void report_data_fabric_io(struct device *domain, unsigned int idx,
				  resource_t io_base, resource_t io_limit)
{
	struct resource *res;
	res = new_resource(domain, idx);
	res->base = io_base;
	res->limit = io_limit;
	res->flags = IORESOURCE_IO | IORESOURCE_ASSIGNED;
}

/* Tell the resource allocator about the usable I/O space */
static void add_data_fabric_io_regions(struct device *domain, unsigned int *idx)
{
	const signed int iohc_dest_fabric_id = get_iohc_fabric_id(domain);
	union df_io_base base_reg;
	union df_io_limit limit_reg;
	resource_t io_base;
	resource_t io_limit;

	for (unsigned int i = 0; i < DF_IO_REG_COUNT; i++) {
		base_reg.raw = data_fabric_broadcast_read32(DF_IO_BASE(i));

		/* Relevant IO regions need to have both reads and writes enabled */
		if (!base_reg.we || !base_reg.re)
			continue;

		limit_reg.raw = data_fabric_broadcast_read32(DF_IO_LIMIT(i));

		/* Only look at IO regions that are decoded to the right PCI root */
		if (limit_reg.dst_fabric_id != iohc_dest_fabric_id)
			continue;

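		/* The IO base and limit registers only hold the upper address bits; convert
		   the limit field to an inclusive limit by adding 1 before the shift and
		   subtracting 1 afterwards. */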
		io_base = base_reg.io_base << DF_IO_ADDR_SHIFT;
		io_limit = ((limit_reg.io_limit + 1) << DF_IO_ADDR_SHIFT) - 1;

		/* Beware that the lower 25 bits of io_base and io_limit can be non-zero
		   despite there only being 16 bits worth of IO port address space. */
		if (io_base > 0xffff) {
			printk(BIOS_WARNING, "DF IO base register %d value outside of valid "
			       "IO port address range.\n", i);
			continue;
		}
		/* If only the IO limit is outside of the valid 16 bit IO port range, report
		   the limit as 0xffff, so that the resource allocator won't put IO BARs outside
		   of the 16 bit IO port address range. */
		io_limit = MIN(io_limit, 0xffff);

		report_data_fabric_io(domain, (*idx)++, io_base, io_limit);
	}
}

void amd_pci_domain_read_resources(struct device *domain)
{
	unsigned int idx = 0;

	add_data_fabric_io_regions(domain, &idx);

	add_data_fabric_mmio_regions(domain, &idx);

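	/* Also report the non-PCI resources of this root complex */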
	read_non_pci_resources(domain, &idx);
}

static void write_ssdt_domain_io_producer_range_helper(const char *domain_name,
							resource_t base, resource_t limit)
{
	printk(BIOS_DEBUG, "%s _CRS: adding IO range [%llx-%llx]\n", domain_name, base, limit);
	acpigen_resource_producer_io(base, limit);
}

static void write_ssdt_domain_io_producer_range(const char *domain_name,
						resource_t base, resource_t limit)
{
	/*
	 * Split the IO region at the PCI config IO ports so that the IO resource producer
	 * won't cover the same IO ports that the IO resource consumer for the PCI config IO
	 * ports in the same ACPI device already covers.
	 */
	if (base < PCI_IO_CONFIG_INDEX) {
		write_ssdt_domain_io_producer_range_helper(domain_name,
							   base,
							   MIN(limit, PCI_IO_CONFIG_INDEX - 1));
	}
	if (limit > PCI_IO_CONFIG_LAST_PORT) {
		write_ssdt_domain_io_producer_range_helper(domain_name,
							   MAX(base, PCI_IO_CONFIG_LAST_PORT + 1),
							   limit);
	}
}

static void write_ssdt_domain_mmio_producer_range(const char *domain_name,
						  resource_t base, resource_t limit)
{
	printk(BIOS_DEBUG, "%s _CRS: adding MMIO range [%llx-%llx]\n",
	       domain_name, base, limit);
	acpigen_resource_producer_mmio(base, limit,
			MEM_RSRC_FLAG_MEM_READ_WRITE | MEM_RSRC_FLAG_MEM_ATTR_NON_CACHE);
}

void amd_pci_domain_fill_ssdt(const struct device *domain)
{
	const char *acpi_scope = acpi_device_path(domain);
	printk(BIOS_DEBUG, "%s ACPI scope: '%s'\n", __func__, acpi_scope);
	acpigen_write_scope(acpi_device_path(domain));

	acpigen_write_name("_CRS");
	acpigen_write_resourcetemplate_header();

	/* PCI bus number range in domain */
	printk(BIOS_DEBUG, "%s _CRS: adding busses [%x-%x] in segment group %x\n",
	       acpi_device_name(domain), domain->link_list->secondary,
	       domain->link_list->max_subordinate, domain->link_list->segment_group);
	acpigen_resource_producer_bus_number(domain->link_list->secondary,
					     domain->link_list->max_subordinate);

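	/* Only the domain that decodes PCI bus 0 in segment group 0 gets the IO ports of the
	   legacy PCI configuration mechanism added as a consumed resource. */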
	if (domain->link_list->secondary == 0 && domain->link_list->segment_group == 0) {
		/* ACPI 6.4.2.5 I/O Port Descriptor */
		acpigen_write_io16(PCI_IO_CONFIG_INDEX, PCI_IO_CONFIG_LAST_PORT, 1,
				   PCI_IO_CONFIG_PORT_COUNT, 1);
	}

	struct resource *res;
	for (res = domain->resource_list; res != NULL; res = res->next) {
		if (!(res->flags & IORESOURCE_ASSIGNED))
			continue;
		/* Don't add MMIO producer ranges for reserved MMIO regions from non-PCI
		   devices */
		if ((res->flags & IORESOURCE_RESERVE))
			continue;
		switch (res->flags & IORESOURCE_TYPE_MASK) {
		case IORESOURCE_IO:
			write_ssdt_domain_io_producer_range(acpi_device_name(domain),
							    res->base, res->limit);
			break;
		case IORESOURCE_MEM:
			write_ssdt_domain_mmio_producer_range(acpi_device_name(domain),
							      res->base, res->limit);
			break;
		default:
			break;
		}
	}

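	/* If the legacy VGA MMIO range is decoded to this domain, report it as a producer
	   range, so the OS knows it's routed to the devices below this PCI host bridge. */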
	if (domain->link_list->bridge_ctrl & PCI_BRIDGE_CTL_VGA) {
		printk(BIOS_DEBUG, "%s _CRS: adding VGA resource\n", acpi_device_name(domain));
		acpigen_resource_producer_mmio(VGA_MMIO_BASE, VGA_MMIO_LIMIT,
			MEM_RSRC_FLAG_MEM_READ_WRITE | MEM_RSRC_FLAG_MEM_ATTR_CACHE);
	}

	acpigen_write_resourcetemplate_footer();

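	/* _SEG and _BBN report the PCI segment group number and the base bus number of this
	   PCI host bridge to the OS. */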
	acpigen_write_SEG(domain->link_list->segment_group);
	acpigen_write_BBN(domain->link_list->secondary);

	/* Scope */
	acpigen_pop_len();
}