/* SPDX-License-Identifier: GPL-2.0-only */

#include <acpi/acpigen.h>
#include <amdblocks/cpu.h>
#include <amdblocks/data_fabric.h>
#include <amdblocks/root_complex.h>
#include <arch/ioapic.h>
#include <arch/vga.h>
#include <console/console.h>
#include <cpu/amd/mtrr.h>
#include <device/device.h>
#include <device/pci_ops.h>
#include <types.h>

void amd_pci_domain_scan_bus(struct device *domain)
{
	uint8_t bus, limit;

	/* TODO: Systems with more than one PCI root need to read the data fabric registers to
	   see which PCI bus numbers get decoded to which PCI root. */
	bus = 0;
	limit = CONFIG_ECAM_MMCONF_BUS_NUMBER - 1;
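	/* Example: with CONFIG_ECAM_MMCONF_BUS_NUMBER configured as 64, the single PCI root
	   gets the bus number range 0..63. */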

	/* Set the first bus number of the PCI root */
	domain->link_list->secondary = bus;
	/* subordinate needs to be the same as secondary before the pci_domain_scan_bus call. */
	domain->link_list->subordinate = bus;
	/* Tell the allocator about the maximum PCI bus number in the domain */
	domain->link_list->max_subordinate = limit;

	pci_domain_scan_bus(domain);
}

/* Read the registers and return normalized values */
static void data_fabric_get_mmio_base_size(unsigned int reg,
					   resource_t *mmio_base, resource_t *mmio_limit)
{
	const uint32_t base_reg = data_fabric_broadcast_read32(DF_MMIO_BASE(reg));
	const uint32_t limit_reg = data_fabric_broadcast_read32(DF_MMIO_LIMIT(reg));
	/* The raw register values are bits 47..16 of the actual address */
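	/* Illustrative example (assuming DF_MMIO_SHIFT is 16, matching the bits 47..16 layout
	   above): a base register value of 0x0000f000 and a limit register value of 0x0000f0ff
	   describe the MMIO range 0xf0000000..0xf0ffffff. */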
	*mmio_base = (resource_t)base_reg << DF_MMIO_SHIFT;
	*mmio_limit = (((resource_t)limit_reg + 1) << DF_MMIO_SHIFT) - 1;
}

static void print_df_mmio_outside_of_cpu_mmio_error(unsigned int reg)
{
	printk(BIOS_WARNING, "DF MMIO register %u outside of CPU MMIO region.\n", reg);
}

static bool is_mmio_region_valid(unsigned int reg, resource_t mmio_base, resource_t mmio_limit)
{
	if (mmio_base > mmio_limit) {
		printk(BIOS_WARNING, "DF MMIO register %u's base is above its limit.\n", reg);
		return false;
	}
	if (mmio_base >= 4ULL * GiB) {
		/* MMIO region above 4GB needs to be above TOP_MEM2 MSR value */
		if (mmio_base < get_top_of_mem_above_4gb()) {
			print_df_mmio_outside_of_cpu_mmio_error(reg);
			return false;
		}
	} else {
		/* MMIO region below 4GB needs to be above TOP_MEM MSR value */
		if (mmio_base < get_top_of_mem_below_4gb()) {
			print_df_mmio_outside_of_cpu_mmio_error(reg);
			return false;
		}
		/* MMIO region below 4GB mustn't cross the 4GB boundary. */
		if (mmio_limit >= 4ULL * GiB) {
			printk(BIOS_WARNING, "DF MMIO register %u crosses 4GB boundary.\n",
			       reg);
			return false;
		}
	}

	return true;
}

static void report_data_fabric_mmio(struct device *domain, unsigned int idx,
				    resource_t mmio_base, resource_t mmio_limit)
{
	struct resource *res;
	res = new_resource(domain, idx);
	res->base = mmio_base;
	res->limit = mmio_limit;
	res->flags = IORESOURCE_MEM | IORESOURCE_ASSIGNED;
}

/* Tell the resource allocator about the usable MMIO ranges configured in the data fabric */
static void add_data_fabric_mmio_regions(struct device *domain, unsigned int *idx)
{
	union df_mmio_control ctrl;
	resource_t mmio_base;
	resource_t mmio_limit;

	/* The last 12GB of the usable address space are reserved and can't be used for MMIO */
	const resource_t reserved_upper_mmio_base =
		(1ULL << get_usable_physical_address_bits()) - DF_RESERVED_TOP_12GB_MMIO_SIZE;
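	/* E.g. with 48 usable physical address bits and a 12GB reserved size, this evaluates
	   to 0xfffd00000000, so 0xfffd00000000..0xffffffffffff must not be used for MMIO. */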

	for (unsigned int i = 0; i < DF_MMIO_REG_SET_COUNT; i++) {
		ctrl.raw = data_fabric_broadcast_read32(DF_MMIO_CONTROL(i));

		/* Relevant MMIO regions need to have both reads and writes enabled */
		if (!ctrl.we || !ctrl.re)
			continue;

		/* Non-posted region contains fixed FCH MMIO devices */
		if (ctrl.np)
			continue;

		/* TODO: Systems with more than one PCI root need to check to which PCI root
		   the MMIO range gets decoded. */

		data_fabric_get_mmio_base_size(i, &mmio_base, &mmio_limit);

		if (!is_mmio_region_valid(i, mmio_base, mmio_limit))
			continue;

		/* Make sure to not report a region overlapping with the fixed MMIO resources
		   below 4GB or the reserved MMIO range in the last 12GB of the addressable
		   address range. The code assumes that the fixed MMIO resources below 4GB
		   are between IO_APIC_ADDR and the 4GB boundary. */
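		/* E.g. an enabled DF region 0xd0000000..0xffffffff would be clamped to
		   0xd0000000..IO_APIC_ADDR - 1 (typically 0xfebfffff) before being reported. */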
		if (mmio_base < 4ULL * GiB) {
			if (mmio_base >= IO_APIC_ADDR)
				continue;
			if (mmio_limit >= IO_APIC_ADDR)
				mmio_limit = IO_APIC_ADDR - 1;
		} else {
			if (mmio_base >= reserved_upper_mmio_base)
				continue;
			if (mmio_limit >= reserved_upper_mmio_base)
				mmio_limit = reserved_upper_mmio_base - 1;
		}

		report_data_fabric_mmio(domain, (*idx)++, mmio_base, mmio_limit);
	}
}

static void report_data_fabric_io(struct device *domain, unsigned int idx,
				  resource_t io_base, resource_t io_limit)
{
	struct resource *res;
	res = new_resource(domain, idx);
	res->base = io_base;
	res->limit = io_limit;
	res->flags = IORESOURCE_IO | IORESOURCE_ASSIGNED;
}

/* Tell the resource allocator about the usable I/O space */
static void add_data_fabric_io_regions(struct device *domain, unsigned int *idx)
{
	union df_io_base base_reg;
	union df_io_limit limit_reg;
	resource_t io_base;
	resource_t io_limit;

	for (unsigned int i = 0; i < DF_IO_REG_COUNT; i++) {
		base_reg.raw = data_fabric_broadcast_read32(DF_IO_BASE(i));

		/* Relevant IO regions need to have both reads and writes enabled */
		if (!base_reg.we || !base_reg.re)
			continue;

		limit_reg.raw = data_fabric_broadcast_read32(DF_IO_LIMIT(i));

		/* TODO: Systems with more than one PCI root need to check to which PCI root
		   the IO range gets decoded. */

		io_base = base_reg.io_base << DF_IO_ADDR_SHIFT;
		io_limit = ((limit_reg.io_limit + 1) << DF_IO_ADDR_SHIFT) - 1;
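		/* Illustrative example (assuming DF_IO_ADDR_SHIFT is 12): an io_base field of
		   0x1 and an io_limit field of 0xf describe the IO port range 0x1000..0xffff. */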

		/* Beware that the lower 25 bits of io_base and io_limit can be non-zero
		   despite there only being 16 bits worth of IO port address space. */
		if (io_base > 0xffff) {
			printk(BIOS_WARNING, "DF IO base register %u value outside of valid "
			       "IO port address range.\n", i);
			continue;
		}
		/* If only the IO limit is outside of the valid 16 bit IO port range, report
		   the limit as 0xffff, so that the resource allocator won't put IO BARs outside
		   of the 16 bit IO port address range. */
		io_limit = MIN(io_limit, 0xffff);

		report_data_fabric_io(domain, (*idx)++, io_base, io_limit);
	}
}

void amd_pci_domain_read_resources(struct device *domain)
{
	unsigned int idx = 0;

	add_data_fabric_io_regions(domain, &idx);

	add_data_fabric_mmio_regions(domain, &idx);

	read_non_pci_resources(domain, &idx);
}

static void write_ssdt_domain_io_producer_range_helper(const char *domain_name,
							resource_t base, resource_t limit)
{
	printk(BIOS_DEBUG, "%s _CRS: adding IO range [%llx-%llx]\n", domain_name, base, limit);
	acpigen_resource_producer_io(base, limit);
}

static void write_ssdt_domain_io_producer_range(const char *domain_name,
						resource_t base, resource_t limit)
{
	/*
	 * Split the IO region at the PCI config IO ports so that the IO resource producer
	 * won't cover the same IO ports that the IO resource consumer for the PCI config IO
	 * ports in the same ACPI device already covers.
	 */
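	/*
	 * E.g. assuming PCI_IO_CONFIG_INDEX is 0xcf8 and PCI_IO_CONFIG_LAST_PORT is 0xcff, an
	 * IO region 0x0000..0xffff is emitted as the two producer ranges 0x0000..0x0cf7 and
	 * 0x0d00..0xffff.
	 */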
	if (base < PCI_IO_CONFIG_INDEX) {
		write_ssdt_domain_io_producer_range_helper(domain_name,
							   base,
							   MIN(limit, PCI_IO_CONFIG_INDEX - 1));
	}
	if (limit > PCI_IO_CONFIG_LAST_PORT) {
		write_ssdt_domain_io_producer_range_helper(domain_name,
							   MAX(base, PCI_IO_CONFIG_LAST_PORT + 1),
							   limit);
	}
}

static void write_ssdt_domain_mmio_producer_range(const char *domain_name,
						  resource_t base, resource_t limit)
{
	printk(BIOS_DEBUG, "%s _CRS: adding MMIO range [%llx-%llx]\n",
	       domain_name, base, limit);
	acpigen_resource_producer_mmio(base, limit,
				       MEM_RSRC_FLAG_MEM_READ_WRITE | MEM_RSRC_FLAG_MEM_ATTR_NON_CACHE);
}

void amd_pci_domain_fill_ssdt(const struct device *domain)
{
	const char *acpi_scope = acpi_device_path(domain);
	printk(BIOS_DEBUG, "%s ACPI scope: '%s'\n", __func__, acpi_scope);
	acpigen_write_scope(acpi_scope);

	acpigen_write_name("_CRS");
	acpigen_write_resourcetemplate_header();

	/* PCI bus number range in domain */
	printk(BIOS_DEBUG, "%s _CRS: adding busses [%x-%x]\n", acpi_device_name(domain),
	       domain->link_list->secondary, domain->link_list->max_subordinate);
	acpigen_resource_producer_bus_number(domain->link_list->secondary,
					     domain->link_list->max_subordinate);

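	/* Only the domain that contains bus 0 decodes the PCI config IO ports, so only add the
	   IO port consumer resource for them there. */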
	if (domain->link_list->secondary == 0) {
		/* ACPI 6.4.2.5 I/O Port Descriptor */
		acpigen_write_io16(PCI_IO_CONFIG_INDEX, PCI_IO_CONFIG_LAST_PORT, 1,
				   PCI_IO_CONFIG_PORT_COUNT, 1);
	}

	struct resource *res;
	for (res = domain->resource_list; res != NULL; res = res->next) {
		if (!(res->flags & IORESOURCE_ASSIGNED))
			continue;
		/* Don't add MMIO producer ranges for reserved MMIO regions from non-PCI
		   devices */
		if ((res->flags & IORESOURCE_RESERVE))
			continue;
		switch (res->flags & IORESOURCE_TYPE_MASK) {
		case IORESOURCE_IO:
			write_ssdt_domain_io_producer_range(acpi_device_name(domain),
							    res->base, res->limit);
			break;
		case IORESOURCE_MEM:
			write_ssdt_domain_mmio_producer_range(acpi_device_name(domain),
							      res->base, res->limit);
			break;
		default:
			break;
		}
	}

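	/* If the VGA bridge control bit is set on this domain, the legacy VGA MMIO window
	   (VGA_MMIO_BASE..VGA_MMIO_LIMIT, typically 0xa0000..0xbffff) is decoded here, so
	   report it as an additional producer range. */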
	if (domain->link_list->bridge_ctrl & PCI_BRIDGE_CTL_VGA) {
		printk(BIOS_DEBUG, "%s _CRS: adding VGA resource\n", acpi_device_name(domain));
		acpigen_resource_producer_mmio(VGA_MMIO_BASE, VGA_MMIO_LIMIT,
					       MEM_RSRC_FLAG_MEM_READ_WRITE | MEM_RSRC_FLAG_MEM_ATTR_CACHE);
	}

	acpigen_write_resourcetemplate_footer();

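	/* _BBN (Base Bus Number) reports the first PCI bus number of this PCI root. */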
	acpigen_write_BBN(domain->link_list->secondary);

	/* Scope */
	acpigen_pop_len();
}