/* SPDX-License-Identifier: GPL-2.0-only */
2
Felix Held4ef37ae2021-02-14 22:45:48 +01003#include <acpi/acpi_device.h>
Felix Heldea32c522021-02-13 01:42:44 +01004#include <amdblocks/data_fabric.h>
5#include <console/console.h>
6#include <cpu/x86/lapic_def.h>
Felix Held4ef37ae2021-02-14 22:45:48 +01007#include <device/device.h>
8#include <device/pci.h>
9#include <device/pci_ids.h>
Felix Heldea32c522021-02-13 01:42:44 +010010#include <soc/data_fabric.h>
11#include <soc/iomap.h>
12#include <types.h>
13
void data_fabric_set_mmio_np(void)
{
	/*
	 * Mark region from HPET-LAPIC or 0xfed00000-0xfee00000-1 as NP.
	 *
	 * AGESA has already programmed the NB MMIO routing, however nothing
	 * is yet marked as non-posted.
	 *
	 * If there exists an overlapping routing base/limit pair, trim its
	 * base or limit to avoid the new NP region. If any pair exists
	 * completely within HPET-LAPIC range, remove it. If any pair surrounds
	 * HPET-LAPIC, it must be split into two regions.
	 *
	 * TODO(b/156296146): Remove the settings from AGESA and allow coreboot
	 * to own everything. If not practical, consider erasing all settings
	 * and have coreboot reprogram them. At that time, make the source
	 * below more flexible.
	 *   * Note that the code relies on the granularity of the HPET and
	 *     LAPIC addresses being sufficiently large that the shifted limits
	 *     +/-1 are always equivalent to the non-shifted values +/-1.
	 */

	unsigned int i;
	int reg;
	uint32_t base, limit, ctrl;
	/* The data fabric base/limit registers hold addresses right-shifted by
	 * D18F0_MMIO_SHIFT, so all comparisons below are done in shifted units. */
	const uint32_t np_bot = HPET_BASE_ADDRESS >> D18F0_MMIO_SHIFT;
	const uint32_t np_top = (LOCAL_APIC_ADDR - 1) >> D18F0_MMIO_SHIFT;

	data_fabric_print_mmio_conf();

	for (i = 0; i < NUM_NB_MMIO_REGS; i++) {
		/* Adjust all registers that overlap */
		ctrl = data_fabric_broadcast_read32(0, NB_MMIO_CONTROL(i));
		if (!(ctrl & (MMIO_WE | MMIO_RE)))
			continue; /* not enabled */

		base = data_fabric_broadcast_read32(0, NB_MMIO_BASE(i));
		limit = data_fabric_broadcast_read32(0, NB_MMIO_LIMIT(i));

		if (base > np_top || limit < np_bot)
			continue; /* no overlap at all */

		if (base >= np_bot && limit <= np_top) {
			data_fabric_disable_mmio_reg(i); /* 100% within, so remove */
			continue;
		}

		if (base < np_bot && limit > np_top) {
			/* Split the configured region: shrink the existing pair to
			 * cover only the part below the NP range, then program the
			 * part above the NP range into a spare pair, reusing the
			 * original control value. */
			data_fabric_broadcast_write32(0, NB_MMIO_LIMIT(i), np_bot - 1);
			reg = data_fabric_find_unused_mmio_reg();
			if (reg < 0) {
				/* Although a pair could be freed later, this condition is
				 * very unusual and deserves analysis. Flag an error and
				 * leave the topmost part unconfigured. */
				printk(BIOS_ERR,
				       "Error: Not enough NB MMIO routing registers\n");
				continue;
			}
			data_fabric_broadcast_write32(0, NB_MMIO_BASE(reg), np_top + 1);
			data_fabric_broadcast_write32(0, NB_MMIO_LIMIT(reg), limit);
			data_fabric_broadcast_write32(0, NB_MMIO_CONTROL(reg), ctrl);
			continue;
		}

		/* If still here, adjust only the base or limit */
		if (base <= np_bot)
			data_fabric_broadcast_write32(0, NB_MMIO_LIMIT(i), np_bot - 1);
		else
			data_fabric_broadcast_write32(0, NB_MMIO_BASE(i), np_top + 1);
	}

	/* Claim a free register pair for the new non-posted region itself. */
	reg = data_fabric_find_unused_mmio_reg();
	if (reg < 0) {
		printk(BIOS_ERR, "Error: cannot configure region as NP\n");
		return;
	}

	/* Route the NP region to IOMS0 with reads and writes enabled and the
	 * non-posted attribute (MMIO_NP) set. */
	data_fabric_broadcast_write32(0, NB_MMIO_BASE(reg), np_bot);
	data_fabric_broadcast_write32(0, NB_MMIO_LIMIT(reg), np_top);
	data_fabric_broadcast_write32(0, NB_MMIO_CONTROL(reg),
			(IOMS0_FABRIC_ID << MMIO_DST_FABRIC_ID_SHIFT) | MMIO_NP | MMIO_WE
			| MMIO_RE);

	data_fabric_print_mmio_conf();
}
Felix Held4ef37ae2021-02-14 22:45:48 +0100100
101static const char *data_fabric_acpi_name(const struct device *dev)
102{
103 switch (dev->device) {
104 case PCI_DEVICE_ID_AMD_FAM17H_MODEL60H_DF0:
105 case PCI_DEVICE_ID_AMD_FAM19H_MODEL51H_DF0:
106 return "DFD0";
107 case PCI_DEVICE_ID_AMD_FAM17H_MODEL60H_DF1:
108 case PCI_DEVICE_ID_AMD_FAM19H_MODEL51H_DF1:
109 return "DFD1";
110 case PCI_DEVICE_ID_AMD_FAM17H_MODEL60H_DF2:
111 case PCI_DEVICE_ID_AMD_FAM19H_MODEL51H_DF2:
112 return "DFD2";
113 case PCI_DEVICE_ID_AMD_FAM17H_MODEL60H_DF3:
114 case PCI_DEVICE_ID_AMD_FAM19H_MODEL51H_DF3:
115 return "DFD3";
116 case PCI_DEVICE_ID_AMD_FAM17H_MODEL60H_DF4:
117 case PCI_DEVICE_ID_AMD_FAM19H_MODEL51H_DF4:
118 return "DFD4";
119 case PCI_DEVICE_ID_AMD_FAM17H_MODEL60H_DF5:
120 case PCI_DEVICE_ID_AMD_FAM19H_MODEL51H_DF5:
121 return "DFD5";
122 case PCI_DEVICE_ID_AMD_FAM17H_MODEL60H_DF6:
123 case PCI_DEVICE_ID_AMD_FAM19H_MODEL51H_DF6:
124 return "DFD6";
125 case PCI_DEVICE_ID_AMD_FAM17H_MODEL60H_DF7:
126 case PCI_DEVICE_ID_AMD_FAM19H_MODEL51H_DF7:
127 return "DFD7";
128 default:
129 printk(BIOS_ERR, "%s: Unhandled device id 0x%x\n", __func__, dev->device);
130 }
131
132 return NULL;
133}
134
/* Device operations for the data fabric PCI functions: they expose no
 * resources to allocate (noop read/set), and only contribute an ACPI name
 * plus a generated SSDT PCI device entry. */
static struct device_operations data_fabric_ops = {
	.read_resources = noop_read_resources,
	.set_resources = noop_set_resources,
	.acpi_name = data_fabric_acpi_name,
	.acpi_fill_ssdt = acpi_device_write_pci_dev,
};
141
/* Zero-terminated list of PCI device IDs this driver binds to. */
static const unsigned short pci_device_ids[] = {
	/* Renoir DF devices */
	PCI_DEVICE_ID_AMD_FAM17H_MODEL60H_DF0,
	PCI_DEVICE_ID_AMD_FAM17H_MODEL60H_DF1,
	PCI_DEVICE_ID_AMD_FAM17H_MODEL60H_DF2,
	PCI_DEVICE_ID_AMD_FAM17H_MODEL60H_DF3,
	PCI_DEVICE_ID_AMD_FAM17H_MODEL60H_DF4,
	PCI_DEVICE_ID_AMD_FAM17H_MODEL60H_DF5,
	PCI_DEVICE_ID_AMD_FAM17H_MODEL60H_DF6,
	PCI_DEVICE_ID_AMD_FAM17H_MODEL60H_DF7,
	/* Cezanne DF devices */
	PCI_DEVICE_ID_AMD_FAM19H_MODEL51H_DF0,
	PCI_DEVICE_ID_AMD_FAM19H_MODEL51H_DF1,
	PCI_DEVICE_ID_AMD_FAM19H_MODEL51H_DF2,
	PCI_DEVICE_ID_AMD_FAM19H_MODEL51H_DF3,
	PCI_DEVICE_ID_AMD_FAM19H_MODEL51H_DF4,
	PCI_DEVICE_ID_AMD_FAM19H_MODEL51H_DF5,
	PCI_DEVICE_ID_AMD_FAM19H_MODEL51H_DF6,
	PCI_DEVICE_ID_AMD_FAM19H_MODEL51H_DF7,
	0 /* list terminator */
};
163
/* Bind data_fabric_ops to any AMD PCI device whose ID appears in
 * pci_device_ids above. */
static const struct pci_driver data_fabric_driver __pci_driver = {
	.ops = &data_fabric_ops,
	.vendor = PCI_VENDOR_ID_AMD,
	.devices = pci_device_ids,
};