/* SPDX-License-Identifier: GPL-2.0-only */

/* TODO: Check if this is still correct */

#include <acpi/acpi_device.h>
#include <amdblocks/data_fabric.h>
#include <console/console.h>
#include <cpu/x86/lapic_def.h>
#include <device/device.h>
#include <device/pci.h>
#include <device/pci_ids.h>
#include <soc/data_fabric.h>
#include <soc/iomap.h>
#include <types.h>

void data_fabric_set_mmio_np(void)
{
	/*
	 * Mark the region from HPET to LAPIC, i.e. 0xfed00000 to
	 * 0xfee00000 - 1, as non-posted (NP).
	 *
	 * AGESA has already programmed the NB MMIO routing, but nothing is
	 * marked as non-posted yet.
	 *
	 * If an overlapping routing base/limit pair exists, trim its base or
	 * limit to avoid the new NP region. If a pair lies completely within
	 * the HPET-LAPIC range, remove it. If a pair surrounds the HPET-LAPIC
	 * range, it must be split into two regions.
	 *
	 * TODO(b/156296146): Remove the settings from AGESA and allow coreboot
	 * to own everything. If that is not practical, consider erasing all
	 * settings and having coreboot reprogram them. At that time, make the
	 * code below more flexible.
	 *
	 * Note that the code relies on the granularity of the HPET and LAPIC
	 * addresses being sufficiently large that the shifted limits +/-1 are
	 * always equivalent to the non-shifted values +/-1.
	 */

	unsigned int i;
	int reg;
	uint32_t base, limit, ctrl;
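	/*
	 * The DF MMIO base/limit registers hold the address bits above
	 * D18F0_MMIO_SHIFT, so the NP window boundaries below are expressed
	 * in that shifted granularity.
	 */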
	const uint32_t np_bot = HPET_BASE_ADDRESS >> D18F0_MMIO_SHIFT;
	const uint32_t np_top = (LAPIC_DEFAULT_BASE - 1) >> D18F0_MMIO_SHIFT;

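	/* Log the initial routing configuration; it is printed again at the end. */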
	data_fabric_print_mmio_conf();

	for (i = 0; i < NUM_NB_MMIO_REGS; i++) {
		/* Adjust all registers that overlap */
		ctrl = data_fabric_broadcast_read32(0, NB_MMIO_CONTROL(i));
		if (!(ctrl & (DF_MMIO_WE | DF_MMIO_RE)))
			continue; /* not enabled */

		base = data_fabric_broadcast_read32(0, NB_MMIO_BASE(i));
		limit = data_fabric_broadcast_read32(0, NB_MMIO_LIMIT(i));

		if (base > np_top || limit < np_bot)
			continue; /* no overlap at all */

		if (base >= np_bot && limit <= np_top) {
			data_fabric_disable_mmio_reg(i); /* 100% within, so remove */
			continue;
		}

		if (base < np_bot && limit > np_top) {
			/* Split the configured region */
			data_fabric_broadcast_write32(0, NB_MMIO_LIMIT(i), np_bot - 1);
			reg = data_fabric_find_unused_mmio_reg();
			if (reg < 0) {
				/*
				 * Although a pair could be freed later, this
				 * condition is very unusual and deserves
				 * analysis. Flag an error and leave the
				 * topmost part unconfigured.
				 */
				printk(BIOS_ERR, "Not enough NB MMIO routing registers\n");
				continue;
			}
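			/*
			 * Program the upper part of the original region into
			 * the newly claimed register pair, reusing the
			 * original routing/control settings.
			 */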
			data_fabric_broadcast_write32(0, NB_MMIO_BASE(reg), np_top + 1);
			data_fabric_broadcast_write32(0, NB_MMIO_LIMIT(reg), limit);
			data_fabric_broadcast_write32(0, NB_MMIO_CONTROL(reg), ctrl);
			continue;
		}

		/* If still here, adjust only the base or limit */
		if (base <= np_bot)
			data_fabric_broadcast_write32(0, NB_MMIO_LIMIT(i), np_bot - 1);
		else
			data_fabric_broadcast_write32(0, NB_MMIO_BASE(i), np_top + 1);
	}

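	/* All overlaps are resolved; claim a free register pair for the NP region itself. */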
	reg = data_fabric_find_unused_mmio_reg();
	if (reg < 0) {
		printk(BIOS_ERR, "Cannot configure region as NP\n");
		return;
	}

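	/*
	 * Program the HPET-LAPIC window: route it to IOMS0, mark it
	 * non-posted and enable reads and writes.
	 */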
	data_fabric_broadcast_write32(0, NB_MMIO_BASE(reg), np_bot);
	data_fabric_broadcast_write32(0, NB_MMIO_LIMIT(reg), np_top);
	data_fabric_broadcast_write32(0, NB_MMIO_CONTROL(reg),
			(IOMS0_FABRIC_ID << DF_MMIO_DST_FABRIC_ID_SHIFT) |
			DF_MMIO_NP | DF_MMIO_WE | DF_MMIO_RE);

	data_fabric_print_mmio_conf();
}

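/*
 * Map a DF PCI function to its ACPI device name. Renoir and Cezanne use
 * different PCI device IDs for the same DF functions, so both are handled.
 */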
static const char *data_fabric_acpi_name(const struct device *dev)
{
	switch (dev->device) {
	case PCI_DEVICE_ID_AMD_FAM17H_MODEL60H_DF0:
	case PCI_DEVICE_ID_AMD_FAM19H_MODEL51H_DF0:
		return "DFD0";
	case PCI_DEVICE_ID_AMD_FAM17H_MODEL60H_DF1:
	case PCI_DEVICE_ID_AMD_FAM19H_MODEL51H_DF1:
		return "DFD1";
	case PCI_DEVICE_ID_AMD_FAM17H_MODEL60H_DF2:
	case PCI_DEVICE_ID_AMD_FAM19H_MODEL51H_DF2:
		return "DFD2";
	case PCI_DEVICE_ID_AMD_FAM17H_MODEL60H_DF3:
	case PCI_DEVICE_ID_AMD_FAM19H_MODEL51H_DF3:
		return "DFD3";
	case PCI_DEVICE_ID_AMD_FAM17H_MODEL60H_DF4:
	case PCI_DEVICE_ID_AMD_FAM19H_MODEL51H_DF4:
		return "DFD4";
	case PCI_DEVICE_ID_AMD_FAM17H_MODEL60H_DF5:
	case PCI_DEVICE_ID_AMD_FAM19H_MODEL51H_DF5:
		return "DFD5";
	case PCI_DEVICE_ID_AMD_FAM17H_MODEL60H_DF6:
	case PCI_DEVICE_ID_AMD_FAM19H_MODEL51H_DF6:
		return "DFD6";
	case PCI_DEVICE_ID_AMD_FAM17H_MODEL60H_DF7:
	case PCI_DEVICE_ID_AMD_FAM19H_MODEL51H_DF7:
		return "DFD7";
	default:
		printk(BIOS_ERR, "%s: Unhandled device id 0x%x\n", __func__, dev->device);
	}

	return NULL;
}

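/* No resources to declare; only ACPI device entries are generated for the DF functions. */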
static struct device_operations data_fabric_ops = {
	.read_resources = noop_read_resources,
	.set_resources = noop_set_resources,
	.acpi_name = data_fabric_acpi_name,
	.acpi_fill_ssdt = acpi_device_write_pci_dev,
};

static const unsigned short pci_device_ids[] = {
	/* Renoir DF devices */
	PCI_DEVICE_ID_AMD_FAM17H_MODEL60H_DF0,
	PCI_DEVICE_ID_AMD_FAM17H_MODEL60H_DF1,
	PCI_DEVICE_ID_AMD_FAM17H_MODEL60H_DF2,
	PCI_DEVICE_ID_AMD_FAM17H_MODEL60H_DF3,
	PCI_DEVICE_ID_AMD_FAM17H_MODEL60H_DF4,
	PCI_DEVICE_ID_AMD_FAM17H_MODEL60H_DF5,
	PCI_DEVICE_ID_AMD_FAM17H_MODEL60H_DF6,
	PCI_DEVICE_ID_AMD_FAM17H_MODEL60H_DF7,
	/* Cezanne DF devices */
	PCI_DEVICE_ID_AMD_FAM19H_MODEL51H_DF0,
	PCI_DEVICE_ID_AMD_FAM19H_MODEL51H_DF1,
	PCI_DEVICE_ID_AMD_FAM19H_MODEL51H_DF2,
	PCI_DEVICE_ID_AMD_FAM19H_MODEL51H_DF3,
	PCI_DEVICE_ID_AMD_FAM19H_MODEL51H_DF4,
	PCI_DEVICE_ID_AMD_FAM19H_MODEL51H_DF5,
	PCI_DEVICE_ID_AMD_FAM19H_MODEL51H_DF6,
	PCI_DEVICE_ID_AMD_FAM19H_MODEL51H_DF7,
	0
};

static const struct pci_driver data_fabric_driver __pci_driver = {
	.ops = &data_fabric_ops,
	.vendor = PCI_VENDOR_ID_AMD,
	.devices = pci_device_ids,
};