blob: 5249a944740f4cebad3cb1cb927818a9609718f9 [file] [log] [blame]
Patrick Georgiac959032020-05-05 22:49:26 +02001/* SPDX-License-Identifier: GPL-2.0-or-later */
Jonathan Zhang8f895492020-01-16 11:16:45 -08002
3#include <cbmem.h>
4#include <console/console.h>
Elyes HAOUAS32da3432020-05-17 17:15:31 +02005#include <cpu/x86/lapic_def.h>
Jonathan Zhang8f895492020-01-16 11:16:45 -08006#include <device/pci.h>
7#include <device/pci_ids.h>
Jonathan Zhang907b6f52023-01-30 12:11:52 -08008#include <drivers/ocp/include/vpd.h>
Marc Jones521a03f2020-10-19 13:46:59 -06009#include <soc/acpi.h>
Jonathan Zhang8f895492020-01-16 11:16:45 -080010#include <soc/iomap.h>
11#include <soc/pci_devs.h>
12#include <soc/ramstage.h>
Andrey Petrov662da6c2020-03-16 22:46:57 -070013#include <soc/util.h>
14#include <fsp/util.h>
Arthur Heymans77509be2020-10-22 17:11:22 +020015#include <security/intel/txt/txt_platform.h>
Arthur Heymans9d8a4552021-02-02 19:21:24 +010016#include <security/intel/txt/txt.h>
Jonathan Zhang907b6f52023-01-30 12:11:52 -080017#include <soc/numa.h>
18#include <soc/soc_util.h>
Arthur Heymans63660592022-01-06 12:28:44 +010019#include <stdint.h>
Jonathan Zhang8f895492020-01-16 11:16:45 -080020
/*
 * Proximity domains used for CXL/NUMA resource reporting.  Populated by
 * fill_pds() from mmapvtd_read_resources() when CONFIG(SOC_INTEL_HAS_CXL)
 * is enabled; num_pds stays 0 otherwise.
 */
struct proximity_domains pds = {
	.num_pds = 0,
	.pds = NULL,
};
25
/* Describes one base/limit CSR of the memory-map (VT-d) config space. */
struct map_entry {
	uint32_t reg;		/* config-space offset of the register */
	int is_64_bit;		/* nonzero: value spans two 32-bit reads */
	int is_limit;		/* nonzero: register is a range limit, not a base */
	int mask_bits;		/* low address bits not stored in the register */
	const char *description; /* name used in debug output */
};
33
/* Indices into memory_map[] (and into the mc_values[] read from it). */
enum {
	TOHM_REG,		/* top of high (above-4GiB) memory */
	MMIOL_REG,		/* low MMIO region */
	MMCFG_BASE_REG,		/* PCIe ECAM (MMCFG) base */
	MMCFG_LIMIT_REG,	/* PCIe ECAM (MMCFG) limit */
	TOLM_REG,		/* top of low (below-4GiB) memory */
	/* NCMEM and ME ranges are mutually exclusive */
	NCMEM_BASE_REG,
	NCMEM_LIMIT_REG,
	ME_BASE_REG,
	ME_LIMIT_REG,
	TSEG_BASE_REG,		/* SMM TSEG base */
	TSEG_LIMIT_REG,		/* SMM TSEG limit */
	/* Must be last. */
	NUM_MAP_ENTRIES
};
50
/*
 * Memory-map CSRs to read and report.  The MAP_ENTRY_* macros (from SoC
 * headers) fill in register offset, width, base/limit role and the number
 * of low address bits implied by each register's granularity.
 */
static struct map_entry memory_map[NUM_MAP_ENTRIES] = {
	[TOHM_REG] = MAP_ENTRY_LIMIT_64(VTD_TOHM_CSR, 26, "TOHM"),
	[MMIOL_REG] = MAP_ENTRY_BASE_32(VTD_MMIOL_CSR, "MMIOL"),
	[MMCFG_BASE_REG] = MAP_ENTRY_BASE_64(VTD_MMCFG_BASE_CSR, "MMCFG_BASE"),
	[MMCFG_LIMIT_REG] = MAP_ENTRY_LIMIT_64(VTD_MMCFG_LIMIT_CSR, 26, "MMCFG_LIMIT"),
	[TOLM_REG] = MAP_ENTRY_LIMIT_32(VTD_TOLM_CSR, 26, "TOLM"),
#if CONFIG(SOC_INTEL_HAS_NCMEM)
	[NCMEM_BASE_REG] = MAP_ENTRY_BASE_64(VTD_NCMEM_BASE_CSR, "NCMEM_BASE"),
	[NCMEM_LIMIT_REG] = MAP_ENTRY_LIMIT_64(VTD_NCMEM_LIMIT_CSR, 19, "NCMEM_LIMIT"),
#else
	[ME_BASE_REG] = MAP_ENTRY_BASE_64(VTD_ME_BASE_CSR, "ME_BASE"),
	[ME_LIMIT_REG] = MAP_ENTRY_LIMIT_64(VTD_ME_LIMIT_CSR, 19, "ME_LIMIT"),
#endif
	[TSEG_BASE_REG] = MAP_ENTRY_BASE_32(VTD_TSEG_BASE_CSR, "TSEGMB_BASE"),
	[TSEG_LIMIT_REG] = MAP_ENTRY_LIMIT_32(VTD_TSEG_LIMIT_CSR, 20, "TSEGMB_LIMIT"),
};
67
68static void read_map_entry(struct device *dev, struct map_entry *entry,
69 uint64_t *result)
70{
71 uint64_t value;
72 uint64_t mask;
73
74 /* All registers are on a 1MiB granularity. */
75 mask = ((1ULL << entry->mask_bits) - 1);
76 mask = ~mask;
77
78 value = 0;
79
80 if (entry->is_64_bit) {
81 value = pci_read_config32(dev, entry->reg + sizeof(uint32_t));
82 value <<= 32;
83 }
84
85 value |= (uint64_t)pci_read_config32(dev, entry->reg);
86 value &= mask;
87
88 if (entry->is_limit)
89 value |= ~mask;
90
91 *result = value;
92}
93
94static void mc_read_map_entries(struct device *dev, uint64_t *values)
95{
96 int i;
97 for (i = 0; i < NUM_MAP_ENTRIES; i++)
98 read_map_entry(dev, &memory_map[i], &values[i]);
99}
100
101static void mc_report_map_entries(struct device *dev, uint64_t *values)
102{
103 int i;
104 for (i = 0; i < NUM_MAP_ENTRIES; i++) {
105 printk(BIOS_DEBUG, "MC MAP: %s: 0x%llx\n",
106 memory_map[i].description, values[i]);
107 }
108}
109
Arthur Heymans77509be2020-10-22 17:11:22 +0200110static void configure_dpr(struct device *dev)
111{
112 const uintptr_t cbmem_top_mb = ALIGN_UP((uintptr_t)cbmem_top(), MiB) / MiB;
113 union dpr_register dpr = { .raw = pci_read_config32(dev, VTD_LTDPR) };
114
115 /* The DPR lock bit has to be set sufficiently early. It looks like
116 * it cannot be set anymore after FSP-S.
117 */
118 dpr.lock = 1;
119 dpr.epm = 1;
120 dpr.size = dpr.top - cbmem_top_mb;
121 pci_write_config32(dev, VTD_LTDPR, dpr.raw);
122}
123
Jonathan Zhang8f895492020-01-16 11:16:45 -0800124/*
125 * Host Memory Map:
126 *
127 * +--------------------------+ TOCM (2 pow 46 - 1)
128 * | Reserved |
129 * +--------------------------+
130 * | MMIOH (relocatable) |
131 * +--------------------------+
132 * | PCISeg |
133 * +--------------------------+ TOHM
134 * | High DRAM Memory |
135 * +--------------------------+ 4GiB (0x100000000)
136 * +--------------------------+ 0xFFFF_FFFF
137 * | Firmware |
138 * +--------------------------+ 0xFF00_0000
139 * | Reserved |
140 * +--------------------------+ 0xFEF0_0000
141 * | Local xAPIC |
142 * +--------------------------+ 0xFEE0_0000
143 * | HPET/LT/TPM/Others |
144 * +--------------------------+ 0xFED0_0000
145 * | I/O xAPIC |
146 * +--------------------------+ 0xFEC0_0000
147 * | Reserved |
148 * +--------------------------+ 0xFEB8_0000
149 * | Reserved |
150 * +--------------------------+ 0xFEB0_0000
151 * | Reserved |
152 * +--------------------------+ 0xFE00_0000
153 * | MMIOL (relocatable) |
154 * | P2SB PCR cfg BAR | (0xfd000000 - 0xfdffffff
155 * | BAR space | [mem 0x90000000-0xfcffffff] available for PCI devices
156 * +--------------------------+ 0x9000_0000
Shelley Chen4e9bb332021-10-20 15:43:45 -0700157 * |PCIe MMCFG (relocatable) | CONFIG_ECAM_MMCONF_BASE_ADDRESS 64 or 256MB
Jonathan Zhang8f895492020-01-16 11:16:45 -0800158 * | | (0x80000000 - 0x8fffffff, 0x40000)
159 * +--------------------------+ TOLM
160 * | MEseg (relocatable) | 32, 64, 128 or 256 MB (0x78000000 - 0x7fffffff, 0x20000)
161 * +--------------------------+
162 * | Tseg (relocatable) | N x 8MB (0x70000000 - 0x77ffffff, 0x20000)
Arthur Heymans77509be2020-10-22 17:11:22 +0200163 * +--------------------------+
164 * | DPR |
Jonathan Zhang43b0ed72022-12-19 15:42:56 -0800165 * +--------------------------+ 1M aligned DPR base
166 * | Unused memory |
Jonathan Zhang8f895492020-01-16 11:16:45 -0800167 * +--------------------------+ cbmem_top
168 * | Reserved - CBMEM | (0x6fffe000 - 0x6fffffff, 0x2000)
169 * +--------------------------+
170 * | Reserved - FSP | (0x6fbfe000 - 0x6fffdfff, 0x400000)
171 * +--------------------------+ top_of_ram (0x6fbfdfff)
172 * | Low DRAM Memory |
173 * +--------------------------+ FFFFF (1MB)
174 * | E & F segments |
175 * +--------------------------+ E0000
176 * | C & D segments |
177 * +--------------------------+ C0000
178 * | VGA & SMM Memory |
179 * +--------------------------+ A0000
180 * | Conventional Memory |
181 * | (DOS Range) |
182 * +--------------------------+ 0
183 */
184
185static void mc_add_dram_resources(struct device *dev, int *res_count)
186{
Kyösti Mälkki6f9c3572021-06-14 00:40:22 +0300187 const struct resource *res;
Jonathan Zhang8f895492020-01-16 11:16:45 -0800188 uint64_t mc_values[NUM_MAP_ENTRIES];
Jonathan Zhang43b0ed72022-12-19 15:42:56 -0800189 uint64_t top_of_ram;
Jonathan Zhang8f895492020-01-16 11:16:45 -0800190 int index = *res_count;
Jonathan Zhang43b0ed72022-12-19 15:42:56 -0800191 struct range_entry fsp_mem;
Jonathan Zhang8f895492020-01-16 11:16:45 -0800192
Marc Jones662ac542020-11-02 21:26:41 -0700193 /* Only add dram resources once. */
194 if (dev->bus->secondary != 0)
195 return;
196
Jonathan Zhang8f895492020-01-16 11:16:45 -0800197 /* Read in the MAP registers and report their values. */
198 mc_read_map_entries(dev, &mc_values[0]);
199 mc_report_map_entries(dev, &mc_values[0]);
200
Jonathan Zhang8f895492020-01-16 11:16:45 -0800201 /* Conventional Memory (DOS region, 0x0 to 0x9FFFF) */
Kyösti Mälkki6f9c3572021-06-14 00:40:22 +0300202 res = ram_from_to(dev, index++, 0, 0xa0000);
203 LOG_RESOURCE("legacy_ram", dev, res);
Jonathan Zhang8f895492020-01-16 11:16:45 -0800204
Jonathan Zhang43b0ed72022-12-19 15:42:56 -0800205 /* 1MB -> top_of_ram */
206 fsp_find_reserved_memory(&fsp_mem);
207 top_of_ram = range_entry_base(&fsp_mem) - 1;
208 res = ram_from_to(dev, index++, 1 * MiB, top_of_ram);
Kyösti Mälkki6f9c3572021-06-14 00:40:22 +0300209 LOG_RESOURCE("low_ram", dev, res);
Jonathan Zhang8f895492020-01-16 11:16:45 -0800210
Jonathan Zhang43b0ed72022-12-19 15:42:56 -0800211 /* top_of_ram -> cbmem_top */
212 res = ram_from_to(dev, index++, top_of_ram, (uintptr_t)cbmem_top());
213 LOG_RESOURCE("cbmem_ram", dev, res);
Jonathan Zhang8f895492020-01-16 11:16:45 -0800214
Jonathan Zhang2e495b02023-01-30 11:32:26 -0800215 /* Mark TSEG/SMM region as reserved */
216 res = reserved_ram_from_to(dev, index++, mc_values[TSEG_BASE_REG],
217 mc_values[TSEG_LIMIT_REG] + 1);
218 LOG_RESOURCE("mmio_tseg", dev, res);
219
Jonathan Zhangda538cb2022-12-19 15:49:33 -0800220 /* Reserve DPR region */
Arthur Heymans77509be2020-10-22 17:11:22 +0200221 union dpr_register dpr = { .raw = pci_read_config32(dev, VTD_LTDPR) };
222 if (dpr.size) {
Jonathan Zhang43b0ed72022-12-19 15:42:56 -0800223 /*
224 * cbmem_top -> DPR base:
225 * DPR has a 1M granularity so it's possible if cbmem_top is not 1M
226 * aligned that some memory does not get marked as assigned.
227 */
228 res = reserved_ram_from_to(dev, index++, (uintptr_t)cbmem_top(),
229 (dpr.top - dpr.size) * MiB);
230 LOG_RESOURCE("unused_dram", dev, res);
231
232 /* DPR base -> DPR top */
Kyösti Mälkki6f9c3572021-06-14 00:40:22 +0300233 res = reserved_ram_from_to(dev, index++, (dpr.top - dpr.size) * MiB,
234 dpr.top * MiB);
235 LOG_RESOURCE("dpr", dev, res);
Jonathan Zhang43b0ed72022-12-19 15:42:56 -0800236
Arthur Heymans77509be2020-10-22 17:11:22 +0200237 }
238
Jonathan Zhang43b0ed72022-12-19 15:42:56 -0800239 /* Mark TSEG/SMM region as reserved */
240 res = reserved_ram_from_to(dev, index++, mc_values[TSEG_BASE_REG],
241 mc_values[TSEG_LIMIT_REG] + 1);
242 LOG_RESOURCE("mmio_tseg", dev, res);
243
Jonathan Zhang8f895492020-01-16 11:16:45 -0800244 /* Mark region between TSEG - TOLM (eg. MESEG) as reserved */
Kyösti Mälkki6f9c3572021-06-14 00:40:22 +0300245 res = reserved_ram_from_to(dev, index++, mc_values[TSEG_LIMIT_REG] + 1,
246 mc_values[TOLM_REG]);
247 LOG_RESOURCE("mmio_tolm", dev, res);
Jonathan Zhang8f895492020-01-16 11:16:45 -0800248
Jonathan Zhang907b6f52023-01-30 12:11:52 -0800249 if (CONFIG(SOC_INTEL_HAS_CXL)) {
250 /* 4GiB -> CXL Memory */
251 uint32_t gi_mem_size;
252 gi_mem_size = get_generic_initiator_mem_size();
253
254 res = reserved_ram_from_to(dev, index++, 0x100000000,
255 mc_values[TOHM_REG] - (uint64_t)gi_mem_size + 1);
256 LOG_RESOURCE("high_ram", dev, res);
257
258 /* CXL Memory */
259 uint8_t i;
260 for (i = 0; i < pds.num_pds; i++) {
261 if (pds.pds[i].pd_type == PD_TYPE_PROCESSOR)
262 continue;
263
264 if (CONFIG(OCP_VPD)) {
265 unsigned long flags = IORESOURCE_CACHEABLE;
266 int cxl_mode = get_cxl_mode_from_vpd();
267 if (cxl_mode == CXL_SPM)
268 flags |= IORESOURCE_SOFT_RESERVE;
269 else
270 flags |= IORESOURCE_STORED;
271
272 res = fixed_mem_range_flags(dev, index++, (uint64_t)pds.pds[i].base,
273 (uint64_t)pds.pds[i].size, flags);
274 if (cxl_mode == CXL_SPM)
275 LOG_RESOURCE("specific_purpose_memory", dev, res);
276 else
277 LOG_RESOURCE("CXL_memory", dev, res);
278 }
279 }
280 } else {
281 /* 4GiB -> TOHM */
282 res = upper_ram_end(dev, index++, mc_values[TOHM_REG] + 1);
283 LOG_RESOURCE("high_ram", dev, res);
284 }
Jonathan Zhang8f895492020-01-16 11:16:45 -0800285
286 /* add MMIO CFG resource */
Kyösti Mälkki6f9c3572021-06-14 00:40:22 +0300287 res = mmio_from_to(dev, index++, mc_values[MMCFG_BASE_REG],
288 mc_values[MMCFG_LIMIT_REG] + 1);
289 LOG_RESOURCE("mmiocfg_res", dev, res);
Jonathan Zhang8f895492020-01-16 11:16:45 -0800290
291 /* add Local APIC resource */
Kyösti Mälkki6f9c3572021-06-14 00:40:22 +0300292 res = mmio_range(dev, index++, LAPIC_DEFAULT_BASE, 0x00001000);
293 LOG_RESOURCE("apic_res", dev, res);
Jonathan Zhang8f895492020-01-16 11:16:45 -0800294
295 /*
296 * Add legacy region as reserved - 0xa000 - 1MB
297 * Reserve everything between A segment and 1MB:
298 *
299 * 0xa0000 - 0xbffff: legacy VGA
300 * 0xc0000 - 0xfffff: RAM
301 */
Kyösti Mälkki6f9c3572021-06-14 00:40:22 +0300302 res = mmio_range(dev, index++, VGA_BASE_ADDRESS, VGA_BASE_SIZE);
303 LOG_RESOURCE("legacy_mmio", dev, res);
Jonathan Zhang8f895492020-01-16 11:16:45 -0800304
Kyösti Mälkki6f9c3572021-06-14 00:40:22 +0300305 res = reserved_ram_from_to(dev, index++, 0xc0000, 1 * MiB);
306 LOG_RESOURCE("legacy_write_protect", dev, res);
Jonathan Zhang8f895492020-01-16 11:16:45 -0800307
308 *res_count = index;
309}
310
311static void mmapvtd_read_resources(struct device *dev)
312{
313 int index = 0;
314
Jonathan Zhang907b6f52023-01-30 12:11:52 -0800315 if (CONFIG(SOC_INTEL_HAS_CXL)) {
316 /* Construct NUMA data structure. This is needed for CXL. */
317 if (fill_pds() != CB_SUCCESS)
318 pds.num_pds = 0;
319
320 dump_pds();
321 }
322
Jonathan Zhang8f895492020-01-16 11:16:45 -0800323 /* Read standard PCI resources. */
324 pci_dev_read_resources(dev);
325
Jonathan Zhangda538cb2022-12-19 15:49:33 -0800326 /* set up DPR */
327 configure_dpr(dev);
328
Jonathan Zhang8f895492020-01-16 11:16:45 -0800329 /* Calculate and add DRAM resources. */
330 mc_add_dram_resources(dev, &index);
331}
332
/* No device init needed; present so .init is a valid (non-NULL) hook. */
static void mmapvtd_init(struct device *dev)
{
}
336
/* Device operations for the memory map / VT-d configuration device. */
static struct device_operations mmapvtd_ops = {
	.read_resources   = mmapvtd_read_resources,
	.set_resources    = pci_dev_set_resources,
	.enable_resources = pci_dev_enable_resources,
	.init             = mmapvtd_init,
	.ops_pci          = &soc_pci_ops,
#if CONFIG(HAVE_ACPI_TABLES)
	.acpi_inject_dsdt = uncore_inject_dsdt,
#endif
};
347
/* PCI device IDs matched by mmapvtd_driver; zero-terminated list. */
static const unsigned short mmapvtd_ids[] = {
	MMAP_VTD_CFG_REG_DEVID, /* Memory Map/Intel(R) VT-d Configuration Registers */
	0
};
352
/* Bind mmapvtd_ops to the Intel memory-map/VT-d config register device. */
static const struct pci_driver mmapvtd_driver __pci_driver = {
	.ops     = &mmapvtd_ops,
	.vendor  = PCI_VID_INTEL,
	.devices = mmapvtd_ids
};
Arthur Heymans77509be2020-10-22 17:11:22 +0200358
Jonathan Zhanga5bd5802023-01-30 11:17:25 -0800359#if !CONFIG(SOC_INTEL_MMAPVTD_ONLY_FOR_DPR)
/*
 * read_resources hook for VT-d devices on stacks other than stack 0:
 * only standard PCI resources plus DPR programming — the DRAM map is
 * added once, by the stack-0 mmapvtd device.
 */
static void vtd_read_resources(struct device *dev)
{
	pci_dev_read_resources(dev);

	configure_dpr(dev);
}
366
/* Device operations for VT-d devices on non-zero stacks (no DRAM map). */
static struct device_operations vtd_ops = {
	.read_resources   = vtd_read_resources,
	.set_resources    = pci_dev_set_resources,
	.enable_resources = pci_dev_enable_resources,
	.ops_pci          = &soc_pci_ops,
};
373
/* VTD devices on other stacks */
static const struct pci_driver vtd_driver __pci_driver = {
	.ops    = &vtd_ops,
	.vendor = PCI_VID_INTEL,
	.device = MMAP_VTD_STACK_CFG_REG_DEVID,
};
Jonathan Zhanga5bd5802023-01-30 11:17:25 -0800380#endif
Arthur Heymans42a6f7e2020-11-10 16:46:18 +0100381
/*
 * Lock down the DMI3 device: disable error injection and the DMIRCBAR
 * enable bit.  Skipped entirely when TXT lockdown is deferred (presumably
 * TXT performs its own lockdown later — NOTE(review): confirm against the
 * TXT flow).
 */
static void dmi3_init(struct device *dev)
{
	if (CONFIG(INTEL_TXT) && skip_intel_txt_lockdown())
		return;
	/* Disable error injection */
	pci_or_config16(dev, ERRINJCON, 1 << 0);

	/*
	 * DMIRCBAR registers are not TXT lockable, but the BAR enable
	 * bit is. TXT requires that DMIRCBAR be disabled for security.
	 */
	pci_and_config32(dev, DMIRCBAR, ~(1 << 0));
}
395
/* Device operations for the DMI3 device; lockdown happens in .init. */
static struct device_operations dmi3_ops = {
	.read_resources   = pci_dev_read_resources,
	.set_resources    = pci_dev_set_resources,
	.enable_resources = pci_dev_enable_resources,
	.init             = dmi3_init,
	.ops_pci          = &soc_pci_ops,
};
403
/* Bind dmi3_ops to the Intel DMI3 device. */
static const struct pci_driver dmi3_driver __pci_driver = {
	.ops    = &dmi3_ops,
	.vendor = PCI_VID_INTEL,
	.device = DMI3_DEVID,
};
Arthur Heymans7a36ca52020-11-10 15:55:31 +0100409
410static void iio_dfx_global_init(struct device *dev)
411{
Arthur Heymans9d8a4552021-02-02 19:21:24 +0100412 if (CONFIG(INTEL_TXT) && skip_intel_txt_lockdown())
413 return;
414
Arthur Heymans7a36ca52020-11-10 15:55:31 +0100415 uint16_t reg16;
416 pci_or_config16(dev, IIO_DFX_LCK_CTL, 0x3ff);
417 reg16 = pci_read_config16(dev, IIO_DFX_TSWCTL0);
418 reg16 &= ~(1 << 4); // allow ib mmio cfg
419 reg16 &= ~(1 << 5); // ignore acs p2p ma lpbk
420 reg16 |= (1 << 3); // me disable
421 pci_write_config16(dev, IIO_DFX_TSWCTL0, reg16);
422}
423
/* PCI device IDs of the IIO DFX global devices; zero-terminated list. */
static const unsigned short iio_dfx_global_ids[] = {
	0x202d,
	0x203d,
	0
};
429
/* Device operations for IIO DFX global devices; lockdown in .init. */
static struct device_operations iio_dfx_global_ops = {
	.read_resources   = pci_dev_read_resources,
	.set_resources    = pci_dev_set_resources,
	.enable_resources = pci_dev_enable_resources,
	.init             = iio_dfx_global_init,
	.ops_pci          = &soc_pci_ops,
};
437
/* Bind iio_dfx_global_ops to the Intel IIO DFX global devices. */
static const struct pci_driver iio_dfx_global_driver __pci_driver = {
	.ops     = &iio_dfx_global_ops,
	.vendor  = PCI_VID_INTEL,
	.devices = iio_dfx_global_ids,
};