Patrick Georgi | ac95903 | 2020-05-05 22:49:26 +0200 | [diff] [blame] | 1 | /* SPDX-License-Identifier: GPL-2.0-or-later */ |
Jonathan Zhang | 8f89549 | 2020-01-16 11:16:45 -0800 | [diff] [blame] | 2 | |
Felix Held | 97439ec | 2023-06-05 19:30:23 +0200 | [diff] [blame] | 3 | #include <arch/vga.h> |
Jonathan Zhang | 8f89549 | 2020-01-16 11:16:45 -0800 | [diff] [blame] | 4 | #include <cbmem.h> |
| 5 | #include <console/console.h> |
Elyes HAOUAS | 32da343 | 2020-05-17 17:15:31 +0200 | [diff] [blame] | 6 | #include <cpu/x86/lapic_def.h> |
Jonathan Zhang | 8f89549 | 2020-01-16 11:16:45 -0800 | [diff] [blame] | 7 | #include <device/pci.h> |
| 8 | #include <device/pci_ids.h> |
Jonathan Zhang | 907b6f5 | 2023-01-30 12:11:52 -0800 | [diff] [blame] | 9 | #include <drivers/ocp/include/vpd.h> |
Marc Jones | 521a03f | 2020-10-19 13:46:59 -0600 | [diff] [blame] | 10 | #include <soc/acpi.h> |
Jonathan Zhang | 8f89549 | 2020-01-16 11:16:45 -0800 | [diff] [blame] | 11 | #include <soc/iomap.h> |
| 12 | #include <soc/pci_devs.h> |
| 13 | #include <soc/ramstage.h> |
Andrey Petrov | 662da6c | 2020-03-16 22:46:57 -0700 | [diff] [blame] | 14 | #include <soc/util.h> |
| 15 | #include <fsp/util.h> |
Arthur Heymans | 77509be | 2020-10-22 17:11:22 +0200 | [diff] [blame] | 16 | #include <security/intel/txt/txt_platform.h> |
Arthur Heymans | 9d8a455 | 2021-02-02 19:21:24 +0100 | [diff] [blame] | 17 | #include <security/intel/txt/txt.h> |
Jonathan Zhang | 907b6f5 | 2023-01-30 12:11:52 -0800 | [diff] [blame] | 18 | #include <soc/numa.h> |
| 19 | #include <soc/soc_util.h> |
Arthur Heymans | 6366059 | 2022-01-06 12:28:44 +0100 | [diff] [blame] | 20 | #include <stdint.h> |
Jonathan Zhang | 8f89549 | 2020-01-16 11:16:45 -0800 | [diff] [blame] | 21 | |
/*
 * Proximity domains for CXL/NUMA. Populated via fill_pds() in
 * mmapvtd_read_resources() when SOC_INTEL_HAS_CXL is enabled; empty
 * otherwise.
 */
struct proximity_domains pds = {
	.num_pds = 0,
	.pds = NULL,
};
| 26 | |
/* Describes one memory-map CSR of the VT-d/memory-map PCI device. */
struct map_entry {
	uint32_t reg;		/* PCI config offset of the register */
	int is_64_bit;		/* non-zero: read 64 bits (reg and reg+4) */
	int is_limit;		/* non-zero: register is a limit (low bits filled with 1s) */
	int mask_bits;		/* number of low granularity bits to mask off */
	const char *description;	/* name used in debug output */
};
| 34 | |
/* Indices into memory_map[] / the mc_values[] array read from hardware. */
enum {
	TOHM_REG,		/* Top of high (above 4GiB) memory */
	MMIOL_REG,		/* Low MMIO base */
	MMCFG_BASE_REG,		/* PCIe ECAM/MMCFG base */
	MMCFG_LIMIT_REG,	/* PCIe ECAM/MMCFG limit */
	TOLM_REG,		/* Top of low (below 4GiB) memory */
	/* NCMEM and ME ranges are mutually exclusive */
	NCMEM_BASE_REG,
	NCMEM_LIMIT_REG,
	ME_BASE_REG,
	ME_LIMIT_REG,
	TSEG_BASE_REG,		/* SMM/TSEG base */
	TSEG_LIMIT_REG,		/* SMM/TSEG limit */
	/* Must be last. */
	NUM_MAP_ENTRIES
};
| 51 | |
/*
 * Table of memory-map CSRs to read. The MAP_ENTRY_* macros (declared
 * elsewhere) encode register width, base-vs-limit semantics and the
 * granularity mask bits per register.
 */
static struct map_entry memory_map[NUM_MAP_ENTRIES] = {
	[TOHM_REG] = MAP_ENTRY_LIMIT_64(VTD_TOHM_CSR, 26, "TOHM"),
	[MMIOL_REG] = MAP_ENTRY_BASE_32(VTD_MMIOL_CSR, "MMIOL"),
	[MMCFG_BASE_REG] = MAP_ENTRY_BASE_64(VTD_MMCFG_BASE_CSR, "MMCFG_BASE"),
	[MMCFG_LIMIT_REG] = MAP_ENTRY_LIMIT_64(VTD_MMCFG_LIMIT_CSR, 26, "MMCFG_LIMIT"),
	[TOLM_REG] = MAP_ENTRY_LIMIT_32(VTD_TOLM_CSR, 26, "TOLM"),
#if CONFIG(SOC_INTEL_HAS_NCMEM)
	[NCMEM_BASE_REG] = MAP_ENTRY_BASE_64(VTD_NCMEM_BASE_CSR, "NCMEM_BASE"),
	[NCMEM_LIMIT_REG] = MAP_ENTRY_LIMIT_64(VTD_NCMEM_LIMIT_CSR, 19, "NCMEM_LIMIT"),
#else
	[ME_BASE_REG] = MAP_ENTRY_BASE_64(VTD_ME_BASE_CSR, "ME_BASE"),
	[ME_LIMIT_REG] = MAP_ENTRY_LIMIT_64(VTD_ME_LIMIT_CSR, 19, "ME_LIMIT"),
#endif
	[TSEG_BASE_REG] = MAP_ENTRY_BASE_32(VTD_TSEG_BASE_CSR, "TSEGMB_BASE"),
	[TSEG_LIMIT_REG] = MAP_ENTRY_LIMIT_32(VTD_TSEG_LIMIT_CSR, 20, "TSEGMB_LIMIT"),
};
| 68 | |
| 69 | static void read_map_entry(struct device *dev, struct map_entry *entry, |
| 70 | uint64_t *result) |
| 71 | { |
| 72 | uint64_t value; |
| 73 | uint64_t mask; |
| 74 | |
| 75 | /* All registers are on a 1MiB granularity. */ |
| 76 | mask = ((1ULL << entry->mask_bits) - 1); |
| 77 | mask = ~mask; |
| 78 | |
| 79 | value = 0; |
| 80 | |
| 81 | if (entry->is_64_bit) { |
| 82 | value = pci_read_config32(dev, entry->reg + sizeof(uint32_t)); |
| 83 | value <<= 32; |
| 84 | } |
| 85 | |
| 86 | value |= (uint64_t)pci_read_config32(dev, entry->reg); |
| 87 | value &= mask; |
| 88 | |
| 89 | if (entry->is_limit) |
| 90 | value |= ~mask; |
| 91 | |
| 92 | *result = value; |
| 93 | } |
| 94 | |
| 95 | static void mc_read_map_entries(struct device *dev, uint64_t *values) |
| 96 | { |
| 97 | int i; |
| 98 | for (i = 0; i < NUM_MAP_ENTRIES; i++) |
| 99 | read_map_entry(dev, &memory_map[i], &values[i]); |
| 100 | } |
| 101 | |
| 102 | static void mc_report_map_entries(struct device *dev, uint64_t *values) |
| 103 | { |
| 104 | int i; |
| 105 | for (i = 0; i < NUM_MAP_ENTRIES; i++) { |
| 106 | printk(BIOS_DEBUG, "MC MAP: %s: 0x%llx\n", |
| 107 | memory_map[i].description, values[i]); |
| 108 | } |
| 109 | } |
| 110 | |
Arthur Heymans | 77509be | 2020-10-22 17:11:22 +0200 | [diff] [blame] | 111 | static void configure_dpr(struct device *dev) |
| 112 | { |
| 113 | const uintptr_t cbmem_top_mb = ALIGN_UP((uintptr_t)cbmem_top(), MiB) / MiB; |
| 114 | union dpr_register dpr = { .raw = pci_read_config32(dev, VTD_LTDPR) }; |
| 115 | |
| 116 | /* The DPR lock bit has to be set sufficiently early. It looks like |
| 117 | * it cannot be set anymore after FSP-S. |
| 118 | */ |
| 119 | dpr.lock = 1; |
| 120 | dpr.epm = 1; |
| 121 | dpr.size = dpr.top - cbmem_top_mb; |
| 122 | pci_write_config32(dev, VTD_LTDPR, dpr.raw); |
| 123 | } |
| 124 | |
Jonathan Zhang | 8f89549 | 2020-01-16 11:16:45 -0800 | [diff] [blame] | 125 | /* |
| 126 | * Host Memory Map: |
| 127 | * |
| 128 | * +--------------------------+ TOCM (2 pow 46 - 1) |
| 129 | * | Reserved | |
| 130 | * +--------------------------+ |
| 131 | * | MMIOH (relocatable) | |
| 132 | * +--------------------------+ |
| 133 | * | PCISeg | |
| 134 | * +--------------------------+ TOHM |
| 135 | * | High DRAM Memory | |
| 136 | * +--------------------------+ 4GiB (0x100000000) |
| 137 | * +--------------------------+ 0xFFFF_FFFF |
| 138 | * | Firmware | |
| 139 | * +--------------------------+ 0xFF00_0000 |
| 140 | * | Reserved | |
| 141 | * +--------------------------+ 0xFEF0_0000 |
| 142 | * | Local xAPIC | |
| 143 | * +--------------------------+ 0xFEE0_0000 |
| 144 | * | HPET/LT/TPM/Others | |
| 145 | * +--------------------------+ 0xFED0_0000 |
| 146 | * | I/O xAPIC | |
| 147 | * +--------------------------+ 0xFEC0_0000 |
| 148 | * | Reserved | |
| 149 | * +--------------------------+ 0xFEB8_0000 |
| 150 | * | Reserved | |
| 151 | * +--------------------------+ 0xFEB0_0000 |
| 152 | * | Reserved | |
| 153 | * +--------------------------+ 0xFE00_0000 |
| 154 | * | MMIOL (relocatable) | |
 * | P2SB PCR cfg BAR        | (0xfd000000 - 0xfdffffff)
| 156 | * | BAR space | [mem 0x90000000-0xfcffffff] available for PCI devices |
| 157 | * +--------------------------+ 0x9000_0000 |
Shelley Chen | 4e9bb33 | 2021-10-20 15:43:45 -0700 | [diff] [blame] | 158 | * |PCIe MMCFG (relocatable) | CONFIG_ECAM_MMCONF_BASE_ADDRESS 64 or 256MB |
Jonathan Zhang | 8f89549 | 2020-01-16 11:16:45 -0800 | [diff] [blame] | 159 | * | | (0x80000000 - 0x8fffffff, 0x40000) |
| 160 | * +--------------------------+ TOLM |
| 161 | * | MEseg (relocatable) | 32, 64, 128 or 256 MB (0x78000000 - 0x7fffffff, 0x20000) |
| 162 | * +--------------------------+ |
| 163 | * | Tseg (relocatable) | N x 8MB (0x70000000 - 0x77ffffff, 0x20000) |
Arthur Heymans | 77509be | 2020-10-22 17:11:22 +0200 | [diff] [blame] | 164 | * +--------------------------+ |
| 165 | * | DPR | |
Jonathan Zhang | 43b0ed7 | 2022-12-19 15:42:56 -0800 | [diff] [blame] | 166 | * +--------------------------+ 1M aligned DPR base |
| 167 | * | Unused memory | |
Jonathan Zhang | 8f89549 | 2020-01-16 11:16:45 -0800 | [diff] [blame] | 168 | * +--------------------------+ cbmem_top |
| 169 | * | Reserved - CBMEM | (0x6fffe000 - 0x6fffffff, 0x2000) |
| 170 | * +--------------------------+ |
| 171 | * | Reserved - FSP | (0x6fbfe000 - 0x6fffdfff, 0x400000) |
| 172 | * +--------------------------+ top_of_ram (0x6fbfdfff) |
| 173 | * | Low DRAM Memory | |
| 174 | * +--------------------------+ FFFFF (1MB) |
| 175 | * | E & F segments | |
| 176 | * +--------------------------+ E0000 |
| 177 | * | C & D segments | |
| 178 | * +--------------------------+ C0000 |
| 179 | * | VGA & SMM Memory | |
| 180 | * +--------------------------+ A0000 |
| 181 | * | Conventional Memory | |
| 182 | * | (DOS Range) | |
| 183 | * +--------------------------+ 0 |
| 184 | */ |
| 185 | |
| 186 | static void mc_add_dram_resources(struct device *dev, int *res_count) |
| 187 | { |
Kyösti Mälkki | 6f9c357 | 2021-06-14 00:40:22 +0300 | [diff] [blame] | 188 | const struct resource *res; |
Jonathan Zhang | 8f89549 | 2020-01-16 11:16:45 -0800 | [diff] [blame] | 189 | uint64_t mc_values[NUM_MAP_ENTRIES]; |
Jonathan Zhang | 43b0ed7 | 2022-12-19 15:42:56 -0800 | [diff] [blame] | 190 | uint64_t top_of_ram; |
Jonathan Zhang | 8f89549 | 2020-01-16 11:16:45 -0800 | [diff] [blame] | 191 | int index = *res_count; |
Jonathan Zhang | 43b0ed7 | 2022-12-19 15:42:56 -0800 | [diff] [blame] | 192 | struct range_entry fsp_mem; |
Jonathan Zhang | 8f89549 | 2020-01-16 11:16:45 -0800 | [diff] [blame] | 193 | |
Marc Jones | 662ac54 | 2020-11-02 21:26:41 -0700 | [diff] [blame] | 194 | /* Only add dram resources once. */ |
Felix Held | 3b5b66d | 2024-01-11 22:26:18 +0100 | [diff] [blame] | 195 | if (dev->bus->secondary != 0 || dev->bus->segment_group != 0) |
Marc Jones | 662ac54 | 2020-11-02 21:26:41 -0700 | [diff] [blame] | 196 | return; |
| 197 | |
Jonathan Zhang | 8f89549 | 2020-01-16 11:16:45 -0800 | [diff] [blame] | 198 | /* Read in the MAP registers and report their values. */ |
| 199 | mc_read_map_entries(dev, &mc_values[0]); |
| 200 | mc_report_map_entries(dev, &mc_values[0]); |
| 201 | |
Jonathan Zhang | 8f89549 | 2020-01-16 11:16:45 -0800 | [diff] [blame] | 202 | /* Conventional Memory (DOS region, 0x0 to 0x9FFFF) */ |
Kyösti Mälkki | 6f9c357 | 2021-06-14 00:40:22 +0300 | [diff] [blame] | 203 | res = ram_from_to(dev, index++, 0, 0xa0000); |
| 204 | LOG_RESOURCE("legacy_ram", dev, res); |
Jonathan Zhang | 8f89549 | 2020-01-16 11:16:45 -0800 | [diff] [blame] | 205 | |
Jonathan Zhang | 43b0ed7 | 2022-12-19 15:42:56 -0800 | [diff] [blame] | 206 | /* 1MB -> top_of_ram */ |
| 207 | fsp_find_reserved_memory(&fsp_mem); |
| 208 | top_of_ram = range_entry_base(&fsp_mem) - 1; |
| 209 | res = ram_from_to(dev, index++, 1 * MiB, top_of_ram); |
Kyösti Mälkki | 6f9c357 | 2021-06-14 00:40:22 +0300 | [diff] [blame] | 210 | LOG_RESOURCE("low_ram", dev, res); |
Jonathan Zhang | 8f89549 | 2020-01-16 11:16:45 -0800 | [diff] [blame] | 211 | |
Jonathan Zhang | 43b0ed7 | 2022-12-19 15:42:56 -0800 | [diff] [blame] | 212 | /* top_of_ram -> cbmem_top */ |
| 213 | res = ram_from_to(dev, index++, top_of_ram, (uintptr_t)cbmem_top()); |
| 214 | LOG_RESOURCE("cbmem_ram", dev, res); |
Jonathan Zhang | 8f89549 | 2020-01-16 11:16:45 -0800 | [diff] [blame] | 215 | |
Jonathan Zhang | 2e495b0 | 2023-01-30 11:32:26 -0800 | [diff] [blame] | 216 | /* Mark TSEG/SMM region as reserved */ |
| 217 | res = reserved_ram_from_to(dev, index++, mc_values[TSEG_BASE_REG], |
| 218 | mc_values[TSEG_LIMIT_REG] + 1); |
| 219 | LOG_RESOURCE("mmio_tseg", dev, res); |
| 220 | |
Jonathan Zhang | da538cb | 2022-12-19 15:49:33 -0800 | [diff] [blame] | 221 | /* Reserve DPR region */ |
Arthur Heymans | 77509be | 2020-10-22 17:11:22 +0200 | [diff] [blame] | 222 | union dpr_register dpr = { .raw = pci_read_config32(dev, VTD_LTDPR) }; |
| 223 | if (dpr.size) { |
Jonathan Zhang | 43b0ed7 | 2022-12-19 15:42:56 -0800 | [diff] [blame] | 224 | /* |
| 225 | * cbmem_top -> DPR base: |
| 226 | * DPR has a 1M granularity so it's possible if cbmem_top is not 1M |
| 227 | * aligned that some memory does not get marked as assigned. |
| 228 | */ |
| 229 | res = reserved_ram_from_to(dev, index++, (uintptr_t)cbmem_top(), |
| 230 | (dpr.top - dpr.size) * MiB); |
| 231 | LOG_RESOURCE("unused_dram", dev, res); |
| 232 | |
| 233 | /* DPR base -> DPR top */ |
Kyösti Mälkki | 6f9c357 | 2021-06-14 00:40:22 +0300 | [diff] [blame] | 234 | res = reserved_ram_from_to(dev, index++, (dpr.top - dpr.size) * MiB, |
| 235 | dpr.top * MiB); |
| 236 | LOG_RESOURCE("dpr", dev, res); |
Jonathan Zhang | 43b0ed7 | 2022-12-19 15:42:56 -0800 | [diff] [blame] | 237 | |
Arthur Heymans | 77509be | 2020-10-22 17:11:22 +0200 | [diff] [blame] | 238 | } |
| 239 | |
Jonathan Zhang | 43b0ed7 | 2022-12-19 15:42:56 -0800 | [diff] [blame] | 240 | /* Mark TSEG/SMM region as reserved */ |
| 241 | res = reserved_ram_from_to(dev, index++, mc_values[TSEG_BASE_REG], |
| 242 | mc_values[TSEG_LIMIT_REG] + 1); |
| 243 | LOG_RESOURCE("mmio_tseg", dev, res); |
| 244 | |
Jonathan Zhang | 8f89549 | 2020-01-16 11:16:45 -0800 | [diff] [blame] | 245 | /* Mark region between TSEG - TOLM (eg. MESEG) as reserved */ |
Kyösti Mälkki | 6f9c357 | 2021-06-14 00:40:22 +0300 | [diff] [blame] | 246 | res = reserved_ram_from_to(dev, index++, mc_values[TSEG_LIMIT_REG] + 1, |
| 247 | mc_values[TOLM_REG]); |
| 248 | LOG_RESOURCE("mmio_tolm", dev, res); |
Jonathan Zhang | 8f89549 | 2020-01-16 11:16:45 -0800 | [diff] [blame] | 249 | |
Jonathan Zhang | 907b6f5 | 2023-01-30 12:11:52 -0800 | [diff] [blame] | 250 | if (CONFIG(SOC_INTEL_HAS_CXL)) { |
| 251 | /* 4GiB -> CXL Memory */ |
| 252 | uint32_t gi_mem_size; |
Johnny Lin | 514930c | 2023-04-11 15:30:02 +0800 | [diff] [blame] | 253 | gi_mem_size = get_generic_initiator_mem_size(); /* unit: 64MB */ |
| 254 | /* |
| 255 | * Memory layout when there is CXL HDM (Host-managed Device Memory): |
| 256 | * -------------- <- TOHM |
| 257 | * CXL memory regions (pds global variable records the base/size of them) |
| 258 | * Processor attached high memory |
| 259 | * -------------- <- 0x100000000 (4GB) |
| 260 | */ |
| 261 | res = upper_ram_end(dev, index++, |
| 262 | mc_values[TOHM_REG] - ((uint64_t)gi_mem_size << 26) + 1); |
Jonathan Zhang | 907b6f5 | 2023-01-30 12:11:52 -0800 | [diff] [blame] | 263 | LOG_RESOURCE("high_ram", dev, res); |
| 264 | |
| 265 | /* CXL Memory */ |
| 266 | uint8_t i; |
| 267 | for (i = 0; i < pds.num_pds; i++) { |
| 268 | if (pds.pds[i].pd_type == PD_TYPE_PROCESSOR) |
| 269 | continue; |
| 270 | |
| 271 | if (CONFIG(OCP_VPD)) { |
| 272 | unsigned long flags = IORESOURCE_CACHEABLE; |
| 273 | int cxl_mode = get_cxl_mode_from_vpd(); |
| 274 | if (cxl_mode == CXL_SPM) |
| 275 | flags |= IORESOURCE_SOFT_RESERVE; |
| 276 | else |
| 277 | flags |= IORESOURCE_STORED; |
| 278 | |
Johnny Lin | 514930c | 2023-04-11 15:30:02 +0800 | [diff] [blame] | 279 | res = fixed_mem_range_flags(dev, index++, |
| 280 | (uint64_t)pds.pds[i].base << 26, |
| 281 | (uint64_t)pds.pds[i].size << 26, flags); |
Jonathan Zhang | 907b6f5 | 2023-01-30 12:11:52 -0800 | [diff] [blame] | 282 | if (cxl_mode == CXL_SPM) |
| 283 | LOG_RESOURCE("specific_purpose_memory", dev, res); |
| 284 | else |
| 285 | LOG_RESOURCE("CXL_memory", dev, res); |
| 286 | } |
| 287 | } |
| 288 | } else { |
| 289 | /* 4GiB -> TOHM */ |
| 290 | res = upper_ram_end(dev, index++, mc_values[TOHM_REG] + 1); |
| 291 | LOG_RESOURCE("high_ram", dev, res); |
| 292 | } |
Jonathan Zhang | 8f89549 | 2020-01-16 11:16:45 -0800 | [diff] [blame] | 293 | |
| 294 | /* add MMIO CFG resource */ |
Kyösti Mälkki | 6f9c357 | 2021-06-14 00:40:22 +0300 | [diff] [blame] | 295 | res = mmio_from_to(dev, index++, mc_values[MMCFG_BASE_REG], |
| 296 | mc_values[MMCFG_LIMIT_REG] + 1); |
| 297 | LOG_RESOURCE("mmiocfg_res", dev, res); |
Jonathan Zhang | 8f89549 | 2020-01-16 11:16:45 -0800 | [diff] [blame] | 298 | |
| 299 | /* add Local APIC resource */ |
Kyösti Mälkki | 6f9c357 | 2021-06-14 00:40:22 +0300 | [diff] [blame] | 300 | res = mmio_range(dev, index++, LAPIC_DEFAULT_BASE, 0x00001000); |
| 301 | LOG_RESOURCE("apic_res", dev, res); |
Jonathan Zhang | 8f89549 | 2020-01-16 11:16:45 -0800 | [diff] [blame] | 302 | |
| 303 | /* |
| 304 | * Add legacy region as reserved - 0xa000 - 1MB |
| 305 | * Reserve everything between A segment and 1MB: |
| 306 | * |
| 307 | * 0xa0000 - 0xbffff: legacy VGA |
| 308 | * 0xc0000 - 0xfffff: RAM |
| 309 | */ |
Felix Held | 97439ec | 2023-06-05 19:30:23 +0200 | [diff] [blame] | 310 | res = mmio_range(dev, index++, VGA_MMIO_BASE, VGA_MMIO_SIZE); |
Kyösti Mälkki | 6f9c357 | 2021-06-14 00:40:22 +0300 | [diff] [blame] | 311 | LOG_RESOURCE("legacy_mmio", dev, res); |
Jonathan Zhang | 8f89549 | 2020-01-16 11:16:45 -0800 | [diff] [blame] | 312 | |
Kyösti Mälkki | 6f9c357 | 2021-06-14 00:40:22 +0300 | [diff] [blame] | 313 | res = reserved_ram_from_to(dev, index++, 0xc0000, 1 * MiB); |
| 314 | LOG_RESOURCE("legacy_write_protect", dev, res); |
Jonathan Zhang | 8f89549 | 2020-01-16 11:16:45 -0800 | [diff] [blame] | 315 | |
| 316 | *res_count = index; |
| 317 | } |
| 318 | |
| 319 | static void mmapvtd_read_resources(struct device *dev) |
| 320 | { |
| 321 | int index = 0; |
| 322 | |
Jonathan Zhang | 907b6f5 | 2023-01-30 12:11:52 -0800 | [diff] [blame] | 323 | if (CONFIG(SOC_INTEL_HAS_CXL)) { |
| 324 | /* Construct NUMA data structure. This is needed for CXL. */ |
| 325 | if (fill_pds() != CB_SUCCESS) |
| 326 | pds.num_pds = 0; |
| 327 | |
| 328 | dump_pds(); |
| 329 | } |
| 330 | |
Jonathan Zhang | 8f89549 | 2020-01-16 11:16:45 -0800 | [diff] [blame] | 331 | /* Read standard PCI resources. */ |
| 332 | pci_dev_read_resources(dev); |
| 333 | |
Jonathan Zhang | da538cb | 2022-12-19 15:49:33 -0800 | [diff] [blame] | 334 | /* set up DPR */ |
| 335 | configure_dpr(dev); |
| 336 | |
Jonathan Zhang | 8f89549 | 2020-01-16 11:16:45 -0800 | [diff] [blame] | 337 | /* Calculate and add DRAM resources. */ |
| 338 | mc_add_dram_resources(dev, &index); |
| 339 | } |
| 340 | |
/* No device init required; all work happens in .read_resources. */
static void mmapvtd_init(struct device *dev)
{
}
| 344 | |
/* Device operations for the memory-map/VT-d configuration device. */
static struct device_operations mmapvtd_ops = {
	.read_resources    = mmapvtd_read_resources,
	.set_resources     = pci_dev_set_resources,
	.enable_resources  = pci_dev_enable_resources,
	.init              = mmapvtd_init,
	.ops_pci           = &soc_pci_ops,
#if CONFIG(HAVE_ACPI_TABLES)
	.acpi_fill_ssdt    = uncore_fill_ssdt,
#endif
};
| 355 | |
/* Zero-terminated list of supported PCI device IDs. */
static const unsigned short mmapvtd_ids[] = {
	MMAP_VTD_CFG_REG_DEVID, /* Memory Map/Intel® VT-d Configuration Registers */
	0
};
| 360 | |
/* Bind mmapvtd_ops to the memory-map/VT-d configuration device. */
static const struct pci_driver mmapvtd_driver __pci_driver = {
	.ops     = &mmapvtd_ops,
	.vendor  = PCI_VID_INTEL,
	.devices = mmapvtd_ids
};
Arthur Heymans | 77509be | 2020-10-22 17:11:22 +0200 | [diff] [blame] | 366 | |
Jonathan Zhang | a5bd580 | 2023-01-30 11:17:25 -0800 | [diff] [blame] | 367 | #if !CONFIG(SOC_INTEL_MMAPVTD_ONLY_FOR_DPR) |
/*
 * .read_resources hook for VT-d devices on the other (non-primary)
 * stacks: only standard PCI resources plus DPR programming; DRAM
 * resources are added once by the primary device above.
 */
static void vtd_read_resources(struct device *dev)
{
	pci_dev_read_resources(dev);

	configure_dpr(dev);
}
| 374 | |
/* Device operations for VT-d devices on secondary stacks. */
static struct device_operations vtd_ops = {
	.read_resources   = vtd_read_resources,
	.set_resources    = pci_dev_set_resources,
	.enable_resources = pci_dev_enable_resources,
	.ops_pci          = &soc_pci_ops,
};
| 381 | |
/* VTD devices on other stacks */
static const struct pci_driver vtd_driver __pci_driver = {
	.ops    = &vtd_ops,
	.vendor = PCI_VID_INTEL,
	.device = MMAP_VTD_STACK_CFG_REG_DEVID,
};
Jonathan Zhang | a5bd580 | 2023-01-30 11:17:25 -0800 | [diff] [blame] | 388 | #endif |
Arthur Heymans | 42a6f7e | 2020-11-10 16:46:18 +0100 | [diff] [blame] | 389 | |
/*
 * Lock down the DMI3 device: disable error injection and the DMIRCBAR,
 * unless TXT lockdown is intentionally being skipped.
 */
static void dmi3_init(struct device *dev)
{
	if (CONFIG(INTEL_TXT) && skip_intel_txt_lockdown())
		return;
	/* Disable error injection */
	pci_or_config16(dev, ERRINJCON, 1 << 0);

	/*
	 * DMIRCBAR registers are not TXT lockable, but the BAR enable
	 * bit is. TXT requires that DMIRCBAR be disabled for security.
	 */
	pci_and_config32(dev, DMIRCBAR, ~(1 << 0));
}
| 403 | |
/* Device operations for the DMI3 device. */
static struct device_operations dmi3_ops = {
	.read_resources   = pci_dev_read_resources,
	.set_resources    = pci_dev_set_resources,
	.enable_resources = pci_dev_enable_resources,
	.init             = dmi3_init,
	.ops_pci          = &soc_pci_ops,
};
| 411 | |
/* Bind dmi3_ops to the DMI3 device. */
static const struct pci_driver dmi3_driver __pci_driver = {
	.ops    = &dmi3_ops,
	.vendor = PCI_VID_INTEL,
	.device = DMI3_DEVID,
};
Arthur Heymans | 7a36ca5 | 2020-11-10 15:55:31 +0100 | [diff] [blame] | 417 | |
| 418 | static void iio_dfx_global_init(struct device *dev) |
| 419 | { |
Arthur Heymans | 9d8a455 | 2021-02-02 19:21:24 +0100 | [diff] [blame] | 420 | if (CONFIG(INTEL_TXT) && skip_intel_txt_lockdown()) |
| 421 | return; |
| 422 | |
Arthur Heymans | 7a36ca5 | 2020-11-10 15:55:31 +0100 | [diff] [blame] | 423 | uint16_t reg16; |
| 424 | pci_or_config16(dev, IIO_DFX_LCK_CTL, 0x3ff); |
| 425 | reg16 = pci_read_config16(dev, IIO_DFX_TSWCTL0); |
| 426 | reg16 &= ~(1 << 4); // allow ib mmio cfg |
| 427 | reg16 &= ~(1 << 5); // ignore acs p2p ma lpbk |
| 428 | reg16 |= (1 << 3); // me disable |
| 429 | pci_write_config16(dev, IIO_DFX_TSWCTL0, reg16); |
| 430 | } |
| 431 | |
| 432 | static const unsigned short iio_dfx_global_ids[] = { |
| 433 | 0x202d, |
| 434 | 0x203d, |
| 435 | 0 |
| 436 | }; |
| 437 | |
/* Device operations for the IIO DFX global device. */
static struct device_operations iio_dfx_global_ops = {
	.read_resources   = pci_dev_read_resources,
	.set_resources    = pci_dev_set_resources,
	.enable_resources = pci_dev_enable_resources,
	.init             = iio_dfx_global_init,
	.ops_pci          = &soc_pci_ops,
};
| 445 | |
/* Bind iio_dfx_global_ops to the IIO DFX global devices. */
static const struct pci_driver iio_dfx_global_driver __pci_driver = {
	.ops     = &iio_dfx_global_ops,
	.vendor  = PCI_VID_INTEL,
	.devices = iio_dfx_global_ids,
};