/* SPDX-License-Identifier: GPL-2.0-or-later */

#include <arch/vga.h>
#include <cbmem.h>
#include <console/console.h>
#include <cpu/x86/lapic_def.h>
#include <device/pci.h>
#include <device/pci_ids.h>
#include <drivers/ocp/include/vpd.h>
#include <soc/acpi.h>
#include <soc/iomap.h>
#include <soc/pci_devs.h>
#include <soc/ramstage.h>
#include <soc/util.h>
#include <fsp/util.h>
#include <security/intel/txt/txt_platform.h>
#include <security/intel/txt/txt.h>
#include <soc/numa.h>
#include <soc/soc_util.h>
#include <stdint.h>

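/*
 * Proximity domains for CXL / generic-initiator memory. The table is filled
 * in by fill_pds() from mmapvtd_read_resources() when SOC_INTEL_HAS_CXL is
 * enabled; until then it stays empty.
 */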
struct proximity_domains pds = {
	.num_pds = 0,
	.pds = NULL,
};

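/*
 * Describes one address-map CSR in the VT-d/MMAP config space: which config
 * register to read, whether it is a 64-bit pair, whether it encodes a base or
 * a limit, and how many low address bits (mask_bits) fall below the
 * register's granularity.
 */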
struct map_entry {
	uint32_t reg;
	int is_64_bit;
	int is_limit;
	int mask_bits;
	const char *description;
};

enum {
	TOHM_REG,
	MMIOL_REG,
	MMCFG_BASE_REG,
	MMCFG_LIMIT_REG,
	TOLM_REG,
	/* NCMEM and ME ranges are mutually exclusive */
	NCMEM_BASE_REG,
	NCMEM_LIMIT_REG,
	ME_BASE_REG,
	ME_LIMIT_REG,
	TSEG_BASE_REG,
	TSEG_LIMIT_REG,
	VTDBAR_REG,
	/* Must be last. */
	NUM_MAP_ENTRIES
};

static struct map_entry memory_map[NUM_MAP_ENTRIES] = {
	[TOHM_REG] = MAP_ENTRY_LIMIT_64(VTD_TOHM_CSR, 26, "TOHM"),
	[MMIOL_REG] = MAP_ENTRY_BASE_32(VTD_MMIOL_CSR, "MMIOL"),
	[MMCFG_BASE_REG] = MAP_ENTRY_BASE_64(VTD_MMCFG_BASE_CSR, "MMCFG_BASE"),
	[MMCFG_LIMIT_REG] = MAP_ENTRY_LIMIT_64(VTD_MMCFG_LIMIT_CSR, 26, "MMCFG_LIMIT"),
	[TOLM_REG] = MAP_ENTRY_LIMIT_32(VTD_TOLM_CSR, 26, "TOLM"),
#if CONFIG(SOC_INTEL_HAS_NCMEM)
	[NCMEM_BASE_REG] = MAP_ENTRY_BASE_64(VTD_NCMEM_BASE_CSR, "NCMEM_BASE"),
	[NCMEM_LIMIT_REG] = MAP_ENTRY_LIMIT_64(VTD_NCMEM_LIMIT_CSR, 19, "NCMEM_LIMIT"),
#else
	[ME_BASE_REG] = MAP_ENTRY_BASE_64(VTD_ME_BASE_CSR, "ME_BASE"),
	[ME_LIMIT_REG] = MAP_ENTRY_LIMIT_64(VTD_ME_LIMIT_CSR, 19, "ME_LIMIT"),
#endif
	[TSEG_BASE_REG] = MAP_ENTRY_BASE_32(VTD_TSEG_BASE_CSR, "TSEGMB_BASE"),
	[TSEG_LIMIT_REG] = MAP_ENTRY_LIMIT_32(VTD_TSEG_LIMIT_CSR, 20, "TSEGMB_LIMIT"),
	[VTDBAR_REG] = MAP_ENTRY_BASE_32(VTD_BAR_CSR, "VTD_BAR"),
};

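/*
 * Read one map_entry CSR and expand it into a full 64-bit address. Base
 * registers get the low mask_bits cleared; limit registers additionally get
 * them set, so the result points at the last byte of the range. For example
 * (hypothetical raw value), a TOLM limit register reading 0x80000000 with
 * mask_bits = 26 decodes to 0x83ffffff.
 */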
static void read_map_entry(struct device *dev, struct map_entry *entry,
			   uint64_t *result)
{
	uint64_t value;
	uint64_t mask;

	if (!entry->reg) {
		*result = 0;
		return;
	}
	if (entry->reg == VTD_BAR_CSR && !(pci_read_config32(dev, entry->reg) & 1)) {
		/* VTDBAR is not enabled */
		*result = 0;
		return;
	}

	mask = ((1ULL << entry->mask_bits) - 1);
	mask = ~mask;

	value = 0;

	if (entry->is_64_bit) {
		value = pci_read_config32(dev, entry->reg + sizeof(uint32_t));
		value <<= 32;
	}

	value |= (uint64_t)pci_read_config32(dev, entry->reg);
	value &= mask;

	if (entry->is_limit)
		value |= ~mask;

	*result = value;
}

static void mc_read_map_entries(struct device *dev, uint64_t *values)
{
	int i;
	for (i = 0; i < NUM_MAP_ENTRIES; i++)
		read_map_entry(dev, &memory_map[i], &values[i]);
}

static void mc_report_map_entries(struct device *dev, uint64_t *values)
{
	int i;
	for (i = 0; i < NUM_MAP_ENTRIES; i++) {
		if (!memory_map[i].description)
			continue;

		printk(BIOS_DEBUG, "%s: MC MAP: %s: 0x%llx\n",
		       dev_path(dev), memory_map[i].description, values[i]);
	}
}

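/*
 * Program the DMA Protected Range (DPR) so that it covers everything from
 * cbmem_top up to dpr.top. The top and size fields are in 1 MiB units, which
 * is why cbmem_top is rounded up to a MiB boundary first.
 */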
static void configure_dpr(struct device *dev)
{
	const uintptr_t cbmem_top_mb = ALIGN_UP((uintptr_t)cbmem_top(), MiB) / MiB;
	union dpr_register dpr = { .raw = pci_read_config32(dev, VTD_LTDPR) };

	/* The DPR lock bit has to be set sufficiently early. It looks like
	 * it cannot be set anymore after FSP-S.
	 */
	dpr.lock = 1;
	dpr.epm = 1;
	dpr.size = dpr.top - cbmem_top_mb;
	pci_write_config32(dev, VTD_LTDPR, dpr.raw);
}

/*
 * Host Memory Map:
 *
 * +--------------------------+ TOCM (2^46 - 1)
 * | Reserved                 |
 * +--------------------------+
 * | MMIOH (relocatable)      |
 * +--------------------------+
 * | PCISeg                   |
 * +--------------------------+ TOHM
 * | High DRAM Memory         |
 * +--------------------------+ 4GiB (0x100000000)
 * +--------------------------+ 0xFFFF_FFFF
 * | Firmware                 |
 * +--------------------------+ 0xFF00_0000
 * | Reserved                 |
 * +--------------------------+ 0xFEF0_0000
 * | Local xAPIC              |
 * +--------------------------+ 0xFEE0_0000
 * | HPET/LT/TPM/Others       |
 * +--------------------------+ 0xFED0_0000
 * | I/O xAPIC                |
 * +--------------------------+ 0xFEC0_0000
 * | Reserved                 |
 * +--------------------------+ 0xFEB8_0000
 * | Reserved                 |
 * +--------------------------+ 0xFEB0_0000
 * | Reserved                 |
 * +--------------------------+ 0xFE00_0000
 * | MMIOL (relocatable)      |
 * | P2SB PCR cfg BAR         | (0xfd000000 - 0xfdffffff)
 * | BAR space                | [mem 0x90000000-0xfcffffff] available for PCI devices
 * +--------------------------+ 0x9000_0000
 * | PCIe MMCFG (relocatable) | CONFIG_ECAM_MMCONF_BASE_ADDRESS 64 or 256MB
 * |                          | (0x80000000 - 0x8fffffff, 0x40000)
 * +--------------------------+ TOLM
 * | MEseg (relocatable)      | 32, 64, 128 or 256 MB (0x78000000 - 0x7fffffff, 0x20000)
 * +--------------------------+
 * | Tseg (relocatable)       | N x 8MB (0x70000000 - 0x77ffffff, 0x20000)
 * +--------------------------+
 * | DPR                      |
 * +--------------------------+ 1M aligned DPR base
 * | Unused memory            |
 * +--------------------------+ cbmem_top
 * | Reserved - CBMEM         | (0x6fffe000 - 0x6fffffff, 0x2000)
 * +--------------------------+
 * | Reserved - FSP           | (0x6fbfe000 - 0x6fffdfff, 0x400000)
 * +--------------------------+ top_of_ram (0x6fbfdfff)
 * | Low DRAM Memory          |
 * +--------------------------+ FFFFF (1MB)
 * | E & F segments           |
 * +--------------------------+ E0000
 * | C & D segments           |
 * +--------------------------+ C0000
 * | VGA & SMM Memory         |
 * +--------------------------+ A0000
 * | Conventional Memory      |
 * |      (DOS Range)         |
 * +--------------------------+ 0
 */

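/*
 * Read the memory map CSRs and register the RAM, reserved and MMIO resources
 * of this device, following the layout sketched above.
 */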
static void mc_add_dram_resources(struct device *dev, int *res_count)
{
	const struct resource *res;
	uint64_t mc_values[NUM_MAP_ENTRIES];
	uint64_t top_of_ram;
	int index = *res_count;
	struct range_entry fsp_mem;

	/* Read in the MAP registers and report their values. */
	mc_read_map_entries(dev, &mc_values[0]);
	mc_report_map_entries(dev, &mc_values[0]);

	if (mc_values[VTDBAR_REG]) {
		res = mmio_range(dev, VTD_BAR_CSR, mc_values[VTDBAR_REG], 8 * KiB);
		LOG_RESOURCE("vtd_bar", dev, res);
	}

	/* Only add dram resources once. */
	if (dev->upstream->secondary != 0 || dev->upstream->segment_group != 0)
		return;

	/* Conventional Memory (DOS region, 0x0 to 0x9FFFF) */
	res = ram_from_to(dev, index++, 0, 0xa0000);
	LOG_RESOURCE("legacy_ram", dev, res);

	/* 1MB -> top_of_ram */
	fsp_find_reserved_memory(&fsp_mem);
	top_of_ram = range_entry_base(&fsp_mem) - 1;
	res = ram_from_to(dev, index++, 1 * MiB, top_of_ram);
	LOG_RESOURCE("low_ram", dev, res);

	/* top_of_ram -> cbmem_top */
	res = ram_from_to(dev, index++, top_of_ram, (uintptr_t)cbmem_top());
	LOG_RESOURCE("cbmem_ram", dev, res);

	/* Reserve DPR region */
	union dpr_register dpr = { .raw = pci_read_config32(dev, VTD_LTDPR) };
	if (dpr.size) {
		/*
		 * cbmem_top -> DPR base:
		 * DPR has a 1M granularity so it's possible if cbmem_top is not 1M
		 * aligned that some memory does not get marked as assigned.
		 */
		res = reserved_ram_from_to(dev, index++, (uintptr_t)cbmem_top(),
					   (dpr.top - dpr.size) * MiB);
		LOG_RESOURCE("unused_dram", dev, res);

		/* DPR base -> DPR top */
		res = reserved_ram_from_to(dev, index++, (dpr.top - dpr.size) * MiB,
					   dpr.top * MiB);
		LOG_RESOURCE("dpr", dev, res);
	}

	/* Mark TSEG/SMM region as reserved */
	res = reserved_ram_from_to(dev, index++, mc_values[TSEG_BASE_REG],
				   mc_values[TSEG_LIMIT_REG] + 1);
	LOG_RESOURCE("mmio_tseg", dev, res);

	/* Mark region between TSEG - TOLM (eg. MESEG) as reserved */
	res = reserved_ram_from_to(dev, index++, mc_values[TSEG_LIMIT_REG] + 1,
				   mc_values[TOLM_REG]);
	LOG_RESOURCE("mmio_tolm", dev, res);

	if (CONFIG(SOC_INTEL_HAS_CXL)) {
		/* 4GiB -> CXL Memory */
		uint32_t gi_mem_size;
		gi_mem_size = get_generic_initiator_mem_size(); /* unit: 64MB */
		/*
		 * Memory layout when there is CXL HDM (Host-managed Device Memory):
		 * --------------  <- TOHM
		 * CXL memory regions (pds global variable records the base/size of them)
		 * Processor attached high memory
		 * --------------  <- 0x100000000 (4GB)
		 */
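		/*
		 * gi_mem_size and the pds base/size fields are in 64 MiB units,
		 * so the "<< 26" shifts below convert them to byte addresses.
		 */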
		res = upper_ram_end(dev, index++,
				    mc_values[TOHM_REG] - ((uint64_t)gi_mem_size << 26) + 1);
		LOG_RESOURCE("high_ram", dev, res);

		/* CXL Memory */
		uint8_t i;
		for (i = 0; i < pds.num_pds; i++) {
			if (pds.pds[i].pd_type == PD_TYPE_PROCESSOR)
				continue;

			if (CONFIG(OCP_VPD)) {
				unsigned long flags = IORESOURCE_CACHEABLE;
				int cxl_mode = get_cxl_mode_from_vpd();
				if (cxl_mode == CXL_SPM)
					flags |= IORESOURCE_SOFT_RESERVE;
				else
					flags |= IORESOURCE_STORED;

				res = fixed_mem_range_flags(dev, index++,
					(uint64_t)pds.pds[i].base << 26,
					(uint64_t)pds.pds[i].size << 26, flags);
				if (cxl_mode == CXL_SPM)
					LOG_RESOURCE("specific_purpose_memory", dev, res);
				else
					LOG_RESOURCE("CXL_memory", dev, res);
			}
		}
	} else {
		/* 4GiB -> TOHM */
		res = upper_ram_end(dev, index++, mc_values[TOHM_REG] + 1);
		LOG_RESOURCE("high_ram", dev, res);
	}

	/* add MMIO CFG resource */
	res = mmio_from_to(dev, index++, mc_values[MMCFG_BASE_REG],
			   mc_values[MMCFG_LIMIT_REG] + 1);
	LOG_RESOURCE("mmiocfg_res", dev, res);

	/* add Local APIC resource */
	res = mmio_range(dev, index++, LAPIC_DEFAULT_BASE, 0x00001000);
	LOG_RESOURCE("apic_res", dev, res);

	/*
	 * Add legacy region as reserved - 0xa0000 - 1MB
	 * Reserve everything between A segment and 1MB:
	 *
	 * 0xa0000 - 0xbffff: legacy VGA
	 * 0xc0000 - 0xfffff: RAM
	 */
	res = mmio_range(dev, index++, VGA_MMIO_BASE, VGA_MMIO_SIZE);
	LOG_RESOURCE("legacy_mmio", dev, res);

	res = reserved_ram_from_to(dev, index++, 0xc0000, 1 * MiB);
	LOG_RESOURCE("legacy_write_protect", dev, res);

	*res_count = index;
}

static void mmapvtd_read_resources(struct device *dev)
{
	int index = 0;

	if (CONFIG(SOC_INTEL_HAS_CXL)) {
		static bool once;
		if (!once) {
			/* Construct NUMA data structure. This is needed for CXL. */
			fill_pds();
			dump_pds();
			once = true;
		}
	}

	/* Read standard PCI resources. */
	pci_dev_read_resources(dev);

	/* set up DPR */
	configure_dpr(dev);

	/* Calculate and add DRAM resources. */
	mc_add_dram_resources(dev, &index);
}

static void mmapvtd_init(struct device *dev)
{
}

static struct device_operations mmapvtd_ops = {
	.read_resources   = mmapvtd_read_resources,
	.set_resources    = pci_dev_set_resources,
	.enable_resources = pci_dev_enable_resources,
	.init             = mmapvtd_init,
	.ops_pci          = &soc_pci_ops,
};

static const unsigned short mmapvtd_ids[] = {
	MMAP_VTD_CFG_REG_DEVID, /* Memory Map/Intel® VT-d Configuration Registers */
	0
};

static const struct pci_driver mmapvtd_driver __pci_driver = {
	.ops = &mmapvtd_ops,
	.vendor = PCI_VID_INTEL,
	.devices = mmapvtd_ids
};

#if !CONFIG(SOC_INTEL_MMAPVTD_ONLY_FOR_DPR)
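/*
 * VT-d devices on the other stacks only need their standard PCI resources and
 * the DPR programmed; system DRAM is reported by the mmapvtd device above.
 */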
static void vtd_read_resources(struct device *dev)
{
	pci_dev_read_resources(dev);

	configure_dpr(dev);
}

static struct device_operations vtd_ops = {
	.read_resources   = vtd_read_resources,
	.set_resources    = pci_dev_set_resources,
	.enable_resources = pci_dev_enable_resources,
	.ops_pci          = &soc_pci_ops,
};

/* VTD devices on other stacks */
static const struct pci_driver vtd_driver __pci_driver = {
	.ops = &vtd_ops,
	.vendor = PCI_VID_INTEL,
	.device = MMAP_VTD_STACK_CFG_REG_DEVID,
};
#endif

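/*
 * Lock down DMI3: error injection is disabled and the DMIRCBAR enable bit is
 * cleared, unless the Intel TXT code asks to skip the lockdown.
 */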
static void dmi3_init(struct device *dev)
{
	if (CONFIG(INTEL_TXT) && skip_intel_txt_lockdown())
		return;
	/* Disable error injection */
	pci_or_config16(dev, ERRINJCON, 1 << 0);

	/*
	 * DMIRCBAR registers are not TXT lockable, but the BAR enable
	 * bit is. TXT requires that DMIRCBAR be disabled for security.
	 */
	pci_and_config32(dev, DMIRCBAR, ~(1 << 0));
}

static struct device_operations dmi3_ops = {
	.read_resources   = pci_dev_read_resources,
	.set_resources    = pci_dev_set_resources,
	.enable_resources = pci_dev_enable_resources,
	.init             = dmi3_init,
	.ops_pci          = &soc_pci_ops,
};

static const struct pci_driver dmi3_driver __pci_driver = {
	.ops = &dmi3_ops,
	.vendor = PCI_VID_INTEL,
	.device = DMI3_DEVID,
};

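/*
 * Lock the IIO DFX registers (IIO_DFX_LCK_CTL) and adjust the TSWCTL0 bits
 * noted below, unless the Intel TXT code asks to skip the lockdown.
 */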
static void iio_dfx_global_init(struct device *dev)
{
	if (CONFIG(INTEL_TXT) && skip_intel_txt_lockdown())
		return;

	uint16_t reg16;
	pci_or_config16(dev, IIO_DFX_LCK_CTL, 0x3ff);
	reg16 = pci_read_config16(dev, IIO_DFX_TSWCTL0);
	reg16 &= ~(1 << 4); // allow ib mmio cfg
	reg16 &= ~(1 << 5); // ignore acs p2p ma lpbk
	reg16 |= (1 << 3); // me disable
	pci_write_config16(dev, IIO_DFX_TSWCTL0, reg16);
}

static const unsigned short iio_dfx_global_ids[] = {
	0x202d,
	0x203d,
	0
};

static struct device_operations iio_dfx_global_ops = {
	.read_resources   = pci_dev_read_resources,
	.set_resources    = pci_dev_set_resources,
	.enable_resources = pci_dev_enable_resources,
	.init             = iio_dfx_global_init,
	.ops_pci          = &soc_pci_ops,
};

static const struct pci_driver iio_dfx_global_driver __pci_driver = {
	.ops = &iio_dfx_global_ops,
	.vendor = PCI_VID_INTEL,
	.devices = iio_dfx_global_ids,
};